#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <linux/pm.h>

#include <net/irda/wrapper.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>

#include "via-ircc.h"

#define VIA_MODULE_NAME "via-ircc"
#define CHIP_IO_EXTENT 0x40

static char *driver_name = VIA_MODULE_NAME;

/* Module parameters */
static int qos_mtt_bits = 0x07;	/* minimum turn time bits passed to the QoS layer */
static int dongle_id = 0;	/* 0 = use the default/probed id, see via_ircc_open() */

module_param(dongle_id, int, 0);

/* Function prototypes */
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
			 unsigned int id);
static int via_ircc_dma_receive(struct via_ircc_cb *self);
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase);
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
					  struct net_device *dev);
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
					  struct net_device *dev);
static void via_hw_init(struct via_ircc_cb *self);
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
static int via_ircc_is_receiving(struct via_ircc_cb *self);
static int via_ircc_read_dongle_id(int iobase);

static int via_ircc_net_open(struct net_device *dev);
static int via_ircc_net_close(struct net_device *dev);
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
			      int cmd);
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id);
static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
static void hwreset(struct via_ircc_cb *self);
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
static int upload_rxdata(struct via_ircc_cb *self, int iobase);
static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
static void via_remove_one(struct pci_dev *pdev);

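/*
 * Crude busy-wait: a read of I/O port 0x80 has traditionally been used on
 * PC hardware as a bus-cycle delay of roughly one microsecond, so the loop
 * below approximates udelay() without relying on a calibrated timer.
 */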
static void iodelay(int udelay)
{
	u8 data;
	int i;

	for (i = 0; i < udelay; i++) {
		data = inb(0x80);
	}
}

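/*
 * PCI IDs of the VIA south bridges (apparently the VT8231/VT8233/VT8235
 * family) whose integrated IrDA controller this driver handles.  The last
 * field of each entry is driver_data and is not used by the probe routine.
 */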
static const struct pci_device_id via_pci_tbl[] = {
	{ PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
	{ PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, via_pci_tbl);

static struct pci_driver via_driver = {
	.name		= VIA_MODULE_NAME,
	.id_table	= via_pci_tbl,
	.probe		= via_init_one,
	.remove		= via_remove_one,
};

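/*
 * via_ircc_init()
 *
 *    Module entry point: register the PCI driver.  Probing of the
 *    individual bridges is done in via_init_one().
 */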
static int __init via_ircc_init(void)
{
	int rc;

	IRDA_DEBUG(3, "%s()\n", __func__);

	rc = pci_register_driver(&via_driver);
	if (rc < 0) {
		IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
			   __func__, rc);
		return -ENODEV;
	}
	return 0;
}

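/*
 * via_init_one()
 *
 *    PCI probe routine.  Reading LPC register 0x20 distinguishes the two
 *    integrated controller variants (handled below as "3076" and "3096"):
 *    for the 3076 the FIR I/O base, IRQ and DMA channels are read from the
 *    Super I/O (LPC) configuration registers, for the 3096 from the PCI
 *    configuration space of the bridge.  The gathered resources are then
 *    handed to via_ircc_open().
 */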
static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
{
	int rc;
	u8 temp, oldPCI_40, oldPCI_44, bTmp, bTmp1;
	u16 Chipset, FirDRQ1, FirDRQ0, FirIRQ, FirIOBase;
	chipio_t info;

	IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);

	rc = pci_enable_device(pcidev);
	if (rc) {
		IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
		return -ENODEV;
	}

	/* Identify the controller variant */
	if (ReadLPCReg(0x20) != 0x3C)
		Chipset = 0x3096;
	else
		Chipset = 0x3076;

	if (Chipset == 0x3076) {
		IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);

		WriteLPCReg(7, 0x0c);
		temp = ReadLPCReg(0x30);	/* function enabled? */
		if ((temp & 0x01) == 1) {
			WriteLPCReg(0x1d, 0x82);
			WriteLPCReg(0x23, 0x18);
			temp = ReadLPCReg(0xF0);
			if ((temp & 0x01) == 0) {
				temp = ReadLPCReg(0x74) & 0x03;		/* DMA */
				FirDRQ0 = temp + 4;
				temp = (ReadLPCReg(0x74) & 0x0C) >> 2;
				FirDRQ1 = temp + 4;
			} else {	/* only one DMA channel */
				temp = (ReadLPCReg(0x74) & 0x0C) >> 2;	/* DMA */
				FirDRQ0 = temp + 4;
				FirDRQ1 = FirDRQ0;
			}
			FirIRQ = ReadLPCReg(0x70) & 0x0f;		/* IRQ */
			FirIOBase = ReadLPCReg(0x60) << 8;		/* I/O base, high byte */
			FirIOBase = FirIOBase | ReadLPCReg(0x61);	/* low byte */
			info.fir_base = FirIOBase;
			info.irq = FirIRQ;
			info.dma = FirDRQ1;
			info.dma2 = FirDRQ0;
			pci_read_config_byte(pcidev, 0x40, &bTmp);
			pci_write_config_byte(pcidev, 0x40, (bTmp | 0x08) & 0xfe);
			pci_read_config_byte(pcidev, 0x42, &bTmp);
			pci_write_config_byte(pcidev, 0x42, bTmp | 0xf0);
			pci_write_config_byte(pcidev, 0x5a, 0xc0);
			WriteLPCReg(0x28, 0x70);
			rc = via_ircc_open(pcidev, &info, 0x3076);
		} else
			rc = -ENODEV;	/* IR function not enabled */
	} else {
		IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);

		pci_read_config_byte(pcidev, 0x67, &bTmp);	/* function enabled? */
		if ((bTmp & 0x01) == 1) {
			/* Configure the function through PCI config space */
			pci_read_config_byte(pcidev, 0x42, &oldPCI_40);
			pci_write_config_byte(pcidev, 0x42, oldPCI_40 | 0x80);
			pci_read_config_byte(pcidev, 0x40, &oldPCI_40);
			pci_write_config_byte(pcidev, 0x40, oldPCI_40 & 0xf7);
			pci_read_config_byte(pcidev, 0x44, &oldPCI_44);
			pci_write_config_byte(pcidev, 0x44, 0x4e);

			if ((bTmp & 0x02) == 0) {	/* two DMA channels */
				pci_read_config_byte(pcidev, 0x44, &bTmp1);	/* DMA */
				FirDRQ0 = (bTmp1 & 0x30) >> 4;
				pci_read_config_byte(pcidev, 0x44, &bTmp1);
				FirDRQ1 = (bTmp1 & 0xc0) >> 6;
			} else {	/* only one DMA channel */
				pci_read_config_byte(pcidev, 0x44, &bTmp1);	/* DMA */
				FirDRQ0 = (bTmp1 & 0x30) >> 4;
				FirDRQ1 = 0;
			}
			pci_read_config_byte(pcidev, 0x47, &bTmp1);	/* IRQ */
			FirIRQ = bTmp1 & 0x0f;

			pci_read_config_byte(pcidev, 0x69, &bTmp);	/* I/O base */
			FirIOBase = bTmp << 8;
			pci_read_config_byte(pcidev, 0x68, &bTmp);
			FirIOBase = (FirIOBase | bTmp) & 0xfff0;

			info.fir_base = FirIOBase;
			info.irq = FirIRQ;
			info.dma = FirDRQ1;
			info.dma2 = FirDRQ0;
			rc = via_ircc_open(pcidev, &info, 0x3096);
		} else
			rc = -ENODEV;	/* IR function not enabled */
	}

	IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
	return rc;
}

static void __exit via_ircc_cleanup(void)
{
	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Deregister the PCI driver; via_remove_one() does per-device teardown */
	pci_unregister_driver(&via_driver);
}

static const struct net_device_ops via_ircc_sir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_sir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};

static const struct net_device_ops via_ircc_fir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_fir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};

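/*
 * via_ircc_open()
 *
 *    Allocate and set up one IrDA network device: reserve the FIR I/O
 *    region, initialise QoS (supported baud rates depend on the dongle),
 *    allocate coherent DMA buffers for RX and TX, and register the netdev
 *    in SIR mode at 9600 baud.
 */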
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
{
	struct net_device *dev;
	struct via_ircc_cb *self;
	int err;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Allocate new instance of the driver */
	dev = alloc_irdadev(sizeof(struct via_ircc_cb));
	if (dev == NULL)
		return -ENOMEM;

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);

	pci_set_drvdata(pdev, self);

	/* Record the I/O resources discovered by the probe routine */
	self->io.cfg_base = info->cfg_base;
	self->io.fir_base = info->fir_base;
	self->io.irq = info->irq;
	self->io.fir_ext = CHIP_IO_EXTENT;
	self->io.dma = info->dma;
	self->io.dma2 = info->dma2;
	self->io.fifo_size = 32;
	self->chip_id = id;
	self->st_fifo.len = 0;
	self->RxDataReady = 0;

	/* Reserve the ioports that we need */
	if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __func__, self->io.fir_base);
		err = -ENODEV;
		goto err_out1;
	}

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* Use the probed/default dongle id unless one was given as a module parameter */
	if (!dongle_id)
		dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
	self->io.dongle_id = dongle_id;

	/* Supported baud rates depend on the dongle */
	switch (self->io.dongle_id) {
	case 0x0d:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
		    IR_576000 | IR_1152000 | (IR_4000000 << 8);
		break;
	default:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
		break;
	}

	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Sizes of the coherent DMA buffers used for RX and TX */
	self->rx_buff.truesize = 14384 + 2048;
	self->tx_buff.truesize = 14384 + 2048;

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}

	self->tx_buff.head =
		dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out3;
	}

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Use the SIR handlers until the link is negotiated to a higher speed */
	dev->netdev_ops = &via_ircc_sir_ops;

	err = register_netdev(dev);
	if (err)
		goto err_out4;

	IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);

	/* Bring up the hardware at 9600 baud SIR */
	self->io.speed = 9600;
	via_hw_init(self);
	return 0;
err_out4:
	dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
err_out3:
	dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
err_out2:
	release_region(self->io.fir_base, self->io.fir_ext);
err_out1:
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return err;
}

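/*
 * via_remove_one()
 *
 *    PCI remove callback: reset the controller, unregister the netdev and
 *    release the I/O region, the DMA buffers and the PCI device.
 */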
static void via_remove_one(struct pci_dev *pdev)
{
	struct via_ircc_cb *self = pci_get_drvdata(pdev);
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;

	ResetChip(iobase, 5);

	unregister_netdev(self->netdev);

	IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);
	if (self->tx_buff.head)
		dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);
	if (self->rx_buff.head)
		dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);
	pci_set_drvdata(pdev, NULL);

	free_netdev(self->netdev);

	pci_disable_device(pdev);
}

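/*
 * via_hw_init()
 *
 *    Bring the controller to a known state: mask the FIFO interrupts that
 *    are not used, reset the chip, and program SIR mode at 9600 baud with
 *    16-bit CRC before giving the dongle its initial speed.
 */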
static void via_hw_init(struct via_ircc_cb *self)
{
	int iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	SetMaxRxPacketSize(iobase, 0x0fff);

	EnRXFIFOReadyInt(iobase, OFF);
	EnRXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOUnderrunEOMInt(iobase, ON);
	EnTXFIFOReadyInt(iobase, OFF);
	InvertTX(iobase, OFF);
	InvertRX(iobase, OFF);

	if (ReadLPCReg(0x20) == 0x3c)
		WriteLPCReg(0xF0, 0);

	EnRXSpecInt(iobase, ON);

	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);

	self->io.speed = 9600;
	self->st_fifo.len = 0;

	via_ircc_change_dongle_speed(iobase, self->io.speed,
				     self->io.dongle_id);

	WriteReg(iobase, I_ST_CT_0, 0x80);
}

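/*
 * via_ircc_read_dongle_id()
 *
 *    Hardware probing of the attached dongle is not implemented; warn and
 *    fall back to a fixed default ID (9).  Users are expected to pass the
 *    correct value via the 'dongle_id' module parameter.
 */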
static int via_ircc_read_dongle_id(int iobase)
{
	int dongle_id = 9;

	IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
	return dongle_id;
}

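/*
 * via_ircc_change_dongle_speed()
 *
 *    Program the transceiver (dongle) for the current modulation.  Each
 *    supported dongle_id needs its own sequence of RX-pin, invert and GPIO
 *    settings; the mode actually selected is taken from the controller
 *    state (IsSIROn/IsMIROn/IsFIROn), not from the 'speed' argument, which
 *    is only used for the debug message.
 */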
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id)
{
	u8 mode = 0;

	IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
		   __func__, speed, iobase, dongle_id);

	switch (dongle_id) {

	case 0x08:
		UseOneRX(iobase, ON);
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);

		EnRX2(iobase, ON);
		EnGPIOtoRX2(iobase, OFF);

		if (IsSIROn(iobase)) {
			SlowIRRXLowActive(iobase, ON);
			udelay(1000);
			SlowIRRXLowActive(iobase, OFF);
		} else {
			if (IsMIROn(iobase)) {
				SlowIRRXLowActive(iobase, OFF);
				udelay(20);
			} else {
				if (IsFIROn(iobase)) {
					SlowIRRXLowActive(iobase, OFF);
					udelay(20);
				}
			}
		}
		break;

	case 0x09:
		UseOneRX(iobase, ON);
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);

		EnRX2(iobase, ON);
		EnGPIOtoRX2(iobase, OFF);
		if (IsSIROn(iobase)) {
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			SlowIRRXLowActive(iobase, OFF);
		}
		if (IsMIROn(iobase)) {
			SlowIRRXLowActive(iobase, OFF);
			udelay(20);
			SlowIRRXLowActive(iobase, ON);
		} else {
			if (IsFIROn(iobase)) {
				SlowIRRXLowActive(iobase, OFF);
				WriteTX(iobase, ON);
				udelay(20);
				SlowIRRXLowActive(iobase, ON);
				udelay(20);
				WriteTX(iobase, OFF);
			}
		}
		break;

	case 0x0d:
		UseOneRX(iobase, OFF);
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);
		SlowIRRXLowActive(iobase, OFF);
		if (IsSIROn(iobase)) {
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);
		} else {	/* MIR and FIR */
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);
		}
		break;

	case 0x11:	/* Temic TFDS4500 */
		IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);

		UseOneRX(iobase, ON);
		InvertTX(iobase, OFF);
		InvertRX(iobase, ON);

		EnRX2(iobase, ON);
		EnGPIOtoRX2(iobase, OFF);

		if (IsSIROn(iobase)) {
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			SlowIRRXLowActive(iobase, OFF);
		} else {
			IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
		}
		break;

	case 0x0ff:
		if (IsSIROn(iobase))
			mode = 0;
		else if (IsMIROn(iobase))
			mode = 1;
		else if (IsFIROn(iobase))
			mode = 2;
		else if (IsVFIROn(iobase))
			mode = 5;
		SI_SetMode(iobase, mode);
		break;

	default:
		IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
			   __func__, dongle_id);
	}
}

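/*
 * via_ircc_change_speed()
 *
 *    Switch the controller and dongle to a new link speed.  SIR rates use
 *    a divider derived from 115200 baud, MIR/FIR/VFIR select the faster
 *    modulations directly; finally the netdev_ops are swapped between the
 *    SIR and FIR transmit paths depending on whether the new speed is
 *    above 115200 bps.
 */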
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
	struct net_device *dev = self->netdev;
	u16 iobase;
	u8 value = 0, bTmp;

	iobase = self->io.fir_base;

	self->io.speed = speed;
	IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);

	WriteReg(iobase, I_ST_CT_0, 0x0);

	/* Controller mode and SIR baud-rate divider */
	switch (speed) {
	case 2400:
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:
		value = (115200 / speed) - 1;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 576000:
		/* Note: value 0 is the same divider as 115200 */
		value = 0;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 1152000:
		value = 0;
		SetMIR(iobase, ON);
		break;
	case 4000000:
		value = 0;
		SetFIR(iobase, ON);
		SetPulseWidth(iobase, 0);
		SetSendPreambleCount(iobase, 14);
		CRC16(iobase, OFF);
		EnTXCRC(iobase, ON);
		break;
	case 16000000:
		value = 0;
		SetVFIR(iobase, ON);
		break;
	default:
		value = 0;
		break;
	}

	/* Program the divider into I_CF_H_1[7:2], preserving bits 1:0 */
	bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
	bTmp |= value << 2;
	WriteReg(iobase, I_CF_H_1, bTmp);

	/* Some dongles need to be told about speed changes */
	via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

	SetFIFO(iobase, 64);

	WriteReg(iobase, I_ST_CT_0, 0x80);

	if (IsSIROn(iobase)) {
		SIRFilter(iobase, ON);
		SIRRecvAny(iobase, ON);
	} else {
		SIRFilter(iobase, OFF);
		SIRRecvAny(iobase, OFF);
	}

	if (speed > 115200) {
		/* Install the FIR transmit handler */
		dev->netdev_ops = &via_ircc_fir_ops;
		via_ircc_dma_receive(self);
	} else {
		/* Install the SIR transmit handler */
		dev->netdev_ops = &via_ircc_sir_ops;
	}
	netif_wake_queue(dev);
}

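/*
 * via_ircc_hard_xmit_sir()
 *
 *    SIR transmit path: handle pending speed changes, async-wrap the skb
 *    into the TX DMA buffer and start a DMA transfer of the wrapped frame.
 */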
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct via_ircc_cb *self;
	unsigned long flags;
	u16 iobase;
	__u32 speed;

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);
	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);

	spin_lock_irqsave(&self->lock, flags);
	self->tx_buff.data = self->tx_buff.head;
	self->tx_buff.len =
	    async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize);

	dev->stats.tx_bytes += self->tx_buff.len;
	/* Send this frame with the old speed */
	SetBaudRate(iobase, self->io.speed);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);

	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_TX_MODE);

	SetSendByte(iobase, self->tx_buff.len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

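/*
 * via_ircc_hard_xmit_fir()
 *
 *    FIR transmit path: queue the frame in the software TX FIFO (copied
 *    into the coherent TX buffer) and kick off the DMA transmit.
 */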
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct via_ircc_cb *self;
	u16 iobase;
	__u32 speed;
	unsigned long flags;

	self = netdev_priv(dev);
	iobase = self->io.fir_base;

	if (self->st_fifo.len)
		return NETDEV_TX_OK;
	if (self->chip_id == 0x3076)
		iodelay(1500);
	else
		udelay(1500);
	netif_stop_queue(dev);
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		if (!skb->len) {
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}
	spin_lock_irqsave(&self->lock, flags);
	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;

	self->tx_fifo.tail += skb->len;
	dev->stats.tx_bytes += skb->len;
	skb_copy_from_linear_data(skb,
				  self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
	self->tx_fifo.len++;
	self->tx_fifo.free++;

	via_ircc_dma_xmit(self, iobase);

	dev->trans_start = jiffies;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&self->lock, flags);
	return NETDEV_TX_OK;
}

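/*
 * via_ircc_dma_xmit()
 *
 *    Program the DMA controller with the next queued TX frame and start
 *    the transmitter; completion is reported through the TX interrupt.
 */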
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
	EnTXDMA(iobase, OFF);
	self->io.direction = IO_XMIT;
	EnPhys(iobase, ON);
	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);
	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);
	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);
	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
	IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
		   __func__, self->tx_fifo.ptr,
		   self->tx_fifo.queue[self->tx_fifo.ptr].len,
		   self->tx_fifo.len);

	SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);
	return 0;
}

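/*
 * via_ircc_dma_xmit_complete()
 *
 *    Called from the interrupt handler when a DMA transmit has finished:
 *    update statistics (resetting the chip on a TX FIFO underrun), apply a
 *    pending speed change and restart the netif queue.
 */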
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
	int iobase;
	int ret = TRUE;
	u8 Tx_status;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;

	/* Bit 3 of the TX status flags a TX FIFO underrun (see the
	 * interrupt handler's decoding of iTxIntType). */
	Tx_status = GetTXStatus(iobase);
	if (Tx_status & 0x08) {
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
		hwreset(self);
	} else {
		self->netdev->stats.tx_packets++;
		ResetChip(iobase, 3);
		ResetChip(iobase, 4);
	}

	/* Check if we need to change the speed */
	if (self->new_speed) {
		via_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Advance past the frame just sent (FIR queues frames in tx_fifo) */
	if (IsFIROn(iobase)) {
		if (self->tx_fifo.len) {
			self->tx_fifo.len--;
			self->tx_fifo.ptr++;
		}
	}
	IRDA_DEBUG(1,
		   "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
		   __func__,
		   self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);

	/* The software TX FIFO is fully drained after each completion, so reset it */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	netif_wake_queue(self->netdev);

	return ret;
}

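/*
 * via_ircc_dma_receive()
 *
 *    Reset the software TX/status FIFOs and arm a full-buffer DMA receive
 *    on the second DMA channel; reception is completed from the interrupt
 *    handler.
 */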
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
	int iobase;

	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
	self->RxDataReady = 0;
	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;
	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	EnPhys(iobase, ON);
	EnableTX(iobase, OFF);
	EnableRX(iobase, ON);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, ON);
	irda_setup_dma(self->io.dma2, self->rx_buff_dma,
		       self->rx_buff.truesize, DMA_RX_MODE);
	TXStart(iobase, OFF);
	RXStart(iobase, ON);

	return 0;
}

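/*
 * via_ircc_dma_receive_complete()
 *
 *    Hand a completed DMA receive up the stack.  Below 4 Mbps the frame is
 *    copied straight out of the DMA buffer (the 3076 variant apparently
 *    stores one data byte per 16-bit slot, hence the i * 2 stride); at
 *    4 Mbps the frame is first accounted in the status FIFO and
 *    sanity-checked before the skb is built.
 */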
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len, i;
	u8 status = 0;

	iobase = self->io.fir_base;
	st_fifo = &self->st_fifo;

	if (self->io.speed < 4000000) {	/* speeds below 4 Mbps */
		len = GetRecvByte(iobase, self);
		skb = dev_alloc_skb(len + 1);
		if (skb == NULL)
			return FALSE;
		/* Make sure IP header gets aligned */
		skb_reserve(skb, 1);
		skb_put(skb, len - 2);
		if (self->chip_id == 0x3076) {
			for (i = 0; i < len - 2; i++)
				skb->data[i] = self->rx_buff.data[i * 2];
		} else {
			if (self->chip_id == 0x3096) {
				for (i = 0; i < len - 2; i++)
					skb->data[i] = self->rx_buff.data[i];
			}
		}
		/* Move to next frame */
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);
		return TRUE;
	} else {		/* 4 Mbps */
		len = GetRecvByte(iobase, self);
		if (len == 0)
			return TRUE;	/* nothing received yet */
		if (((len - 4) < 2) || ((len - 4) > 2048)) {
			IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
				   __func__, len, RxCurCount(iobase, self),
				   self->RxLastCount);
			hwreset(self);
			return FALSE;
		}
		IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
			   __func__,
			   st_fifo->len, len - 4, RxCurCount(iobase, self));

		st_fifo->entries[st_fifo->tail].status = status;
		st_fifo->entries[st_fifo->tail].len = len;
		st_fifo->pending_bytes += len;
		st_fifo->tail++;
		st_fifo->len++;
		if (st_fifo->tail > MAX_RX_WINDOW)
			st_fifo->tail = 0;
		self->RxDataReady = 0;

		/* Stop the receiver while the status FIFO entry is delivered */
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);

		/* Pop the oldest entry from the status FIFO */
		if (st_fifo->head > MAX_RX_WINDOW)
			st_fifo->head = 0;
		status = st_fifo->entries[st_fifo->head].status;
		len = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		skb = dev_alloc_skb(len + 1 - 4);
		/*
		 * If no skb can be built (or the frame is implausibly short),
		 * drop the frame so the receiver state stays consistent.
		 */
		if ((skb == NULL) || (skb->data == NULL) ||
		    (self->rx_buff.data == NULL) || (len < 6)) {
			self->netdev->stats.rx_dropped++;
			kfree_skb(skb);
			return TRUE;
		}
		skb_reserve(skb, 1);
		skb_put(skb, len - 4);

		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
		IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
			   len - 4, self->rx_buff.data);

		/* Move to next frame */
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);
	}
	return TRUE;
}

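/*
 * upload_rxdata()
 *
 *    Build an skb directly from the current contents of the RX DMA buffer
 *    and pass it to the stack; used from the receive-timer path when the
 *    hardware reports received data after the status FIFO has been flushed.
 */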
static int upload_rxdata(struct via_ircc_cb *self, int iobase)
{
	struct sk_buff *skb;
	int len;
	struct st_fifo *st_fifo;

	st_fifo = &self->st_fifo;

	len = GetRecvByte(iobase, self);

	IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);

	if ((len - 4) < 2) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}

	skb = dev_alloc_skb(len + 1);
	if (skb == NULL) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}
	skb_reserve(skb, 1);
	skb_put(skb, len - 4 + 1);
	skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
	st_fifo->tail++;
	st_fifo->len++;
	if (st_fifo->tail > MAX_RX_WINDOW)
		st_fifo->tail = 0;

	self->rx_buff.data += len;
	self->netdev->stats.rx_bytes += len;
	self->netdev->stats.rx_packets++;
	skb->dev = self->netdev;
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	netif_rx(skb);
	if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
		RXStart(iobase, ON);
	} else {
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);
	}
	return TRUE;
}

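/*
 * RxTimerHandler()
 *
 *    Receive timer path: if the receiver is still busy, re-arm the timer;
 *    otherwise flush every frame queued in the status FIFO up the stack
 *    and, if the hardware reports more data, upload it and re-arm DMA
 *    reception.
 */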
static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len;
	u8 status;

	st_fifo = &self->st_fifo;

	if (CkRxRecv(iobase, self)) {
		/* Data is still coming in: wait a bit longer */
		self->RetryCount = 0;
		SetTimer(iobase, 20);
		self->RxDataReady++;
		return FALSE;
	} else
		self->RetryCount++;

	if ((self->RetryCount >= 1) ||
	    ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
	    (st_fifo->len >= (MAX_RX_WINDOW))) {
		while (st_fifo->len > 0) {	/* upload queued frames */
			/* Pop the oldest entry from the status FIFO */
			if (st_fifo->head > MAX_RX_WINDOW)
				st_fifo->head = 0;
			status = st_fifo->entries[st_fifo->head].status;
			len = st_fifo->entries[st_fifo->head].len;
			st_fifo->head++;
			st_fifo->len--;

			skb = dev_alloc_skb(len + 1 - 4);
			/*
			 * If no skb can be built, drop this frame and keep
			 * draining the FIFO.
			 */
			if ((skb == NULL) || (skb->data == NULL) ||
			    (self->rx_buff.data == NULL) || (len < 6)) {
				self->netdev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, 1);
			skb_put(skb, len - 4);
			skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);

			IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
				   len - 4, st_fifo->head);

			/* Move to next frame */
			self->rx_buff.data += len;
			self->netdev->stats.rx_bytes += len;
			self->netdev->stats.rx_packets++;
			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}
		self->RetryCount = 0;

		IRDA_DEBUG(2,
			   "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
			   __func__,
			   GetHostStatus(iobase), GetRXStatus(iobase));

		/*
		 * If the hardware still indicates received data, upload it
		 * and re-arm DMA reception once the TX queue is idle.
		 */
		if ((GetRXStatus(iobase) & 0x10) &&
		    (RxCurCount(iobase, self) != self->RxLastCount)) {
			upload_rxdata(self, iobase);
			if (irda_device_txqueue_empty(self->netdev))
				via_ircc_dma_receive(self);
		}
	} else
		SetTimer(iobase, 4);
	return TRUE;
}

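/*
 * via_ircc_interrupt()
 *
 *    Interrupt handler.  The host status register is decoded into timer,
 *    TX and RX events: timer interrupts drive the receive-timer logic, a
 *    TX end-of-message completes DMA transmits, and an RX end-of-frame
 *    hands received frames to via_ircc_dma_receive_complete(); error
 *    conditions fall back to a chip reset.
 */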
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct via_ircc_cb *self = netdev_priv(dev);
	int iobase;
	u8 iHostIntType, iRxIntType, iTxIntType;

	iobase = self->io.fir_base;
	spin_lock(&self->lock);
	iHostIntType = GetHostStatus(iobase);

	IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
		   __func__, iHostIntType,
		   (iHostIntType & 0x40) ? "Timer" : "",
		   (iHostIntType & 0x20) ? "Tx" : "",
		   (iHostIntType & 0x10) ? "Rx" : "",
		   (iHostIntType & 0x0e) >> 1);

	if ((iHostIntType & 0x40) != 0) {	/* Timer event */
		self->EventFlag.TimeOut++;
		ClearTimerInt(iobase, 1);
		if (self->io.direction == IO_XMIT) {
			via_ircc_dma_xmit(self, iobase);
		}
		if (self->io.direction == IO_RECV) {
			/*
			 * If RxDataReady has piled up, the receiver looks
			 * stuck: reset the chip; otherwise let the receive
			 * timer handler deal with it.
			 */
			if (self->RxDataReady > 30) {
				hwreset(self);
				if (irda_device_txqueue_empty(self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {
				RxTimerHandler(self, iobase);
			}
		}
	}
	if ((iHostIntType & 0x20) != 0) {	/* Tx event */
		iTxIntType = GetTXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
			   __func__, iTxIntType,
			   (iTxIntType & 0x08) ? "FIFO underr." : "",
			   (iTxIntType & 0x04) ? "EOM" : "",
			   (iTxIntType & 0x02) ? "FIFO ready" : "",
			   (iTxIntType & 0x01) ? "Early EOM" : "");

		if (iTxIntType & 0x4) {
			self->EventFlag.EOMessage++;
			if (via_ircc_dma_xmit_complete(self)) {
				if (irda_device_txqueue_empty(self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {
				self->EventFlag.Unknown++;
			}
		}
	}
	if ((iHostIntType & 0x10) != 0) {	/* Rx event */
		iRxIntType = GetRXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
			   __func__, iRxIntType,
			   (iRxIntType & 0x80) ? "PHY err." : "",
			   (iRxIntType & 0x40) ? "CRC err" : "",
			   (iRxIntType & 0x20) ? "FIFO overr." : "",
			   (iRxIntType & 0x10) ? "EOF" : "",
			   (iRxIntType & 0x08) ? "RxData" : "",
			   (iRxIntType & 0x02) ? "RxMaxLen" : "",
			   (iRxIntType & 0x01) ? "SIR bad" : "");
		if (!iRxIntType)
			IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);

		if (iRxIntType & 0x10) {	/* EOF: a complete frame was received */
			if (via_ircc_dma_receive_complete(self, iobase)) {
				/* re-arm reception */
				via_ircc_dma_receive(self);
			}
		} else {	/* RX error */
			IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
				   __func__, iRxIntType, iHostIntType,
				   RxCurCount(iobase, self),
				   self->RxLastCount);

			if (iRxIntType & 0x20) {	/* FIFO overrun */
				ResetChip(iobase, 0);
				ResetChip(iobase, 1);
			} else {	/* other error */
				if (iRxIntType != 0x08)
					hwreset(self);
			}
			via_ircc_dma_receive(self);
		}
	}
	spin_unlock(&self->lock);
	return IRQ_RETVAL(iHostIntType);
}

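/*
 * hwreset()
 *
 *    Full chip reset followed by re-initialisation to SIR/9600, then
 *    restore the speed the link was using before the reset.
 */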
static void hwreset(struct via_ircc_cb *self)
{
	int iobase;

	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	via_ircc_change_speed(self, self->io.speed);

	self->st_fifo.len = 0;
}

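/*
 * via_ircc_is_receiving()
 *
 *    Return TRUE if the receiver currently has data pending (used by the
 *    SIOCGRECEIVING ioctl).
 */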
static int via_ircc_is_receiving(struct via_ircc_cb *self)
{
	int status = FALSE;
	int iobase;

	IRDA_ASSERT(self != NULL, return FALSE;);

	iobase = self->io.fir_base;
	if (CkRxRecv(iobase, self))
		status = TRUE;

	IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);

	return status;
}

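/*
 * via_ircc_net_open()
 *
 *    Start the device: grab the IRQ and DMA channel(s), enable interrupts,
 *    arm DMA reception, start the queue and open an IrLAP instance for
 *    this interface.
 */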
static int via_ircc_net_open(struct net_device *dev)
{
	struct via_ircc_cb *self;
	int iobase;
	char hwname[32];

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);
	dev->stats.rx_packets = 0;
	IRDA_ASSERT(self != NULL, return 0;);
	iobase = self->io.fir_base;
	if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
		IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
			     self->io.irq);
		return -EAGAIN;
	}
	/*
	 * Allocate the DMA channel(s) after the IRQ, and undo the IRQ
	 * allocation on failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
			     self->io.dma);
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}
	if (self->io.dma2 != self->io.dma) {
		if (request_dma(self->io.dma2, dev->name)) {
			IRDA_WARNING("%s, unable to allocate dma2=%d\n",
				     driver_name, self->io.dma2);
			free_irq(self->io.irq, dev);
			free_dma(self->io.dma);
			return -EAGAIN;
		}
	}

	/* turn on interrupts */
	EnAllInt(iobase, ON);
	EnInternalLoop(iobase, OFF);
	EnExternalLoop(iobase, OFF);

	/* Ready to play! */
	via_ircc_dma_receive(self);

	netif_start_queue(dev);

	/*
	 * Open a new IrLAP layer instance, now that everything should be
	 * initialized properly.
	 */
	sprintf(hwname, "VIA @ 0x%x", iobase);
	self->irlap = irlap_open(dev, &self->qos, hwname);

	self->RxLastCount = 0;

	return 0;
}

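/*
 * via_ircc_net_close()
 *
 *    Stop the device: stop the queue, close IrLAP, disable DMA and
 *    interrupts, and release the IRQ and DMA channel(s).
 */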
static int via_ircc_net_close(struct net_device *dev)
{
	struct via_ircc_cb *self;
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return 0;);

	/* Stop device */
	netif_stop_queue(dev);
	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;
	iobase = self->io.fir_base;
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, OFF);
	DisableDmaChannel(self->io.dma);

	/* Disable interrupts */
	EnAllInt(iobase, OFF);
	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);
	if (self->io.dma2 != self->io.dma)
		free_dma(self->io.dma2);

	return 0;
}

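/*
 * via_ircc_net_ioctl()
 *
 *    Process IrDA-specific ioctls: change speed, flag the media as busy,
 *    or report whether a reception is in progress.
 */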
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
			      int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct via_ircc_cb *self;
	unsigned long flags;
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
		   cmd);

	/* Disable interrupts & save flags */
	spin_lock_irqsave(&self->lock, flags);
	switch (cmd) {
	case SIOCSBANDWIDTH:	/* Set bandwidth */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		via_ircc_change_speed(self, irq->ifr_baudrate);
		break;
	case SIOCSMEDIABUSY:	/* Set media busy */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING:	/* Check if we are receiving right now */
		irq->ifr_receiving = via_ircc_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
out:
	spin_unlock_irqrestore(&self->lock, flags);
	return ret;
}

MODULE_AUTHOR("VIA Technologies, Inc.");
MODULE_DESCRIPTION("VIA IrDA Device Driver");
MODULE_LICENSE("GPL");

module_init(via_ircc_init);
module_exit(via_ircc_cleanup);