/* ltpc.c - Apple/Farallon LocalTalk PC card driver */
static int debug;
#define DEBUG_VERBOSE 1
#define DEBUG_UPPER 2
#define DEBUG_LOWER 4

static int io;
static int irq;
static int dma;

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ltalk.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/atalk.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <asm/dma.h>
#include <asm/io.h>

#include "ltpc.h"

static DEFINE_SPINLOCK(txqueue_lock);
static DEFINE_SPINLOCK(mbox_lock);

static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
                   void *dbuf, int dbuflen);
static int sendup_buffer(struct net_device *dev);

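/*
 * Allocate a DMA-capable buffer with __get_dma_pages() so the ISA DMA
 * controller can reach it (low, physically contiguous memory).
 */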
static unsigned long dma_mem_alloc(int size)
{
        int order = get_order(size);

        return __get_dma_pages(GFP_KERNEL, order);
}

static unsigned char *ltdmabuf;
static unsigned char *ltdmacbuf;

struct ltpc_private {
        struct atalk_addr my_addr;
};

struct xmitQel {
        struct xmitQel *next;

        unsigned char *cbuf;
        short cbuflen;

        unsigned char *dbuf;
        short dbuflen;
        unsigned char QWrite;
        unsigned char mailbox;
};

static struct xmitQel *xmQhd, *xmQtl;

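/*
 * Simple singly linked transmit queue, protected by txqueue_lock.
 * enQ() appends a queue element at the tail, deQ() removes from the head;
 * both dump the queued command bytes when DEBUG_LOWER is set.
 */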
static void enQ(struct xmitQel *qel)
{
        unsigned long flags;

        qel->next = NULL;

        spin_lock_irqsave(&txqueue_lock, flags);
        if (xmQtl)
                xmQtl->next = qel;
        else
                xmQhd = qel;
        xmQtl = qel;
        spin_unlock_irqrestore(&txqueue_lock, flags);

        if (debug & DEBUG_LOWER)
                printk("enqueued a 0x%02x command\n", qel->cbuf[0]);
}

static struct xmitQel *deQ(void)
{
        unsigned long flags;
        int i;
        struct xmitQel *qel = NULL;

        spin_lock_irqsave(&txqueue_lock, flags);
        if (xmQhd) {
                qel = xmQhd;
                xmQhd = qel->next;
                if (!xmQhd)
                        xmQtl = NULL;
        }
        spin_unlock_irqrestore(&txqueue_lock, flags);

        if ((debug & DEBUG_LOWER) && qel) {
                int n;

                printk(KERN_DEBUG "ltpc: dequeued command ");
                n = qel->cbuflen;
                if (n > 100)
                        n = 100;
                for (i = 0; i < n; i++)
                        printk("%02x ", qel->cbuf[i]);
                printk("\n");
        }

        return qel;
}

static struct xmitQel qels[16];

static unsigned char mailbox[16];
static unsigned char mboxinuse[16];

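/*
 * Busy-wait (up to 200000 * 100us) for the status byte at base_addr+6 to
 * change away from the value 'c'.  Returns 0 once it changes, 1 on timeout.
 */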
static int wait_timeout(struct net_device *dev, int c)
{
        int i;

        for (i = 0; i < 200000; i++) {
                if (c != inb_p(dev->base_addr + 6))
                        return 0;
                udelay(100);
        }
        return 1;
}

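/*
 * Claim a free mailbox slot (1..15); slot 0 is reserved for the GETRESULT
 * command issued from idle().  Returns 0 if no slot is free.
 */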
static int getmbox(void)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&mbox_lock, flags);
        for (i = 1; i < 16; i++) {
                if (!mboxinuse[i]) {
                        mboxinuse[i] = 1;
                        spin_unlock_irqrestore(&mbox_lock, flags);
                        return i;
                }
        }
        spin_unlock_irqrestore(&mbox_lock, flags);
        return 0;
}

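/*
 * The handle* helpers below service the card's status codes.  Each one
 * programs the ISA DMA channel for the appropriate direction and length
 * (50 bytes for the command buffer, 800 bytes for the data buffer), reads
 * base+3 and base+2 to kick off the transfer, and then waits for the status
 * byte to move on from the code being handled.
 */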
static void handlefc(struct net_device *dev)
{
        int dma = dev->dma;
        int base = dev->base_addr;
        unsigned long flags;

        flags = claim_dma_lock();
        disable_dma(dma);
        clear_dma_ff(dma);
        set_dma_mode(dma, DMA_MODE_READ);
        set_dma_addr(dma, virt_to_bus(ltdmacbuf));
        set_dma_count(dma, 50);
        enable_dma(dma);
        release_dma_lock(flags);

        inb_p(base + 3);
        inb_p(base + 2);

        if (wait_timeout(dev, 0xfc))
                printk("timed out in handlefc\n");
}

static void handlefd(struct net_device *dev)
{
        int dma = dev->dma;
        int base = dev->base_addr;
        unsigned long flags;

        flags = claim_dma_lock();
        disable_dma(dma);
        clear_dma_ff(dma);
        set_dma_mode(dma, DMA_MODE_READ);
        set_dma_addr(dma, virt_to_bus(ltdmabuf));
        set_dma_count(dma, 800);
        enable_dma(dma);
        release_dma_lock(flags);

        inb_p(base + 3);
        inb_p(base + 2);

        if (wait_timeout(dev, 0xfd))
                printk("timed out in handlefd\n");
        sendup_buffer(dev);
}

static void handlewrite(struct net_device *dev)
{
        int dma = dev->dma;
        int base = dev->base_addr;
        unsigned long flags;

        flags = claim_dma_lock();
        disable_dma(dma);
        clear_dma_ff(dma);
        set_dma_mode(dma, DMA_MODE_WRITE);
        set_dma_addr(dma, virt_to_bus(ltdmabuf));
        set_dma_count(dma, 800);
        enable_dma(dma);
        release_dma_lock(flags);

        inb_p(base + 3);
        inb_p(base + 2);

        if (wait_timeout(dev, 0xfb)) {
                flags = claim_dma_lock();
                printk("timed out in handlewrite, dma res %d\n",
                       get_dma_residue(dev->dma));
                release_dma_lock(flags);
        }
}

static void handleread(struct net_device *dev)
{
        int dma = dev->dma;
        int base = dev->base_addr;
        unsigned long flags;

        flags = claim_dma_lock();
        disable_dma(dma);
        clear_dma_ff(dma);
        set_dma_mode(dma, DMA_MODE_READ);
        set_dma_addr(dma, virt_to_bus(ltdmabuf));
        set_dma_count(dma, 800);
        enable_dma(dma);
        release_dma_lock(flags);

        inb_p(base + 3);
        inb_p(base + 2);
        if (wait_timeout(dev, 0xfb))
                printk("timed out in handleread\n");
}

static void handlecommand(struct net_device *dev)
{
        int dma = dev->dma;
        int base = dev->base_addr;
        unsigned long flags;

        flags = claim_dma_lock();
        disable_dma(dma);
        clear_dma_ff(dma);
        set_dma_mode(dma, DMA_MODE_WRITE);
        set_dma_addr(dma, virt_to_bus(ltdmacbuf));
        set_dma_count(dma, 50);
        enable_dma(dma);
        release_dma_lock(flags);
        inb_p(base + 3);
        inb_p(base + 2);
        if (wait_timeout(dev, 0xfa))
                printk("timed out in handlecommand\n");
}

static unsigned char rescbuf[2] = { LT_GETRESULT, 0 };
static unsigned char resdbuf[2];

static int QInIdle;

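/*
 * idle() drives the card's handshake state machine until the transmit queue
 * drains.  The status byte at base+6 is sampled twice (until stable) and
 * dispatched on:
 *   0xfc, 0xfd  card wants to push a command / a data frame to the host
 *   0xf9        queue an LT_GETRESULT on the reserved mailbox 0, then poke the card
 *   0xf8        nothing pending; poke the card only if commands are still queued
 *   0xfa        card will accept a command; dequeue one and send it
 *   0xfb        data phase of the command sent in the 0xfa step
 * QInIdle prevents re-entry, since this routine is called from the interrupt
 * handler, the poll timer, and do_read()/do_write().
 */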
static void idle(struct net_device *dev)
{
        unsigned long flags;
        int state;
        struct xmitQel *q = NULL;
        int oops;
        int i;
        int base = dev->base_addr;

        spin_lock_irqsave(&txqueue_lock, flags);
        if (QInIdle) {
                spin_unlock_irqrestore(&txqueue_lock, flags);
                return;
        }
        QInIdle = 1;
        spin_unlock_irqrestore(&txqueue_lock, flags);

        (void) inb_p(base + 6);

        oops = 100;

loop:
        if (0 > oops--) {
                printk("idle: looped too many times\n");
                goto done;
        }

        state = inb_p(base + 6);
        if (state != inb_p(base + 6))
                goto loop;

        switch (state) {
        case 0xfc:
                if (debug & DEBUG_LOWER)
                        printk("idle: fc\n");
                handlefc(dev);
                break;
        case 0xfd:
                if (debug & DEBUG_LOWER)
                        printk("idle: fd\n");
                handlefd(dev);
                break;
        case 0xf9:
                if (debug & DEBUG_LOWER)
                        printk("idle: f9\n");
                if (!mboxinuse[0]) {
                        mboxinuse[0] = 1;
                        qels[0].cbuf = rescbuf;
                        qels[0].cbuflen = 2;
                        qels[0].dbuf = resdbuf;
                        qels[0].dbuflen = 2;
                        qels[0].QWrite = 0;
                        qels[0].mailbox = 0;
                        enQ(&qels[0]);
                }
                inb_p(dev->base_addr + 1);
                inb_p(dev->base_addr + 0);
                if (wait_timeout(dev, 0xf9))
                        printk("timed out idle f9\n");
                break;
        case 0xf8:
                if (xmQhd) {
                        inb_p(dev->base_addr + 1);
                        inb_p(dev->base_addr + 0);
                        if (wait_timeout(dev, 0xf8))
                                printk("timed out idle f8\n");
                } else {
                        goto done;
                }
                break;
        case 0xfa:
                if (debug & DEBUG_LOWER)
                        printk("idle: fa\n");
                if (xmQhd) {
                        q = deQ();
                        memcpy(ltdmacbuf, q->cbuf, q->cbuflen);
                        ltdmacbuf[1] = q->mailbox;
                        if (debug > 1) {
                                int n;

                                printk("ltpc: sent command ");
                                n = q->cbuflen;
                                if (n > 100)
                                        n = 100;
                                for (i = 0; i < n; i++)
                                        printk("%02x ", ltdmacbuf[i]);
                                printk("\n");
                        }
                        handlecommand(dev);
                        if (0xfa == inb_p(base + 6)) {
                                /* handlecommand() timed out; give up for now */
                                goto done;
                        }
                } else {
                        if (!mboxinuse[0]) {
                                mboxinuse[0] = 1;
                                qels[0].cbuf = rescbuf;
                                qels[0].cbuflen = 2;
                                qels[0].dbuf = resdbuf;
                                qels[0].dbuflen = 2;
                                qels[0].QWrite = 0;
                                qels[0].mailbox = 0;
                                enQ(&qels[0]);
                        } else {
                                printk("trouble: response command already queued\n");
                                goto done;
                        }
                }
                break;
        case 0xfb:
                if (debug & DEBUG_LOWER)
                        printk("idle: fb\n");
                if (q->QWrite) {
                        memcpy(ltdmabuf, q->dbuf, q->dbuflen);
                        handlewrite(dev);
                } else {
                        handleread(dev);
                        if (q->mailbox) {
                                memcpy(q->dbuf, ltdmabuf, q->dbuflen);
                        } else {
                                /* mailbox 0: LT_GETRESULT reply - store the result for its mailbox */
                                mailbox[0x0f & ltdmabuf[0]] = ltdmabuf[1];
                                mboxinuse[0] = 0;
                        }
                }
                break;
        }
        goto loop;

done:
        QInIdle = 0;

        /* if we're using interrupts, reading base+7 twice appears to re-enable them on the card */
        if (dev->irq) {
                inb_p(base + 7);
                inb_p(base + 7);
        }
}

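/*
 * do_write()/do_read() allocate a mailbox, queue the command (plus the data
 * buffer for the payload or the reply), run the state machine via idle(),
 * and return the result byte the card left in the mailbox.  -1 means no
 * free mailbox.
 */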
static int do_write(struct net_device *dev, void *cbuf, int cbuflen,
                    void *dbuf, int dbuflen)
{
        int i = getmbox();
        int ret;

        if (i) {
                qels[i].cbuf = cbuf;
                qels[i].cbuflen = cbuflen;
                qels[i].dbuf = dbuf;
                qels[i].dbuflen = dbuflen;
                qels[i].QWrite = 1;
                qels[i].mailbox = i;
                enQ(&qels[i]);
                idle(dev);
                ret = mailbox[i];
                mboxinuse[i] = 0;
                return ret;
        }
        printk("ltpc: could not allocate mbox\n");
        return -1;
}

static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
                   void *dbuf, int dbuflen)
{
        int i = getmbox();
        int ret;

        if (i) {
                qels[i].cbuf = cbuf;
                qels[i].cbuflen = cbuflen;
                qels[i].dbuf = dbuf;
                qels[i].dbuflen = dbuflen;
                qels[i].QWrite = 0;
                qels[i].mailbox = i;
                enQ(&qels[i]);
                idle(dev);
                ret = mailbox[i];
                mboxinuse[i] = 0;
                return ret;
        }
        printk("ltpc: could not allocate mbox\n");
        return -1;
}

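/*
 * Poll timer used when no IRQ is available, plus small helpers around the
 * LT_GETFLAGS / LT_SETFLAGS card commands.
 */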
static struct timer_list ltpc_timer;

static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev);

static int read_30(struct net_device *dev)
{
        lt_command c;

        c.getflags.command = LT_GETFLAGS;
        return do_read(dev, &c, sizeof(c.getflags), &c, 0);
}

static int set_30(struct net_device *dev, int x)
{
        lt_command c;

        c.setflags.command = LT_SETFLAGS;
        c.setflags.flags = x;
        return do_write(dev, &c, sizeof(c.setflags), &c, 0);
}

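/*
 * sendup_buffer() turns a received LT_RCVLAP indication (command header in
 * ltdmacbuf, payload in ltdmabuf) into an sk_buff with a 3-byte LLAP header
 * (dnode, snode, LLAP type) and hands it to the AppleTalk stack via netif_rx().
 */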
static int sendup_buffer(struct net_device *dev)
{
        int dnode, snode, llaptype, len;
        int sklen;
        struct sk_buff *skb;
        struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;

        if (ltc->command != LT_RCVLAP) {
                printk("unknown command 0x%02x from ltpc card\n", ltc->command);
                return -1;
        }
        dnode = ltc->dnode;
        snode = ltc->snode;
        llaptype = ltc->laptype;
        len = ltc->length;

        sklen = len;
        if (llaptype == 1)
                sklen += 8;
        if (sklen > 800) {
                printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n",
                       dev->name, sklen);
                return -1;
        }

        if ((llaptype == 0) || (llaptype > 2)) {
                printk(KERN_INFO "%s: unknown LLAP type: %d\n", dev->name, llaptype);
                return -1;
        }

        skb = dev_alloc_skb(3 + sklen);
        if (skb == NULL) {
                printk("%s: dropping packet due to memory squeeze.\n",
                       dev->name);
                return -1;
        }
        skb->dev = dev;

        if (sklen > len)
                skb_reserve(skb, 8);
        skb_put(skb, len + 3);
        skb->protocol = htons(ETH_P_LOCALTALK);

        skb->data[0] = dnode;
        skb->data[1] = snode;
        skb->data[2] = llaptype;
        skb_reset_mac_header(skb);
        skb_pull(skb, 3);

        skb_copy_to_linear_data(skb, ltdmabuf, len);

        skb_reset_transport_header(skb);

        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;

        netif_rx(skb);
        return 0;
}

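/*
 * Interrupt handler: sample the card status at base+6, then let idle() run
 * the handshake state machine; idle() re-reads base+7 on exit to turn the
 * card's interrupt back on.
 */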
static irqreturn_t
ltpc_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;

        if (dev == NULL) {
                printk("ltpc_interrupt: unknown device.\n");
                return IRQ_NONE;
        }

        inb_p(dev->base_addr + 6);

        idle(dev);

        return IRQ_HANDLED;
}

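/*
 * SIOCSIFADDR: take the network number from user space, ask the card to
 * pick a node number with LT_INIT (the requested node is only a hint),
 * then set the ALLLAP flag so we see every LLAP type.
 * SIOCGIFADDR: report the current AppleTalk address.
 */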
static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct sockaddr_at *sa = (struct sockaddr_at *) &ifr->ifr_addr;
        struct ltpc_private *ltpc_priv = netdev_priv(dev);
        struct atalk_addr *aa = &ltpc_priv->my_addr;
        struct lt_init c;
        int ltflags;

        if (debug & DEBUG_VERBOSE)
                printk("ltpc_ioctl called\n");

        switch (cmd) {
        case SIOCSIFADDR:
                aa->s_net = sa->sat_addr.s_net;

                c.command = LT_INIT;
                c.hint = sa->sat_addr.s_node;

                aa->s_node = do_read(dev, &c, sizeof(c), &c, 0);

                ltflags = read_30(dev);
                ltflags |= LT_FLAG_ALLLAP;
                set_30(dev, ltflags);

                dev->broadcast[0] = 0xFF;
                dev->dev_addr[0] = aa->s_node;

                dev->addr_len = 1;

                return 0;

        case SIOCGIFADDR:
                sa->sat_addr.s_net = aa->s_net;
                sa->sat_addr.s_node = aa->s_node;

                return 0;

        default:
                return -EINVAL;
        }
}

static void set_multicast_list(struct net_device *dev)
{
        /* no multicast filtering to configure for this card */
}

static int ltpc_poll_counter;

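/*
 * Timer callback used in polled (no-IRQ) mode: run idle() and re-arm the
 * timer to fire again in 1/20 of a second.
 */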
static void ltpc_poll(unsigned long l)
{
        struct net_device *dev = (struct net_device *) l;

        del_timer(&ltpc_timer);

        if (debug & DEBUG_VERBOSE) {
                if (!ltpc_poll_counter) {
                        ltpc_poll_counter = 50;
                        printk("ltpc poll is alive\n");
                }
                ltpc_poll_counter--;
        }

        if (!dev)
                return;

        idle(dev);
        ltpc_timer.expires = jiffies + HZ / 20;

        add_timer(&ltpc_timer);
}

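/*
 * Transmit path: build an LT_SENDLAP command from the 3-byte LLAP header at
 * the front of the skb, push it through do_write() (which runs the state
 * machine synchronously), then update the stats and free the skb.
 */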
static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
{
        int i;
        struct lt_sendlap cbuf;
        unsigned char *hdr;

        cbuf.command = LT_SENDLAP;
        cbuf.dnode = skb->data[0];
        cbuf.laptype = skb->data[2];
        skb_pull(skb, 3);
        cbuf.length = skb->len;
        skb_reset_transport_header(skb);

        if (debug & DEBUG_UPPER) {
                printk("command ");
                for (i = 0; i < 6; i++)
                        printk("%02x ", ((unsigned char *) &cbuf)[i]);
                printk("\n");
        }

        hdr = skb_transport_header(skb);
        do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len);

        if (debug & DEBUG_UPPER) {
                printk("sent %d ddp bytes\n", skb->len);
                for (i = 0; i < skb->len; i++)
                        printk("%02x ", hdr[i]);
                printk("\n");
        }

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;

        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

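/*
 * DMA probe: try to reserve DMA channels 1 and/or 3, program each candidate
 * for a small transfer, issue an LT_READMEM command to the card, and check
 * which channel's transfer count (residue) actually changed.  Returns the
 * working channel (1 or 3), or 0 if neither moved any data.
 */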
static int __init ltpc_probe_dma(int base, int dma)
{
        int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3;
        unsigned long timeout;
        unsigned long f;

        if (want & 1) {
                if (request_dma(1, "ltpc")) {
                        want &= ~1;
                } else {
                        f = claim_dma_lock();
                        disable_dma(1);
                        clear_dma_ff(1);
                        set_dma_mode(1, DMA_MODE_WRITE);
                        set_dma_addr(1, virt_to_bus(ltdmabuf));
                        set_dma_count(1, sizeof(struct lt_mem));
                        enable_dma(1);
                        release_dma_lock(f);
                }
        }
        if (want & 2) {
                if (request_dma(3, "ltpc")) {
                        want &= ~2;
                } else {
                        f = claim_dma_lock();
                        disable_dma(3);
                        clear_dma_ff(3);
                        set_dma_mode(3, DMA_MODE_WRITE);
                        set_dma_addr(3, virt_to_bus(ltdmabuf));
                        set_dma_count(3, sizeof(struct lt_mem));
                        enable_dma(3);
                        release_dma_lock(f);
                }
        }

        ltdmabuf[0] = LT_READMEM;
        ltdmabuf[1] = 1;
        ltdmabuf[2] = 0; ltdmabuf[3] = 0;
        ltdmabuf[4] = 0; ltdmabuf[5] = 1;
        ltdmabuf[6] = 0;

        inb_p(io + 1);
        inb_p(io + 0);
        timeout = jiffies + 100 * HZ / 100;
        while (time_before(jiffies, timeout)) {
                if (0xfa == inb_p(io + 6))
                        break;
        }

        inb_p(io + 3);
        inb_p(io + 2);
        while (time_before(jiffies, timeout)) {
                if (0xfb == inb_p(io + 6))
                        break;
        }

        if ((want & 2) && (get_dma_residue(3) == sizeof(struct lt_mem))) {
                want &= ~2;
                free_dma(3);
        }

        if ((want & 1) && (get_dma_residue(1) == sizeof(struct lt_mem))) {
                want &= ~1;
                free_dma(1);
        }

        if (!want)
                return 0;

        return (want & 2) ? 3 : 1;
}

static const struct net_device_ops ltpc_netdev = {
        .ndo_start_xmit  = ltpc_xmit,
        .ndo_do_ioctl    = ltpc_ioctl,
        .ndo_set_rx_mode = set_multicast_list,
};

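/*
 * Probe sequence: look for the card at 0x220 or 0x240 (status register reads
 * back 0xf0-0xfe), optionally autoprobe the IRQ, allocate a 1000-byte DMA
 * buffer (data at offset 0, command buffer at offset 800), walk the card
 * through its reset sequence, find a usable DMA channel and register the
 * LocalTalk net device.  Returns the registered device or ERR_PTR() on error.
 */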
struct net_device * __init ltpc_probe(void)
{
        struct net_device *dev;
        int err = -ENOMEM;
        int x = 0, y = 0;
        int autoirq;
        unsigned long f;
        unsigned long timeout;

        dev = alloc_ltalkdev(sizeof(struct ltpc_private));
        if (!dev)
                goto out;

        /* look for the card at 0x220 first, then 0x240 */
        if (io != 0x240 && request_region(0x220, 8, "ltpc")) {
                x = inb_p(0x220 + 6);
                if ((x != 0xff) && (x >= 0xf0)) {
                        io = 0x220;
                        goto got_port;
                }
                release_region(0x220, 8);
        }
        if (io != 0x220 && request_region(0x240, 8, "ltpc")) {
                y = inb_p(0x240 + 6);
                if ((y != 0xff) && (y >= 0xf0)) {
                        io = 0x240;
                        goto got_port;
                }
                release_region(0x240, 8);
        }

        printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x, y);
        err = -ENODEV;
        goto out1;

got_port:
        /* if no IRQ was specified, try to autodetect one */
        if (irq < 2) {
                unsigned long irq_mask;

                irq_mask = probe_irq_on();

                inb_p(io + 7);
                inb_p(io + 7);

                inb_p(io + 6);
                mdelay(2);
                autoirq = probe_irq_off(irq_mask);

                if (autoirq == 0) {
                        printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
                } else {
                        irq = autoirq;
                }
        }

        ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
        if (!ltdmabuf) {
                printk(KERN_ERR "ltpc: mem alloc failed\n");
                err = -ENOMEM;
                goto out2;
        }

        ltdmacbuf = &ltdmabuf[800];

        if (debug & DEBUG_VERBOSE) {
                printk("ltdmabuf pointer %08lx\n", (unsigned long) ltdmabuf);
        }

        /* run through the card's reset/wake-up sequence of port reads */
        inb_p(io + 1);
        inb_p(io + 3);

        msleep(20);

        inb_p(io + 0);
        inb_p(io + 2);
        inb_p(io + 7);
        inb_p(io + 4);
        inb_p(io + 5);
        inb_p(io + 5);
        inb_p(io + 6);

        ssleep(1);

        dma = ltpc_probe_dma(io, dma);
        if (!dma) {
                printk(KERN_ERR "No DMA channel found on ltpc card.\n");
                err = -ENODEV;
                goto out3;
        }

        if (irq)
                printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IRQ%d, DMA%d.\n", io, irq, dma);
        else
                printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n", io, dma);

        dev->netdev_ops = &ltpc_netdev;
        dev->base_addr = io;
        dev->irq = irq;
        dev->dma = dma;

        /* prime a first DMA read and wait for the card to report status 0xf9 */
        f = claim_dma_lock();
        disable_dma(dma);
        clear_dma_ff(dma);
        set_dma_mode(dma, DMA_MODE_READ);
        set_dma_addr(dma, virt_to_bus(ltdmabuf));
        set_dma_count(dma, 0x100);
        enable_dma(dma);
        release_dma_lock(f);

        (void) inb_p(io + 3);
        (void) inb_p(io + 2);
        timeout = jiffies + 100 * HZ / 100;

        while (time_before(jiffies, timeout)) {
                if (0xf9 == inb_p(io + 6))
                        break;
                schedule();
        }

        if (debug & DEBUG_VERBOSE) {
                printk("setting up timer and irq\n");
        }

        if (irq && request_irq(irq, ltpc_interrupt, 0, "ltpc", dev) >= 0) {
                (void) inb_p(io + 7);
                (void) inb_p(io + 7);
        } else {
                if (irq)
                        printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
                dev->irq = 0;

                /* no usable IRQ: fall back to polling from a timer */
                init_timer(&ltpc_timer);
                ltpc_timer.function = ltpc_poll;
                ltpc_timer.data = (unsigned long) dev;

                ltpc_timer.expires = jiffies + HZ / 20;
                add_timer(&ltpc_timer);
        }
        err = register_netdev(dev);
        if (err)
                goto out4;
        return dev;
out4:
        del_timer_sync(&ltpc_timer);
        if (dev->irq)
                free_irq(dev->irq, dev);
out3:
        free_pages((unsigned long) ltdmabuf, get_order(1000));
out2:
        release_region(io, 8);
out1:
        free_netdev(dev);
out:
        return ERR_PTR(err);
}

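/*
 * Kernel command line:  ltpc=auto  or  ltpc=iobase[,irq[,dma]]
 */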
#ifndef MODULE

static int __init ltpc_setup(char *str)
{
        int ints[5];

        str = get_options(str, ARRAY_SIZE(ints), ints);

        if (ints[0] == 0) {
                if (str && !strncmp(str, "auto", 4)) {
                        /* do nothing: the probe autodetects the card */
                } else {
                        printk(KERN_ERR
                               "ltpc: usage: ltpc=auto|iobase[,irq[,dma]]\n");
                        return 0;
                }
        } else {
                io = ints[1];
                if (ints[0] > 1)
                        irq = ints[2];
                if (ints[0] > 2)
                        dma = ints[3];
        }
        return 1;
}

__setup("ltpc=", ltpc_setup);
#endif

static struct net_device *dev_ltpc;

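/*
 * Module entry points.  As a module, the io/irq/dma parameters should be
 * given explicitly; the cleanup path unregisters the device and releases the
 * timer, IRQ, DMA channel, I/O region and DMA buffer.
 */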
#ifdef MODULE

MODULE_LICENSE("GPL");
module_param(debug, int, 0);
module_param(io, int, 0);
module_param(irq, int, 0);
module_param(dma, int, 0);

static int __init ltpc_module_init(void)
{
        if (io == 0)
                printk(KERN_NOTICE
                       "ltpc: Autoprobing is not recommended for modules\n");

        dev_ltpc = ltpc_probe();
        return PTR_ERR_OR_ZERO(dev_ltpc);
}
module_init(ltpc_module_init);
#endif

static void __exit ltpc_cleanup(void)
{
        if (debug & DEBUG_VERBOSE)
                printk("unregister_netdev\n");
        unregister_netdev(dev_ltpc);

        ltpc_timer.data = 0;    /* makes the next ltpc_poll() call bail out early */

        del_timer_sync(&ltpc_timer);

        if (debug & DEBUG_VERBOSE)
                printk("freeing irq\n");

        if (dev_ltpc->irq)
                free_irq(dev_ltpc->irq, dev_ltpc);

        if (debug & DEBUG_VERBOSE)
                printk("freeing dma\n");

        if (dev_ltpc->dma)
                free_dma(dev_ltpc->dma);

        if (debug & DEBUG_VERBOSE)
                printk("freeing ioaddr\n");

        if (dev_ltpc->base_addr)
                release_region(dev_ltpc->base_addr, 8);

        free_netdev(dev_ltpc);

        if (debug & DEBUG_VERBOSE)
                printk("free_pages\n");

        free_pages((unsigned long) ltdmabuf, get_order(1000));

        if (debug & DEBUG_VERBOSE)
                printk("returning from cleanup_module\n");
}

module_exit(ltpc_cleanup);