/*
 * ltpc.c -- driver for the Apple/Farallon LocalTalk PC card.
 */
static int debug;
#define DEBUG_VERBOSE 1
#define DEBUG_UPPER 2
#define DEBUG_LOWER 4

static int io;
static int irq;
static int dma;

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ltalk.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/atalk.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <net/Space.h>

#include <asm/dma.h>
#include <asm/io.h>

#include "ltpc.h"

static DEFINE_SPINLOCK(txqueue_lock);
static DEFINE_SPINLOCK(mbox_lock);

static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
		   void *dbuf, int dbuflen);
static int sendup_buffer (struct net_device *dev);
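
/*
 * Host/card interface, as reconstructed from the code below (treat the
 * details as a best-effort description rather than a spec):
 *
 *  - ltdmabuf is a 1000-byte ISA-DMA-able buffer.  The first 800 bytes
 *    carry frame data; ltdmacbuf points at offset 800 and is used as a
 *    50-byte command block.
 *  - Commands for the card are queued as struct xmitQel entries (enQ/deQ),
 *    each naming a command buffer, an optional data buffer, a transfer
 *    direction (QWrite) and a mailbox number.
 *  - The card tags its reply with the mailbox number; mailbox[i] receives
 *    the one-byte result and mboxinuse[i] marks the slot busy until the
 *    caller has picked the result up.
 */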
static unsigned long dma_mem_alloc(int size)
{
	int order = get_order(size);

	return __get_dma_pages(GFP_KERNEL, order);
}

static unsigned char *ltdmabuf;
static unsigned char *ltdmacbuf;

struct ltpc_private
{
	struct atalk_addr my_addr;
};

struct xmitQel {
	struct xmitQel *next;
	/* command buffer */
	unsigned char *cbuf;
	short cbuflen;
	/* data buffer */
	unsigned char *dbuf;
	short dbuflen;
	unsigned char QWrite;	/* read or write data */
	unsigned char mailbox;
};

/* head and tail of the command queue */
static struct xmitQel *xmQhd, *xmQtl;

static void enQ(struct xmitQel *qel)
{
	unsigned long flags;
	qel->next = NULL;

	spin_lock_irqsave(&txqueue_lock, flags);
	if (xmQtl) {
		xmQtl->next = qel;
	} else {
		xmQhd = qel;
	}
	xmQtl = qel;
	spin_unlock_irqrestore(&txqueue_lock, flags);

	if (debug & DEBUG_LOWER)
		printk("enqueued a 0x%02x command\n",qel->cbuf[0]);
}

static struct xmitQel *deQ(void)
{
	unsigned long flags;
	int i;
	struct xmitQel *qel=NULL;

	spin_lock_irqsave(&txqueue_lock, flags);
	if (xmQhd) {
		qel = xmQhd;
		xmQhd = qel->next;
		if(!xmQhd) xmQtl = NULL;
	}
	spin_unlock_irqrestore(&txqueue_lock, flags);

	if ((debug & DEBUG_LOWER) && qel) {
		int n;
		printk(KERN_DEBUG "ltpc: dequeued command ");
		n = qel->cbuflen;
		if (n>100) n=100;
		for(i=0;i<n;i++) printk("%02x ",qel->cbuf[i]);
		printk("\n");
	}

	return qel;
}

/* the statically allocated queue elements... */
static struct xmitQel qels[16];

/* ...and their corresponding mailboxes */
static unsigned char mailbox[16];
static unsigned char mboxinuse[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/* spin until the card's status port (base+6) leaves state c; gives up
   after roughly 20 seconds (200000 x 100 usec) and returns 1 on timeout */
static int wait_timeout(struct net_device *dev, int c)
{
	int i;

	for(i=0;i<200000;i++) {
		if ( c != inb_p(dev->base_addr+6) ) return 0;
		udelay(100);
	}
	return 1;
}

/* get the first free mailbox (1..15); returns 0 if none are available */
static int getmbox(void)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mbox_lock, flags);
	for(i=1;i<16;i++) if(!mboxinuse[i]) {
		mboxinuse[i]=1;
		spin_unlock_irqrestore(&mbox_lock, flags);
		return i;
	}
	spin_unlock_irqrestore(&mbox_lock, flags);
	return 0;
}
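
/*
 * Low-level handlers for the card's status codes (read from base+6).
 * The mapping below is inferred from how idle() dispatches them:
 *
 *   0xfc  card has a 50-byte command block for the host   -> handlefc()
 *   0xfd  card has up to 800 bytes of received frame data -> handlefd()
 *   0xfa  card is ready to accept a command block         -> handlecommand()
 *   0xfb  data phase of the current command               -> handlewrite()/handleread()
 *   0xf9  a result byte is ready (fetched via LT_GETRESULT)
 *   0xf8  card is idle
 *
 * Each handler programs the ISA DMA channel for its transfer, then reads
 * base+3 and base+2 (apparently the handshake that starts the card's side
 * of the transfer), and finally waits for the status code to change.
 */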
static void handlefc(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmacbuf));
	set_dma_count(dma,50);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfc) ) printk("timed out in handlefc\n");
}

static void handlefd(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfd) ) printk("timed out in handlefd\n");
	sendup_buffer(dev);
}

static void handlewrite(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_WRITE);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfb) ) {
		flags=claim_dma_lock();
		printk("timed out in handlewrite, dma res %d\n",
			get_dma_residue(dev->dma) );
		release_dma_lock(flags);
	}
}

static void handleread(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);
	if ( wait_timeout(dev,0xfb) ) printk("timed out in handleread\n");
}

static void handlecommand(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_WRITE);
	set_dma_addr(dma,virt_to_bus(ltdmacbuf));
	set_dma_count(dma,50);
	enable_dma(dma);
	release_dma_lock(flags);
	inb_p(base+3);
	inb_p(base+2);
	if ( wait_timeout(dev,0xfa) ) printk("timed out in handlecommand\n");
}

/* a pre-built LT_GETRESULT command (and its reply buffer), queued on
   mailbox 0 whenever the card signals that a result byte is ready */
static unsigned char rescbuf[2] = {LT_GETRESULT,0};
static unsigned char resdbuf[2];

/* nonzero while idle() is running, to keep it from re-entering itself */
static int QInIdle;
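
/*
 * idle() -- the heart of the driver, despite the name.  It repeatedly
 * samples the card's status port (base+6, read twice until two reads
 * agree) and dispatches to the handle* routines above, feeding queued
 * command blocks to the card and pulling results and received frames
 * back, until the card reports it has nothing left to do.  It is called
 * from the transmit path (via do_read/do_write), from the interrupt
 * handler, and from the polling timer; the QInIdle flag keeps those
 * callers from running it concurrently.
 */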
static void idle(struct net_device *dev)
{
	unsigned long flags;
	int state;
	struct xmitQel *q = NULL;
	int oops;
	int i;
	int base = dev->base_addr;

	spin_lock_irqsave(&txqueue_lock, flags);
	if(QInIdle) {
		spin_unlock_irqrestore(&txqueue_lock, flags);
		return;
	}
	QInIdle = 1;
	spin_unlock_irqrestore(&txqueue_lock, flags);

	(void) inb_p(base+6);

	oops = 100;

loop:
	if (0>oops--) {
		printk("idle: looped too many times\n");
		goto done;
	}

	state = inb_p(base+6);
	if (state != inb_p(base+6)) goto loop;

	switch(state) {
	case 0xfc:
		/* incoming command block from the card */
		if (debug & DEBUG_LOWER) printk("idle: fc\n");
		handlefc(dev);
		break;
	case 0xfd:
		/* incoming data (a received LLAP frame) */
		if(debug & DEBUG_LOWER) printk("idle: fd\n");
		handlefd(dev);
		break;
	case 0xf9:
		/* result ready: queue an LT_GETRESULT to fetch it */
		if (debug & DEBUG_LOWER) printk("idle: f9\n");
		if(!mboxinuse[0]) {
			mboxinuse[0] = 1;
			qels[0].cbuf = rescbuf;
			qels[0].cbuflen = 2;
			qels[0].dbuf = resdbuf;
			qels[0].dbuflen = 2;
			qels[0].QWrite = 0;
			qels[0].mailbox = 0;
			enQ(&qels[0]);
		}
		inb_p(dev->base_addr+1);
		inb_p(dev->base_addr+0);
		if( wait_timeout(dev,0xf9) )
			printk("timed out idle f9\n");
		break;
	case 0xf8:
		/* card is idle: poke it if we still have commands queued,
		   otherwise we're done */
		if (xmQhd) {
			inb_p(dev->base_addr+1);
			inb_p(dev->base_addr+0);
			if(wait_timeout(dev,0xf8) )
				printk("timed out idle f8\n");
		} else {
			goto done;
		}
		break;
	case 0xfa:
		/* card is ready for a command: send the next queued one */
		if(debug & DEBUG_LOWER) printk("idle: fa\n");
		if (xmQhd) {
			q=deQ();
			memcpy(ltdmacbuf,q->cbuf,q->cbuflen);
			ltdmacbuf[1] = q->mailbox;
			if (debug>1) {
				int n;
				printk("ltpc: sent command ");
				n = q->cbuflen;
				if (n>100) n=100;
				for(i=0;i<n;i++)
					printk("%02x ",ltdmacbuf[i]);
				printk("\n");
			}
			handlecommand(dev);
			if(0xfa==inb_p(base+6)) {
				/* still 0xfa: the card did not take the command */
				goto done;
			}
		} else {
			/* nothing queued; fetch the pending result instead */
			if (!mboxinuse[0]) {
				mboxinuse[0] = 1;
				qels[0].cbuf = rescbuf;
				qels[0].cbuflen = 2;
				qels[0].dbuf = resdbuf;
				qels[0].dbuflen = 2;
				qels[0].QWrite = 0;
				qels[0].mailbox = 0;
				enQ(&qels[0]);
			} else {
				printk("trouble: response command already queued\n");
				goto done;
			}
		}
		break;
	case 0xfb:
		/* data phase of the command sent above */
		if(debug & DEBUG_LOWER) printk("idle: fb\n");
		if(q->QWrite) {
			memcpy(ltdmabuf,q->dbuf,q->dbuflen);
			handlewrite(dev);
		} else {
			handleread(dev);
			/* mailbox 0 is reserved for LT_GETRESULT replies;
			   anything else is data for the command's caller */
			if(q->mailbox) {
				memcpy(q->dbuf,ltdmabuf,q->dbuflen);
			} else {
				mailbox[ 0x0f & ltdmabuf[0] ] = ltdmabuf[1];
				mboxinuse[0]=0;
			}
		}
		break;
	}
	goto loop;

done:
	QInIdle=0;

	/* re-arm the card's interrupt line by reading base+7 twice
	   (the same sequence ltpc_probe() uses after request_irq()) */
	if (dev->irq) {
		inb_p(base+7);
		inb_p(base+7);
	}
}
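
/*
 * do_write()/do_read() -- queue one command block (plus its data phase)
 * for the card and run idle() until it completes.  The return value is
 * the one-byte result the card left in our mailbox, or -1 if no mailbox
 * slot was free.  The two are identical except for the transfer
 * direction recorded in QWrite.
 */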
static int do_write(struct net_device *dev, void *cbuf, int cbuflen,
		    void *dbuf, int dbuflen)
{
	int i = getmbox();
	int ret;

	if(i) {
		qels[i].cbuf = cbuf;
		qels[i].cbuflen = cbuflen;
		qels[i].dbuf = dbuf;
		qels[i].dbuflen = dbuflen;
		qels[i].QWrite = 1;
		qels[i].mailbox = i;
		enQ(&qels[i]);
		idle(dev);
		ret = mailbox[i];
		mboxinuse[i]=0;
		return ret;
	}
	printk("ltpc: could not allocate mbox\n");
	return -1;
}

static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
		   void *dbuf, int dbuflen)
{
	int i = getmbox();
	int ret;

	if(i) {
		qels[i].cbuf = cbuf;
		qels[i].cbuflen = cbuflen;
		qels[i].dbuf = dbuf;
		qels[i].dbuflen = dbuflen;
		qels[i].QWrite = 0;
		qels[i].mailbox = i;
		enQ(&qels[i]);
		idle(dev);
		ret = mailbox[i];
		mboxinuse[i]=0;
		return ret;
	}
	printk("ltpc: could not allocate mbox\n");
	return -1;
}

static struct timer_list ltpc_timer;
static struct net_device *ltpc_timer_dev;

static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev);

/* read and update the card's flag byte (LT_GETFLAGS / LT_SETFLAGS) */
static int read_30 ( struct net_device *dev)
{
	lt_command c;
	c.getflags.command = LT_GETFLAGS;
	return do_read(dev, &c, sizeof(c.getflags),&c,0);
}

static int set_30 (struct net_device *dev,int x)
{
	lt_command c;
	c.setflags.command = LT_SETFLAGS;
	c.setflags.flags = x;
	return do_write(dev, &c, sizeof(c.setflags),&c,0);
}
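
/*
 * sendup_buffer() -- hand a received LLAP frame to the AppleTalk stack.
 * handlefd() has already DMA'd the frame data into ltdmabuf and the
 * LT_RCVLAP command block (destination node, source node, LLAP type,
 * length) into ltdmacbuf.  A 3-byte LLAP header (dnode, snode, type) is
 * rebuilt in front of the data before the skb goes to netif_rx(); for
 * LLAP type 1 frames an extra 8 bytes of headroom are reserved (the
 * skb_reserve(skb,8) below).
 */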
static int sendup_buffer (struct net_device *dev)
{
	int dnode, snode, llaptype, len;
	int sklen;
	struct sk_buff *skb;
	struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;

	if (ltc->command != LT_RCVLAP) {
		printk("unknown command 0x%02x from ltpc card\n",ltc->command);
		return -1;
	}
	dnode = ltc->dnode;
	snode = ltc->snode;
	llaptype = ltc->laptype;
	len = ltc->length;

	sklen = len;
	if (llaptype == 1)
		sklen += 8;
	if(sklen > 800) {
		printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n",
			dev->name,sklen);
		return -1;
	}

	if ( (llaptype==0) || (llaptype>2) ) {
		printk(KERN_INFO "%s: unknown LLAP type: %d\n",dev->name,llaptype);
		return -1;
	}

	skb = dev_alloc_skb(3+sklen);
	if (skb == NULL)
	{
		printk("%s: dropping packet due to memory squeeze.\n",
			dev->name);
		return -1;
	}
	skb->dev = dev;

	if (sklen > len)
		skb_reserve(skb,8);
	skb_put(skb,len+3);
	skb->protocol = htons(ETH_P_LOCALTALK);

	/* rebuild the 3-byte LLAP header in front of the data */
	skb->data[0] = dnode;
	skb->data[1] = snode;
	skb->data[2] = llaptype;
	skb_reset_mac_header(skb);
	skb_pull(skb,3);

	/* copy the DDP header and payload */
	skb_copy_to_linear_data(skb, ltdmabuf, len);

	skb_reset_transport_header(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* toss it onwards */
	netif_rx(skb);
	return 0;
}
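
/*
 * Interrupt handler: reading base+6 picks up (and appears to acknowledge)
 * the card's status, then idle() drains whatever work the card has
 * pending.  idle() re-arms the interrupt line on its way out by reading
 * base+7 twice, the same sequence ltpc_probe() uses after request_irq().
 */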
static irqreturn_t
ltpc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;

	if (dev==NULL) {
		printk("ltpc_interrupt: unknown device.\n");
		return IRQ_NONE;
	}

	inb_p(dev->base_addr+6);

	idle(dev);

	return IRQ_HANDLED;
}

static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct sockaddr_at *sa = (struct sockaddr_at *) &ifr->ifr_addr;
	struct ltpc_private *ltpc_priv = netdev_priv(dev);
	struct atalk_addr *aa = &ltpc_priv->my_addr;
	struct lt_init c;
	int ltflags;

	if(debug & DEBUG_VERBOSE) printk("ltpc_ioctl called\n");

	switch(cmd) {
	case SIOCSIFADDR:

		aa->s_net = sa->sat_addr.s_net;

		/* LT_INIT probes for a node address, using the requested
		   node number as a hint; the card returns the one it got */
		c.command = LT_INIT;
		c.hint = sa->sat_addr.s_node;

		aa->s_node = do_read(dev,&c,sizeof(c),&c,0);

		/* enable reception of all LLAP types */
		ltflags = read_30(dev);
		ltflags |= LT_FLAG_ALLLAP;
		set_30 (dev,ltflags);

		dev->broadcast[0] = 0xFF;
		dev->dev_addr[0] = aa->s_node;

		dev->addr_len=1;

		return 0;

	case SIOCGIFADDR:

		sa->sat_addr.s_net = aa->s_net;
		sa->sat_addr.s_node = aa->s_node;

		return 0;

	default:
		return -EINVAL;
	}
}

static void set_multicast_list(struct net_device *dev)
{
	/* LocalTalk has no multicast filtering to program, but the
	   stack expects this hook to exist */
}
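
/*
 * Polled operation: if no usable IRQ was found (or request_irq() failed),
 * ltpc_probe() arms ltpc_timer instead, and ltpc_poll() runs idle() every
 * HZ/20 jiffies (about 50 ms) to pick up received frames and results.
 */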
static int ltpc_poll_counter;

static void ltpc_poll(struct timer_list *unused)
{
	del_timer(&ltpc_timer);

	if(debug & DEBUG_VERBOSE) {
		if (!ltpc_poll_counter) {
			ltpc_poll_counter = 50;
			printk("ltpc poll is alive\n");
		}
		ltpc_poll_counter--;
	}

	/* poll 20 times per second */
	idle(ltpc_timer_dev);
	ltpc_timer.expires = jiffies + HZ/20;
	add_timer(&ltpc_timer);
}

/* transmit a frame: the skb arrives with the 3-byte LLAP header (dnode,
   snode, LLAP type) in front of the DDP data; it is peeled off into the
   LT_SENDLAP command block and the rest is handed to the card */
static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int i;
	struct lt_sendlap cbuf;
	unsigned char *hdr;

	cbuf.command = LT_SENDLAP;
	cbuf.dnode = skb->data[0];
	cbuf.laptype = skb->data[2];
	skb_pull(skb,3);	/* skip past the LLAP header */
	cbuf.length = skb->len;
	skb_reset_transport_header(skb);

	if(debug & DEBUG_UPPER) {
		printk("command ");
		for(i=0;i<6;i++)
			printk("%02x ",((unsigned char *)&cbuf)[i]);
		printk("\n");
	}

	hdr = skb_transport_header(skb);
	do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len);

	if(debug & DEBUG_UPPER) {
		printk("sent %d ddp bytes\n",skb->len);
		for (i = 0; i < skb->len; i++)
			printk("%02x ", hdr[i]);
		printk("\n");
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
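
/*
 * ltpc_probe_dma() -- figure out which ISA DMA channel the card is
 * jumpered to.  Candidate channels (1 and/or 3, depending on the "dma"
 * parameter) are each armed for a small write transfer, an LT_READMEM
 * command block is pushed at the card, and whichever channel's residue
 * count actually dropped is the one the card is wired to.  Returns that
 * channel number (the other candidate is released), or 0 if neither
 * channel moved.
 */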
static int __init ltpc_probe_dma(int base, int dma)
{
	int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3;
	unsigned long timeout;
	unsigned long f;

	if (want & 1) {
		if (request_dma(1,"ltpc")) {
			want &= ~1;
		} else {
			f=claim_dma_lock();
			disable_dma(1);
			clear_dma_ff(1);
			set_dma_mode(1,DMA_MODE_WRITE);
			set_dma_addr(1,virt_to_bus(ltdmabuf));
			set_dma_count(1,sizeof(struct lt_mem));
			enable_dma(1);
			release_dma_lock(f);
		}
	}
	if (want & 2) {
		if (request_dma(3,"ltpc")) {
			want &= ~2;
		} else {
			f=claim_dma_lock();
			disable_dma(3);
			clear_dma_ff(3);
			set_dma_mode(3,DMA_MODE_WRITE);
			set_dma_addr(3,virt_to_bus(ltdmabuf));
			set_dma_count(3,sizeof(struct lt_mem));
			enable_dma(3);
			release_dma_lock(f);
		}
	}

	ltdmabuf[0] = LT_READMEM;
	ltdmabuf[1] = 1;
	ltdmabuf[2] = 0; ltdmabuf[3] = 0;
	ltdmabuf[4] = 0; ltdmabuf[5] = 1;
	ltdmabuf[6] = 0;

	inb_p(io+1);
	inb_p(io+0);
	timeout = jiffies+100*HZ/100;
	while(time_before(jiffies, timeout)) {
		if ( 0xfa == inb_p(io+6) ) break;
	}

	inb_p(io+3);
	inb_p(io+2);
	while(time_before(jiffies, timeout)) {
		if ( 0xfb == inb_p(io+6) ) break;
	}

	/* release whichever dma channel did not move */

	if ((want & 2) && (get_dma_residue(3)==sizeof(struct lt_mem))) {
		want &= ~2;
		free_dma(3);
	}

	if ((want & 1) && (get_dma_residue(1)==sizeof(struct lt_mem))) {
		want &= ~1;
		free_dma(1);
	}

	if (!want)
		return 0;

	return (want & 2) ? 3 : 1;
}
static const struct net_device_ops ltpc_netdev = {
	.ndo_start_xmit		= ltpc_xmit,
	.ndo_do_ioctl		= ltpc_ioctl,
	.ndo_set_rx_mode	= set_multicast_list,
};
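
/*
 * ltpc_probe() -- locate and initialise the card.  The sequence below is:
 * claim an I/O window at 0x220 or 0x240 (honouring the "io" parameter),
 * autoprobe the IRQ if none was given, allocate the 1000-byte DMA buffer,
 * wake the card with a fixed sequence of port reads and delays, find the
 * DMA channel with ltpc_probe_dma(), then arm a 0x100-byte read and wait
 * for state 0xf9 (apparently completing the LT_READMEM issued during the
 * DMA probe), and finally register the net device -- with a real
 * interrupt handler if possible, otherwise falling back to the polling
 * timer.
 */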
struct net_device * __init ltpc_probe(void)
{
	struct net_device *dev;
	int err = -ENOMEM;
	int x=0,y=0;
	int autoirq;
	unsigned long f;
	unsigned long timeout;

	dev = alloc_ltalkdev(sizeof(struct ltpc_private));
	if (!dev)
		goto out;

	/* probe for the I/O port -- the card sits at 0x220 or 0x240 */
	if (io != 0x240 && request_region(0x220,8,"ltpc")) {
		x = inb_p(0x220+6);
		if ( (x!=0xff) && (x>=0xf0) ) {
			io = 0x220;
			goto got_port;
		}
		release_region(0x220,8);
	}
	if (io != 0x220 && request_region(0x240,8,"ltpc")) {
		y = inb_p(0x240+6);
		if ( (y!=0xff) && (y>=0xf0) ){
			io = 0x240;
			goto got_port;
		}
		release_region(0x240,8);
	}

	/* neither port responded */
	printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y);
	err = -ENODEV;
	goto out1;

 got_port:
	/* probe for the IRQ line if none was specified */
	if (irq < 2) {
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		/* these reads are apparently what coaxes the card into
		   raising an interrupt we can detect */
		inb_p(io+7);
		inb_p(io+7);

		inb_p(io+6);
		mdelay(2);
		autoirq = probe_irq_off(irq_mask);

		if (autoirq == 0) {
			printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
		} else {
			irq = autoirq;
		}
	}

	/* allocate the DMA buffer */
	ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
	if (!ltdmabuf) {
		printk(KERN_ERR "ltpc: mem alloc failed\n");
		err = -ENOMEM;
		goto out2;
	}

	ltdmacbuf = &ltdmabuf[800];

	if(debug & DEBUG_VERBOSE) {
		printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf);
	}

	/* wake the card up with a fixed sequence of port reads
	   (board-specific magic) and give it time to come up */
	inb_p(io+1);
	inb_p(io+3);

	msleep(20);

	inb_p(io+0);
	inb_p(io+2);
	inb_p(io+7);
	inb_p(io+4);
	inb_p(io+5);
	inb_p(io+5);
	inb_p(io+6);

	ssleep(1);

	/* figure out which dma channel we're using, unless it's
	   already been specified */
	dma = ltpc_probe_dma(io, dma);
	if (!dma) {
		printk(KERN_ERR "No DMA channel found on ltpc card.\n");
		err = -ENODEV;
		goto out3;
	}

	/* print out friendly message */
	if(irq)
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IRQ%d, DMA%d.\n",io,irq,dma);
	else
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);

	dev->netdev_ops = &ltpc_netdev;
	dev->base_addr = io;
	dev->irq = irq;
	dev->dma = dma;

	/* set up a 0x100-byte read and wait for the card to reach 0xf9,
	   apparently finishing the transfer started in ltpc_probe_dma() */
	f=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,0x100);
	enable_dma(dma);
	release_dma_lock(f);

	(void) inb_p(io+3);
	(void) inb_p(io+2);
	timeout = jiffies+100*HZ/100;

	while(time_before(jiffies, timeout)) {
		if( 0xf9 == inb_p(io+6))
			break;
		schedule();
	}

	if(debug & DEBUG_VERBOSE) {
		printk("setting up timer and irq\n");
	}

	/* install the interrupt handler, or fall back to polling */
	if (irq && request_irq( irq, ltpc_interrupt, 0, "ltpc", dev) >= 0)
	{
		(void) inb_p(io+7);
		(void) inb_p(io+7);
	} else {
		if( irq )
			printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
		dev->irq = 0;

		/* polled mode -- 20 times per second */
		ltpc_timer_dev = dev;
		timer_setup(&ltpc_timer, ltpc_poll, 0);

		ltpc_timer.expires = jiffies + HZ/20;
		add_timer(&ltpc_timer);
	}
	err = register_netdev(dev);
	if (err)
		goto out4;

	return dev;
out4:
	del_timer_sync(&ltpc_timer);
	if (dev->irq)
		free_irq(dev->irq, dev);
out3:
	free_pages((unsigned long)ltdmabuf, get_order(1000));
out2:
	release_region(io, 8);
out1:
	free_netdev(dev);
out:
	return ERR_PTR(err);
}

#ifndef MODULE
/* handle the "ltpc=auto|iobase[,irq[,dma]]" kernel command line option */
static int __init ltpc_setup(char *str)
{
	int ints[5];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	if (ints[0] == 0) {
		if (str && !strncmp(str, "auto", 4)) {
			/* "auto" leaves io/irq/dma at 0, i.e. autoprobe */
		}
		else {
			printk (KERN_ERR
				"ltpc: usage: ltpc=auto|iobase[,irq[,dma]]\n");
			return 0;
		}
	} else {
		io = ints[1];
		if (ints[0] > 1) {
			irq = ints[2];
		}
		if (ints[0] > 2) {
			dma = ints[3];
		}
		/* ignore any further parameters */
	}
	return 1;
}

__setup("ltpc=", ltpc_setup);
#endif
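
/*
 * The module parameters below mirror the ltpc= boot options above.  As an
 * illustration only (the exact values depend on how the card is
 * jumpered), loading with explicit resources would look something like:
 *
 *	modprobe ltpc io=0x240 irq=9 dma=1
 *
 * With io left at 0 the driver autoprobes, which the init routine below
 * warns against for module use.
 */
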
static struct net_device *dev_ltpc;

#ifdef MODULE

MODULE_LICENSE("GPL");
module_param(debug, int, 0);
module_param_hw(io, int, ioport, 0);
module_param_hw(irq, int, irq, 0);
module_param_hw(dma, int, dma, 0);

static int __init ltpc_module_init(void)
{
	if(io == 0)
		printk(KERN_NOTICE
			"ltpc: Autoprobing is not recommended for modules\n");

	dev_ltpc = ltpc_probe();
	return PTR_ERR_OR_ZERO(dev_ltpc);
}
module_init(ltpc_module_init);
#endif

static void __exit ltpc_cleanup(void)
{
	if(debug & DEBUG_VERBOSE) printk("unregister_netdev\n");
	unregister_netdev(dev_ltpc);

	del_timer_sync(&ltpc_timer);

	if(debug & DEBUG_VERBOSE) printk("freeing irq\n");

	if (dev_ltpc->irq)
		free_irq(dev_ltpc->irq, dev_ltpc);

	if(debug & DEBUG_VERBOSE) printk("freeing dma\n");

	if (dev_ltpc->dma)
		free_dma(dev_ltpc->dma);

	if(debug & DEBUG_VERBOSE) printk("freeing ioaddr\n");

	if (dev_ltpc->base_addr)
		release_region(dev_ltpc->base_addr,8);

	free_netdev(dev_ltpc);

	if(debug & DEBUG_VERBOSE) printk("free_pages\n");

	free_pages( (unsigned long) ltdmabuf, get_order(1000));

	if(debug & DEBUG_VERBOSE) printk("returning from cleanup_module\n");
}

module_exit(ltpc_cleanup);