/*
 * ltpc.c -- driver for the Apple/Farallon LocalTalk PC card.
 */
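/*
 * Module parameters: "debug" is a bitmask selecting the printk categories
 * defined below; "io", "irq" and "dma" describe the card's resources and
 * may be left at 0 to autoprobe.
 */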
static int debug;
#define DEBUG_VERBOSE 1
#define DEBUG_UPPER 2
#define DEBUG_LOWER 4

static int io;
static int irq;
static int dma;

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ltalk.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/atalk.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <net/Space.h>

#include <asm/dma.h>
#include <asm/io.h>

#include "ltpc.h"

static DEFINE_SPINLOCK(txqueue_lock);
static DEFINE_SPINLOCK(mbox_lock);

static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
		   void *dbuf, int dbuflen);
static int sendup_buffer(struct net_device *dev);

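/*
 * Bounce buffers for ISA DMA: a single 1000-byte allocation from the DMA
 * zone, of which ltdmabuf holds up to 800 bytes of frame data and
 * ltdmacbuf (the tail of the same allocation) holds the 50-byte command
 * block exchanged with the card.
 */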
static unsigned long dma_mem_alloc(int size)
{
	int order = get_order(size);

	return __get_dma_pages(GFP_KERNEL, order);
}

static unsigned char *ltdmabuf;
static unsigned char *ltdmacbuf;

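/* Per-interface private data: the AppleTalk address set via SIOCSIFADDR. */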
struct ltpc_private
{
	struct atalk_addr my_addr;
};

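/*
 * One entry in the command queue handed to the card: a command buffer, an
 * optional data buffer, the transfer direction (QWrite) and the mailbox
 * that will receive the card's result code.
 */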
struct xmitQel {
	struct xmitQel *next;

	unsigned char *cbuf;
	short cbuflen;

	unsigned char *dbuf;
	short dbuflen;
	unsigned char QWrite;
	unsigned char mailbox;
};

static struct xmitQel *xmQhd, *xmQtl;

static void enQ(struct xmitQel *qel)
{
	unsigned long flags;
	qel->next = NULL;

	spin_lock_irqsave(&txqueue_lock, flags);
	if (xmQtl) {
		xmQtl->next = qel;
	} else {
		xmQhd = qel;
	}
	xmQtl = qel;
	spin_unlock_irqrestore(&txqueue_lock, flags);

	if (debug & DEBUG_LOWER)
		printk("enqueued a 0x%02x command\n",qel->cbuf[0]);
}

static struct xmitQel *deQ(void)
{
	unsigned long flags;
	int i;
	struct xmitQel *qel=NULL;

	spin_lock_irqsave(&txqueue_lock, flags);
	if (xmQhd) {
		qel = xmQhd;
		xmQhd = qel->next;
		if(!xmQhd) xmQtl = NULL;
	}
	spin_unlock_irqrestore(&txqueue_lock, flags);

	if ((debug & DEBUG_LOWER) && qel) {
		int n;
		printk(KERN_DEBUG "ltpc: dequeued command ");
		n = qel->cbuflen;
		if (n>100) n=100;
		for(i=0;i<n;i++) printk("%02x ",qel->cbuf[i]);
		printk("\n");
	}

	return qel;
}

static struct xmitQel qels[16];

static unsigned char mailbox[16];
static unsigned char mboxinuse[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};

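/*
 * Poll the card's status port (base+6) until it leaves state "c".
 * Returns 0 once the state changes, 1 after roughly 20 seconds
 * (200000 polls of 100 us).
 */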
static int wait_timeout(struct net_device *dev, int c)
{
	int i;

	for(i=0;i<200000;i++) {
		if ( c != inb_p(dev->base_addr+6) ) return 0;
		udelay(100);
	}
	return 1;
}

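/*
 * Claim a free mailbox (1-15) for a queued command; mailbox 0 is reserved
 * for the internal LT_GETRESULT command issued from idle().  Returns 0 if
 * all mailboxes are busy.
 */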
static int getmbox(void)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mbox_lock, flags);
	for(i=1;i<16;i++) if(!mboxinuse[i]) {
		mboxinuse[i]=1;
		spin_unlock_irqrestore(&mbox_lock, flags);
		return i;
	}
	spin_unlock_irqrestore(&mbox_lock, flags);
	return 0;
}

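/*
 * Handlers for the card's DMA-request states, called only from idle():
 * 0xfc fetches the 50-byte command/header block from the card, 0xfd fetches
 * a received frame into the data buffer and passes it up, 0xfb moves the
 * 800-byte data buffer to or from the card depending on the queued command,
 * and 0xfa sends a 50-byte command block to the card.  Each handler programs
 * the ISA DMA channel, reads base+3 and base+2 as the handshake, and then
 * waits for the state to clear.
 */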
static void handlefc(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmacbuf));
	set_dma_count(dma,50);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfc) ) printk("timed out in handlefc\n");
}

static void handlefd(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfd) ) printk("timed out in handlefd\n");
	sendup_buffer(dev);
}

static void handlewrite(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_WRITE);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfb) ) {
		flags=claim_dma_lock();
		printk("timed out in handlewrite, dma res %d\n",
			get_dma_residue(dev->dma) );
		release_dma_lock(flags);
	}
}

static void handleread(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);
	if ( wait_timeout(dev,0xfb) ) printk("timed out in handleread\n");
}

static void handlecommand(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_WRITE);
	set_dma_addr(dma,virt_to_bus(ltdmacbuf));
	set_dma_count(dma,50);
	enable_dma(dma);
	release_dma_lock(flags);
	inb_p(base+3);
	inb_p(base+2);
	if ( wait_timeout(dev,0xfa) ) printk("timed out in handlecommand\n");
}

static unsigned char rescbuf[2] = {LT_GETRESULT,0};
static unsigned char resdbuf[2];

static int QInIdle;

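/*
 * The command/transmit state machine.  Drains the queue by repeatedly
 * sampling the card's state port (base+6) and dispatching to the handlers
 * above; exits when the card reports ready (0xf8) with nothing left queued.
 * QInIdle prevents reentry from the interrupt handler or the poll timer,
 * and the trailing reads of base+7 (only when an IRQ is in use) re-arm the
 * card's interrupt.
 */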
static void idle(struct net_device *dev)
{
	unsigned long flags;
	int state;
	struct xmitQel *q = NULL;
	int oops;
	int i;
	int base = dev->base_addr;

	spin_lock_irqsave(&txqueue_lock, flags);
	if(QInIdle) {
		spin_unlock_irqrestore(&txqueue_lock, flags);
		return;
	}
	QInIdle = 1;
	spin_unlock_irqrestore(&txqueue_lock, flags);

	(void) inb_p(base+6);

	oops = 100;

loop:
	if (0>oops--) {
		printk("idle: looped too many times\n");
		goto done;
	}

	state = inb_p(base+6);
	if (state != inb_p(base+6)) goto loop;

	switch(state) {
	case 0xfc:
		if (debug & DEBUG_LOWER) printk("idle: fc\n");
		handlefc(dev);
		break;
	case 0xfd:
		if(debug & DEBUG_LOWER) printk("idle: fd\n");
		handlefd(dev);
		break;
	case 0xf9:
		if (debug & DEBUG_LOWER) printk("idle: f9\n");
		if(!mboxinuse[0]) {
			mboxinuse[0] = 1;
			qels[0].cbuf = rescbuf;
			qels[0].cbuflen = 2;
			qels[0].dbuf = resdbuf;
			qels[0].dbuflen = 2;
			qels[0].QWrite = 0;
			qels[0].mailbox = 0;
			enQ(&qels[0]);
		}
		inb_p(dev->base_addr+1);
		inb_p(dev->base_addr+0);
		if( wait_timeout(dev,0xf9) )
			printk("timed out idle f9\n");
		break;
	case 0xf8:
		if (xmQhd) {
			inb_p(dev->base_addr+1);
			inb_p(dev->base_addr+0);
			if(wait_timeout(dev,0xf8) )
				printk("timed out idle f8\n");
		} else {
			goto done;
		}
		break;
	case 0xfa:
		if(debug & DEBUG_LOWER) printk("idle: fa\n");
		if (xmQhd) {
			q=deQ();
			memcpy(ltdmacbuf,q->cbuf,q->cbuflen);
			ltdmacbuf[1] = q->mailbox;
			if (debug>1) {
				int n;
				printk("ltpc: sent command ");
				n = q->cbuflen;
				if (n>100) n=100;
				for(i=0;i<n;i++)
					printk("%02x ",ltdmacbuf[i]);
				printk("\n");
			}

			handlecommand(dev);

			if (0xfa == inb_p(base + 6)) {
				goto done;
			}
		} else {
			if (!mboxinuse[0]) {
				mboxinuse[0] = 1;
				qels[0].cbuf = rescbuf;
				qels[0].cbuflen = 2;
				qels[0].dbuf = resdbuf;
				qels[0].dbuflen = 2;
				qels[0].QWrite = 0;
				qels[0].mailbox = 0;
				enQ(&qels[0]);
			} else {
				printk("trouble: response command already queued\n");
				goto done;
			}
		}
		break;
	case 0xfb:
		if(debug & DEBUG_LOWER) printk("idle: fb\n");
		if(q->QWrite) {
			memcpy(ltdmabuf,q->dbuf,q->dbuflen);
			handlewrite(dev);
		} else {
			handleread(dev);

			if(q->mailbox) {
				memcpy(q->dbuf,ltdmabuf,q->dbuflen);
			} else {
				mailbox[ 0x0f & ltdmabuf[0] ] = ltdmabuf[1];
				mboxinuse[0]=0;
			}
		}
		break;
	}
	goto loop;

done:
	QInIdle=0;

	if (dev->irq) {
		inb_p(base+7);
		inb_p(base+7);
	}
}

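/*
 * Queue a command (with data going to the card for do_write, or coming
 * back for do_read), run the state machine, and return the result code
 * the card left in the mailbox.
 */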
static int do_write(struct net_device *dev, void *cbuf, int cbuflen,
		    void *dbuf, int dbuflen)
{
	int i = getmbox();
	int ret;

	if(i) {
		qels[i].cbuf = cbuf;
		qels[i].cbuflen = cbuflen;
		qels[i].dbuf = dbuf;
		qels[i].dbuflen = dbuflen;
		qels[i].QWrite = 1;
		qels[i].mailbox = i;
		enQ(&qels[i]);
		idle(dev);
		ret = mailbox[i];
		mboxinuse[i]=0;
		return ret;
	}
	printk("ltpc: could not allocate mbox\n");
	return -1;
}

static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
		   void *dbuf, int dbuflen)
{
	int i = getmbox();
	int ret;

	if(i) {
		qels[i].cbuf = cbuf;
		qels[i].cbuflen = cbuflen;
		qels[i].dbuf = dbuf;
		qels[i].dbuflen = dbuflen;
		qels[i].QWrite = 0;
		qels[i].mailbox = i;
		enQ(&qels[i]);
		idle(dev);
		ret = mailbox[i];
		mboxinuse[i]=0;
		return ret;
	}
	printk("ltpc: could not allocate mbox\n");
	return -1;
}

static struct timer_list ltpc_timer;
static struct net_device *ltpc_timer_dev;

static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev);

static int read_30(struct net_device *dev)
{
	lt_command c;
	c.getflags.command = LT_GETFLAGS;
	return do_read(dev, &c, sizeof(c.getflags),&c,0);
}

static int set_30(struct net_device *dev,int x)
{
	lt_command c;
	c.setflags.command = LT_SETFLAGS;
	c.setflags.flags = x;
	return do_write(dev, &c, sizeof(c.setflags),&c,0);
}

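/*
 * Deliver a received LLAP frame to the AppleTalk stack: the header fields
 * arrive in the command buffer (struct lt_rcvlap) and the DDP payload in
 * ltdmabuf; rebuild the 3-byte LLAP header (dst node, src node, LLAP type)
 * in front of the payload and hand the skb to netif_rx().
 */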
static int sendup_buffer(struct net_device *dev)
{
	int dnode, snode, llaptype, len;
	int sklen;
	struct sk_buff *skb;
	struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;

	if (ltc->command != LT_RCVLAP) {
		printk("unknown command 0x%02x from ltpc card\n",ltc->command);
		return -1;
	}
	dnode = ltc->dnode;
	snode = ltc->snode;
	llaptype = ltc->laptype;
	len = ltc->length;

	sklen = len;
	if (llaptype == 1)
		sklen += 8;
	if(sklen > 800) {
		printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n",
			dev->name,sklen);
		return -1;
	}

	if ( (llaptype==0) || (llaptype>2) ) {
		printk(KERN_INFO "%s: unknown LLAP type: %d\n",dev->name,llaptype);
		return -1;
	}

	skb = dev_alloc_skb(3+sklen);
	if (skb == NULL)
	{
		printk("%s: dropping packet due to memory squeeze.\n",
			dev->name);
		return -1;
	}
	skb->dev = dev;

	if (sklen > len)
		skb_reserve(skb,8);
	skb_put(skb,len+3);
	skb->protocol = htons(ETH_P_LOCALTALK);

	skb->data[0] = dnode;
	skb->data[1] = snode;
	skb->data[2] = llaptype;
	skb_reset_mac_header(skb);
	skb_pull(skb,3);

	skb_copy_to_linear_data(skb, ltdmabuf, len);

	skb_reset_transport_header(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	netif_rx(skb);
	return 0;
}

static irqreturn_t
ltpc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;

	if (dev==NULL) {
		printk("ltpc_interrupt: unknown device.\n");
		return IRQ_NONE;
	}

	inb_p(dev->base_addr+6);

	idle(dev);

	return IRQ_HANDLED;
}

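/*
 * SIOCSIFADDR: remember the requested network, ask the card for a node
 * address with LT_INIT (using the requested node as a hint), set
 * LT_FLAG_ALLLAP in the card's flags, and publish the node as the one-byte
 * device address.  SIOCGIFADDR: report the stored address.
 */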
static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct sockaddr_at *sa = (struct sockaddr_at *) &ifr->ifr_addr;
	struct ltpc_private *ltpc_priv = netdev_priv(dev);
	struct atalk_addr *aa = &ltpc_priv->my_addr;
	struct lt_init c;
	int ltflags;

	if(debug & DEBUG_VERBOSE) printk("ltpc_ioctl called\n");

	switch(cmd) {
	case SIOCSIFADDR:
		aa->s_net = sa->sat_addr.s_net;

		c.command = LT_INIT;
		c.hint = sa->sat_addr.s_node;

		aa->s_node = do_read(dev,&c,sizeof(c),&c,0);

		ltflags = read_30(dev);
		ltflags |= LT_FLAG_ALLLAP;
		set_30(dev,ltflags);

		dev->broadcast[0] = 0xFF;
		dev->addr_len=1;
		dev_addr_set(dev, &aa->s_node);

		return 0;

	case SIOCGIFADDR:
		sa->sat_addr.s_net = aa->s_net;
		sa->sat_addr.s_node = aa->s_node;

		return 0;

	default:
		return -EINVAL;
	}
}

static void set_multicast_list(struct net_device *dev)
{
}

static int ltpc_poll_counter;

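/*
 * Polled-mode operation: with no usable IRQ, the state machine is kicked
 * from this timer every HZ/20 ticks (50 ms).
 */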
static void ltpc_poll(struct timer_list *unused)
{
	del_timer(&ltpc_timer);

	if(debug & DEBUG_VERBOSE) {
		if (!ltpc_poll_counter) {
			ltpc_poll_counter = 50;
			printk("ltpc poll is alive\n");
		}
		ltpc_poll_counter--;
	}

	idle(ltpc_timer_dev);
	ltpc_timer.expires = jiffies + HZ/20;
	add_timer(&ltpc_timer);
}

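/*
 * Transmit: strip the 3-byte LLAP header from the skb, build an LT_SENDLAP
 * command describing destination node, LLAP type and length, and hand the
 * command plus the DDP payload to the card via do_write().
 */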
static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int i;
	struct lt_sendlap cbuf;
	unsigned char *hdr;

	cbuf.command = LT_SENDLAP;
	cbuf.dnode = skb->data[0];
	cbuf.laptype = skb->data[2];
	skb_pull(skb,3);
	cbuf.length = skb->len;
	skb_reset_transport_header(skb);

	if(debug & DEBUG_UPPER) {
		printk("command ");
		for(i=0;i<6;i++)
			printk("%02x ",((unsigned char *)&cbuf)[i]);
		printk("\n");
	}

	hdr = skb_transport_header(skb);
	do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len);

	if(debug & DEBUG_UPPER) {
		printk("sent %d ddp bytes\n",skb->len);
		for (i = 0; i < skb->len; i++)
			printk("%02x ", hdr[i]);
		printk("\n");
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

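/*
 * Find a usable ISA DMA channel.  Try channels 1 and 3 (or only the one
 * requested), ask the card to perform a small LT_READMEM transfer, and keep
 * whichever channel's residue counter shows that data actually moved.
 * Returns the channel number, or 0 if neither channel worked.
 */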
static int __init ltpc_probe_dma(int base, int dma)
{
	int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3;
	unsigned long timeout;
	unsigned long f;

	if (want & 1) {
		if (request_dma(1,"ltpc")) {
			want &= ~1;
		} else {
			f=claim_dma_lock();
			disable_dma(1);
			clear_dma_ff(1);
			set_dma_mode(1,DMA_MODE_WRITE);
			set_dma_addr(1,virt_to_bus(ltdmabuf));
			set_dma_count(1,sizeof(struct lt_mem));
			enable_dma(1);
			release_dma_lock(f);
		}
	}
	if (want & 2) {
		if (request_dma(3,"ltpc")) {
			want &= ~2;
		} else {
			f=claim_dma_lock();
			disable_dma(3);
			clear_dma_ff(3);
			set_dma_mode(3,DMA_MODE_WRITE);
			set_dma_addr(3,virt_to_bus(ltdmabuf));
			set_dma_count(3,sizeof(struct lt_mem));
			enable_dma(3);
			release_dma_lock(f);
		}
	}

	ltdmabuf[0] = LT_READMEM;
	ltdmabuf[1] = 1;
	ltdmabuf[2] = 0; ltdmabuf[3] = 0;
	ltdmabuf[4] = 0; ltdmabuf[5] = 1;
	ltdmabuf[6] = 0;

	inb_p(io+1);
	inb_p(io+0);
	timeout = jiffies+100*HZ/100;
	while(time_before(jiffies, timeout)) {
		if ( 0xfa == inb_p(io+6) ) break;
	}

	inb_p(io+3);
	inb_p(io+2);
	while(time_before(jiffies, timeout)) {
		if ( 0xfb == inb_p(io+6) ) break;
	}

	if ((want & 2) && (get_dma_residue(3)==sizeof(struct lt_mem))) {
		want &= ~2;
		free_dma(3);
	}

	if ((want & 1) && (get_dma_residue(1)==sizeof(struct lt_mem))) {
		want &= ~1;
		free_dma(1);
	}

	if (!want)
		return 0;

	return (want & 2) ? 3 : 1;
}

static const struct net_device_ops ltpc_netdev = {
	.ndo_start_xmit = ltpc_xmit,
	.ndo_do_ioctl = ltpc_ioctl,
	.ndo_set_rx_mode = set_multicast_list,
};

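/*
 * Locate the card at 0x220 or 0x240 (unless io= was given), autodetect the
 * IRQ if necessary, allocate the DMA bounce buffers, reset and wake up the
 * card, pick a DMA channel, and register the LocalTalk device.  Falls back
 * to timer-driven polling when no IRQ can be obtained.
 */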
static struct net_device * __init ltpc_probe(void)
{
	struct net_device *dev;
	int err = -ENOMEM;
	int x=0,y=0;
	int autoirq;
	unsigned long f;
	unsigned long timeout;

	dev = alloc_ltalkdev(sizeof(struct ltpc_private));
	if (!dev)
		goto out;

	if (io != 0x240 && request_region(0x220,8,"ltpc")) {
		x = inb_p(0x220+6);
		if ( (x!=0xff) && (x>=0xf0) ) {
			io = 0x220;
			goto got_port;
		}
		release_region(0x220,8);
	}
	if (io != 0x220 && request_region(0x240,8,"ltpc")) {
		y = inb_p(0x240+6);
		if ( (y!=0xff) && (y>=0xf0) ){
			io = 0x240;
			goto got_port;
		}
		release_region(0x240,8);
	}

	printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y);
	err = -ENODEV;
	goto out1;

got_port:
	if (irq < 2) {
		unsigned long irq_mask;

		irq_mask = probe_irq_on();

		inb_p(io+7);
		inb_p(io+7);

		inb_p(io+6);
		mdelay(2);
		autoirq = probe_irq_off(irq_mask);

		if (autoirq == 0) {
			printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
		} else {
			irq = autoirq;
		}
	}

	ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
	if (!ltdmabuf) {
		printk(KERN_ERR "ltpc: mem alloc failed\n");
		err = -ENOMEM;
		goto out2;
	}

	ltdmacbuf = &ltdmabuf[800];

	if(debug & DEBUG_VERBOSE) {
		printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf);
	}

	inb_p(io+1);
	inb_p(io+3);

	msleep(20);

	inb_p(io+0);
	inb_p(io+2);
	inb_p(io+7);
	inb_p(io+4);
	inb_p(io+5);
	inb_p(io+5);
	inb_p(io+6);

	ssleep(1);

	dma = ltpc_probe_dma(io, dma);
	if (!dma) {
		printk(KERN_ERR "No DMA channel found on ltpc card.\n");
		err = -ENODEV;
		goto out3;
	}

	if(irq)
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma);
	else
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d.  Using polled mode.\n",io,dma);

	dev->netdev_ops = &ltpc_netdev;
	dev->base_addr = io;
	dev->irq = irq;
	dev->dma = dma;

	f=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,0x100);
	enable_dma(dma);
	release_dma_lock(f);

	(void) inb_p(io+3);
	(void) inb_p(io+2);
	timeout = jiffies+100*HZ/100;

	while(time_before(jiffies, timeout)) {
		if( 0xf9 == inb_p(io+6))
			break;
		schedule();
	}

	if(debug & DEBUG_VERBOSE) {
		printk("setting up timer and irq\n");
	}

	if (irq && request_irq( irq, ltpc_interrupt, 0, "ltpc", dev) >= 0)
	{
		(void) inb_p(io+7);
		(void) inb_p(io+7);
	} else {
		if( irq )
			printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
		dev->irq = 0;

		ltpc_timer_dev = dev;
		timer_setup(&ltpc_timer, ltpc_poll, 0);

		ltpc_timer.expires = jiffies + HZ/20;
		add_timer(&ltpc_timer);
	}
	err = register_netdev(dev);
	if (err)
		goto out4;

	return dev;
out4:
	del_timer_sync(&ltpc_timer);
	if (dev->irq)
		free_irq(dev->irq, dev);
out3:
	free_pages((unsigned long)ltdmabuf, get_order(1000));
out2:
	release_region(io, 8);
out1:
	free_netdev(dev);
out:
	return ERR_PTR(err);
}

#ifndef MODULE

static int __init ltpc_setup(char *str)
{
	int ints[5];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	if (ints[0] == 0) {
		if (str && !strncmp(str, "auto", 4)) {
		}
		else {
			printk (KERN_ERR
				"ltpc: usage: ltpc=auto|iobase[,irq[,dma]]\n");
			return 0;
		}
	} else {
		io = ints[1];
		if (ints[0] > 1) {
			irq = ints[2];
		}
		if (ints[0] > 2) {
			dma = ints[3];
		}
	}
	return 1;
}

__setup("ltpc=", ltpc_setup);
#endif

static struct net_device *dev_ltpc;

MODULE_LICENSE("GPL");
module_param(debug, int, 0);
module_param_hw(io, int, ioport, 0);
module_param_hw(irq, int, irq, 0);
module_param_hw(dma, int, dma, 0);

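/*
 * A typical modular load might look like (illustrative resource values):
 *	modprobe ltpc io=0x240 irq=9 dma=1
 * Any parameter left at 0 is autoprobed, which the notice below discourages
 * for modules.
 */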
static int __init ltpc_module_init(void)
{
	if(io == 0)
		printk(KERN_NOTICE
			"ltpc: Autoprobing is not recommended for modules\n");

	dev_ltpc = ltpc_probe();
	return PTR_ERR_OR_ZERO(dev_ltpc);
}
module_init(ltpc_module_init);

static void __exit ltpc_cleanup(void)
{
	if(debug & DEBUG_VERBOSE) printk("unregister_netdev\n");
	unregister_netdev(dev_ltpc);

	del_timer_sync(&ltpc_timer);

	if(debug & DEBUG_VERBOSE) printk("freeing irq\n");

	if (dev_ltpc->irq)
		free_irq(dev_ltpc->irq, dev_ltpc);

	if(debug & DEBUG_VERBOSE) printk("freeing dma\n");

	if (dev_ltpc->dma)
		free_dma(dev_ltpc->dma);

	if(debug & DEBUG_VERBOSE) printk("freeing ioaddr\n");

	if (dev_ltpc->base_addr)
		release_region(dev_ltpc->base_addr,8);

	free_netdev(dev_ltpc);

	if(debug & DEBUG_VERBOSE) printk("free_pages\n");

	free_pages( (unsigned long) ltdmabuf, get_order(1000));

	if(debug & DEBUG_VERBOSE) printk("returning from cleanup_module\n");
}

module_exit(ltpc_cleanup);