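/*
 * ltpc: driver for the Apple/Farallon LocalTalk PC card, providing a
 * LocalTalk (LLAP) network interface for the Linux AppleTalk stack.
 */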
/* debug level bitmask */
static int debug;
#define DEBUG_VERBOSE 1
#define DEBUG_UPPER 2
#define DEBUG_LOWER 4

/* I/O base, IRQ and DMA channel; settable as boot/module parameters */
static int io;
static int irq;
static int dma;

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ltalk.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/atalk.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <net/Space.h>

#include <asm/dma.h>
#include <asm/io.h>

#include "ltpc.h"

static DEFINE_SPINLOCK(txqueue_lock);
static DEFINE_SPINLOCK(mbox_lock);

static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
		   void *dbuf, int dbuflen);
static int sendup_buffer(struct net_device *dev);

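/*
 * The card transfers commands and packet data over ISA DMA, so its buffers
 * must live in DMA-capable low memory; dma_mem_alloc() grabs whole pages
 * with GFP_DMA via __get_dma_pages().
 */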
static unsigned long dma_mem_alloc(int size)
{
	int order = get_order(size);

	return __get_dma_pages(GFP_KERNEL, order);
}

/* 1000-byte DMA area: packet data at offset 0, command block at offset 800 */
static unsigned char *ltdmabuf;
static unsigned char *ltdmacbuf;

struct ltpc_private {
	struct atalk_addr my_addr;
};

/* transmit queue element, one per mailbox */
struct xmitQel {
	struct xmitQel *next;
	/* command buffer */
	unsigned char *cbuf;
	short cbuflen;
	/* data buffer */
	unsigned char *dbuf;
	short dbuflen;
	unsigned char QWrite;	/* 1 = data goes to the card, 0 = data comes from it */
	unsigned char mailbox;
};

/* head and tail of the command queue */
static struct xmitQel *xmQhd, *xmQtl;

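/*
 * enQ()/deQ() maintain a simple singly linked FIFO of pending commands,
 * protected by txqueue_lock; idle() drains it while talking to the card.
 */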
static void enQ(struct xmitQel *qel)
{
	unsigned long flags;

	qel->next = NULL;

	spin_lock_irqsave(&txqueue_lock, flags);
	if (xmQtl) {
		xmQtl->next = qel;
	} else {
		xmQhd = qel;
	}
	xmQtl = qel;
	spin_unlock_irqrestore(&txqueue_lock, flags);

	if (debug & DEBUG_LOWER)
		printk("enqueued a 0x%02x command\n", qel->cbuf[0]);
}

static struct xmitQel *deQ(void)
{
	unsigned long flags;
	int i;
	struct xmitQel *qel = NULL;

	spin_lock_irqsave(&txqueue_lock, flags);
	if (xmQhd) {
		qel = xmQhd;
		xmQhd = qel->next;
		if (!xmQhd)
			xmQtl = NULL;
	}
	spin_unlock_irqrestore(&txqueue_lock, flags);

	if ((debug & DEBUG_LOWER) && qel) {
		int n;

		printk(KERN_DEBUG "ltpc: dequeued command ");
		n = qel->cbuflen;
		if (n > 100)
			n = 100;
		for (i = 0; i < n; i++)
			printk("%02x ", qel->cbuf[i]);
		printk("\n");
	}

	return qel;
}

static struct xmitQel qels[16];

/* per-mailbox completion status from the card, and in-use flags */
static unsigned char mailbox[16];
static unsigned char mboxinuse[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};

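/*
 * Busy-wait (with udelay) until the card's status byte at base+6 changes
 * away from the value c.  Returns 0 when it changes, 1 on timeout.
 */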
static int wait_timeout(struct net_device *dev, int c)
{
	int i;

	for (i = 0; i < 200000; i++) {
		if (c != inb_p(dev->base_addr + 6))
			return 0;
		udelay(100);
	}
	return 1;
}

/* grab the first free mailbox (1..15); 0 means none available */
static int getmbox(void)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mbox_lock, flags);
	for (i = 1; i < 16; i++) {
		if (!mboxinuse[i]) {
			mboxinuse[i] = 1;
			spin_unlock_irqrestore(&mbox_lock, flags);
			return i;
		}
	}
	spin_unlock_irqrestore(&mbox_lock, flags);
	return 0;
}

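/*
 * Handlers for the card states seen by idle().  Each one programs the ISA
 * DMA controller for the transfer the card is asking for, reads base+3 and
 * base+2 to kick it off, then waits for the status byte to move on:
 *
 *   handlefc      - 50-byte transfer from the card into ltdmacbuf
 *                   (used for the LT_RCVLAP header of an incoming frame;
 *                   see sendup_buffer())
 *   handlefd      - 800-byte transfer from the card into ltdmabuf
 *                   (the frame data), then sendup_buffer() passes it up
 *   handlewrite   - 800-byte transfer from ltdmabuf to the card
 *   handleread    - 800-byte transfer from the card into ltdmabuf
 *   handlecommand - 50-byte command transfer from ltdmacbuf to the card
 */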
static void handlefc(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma, DMA_MODE_READ);
	set_dma_addr(dma, virt_to_bus(ltdmacbuf));
	set_dma_count(dma, 50);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base + 3);
	inb_p(base + 2);

	if (wait_timeout(dev, 0xfc))
		printk("timed out in handlefc\n");
}

static void handlefd(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma, DMA_MODE_READ);
	set_dma_addr(dma, virt_to_bus(ltdmabuf));
	set_dma_count(dma, 800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base + 3);
	inb_p(base + 2);

	if (wait_timeout(dev, 0xfd))
		printk("timed out in handlefd\n");
	sendup_buffer(dev);
}

static void handlewrite(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma, DMA_MODE_WRITE);
	set_dma_addr(dma, virt_to_bus(ltdmabuf));
	set_dma_count(dma, 800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base + 3);
	inb_p(base + 2);

	if (wait_timeout(dev, 0xfb)) {
		flags = claim_dma_lock();
		printk("timed out in handlewrite, dma res %d\n",
		       get_dma_residue(dev->dma));
		release_dma_lock(flags);
	}
}

static void handleread(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma, DMA_MODE_READ);
	set_dma_addr(dma, virt_to_bus(ltdmabuf));
	set_dma_count(dma, 800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base + 3);
	inb_p(base + 2);

	if (wait_timeout(dev, 0xfb))
		printk("timed out in handleread\n");
}

static void handlecommand(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma, DMA_MODE_WRITE);
	set_dma_addr(dma, virt_to_bus(ltdmacbuf));
	set_dma_count(dma, 50);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base + 3);
	inb_p(base + 2);

	if (wait_timeout(dev, 0xfa))
		printk("timed out in handlecommand\n");
}

/* LT_GETRESULT command and result buffer used on mailbox 0 */
static unsigned char rescbuf[2] = {LT_GETRESULT, 0};
static unsigned char resdbuf[2];

/* nonzero while some context is already running the idle() state machine */
static int QInIdle;

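/*
 * idle() is the heart of the driver: it reads the card's status byte at
 * base+6 and reacts until the card has nothing more to say.
 *
 *   0xfc - the card has the header of a received frame for us
 *   0xfd - the card has the data of a received frame for us
 *   0xf9 - the card has a result pending; queue an LT_GETRESULT on mailbox 0
 *   0xf8 - the card is idle; kick it if we still have commands queued
 *   0xfa - the card is ready for a command; send the next queued one
 *   0xfb - the card is ready for the data phase of the current command
 *
 * QInIdle keeps the state machine single-threaded, since it can be entered
 * from the interrupt handler, the poll timer and the transmit path.
 */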
static void idle(struct net_device *dev)
{
	unsigned long flags;
	int state;
	struct xmitQel *q = NULL;
	int oops;
	int i;
	int base = dev->base_addr;

	spin_lock_irqsave(&txqueue_lock, flags);
	if (QInIdle) {
		spin_unlock_irqrestore(&txqueue_lock, flags);
		return;
	}
	QInIdle = 1;
	spin_unlock_irqrestore(&txqueue_lock, flags);

	(void) inb_p(base + 6);

	oops = 100;

loop:
	if (0 > oops--) {
		printk("idle: looped too many times\n");
		goto done;
	}

	/* only act on a stable status reading */
	state = inb_p(base + 6);
	if (state != inb_p(base + 6))
		goto loop;

	switch (state) {
	case 0xfc:
		/* incoming frame header */
		if (debug & DEBUG_LOWER)
			printk("idle: fc\n");
		handlefc(dev);
		break;
	case 0xfd:
		/* incoming frame data */
		if (debug & DEBUG_LOWER)
			printk("idle: fd\n");
		handlefd(dev);
		break;
	case 0xf9:
		/* result pending on the card; fetch it via mailbox 0 */
		if (debug & DEBUG_LOWER)
			printk("idle: f9\n");
		if (!mboxinuse[0]) {
			mboxinuse[0] = 1;
			qels[0].cbuf = rescbuf;
			qels[0].cbuflen = 2;
			qels[0].dbuf = resdbuf;
			qels[0].dbuflen = 2;
			qels[0].QWrite = 0;
			qels[0].mailbox = 0;
			enQ(&qels[0]);
		}
		inb_p(dev->base_addr + 1);
		inb_p(dev->base_addr + 0);
		if (wait_timeout(dev, 0xf9))
			printk("timed out idle f9\n");
		break;
	case 0xf8:
		/* card is idle; if we have queued commands, wake it up */
		if (xmQhd) {
			inb_p(dev->base_addr + 1);
			inb_p(dev->base_addr + 0);
			if (wait_timeout(dev, 0xf8))
				printk("timed out idle f8\n");
		} else {
			goto done;
		}
		break;
	case 0xfa:
		/* card is ready for a command */
		if (debug & DEBUG_LOWER)
			printk("idle: fa\n");
		if (xmQhd) {
			q = deQ();
			memcpy(ltdmacbuf, q->cbuf, q->cbuflen);
			ltdmacbuf[1] = q->mailbox;
			if (debug > 1) {
				int n;

				printk("ltpc: sent command ");
				n = q->cbuflen;
				if (n > 100)
					n = 100;
				for (i = 0; i < n; i++)
					printk("%02x ", ltdmacbuf[i]);
				printk("\n");
			}

			handlecommand(dev);

			if (0xfa == inb_p(base + 6)) {
				/* status is still 0xfa; bail out to avoid looping */
				goto done;
			}
		} else {
			/* nothing queued; send an LT_GETRESULT via mailbox 0 */
			if (!mboxinuse[0]) {
				mboxinuse[0] = 1;
				qels[0].cbuf = rescbuf;
				qels[0].cbuflen = 2;
				qels[0].dbuf = resdbuf;
				qels[0].dbuflen = 2;
				qels[0].QWrite = 0;
				qels[0].mailbox = 0;
				enQ(&qels[0]);
			} else {
				printk("trouble: response command already queued\n");
				goto done;
			}
		}
		break;
	case 0xfb:
		/* data phase of the current command */
		if (debug & DEBUG_LOWER)
			printk("idle: fb\n");
		if (q->QWrite) {
			memcpy(ltdmabuf, q->dbuf, q->dbuflen);
			handlewrite(dev);
		} else {
			handleread(dev);

			if (q->mailbox) {
				memcpy(q->dbuf, ltdmabuf, q->dbuflen);
			} else {
				/* LT_GETRESULT reply: byte 0 names the mailbox,
				 * byte 1 is its result */
				mailbox[0x0f & ltdmabuf[0]] = ltdmabuf[1];
				mboxinuse[0] = 0;
			}
		}
		break;
	}
	goto loop;

done:
	QInIdle = 0;

	/* if interrupt driven, give the card the two reads of base+7 that
	 * ltpc_probe() also issues after installing the IRQ handler */
	if (dev->irq) {
		inb_p(base + 7);
		inb_p(base + 7);
	}
}

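/*
 * do_write()/do_read() queue one command (plus its data phase) on a free
 * mailbox, run the idle() state machine until it completes, and return the
 * result byte the card left in mailbox[].  QWrite distinguishes whether the
 * data buffer is sent to the card or filled from it.
 */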
static int do_write(struct net_device *dev, void *cbuf, int cbuflen,
		    void *dbuf, int dbuflen)
{
	int i = getmbox();
	int ret;

	if (i) {
		qels[i].cbuf = cbuf;
		qels[i].cbuflen = cbuflen;
		qels[i].dbuf = dbuf;
		qels[i].dbuflen = dbuflen;
		qels[i].QWrite = 1;
		qels[i].mailbox = i;
		enQ(&qels[i]);
		idle(dev);
		ret = mailbox[i];
		mboxinuse[i] = 0;
		return ret;
	}
	printk("ltpc: could not allocate mbox\n");
	return -1;
}

static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
		   void *dbuf, int dbuflen)
{
	int i = getmbox();
	int ret;

	if (i) {
		qels[i].cbuf = cbuf;
		qels[i].cbuflen = cbuflen;
		qels[i].dbuf = dbuf;
		qels[i].dbuflen = dbuflen;
		qels[i].QWrite = 0;
		qels[i].mailbox = i;
		enQ(&qels[i]);
		idle(dev);
		ret = mailbox[i];
		mboxinuse[i] = 0;
		return ret;
	}
	printk("ltpc: could not allocate mbox\n");
	return -1;
}

/* timer used when the driver runs in polled (no-IRQ) mode */
static struct timer_list ltpc_timer;
static struct net_device *ltpc_timer_dev;

static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev);

/* read and set the card's flag byte (LT_GETFLAGS / LT_SETFLAGS) */
static int read_30(struct net_device *dev)
{
	lt_command c;

	c.getflags.command = LT_GETFLAGS;
	return do_read(dev, &c, sizeof(c.getflags), &c, 0);
}

static int set_30(struct net_device *dev, int x)
{
	lt_command c;

	c.setflags.command = LT_SETFLAGS;
	c.setflags.flags = x;
	return do_write(dev, &c, sizeof(c.setflags), &c, 0);
}

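/*
 * Build an skb from a frame the card has just DMAed in (header in ltdmacbuf
 * as a struct lt_rcvlap, data in ltdmabuf) and hand it to the AppleTalk
 * stack via netif_rx().  LLAP type 1 frames get 8 bytes of extra headroom.
 */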
static int sendup_buffer(struct net_device *dev)
{
	int dnode, snode, llaptype, len;
	int sklen;
	struct sk_buff *skb;
	struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;

	if (ltc->command != LT_RCVLAP) {
		printk("unknown command 0x%02x from ltpc card\n", ltc->command);
		return -1;
	}
	dnode = ltc->dnode;
	snode = ltc->snode;
	llaptype = ltc->laptype;
	len = ltc->length;

	sklen = len;
	if (llaptype == 1)
		sklen += 8;
	if (sklen > 800) {
		printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n",
		       dev->name, sklen);
		return -1;
	}

	if ((llaptype == 0) || (llaptype > 2)) {
		printk(KERN_INFO "%s: unknown LLAP type: %d\n", dev->name, llaptype);
		return -1;
	}

	skb = dev_alloc_skb(3 + sklen);
	if (skb == NULL) {
		printk("%s: dropping packet due to memory squeeze.\n",
		       dev->name);
		return -1;
	}
	skb->dev = dev;

	if (sklen > len)
		skb_reserve(skb, 8);
	skb_put(skb, len + 3);
	skb->protocol = htons(ETH_P_LOCALTALK);

	/* 3-byte LLAP header: destination, source, type */
	skb->data[0] = dnode;
	skb->data[1] = snode;
	skb->data[2] = llaptype;
	skb_reset_mac_header(skb);	/* save pointer to LLAP header */
	skb_pull(skb, 3);

	skb_copy_to_linear_data(skb, ltdmabuf, len);

	skb_reset_transport_header(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	netif_rx(skb);
	return 0;
}

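/*
 * Interrupt handler: sample the card status and run the idle() state
 * machine to service whatever the card wants.
 */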
static irqreturn_t
ltpc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;

	if (dev == NULL) {
		printk("ltpc_interrupt: unknown device.\n");
		return IRQ_NONE;
	}

	inb_p(dev->base_addr + 6);

	idle(dev);

	return IRQ_HANDLED;
}

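/*
 * SIOCSIFADDR: the AppleTalk layer passes down the address it wants; the
 * node number is only a hint, since the card negotiates a node ID on the
 * LocalTalk segment (LT_INIT) and reports back what it actually got.
 * SIOCGIFADDR returns the address we ended up with.
 */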
static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct sockaddr_at *sa = (struct sockaddr_at *) &ifr->ifr_addr;
	struct ltpc_private *ltpc_priv = netdev_priv(dev);
	struct atalk_addr *aa = &ltpc_priv->my_addr;
	struct lt_init c;
	int ltflags;

	if (debug & DEBUG_VERBOSE)
		printk("ltpc_ioctl called\n");

	switch (cmd) {
	case SIOCSIFADDR:

		aa->s_net = sa->sat_addr.s_net;

		/* initialize the card with the requested node as a hint */
		c.command = LT_INIT;
		c.hint = sa->sat_addr.s_node;

		aa->s_node = do_read(dev, &c, sizeof(c), &c, 0);

		/* turn on the LT_FLAG_ALLLAP flag */
		ltflags = read_30(dev);
		ltflags |= LT_FLAG_ALLLAP;
		set_30(dev, ltflags);

		dev->broadcast[0] = 0xFF;
		dev->dev_addr[0] = aa->s_node;

		dev->addr_len = 1;

		return 0;

	case SIOCGIFADDR:

		sa->sat_addr.s_net = aa->s_net;
		sa->sat_addr.s_node = aa->s_node;

		return 0;

	default:
		return -EINVAL;
	}
}

static void set_multicast_list(struct net_device *dev)
{
	/* nothing to do here */
}

static int ltpc_poll_counter;

static void ltpc_poll(struct timer_list *unused)
{
	del_timer(&ltpc_timer);

	if (debug & DEBUG_VERBOSE) {
		if (!ltpc_poll_counter) {
			ltpc_poll_counter = 50;
			printk("ltpc poll is alive\n");
		}
		ltpc_poll_counter--;
	}

	/* run the state machine, then poll again in 50 ms */
	idle(ltpc_timer_dev);
	ltpc_timer.expires = jiffies + HZ/20;
	add_timer(&ltpc_timer);
}

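/*
 * Transmit path.  The AppleTalk layer hands us an skb whose first three
 * bytes are the LLAP header (destination node, source node, LLAP type);
 * they are peeled off into an LT_SENDLAP command and the remaining DDP
 * payload is written to the card with do_write().
 */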
static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int i;
	struct lt_sendlap cbuf;
	unsigned char *hdr;

	cbuf.command = LT_SENDLAP;
	cbuf.dnode = skb->data[0];
	cbuf.laptype = skb->data[2];
	skb_pull(skb, 3);	/* skip past the LLAP header */
	cbuf.length = skb->len;	/* this is host order */
	skb_reset_transport_header(skb);

	if (debug & DEBUG_UPPER) {
		printk("command ");
		for (i = 0; i < 6; i++)
			printk("%02x ", ((unsigned char *)&cbuf)[i]);
		printk("\n");
	}

	hdr = skb_transport_header(skb);
	do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len);

	if (debug & DEBUG_UPPER) {
		printk("sent %d ddp bytes\n", skb->len);
		for (i = 0; i < skb->len; i++)
			printk("%02x ", hdr[i]);
		printk("\n");
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

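/*
 * Probe for the card's DMA channel.  Both candidate channels (1 and 3,
 * unless the caller already knows which one) are requested and armed for a
 * small transfer, an LT_READMEM command is kicked off, and whichever channel
 * still shows a full DMA residue afterwards clearly wasn't used and is
 * released again.  Returns the channel number found, or 0 on failure.
 */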
static int __init ltpc_probe_dma(int base, int dma)
{
	int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3;
	unsigned long timeout;
	unsigned long f;

	if (want & 1) {
		if (request_dma(1, "ltpc")) {
			want &= ~1;
		} else {
			f = claim_dma_lock();
			disable_dma(1);
			clear_dma_ff(1);
			set_dma_mode(1, DMA_MODE_WRITE);
			set_dma_addr(1, virt_to_bus(ltdmabuf));
			set_dma_count(1, sizeof(struct lt_mem));
			enable_dma(1);
			release_dma_lock(f);
		}
	}
	if (want & 2) {
		if (request_dma(3, "ltpc")) {
			want &= ~2;
		} else {
			f = claim_dma_lock();
			disable_dma(3);
			clear_dma_ff(3);
			set_dma_mode(3, DMA_MODE_WRITE);
			set_dma_addr(3, virt_to_bus(ltdmabuf));
			set_dma_count(3, sizeof(struct lt_mem));
			enable_dma(3);
			release_dma_lock(f);
		}
	}

	/* LT_READMEM request used just to trigger a DMA transfer */
	ltdmabuf[0] = LT_READMEM;
	ltdmabuf[1] = 1;
	ltdmabuf[2] = 0; ltdmabuf[3] = 0;
	ltdmabuf[4] = 0; ltdmabuf[5] = 1;
	ltdmabuf[6] = 0;

	inb_p(io + 1);
	inb_p(io + 0);

	timeout = jiffies + 100 * HZ / 100;	/* one second */
	while (time_before(jiffies, timeout)) {
		if (0xfa == inb_p(io + 6))
			break;
	}

	inb_p(io + 3);
	inb_p(io + 2);
	while (time_before(jiffies, timeout)) {
		if (0xfb == inb_p(io + 6))
			break;
	}

	/* release whichever channel saw no transfer */
	if ((want & 2) && (get_dma_residue(3) == sizeof(struct lt_mem))) {
		want &= ~2;
		free_dma(3);
	}

	if ((want & 1) && (get_dma_residue(1) == sizeof(struct lt_mem))) {
		want &= ~1;
		free_dma(1);
	}

	if (!want)
		return 0;

	return (want & 2) ? 3 : 1;
}

static const struct net_device_ops ltpc_netdev = {
	.ndo_start_xmit		= ltpc_xmit,
	.ndo_do_ioctl		= ltpc_ioctl,
	.ndo_set_rx_mode	= set_multicast_list,
};

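/*
 * Probe for the card at 0x220 or 0x240, work out the IRQ and DMA channel if
 * they weren't given, walk the card through its startup sequence, and
 * register the LocalTalk device.  If no usable IRQ is available the driver
 * falls back to a 20 Hz polling timer.
 */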
static struct net_device * __init ltpc_probe(void)
{
	struct net_device *dev;
	int err = -ENOMEM;
	int x = 0, y = 0;
	int autoirq;
	unsigned long f;
	unsigned long timeout;

	dev = alloc_ltalkdev(sizeof(struct ltpc_private));
	if (!dev)
		goto out;

	/* probe for the I/O port: a live card answers with a status byte
	 * in the 0xf0-0xfe range */
	if (io != 0x240 && request_region(0x220, 8, "ltpc")) {
		x = inb_p(0x220 + 6);
		if ((x != 0xff) && (x >= 0xf0)) {
			io = 0x220;
			goto got_port;
		}
		release_region(0x220, 8);
	}
	if (io != 0x220 && request_region(0x240, 8, "ltpc")) {
		y = inb_p(0x240 + 6);
		if ((y != 0xff) && (y >= 0xf0)) {
			io = 0x240;
			goto got_port;
		}
		release_region(0x240, 8);
	}

	/* neither port answered */
	printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x, y);
	err = -ENODEV;
	goto out1;

got_port:
	/* probe for the IRQ line if one wasn't specified: coax the card into
	 * generating an interrupt and see which line fired */
	if (irq < 2) {
		unsigned long irq_mask;

		irq_mask = probe_irq_on();

		inb_p(io + 7);
		inb_p(io + 7);

		inb_p(io + 6);
		mdelay(2);
		autoirq = probe_irq_off(irq_mask);

		if (autoirq == 0) {
			printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
		} else {
			irq = autoirq;
		}
	}

	/* allocate the DMA area: data buffer plus command buffer at offset 800 */
	ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
	if (!ltdmabuf) {
		printk(KERN_ERR "ltpc: mem alloc failed\n");
		err = -ENOMEM;
		goto out2;
	}

	ltdmacbuf = &ltdmabuf[800];

	if (debug & DEBUG_VERBOSE) {
		printk("ltdmabuf pointer %08lx\n", (unsigned long) ltdmabuf);
	}

	/* card initialization sequence: a series of port reads with delays */
	inb_p(io + 1);
	inb_p(io + 3);

	msleep(20);

	inb_p(io + 0);
	inb_p(io + 2);
	inb_p(io + 7);
	inb_p(io + 4);
	inb_p(io + 5);
	inb_p(io + 5);
	inb_p(io + 6);

	ssleep(1);

	/* find the DMA channel */
	dma = ltpc_probe_dma(io, dma);
	if (!dma) {
		printk(KERN_ERR "No DMA channel found on ltpc card.\n");
		err = -ENODEV;
		goto out3;
	}

	if (irq)
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IRQ %d, DMA%d.\n", io, irq, dma);
	else
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n", io, dma);

	dev->netdev_ops = &ltpc_netdev;
	dev->base_addr = io;
	dev->irq = irq;
	dev->dma = dma;

	/* set up an initial receive DMA transfer and wait for the card to
	 * settle into the 0xf9 state */
	f = claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma, DMA_MODE_READ);
	set_dma_addr(dma, virt_to_bus(ltdmabuf));
	set_dma_count(dma, 0x100);
	enable_dma(dma);
	release_dma_lock(f);

	(void) inb_p(io + 3);
	(void) inb_p(io + 2);
	timeout = jiffies + 100 * HZ / 100;	/* one second */

	while (time_before(jiffies, timeout)) {
		if (0xf9 == inb_p(io + 6))
			break;
		schedule();
	}

	if (debug & DEBUG_VERBOSE) {
		printk("setting up timer and irq\n");
	}

	/* interrupt mode if we can get the IRQ, otherwise fall back to the
	 * 20 Hz polling timer */
	if (irq && request_irq(irq, ltpc_interrupt, 0, "ltpc", dev) >= 0) {
		(void) inb_p(io + 7);
		(void) inb_p(io + 7);
	} else {
		if (irq)
			printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
		dev->irq = 0;

		ltpc_timer_dev = dev;
		timer_setup(&ltpc_timer, ltpc_poll, 0);

		ltpc_timer.expires = jiffies + HZ/20;
		add_timer(&ltpc_timer);
	}
	err = register_netdev(dev);
	if (err)
		goto out4;

	return dev;
out4:
	del_timer_sync(&ltpc_timer);
	if (dev->irq)
		free_irq(dev->irq, dev);
out3:
	free_pages((unsigned long)ltdmabuf, get_order(1000));
out2:
	release_region(io, 8);
out1:
	free_netdev(dev);
out:
	return ERR_PTR(err);
}

#ifndef MODULE
/* handle "ltpc=auto|iobase[,irq[,dma]]" on the kernel command line */
static int __init ltpc_setup(char *str)
{
	int ints[5];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	if (ints[0] == 0) {
		if (str && !strncmp(str, "auto", 4)) {
			/* "auto": leave io/irq/dma at 0 and autoprobe */
		} else {
			printk(KERN_ERR
				"ltpc: usage: ltpc=auto|iobase[,irq[,dma]]\n");
			return 0;
		}
	} else {
		io = ints[1];
		if (ints[0] > 1)
			irq = ints[2];
		if (ints[0] > 2)
			dma = ints[3];
	}
	return 1;
}

__setup("ltpc=", ltpc_setup);
#endif

static struct net_device *dev_ltpc;

MODULE_LICENSE("GPL");
module_param(debug, int, 0);
module_param_hw(io, int, ioport, 0);
module_param_hw(irq, int, irq, 0);
module_param_hw(dma, int, dma, 0);

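/*
 * Module entry points.  ltpc_module_init() runs the probe; ltpc_cleanup()
 * unwinds everything the probe set up: the registered netdev, the poll
 * timer, the IRQ, the DMA channel, the I/O region and the DMA buffer.
 */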
static int __init ltpc_module_init(void)
{
	if (io == 0)
		printk(KERN_NOTICE
		       "ltpc: Autoprobing is not recommended for modules\n");

	dev_ltpc = ltpc_probe();
	return PTR_ERR_OR_ZERO(dev_ltpc);
}
module_init(ltpc_module_init);

static void __exit ltpc_cleanup(void)
{
	if (debug & DEBUG_VERBOSE)
		printk("unregister_netdev\n");
	unregister_netdev(dev_ltpc);

	del_timer_sync(&ltpc_timer);

	if (debug & DEBUG_VERBOSE)
		printk("freeing irq\n");

	if (dev_ltpc->irq)
		free_irq(dev_ltpc->irq, dev_ltpc);

	if (debug & DEBUG_VERBOSE)
		printk("freeing dma\n");

	if (dev_ltpc->dma)
		free_dma(dev_ltpc->dma);

	if (debug & DEBUG_VERBOSE)
		printk("freeing ioaddr\n");

	if (dev_ltpc->base_addr)
		release_region(dev_ltpc->base_addr, 8);

	free_netdev(dev_ltpc);

	if (debug & DEBUG_VERBOSE)
		printk("free_pages\n");

	free_pages((unsigned long) ltdmabuf, get_order(1000));

	if (debug & DEBUG_VERBOSE)
		printk("returning from cleanup_module\n");
}

module_exit(ltpc_cleanup);