/* ltpc.c: driver for the Apple/Farallon LocalTalk PC card. */
static int debug;
#define DEBUG_VERBOSE 1
#define DEBUG_UPPER 2
#define DEBUG_LOWER 4

/* I/O base, IRQ and DMA channel; 0 means autoprobe */
static int io;
static int irq;
static int dma;

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ltalk.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/atalk.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <asm/dma.h>
#include <asm/io.h>

#include "ltpc.h"

static DEFINE_SPINLOCK(txqueue_lock);
static DEFINE_SPINLOCK(mbox_lock);

static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
		   void *dbuf, int dbuflen);
static int sendup_buffer(struct net_device *dev);

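/* Allocate a buffer suitable for ISA DMA (low, physically contiguous pages). */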
static unsigned long dma_mem_alloc(int size)
{
	int order = get_order(size);

	return __get_dma_pages(GFP_KERNEL, order);
}

/* shared DMA area: ltdmabuf holds packet data (800 bytes used),
 * ltdmacbuf (= ltdmabuf + 800) holds command blocks (50 bytes used) */
static unsigned char *ltdmabuf;
static unsigned char *ltdmacbuf;

struct ltpc_private
{
	struct atalk_addr my_addr;
};

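/* One entry in the command queue: a command buffer, an optional data
 * buffer to send or fill, a read/write flag, and the mailbox that will
 * receive the card's result code. */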
struct xmitQel {
	struct xmitQel *next;

	unsigned char *cbuf;
	short cbuflen;

	unsigned char *dbuf;
	short dbuflen;
	unsigned char QWrite;
	unsigned char mailbox;
};

static struct xmitQel *xmQhd, *xmQtl;

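/* enQ()/deQ() maintain a simple singly-linked FIFO of pending commands,
 * protected by txqueue_lock. */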
static void enQ(struct xmitQel *qel)
{
	unsigned long flags;
	qel->next = NULL;

	spin_lock_irqsave(&txqueue_lock, flags);
	if (xmQtl) {
		xmQtl->next = qel;
	} else {
		xmQhd = qel;
	}
	xmQtl = qel;
	spin_unlock_irqrestore(&txqueue_lock, flags);

	if (debug & DEBUG_LOWER)
		printk("enqueued a 0x%02x command\n",qel->cbuf[0]);
}

static struct xmitQel *deQ(void)
{
	unsigned long flags;
	int i;
	struct xmitQel *qel=NULL;

	spin_lock_irqsave(&txqueue_lock, flags);
	if (xmQhd) {
		qel = xmQhd;
		xmQhd = qel->next;
		if(!xmQhd) xmQtl = NULL;
	}
	spin_unlock_irqrestore(&txqueue_lock, flags);

	if ((debug & DEBUG_LOWER) && qel) {
		int n;
		printk(KERN_DEBUG "ltpc: dequeued command ");
		n = qel->cbuflen;
		if (n>100) n=100;
		for(i=0;i<n;i++) printk("%02x ",qel->cbuf[i]);
		printk("\n");
	}

	return qel;
}

/* one queue element per mailbox; element 0 is reserved for GETRESULT */
static struct xmitQel qels[16];

/* result codes returned by the card, indexed by mailbox number */
static unsigned char mailbox[16];
static unsigned char mboxinuse[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};

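/* Poll the card's status register (base+6) until it leaves state 'c'.
 * Returns 0 on success, 1 on timeout (200000 polls of 100us each). */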
static int wait_timeout(struct net_device *dev, int c)
{
	int i;

	for(i=0;i<200000;i++) {
		if ( c != inb_p(dev->base_addr+6) ) return 0;
		udelay(100);
	}
	return 1;
}

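/* Claim a free mailbox (1..15).  Returns the mailbox number, or 0 if
 * none is available. */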
static int getmbox(void)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mbox_lock, flags);
	for(i=1;i<16;i++) if(!mboxinuse[i]) {
		mboxinuse[i]=1;
		spin_unlock_irqrestore(&mbox_lock, flags);
		return i;
	}
	spin_unlock_irqrestore(&mbox_lock, flags);
	return 0;
}

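/* Card state 0xfc: the card has a 50-byte command block for us (e.g. an
 * LT_RCVLAP header).  Set up a DMA read into ltdmacbuf, strobe the
 * handshake ports and wait for the card to leave that state. */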
static void handlefc(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmacbuf));
	set_dma_count(dma,50);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfc) ) printk("timed out in handlefc\n");
}

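/* Card state 0xfd: a complete received packet is ready.  DMA up to 800
 * bytes of packet data into ltdmabuf, then hand it to the AppleTalk
 * stack via sendup_buffer(). */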
static void handlefd(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfd) ) printk("timed out in handlefd\n");
	sendup_buffer(dev);
}

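/* Card state 0xfb after we queued a write: DMA the data in ltdmabuf out
 * to the card. */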
static void handlewrite(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_WRITE);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);

	if ( wait_timeout(dev,0xfb) ) {
		flags=claim_dma_lock();
		printk("timed out in handlewrite, dma res %d\n",
			get_dma_residue(dev->dma) );
		release_dma_lock(flags);
	}
}

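/* Card state 0xfb after we queued a read: DMA the requested data from
 * the card into ltdmabuf. */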
static void handleread(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,800);
	enable_dma(dma);
	release_dma_lock(flags);

	inb_p(base+3);
	inb_p(base+2);
	if ( wait_timeout(dev,0xfb) ) printk("timed out in handleread\n");
}

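/* Card state 0xfa: the card is ready to accept a command.  DMA the
 * 50-byte command block in ltdmacbuf out to the card. */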
static void handlecommand(struct net_device *dev)
{
	int dma = dev->dma;
	int base = dev->base_addr;
	unsigned long flags;

	flags=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_WRITE);
	set_dma_addr(dma,virt_to_bus(ltdmacbuf));
	set_dma_count(dma,50);
	enable_dma(dma);
	release_dma_lock(flags);
	inb_p(base+3);
	inb_p(base+2);
	if ( wait_timeout(dev,0xfa) ) printk("timed out in handlecommand\n");
}

/* canned LT_GETRESULT command, used when the card has a result waiting */
static unsigned char rescbuf[2] = {LT_GETRESULT,0};
static unsigned char resdbuf[2];

static int QInIdle;

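/* idle() drives the card's handshake state machine until the command
 * queue is empty and the card reports itself idle.  The status register
 * (base+6) says what the card wants next:
 *   0xf8 - idle
 *   0xf9 - a result code is waiting
 *   0xfa - ready to receive a command
 *   0xfb - ready for the data transfer belonging to the last command
 *   0xfc - a 50-byte command/header block is ready for us
 *   0xfd - a received packet is ready for us
 * QInIdle keeps the routine from being re-entered; it can be called from
 * the interrupt handler, the poll timer and the transmit path. */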
static void idle(struct net_device *dev)
{
	unsigned long flags;
	int state;
	struct xmitQel *q = NULL;
	int oops;
	int i;
	int base = dev->base_addr;

	spin_lock_irqsave(&txqueue_lock, flags);
	if(QInIdle) {
		spin_unlock_irqrestore(&txqueue_lock, flags);
		return;
	}
	QInIdle = 1;
	spin_unlock_irqrestore(&txqueue_lock, flags);

	(void) inb_p(base+6);

	oops = 100;

loop:
	if (0>oops--) {
		printk("idle: looped too many times\n");
		goto done;
	}

	/* read the state twice to make sure it is stable */
	state = inb_p(base+6);
	if (state != inb_p(base+6)) goto loop;

	switch(state) {
	case 0xfc:
		/* incoming command/header block */
		if (debug & DEBUG_LOWER) printk("idle: fc\n");
		handlefc(dev);
		break;
	case 0xfd:
		/* incoming packet */
		if(debug & DEBUG_LOWER) printk("idle: fd\n");
		handlefd(dev);
		break;
	case 0xf9:
		/* a result is waiting: queue a GETRESULT on mailbox 0 */
		if (debug & DEBUG_LOWER) printk("idle: f9\n");
		if(!mboxinuse[0]) {
			mboxinuse[0] = 1;
			qels[0].cbuf = rescbuf;
			qels[0].cbuflen = 2;
			qels[0].dbuf = resdbuf;
			qels[0].dbuflen = 2;
			qels[0].QWrite = 0;
			qels[0].mailbox = 0;
			enQ(&qels[0]);
		}
		inb_p(dev->base_addr+1);
		inb_p(dev->base_addr+0);
		if( wait_timeout(dev,0xf9) )
			printk("timed out idle f9\n");
		break;
	case 0xf8:
		/* card idle: kick it if we still have queued work */
		if (xmQhd) {
			inb_p(dev->base_addr+1);
			inb_p(dev->base_addr+0);
			if(wait_timeout(dev,0xf8) )
				printk("timed out idle f8\n");
		} else {
			goto done;
		}
		break;
	case 0xfa:
		/* card ready for a command */
		if(debug & DEBUG_LOWER) printk("idle: fa\n");
		if (xmQhd) {
			q=deQ();
			memcpy(ltdmacbuf,q->cbuf,q->cbuflen);
			ltdmacbuf[1] = q->mailbox;
			if (debug>1) {
				int n;
				printk("ltpc: sent command ");
				n = q->cbuflen;
				if (n>100) n=100;
				for(i=0;i<n;i++)
					printk("%02x ",ltdmacbuf[i]);
				printk("\n");
			}
			handlecommand(dev);
			if(0xfa==inb_p(base+6)) {
				/* still in 0xfa: the handshake timed out,
				   give up for now */
				goto done;
			}
		} else {
			/* nothing queued: ask for whatever result the
			   card has ready */
			if (!mboxinuse[0]) {
				mboxinuse[0] = 1;
				qels[0].cbuf = rescbuf;
				qels[0].cbuflen = 2;
				qels[0].dbuf = resdbuf;
				qels[0].dbuflen = 2;
				qels[0].QWrite = 0;
				qels[0].mailbox = 0;
				enQ(&qels[0]);
			} else {
				printk("trouble: response command already queued\n");
				goto done;
			}
		}
		break;
	case 0xfb:
		/* data transfer for the command we just sent */
		if(debug & DEBUG_LOWER) printk("idle: fb\n");
		if(q->QWrite) {
			memcpy(ltdmabuf,q->dbuf,q->dbuflen);
			handlewrite(dev);
		} else {
			handleread(dev);
			if(q->mailbox) {
				memcpy(q->dbuf,ltdmabuf,q->dbuflen);
			} else {
				/* this was a GETRESULT: store the result
				   code in the target mailbox */
				mailbox[ 0x0f & ltdmabuf[0] ] = ltdmabuf[1];
				mboxinuse[0]=0;
			}
		}
		break;
	}
	goto loop;

done:
	QInIdle=0;

	/* re-arm the card's interrupt if we are using one */
	if (dev->irq) {
		inb_p(base+7);
		inb_p(base+7);
	}
}

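/* do_write()/do_read() claim a mailbox, queue a command (plus a data
 * buffer to send or to fill), run the state machine via idle(), and
 * return the result code the card left in the mailbox. */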
static int do_write(struct net_device *dev, void *cbuf, int cbuflen,
		    void *dbuf, int dbuflen)
{
	int i = getmbox();
	int ret;

	if(i) {
		qels[i].cbuf = cbuf;
		qels[i].cbuflen = cbuflen;
		qels[i].dbuf = dbuf;
		qels[i].dbuflen = dbuflen;
		qels[i].QWrite = 1;
		qels[i].mailbox = i;
		enQ(&qels[i]);
		idle(dev);
		ret = mailbox[i];
		mboxinuse[i]=0;
		return ret;
	}
	printk("ltpc: could not allocate mbox\n");
	return -1;
}

static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
		   void *dbuf, int dbuflen)
{
	int i = getmbox();
	int ret;

	if(i) {
		qels[i].cbuf = cbuf;
		qels[i].cbuflen = cbuflen;
		qels[i].dbuf = dbuf;
		qels[i].dbuflen = dbuflen;
		qels[i].QWrite = 0;
		qels[i].mailbox = i;
		enQ(&qels[i]);
		idle(dev);
		ret = mailbox[i];
		mboxinuse[i]=0;
		return ret;
	}
	printk("ltpc: could not allocate mbox\n");
	return -1;
}

/* timer used when the card runs in polled (no-IRQ) mode */
static struct timer_list ltpc_timer;
static struct net_device *ltpc_timer_dev;

static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev);

/* read and write the card's flags word via LT_GETFLAGS/LT_SETFLAGS */
static int read_30 ( struct net_device *dev)
{
	lt_command c;
	c.getflags.command = LT_GETFLAGS;
	return do_read(dev, &c, sizeof(c.getflags),&c,0);
}

static int set_30 (struct net_device *dev,int x)
{
	lt_command c;
	c.setflags.command = LT_SETFLAGS;
	c.setflags.flags = x;
	return do_write(dev, &c, sizeof(c.setflags),&c,0);
}

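/* Build an sk_buff from the LLAP frame the card just DMAed into ltdmabuf
 * (its LT_RCVLAP header is in ltdmacbuf) and pass it up to the AppleTalk
 * stack. */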
static int sendup_buffer (struct net_device *dev)
{
	int dnode, snode, llaptype, len;
	int sklen;
	struct sk_buff *skb;
	struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;

	if (ltc->command != LT_RCVLAP) {
		printk("unknown command 0x%02x from ltpc card\n",ltc->command);
		return -1;
	}
	dnode = ltc->dnode;
	snode = ltc->snode;
	llaptype = ltc->laptype;
	len = ltc->length;

	sklen = len;
	if (llaptype == 1)
		sklen += 8;	/* type 1 frames get 8 bytes of extra headroom */
	if(sklen > 800) {
		printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n",
			dev->name,sklen);
		return -1;
	}

	if ( (llaptype==0) || (llaptype>2) ) {
		printk(KERN_INFO "%s: unknown LLAP type: %d\n",dev->name,llaptype);
		return -1;
	}

	skb = dev_alloc_skb(3+sklen);
	if (skb == NULL)
	{
		printk("%s: dropping packet due to memory squeeze.\n",
			dev->name);
		return -1;
	}
	skb->dev = dev;

	if (sklen > len)
		skb_reserve(skb,8);
	skb_put(skb,len+3);
	skb->protocol = htons(ETH_P_LOCALTALK);

	/* rebuild the 3-byte LLAP header in front of the data */
	skb->data[0] = dnode;
	skb->data[1] = snode;
	skb->data[2] = llaptype;
	skb_reset_mac_header(skb);
	skb_pull(skb,3);

	skb_copy_to_linear_data(skb, ltdmabuf, len);

	skb_reset_transport_header(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	netif_rx(skb);
	return 0;
}

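/* Interrupt handler: read the card's status and run the idle() state
 * machine to service whatever the card wants. */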
static irqreturn_t
ltpc_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;

	if (dev==NULL) {
		printk("ltpc_interrupt: unknown device.\n");
		return IRQ_NONE;
	}

	inb_p(dev->base_addr+6);

	idle(dev);

	return IRQ_HANDLED;
}

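/* SIOCSIFADDR/SIOCGIFADDR: set or report the AppleTalk node address.
 * Setting the address sends an LT_INIT to the card (with the requested
 * node number as a hint) and turns on LT_FLAG_ALLLAP. */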
static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct sockaddr_at *sa = (struct sockaddr_at *) &ifr->ifr_addr;
	struct ltpc_private *ltpc_priv = netdev_priv(dev);
	struct atalk_addr *aa = &ltpc_priv->my_addr;
	struct lt_init c;
	int ltflags;

	if(debug & DEBUG_VERBOSE) printk("ltpc_ioctl called\n");

	switch(cmd) {
	case SIOCSIFADDR:

		aa->s_net = sa->sat_addr.s_net;

		/* initialize the card, using the desired node as a hint */
		c.command = LT_INIT;
		c.hint = sa->sat_addr.s_node;

		aa->s_node = do_read(dev,&c,sizeof(c),&c,0);

		/* get and set the flags, accepting all LAP types */
		ltflags = read_30(dev);
		ltflags |= LT_FLAG_ALLLAP;
		set_30 (dev,ltflags);

		dev->broadcast[0] = 0xFF;
		dev->dev_addr[0] = aa->s_node;

		dev->addr_len=1;

		return 0;

	case SIOCGIFADDR:

		sa->sat_addr.s_net = aa->s_net;
		sa->sat_addr.s_node = aa->s_node;

		return 0;

	default:
		return -EINVAL;
	}
}

static void set_multicast_list(struct net_device *dev)
{
	/* nothing to do */
}

static int ltpc_poll_counter;

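/* In polled mode this timer fires 20 times a second and runs the state
 * machine in place of the interrupt handler. */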
static void ltpc_poll(struct timer_list *unused)
{
	del_timer(&ltpc_timer);

	if(debug & DEBUG_VERBOSE) {
		if (!ltpc_poll_counter) {
			ltpc_poll_counter = 50;
			printk("ltpc poll is alive\n");
		}
		ltpc_poll_counter--;
	}

	idle(ltpc_timer_dev);
	ltpc_timer.expires = jiffies + HZ/20;
	add_timer(&ltpc_timer);
}

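/* Transmit path: convert the 3-byte LLAP header that the AppleTalk stack
 * prepends into an LT_SENDLAP command, then hand the command and the DDP
 * data to the card via do_write(). */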
static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int i;
	struct lt_sendlap cbuf;
	unsigned char *hdr;

	cbuf.command = LT_SENDLAP;
	cbuf.dnode = skb->data[0];
	cbuf.laptype = skb->data[2];
	skb_pull(skb,3);	/* skip past the LLAP header */
	cbuf.length = skb->len;
	skb_reset_transport_header(skb);

	if(debug & DEBUG_UPPER) {
		printk("command ");
		for(i=0;i<6;i++)
			printk("%02x ",((unsigned char *)&cbuf)[i]);
		printk("\n");
	}

	hdr = skb_transport_header(skb);
	do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len);

	if(debug & DEBUG_UPPER) {
		printk("sent %d ddp bytes\n",skb->len);
		for (i = 0; i < skb->len; i++)
			printk("%02x ", hdr[i]);
		printk("\n");
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

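/* Probe for a working DMA channel (1 and/or 3, honouring a user-supplied
 * channel) by setting up a small LT_READMEM transfer and checking which
 * channel's residue actually dropped.  Returns the channel number, or 0
 * if neither worked. */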
static int __init ltpc_probe_dma(int base, int dma)
{
	int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3;
	unsigned long timeout;
	unsigned long f;

	if (want & 1) {
		if (request_dma(1,"ltpc")) {
			want &= ~1;
		} else {
			f=claim_dma_lock();
			disable_dma(1);
			clear_dma_ff(1);
			set_dma_mode(1,DMA_MODE_WRITE);
			set_dma_addr(1,virt_to_bus(ltdmabuf));
			set_dma_count(1,sizeof(struct lt_mem));
			enable_dma(1);
			release_dma_lock(f);
		}
	}
	if (want & 2) {
		if (request_dma(3,"ltpc")) {
			want &= ~2;
		} else {
			f=claim_dma_lock();
			disable_dma(3);
			clear_dma_ff(3);
			set_dma_mode(3,DMA_MODE_WRITE);
			set_dma_addr(3,virt_to_bus(ltdmabuf));
			set_dma_count(3,sizeof(struct lt_mem));
			enable_dma(3);
			release_dma_lock(f);
		}
	}

	/* set up an LT_READMEM request; whichever channel transfers it is
	   the one the card is wired to */
	ltdmabuf[0] = LT_READMEM;
	ltdmabuf[1] = 1;			/* mailbox */
	ltdmabuf[2] = 0; ltdmabuf[3] = 0;	/* address */
	ltdmabuf[4] = 0; ltdmabuf[5] = 1;	/* length */
	ltdmabuf[6] = 0;

	inb_p(io+1);
	inb_p(io+0);
	timeout = jiffies+100*HZ/100;
	while(time_before(jiffies, timeout)) {
		if ( 0xfa == inb_p(io+6) ) break;
	}

	inb_p(io+3);
	inb_p(io+2);
	while(time_before(jiffies, timeout)) {
		if ( 0xfb == inb_p(io+6) ) break;
	}

	/* release the channel(s) on which no transfer took place */
	if ((want & 2) && (get_dma_residue(3)==sizeof(struct lt_mem))) {
		want &= ~2;
		free_dma(3);
	}

	if ((want & 1) && (get_dma_residue(1)==sizeof(struct lt_mem))) {
		want &= ~1;
		free_dma(1);
	}

	if (!want)
		return 0;

	return (want & 2) ? 3 : 1;
}

static const struct net_device_ops ltpc_netdev = {
	.ndo_start_xmit		= ltpc_xmit,
	.ndo_do_ioctl		= ltpc_ioctl,
	.ndo_set_rx_mode	= set_multicast_list,
};

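/* Look for the card at 0x220 and 0x240 (restricted to the user-supplied
 * port if io= was given), autodetect the IRQ if none was specified,
 * allocate the DMA buffers, find the DMA channel, reset the card and
 * register the network device.  Returns the device on success or an
 * ERR_PTR() on failure. */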
struct net_device * __init ltpc_probe(void)
{
	struct net_device *dev;
	int err = -ENOMEM;
	int x=0,y=0;
	int autoirq;
	unsigned long f;
	unsigned long timeout;

	dev = alloc_ltalkdev(sizeof(struct ltpc_private));
	if (!dev)
		goto out;

	/* probe for the I/O port: a present card answers at base+6 with a
	   status byte in the 0xf0-0xfe range */
	if (io != 0x240 && request_region(0x220,8,"ltpc")) {
		x = inb_p(0x220+6);
		if ( (x!=0xff) && (x>=0xf0) ) {
			io = 0x220;
			goto got_port;
		}
		release_region(0x220,8);
	}
	if (io != 0x220 && request_region(0x240,8,"ltpc")) {
		y = inb_p(0x240+6);
		if ( (y!=0xff) && (y>=0xf0) ){
			io = 0x240;
			goto got_port;
		}
		release_region(0x240,8);
	}

	printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y);
	err = -ENODEV;
	goto out1;

got_port:
	/* autodetect the IRQ if the user did not specify one */
	if (irq < 2) {
		unsigned long irq_mask;

		irq_mask = probe_irq_on();

		inb_p(io+7);
		inb_p(io+7);

		inb_p(io+6);
		mdelay(2);
		autoirq = probe_irq_off(irq_mask);

		if (autoirq == 0) {
			printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
		} else {
			irq = autoirq;
		}
	}

	/* allocate the shared DMA buffers */
	ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
	if (!ltdmabuf) {
		printk(KERN_ERR "ltpc: mem alloc failed\n");
		err = -ENOMEM;
		goto out2;
	}

	ltdmacbuf = &ltdmabuf[800];

	if(debug & DEBUG_VERBOSE) {
		printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf);
	}

	/* reset the card */
	inb_p(io+1);
	inb_p(io+3);

	msleep(20);

	inb_p(io+0);
	inb_p(io+2);
	inb_p(io+7);
	inb_p(io+4);
	inb_p(io+5);
	inb_p(io+5);
	inb_p(io+6);

	ssleep(1);

	/* figure out which DMA channel we're using */
	dma = ltpc_probe_dma(io, dma);
	if (!dma) {
		printk(KERN_ERR "No DMA channel found on ltpc card.\n");
		err = -ENODEV;
		goto out3;
	}

	if(irq)
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma);
	else
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);

	dev->netdev_ops = &ltpc_netdev;
	dev->base_addr = io;
	dev->irq = irq;
	dev->dma = dma;

	/* give the card a DMA buffer and wait for it to signal a result (0xf9) */
	f=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,0x100);
	enable_dma(dma);
	release_dma_lock(f);

	(void) inb_p(io+3);
	(void) inb_p(io+2);
	timeout = jiffies+100*HZ/100;

	while(time_before(jiffies, timeout)) {
		if( 0xf9 == inb_p(io+6))
			break;
		schedule();
	}

	if(debug & DEBUG_VERBOSE) {
		printk("setting up timer and irq\n");
	}

	/* install the interrupt handler, falling back to polled mode */
	if (irq && request_irq( irq, ltpc_interrupt, 0, "ltpc", dev) >= 0)
	{
		(void) inb_p(io+7);
		(void) inb_p(io+7);
	} else {
		if( irq )
			printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
		dev->irq = 0;

		/* no IRQ: poll the card 20 times a second instead */
		ltpc_timer_dev = dev;
		timer_setup(&ltpc_timer, ltpc_poll, 0);

		ltpc_timer.expires = jiffies + HZ/20;
		add_timer(&ltpc_timer);
	}
	err = register_netdev(dev);
	if (err)
		goto out4;

	return dev;
out4:
	del_timer_sync(&ltpc_timer);
	if (dev->irq)
		free_irq(dev->irq, dev);
out3:
	free_pages((unsigned long)ltdmabuf, get_order(1000));
out2:
	release_region(io, 8);
out1:
	free_netdev(dev);
out:
	return ERR_PTR(err);
}

#ifndef MODULE

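/* Parse the "ltpc=" kernel command line option:
 * ltpc=auto or ltpc=iobase[,irq[,dma]]. */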
static int __init ltpc_setup(char *str)
{
	int ints[5];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	if (ints[0] == 0) {
		if (str && !strncmp(str, "auto", 4)) {
			/* "auto": leave io/irq/dma at 0 so ltpc_probe() autoprobes */
		}
		else {
			printk (KERN_ERR
				"ltpc: usage: ltpc=auto|iobase[,irq[,dma]]\n");
			return 0;
		}
	} else {
		io = ints[1];
		if (ints[0] > 1) {
			irq = ints[2];
		}
		if (ints[0] > 2) {
			dma = ints[3];
		}
	}
	return 1;
}

__setup("ltpc=", ltpc_setup);
#endif

static struct net_device *dev_ltpc;

#ifdef MODULE

MODULE_LICENSE("GPL");
module_param(debug, int, 0);
module_param_hw(io, int, ioport, 0);
module_param_hw(irq, int, irq, 0);
module_param_hw(dma, int, dma, 0);

static int __init ltpc_module_init(void)
{
	if(io == 0)
		printk(KERN_NOTICE
			"ltpc: Autoprobing is not recommended for modules\n");

	dev_ltpc = ltpc_probe();
	return PTR_ERR_OR_ZERO(dev_ltpc);
}
module_init(ltpc_module_init);
#endif

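/* Module unload: unregister the device, stop the poll timer, and release
 * the IRQ, DMA channel, I/O region and DMA buffer. */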
static void __exit ltpc_cleanup(void)
{
	if(debug & DEBUG_VERBOSE) printk("unregister_netdev\n");
	unregister_netdev(dev_ltpc);

	del_timer_sync(&ltpc_timer);

	if(debug & DEBUG_VERBOSE) printk("freeing irq\n");

	if (dev_ltpc->irq)
		free_irq(dev_ltpc->irq, dev_ltpc);

	if(debug & DEBUG_VERBOSE) printk("freeing dma\n");

	if (dev_ltpc->dma)
		free_dma(dev_ltpc->dma);

	if(debug & DEBUG_VERBOSE) printk("freeing ioaddr\n");

	if (dev_ltpc->base_addr)
		release_region(dev_ltpc->base_addr,8);

	free_netdev(dev_ltpc);

	if(debug & DEBUG_VERBOSE) printk("free_pages\n");

	free_pages( (unsigned long) ltdmabuf, get_order(1000));

	if(debug & DEBUG_VERBOSE) printk("returning from cleanup_module\n");
}

module_exit(ltpc_cleanup);