1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
/* Driver identification banner, printed once per attached device. */
static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87#include <linux/module.h>
88#include <linux/kernel.h>
89#include <linux/types.h>
90#include <linux/fcntl.h>
91#include <linux/interrupt.h>
92#include <linux/string.h>
93#include <linux/slab.h>
94#include <linux/if_ether.h>
95#include <linux/in.h>
96#include <linux/errno.h>
97#include <linux/delay.h>
98#include <linux/init.h>
99#include <linux/netdevice.h>
100#include <linux/etherdevice.h>
101#include <linux/inetdevice.h>
102#include <linux/skbuff.h>
103#include <linux/if_plip.h>
104#include <linux/workqueue.h>
105#include <linux/spinlock.h>
106#include <linux/completion.h>
107#include <linux/parport.h>
108#include <linux/bitops.h>
109
110#include <net/neighbour.h>
111
112#include <asm/irq.h>
113#include <asm/byteorder.h>
114
115
/* Maximum number of PLIP devices this driver supports. */
#define PLIP_MAX 8

/* Verbosity of debug printks; may be overridden at build time. */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static const unsigned int net_debug = NET_DEBUG;

/* IRQ helpers that tolerate IRQ-less operation (irq == -1 means polled). */
#define ENABLE(irq) if (irq != -1) enable_irq(irq)
#define DISABLE(irq) if (irq != -1) disable_irq(irq)

/* Granularity of the busy-wait delay, in microseconds. */
#define PLIP_DELAY_UNIT 1

/* How many delay units to wait for the peer to answer a transfer
   trigger before giving up the handshake. */
#define PLIP_TRIGGER_WAIT 500

/* How many delay units to wait for each nibble once a transfer is
   established. */
#define PLIP_NIBBLE_WAIT 3000

/* Bottom-half handlers, run from the shared workqueue. */
static void plip_kick_bh(struct work_struct *work);
static void plip_bh(struct work_struct *work);
static void plip_timer_bh(struct work_struct *work);

/* Parport interrupt callback (also called directly in polled mode). */
static void plip_interrupt(void *dev_id);

/* Network-device entry points (see plip_netdev_ops/plip_header_ops). */
static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
                            unsigned short type, const void *daddr,
                            const void *saddr, unsigned len);
static int plip_hard_header_cache(const struct neighbour *neigh,
                                  struct hh_cache *hh, __be16 type);
static int plip_open(struct net_device *dev);
static int plip_close(struct net_device *dev);
static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int plip_preempt(void *handle);
static void plip_wakeup(void *handle);
156
/* Overall state of the link. */
enum plip_connection_state {
	PLIP_CN_NONE=0,		/* idle */
	PLIP_CN_RECEIVE,	/* inbound transfer in progress */
	PLIP_CN_SEND,		/* outbound transfer in progress */
	PLIP_CN_CLOSING,	/* send finished, waiting to go idle */
	PLIP_CN_ERROR		/* fault detected; waiting for line reset */
};

/* Progress of a single packet transfer (used for both directions). */
enum plip_packet_state {
	PLIP_PK_DONE=0,		/* no transfer in progress */
	PLIP_PK_TRIGGER,	/* handshaking the start of a transfer */
	PLIP_PK_LENGTH_LSB,	/* transferring the low length byte */
	PLIP_PK_LENGTH_MSB,	/* transferring the high length byte */
	PLIP_PK_DATA,		/* transferring the payload */
	PLIP_PK_CHECKSUM	/* transferring the trailing checksum */
};

/* Which half of the current byte is being transferred. */
enum plip_nibble_state {
	PLIP_NB_BEGIN,		/* about to start the low nibble */
	PLIP_NB_1,		/* low nibble done, high nibble pending */
	PLIP_NB_2,		/* byte complete */
};
179
/* Per-direction transfer state; net_local holds one for send and one
 * for receive so an interrupted transfer can resume where it stopped.
 */
struct plip_local {
	enum plip_packet_state state;	/* where we are in the packet */
	enum plip_nibble_state nibble;	/* where we are in the current byte */
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;			/* packet length, byte-addressable */
	unsigned short byte;		/* index of the byte in flight */
	unsigned char checksum;		/* running 8-bit sum of the payload */
	unsigned char data;		/* scratch byte (checksum on receive) */
	struct sk_buff *skb;		/* packet being sent/received */
};
202
/* Per-device driver state, stored in netdev_priv(dev). */
struct net_local {
	struct net_device *dev;			/* back-pointer to our netdev */
	struct work_struct immediate;		/* state-machine bottom half */
	struct delayed_work deferred;		/* delayed re-kick of the bh */
	struct delayed_work timer;		/* poll tick for IRQ-less mode */
	struct plip_local snd_data;		/* outbound transfer state */
	struct plip_local rcv_data;		/* inbound transfer state */
	struct pardevice *pardev;		/* our parport registration */
	unsigned long trigger;			/* handshake timeout (tunable via ioctl) */
	unsigned long nibble;			/* nibble timeout (tunable via ioctl) */
	enum plip_connection_state connection;	/* current link state */
	unsigned short timeout_count;		/* consecutive timeouts so far */
	int is_deferred;			/* deferred work pending? */
	int port_owner;				/* do we currently hold the parport? */
	int should_relinquish;			/* release the port once idle */
	spinlock_t lock;			/* protects connection/transfer state */
	atomic_t kill_timer;			/* request the poll timer to stop */
	struct completion killed_timer_cmp;	/* signalled when poll timer stops */
};
222
223static inline void enable_parport_interrupts (struct net_device *dev)
224{
225 if (dev->irq != -1)
226 {
227 struct parport *port =
228 ((struct net_local *)netdev_priv(dev))->pardev->port;
229 port->ops->enable_irq (port);
230 }
231}
232
233static inline void disable_parport_interrupts (struct net_device *dev)
234{
235 if (dev->irq != -1)
236 {
237 struct parport *port =
238 ((struct net_local *)netdev_priv(dev))->pardev->port;
239 port->ops->disable_irq (port);
240 }
241}
242
243static inline void write_data (struct net_device *dev, unsigned char data)
244{
245 struct parport *port =
246 ((struct net_local *)netdev_priv(dev))->pardev->port;
247
248 port->ops->write_data (port, data);
249}
250
251static inline unsigned char read_status (struct net_device *dev)
252{
253 struct parport *port =
254 ((struct net_local *)netdev_priv(dev))->pardev->port;
255
256 return port->ops->read_status (port);
257}
258
/* Header ops: plain ethernet header building plus PLIP address rewriting. */
static const struct header_ops plip_header_ops = {
	.create	= plip_hard_header,
	.cache	= plip_hard_header_cache,
};

/* Standard netdev entry points. */
static const struct net_device_ops plip_netdev_ops = {
	.ndo_open		= plip_open,
	.ndo_stop		= plip_close,
	.ndo_start_xmit		= plip_tx_packet,
	.ndo_do_ioctl		= plip_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
272
273
274
275
276
277
278
279
280
281
/* Initialize the PLIP-specific fields of a freshly allocated etherdev.
 * Called from plip_attach() before register_netdev().
 */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Point-to-point link with no ARP; a short tx queue is plenty. */
	dev->tx_queue_len = 10;
	dev->flags = IFF_POINTOPOINT|IFF_NOARP;
	/* 0xfc.. station address prefix; the low four bytes are filled in
	   from the interface's IPv4 address at open time. */
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	dev->netdev_ops = &plip_netdev_ops;
	dev->header_ops = &plip_header_ops;

	/* The parallel port is only claimed when it is actually needed. */
	nl->port_owner = 0;

	/* Default protocol timeouts (tunable via SIOCDEVPLIP). */
	nl->trigger = PLIP_TRIGGER_WAIT;
	nl->nibble = PLIP_NIBBLE_WAIT;

	/* Work items driving the state machine. */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	/* No IRQ available: poll via a self-rearming delayed work. */
	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}
311
312
313
314
315static void
316plip_kick_bh(struct work_struct *work)
317{
318 struct net_local *nl =
319 container_of(work, struct net_local, deferred.work);
320
321 if (nl->is_deferred)
322 schedule_work(&nl->immediate);
323}
324
325
/* Per-state handlers for the connection state machine (see plip_bh). */
static int plip_none(struct net_device *, struct net_local *,
		     struct plip_local *, struct plip_local *);
static int plip_receive_packet(struct net_device *, struct net_local *,
			       struct plip_local *, struct plip_local *);
static int plip_send_packet(struct net_device *, struct net_local *,
			    struct plip_local *, struct plip_local *);
static int plip_connection_close(struct net_device *, struct net_local *,
				 struct plip_local *, struct plip_local *);
static int plip_error(struct net_device *, struct net_local *,
		      struct plip_local *, struct plip_local *);
static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
				 struct plip_local *snd,
				 struct plip_local *rcv,
				 int error);

/* Handler return codes. */
#define OK 0		/* state handled; nothing more to do now */
#define TIMEOUT 1	/* retriable timeout */
#define ERROR 2		/* hard error (bad length, bad checksum, ...) */
#define HS_TIMEOUT 3	/* handshake timeout (peer absent or busy) */

typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
			 struct plip_local *snd, struct plip_local *rcv);

/* Indexed by enum plip_connection_state. */
static const plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};
357
358
359static void
360plip_bh(struct work_struct *work)
361{
362 struct net_local *nl = container_of(work, struct net_local, immediate);
363 struct plip_local *snd = &nl->snd_data;
364 struct plip_local *rcv = &nl->rcv_data;
365 plip_func f;
366 int r;
367
368 nl->is_deferred = 0;
369 f = connection_state_table[nl->connection];
370 if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
371 (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
372 nl->is_deferred = 1;
373 schedule_delayed_work(&nl->deferred, 1);
374 }
375}
376
377static void
378plip_timer_bh(struct work_struct *work)
379{
380 struct net_local *nl =
381 container_of(work, struct net_local, timer.work);
382
383 if (!(atomic_read (&nl->kill_timer))) {
384 plip_interrupt (nl->dev);
385
386 schedule_delayed_work(&nl->timer, 1);
387 }
388 else {
389 complete(&nl->killed_timer_cmp);
390 }
391}
392
/* Handle a non-OK return from a state handler.  Grants the transfer a
 * limited number of retries (more for handshake timeouts than for data
 * timeouts); once exhausted, aborts both directions, frees in-flight
 * packets and parks the link in PLIP_CN_ERROR until the line recovers.
 * Returns OK to stop retrying, TIMEOUT to have plip_bh reschedule.
 */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* timeout, not a hard error */
			nl->timeout_count++;
			/* Handshake timeouts get more retries than data ones. */
			if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
			    nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later. */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			error = HS_TIMEOUT;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission got interrupted before it really
			   started; nothing to clean up. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* timeout, not a hard error */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later. */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		dev->stats.rx_dropped++;
	}
	/* Abort both directions and free any half-transferred packets. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);	/* drop all data lines */

	return TIMEOUT;
}
467
/* State handler for PLIP_CN_NONE: nothing to do while the link is idle. */
static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}
474
475
476
/* Receive one byte (as two nibbles over the status lines) into *data_p.
 * *ns_p tracks progress so a timed-out transfer can resume mid-byte.
 * Each nibble is read twice (debounced) before being accepted.
 * Returns OK on success, TIMEOUT if the peer stops responding.
 */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {	/* peer presents low nibble */
				c1 = read_status(dev);	/* debounce: re-read */
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;
		write_data (dev, 0x10);	/* ack the low nibble */
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {	/* peer presents high nibble */
				c1 = read_status(dev);	/* debounce: re-read */
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;
		write_data (dev, 0x00);	/* ack the high nibble */
		*ns_p = PLIP_NB_BEGIN;
		/* fall through */
	case PLIP_NB_2:
		break;
	}
	return OK;
}
524
525
526
527
528
529
530
531
532
533
534
535
536
/* Determine the protocol ID of a received frame, in the spirit of
 * eth_type_trans(): classify broadcast/multicast by destination MAC,
 * then decode the ethertype / 802.3 length field.
 */
static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	if(is_multicast_ether_addr(eth->h_dest))
	{
		if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/* A value >= ETH_P_802_3_MIN in the h_proto field is a real
	   ethertype; anything smaller is an 802.3 frame length. */
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		return eth->h_proto;

	rawp = skb->data;

	/* 802.3 frame: raw FFFF after the header marks old Novell IPX,
	   which runs directly over 802.3 with no 802.2 LLC layer (FFFF
	   is not a valid LLC SSAP/DSAP pair). */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/* Otherwise: real 802.2 LLC. */
	return htons(ETH_P_802_2);
}
578
579
/* State handler for PLIP_CN_RECEIVE: receive one packet.  The switch
 * falls through its cases so a transfer interrupted by a retriable
 * timeout resumes exactly where it stopped (rcv->state).
 * Returns OK, TIMEOUT or ERROR.
 */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Run with port interrupts off for the whole transfer. */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01);	/* acknowledge the trigger */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* A transmit is pending: use the shorter trigger
			   timeout, and on timeout hand the line over to
			   the send side instead of failing. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the announced length against the MTU. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len ||
		    rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Allocate the receive buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* align IP header */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		} while (++rcv->byte < rcv->length.h);
		/* Accumulate the simple 8-bit payload sum for comparison. */
		do {
			rcv->checksum += lbuf[--rcv->byte];
		} while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			dev->stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Hand the completed packet to the network stack. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx_ni(rcv->skb);
		dev->stats.rx_bytes += rcv->length.h;
		dev->stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection; go straight to SEND if a transmit
		   was queued up while we were receiving. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
699
700
701
/* Send one byte (as two nibbles over the data lines).  *ns_p tracks
 * progress so a timed-out send can resume mid-byte.
 * Returns OK on success, TIMEOUT if the peer fails to acknowledge.
 */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		write_data (dev, data & 0x0f);	/* present low nibble */
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		write_data (dev, 0x10 | (data & 0x0f));	/* strobe low nibble */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)	/* peer acked low nibble */
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		write_data (dev, 0x10 | (data >> 4));	/* present high nibble */
		*ns_p = PLIP_NB_2;
		/* fall through */

	case PLIP_NB_2:
		write_data (dev, (data >> 4));	/* strobe high nibble */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)	/* peer acked high nibble */
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
746
747
/* State handler for PLIP_CN_SEND: transmit one packet.  First handshakes
 * with the peer (watching for a collision with an inbound transfer),
 * then streams length, payload and checksum via plip_send().  The switch
 * falls through so a retriable timeout resumes at snd->state.
 * Returns OK, TIMEOUT, HS_TIMEOUT or ERROR.
 */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* Line must be idle (peer not driving the status bits). */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger the remote receive interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Collision: the peer triggered first. */
				dev->stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {	/* peer acked the trigger */
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted after all: re-enable
					   the IRQ we just disabled (enable/
					   disable_irq calls are counted)
					   and treat as a collision. */
					ENABLE(dev->irq);
					dev->stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		do {
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		} while (++snd->byte < snd->length.h);
		/* Compute the 8-bit sum the receiver will verify. */
		do {
			snd->checksum += lbuf[--snd->byte];
		} while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		dev->stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		dev->stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Close the connection. */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
866
/* State handler for PLIP_CN_CLOSING: return the link to idle after a
 * completed transmit, and honour any pending request (set by
 * plip_preempt) to give the parallel port back.
 */
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}
883
884
885static int
886plip_error(struct net_device *dev, struct net_local *nl,
887 struct plip_local *snd, struct plip_local *rcv)
888{
889 unsigned char status;
890
891 status = read_status(dev);
892 if ((status & 0xf8) == 0x80) {
893 if (net_debug > 2)
894 printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
895 nl->connection = PLIP_CN_NONE;
896 nl->should_relinquish = 0;
897 netif_start_queue (dev);
898 enable_parport_interrupts (dev);
899 ENABLE(dev->irq);
900 netif_wake_queue (dev);
901 } else {
902 nl->is_deferred = 1;
903 schedule_delayed_work(&nl->deferred, 1);
904 }
905
906 return OK;
907}
908
909
/* Parallel-port interrupt handler (also invoked as a poll tick in
 * IRQ-less mode).  If the peer is signalling a transfer request, move
 * the connection to RECEIVE and kick the bottom half.
 */
static void
plip_interrupt(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;
	unsigned long flags;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irqsave (&nl->lock, flags);

	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		/* Not a PLIP transfer request; routine when polling, but
		   worth a debug message when a real IRQ fired. */
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irqrestore (&nl->lock, flags);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		/* fall through */
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* Already receiving; a benign race — ignore the
		   duplicate request. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irqrestore(&nl->lock, flags);
}
960
/* ndo_start_xmit handler: accept one skb for transmission.  Claims the
 * parallel port if we do not hold it, stops the queue (PLIP handles one
 * packet at a time) and kicks the state-machine bottom half.
 */
static int
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return NETDEV_TX_BUSY;

	/* We may need to grab the bus first. */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return NETDEV_TX_BUSY;
		nl->port_owner = 1;
	}

	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return NETDEV_TX_BUSY;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	/* Queue the packet for the bottom half.  Only switch to SEND if
	   the link is idle; an in-progress receive keeps priority. */
	spin_lock_irq(&nl->lock);
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return NETDEV_TX_OK;
}
1001
/* Rewrite a freshly built ethernet header with the PLIP convention:
 * source = our station address, destination = 0xfc 0xfc followed by the
 * four bytes of ifa_address.  NOTE(review): ifa_address is presumably
 * the peer address on this point-to-point link — confirm for
 * multi-address configurations.
 */
static void
plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
{
	const struct in_device *in_dev;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		/* Use the first address on the interface. */
		const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
		if (ifa) {
			memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
			memset(eth->h_dest, 0xfc, 2);
			memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
		}
	}
	rcu_read_unlock();
}
1020
1021static int
1022plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1023 unsigned short type, const void *daddr,
1024 const void *saddr, unsigned len)
1025{
1026 int ret;
1027
1028 ret = eth_header(skb, dev, type, daddr, saddr, len);
1029 if (ret >= 0)
1030 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1031
1032 return ret;
1033}
1034
1035static int plip_hard_header_cache(const struct neighbour *neigh,
1036 struct hh_cache *hh, __be16 type)
1037{
1038 int ret;
1039
1040 ret = eth_header_cache(neigh, hh, type);
1041 if (ret == 0) {
1042 struct ethhdr *eth;
1043
1044 eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1045 HH_DATA_OFF(sizeof(*eth)));
1046 plip_rewrite_address (neigh->dev, eth);
1047 }
1048
1049 return ret;
1050}
1051
1052
1053
1054
1055
1056
1057
/* ndo_open: bring the interface up.  Claims the parallel port, clears
 * the line, starts the poll timer in IRQ-less mode, resets the protocol
 * state machine and derives the low four bytes of the station address
 * from the interface's IPv4 address.
 * Returns 0 on success, -EAGAIN if the port cannot be claimed.
 */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port. */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt, or start the fake-interrupt poll timer. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the last four bytes of the station address from the
	   interface's first IPv4 address, so both ends of the cable can
	   predict each other's headers (see plip_rewrite_address).
	   NOTE(review): ndo_open runs under RTNL; the rcu_dereference()
	   below appears to rely on that rather than rcu_read_lock() —
	   confirm (rtnl_dereference() would express it more precisely). */
	in_dev=__in_dev_get_rtnl(dev);
	if (in_dev) {
		const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
		if (ifa != NULL) {
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}
1116
1117
/* ndo_stop: the inverse of plip_open().  Stops the queue and the poll
 * timer, releases the parallel port and frees any in-flight packets.
 */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	DISABLE(dev->irq);
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		/* Polled mode: ask the timer work to stop and wait until
		   it confirms. */
		init_completion(&nl->killed_timer_cmp);
		atomic_set (&nl->kill_timer, 1);
		wait_for_completion(&nl->killed_timer_cmp);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	/* Free any packets caught mid-transfer. */
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
1163
1164static int
1165plip_preempt(void *handle)
1166{
1167 struct net_device *dev = (struct net_device *)handle;
1168 struct net_local *nl = netdev_priv(dev);
1169
1170
1171 if (nl->connection != PLIP_CN_NONE) {
1172 nl->should_relinquish = 1;
1173 return 1;
1174 }
1175
1176 nl->port_owner = 0;
1177 return 0;
1178}
1179
/* Parport wakeup callback: the port has become available again after a
 * preemption.  Re-claim it if the interface is still up.
 */
static void
plip_wakeup(void *handle)
{
	struct net_device *dev = (struct net_device *)handle;
	struct net_local *nl = netdev_priv(dev);

	if (nl->port_owner) {
		/* We believe we already own the port — being woken up is
		   unexpected.  Try claiming anyway to probe the state. */
		printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
		if (!parport_claim(nl->pardev))
			/* The claim succeeded even though we thought we
			   held the port: inconsistent state. */
			printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
		else
			return;
	}

	if (!(dev->flags & IFF_UP))
		/* The interface is down; we do not need the port. */
		return;

	if (!parport_claim(nl->pardev)) {
		nl->port_owner = 1;
		/* Clear the data port. */
		write_data (dev, 0x00);
	}
}
1206
1207static int
1208plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1209{
1210 struct net_local *nl = netdev_priv(dev);
1211 struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1212
1213 if (cmd != SIOCDEVPLIP)
1214 return -EOPNOTSUPP;
1215
1216 switch(pc->pcmd) {
1217 case PLIP_GET_TIMEOUT:
1218 pc->trigger = nl->trigger;
1219 pc->nibble = nl->nibble;
1220 break;
1221 case PLIP_SET_TIMEOUT:
1222 if(!capable(CAP_NET_ADMIN))
1223 return -EPERM;
1224 nl->trigger = pc->trigger;
1225 nl->nibble = pc->nibble;
1226 break;
1227 default:
1228 return -EOPNOTSUPP;
1229 }
1230 return 0;
1231}
1232
/* Module parameters: "parport" lists the parport numbers to bind
 * (unused slots stay -1; -2 disables the driver), and "timid" makes us
 * skip ports that already have other devices registered.
 */
static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
static int timid;

module_param_array(parport, int, NULL, 0);
module_param(timid, int, 0);
MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");

/* Network devices created by plip_attach, indexed by unit number. */
static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1241
1242static inline int
1243plip_searchfor(int list[], int a)
1244{
1245 int i;
1246 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1247 if (list[i] == a) return 1;
1248 }
1249 return 0;
1250}
1251
1252
1253
1254static void plip_attach (struct parport *port)
1255{
1256 static int unit;
1257 struct net_device *dev;
1258 struct net_local *nl;
1259 char name[IFNAMSIZ];
1260 struct pardev_cb plip_cb;
1261
1262 if ((parport[0] == -1 && (!timid || !port->devices)) ||
1263 plip_searchfor(parport, port->number)) {
1264 if (unit == PLIP_MAX) {
1265 printk(KERN_ERR "plip: too many devices\n");
1266 return;
1267 }
1268
1269 sprintf(name, "plip%d", unit);
1270 dev = alloc_etherdev(sizeof(struct net_local));
1271 if (!dev)
1272 return;
1273
1274 strcpy(dev->name, name);
1275
1276 dev->irq = port->irq;
1277 dev->base_addr = port->base;
1278 if (port->irq == -1) {
1279 printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
1280 "which is fairly inefficient!\n", port->name);
1281 }
1282
1283 nl = netdev_priv(dev);
1284 nl->dev = dev;
1285
1286 memset(&plip_cb, 0, sizeof(plip_cb));
1287 plip_cb.private = dev;
1288 plip_cb.preempt = plip_preempt;
1289 plip_cb.wakeup = plip_wakeup;
1290 plip_cb.irq_func = plip_interrupt;
1291
1292 nl->pardev = parport_register_dev_model(port, dev->name,
1293 &plip_cb, unit);
1294
1295 if (!nl->pardev) {
1296 printk(KERN_ERR "%s: parport_register failed\n", name);
1297 goto err_free_dev;
1298 }
1299
1300 plip_init_netdev(dev);
1301
1302 if (register_netdev(dev)) {
1303 printk(KERN_ERR "%s: network register failed\n", name);
1304 goto err_parport_unregister;
1305 }
1306
1307 printk(KERN_INFO "%s", version);
1308 if (dev->irq != -1)
1309 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1310 "using IRQ %d.\n",
1311 dev->name, dev->base_addr, dev->irq);
1312 else
1313 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1314 "not using IRQ.\n",
1315 dev->name, dev->base_addr);
1316 dev_plip[unit++] = dev;
1317 }
1318 return;
1319
1320err_parport_unregister:
1321 parport_unregister_device(nl->pardev);
1322err_free_dev:
1323 free_netdev(dev);
1324}
1325
1326
1327
/* Parport detach callback.  Intentionally empty: devices persist until
 * module unload, where plip_cleanup_module() tears them down.
 */
static void plip_detach (struct parport *port)
{
	/* Nothing to do */
}
1332
1333static int plip_probe(struct pardevice *par_dev)
1334{
1335 struct device_driver *drv = par_dev->dev.driver;
1336 int len = strlen(drv->name);
1337
1338 if (strncmp(par_dev->name, drv->name, len))
1339 return -ENODEV;
1340
1341 return 0;
1342}
1343
/* Parport driver glue: invoked as ports appear in / leave the system. */
static struct parport_driver plip_driver = {
	.name		= "plip",
	.probe		= plip_probe,
	.match_port	= plip_attach,
	.detach		= plip_detach,
	.devmodel	= true,
};
1351
1352static void __exit plip_cleanup_module (void)
1353{
1354 struct net_device *dev;
1355 int i;
1356
1357 for (i=0; i < PLIP_MAX; i++) {
1358 if ((dev = dev_plip[i])) {
1359 struct net_local *nl = netdev_priv(dev);
1360 unregister_netdev(dev);
1361 if (nl->port_owner)
1362 parport_release(nl->pardev);
1363 parport_unregister_device(nl->pardev);
1364 free_netdev(dev);
1365 dev_plip[i] = NULL;
1366 }
1367 }
1368
1369 parport_unregister_driver(&plip_driver);
1370}
1371
#ifndef MODULE

/* Next free slot in parport[] for "plip=parportN" options. */
static int parport_ptr;

/* Parse the "plip=" kernel command-line option:
 *   plip=parportN -- bind to parport number N (may be repeated)
 *   plip=timid    -- only take ports with no other registered devices
 *   plip= / plip=0 -- disable the driver entirely
 * Old-style "plip=0xNN" I/O-address values are rejected with a warning.
 * Always returns 1 (option consumed).
 */
static int __init plip_setup(char *str)
{
	int ints[4];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	if (!strncmp(str, "parport", 7)) {
		int n = simple_strtoul(str+7, NULL, 10);
		if (parport_ptr < PLIP_MAX)
			parport[parport_ptr++] = n;
		else
			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
			       str);
	} else if (!strcmp(str, "timid")) {
		timid = 1;
	} else {
		if (ints[0] == 0 || ints[1] == 0) {
			/* disable driver on "plip=" or "plip=0" */
			parport[0] = -2;
		} else {
			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
			       ints[1]);
		}
	}
	return 1;
}

__setup("plip=", plip_setup);

#endif
1407
1408static int __init plip_init (void)
1409{
1410 if (parport[0] == -2)
1411 return 0;
1412
1413 if (parport[0] != -1 && timid) {
1414 printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1415 timid = 0;
1416 }
1417
1418 if (parport_register_driver (&plip_driver)) {
1419 printk (KERN_WARNING "plip: couldn't register driver\n");
1420 return 1;
1421 }
1422
1423 return 0;
1424}
1425
/* Module entry/exit points and license. */
module_init(plip_init);
module_exit(plip_cleanup_module);
MODULE_LICENSE("GPL");
1429