1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87#include <linux/compat.h>
88#include <linux/module.h>
89#include <linux/kernel.h>
90#include <linux/types.h>
91#include <linux/fcntl.h>
92#include <linux/interrupt.h>
93#include <linux/string.h>
94#include <linux/slab.h>
95#include <linux/if_ether.h>
96#include <linux/in.h>
97#include <linux/errno.h>
98#include <linux/delay.h>
99#include <linux/init.h>
100#include <linux/netdevice.h>
101#include <linux/etherdevice.h>
102#include <linux/inetdevice.h>
103#include <linux/skbuff.h>
104#include <linux/if_plip.h>
105#include <linux/workqueue.h>
106#include <linux/spinlock.h>
107#include <linux/completion.h>
108#include <linux/parport.h>
109#include <linux/bitops.h>
110
111#include <net/neighbour.h>
112
113#include <asm/irq.h>
114#include <asm/byteorder.h>
115
116
117#define PLIP_MAX 8
118
119
120#ifndef NET_DEBUG
121#define NET_DEBUG 1
122#endif
123static const unsigned int net_debug = NET_DEBUG;
124
/*
 * Enable/disable the parallel-port IRQ line, but only when the device
 * actually has one (irq == -1 means polled, IRQ-less mode).
 * Wrapped in do { } while (0) so each macro expands to exactly one
 * statement and is safe inside unbraced if/else bodies; the argument
 * is parenthesized against operator-precedence surprises.
 */
#define ENABLE(irq) do { if ((irq) != -1) enable_irq(irq); } while (0)
#define DISABLE(irq) do { if ((irq) != -1) disable_irq(irq); } while (0)
127
128
129#define PLIP_DELAY_UNIT 1
130
131
132#define PLIP_TRIGGER_WAIT 500
133
134
135#define PLIP_NIBBLE_WAIT 3000
136
137
138static void plip_kick_bh(struct work_struct *work);
139static void plip_bh(struct work_struct *work);
140static void plip_timer_bh(struct work_struct *work);
141
142
143static void plip_interrupt(void *dev_id);
144
145
146static netdev_tx_t plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
147static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
148 unsigned short type, const void *daddr,
149 const void *saddr, unsigned len);
150static int plip_hard_header_cache(const struct neighbour *neigh,
151 struct hh_cache *hh, __be16 type);
152static int plip_open(struct net_device *dev);
153static int plip_close(struct net_device *dev);
154static int plip_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
155 void __user *data, int cmd);
156static int plip_preempt(void *handle);
157static void plip_wakeup(void *handle);
158
/* Overall state of the point-to-point link, kept in net_local.connection. */
enum plip_connection_state {
	PLIP_CN_NONE=0,		/* idle, no transfer in progress */
	PLIP_CN_RECEIVE,	/* receiving a packet */
	PLIP_CN_SEND,		/* sending a packet */
	PLIP_CN_CLOSING,	/* transfer finished, winding down */
	PLIP_CN_ERROR		/* aborted; plip_error() will try to reset */
};

/* Progress through one packet.  Order matters: plip_receive_packet()
 * and plip_send_packet() fall through these states in sequence. */
enum plip_packet_state {
	PLIP_PK_DONE=0,
	PLIP_PK_TRIGGER,
	PLIP_PK_LENGTH_LSB,
	PLIP_PK_LENGTH_MSB,
	PLIP_PK_DATA,
	PLIP_PK_CHECKSUM
};

/* Which half of the current byte is on the wire (4 bits at a time). */
enum plip_nibble_state {
	PLIP_NB_BEGIN,
	PLIP_NB_1,
	PLIP_NB_2,
};
181
/* Per-direction transfer state; net_local embeds one instance for the
 * send side and one for the receive side. */
struct plip_local {
	enum plip_packet_state state;	/* where in the packet we are */
	enum plip_nibble_state nibble;	/* where in the current byte we are */
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;		/* packet length, accessible whole or by byte */
	unsigned short byte;	/* index of the byte currently in flight */
	unsigned char checksum;	/* running 8-bit additive checksum */
	unsigned char data;	/* scratch byte (holds received checksum) */
	struct sk_buff *skb;	/* the packet being transferred */
};
204
/* Per-device private data (netdev_priv). */
struct net_local {
	struct net_device *dev;		/* back pointer to our netdev */
	struct work_struct immediate;	/* state-machine bottom half (plip_bh) */
	struct delayed_work deferred;	/* retry kick (plip_kick_bh) */
	struct delayed_work timer;	/* polling work when dev->irq == -1 */
	struct plip_local snd_data;	/* transmit-side transfer state */
	struct plip_local rcv_data;	/* receive-side transfer state */
	struct pardevice *pardev;	/* our parport registration */
	unsigned long trigger;		/* handshake trigger timeout (loops) */
	unsigned long nibble;		/* per-nibble timeout (loops) */
	enum plip_connection_state connection;	/* link state */
	unsigned short timeout_count;	/* consecutive timeouts seen so far */
	int is_deferred;		/* deferred work should re-kick plip_bh */
	int port_owner;			/* we currently hold parport_claim() */
	int should_relinquish;		/* release the port once idle */
	spinlock_t lock;		/* guards connection/state transitions */
	atomic_t kill_timer;		/* asks plip_timer_bh to stop */
	struct completion killed_timer_cmp;	/* signalled once it stopped */
};
224
225static inline void enable_parport_interrupts (struct net_device *dev)
226{
227 if (dev->irq != -1)
228 {
229 struct parport *port =
230 ((struct net_local *)netdev_priv(dev))->pardev->port;
231 port->ops->enable_irq (port);
232 }
233}
234
235static inline void disable_parport_interrupts (struct net_device *dev)
236{
237 if (dev->irq != -1)
238 {
239 struct parport *port =
240 ((struct net_local *)netdev_priv(dev))->pardev->port;
241 port->ops->disable_irq (port);
242 }
243}
244
245static inline void write_data (struct net_device *dev, unsigned char data)
246{
247 struct parport *port =
248 ((struct net_local *)netdev_priv(dev))->pardev->port;
249
250 port->ops->write_data (port, data);
251}
252
253static inline unsigned char read_status (struct net_device *dev)
254{
255 struct parport *port =
256 ((struct net_local *)netdev_priv(dev))->pardev->port;
257
258 return port->ops->read_status (port);
259}
260
/* Ethernet-style header building, with PLIP's fake-MAC rewrite on top. */
static const struct header_ops plip_header_ops = {
	.create		= plip_hard_header,
	.cache		= plip_hard_header_cache,
};
265
/* Standard netdev entry points for a plip%d interface. */
static const struct net_device_ops plip_netdev_ops = {
	.ndo_open		 = plip_open,
	.ndo_stop		 = plip_close,
	.ndo_start_xmit		 = plip_tx_packet,
	.ndo_siocdevprivate	 = plip_siocdevprivate,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_validate_addr	 = eth_validate_addr,
};
274
275
276
277
278
279
280
281
282
283
/* Initialize one plip net_device: Ethernet-like but point-to-point and
 * ARP-less, with a dummy fc:fc:fc:fc:fc:fc station address (bytes 2-5
 * are later overwritten with the local IPv4 address by plip_open()). */
static void
plip_init_netdev(struct net_device *dev)
{
	static const u8 addr_init[ETH_ALEN] = {
		0xfc, 0xfc, 0xfc,
		0xfc, 0xfc, 0xfc,
	};
	struct net_local *nl = netdev_priv(dev);

	/* Override parts of the ether setup. */
	dev->tx_queue_len 	 = 10;
	dev->flags	         = IFF_POINTOPOINT|IFF_NOARP;
	eth_hw_addr_set(dev, addr_init);

	dev->netdev_ops		 = &plip_netdev_ops;
	dev->header_ops          = &plip_header_ops;

	/* Port not claimed yet; plip_open()/plip_tx_packet() claim it. */
	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger	= PLIP_TRIGGER_WAIT;
	nl->nibble	= PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	/* IRQ-less ports are polled by a self-rearming delayed work. */
	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}
317
318
319
320
321static void
322plip_kick_bh(struct work_struct *work)
323{
324 struct net_local *nl =
325 container_of(work, struct net_local, deferred.work);
326
327 if (nl->is_deferred)
328 schedule_work(&nl->immediate);
329}
330
331
332static int plip_none(struct net_device *, struct net_local *,
333 struct plip_local *, struct plip_local *);
334static int plip_receive_packet(struct net_device *, struct net_local *,
335 struct plip_local *, struct plip_local *);
336static int plip_send_packet(struct net_device *, struct net_local *,
337 struct plip_local *, struct plip_local *);
338static int plip_connection_close(struct net_device *, struct net_local *,
339 struct plip_local *, struct plip_local *);
340static int plip_error(struct net_device *, struct net_local *,
341 struct plip_local *, struct plip_local *);
342static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
343 struct plip_local *snd,
344 struct plip_local *rcv,
345 int error);
346
/* Return codes of the per-state handler functions. */
#define OK        0	/* state handled cleanly */
#define TIMEOUT   1	/* soft timeout - may be retried */
#define ERROR     2	/* hard error - abort the transfer */
#define HS_TIMEOUT 3	/* handshake (trigger) timeout */

typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
			 struct plip_local *snd, struct plip_local *rcv);

/* Dispatch table, indexed by enum plip_connection_state -- the entries
 * must stay in the same order as that enum. */
static const plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};
363
364
/* Bottom half of PLIP: dispatch to the handler for the current
 * connection state.  If the handler fails, let plip_bh_timeout_error()
 * decide whether to retry; if that also reports a problem, schedule a
 * retry through the deferred work (which re-kicks us). */
static void
plip_bh(struct work_struct *work)
{
	struct net_local *nl = container_of(work, struct net_local, immediate);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;
	plip_func f;
	int r;

	nl->is_deferred = 0;
	f = connection_state_table[nl->connection];
	if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
	    (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}
}
382
383static void
384plip_timer_bh(struct work_struct *work)
385{
386 struct net_local *nl =
387 container_of(work, struct net_local, timer.work);
388
389 if (!(atomic_read (&nl->kill_timer))) {
390 plip_interrupt (nl->dev);
391
392 schedule_delayed_work(&nl->timer, 1);
393 }
394 else {
395 complete(&nl->killed_timer_cmp);
396 }
397}
398
/* Handle a TIMEOUT/ERROR/HS_TIMEOUT result from a state handler.
 *
 * The first few consecutive timeouts are forgiven (returns TIMEOUT so
 * plip_bh retries later); a receive that never got past the trigger
 * stage is silently abandoned (returns OK).  Anything else aborts the
 * transfer: both sk_buffs are dropped, interrupts disabled, the queue
 * stopped and the connection forced to PLIP_CN_ERROR so plip_error()
 * can try to reset the interface.  nl->lock fences the interrupt
 * handler while we inspect and rewrite the transfer state. */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			/* handshake timeouts get 10 retries, others 3 */
			if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
			    nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			error = HS_TIMEOUT;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		dev->stats.rx_dropped++;
	}
	/* Abort: reset both directions and drop any half-transferred skbs. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;	/* plip_error() will try a reset */
	write_data (dev, 0x00);

	return TIMEOUT;
}
473
/* Handler for PLIP_CN_NONE: nothing to do while the link is idle. */
static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}
480
481
482
/* Receive one byte as two 4-bit nibbles over the status lines.
 * Bit 0x80 of the status register is the peer's strobe: low for the
 * first nibble, high for the second; each nibble is accepted only
 * after two identical consecutive reads (de-glitching).  We ACK by
 * writing 0x10 / 0x00 to our data lines.  Returns OK or TIMEOUT
 * (after nibble_timeout polls of PLIP_DELAY_UNIT us each). */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;	/* low nibble from status bits 3-6 */
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;
		fallthrough;

	case PLIP_NB_1:
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;	/* high nibble from status bits 3-6 */
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
		break;
	case PLIP_NB_2:
		break;
	}
	return OK;
}
531
532
533
534
535
536
537
538
539
540
541
542
543
/* Determine the received packet's protocol ID: a cut-down
 * eth_type_trans() without the promiscuous/other-host logic, since
 * PLIP is point-to-point.  Pulls the MAC header and classifies
 * broadcast/multicast, then decides between a real EtherType,
 * raw 802.3 (old Novell IPX) and 802.2 LLC. */
static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	if(is_multicast_ether_addr(eth->h_dest))
	{
		if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/* h_proto >= ETH_P_802_3_MIN means it is a genuine EtherType,
	 * not an 802.3 length field. */
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		return eth->h_proto;

	rawp = skb->data;

	/* Raw 802.3 frames (old Novell IPX without an 802.2 LLC layer)
	 * start their payload with the 0xFFFF IPX checksum, which is
	 * never a valid 802.2 SSAP/DSAP pair.
	 * NOTE(review): read via an unsigned short cast -- assumes an
	 * unaligned/aliased 2-byte load is acceptable here; confirm on
	 * strict-alignment targets. */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/* Otherwise treat it as 802.2 LLC. */
	return htons(ETH_P_802_2);
}
585
586
/* Handler for PLIP_CN_RECEIVE: drive the receive state machine.
 * Falls through PLIP_PK_TRIGGER -> LENGTH_LSB -> LENGTH_MSB -> DATA ->
 * CHECKSUM -> DONE, calling plip_receive() for every byte.  A return
 * of TIMEOUT/ERROR sends us back through plip_bh_timeout_error(), and
 * the preserved rcv state lets us resume where we stopped. */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		fallthrough;

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* A send is pending too: use the short trigger
			 * timeout, and on failure switch over to sending. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		fallthrough;

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the announced length before allocating. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len ||
		    rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		fallthrough;

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		} while (++rcv->byte < rcv->length.h);
		/* Sum all payload bytes for the checksum comparison. */
		do {
			rcv->checksum += lbuf[--rcv->byte];
		} while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		fallthrough;

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			dev->stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		fallthrough;

	case PLIP_PK_DONE:
		/* Inform the upper layer for the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx_ni(rcv->skb);
		dev->stats.rx_bytes += rcv->length.h;
		dev->stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			/* A transmit is waiting: go straight to sending. */
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
706
707
708
/* Send one byte as two 4-bit nibbles over the data lines.
 * The mirror image of plip_receive(): we put a nibble on data bits
 * 0-3 with bit 4 as our strobe, then wait for the peer's ACK on
 * status bit 0x80 (low after the first nibble, high after the
 * second).  Returns OK or TIMEOUT. */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		write_data (dev, data & 0x0f);	/* low nibble, strobe low */
		*ns_p = PLIP_NB_1;
		fallthrough;

	case PLIP_NB_1:
		write_data (dev, 0x10 | (data & 0x0f));	/* raise strobe */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)	/* peer ACKed the low nibble */
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		write_data (dev, 0x10 | (data >> 4));	/* high nibble, strobe high */
		*ns_p = PLIP_NB_2;
		fallthrough;

	case PLIP_NB_2:
		write_data (dev, (data >> 4));	/* drop strobe */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)	/* peer ACKed the high nibble */
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
753
754
/* Handler for PLIP_CN_SEND: drive the transmit state machine.
 * PLIP_PK_TRIGGER performs the collision-aware handshake (writing
 * 0x08 and waiting for the peer's 0x08 status echo, backing off to
 * receive mode if the peer started sending first); the remaining
 * states stream length, payload and checksum via plip_send(). */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* Peer must be idle (status lines 0x80) before we trigger. */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted: the peer won the race. */
				dev->stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {	/* peer acknowledged the trigger */
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* An interrupt sneaked in between the
					 * unlock and the irq disable; treat it
					 * as a collision and defer to receive. */
					ENABLE(dev->irq);
					dev->stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		break;

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		fallthrough;

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		fallthrough;

	case PLIP_PK_DATA:
		do {
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		} while (++snd->byte < snd->length.h);
		/* Sum all payload bytes for the trailing checksum. */
		do {
			snd->checksum += lbuf[--snd->byte];
		} while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		fallthrough;

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		dev->stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		dev->stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		fallthrough;

	case PLIP_PK_DONE:
		/* Close the connection */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
874
/* Handler for PLIP_CN_CLOSING: return to idle, restart the tx queue,
 * and hand the parallel port back if plip_preempt() asked for it
 * while we were busy. */
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}
891
892
/* Handler for PLIP_CN_ERROR: once the peer's status lines read idle
 * (0x80), reset the interface and resume; otherwise try again on the
 * next deferred kick. */
static int
plip_error(struct net_device *dev, struct net_local *nl,
	   struct plip_local *snd, struct plip_local *rcv)
{
	unsigned char status;

	status = read_status(dev);
	if ((status & 0xf8) == 0x80) {
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
		nl->connection = PLIP_CN_NONE;
		nl->should_relinquish = 0;
		netif_start_queue (dev);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		netif_wake_queue (dev);
	} else {
		/* Peer still busy or wedged: poll again later. */
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}

	return OK;
}
916
917
/* Parallel-port interrupt handler (also called directly from
 * plip_timer_bh() when polling an IRQ-less port).  Validates that the
 * status lines really show the peer's trigger pattern (0xc0), then
 * flips the connection into receive mode and schedules the bottom
 * half. */
static void
plip_interrupt(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;
	unsigned long flags;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irqsave (&nl->lock, flags);

	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		/* Not the trigger pattern: ignore (only worth a message
		 * when a real IRQ fired). */
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irqrestore (&nl->lock, flags);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		fallthrough;
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* Already receiving -- nothing extra to do; the state
		 * machine in plip_receive_packet() is in charge. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irqrestore(&nl->lock, flags);
}
968
/* ndo_start_xmit: queue one packet for the state machine.  Claims the
 * parallel port if we don't hold it, stops the tx queue (only one
 * packet can be in flight), stores the skb in snd_data and kicks
 * plip_bh. */
static netdev_tx_t
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return NETDEV_TX_BUSY;

	/* We may need to grab the bus */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return NETDEV_TX_BUSY;
		nl->port_owner = 1;
	}

	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return NETDEV_TX_BUSY;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	spin_lock_irq(&nl->lock);
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return NETDEV_TX_OK;
}
1009
/* Rewrite an Ethernet header for PLIP: the source MAC becomes our
 * station address and the destination becomes fc:fc followed by the
 * interface's IPv4 address, matching what plip_open() programs on the
 * peer.  No-op when the device has no IPv4 address yet. */
static void
plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
{
	const struct in_device *in_dev;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		/* Any address will do - we take the first */
		const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
		if (ifa) {
			memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
			memset(eth->h_dest, 0xfc, 2);
			memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
		}
	}
	rcu_read_unlock();
}
1028
1029static int
1030plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1031 unsigned short type, const void *daddr,
1032 const void *saddr, unsigned len)
1033{
1034 int ret;
1035
1036 ret = eth_header(skb, dev, type, daddr, saddr, len);
1037 if (ret >= 0)
1038 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1039
1040 return ret;
1041}
1042
/* header_ops->cache: same rewrite as plip_hard_header(), but applied
 * to the cached header stored in the hh_cache entry. */
static int plip_hard_header_cache(const struct neighbour *neigh,
				  struct hh_cache *hh, __be16 type)
{
	int ret;

	ret = eth_header_cache(neigh, hh, type);
	if (ret == 0) {
		struct ethhdr *eth;

		/* locate the Ethernet header inside the cache buffer */
		eth = (struct ethhdr*)(((u8*)hh->hh_data) +
				       HH_DATA_OFF(sizeof(*eth)));
		plip_rewrite_address (neigh->dev, eth);
	}

	return ret;
}
1059
1060
1061
1062
1063
1064
1065
/* ndo_open: claim the parallel port, clear the data lines, enable rx
 * interrupts (or start the polling work for IRQ-less ports), reset the
 * state machine, and patch the interface's IPv4 address into bytes 2-5
 * of the MAC address (bytes 0-1 stay 0xfc from plip_init_netdev). */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the MAC-level header.  The peer builds our fake
	 * destination MAC from its idea of our IP address (see
	 * plip_rewrite_address), so we must mirror that here.
	 * NOTE(review): ifa_list is read with rcu_dereference() while
	 * only RTNL appears to be held here -- confirm locking. */
	in_dev=__in_dev_get_rtnl(dev);
	if (in_dev) {
		/* Any address will do - we take the first */
		const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
		if (ifa != NULL) {
			dev_addr_mod(dev, 2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}
1124
1125
/* ndo_stop: quiesce the device.  Stops the queue and the IRQ (or the
 * polling work, waiting for it to acknowledge via the completion),
 * releases the parallel port and frees any in-flight sk_buffs. */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	DISABLE(dev->irq);
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		/* Ask plip_timer_bh to stop and wait until it has. */
		init_completion(&nl->killed_timer_cmp);
		atomic_set (&nl->kill_timer, 1);
		wait_for_completion(&nl->killed_timer_cmp);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	/* Drop whatever was half-sent or half-received. */
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
1171
1172static int
1173plip_preempt(void *handle)
1174{
1175 struct net_device *dev = (struct net_device *)handle;
1176 struct net_local *nl = netdev_priv(dev);
1177
1178
1179 if (nl->connection != PLIP_CN_NONE) {
1180 nl->should_relinquish = 1;
1181 return 1;
1182 }
1183
1184 nl->port_owner = 0;
1185 return 0;
1186}
1187
/* parport wakeup callback: the port has become available again.
 * Re-claim it if the interface is up; complain if we are told to wake
 * up while we believe we already own the port. */
static void
plip_wakeup(void *handle)
{
	struct net_device *dev = (struct net_device *)handle;
	struct net_local *nl = netdev_priv(dev);

	if (nl->port_owner) {
		/* Why are we being woken up? */
		printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
		if (!parport_claim(nl->pardev))
			/* bus_owner is already set (but why?) */
			printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
		else
			return;
	}

	if (!(dev->flags & IFF_UP))
		/* Don't need the port when the interface is down */
		return;

	if (!parport_claim(nl->pardev)) {
		nl->port_owner = 1;
		/* Clear the data port. */
		write_data (dev, 0x00);
	}
}
1214
/* Private ioctl (SIOCDEVPLIP): get or set the handshake timeouts.
 * Rejected for compat (32-bit on 64-bit) callers. */
static int
plip_siocdevprivate(struct net_device *dev, struct ifreq *rq,
		    void __user *data, int cmd)
{
	struct net_local *nl = netdev_priv(dev);
	struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;

	if (cmd != SIOCDEVPLIP)
		return -EOPNOTSUPP;

	if (in_compat_syscall())
		return -EOPNOTSUPP;

	switch(pc->pcmd) {
	case PLIP_GET_TIMEOUT:
		pc->trigger = nl->trigger;
		pc->nibble  = nl->nibble;
		break;
	case PLIP_SET_TIMEOUT:
		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		nl->trigger = pc->trigger;
		nl->nibble  = pc->nibble;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
1244
1245static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1246static int timid;
1247
1248module_param_array(parport, int, NULL, 0);
1249module_param(timid, int, 0);
1250MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
1251
1252static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1253
1254static inline int
1255plip_searchfor(int list[], int a)
1256{
1257 int i;
1258 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1259 if (list[i] == a) return 1;
1260 }
1261 return 0;
1262}
1263
1264
1265
1266static void plip_attach (struct parport *port)
1267{
1268 static int unit;
1269 struct net_device *dev;
1270 struct net_local *nl;
1271 char name[IFNAMSIZ];
1272 struct pardev_cb plip_cb;
1273
1274 if ((parport[0] == -1 && (!timid || !port->devices)) ||
1275 plip_searchfor(parport, port->number)) {
1276 if (unit == PLIP_MAX) {
1277 printk(KERN_ERR "plip: too many devices\n");
1278 return;
1279 }
1280
1281 sprintf(name, "plip%d", unit);
1282 dev = alloc_etherdev(sizeof(struct net_local));
1283 if (!dev)
1284 return;
1285
1286 strcpy(dev->name, name);
1287
1288 dev->irq = port->irq;
1289 dev->base_addr = port->base;
1290 if (port->irq == -1) {
1291 printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
1292 "which is fairly inefficient!\n", port->name);
1293 }
1294
1295 nl = netdev_priv(dev);
1296 nl->dev = dev;
1297
1298 memset(&plip_cb, 0, sizeof(plip_cb));
1299 plip_cb.private = dev;
1300 plip_cb.preempt = plip_preempt;
1301 plip_cb.wakeup = plip_wakeup;
1302 plip_cb.irq_func = plip_interrupt;
1303
1304 nl->pardev = parport_register_dev_model(port, dev->name,
1305 &plip_cb, unit);
1306
1307 if (!nl->pardev) {
1308 printk(KERN_ERR "%s: parport_register failed\n", name);
1309 goto err_free_dev;
1310 }
1311
1312 plip_init_netdev(dev);
1313
1314 if (register_netdev(dev)) {
1315 printk(KERN_ERR "%s: network register failed\n", name);
1316 goto err_parport_unregister;
1317 }
1318
1319 printk(KERN_INFO "%s", version);
1320 if (dev->irq != -1)
1321 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1322 "using IRQ %d.\n",
1323 dev->name, dev->base_addr, dev->irq);
1324 else
1325 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1326 "not using IRQ.\n",
1327 dev->name, dev->base_addr);
1328 dev_plip[unit++] = dev;
1329 }
1330 return;
1331
1332err_parport_unregister:
1333 parport_unregister_device(nl->pardev);
1334err_free_dev:
1335 free_netdev(dev);
1336}
1337
1338
1339
/* parport detach callback: nothing to do here -- devices are torn
 * down in plip_cleanup_module() instead. */
static void plip_detach (struct parport *port)
{
	/* Nothing to do */
}
1344
1345static int plip_probe(struct pardevice *par_dev)
1346{
1347 struct device_driver *drv = par_dev->dev.driver;
1348 int len = strlen(drv->name);
1349
1350 if (strncmp(par_dev->name, drv->name, len))
1351 return -ENODEV;
1352
1353 return 0;
1354}
1355
/* Registration record handed to parport_register_driver(). */
static struct parport_driver plip_driver = {
	.name		= "plip",
	.probe		= plip_probe,
	.match_port	= plip_attach,
	.detach		= plip_detach,
	.devmodel	= true,
};
1363
/* Module exit: unregister every plip device (releasing the port if we
 * still hold it) and then the parport driver itself. */
static void __exit plip_cleanup_module (void)
{
	struct net_device *dev;
	int i;

	for (i=0; i < PLIP_MAX; i++) {
		if ((dev = dev_plip[i])) {
			struct net_local *nl = netdev_priv(dev);
			unregister_netdev(dev);
			if (nl->port_owner)
				parport_release(nl->pardev);
			parport_unregister_device(nl->pardev);
			free_netdev(dev);
			dev_plip[i] = NULL;
		}
	}

	parport_unregister_driver(&plip_driver);
}
1383
1384#ifndef MODULE
1385
1386static int parport_ptr;
1387
/* Parse the "plip=" kernel command line option:
 *   plip=parport<N>  add port N to the list of ports to use
 *   plip=timid       only use ports nothing else is attached to
 *   plip=0           disable the driver (parport[0] = -2)
 */
static int __init plip_setup(char *str)
{
	int ints[4];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	/* Ugh. */
	if (!strncmp(str, "parport", 7)) {
		int n = simple_strtoul(str+7, NULL, 10);
		if (parport_ptr < PLIP_MAX)
			parport[parport_ptr++] = n;
		else
			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
			       str);
	} else if (!strcmp(str, "timid")) {
		timid = 1;
	} else {
		if (ints[0] == 0 || ints[1] == 0) {
			/* disable driver on "plip=" or "plip=0" */
			parport[0] = -2;
		} else {
			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
			       ints[1]);
		}
	}
	return 1;
}
1415
1416__setup("plip=", plip_setup);
1417
1418#endif
1419
/* Module init: honour "plip=0" (disabled), reconcile "timid" with an
 * explicit port list, and register with the parport subsystem (which
 * calls plip_attach for each matching port). */
static int __init plip_init (void)
{
	if (parport[0] == -2)
		return 0;

	if (parport[0] != -1 && timid) {
		printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
		timid = 0;
	}

	if (parport_register_driver (&plip_driver)) {
		printk (KERN_WARNING "plip: couldn't register driver\n");
		return 1;
	}

	return 0;
}
1437
1438module_init(plip_init);
1439module_exit(plip_cleanup_module);
1440MODULE_LICENSE("GPL");
1441