1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
/* Banner printed once per attached device at registration time. */
static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87#include <linux/module.h>
88#include <linux/kernel.h>
89#include <linux/types.h>
90#include <linux/fcntl.h>
91#include <linux/interrupt.h>
92#include <linux/string.h>
93#include <linux/slab.h>
94#include <linux/if_ether.h>
95#include <linux/in.h>
96#include <linux/errno.h>
97#include <linux/delay.h>
98#include <linux/init.h>
99#include <linux/netdevice.h>
100#include <linux/etherdevice.h>
101#include <linux/inetdevice.h>
102#include <linux/skbuff.h>
103#include <linux/if_plip.h>
104#include <linux/workqueue.h>
105#include <linux/spinlock.h>
106#include <linux/completion.h>
107#include <linux/parport.h>
108#include <linux/bitops.h>
109
110#include <net/neighbour.h>
111
112#include <asm/irq.h>
113#include <asm/byteorder.h>
114
115
/* Maximum number of plip interfaces this driver will manage. */
#define PLIP_MAX 8

/* Debug verbosity; higher values log more (0 = quiet). */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static const unsigned int net_debug = NET_DEBUG;
123
/*
 * Enable/disable the machine IRQ line used by the port.  dev->irq == -1
 * means the port has no interrupt and we run in polled (timer) mode, in
 * which case these are no-ops.  Wrapped in do { } while (0) so the macros
 * expand safely as single statements inside unbraced if/else bodies (the
 * bare "if" form would silently capture a following "else").
 */
#define ENABLE(irq)	do { if (irq != -1) enable_irq(irq); } while (0)
#define DISABLE(irq)	do { if (irq != -1) disable_irq(irq); } while (0)
126
127
/* Granularity, in microseconds, of the busy-wait polling loops. */
#define PLIP_DELAY_UNIT 1

/* Handshake-trigger timeout, counted in units of PLIP_DELAY_UNIT. */
#define PLIP_TRIGGER_WAIT 500

/* Per-nibble transfer timeout, counted in units of PLIP_DELAY_UNIT. */
#define PLIP_NIBBLE_WAIT 3000
135
136
137static void plip_kick_bh(struct work_struct *work);
138static void plip_bh(struct work_struct *work);
139static void plip_timer_bh(struct work_struct *work);
140
141
142static void plip_interrupt(void *dev_id);
143
144
145static netdev_tx_t plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
146static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
147 unsigned short type, const void *daddr,
148 const void *saddr, unsigned len);
149static int plip_hard_header_cache(const struct neighbour *neigh,
150 struct hh_cache *hh, __be16 type);
151static int plip_open(struct net_device *dev);
152static int plip_close(struct net_device *dev);
153static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
154static int plip_preempt(void *handle);
155static void plip_wakeup(void *handle);
156
/* Overall state of the point-to-point line. */
enum plip_connection_state {
	PLIP_CN_NONE=0,		/* idle, no transfer in progress */
	PLIP_CN_RECEIVE,	/* an incoming packet is being received */
	PLIP_CN_SEND,		/* an outgoing packet is being sent */
	PLIP_CN_CLOSING,	/* transfer finished, about to go idle */
	PLIP_CN_ERROR		/* handshake failed; waiting for line reset */
};
164
/* Progress of a single packet transfer (used for both directions). */
enum plip_packet_state {
	PLIP_PK_DONE=0,		/* no packet, or transfer complete */
	PLIP_PK_TRIGGER,	/* handshaking the start of a transfer */
	PLIP_PK_LENGTH_LSB,	/* transferring the low byte of the length */
	PLIP_PK_LENGTH_MSB,	/* transferring the high byte of the length */
	PLIP_PK_DATA,		/* transferring payload bytes */
	PLIP_PK_CHECKSUM	/* transferring the 8-bit checksum */
};
173
/* Which half of the current byte is in flight (bytes move as two nibbles). */
enum plip_nibble_state {
	PLIP_NB_BEGIN,		/* about to transfer the low nibble */
	PLIP_NB_1,		/* low nibble done, high nibble next */
	PLIP_NB_2,		/* high nibble presented, waiting for ack (tx) */
};
179
/* State of one unidirectional transfer (one instance each for tx and rx). */
struct plip_local {
	enum plip_packet_state state;	/* where we are within the packet */
	enum plip_nibble_state nibble;	/* where we are within the byte */
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;			/* packet length, byte- or word-wise */
	unsigned short byte;		/* index of the byte in flight */
	unsigned char checksum;		/* running 8-bit sum of the payload */
	unsigned char data;		/* scratch byte (holds rx'd checksum) */
	struct sk_buff *skb;		/* packet being sent / received */
};
202
/* Per-interface private data (lives in netdev_priv()). */
struct net_local {
	struct net_device *dev;		/* back-pointer for the work items */
	struct work_struct immediate;	/* runs plip_bh() */
	struct delayed_work deferred;	/* runs plip_kick_bh() (retry path) */
	struct delayed_work timer;	/* IRQ-less polling (plip_timer_bh) */
	struct plip_local snd_data;	/* transmit-side transfer state */
	struct plip_local rcv_data;	/* receive-side transfer state */
	struct pardevice *pardev;	/* our parport registration */
	unsigned long trigger;		/* handshake timeout (ioctl-settable) */
	unsigned long nibble;		/* per-nibble timeout (ioctl-settable) */
	enum plip_connection_state connection;	/* current line state */
	unsigned short timeout_count;	/* consecutive timeouts so far */
	int is_deferred;		/* plip_bh() asked to be retried */
	int port_owner;			/* we currently hold the parport */
	int should_relinquish;		/* release the port once idle */
	spinlock_t lock;		/* protects connection/transfer state */
	atomic_t kill_timer;		/* tells the poll timer to stop */
	struct completion killed_timer_cmp;	/* poll timer has stopped */
};
222
223static inline void enable_parport_interrupts (struct net_device *dev)
224{
225 if (dev->irq != -1)
226 {
227 struct parport *port =
228 ((struct net_local *)netdev_priv(dev))->pardev->port;
229 port->ops->enable_irq (port);
230 }
231}
232
233static inline void disable_parport_interrupts (struct net_device *dev)
234{
235 if (dev->irq != -1)
236 {
237 struct parport *port =
238 ((struct net_local *)netdev_priv(dev))->pardev->port;
239 port->ops->disable_irq (port);
240 }
241}
242
243static inline void write_data (struct net_device *dev, unsigned char data)
244{
245 struct parport *port =
246 ((struct net_local *)netdev_priv(dev))->pardev->port;
247
248 port->ops->write_data (port, data);
249}
250
251static inline unsigned char read_status (struct net_device *dev)
252{
253 struct parport *port =
254 ((struct net_local *)netdev_priv(dev))->pardev->port;
255
256 return port->ops->read_status (port);
257}
258
/* Link-level header ops: Ethernet framing plus PLIP fake-MAC rewriting. */
static const struct header_ops plip_header_ops = {
	.create	= plip_hard_header,
	.cache	= plip_hard_header_cache,
};
263
/* Standard net_device callbacks for a plip interface. */
static const struct net_device_ops plip_netdev_ops = {
	.ndo_open		 = plip_open,
	.ndo_stop		 = plip_close,
	.ndo_start_xmit		 = plip_tx_packet,
	.ndo_do_ioctl		 = plip_ioctl,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_validate_addr	 = eth_validate_addr,
};
272
273
274
275
276
277
278
279
280
281
/*
 * One-time setup of a plip net_device: generic device fields, protocol
 * timeouts, and the work items that drive the state machine.  Called at
 * attach time, before register_netdev().
 */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Point-to-point link with fake MAC addresses; ARP is not used. */
	dev->tx_queue_len 	 = 10;
	dev->flags	         = IFF_POINTOPOINT|IFF_NOARP;
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	dev->netdev_ops		 = &plip_netdev_ops;
	dev->header_ops          = &plip_header_ops;

	/* The parallel port is claimed lazily (on open / first transmit). */
	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger	= PLIP_TRIGGER_WAIT;
	nl->nibble	= PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	/* No IRQ on this port: poll it from a timer work item instead. */
	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}
311
312
313
314
315static void
316plip_kick_bh(struct work_struct *work)
317{
318 struct net_local *nl =
319 container_of(work, struct net_local, deferred.work);
320
321 if (nl->is_deferred)
322 schedule_work(&nl->immediate);
323}
324
325
326static int plip_none(struct net_device *, struct net_local *,
327 struct plip_local *, struct plip_local *);
328static int plip_receive_packet(struct net_device *, struct net_local *,
329 struct plip_local *, struct plip_local *);
330static int plip_send_packet(struct net_device *, struct net_local *,
331 struct plip_local *, struct plip_local *);
332static int plip_connection_close(struct net_device *, struct net_local *,
333 struct plip_local *, struct plip_local *);
334static int plip_error(struct net_device *, struct net_local *,
335 struct plip_local *, struct plip_local *);
336static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
337 struct plip_local *snd,
338 struct plip_local *rcv,
339 int error);
340
/* Return codes of the connection-state handlers. */
#define OK        0	/* state handled successfully */
#define TIMEOUT   1	/* recoverable timeout; may be retried */
#define ERROR     2	/* unrecoverable error for this packet */
#define HS_TIMEOUT 3	/* handshake timeout (peer not responding) */

typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
			 struct plip_local *snd, struct plip_local *rcv);

/* Handler dispatch table, indexed by enum plip_connection_state. */
static const plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};
357
358
/*
 * Immediate bottom half: dispatch to the handler for the current
 * connection state.  If the handler fails, run the timeout handler;
 * if that also fails, re-arm ourselves through the deferred work item.
 */
static void
plip_bh(struct work_struct *work)
{
	struct net_local *nl = container_of(work, struct net_local, immediate);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;
	plip_func f;
	int r;

	nl->is_deferred = 0;
	f = connection_state_table[nl->connection];
	if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
	    (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}
}
376
/*
 * Polling substitute for the parport interrupt when the port has no IRQ:
 * check for incoming traffic, then re-arm ourselves, until plip_close()
 * sets kill_timer (it then blocks on killed_timer_cmp until we confirm).
 */
static void
plip_timer_bh(struct work_struct *work)
{
	struct net_local *nl =
		container_of(work, struct net_local, timer.work);

	if (!(atomic_read (&nl->kill_timer))) {
		plip_interrupt (nl->dev);

		schedule_delayed_work(&nl->timer, 1);
	}
	else {
		complete(&nl->killed_timer_cmp);
	}
}
392
/*
 * Handle a non-OK return from a state handler.  A bounded number of
 * timeouts is tolerated (retried by returning TIMEOUT so plip_bh
 * reschedules); beyond that the current transfer is abandoned: both
 * transfer states are reset, any held skbs are freed, and the line is
 * put into PLIP_CN_ERROR until plip_error() sees it idle again.
 */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			/* Handshake timeouts get more retries than data ones. */
			if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
			    nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			error = HS_TIMEOUT;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		dev->stats.rx_dropped++;
	}
	/* Abandon both directions and free any packet in flight. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}
467
/* Handler for PLIP_CN_NONE: the line is idle, nothing to do. */
static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}
474
475
476
/*
 * Receive one byte from the peer as two 4-bit nibbles on the status
 * lines, acknowledging each nibble on the data lines.  *ns_p records
 * progress so the caller can resume after a TIMEOUT return.  Each wait
 * loop polls at most nibble_timeout times, PLIP_DELAY_UNIT us apart.
 * Returns OK or TIMEOUT.
 */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		cx = nibble_timeout;
		while (1) {
			/* Low nibble ready when bit 7 drops; read twice
			 * to make sure the lines have settled. */
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;
		fallthrough;

	case PLIP_NB_1:
		cx = nibble_timeout;
		while (1) {
			/* High nibble ready when bit 7 rises. */
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
		break;
	case PLIP_NB_2:
		/* Transmit-only state; nothing to do on receive. */
		break;
	}
	return OK;
}
525
526
527
528
529
530
531
532
533
534
535
536
537
/*
 * Determine the protocol ID of a received frame, like eth_type_trans()
 * but without the hardware-address checks that make no sense on a
 * point-to-point PLIP link.  Strips the link-level header from the skb.
 */
static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	if(is_multicast_ether_addr(eth->h_dest))
	{
		if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 * Ethernet-II frames carry the protocol directly in h_proto;
	 * anything below ETH_P_802_3_MIN is an 802.3 length field.
	 */
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 * An 802.3 frame whose payload begins with 0xFFFF is a raw
	 * (checksum-less) Novell frame; report it as ETH_P_802_3.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/* Otherwise assume 802.2 LLC. */
	return htons(ETH_P_802_2);
}
579
580
/*
 * Handler for PLIP_CN_RECEIVE: pull in a whole packet, resuming from
 * rcv->state after any TIMEOUT.  Wire layout is length (lsb, msb),
 * payload bytes, then an 8-bit additive checksum.  Returns OK, TIMEOUT
 * or ERROR; plip_bh() funnels failures to plip_bh_timeout_error().
 */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		fallthrough;

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* A send is pending: use the short trigger timeout
			 * so a spurious trigger cannot stall transmit. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		fallthrough;

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the advertised length before allocating. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len ||
		    rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		fallthrough;

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		} while (++rcv->byte < rcv->length.h);
		/* Sum the payload once it has fully arrived. */
		do {
			rcv->checksum += lbuf[--rcv->byte];
		} while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		fallthrough;

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			dev->stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		fallthrough;

	case PLIP_PK_DONE:
		/* Inform the upper layer of the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx_ni(rcv->skb);
		dev->stats.rx_bytes += rcv->length.h;
		dev->stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			/* A transmit was queued meanwhile: go send it now. */
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
700
701
702
/*
 * Send one byte to the peer as two 4-bit nibbles on the data lines,
 * waiting after each for the peer's acknowledgement on status bit 7.
 * *ns_p records progress so the caller can resume after a TIMEOUT
 * return.  Returns OK or TIMEOUT.
 */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		write_data (dev, data & 0x0f);	/* present the low nibble */
		*ns_p = PLIP_NB_1;
		fallthrough;

	case PLIP_NB_1:
		write_data (dev, 0x10 | (data & 0x0f));	/* strobe it */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)	/* peer acked low nibble */
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		write_data (dev, 0x10 | (data >> 4));	/* present high nibble */
		*ns_p = PLIP_NB_2;
		fallthrough;

	case PLIP_NB_2:
		write_data (dev, (data >> 4));	/* strobe it */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)		/* peer acked high nibble */
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
747
748
749static int
750plip_send_packet(struct net_device *dev, struct net_local *nl,
751 struct plip_local *snd, struct plip_local *rcv)
752{
753 unsigned short nibble_timeout = nl->nibble;
754 unsigned char *lbuf;
755 unsigned char c0;
756 unsigned int cx;
757
758 if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
759 printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
760 snd->state = PLIP_PK_DONE;
761 snd->skb = NULL;
762 return ERROR;
763 }
764
765 switch (snd->state) {
766 case PLIP_PK_TRIGGER:
767 if ((read_status(dev) & 0xf8) != 0x80)
768 return HS_TIMEOUT;
769
770
771 write_data (dev, 0x08);
772 cx = nl->trigger;
773 while (1) {
774 udelay(PLIP_DELAY_UNIT);
775 spin_lock_irq(&nl->lock);
776 if (nl->connection == PLIP_CN_RECEIVE) {
777 spin_unlock_irq(&nl->lock);
778
779 dev->stats.collisions++;
780 return OK;
781 }
782 c0 = read_status(dev);
783 if (c0 & 0x08) {
784 spin_unlock_irq(&nl->lock);
785 DISABLE(dev->irq);
786 synchronize_irq(dev->irq);
787 if (nl->connection == PLIP_CN_RECEIVE) {
788
789
790
791
792
793
794 ENABLE(dev->irq);
795 dev->stats.collisions++;
796 return OK;
797 }
798 disable_parport_interrupts (dev);
799 if (net_debug > 2)
800 printk(KERN_DEBUG "%s: send start\n", dev->name);
801 snd->state = PLIP_PK_LENGTH_LSB;
802 snd->nibble = PLIP_NB_BEGIN;
803 nl->timeout_count = 0;
804 break;
805 }
806 spin_unlock_irq(&nl->lock);
807 if (--cx == 0) {
808 write_data (dev, 0x00);
809 return HS_TIMEOUT;
810 }
811 }
812 break;
813
814 case PLIP_PK_LENGTH_LSB:
815 if (plip_send(nibble_timeout, dev,
816 &snd->nibble, snd->length.b.lsb))
817 return TIMEOUT;
818 snd->state = PLIP_PK_LENGTH_MSB;
819 fallthrough;
820
821 case PLIP_PK_LENGTH_MSB:
822 if (plip_send(nibble_timeout, dev,
823 &snd->nibble, snd->length.b.msb))
824 return TIMEOUT;
825 snd->state = PLIP_PK_DATA;
826 snd->byte = 0;
827 snd->checksum = 0;
828 fallthrough;
829
830 case PLIP_PK_DATA:
831 do {
832 if (plip_send(nibble_timeout, dev,
833 &snd->nibble, lbuf[snd->byte]))
834 return TIMEOUT;
835 } while (++snd->byte < snd->length.h);
836 do {
837 snd->checksum += lbuf[--snd->byte];
838 } while (snd->byte);
839 snd->state = PLIP_PK_CHECKSUM;
840 fallthrough;
841
842 case PLIP_PK_CHECKSUM:
843 if (plip_send(nibble_timeout, dev,
844 &snd->nibble, snd->checksum))
845 return TIMEOUT;
846
847 dev->stats.tx_bytes += snd->skb->len;
848 dev_kfree_skb(snd->skb);
849 dev->stats.tx_packets++;
850 snd->state = PLIP_PK_DONE;
851 fallthrough;
852
853 case PLIP_PK_DONE:
854
855 write_data (dev, 0x00);
856 snd->skb = NULL;
857 if (net_debug > 2)
858 printk(KERN_DEBUG "%s: send end\n", dev->name);
859 nl->connection = PLIP_CN_CLOSING;
860 nl->is_deferred = 1;
861 schedule_delayed_work(&nl->deferred, 1);
862 enable_parport_interrupts (dev);
863 ENABLE(dev->irq);
864 return OK;
865 }
866 return OK;
867}
868
/*
 * Handler for PLIP_CN_CLOSING: the transfer has completed.  Go idle and
 * restart the transmit queue; if another parport client asked for the
 * port while we were busy (should_relinquish, set by plip_preempt()),
 * release the port now.
 */
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}
885
886
/*
 * Handler for PLIP_CN_ERROR: wait for the peer's lines to return to the
 * idle pattern (0x80), then reset the interface; otherwise retry later
 * via the deferred work item.
 */
static int
plip_error(struct net_device *dev, struct net_local *nl,
	   struct plip_local *snd, struct plip_local *rcv)
{
	unsigned char status;

	status = read_status(dev);
	if ((status & 0xf8) == 0x80) {
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
		nl->connection = PLIP_CN_NONE;
		nl->should_relinquish = 0;
		netif_start_queue (dev);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		netif_wake_queue (dev);
	} else {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}

	return OK;
}
910
911
/*
 * Parallel-port interrupt handler (also invoked by plip_timer_bh() in
 * IRQ-less mode).  A peer driving 0xc0 onto the status lines is asking
 * to start a transfer: switch to PLIP_CN_RECEIVE and kick the bottom
 * half; anything else is treated as spurious.
 */
static void
plip_interrupt(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;
	unsigned long flags;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irqsave (&nl->lock, flags);

	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		/* Not a trigger pattern; only worth logging in IRQ mode. */
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irqrestore (&nl->lock, flags);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		fallthrough;
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* A receive is already in progress; the extra trigger is
		 * a benign race with the peer.  Ignore it. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irqrestore(&nl->lock, flags);
}
962
/*
 * ndo_start_xmit: queue one skb for transmission.  Claims the parallel
 * port on first use, stops the device queue while the single-packet
 * transmit slot is busy, and hands the skb to the bottom-half state
 * machine (snd_data).
 */
static netdev_tx_t
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return NETDEV_TX_BUSY;

	/* We may need to grab the bus */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return NETDEV_TX_BUSY;
		nl->port_owner = 1;
	}

	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return NETDEV_TX_BUSY;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	spin_lock_irq(&nl->lock);
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return NETDEV_TX_OK;
}
1003
/*
 * PLIP has no real hardware addressing: rewrite the fake Ethernet
 * header so the source is our dev_addr and the destination is 0xfc 0xfc
 * followed by the four bytes of ifa_address (the first address on the
 * interface; for a point-to-point link this is the peer's address).
 */
static void
plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
{
	const struct in_device *in_dev;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		/* Any address will do - we take the first */
		const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
		if (ifa) {
			memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
			memset(eth->h_dest, 0xfc, 2);
			memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
		}
	}
	rcu_read_unlock();
}
1022
1023static int
1024plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1025 unsigned short type, const void *daddr,
1026 const void *saddr, unsigned len)
1027{
1028 int ret;
1029
1030 ret = eth_header(skb, dev, type, daddr, saddr, len);
1031 if (ret >= 0)
1032 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1033
1034 return ret;
1035}
1036
/*
 * Header-cache variant of plip_hard_header(): build the cached Ethernet
 * header, then patch the PLIP fake addresses into the cached copy.
 */
static int plip_hard_header_cache(const struct neighbour *neigh,
				  struct hh_cache *hh, __be16 type)
{
	int ret;

	ret = eth_header_cache(neigh, hh, type);
	if (ret == 0) {
		struct ethhdr *eth;

		eth = (struct ethhdr*)(((u8*)hh->hh_data) +
				       HH_DATA_OFF(sizeof(*eth)));
		plip_rewrite_address (neigh->dev, eth);
	}

	return ret;
}
1053
1054
1055
1056
1057
1058
1059
/*
 * ndo_open: claim the parallel port, clear the data lines, start
 * interrupt delivery (or the polling timer when the port has no IRQ),
 * reset the protocol state machine, and fill dev_addr[2..5] with our
 * IPv4 address so the fake MAC matches what plip_rewrite_address()
 * puts on the wire.
 */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		/* No IRQ: drive the state machine from the poll timer. */
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the MAC-level header.  The first two dev_addr bytes are
	   already 0xfc from plip_init_netdev(); copy our IPv4 address
	   into the remaining four.  We run under RTNL here.
	   NOTE(review): rcu_dereference() without rcu_read_lock() relies
	   on RTNL protecting ifa_list - confirm against current
	   inetdevice locking rules. */
	in_dev=__in_dev_get_rtnl(dev);
	if (in_dev) {
		/* Any address will do - we take the first. */
		const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
		if (ifa != NULL) {
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}
1118
1119
/*
 * ndo_stop: stop the queue and IRQ delivery (in polled mode, tell the
 * timer to die and wait for it to confirm via killed_timer_cmp), then
 * release the port and free any packet still held by either direction
 * of the state machine.
 */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	DISABLE(dev->irq);
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		init_completion(&nl->killed_timer_cmp);
		atomic_set (&nl->kill_timer, 1);
		wait_for_completion(&nl->killed_timer_cmp);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
1165
1166static int
1167plip_preempt(void *handle)
1168{
1169 struct net_device *dev = (struct net_device *)handle;
1170 struct net_local *nl = netdev_priv(dev);
1171
1172
1173 if (nl->connection != PLIP_CN_NONE) {
1174 nl->should_relinquish = 1;
1175 return 1;
1176 }
1177
1178 nl->port_owner = 0;
1179 return 0;
1180}
1181
/*
 * parport wakeup callback: the port has become available again after we
 * relinquished it.  Re-claim it if the interface is still up.
 */
static void
plip_wakeup(void *handle)
{
	struct net_device *dev = (struct net_device *)handle;
	struct net_local *nl = netdev_priv(dev);

	if (nl->port_owner) {
		/* We should not be woken while we still own the port. */
		printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
		if (!parport_claim(nl->pardev))
			/* bus_owner is already set (but why?) */
			printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
		else
			return;
	}

	if (!(dev->flags & IFF_UP))
		/* Don't need the port when the interface is down */
		return;

	if (!parport_claim(nl->pardev)) {
		nl->port_owner = 1;
		/* Clear the data port. */
		write_data (dev, 0x00);
	}
}
1208
1209static int
1210plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1211{
1212 struct net_local *nl = netdev_priv(dev);
1213 struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1214
1215 if (cmd != SIOCDEVPLIP)
1216 return -EOPNOTSUPP;
1217
1218 switch(pc->pcmd) {
1219 case PLIP_GET_TIMEOUT:
1220 pc->trigger = nl->trigger;
1221 pc->nibble = nl->nibble;
1222 break;
1223 case PLIP_SET_TIMEOUT:
1224 if(!capable(CAP_NET_ADMIN))
1225 return -EPERM;
1226 nl->trigger = pc->trigger;
1227 nl->nibble = pc->nibble;
1228 break;
1229 default:
1230 return -EOPNOTSUPP;
1231 }
1232 return 0;
1233}
1234
/* "parport" module parameter: parport numbers to bind to (-1 = unset). */
static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
/* "timid": only attach to ports that have no other devices registered. */
static int timid;

module_param_array(parport, int, NULL, 0);
module_param(timid, int, 0);
MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");

/* One slot per registered plip interface, indexed by unit number. */
static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1243
1244static inline int
1245plip_searchfor(int list[], int a)
1246{
1247 int i;
1248 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1249 if (list[i] == a) return 1;
1250 }
1251 return 0;
1252}
1253
1254
1255
/*
 * parport match callback: create and register one plip interface on
 * @port if selected by the module parameters — either an explicit
 * "parport=N" list, or attach-to-everything by default (unless "timid"
 * is set and the port already has other devices).
 */
static void plip_attach (struct parport *port)
{
	static int unit;		/* next plipN unit number */
	struct net_device *dev;
	struct net_local *nl;
	char name[IFNAMSIZ];
	struct pardev_cb plip_cb;

	if ((parport[0] == -1 && (!timid || !port->devices)) ||
	    plip_searchfor(parport, port->number)) {
		if (unit == PLIP_MAX) {
			printk(KERN_ERR "plip: too many devices\n");
			return;
		}

		sprintf(name, "plip%d", unit);
		dev = alloc_etherdev(sizeof(struct net_local));
		if (!dev)
			return;

		strcpy(dev->name, name);

		dev->irq = port->irq;
		dev->base_addr = port->base;
		if (port->irq == -1) {
			printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
		                 "which is fairly inefficient!\n", port->name);
		}

		nl = netdev_priv(dev);
		nl->dev = dev;

		/* Register with parport: preempt/wakeup arbitrate port
		 * sharing, irq_func handles receive triggers. */
		memset(&plip_cb, 0, sizeof(plip_cb));
		plip_cb.private = dev;
		plip_cb.preempt = plip_preempt;
		plip_cb.wakeup = plip_wakeup;
		plip_cb.irq_func = plip_interrupt;

		nl->pardev = parport_register_dev_model(port, dev->name,
							&plip_cb, unit);

		if (!nl->pardev) {
			printk(KERN_ERR "%s: parport_register failed\n", name);
			goto err_free_dev;
		}

		plip_init_netdev(dev);

		if (register_netdev(dev)) {
			printk(KERN_ERR "%s: network register failed\n", name);
			goto err_parport_unregister;
		}

		printk(KERN_INFO "%s", version);
		if (dev->irq != -1)
			printk(KERN_INFO "%s: Parallel port at %#3lx, "
					 "using IRQ %d.\n",
				         dev->name, dev->base_addr, dev->irq);
		else
			printk(KERN_INFO "%s: Parallel port at %#3lx, "
					 "not using IRQ.\n",
					 dev->name, dev->base_addr);
		dev_plip[unit++] = dev;
	}
	return;

err_parport_unregister:
	parport_unregister_device(nl->pardev);
err_free_dev:
	free_netdev(dev);
}
1327
1328
1329
/* parport detach callback: nothing to do here; interfaces are torn down
 * in plip_cleanup_module() at module unload. */
static void plip_detach (struct parport *port)
{

}
1334
1335static int plip_probe(struct pardevice *par_dev)
1336{
1337 struct device_driver *drv = par_dev->dev.driver;
1338 int len = strlen(drv->name);
1339
1340 if (strncmp(par_dev->name, drv->name, len))
1341 return -ENODEV;
1342
1343 return 0;
1344}
1345
/* Our registration with the parport subsystem (device-model style). */
static struct parport_driver plip_driver = {
	.name		= "plip",
	.probe		= plip_probe,
	.match_port	= plip_attach,
	.detach		= plip_detach,
	.devmodel	= true,
};
1353
/*
 * Module exit: unregister every interface we created — releasing the
 * parallel port if we still hold it — then drop the parport driver.
 */
static void __exit plip_cleanup_module (void)
{
	struct net_device *dev;
	int i;

	for (i=0; i < PLIP_MAX; i++) {
		if ((dev = dev_plip[i])) {
			struct net_local *nl = netdev_priv(dev);
			unregister_netdev(dev);
			if (nl->port_owner)
				parport_release(nl->pardev);
			parport_unregister_device(nl->pardev);
			free_netdev(dev);
			dev_plip[i] = NULL;
		}
	}

	parport_unregister_driver(&plip_driver);
}
1373
1374#ifndef MODULE
1375
/* Number of "parport=N" entries collected from the command line. */
static int parport_ptr;

/*
 * Parse the "plip=" kernel command line option.  Accepted forms:
 *   plip=parport<N>  bind to parport N (repeatable, up to PLIP_MAX)
 *   plip=timid       only use ports no other driver has claimed
 *   plip=0           disable the driver entirely (parport[0] = -2)
 * Anything else numeric is rejected with a warning.
 */
static int __init plip_setup(char *str)
{
	int ints[4];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	/* Ugh.  */
	if (!strncmp(str, "parport", 7)) {
		int n = simple_strtoul(str+7, NULL, 10);
		if (parport_ptr < PLIP_MAX)
			parport[parport_ptr++] = n;
		else
			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
			       str);
	} else if (!strcmp(str, "timid")) {
		timid = 1;
	} else {
		if (ints[0] == 0 || ints[1] == 0) {
			/* disable driver on "plip=" or "plip=0" */
			parport[0] = -2;
		} else {
			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
			       ints[1]);
		}
	}
	return 1;
}
1405
1406__setup("plip=", plip_setup);
1407
1408#endif
1409
1410static int __init plip_init (void)
1411{
1412 if (parport[0] == -2)
1413 return 0;
1414
1415 if (parport[0] != -1 && timid) {
1416 printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1417 timid = 0;
1418 }
1419
1420 if (parport_register_driver (&plip_driver)) {
1421 printk (KERN_WARNING "plip: couldn't register driver\n");
1422 return 1;
1423 }
1424
1425 return 0;
1426}
1427
1428module_init(plip_init);
1429module_exit(plip_cleanup_module);
1430MODULE_LICENSE("GPL");
1431