1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
/* Banner printed once per successfully attached device. */
static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91#include <linux/module.h>
92#include <linux/kernel.h>
93#include <linux/types.h>
94#include <linux/fcntl.h>
95#include <linux/interrupt.h>
96#include <linux/string.h>
97#include <linux/slab.h>
98#include <linux/if_ether.h>
99#include <linux/in.h>
100#include <linux/errno.h>
101#include <linux/delay.h>
102#include <linux/init.h>
103#include <linux/netdevice.h>
104#include <linux/etherdevice.h>
105#include <linux/inetdevice.h>
106#include <linux/skbuff.h>
107#include <linux/if_plip.h>
108#include <linux/workqueue.h>
109#include <linux/spinlock.h>
110#include <linux/completion.h>
111#include <linux/parport.h>
112#include <linux/bitops.h>
113
114#include <net/neighbour.h>
115
116#include <asm/irq.h>
117#include <asm/byteorder.h>
118
119
/* Maximum number of PLIP devices this driver will manage. */
#define PLIP_MAX 8

/* Compile-time default debug verbosity (0 = quiet). */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static const unsigned int net_debug = NET_DEBUG;

/* irq == -1 means IRQ-less (polled) mode: skip enable/disable then. */
#define ENABLE(irq) if (irq != -1) enable_irq(irq)
#define DISABLE(irq) if (irq != -1) disable_irq(irq)

/* Microseconds busy-waited between port samples in the nibble loops. */
#define PLIP_DELAY_UNIT 1

/* Connection-trigger timeout, in units of PLIP_DELAY_UNIT. */
#define PLIP_TRIGGER_WAIT 500

/* Per-nibble handshake timeout, in units of PLIP_DELAY_UNIT. */
#define PLIP_NIBBLE_WAIT 3000
140
/* Bottom-half handlers, run from the shared workqueue. */
static void plip_kick_bh(struct work_struct *work);
static void plip_bh(struct work_struct *work);
static void plip_timer_bh(struct work_struct *work);

/* Parport interrupt callback (also called directly in polled mode). */
static void plip_interrupt(void *dev_id);

/* net_device / header_ops entry points. */
static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
                            unsigned short type, const void *daddr,
                            const void *saddr, unsigned len);
static int plip_hard_header_cache(const struct neighbour *neigh,
                                  struct hh_cache *hh, __be16 type);
static int plip_open(struct net_device *dev);
static int plip_close(struct net_device *dev);
static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* parport port-sharing arbitration callbacks. */
static int plip_preempt(void *handle);
static void plip_wakeup(void *handle);
160
/* Overall state of the half-duplex link. */
enum plip_connection_state {
	PLIP_CN_NONE=0,		/* idle */
	PLIP_CN_RECEIVE,	/* inbound transfer in progress */
	PLIP_CN_SEND,		/* outbound transfer in progress */
	PLIP_CN_CLOSING,	/* transfer finished, returning to idle */
	PLIP_CN_ERROR		/* aborted; plip_error() will reset */
};

/* Position within one packet transfer (used for both directions). */
enum plip_packet_state {
	PLIP_PK_DONE=0,
	PLIP_PK_TRIGGER,	/* handshaking the start of a packet */
	PLIP_PK_LENGTH_LSB,
	PLIP_PK_LENGTH_MSB,
	PLIP_PK_DATA,
	PLIP_PK_CHECKSUM
};

/* Position within one byte: each byte travels as two 4-bit nibbles. */
enum plip_nibble_state {
	PLIP_NB_BEGIN,
	PLIP_NB_1,
	PLIP_NB_2,
};
183
/* Per-direction transfer state; net_local holds one for send, one for
 * receive.  All fields are only touched under net_local.lock or from
 * the single bottom-half context. */
struct plip_local {
	enum plip_packet_state state;	/* where we are in the packet */
	enum plip_nibble_state nibble;	/* where we are in the current byte */
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;	/* packet length, host order */
	} length;
	unsigned short byte;		/* index of the byte in flight */
	unsigned char  checksum;	/* running 8-bit sum of the payload */
	unsigned char  data;		/* scratch byte (received checksum) */
	struct sk_buff *skb;		/* packet being sent/received */
};
206
/* Per-device private data (netdev_priv). */
struct net_local {
	struct net_device *dev;			/* back-pointer to our netdev */
	struct work_struct immediate;		/* runs plip_bh() */
	struct delayed_work deferred;		/* runs plip_kick_bh() */
	struct delayed_work timer;		/* poll tick, IRQ-less mode only */
	struct plip_local snd_data;		/* outbound transfer state */
	struct plip_local rcv_data;		/* inbound transfer state */
	struct pardevice *pardev;		/* our parport registration */
	unsigned long  trigger;			/* handshake timeout (tunable) */
	unsigned long  nibble;			/* per-nibble timeout (tunable) */
	enum plip_connection_state connection;	/* link state, under 'lock' */
	unsigned short timeout_count;		/* consecutive timeouts so far */
	int is_deferred;			/* deferred work wants a re-kick */
	int port_owner;				/* we currently hold the parport */
	int should_relinquish;			/* release port when idle */
	spinlock_t lock;			/* protects connection/transfer state */
	atomic_t kill_timer;			/* ask the poll timer to stop */
	struct completion killed_timer_cmp;	/* poll timer acked the stop */
};
226
227static inline void enable_parport_interrupts (struct net_device *dev)
228{
229 if (dev->irq != -1)
230 {
231 struct parport *port =
232 ((struct net_local *)netdev_priv(dev))->pardev->port;
233 port->ops->enable_irq (port);
234 }
235}
236
237static inline void disable_parport_interrupts (struct net_device *dev)
238{
239 if (dev->irq != -1)
240 {
241 struct parport *port =
242 ((struct net_local *)netdev_priv(dev))->pardev->port;
243 port->ops->disable_irq (port);
244 }
245}
246
247static inline void write_data (struct net_device *dev, unsigned char data)
248{
249 struct parport *port =
250 ((struct net_local *)netdev_priv(dev))->pardev->port;
251
252 port->ops->write_data (port, data);
253}
254
255static inline unsigned char read_status (struct net_device *dev)
256{
257 struct parport *port =
258 ((struct net_local *)netdev_priv(dev))->pardev->port;
259
260 return port->ops->read_status (port);
261}
262
/* Ethernet-style header building, with PLIP's address rewriting. */
static const struct header_ops plip_header_ops = {
	.create  = plip_hard_header,
	.cache   = plip_hard_header_cache,
};
267
/* net_device callbacks; MAC handling is borrowed from etherdev. */
static const struct net_device_ops plip_netdev_ops = {
	.ndo_open		 = plip_open,
	.ndo_stop		 = plip_close,
	.ndo_start_xmit		 = plip_tx_packet,
	.ndo_do_ioctl		 = plip_ioctl,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_validate_addr	 = eth_validate_addr,
};
276
277
278
279
280
281
282
283
284
285
/*
 * One-time setup of a freshly allocated plip net_device: point-to-point
 * flags, a fc:fc:... placeholder MAC (bytes 2-5 are overwritten from
 * the local IP in plip_open()), default handshake timeouts and the
 * work items.  Called once during attach, before register_netdev().
 */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Override parts of the etherdev defaults. */
	dev->tx_queue_len 	 = 10;
	dev->flags	         = IFF_POINTOPOINT|IFF_NOARP;
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	dev->netdev_ops		 = &plip_netdev_ops;
	dev->header_ops          = &plip_header_ops;

	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger	= PLIP_TRIGGER_WAIT;
	nl->nibble	= PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	/* No IRQ available: fall back to a self-rescheduling poll timer. */
	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}
315
316
317
318
319static void
320plip_kick_bh(struct work_struct *work)
321{
322 struct net_local *nl =
323 container_of(work, struct net_local, deferred.work);
324
325 if (nl->is_deferred)
326 schedule_work(&nl->immediate);
327}
328
329
/* One handler per plip_connection_state (see connection_state_table). */
static int plip_none(struct net_device *, struct net_local *,
		     struct plip_local *, struct plip_local *);
static int plip_receive_packet(struct net_device *, struct net_local *,
			       struct plip_local *, struct plip_local *);
static int plip_send_packet(struct net_device *, struct net_local *,
			    struct plip_local *, struct plip_local *);
static int plip_connection_close(struct net_device *, struct net_local *,
				 struct plip_local *, struct plip_local *);
static int plip_error(struct net_device *, struct net_local *,
		      struct plip_local *, struct plip_local *);
/* Common recovery path when a handler returns non-OK. */
static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
				 struct plip_local *snd,
				 struct plip_local *rcv,
				 int error);
344
/* Result codes returned by the state handlers. */
#define OK        0
#define TIMEOUT   1
#define ERROR     2
#define HS_TIMEOUT 3	/* initial handshake (trigger) timed out */

/* Signature shared by all connection-state handlers. */
typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
			 struct plip_local *snd, struct plip_local *rcv);
352
/* Dispatch table, indexed by enum plip_connection_state. */
static const plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};
361
362
363static void
364plip_bh(struct work_struct *work)
365{
366 struct net_local *nl = container_of(work, struct net_local, immediate);
367 struct plip_local *snd = &nl->snd_data;
368 struct plip_local *rcv = &nl->rcv_data;
369 plip_func f;
370 int r;
371
372 nl->is_deferred = 0;
373 f = connection_state_table[nl->connection];
374 if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
375 (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
376 nl->is_deferred = 1;
377 schedule_delayed_work(&nl->deferred, 1);
378 }
379}
380
381static void
382plip_timer_bh(struct work_struct *work)
383{
384 struct net_local *nl =
385 container_of(work, struct net_local, timer.work);
386
387 if (!(atomic_read (&nl->kill_timer))) {
388 plip_interrupt (nl->dev);
389
390 schedule_delayed_work(&nl->timer, 1);
391 }
392 else {
393 complete(&nl->killed_timer_cmp);
394 }
395}
396
/*
 * Recovery path for a non-OK result from a state handler.  A few
 * transient timeouts are tolerated (we return TIMEOUT so plip_bh()
 * reschedules and the transfer resumes); past the limit the transfer
 * is aborted: both skbs are dropped, the queue is stopped and the
 * link is parked in PLIP_CN_ERROR until plip_error() resets it.
 */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			/* Handshake timeouts get more retries (10) than
			   mid-packet ones (3). */
			if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
			    nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			error = HS_TIMEOUT;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		dev->stats.rx_dropped++;
	}
	/* Give up: reset both directions and drop half-done skbs. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}
471
/* PLIP_CN_NONE handler: nothing to do while the link is idle. */
static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}
478
479
480
/*
 * Read one byte from the peer as two nibbles.  Each wait loop
 * double-samples the status lines (c0 == c1) to debounce, bounded by
 * nibble_timeout iterations of PLIP_DELAY_UNIT microseconds.
 * Resumable: *ns_p records progress so a TIMEOUT return can be
 * retried later without losing the half-received byte.
 */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Wait for status bit 0x80 to drop (low nibble valid). */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;	/* low nibble from status bits */
		write_data (dev, 0x10);		/* acknowledge */
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Wait for status bit 0x80 to rise (high nibble valid). */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;	/* merge high nibble */
		write_data (dev, 0x00);		/* acknowledge */
		*ns_p = PLIP_NB_BEGIN;
		/* fall through */
	case PLIP_NB_2:
		break;
	}
	return OK;
}
528
529
530
531
532
533
534
535
536
537
538
539
540
/*
 * Determine the protocol of a received frame — like eth_type_trans()
 * but without the "is it for us?" address filtering: PLIP is strictly
 * point-to-point, so everything that arrives is ours.
 */
static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	if(is_multicast_ether_addr(eth->h_dest))
	{
		if(ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 * A value >= ETH_P_802_3_MIN in h_proto is a real Ethernet
	 * protocol ID; anything smaller is an 802.3 frame length.
	 */
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 * 802.3 payload starting with 0xFFFF is "raw 802.3" (the old
	 * Novell IPX convention); otherwise treat it as 802.2 LLC.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	return htons(ETH_P_802_2);
}
582
583
/*
 * PLIP_CN_RECEIVE handler: drive the receive state machine (trigger,
 * length, payload, checksum) and hand the finished skb to the stack.
 * Returns OK, TIMEOUT or ERROR; after TIMEOUT the caller reschedules
 * us and we resume exactly where we left off (state is in *rcv).
 */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* No synchronize_irq() needed: a stale IRQ is harmless here. */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01);		/* acknowledge the trigger */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* A send is pending too: use the short trigger
			   timeout so we can fall back to sending if the
			   peer goes quiet (collision resolution). */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the announced length before allocating. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len ||
		    rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}

		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* align IP header */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		} while (++rcv->byte < rcv->length.h);
		/* Sum the payload backwards; rcv->byte ends at 0. */
		do {
			rcv->checksum += lbuf[--rcv->byte];
		} while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			dev->stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Hand the packet to the network stack. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx_ni(rcv->skb);
		dev->stats.rx_bytes += rcv->length.h;
		dev->stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection; if a send was queued meanwhile,
		   go straight to sending it. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
703
704
705
/*
 * Send one byte to the peer as two nibbles, waiting for the peer's
 * acknowledgement (status bit 0x80 toggling) between them.  Resumable
 * via *ns_p after a TIMEOUT, mirroring plip_receive().
 */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Present the low nibble on the data lines. */
		write_data (dev, data & 0x0f);
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Set bit 4 (data-valid) and wait for 0x80 to drop. */
		write_data (dev, 0x10 | (data & 0x0f));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		/* Present the high nibble, keeping bit 4 set. */
		write_data (dev, 0x10 | (data >> 4));
		*ns_p = PLIP_NB_2;
		/* fall through */

	case PLIP_NB_2:
		/* Clear bit 4 and wait for 0x80 to rise again. */
		write_data (dev, (data >> 4));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
750
751
/*
 * PLIP_CN_SEND handler: trigger the peer, then push length, payload
 * and checksum nibble-by-nibble.  Resumable on TIMEOUT like the
 * receive path; HS_TIMEOUT means the initial handshake failed.
 * An incoming trigger during our handshake wins (collision): we back
 * off and let the receive path run.
 */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* Peer's status lines must show the idle pattern first. */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger the remote receive interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted: peer started sending first. */
				dev->stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* An interrupt slipped in after we
					   dropped the lock: the receive path
					   owns the link now; back off. */
					ENABLE(dev->irq);
					dev->stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		/* fall through */
	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		do {
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		} while (++snd->byte < snd->length.h);
		/* Sum the payload backwards; snd->byte ends at 0. */
		do {
			snd->checksum += lbuf[--snd->byte];
		} while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		dev->stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		dev->stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Close the connection. */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
870
/*
 * PLIP_CN_CLOSING handler: the transfer is over — go back to idle and
 * restart the tx queue.  If parport preemption was requested while we
 * were busy (should_relinquish), hand the port back now.
 */
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}
887
888
/*
 * PLIP_CN_ERROR handler: wait until the peer's status lines return to
 * the idle pattern (0x80), then re-arm the interface; otherwise ask
 * to be rescheduled for another look later.
 */
static int
plip_error(struct net_device *dev, struct net_local *nl,
	   struct plip_local *snd, struct plip_local *rcv)
{
	unsigned char status;

	status = read_status(dev);
	if ((status & 0xf8) == 0x80) {
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
		nl->connection = PLIP_CN_NONE;
		nl->should_relinquish = 0;
		netif_start_queue (dev);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		netif_wake_queue (dev);
	} else {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}

	return OK;
}
912
913
/*
 * Parallel-port interrupt handler (also called directly on each poll
 * tick in IRQ-less mode).  If the status lines show the peer's
 * trigger pattern (0xc0), flip the link into receive state and kick
 * the bottom half.
 */
static void
plip_interrupt(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;
	unsigned long flags;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irqsave (&nl->lock, flags);

	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		/* Not a trigger; expected noise when polling without IRQ. */
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irqrestore (&nl->lock, flags);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		/* fall through */
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* Already receiving: an extra trigger can occur because
		   of a race around the state change; ignore it. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irqrestore(&nl->lock, flags);
}
964
/*
 * ndo_start_xmit: queue one skb for transmission.  Claims the parport
 * if we do not hold it, stops the tx queue (PLIP handles strictly one
 * packet at a time) and hands the skb to the bottom half via snd_data.
 */
static int
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return NETDEV_TX_BUSY;

	/* We may need to grab the bus */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return NETDEV_TX_BUSY;
		nl->port_owner = 1;
	}

	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return NETDEV_TX_BUSY;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	spin_lock_irq(&nl->lock);
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return NETDEV_TX_OK;
}
1005
/*
 * PLIP fakes Ethernet addresses from IPv4 addresses: rewrite the
 * header so the source is our dev_addr and the destination is fc:fc
 * followed by the four bytes of ifa_address from the interface's
 * first ifaddr.  Runs under rcu_read_lock to safely walk in_dev.
 */
static void
plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
{
	const struct in_device *in_dev;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		/* Any address will do - we take the first. */
		const struct in_ifaddr *ifa = in_dev->ifa_list;
		if (ifa) {
			memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
			memset(eth->h_dest, 0xfc, 2);
			memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
		}
	}
	rcu_read_unlock();
}
1024
1025static int
1026plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1027 unsigned short type, const void *daddr,
1028 const void *saddr, unsigned len)
1029{
1030 int ret;
1031
1032 ret = eth_header(skb, dev, type, daddr, saddr, len);
1033 if (ret >= 0)
1034 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1035
1036 return ret;
1037}
1038
1039static int plip_hard_header_cache(const struct neighbour *neigh,
1040 struct hh_cache *hh, __be16 type)
1041{
1042 int ret;
1043
1044 ret = eth_header_cache(neigh, hh, type);
1045 if (ret == 0) {
1046 struct ethhdr *eth;
1047
1048 eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1049 HH_DATA_OFF(sizeof(*eth)));
1050 plip_rewrite_address (neigh->dev, eth);
1051 }
1052
1053 return ret;
1054}
1055
1056
1057
1058
1059
1060
1061
/*
 * ndo_open: claim the parallel port (if we don't already own it),
 * reset the data lines, enable interrupts — or start the poll timer
 * in IRQ-less mode — clear transfer state, and seed dev_addr bytes
 * 2..5 from the interface's local IPv4 address so that peers can
 * reconstruct our fake MAC from our IP.
 */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		/* No IRQ: drive reception from a recurring poll tick. */
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/*
	 * dev_addr was preset to fc:fc:fc:fc:fc:fc in plip_init_netdev();
	 * overwrite the low four bytes with our IPv4 address (caller
	 * holds RTNL, so __in_dev_get_rtnl() is safe here).
	 */
	in_dev=__in_dev_get_rtnl(dev);
	if (in_dev) {
		/* Any address will do - we take the first */
		struct in_ifaddr *ifa=in_dev->ifa_list;
		if (ifa != NULL) {
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}
1120
1121
/*
 * ndo_stop: stop the queue and the IRQ (or poll timer, waiting for it
 * to acknowledge via killed_timer_cmp), release the parallel port and
 * free any in-flight skbs in both directions.
 */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	DISABLE(dev->irq);
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		/* Polled mode: ask the timer work to stop and wait until
		   it has confirmed, so it cannot run after close. */
		init_completion(&nl->killed_timer_cmp);
		atomic_set (&nl->kill_timer, 1);
		wait_for_completion(&nl->killed_timer_cmp);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	/* Drop any half-transferred packets. */
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
1167
1168static int
1169plip_preempt(void *handle)
1170{
1171 struct net_device *dev = (struct net_device *)handle;
1172 struct net_local *nl = netdev_priv(dev);
1173
1174
1175 if (nl->connection != PLIP_CN_NONE) {
1176 nl->should_relinquish = 1;
1177 return 1;
1178 }
1179
1180 nl->port_owner = 0;
1181 return 0;
1182}
1183
/*
 * parport wakeup callback: the port has become available again after
 * we lost it (or failed to claim it).  Re-claim it if the interface
 * is up and reset the data lines.
 */
static void
plip_wakeup(void *handle)
{
	struct net_device *dev = (struct net_device *)handle;
	struct net_local *nl = netdev_priv(dev);

	if (nl->port_owner) {
		/* Why are we being woken up? We already own the port. */
		printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
		if (!parport_claim(nl->pardev))
			/* bus_owner is already set (but why?) */
			printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
		else
			return;
	}

	if (!(dev->flags & IFF_UP))
		/* Interface is down: nothing to resume. */
		return;

	if (!parport_claim(nl->pardev)) {
		nl->port_owner = 1;
		/* Clear the data port. */
		write_data (dev, 0x00);
	}
}
1210
1211static int
1212plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1213{
1214 struct net_local *nl = netdev_priv(dev);
1215 struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1216
1217 if (cmd != SIOCDEVPLIP)
1218 return -EOPNOTSUPP;
1219
1220 switch(pc->pcmd) {
1221 case PLIP_GET_TIMEOUT:
1222 pc->trigger = nl->trigger;
1223 pc->nibble = nl->nibble;
1224 break;
1225 case PLIP_SET_TIMEOUT:
1226 if(!capable(CAP_NET_ADMIN))
1227 return -EPERM;
1228 nl->trigger = pc->trigger;
1229 nl->nibble = pc->nibble;
1230 break;
1231 default:
1232 return -EOPNOTSUPP;
1233 }
1234 return 0;
1235}
1236
/* Module parameters: list of parport numbers to bind (-1 = unset),
 * and "timid" mode (only attach to ports nothing else has claimed). */
static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
static int timid;

module_param_array(parport, int, NULL, 0);
module_param(timid, int, 0);
MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");

/* Registered devices, indexed by unit number. */
static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1245
1246static inline int
1247plip_searchfor(int list[], int a)
1248{
1249 int i;
1250 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1251 if (list[i] == a) return 1;
1252 }
1253 return 0;
1254}
1255
1256
1257
1258static void plip_attach (struct parport *port)
1259{
1260 static int unit;
1261 struct net_device *dev;
1262 struct net_local *nl;
1263 char name[IFNAMSIZ];
1264 struct pardev_cb plip_cb;
1265
1266 if ((parport[0] == -1 && (!timid || !port->devices)) ||
1267 plip_searchfor(parport, port->number)) {
1268 if (unit == PLIP_MAX) {
1269 printk(KERN_ERR "plip: too many devices\n");
1270 return;
1271 }
1272
1273 sprintf(name, "plip%d", unit);
1274 dev = alloc_etherdev(sizeof(struct net_local));
1275 if (!dev)
1276 return;
1277
1278 strcpy(dev->name, name);
1279
1280 dev->irq = port->irq;
1281 dev->base_addr = port->base;
1282 if (port->irq == -1) {
1283 printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
1284 "which is fairly inefficient!\n", port->name);
1285 }
1286
1287 nl = netdev_priv(dev);
1288 nl->dev = dev;
1289
1290 memset(&plip_cb, 0, sizeof(plip_cb));
1291 plip_cb.private = dev;
1292 plip_cb.preempt = plip_preempt;
1293 plip_cb.wakeup = plip_wakeup;
1294 plip_cb.irq_func = plip_interrupt;
1295
1296 nl->pardev = parport_register_dev_model(port, dev->name,
1297 &plip_cb, unit);
1298
1299 if (!nl->pardev) {
1300 printk(KERN_ERR "%s: parport_register failed\n", name);
1301 goto err_free_dev;
1302 }
1303
1304 plip_init_netdev(dev);
1305
1306 if (register_netdev(dev)) {
1307 printk(KERN_ERR "%s: network register failed\n", name);
1308 goto err_parport_unregister;
1309 }
1310
1311 printk(KERN_INFO "%s", version);
1312 if (dev->irq != -1)
1313 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1314 "using IRQ %d.\n",
1315 dev->name, dev->base_addr, dev->irq);
1316 else
1317 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1318 "not using IRQ.\n",
1319 dev->name, dev->base_addr);
1320 dev_plip[unit++] = dev;
1321 }
1322 return;
1323
1324err_parport_unregister:
1325 parport_unregister_device(nl->pardev);
1326err_free_dev:
1327 free_netdev(dev);
1328}
1329
1330
1331
/* Nothing to do at detach: devices are torn down in
 * plip_cleanup_module() instead. */
static void plip_detach (struct parport *port)
{
	/* Nothing to do */
}
1336
1337static int plip_probe(struct pardevice *par_dev)
1338{
1339 struct device_driver *drv = par_dev->dev.driver;
1340 int len = strlen(drv->name);
1341
1342 if (strncmp(par_dev->name, drv->name, len))
1343 return -ENODEV;
1344
1345 return 0;
1346}
1347
/* parport driver registration (device-model style). */
static struct parport_driver plip_driver = {
	.name		= "plip",
	.probe		= plip_probe,
	.match_port	= plip_attach,
	.detach		= plip_detach,
	.devmodel	= true,
};
1355
1356static void __exit plip_cleanup_module (void)
1357{
1358 struct net_device *dev;
1359 int i;
1360
1361 for (i=0; i < PLIP_MAX; i++) {
1362 if ((dev = dev_plip[i])) {
1363 struct net_local *nl = netdev_priv(dev);
1364 unregister_netdev(dev);
1365 if (nl->port_owner)
1366 parport_release(nl->pardev);
1367 parport_unregister_device(nl->pardev);
1368 free_netdev(dev);
1369 dev_plip[i] = NULL;
1370 }
1371 }
1372
1373 parport_unregister_driver(&plip_driver);
1374}
1375
1376#ifndef MODULE
1377
/* Position in parport[] for the next "plip=parportN" boot argument. */
static int parport_ptr;

/*
 * Parse "plip=" kernel command-line options:
 *   plip=parportN — bind to parallel port N (repeatable)
 *   plip=timid    — only use ports no other driver has claimed
 *   plip=0        — disable the driver entirely
 */
static int __init plip_setup(char *str)
{
	int ints[4];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	/* Ugh. */
	if (!strncmp(str, "parport", 7)) {
		int n = simple_strtoul(str+7, NULL, 10);
		if (parport_ptr < PLIP_MAX)
			parport[parport_ptr++] = n;
		else
			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
			       str);
	} else if (!strcmp(str, "timid")) {
		timid = 1;
	} else {
		if (ints[0] == 0 || ints[1] == 0) {
			/* disable driver on "plip=" or "plip=0" */
			parport[0] = -2;
		} else {
			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
			       ints[1]);
		}
	}
	return 1;
}

__setup("plip=", plip_setup);
1409
1410#endif
1411
1412static int __init plip_init (void)
1413{
1414 if (parport[0] == -2)
1415 return 0;
1416
1417 if (parport[0] != -1 && timid) {
1418 printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1419 timid = 0;
1420 }
1421
1422 if (parport_register_driver (&plip_driver)) {
1423 printk (KERN_WARNING "plip: couldn't register driver\n");
1424 return 1;
1425 }
1426
1427 return 0;
1428}
1429
/* Module entry/exit points and license. */
module_init(plip_init);
module_exit(plip_cleanup_module);
MODULE_LICENSE("GPL");
1433