1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
/* Driver banner, printed once per successfully attached device. */
static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91#include <linux/module.h>
92#include <linux/kernel.h>
93#include <linux/types.h>
94#include <linux/fcntl.h>
95#include <linux/interrupt.h>
96#include <linux/string.h>
97#include <linux/if_ether.h>
98#include <linux/in.h>
99#include <linux/errno.h>
100#include <linux/delay.h>
101#include <linux/init.h>
102#include <linux/netdevice.h>
103#include <linux/etherdevice.h>
104#include <linux/inetdevice.h>
105#include <linux/skbuff.h>
106#include <linux/if_plip.h>
107#include <linux/workqueue.h>
108#include <linux/spinlock.h>
109#include <linux/parport.h>
110#include <linux/bitops.h>
111
112#include <net/neighbour.h>
113
114#include <asm/system.h>
115#include <asm/irq.h>
116#include <asm/byteorder.h>
117#include <asm/semaphore.h>
118
119
/* Maximum number of plip devices this driver will create. */
#define PLIP_MAX 8

/* Debug verbosity; larger values enable more printk output. */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static const unsigned int net_debug = NET_DEBUG;
127
/*
 * Wrap enable_irq()/disable_irq() so that IRQ-less operation (irq == -1,
 * polled via the timer work) is a no-op.  The do { } while (0) wrapper
 * makes the macros safe inside unbraced if/else bodies, which the bare
 * "if (...)" form was not (dangling-else hazard).
 */
#define ENABLE(irq)	do { if ((irq) != -1) enable_irq(irq); } while (0)
#define DISABLE(irq)	do { if ((irq) != -1) disable_irq(irq); } while (0)
130
131
/* Busy-wait quantum in microseconds (used with udelay in the handshake loops). */
#define PLIP_DELAY_UNIT 1

/* Trigger-phase timeout, counted in PLIP_DELAY_UNIT iterations. */
#define PLIP_TRIGGER_WAIT 500

/* Per-nibble handshake timeout, counted in PLIP_DELAY_UNIT iterations. */
#define PLIP_NIBBLE_WAIT 3000
139
140
141static void plip_kick_bh(struct work_struct *work);
142static void plip_bh(struct work_struct *work);
143static void plip_timer_bh(struct work_struct *work);
144
145
146static void plip_interrupt(void *dev_id);
147
148
149static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
150static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
151 unsigned short type, const void *daddr,
152 const void *saddr, unsigned len);
153static int plip_hard_header_cache(const struct neighbour *neigh,
154 struct hh_cache *hh);
155static int plip_open(struct net_device *dev);
156static int plip_close(struct net_device *dev);
157static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
158static int plip_preempt(void *handle);
159static void plip_wakeup(void *handle);
160
/* Overall link state of one PLIP device. */
enum plip_connection_state {
	PLIP_CN_NONE=0,		/* idle, no transfer in progress */
	PLIP_CN_RECEIVE,	/* peer triggered us, we are receiving */
	PLIP_CN_SEND,		/* we are transmitting a packet */
	PLIP_CN_CLOSING,	/* transfer done, settling back to idle */
	PLIP_CN_ERROR		/* handshake failed, waiting for the line to clear */
};

/* Progress of a single packet through the wire protocol. */
enum plip_packet_state {
	PLIP_PK_DONE=0,		/* no packet in flight */
	PLIP_PK_TRIGGER,	/* initial trigger handshake */
	PLIP_PK_LENGTH_LSB,	/* low byte of the packet length */
	PLIP_PK_LENGTH_MSB,	/* high byte of the packet length */
	PLIP_PK_DATA,		/* payload bytes */
	PLIP_PK_CHECKSUM	/* 8-bit additive checksum */
};

/* Which half of the current byte (nibble) is being handshaked. */
enum plip_nibble_state {
	PLIP_NB_BEGIN,
	PLIP_NB_1,
	PLIP_NB_2,
};
183
/* Per-direction (send or receive) state for the packet currently in flight. */
struct plip_local {
	enum plip_packet_state state;	/* where we are in the packet */
	enum plip_nibble_state nibble;	/* where we are in the current byte */
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;			/* packet length, byte- or word-accessible */
	unsigned short byte;		/* index of the byte being transferred */
	unsigned char checksum;		/* running additive checksum */
	unsigned char data;		/* scratch byte (holds the received checksum) */
	struct sk_buff *skb;		/* the packet being sent or built up */
};
206
/* Per-device private data (lives in the net_device private area). */
struct net_local {
	struct net_device *dev;		/* back-pointer to our net_device */
	struct work_struct immediate;	/* runs plip_bh() */
	struct delayed_work deferred;	/* runs plip_kick_bh() */
	struct delayed_work timer;	/* polling work, used only when irq == -1 */
	struct plip_local snd_data;	/* transmit-side packet state */
	struct plip_local rcv_data;	/* receive-side packet state */
	struct pardevice *pardev;	/* our parport registration */
	unsigned long trigger;		/* trigger-phase timeout (SIOCDEVPLIP tunable) */
	unsigned long nibble;		/* per-nibble timeout (SIOCDEVPLIP tunable) */
	enum plip_connection_state connection;	/* current link state */
	unsigned short timeout_count;	/* consecutive timeouts on this transfer */
	int is_deferred;		/* deferred work should re-run plip_bh() */
	int port_owner;			/* nonzero while we hold the parport claim */
	int should_relinquish;		/* release the port once the connection closes */
	spinlock_t lock;		/* protects connection/packet state */
	atomic_t kill_timer;		/* asks the polling work to stop */
	struct semaphore killed_timer_sem; /* polling work signals that it stopped */
};
226
227static inline void enable_parport_interrupts (struct net_device *dev)
228{
229 if (dev->irq != -1)
230 {
231 struct parport *port =
232 ((struct net_local *)dev->priv)->pardev->port;
233 port->ops->enable_irq (port);
234 }
235}
236
237static inline void disable_parport_interrupts (struct net_device *dev)
238{
239 if (dev->irq != -1)
240 {
241 struct parport *port =
242 ((struct net_local *)dev->priv)->pardev->port;
243 port->ops->disable_irq (port);
244 }
245}
246
247static inline void write_data (struct net_device *dev, unsigned char data)
248{
249 struct parport *port =
250 ((struct net_local *)dev->priv)->pardev->port;
251
252 port->ops->write_data (port, data);
253}
254
255static inline unsigned char read_status (struct net_device *dev)
256{
257 struct parport *port =
258 ((struct net_local *)dev->priv)->pardev->port;
259
260 return port->ops->read_status (port);
261}
262
/* Ethernet-style header building, plus PLIP's IP-derived address rewrite. */
static const struct header_ops plip_header_ops = {
	.create = plip_hard_header,
	.cache = plip_hard_header_cache,
};
267
268
269
270
271
272
273
274
275
276
277static void
278plip_init_netdev(struct net_device *dev)
279{
280 struct net_local *nl = netdev_priv(dev);
281
282
283 dev->hard_start_xmit = plip_tx_packet;
284 dev->open = plip_open;
285 dev->stop = plip_close;
286 dev->do_ioctl = plip_ioctl;
287
288 dev->tx_queue_len = 10;
289 dev->flags = IFF_POINTOPOINT|IFF_NOARP;
290 memset(dev->dev_addr, 0xfc, ETH_ALEN);
291
292 dev->header_ops = &plip_header_ops;
293
294
295 nl->port_owner = 0;
296
297
298 nl->trigger = PLIP_TRIGGER_WAIT;
299 nl->nibble = PLIP_NIBBLE_WAIT;
300
301
302 INIT_WORK(&nl->immediate, plip_bh);
303 INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
304
305 if (dev->irq == -1)
306 INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
307
308 spin_lock_init(&nl->lock);
309}
310
311
312
313
314static void
315plip_kick_bh(struct work_struct *work)
316{
317 struct net_local *nl =
318 container_of(work, struct net_local, deferred.work);
319
320 if (nl->is_deferred)
321 schedule_work(&nl->immediate);
322}
323
324
325static int plip_none(struct net_device *, struct net_local *,
326 struct plip_local *, struct plip_local *);
327static int plip_receive_packet(struct net_device *, struct net_local *,
328 struct plip_local *, struct plip_local *);
329static int plip_send_packet(struct net_device *, struct net_local *,
330 struct plip_local *, struct plip_local *);
331static int plip_connection_close(struct net_device *, struct net_local *,
332 struct plip_local *, struct plip_local *);
333static int plip_error(struct net_device *, struct net_local *,
334 struct plip_local *, struct plip_local *);
335static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
336 struct plip_local *snd,
337 struct plip_local *rcv,
338 int error);
339
/* Result codes returned by the state handlers below. */
#define OK 0
#define TIMEOUT 1
#define ERROR 2
#define HS_TIMEOUT 3

/* One handler per enum plip_connection_state value. */
typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
			 struct plip_local *snd, struct plip_local *rcv);
347
/* Dispatch table indexed by enum plip_connection_state. */
static const plip_func connection_state_table[] =
{
	plip_none,		/* PLIP_CN_NONE */
	plip_receive_packet,	/* PLIP_CN_RECEIVE */
	plip_send_packet,	/* PLIP_CN_SEND */
	plip_connection_close,	/* PLIP_CN_CLOSING */
	plip_error		/* PLIP_CN_ERROR */
};
356
357
358static void
359plip_bh(struct work_struct *work)
360{
361 struct net_local *nl = container_of(work, struct net_local, immediate);
362 struct plip_local *snd = &nl->snd_data;
363 struct plip_local *rcv = &nl->rcv_data;
364 plip_func f;
365 int r;
366
367 nl->is_deferred = 0;
368 f = connection_state_table[nl->connection];
369 if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
370 && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
371 nl->is_deferred = 1;
372 schedule_delayed_work(&nl->deferred, 1);
373 }
374}
375
376static void
377plip_timer_bh(struct work_struct *work)
378{
379 struct net_local *nl =
380 container_of(work, struct net_local, timer.work);
381
382 if (!(atomic_read (&nl->kill_timer))) {
383 plip_interrupt (nl->dev);
384
385 schedule_delayed_work(&nl->timer, 1);
386 }
387 else {
388 up (&nl->killed_timer_sem);
389 }
390}
391
/*
 * Handle a non-OK result (TIMEOUT/ERROR/HS_TIMEOUT) from a state
 * handler.  Returns TIMEOUT while the retry budget lasts or after a
 * teardown, OK when the condition can simply be ignored.  On teardown
 * both directions are abandoned and the device enters PLIP_CN_ERROR,
 * from which plip_error() will re-arm it once the line is quiet.
 */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* timeout while sending */
			nl->timeout_count++;
			/* HS_TIMEOUT (handshake) gets a larger retry budget
			   than a mid-packet timeout. */
			if ((error == HS_TIMEOUT
			    && nl->timeout_count <= 10)
			    || nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Retry later. */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			/* Hard error: force the full reset path below. */
			error = HS_TIMEOUT;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted before it really
			   began; nothing to clean up. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* timeout while receiving */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Retry later. */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		dev->stats.rx_dropped++;
	}
	/* Give up: drop any half-transferred packets in both directions. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	/* plip_error() will re-enable everything once the line clears. */
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}
467
/* PLIP_CN_NONE handler: the link is idle, nothing to do. */
static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}
474
475
476
/*
 * Receive one byte from the peer over the status lines, low nibble then
 * high nibble.  Resumable: *ns_p records which half of the handshake we
 * are in, so after a TIMEOUT a later call continues where this one
 * stopped.  The case labels fall through deliberately so a whole byte
 * is normally handled in one call.
 */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		cx = nibble_timeout;
		while (1) {
			/* Wait for the low nibble (status bit 0x80 clear). */
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				/* Read twice to guard against lines still
				   settling. */
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;	/* low nibble from status bits */
		write_data (dev, 0x10);		/* acknowledge the low nibble */
		*ns_p = PLIP_NB_1;
		/* fall through */
	case PLIP_NB_1:
		cx = nibble_timeout;
		while (1) {
			/* Wait for the high nibble (status bit 0x80 set). */
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;	/* high nibble from status bits */
		write_data (dev, 0x00);		/* ready for the next byte */
		*ns_p = PLIP_NB_BEGIN;
		/* fall through */
	case PLIP_NB_2:
		break;
	}
	return OK;
}
523
524
525
526
527
528
529
530
531
532
533
534
535
/*
 * Determine the packet's protocol ID, in the style of eth_type_trans()
 * but without the "is this frame addressed to us?" check: PLIP derives
 * MAC addresses from IP addresses, so that comparison does not apply.
 */
static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	if(*eth->h_dest&1)
	{
		/* Group bit set: broadcast or multicast. */
		if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 * An Ethernet type field of 1536 (0x600) or more is a protocol ID;
	 * smaller values are an 802.3 length field.
	 */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 * 802.3 frames starting with 0xFFFF are raw frames with no 802.2
	 * LLC header.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/* Otherwise assume 802.2 LLC. */
	return htons(ETH_P_802_2);
}
577
578
/*
 * PLIP_CN_RECEIVE handler: pull a complete frame from the wire.  The
 * packet-state cases fall through so an entire packet is normally
 * received in one invocation; on TIMEOUT the saved state lets a later
 * call resume where this one stopped.
 */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		disable_parport_interrupts (dev);
		write_data (dev, 0x01);		/* acknowledge the trigger */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		/* fall through */
	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* We also have a packet to send: use the short
			   trigger timeout and, if the peer never delivers,
			   switch over to sending. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		/* fall through */
	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the announced length before allocating. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len
		    || rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* align IP header */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		/* fall through */
	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		while (++rcv->byte < rcv->length.h);
		/* Additive checksum over the whole payload. */
		do
			rcv->checksum += lbuf[--rcv->byte];
		while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		/* fall through */
	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			dev->stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		/* fall through */
	case PLIP_PK_DONE:
		/* Hand the finished packet to the network stack. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx_ni(rcv->skb);
		dev->last_rx = jiffies;
		dev->stats.rx_bytes += rcv->length.h;
		dev->stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			/* A transmit is pending: go straight to sending. */
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
694
695
696
/*
 * Send one byte to the peer, low nibble then high nibble, waiting for
 * the peer's acknowledgement (status bit 0x80) between the halves.
 * Resumable via *ns_p like plip_receive(); the case labels fall
 * through deliberately.
 */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		write_data (dev, data & 0x0f);		/* present low nibble */
		*ns_p = PLIP_NB_1;
		/* fall through */
	case PLIP_NB_1:
		write_data (dev, 0x10 | (data & 0x0f));	/* strobe: low nibble valid */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)	/* peer acknowledged */
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		write_data (dev, 0x10 | (data >> 4));	/* present high nibble */
		*ns_p = PLIP_NB_2;
		/* fall through */
	case PLIP_NB_2:
		write_data (dev, (data >> 4));		/* strobe: high nibble valid */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)		/* peer ready for the next byte */
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
739
740
/*
 * PLIP_CN_SEND handler: push the queued skb onto the wire.  The trigger
 * phase watches for a collision with an incoming transfer; the
 * remaining cases fall through to send length, payload and checksum.
 */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* The line must be idle before we may trigger the peer. */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger the remote receive interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Collision: the peer triggered us first. */
				dev->stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				/* Peer acknowledged the trigger. */
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Collision after all: re-enable the
					   irq we just disabled and yield to
					   the receive path. */
					ENABLE(dev->irq);
					dev->stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		/* fall through */
	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		/* fall through */
	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		/* fall through */
	case PLIP_PK_DATA:
		do
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		while (++snd->byte < snd->length.h);
		/* Additive checksum over the payload just sent. */
		do
			snd->checksum += lbuf[--snd->byte];
		while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		/* fall through */
	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		dev->stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		dev->stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		/* fall through */
	case PLIP_PK_DONE:
		/* Close the connection. */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
855
856static int
857plip_connection_close(struct net_device *dev, struct net_local *nl,
858 struct plip_local *snd, struct plip_local *rcv)
859{
860 spin_lock_irq(&nl->lock);
861 if (nl->connection == PLIP_CN_CLOSING) {
862 nl->connection = PLIP_CN_NONE;
863 netif_wake_queue (dev);
864 }
865 spin_unlock_irq(&nl->lock);
866 if (nl->should_relinquish) {
867 nl->should_relinquish = nl->port_owner = 0;
868 parport_release(nl->pardev);
869 }
870 return OK;
871}
872
873
874static int
875plip_error(struct net_device *dev, struct net_local *nl,
876 struct plip_local *snd, struct plip_local *rcv)
877{
878 unsigned char status;
879
880 status = read_status(dev);
881 if ((status & 0xf8) == 0x80) {
882 if (net_debug > 2)
883 printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
884 nl->connection = PLIP_CN_NONE;
885 nl->should_relinquish = 0;
886 netif_start_queue (dev);
887 enable_parport_interrupts (dev);
888 ENABLE(dev->irq);
889 netif_wake_queue (dev);
890 } else {
891 nl->is_deferred = 1;
892 schedule_delayed_work(&nl->deferred, 1);
893 }
894
895 return OK;
896}
897
898
/*
 * Receive notification: called by parport on an interrupt, and from
 * plip_timer_bh() when polling.  A trigger pattern from the peer while
 * we are idle, closing, or about to send switches us to receive mode
 * and kicks the bottom half.
 */
static void
plip_interrupt(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irq (&nl->lock);

	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		/* Status lines do not show the peer's trigger pattern. */
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irq (&nl->lock);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		/* fall through: treat like idle and start receiving */
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* Already receiving: ignore the repeated trigger. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irq(&nl->lock);
}
947
/*
 * hard_start_xmit hook: accept one skb for transmission and kick the
 * bottom half.  Returns 0 on acceptance, 1 to make the stack requeue
 * the packet (queue stopped, port unavailable, or packet too big).
 */
static int
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return 1;

	/* We may need to grab the bus first. */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return 1;
		nl->port_owner = 1;
	}

	/* Only one packet in flight at a time. */
	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return 1;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	/* Queue the packet for the state machine and kick plip_bh(). */
	spin_lock_irq(&nl->lock);
	dev->trans_start = jiffies;
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return 0;
}
989
990static void
991plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
992{
993 const struct in_device *in_dev = dev->ip_ptr;
994
995 if (in_dev) {
996
997 const struct in_ifaddr *ifa = in_dev->ifa_list;
998 if (ifa) {
999 memcpy(eth->h_source, dev->dev_addr, 6);
1000 memset(eth->h_dest, 0xfc, 2);
1001 memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
1002 }
1003 }
1004}
1005
1006static int
1007plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1008 unsigned short type, const void *daddr,
1009 const void *saddr, unsigned len)
1010{
1011 int ret;
1012
1013 ret = eth_header(skb, dev, type, daddr, saddr, len);
1014 if (ret >= 0)
1015 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1016
1017 return ret;
1018}
1019
1020int plip_hard_header_cache(const struct neighbour *neigh,
1021 struct hh_cache *hh)
1022{
1023 int ret;
1024
1025 ret = eth_header_cache(neigh, hh);
1026 if (ret == 0) {
1027 struct ethhdr *eth;
1028
1029 eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1030 HH_DATA_OFF(sizeof(*eth)));
1031 plip_rewrite_address (neigh->dev, eth);
1032 }
1033
1034 return ret;
1035}
1036
1037
1038
1039
1040
1041
1042
/*
 * Open/initialize the device (ifconfig up): claim the parallel port,
 * clear the data lines, enable interrupts (or start the polling work),
 * reset the state machine, and derive the tail of the MAC address from
 * the interface's IPv4 address.
 */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port. */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt, or start polling when there is no IRQ. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* The device address is derived from the IPv4 address: bytes 0-1
	   are 0xfc (set in plip_init_netdev()), bytes 2-5 the local IP
	   address.  See also plip_rewrite_address(). */
	if ((in_dev=dev->ip_ptr) != NULL) {
		/* Any address will do - we take the first. */
		struct in_ifaddr *ifa=in_dev->ifa_list;
		if (ifa != NULL) {
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}
1100
1101
/*
 * Shut the device down (ifconfig down): stop the queue, quiesce the
 * IRQ or polling work, release the port, and free any in-flight skbs.
 */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	DISABLE(dev->irq);
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		/* Polling mode: raise kill_timer and wait until
		   plip_timer_bh() acknowledges via the semaphore. */
		init_MUTEX_LOCKED (&nl->killed_timer_sem);
		atomic_set (&nl->kill_timer, 1);
		down (&nl->killed_timer_sem);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	/* Drop any packet still in flight in either direction. */
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
1147
/*
 * parport preemption callback: another driver wants the port.  Refuse
 * (return 1) while a transfer is active — plip_connection_close() will
 * release the port later via should_relinquish — otherwise let go now.
 */
static int
plip_preempt(void *handle)
{
	struct net_device *dev = (struct net_device *)handle;
	struct net_local *nl = netdev_priv(dev);

	/* Stand our ground while a datagram is on the wire. */
	if (nl->connection != PLIP_CN_NONE) {
		nl->should_relinquish = 1;
		return 1;
	}

	nl->port_owner = 0;	/* remember that we released the bus */
	return 0;
}
1163
/*
 * parport wakeup callback: the port has become available again.
 * Reclaim it if the interface is up.
 */
static void
plip_wakeup(void *handle)
{
	struct net_device *dev = (struct net_device *)handle;
	struct net_local *nl = netdev_priv(dev);

	if (nl->port_owner) {
		/* We think we already own the port — being woken up is
		   unexpected. */
		printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
		if (!parport_claim(nl->pardev))
			/* Claim succeeded although we thought we held it:
			   bookkeeping is inconsistent. */
			printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
		else
			return;
	}

	if (!(dev->flags & IFF_UP))
		/* Interface is down: we don't need the port. */
		return;

	if (!parport_claim(nl->pardev)) {
		nl->port_owner = 1;
		/* Clear the data port. */
		write_data (dev, 0x00);
	}

	return;
}
1192
1193static int
1194plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1195{
1196 struct net_local *nl = netdev_priv(dev);
1197 struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1198
1199 if (cmd != SIOCDEVPLIP)
1200 return -EOPNOTSUPP;
1201
1202 switch(pc->pcmd) {
1203 case PLIP_GET_TIMEOUT:
1204 pc->trigger = nl->trigger;
1205 pc->nibble = nl->nibble;
1206 break;
1207 case PLIP_SET_TIMEOUT:
1208 if(!capable(CAP_NET_ADMIN))
1209 return -EPERM;
1210 nl->trigger = pc->trigger;
1211 nl->nibble = pc->nibble;
1212 break;
1213 default:
1214 return -EOPNOTSUPP;
1215 }
1216 return 0;
1217}
1218
1219static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1220static int timid;
1221
1222module_param_array(parport, int, NULL, 0);
1223module_param(timid, int, 0);
1224MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
1225
/* Devices created by plip_attach(), indexed by unit number. */
static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1227
1228static inline int
1229plip_searchfor(int list[], int a)
1230{
1231 int i;
1232 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1233 if (list[i] == a) return 1;
1234 }
1235 return 0;
1236}
1237
1238
1239
1240static void plip_attach (struct parport *port)
1241{
1242 static int unit;
1243 struct net_device *dev;
1244 struct net_local *nl;
1245 char name[IFNAMSIZ];
1246
1247 if ((parport[0] == -1 && (!timid || !port->devices)) ||
1248 plip_searchfor(parport, port->number)) {
1249 if (unit == PLIP_MAX) {
1250 printk(KERN_ERR "plip: too many devices\n");
1251 return;
1252 }
1253
1254 sprintf(name, "plip%d", unit);
1255 dev = alloc_etherdev(sizeof(struct net_local));
1256 if (!dev) {
1257 printk(KERN_ERR "plip: memory squeeze\n");
1258 return;
1259 }
1260
1261 strcpy(dev->name, name);
1262
1263 dev->irq = port->irq;
1264 dev->base_addr = port->base;
1265 if (port->irq == -1) {
1266 printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
1267 "which is fairly inefficient!\n", port->name);
1268 }
1269
1270 nl = netdev_priv(dev);
1271 nl->dev = dev;
1272 nl->pardev = parport_register_device(port, dev->name, plip_preempt,
1273 plip_wakeup, plip_interrupt,
1274 0, dev);
1275
1276 if (!nl->pardev) {
1277 printk(KERN_ERR "%s: parport_register failed\n", name);
1278 goto err_free_dev;
1279 return;
1280 }
1281
1282 plip_init_netdev(dev);
1283
1284 if (register_netdev(dev)) {
1285 printk(KERN_ERR "%s: network register failed\n", name);
1286 goto err_parport_unregister;
1287 }
1288
1289 printk(KERN_INFO "%s", version);
1290 if (dev->irq != -1)
1291 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1292 "using IRQ %d.\n",
1293 dev->name, dev->base_addr, dev->irq);
1294 else
1295 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1296 "not using IRQ.\n",
1297 dev->name, dev->base_addr);
1298 dev_plip[unit++] = dev;
1299 }
1300 return;
1301
1302err_parport_unregister:
1303 parport_unregister_device(nl->pardev);
1304err_free_dev:
1305 free_netdev(dev);
1306 return;
1307}
1308
1309
1310
/* parport detach callback: nothing to do — devices are torn down in
   plip_cleanup_module() on module unload. */
static void plip_detach (struct parport *port)
{

}
1315
/* Registration with the parport bus: attach is called per detected port. */
static struct parport_driver plip_driver = {
	.name	= "plip",
	.attach = plip_attach,
	.detach = plip_detach
};
1321
1322static void __exit plip_cleanup_module (void)
1323{
1324 struct net_device *dev;
1325 int i;
1326
1327 parport_unregister_driver (&plip_driver);
1328
1329 for (i=0; i < PLIP_MAX; i++) {
1330 if ((dev = dev_plip[i])) {
1331 struct net_local *nl = netdev_priv(dev);
1332 unregister_netdev(dev);
1333 if (nl->port_owner)
1334 parport_release(nl->pardev);
1335 parport_unregister_device(nl->pardev);
1336 free_netdev(dev);
1337 dev_plip[i] = NULL;
1338 }
1339 }
1340}
1341
1342#ifndef MODULE
1343
/* Next free slot in parport[] while parsing "plip=" options. */
static int parport_ptr;

/*
 * Parse the "plip=" kernel command-line option:
 *   plip=parport<N>  bind to parallel port N
 *   plip=timid       only use ports with no other device attached
 *   plip=0 (or bare) disable the driver (parport[0] = -2)
 */
static int __init plip_setup(char *str)
{
	int ints[4];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	if (!strncmp(str, "parport", 7)) {
		int n = simple_strtoul(str+7, NULL, 10);
		if (parport_ptr < PLIP_MAX)
			parport[parport_ptr++] = n;
		else
			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
			       str);
	} else if (!strcmp(str, "timid")) {
		timid = 1;
	} else {
		if (ints[0] == 0 || ints[1] == 0) {
			/* disable driver on "plip=" or "plip=0" */
			parport[0] = -2;
		} else {
			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
			       ints[1]);
		}
	}
	return 1;
}
1373
1374__setup("plip=", plip_setup);
1375
1376#endif
1377
1378static int __init plip_init (void)
1379{
1380 if (parport[0] == -2)
1381 return 0;
1382
1383 if (parport[0] != -1 && timid) {
1384 printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1385 timid = 0;
1386 }
1387
1388 if (parport_register_driver (&plip_driver)) {
1389 printk (KERN_WARNING "plip: couldn't register driver\n");
1390 return 1;
1391 }
1392
1393 return 0;
1394}
1395
1396module_init(plip_init);
1397module_exit(plip_cleanup_module);
1398MODULE_LICENSE("GPL");
1399
1400
1401
1402
1403
1404
1405