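/*
 * 8390 core: shared support code for National Semiconductor DP8390
 * (NS8390) compatible Ethernet controllers.  Board drivers supply the
 * low-level block I/O hooks (reset, header fetch, block input/output)
 * and reuse the open/close, transmit, interrupt, receive and multicast
 * handling implemented here.
 */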
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/crc32.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#define NS8390_CORE
#include "8390.h"

#define BUG_83C690
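/*
 * These macros route the generic 8390 operations through the
 * board-specific hooks stored in the per-device struct ei_device.
 */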
#define ei_reset_8390 (ei_local->reset_8390)
#define ei_block_output (ei_local->block_output)
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)

static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
static void ei_receive(struct net_device *dev);
static void ei_rx_overrun(struct net_device *dev);

static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
				int start_page);
static void do_set_multicast_list(struct net_device *dev);
static void __NS8390_init(struct net_device *dev, int startp);

static unsigned version_printed;
static u32 msg_enable;
module_param(msg_enable, uint, 0444);
MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
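/*
 * Locking model: ei_local->page_lock serialises all access to the
 * 8390's paged registers, from both the driver entry points and the
 * interrupt handler.  The transmit path additionally masks the chip's
 * interrupts and sets ->irqlock while it owns the remote DMA engine,
 * since programmed I/O to the packet buffer must not be interleaved
 * with the ISR's own register accesses.
 */

/*
 * __ei_open - open/initialise the board
 * @dev: network device to initialise
 *
 * Programs the 8390 for normal operation (via __NS8390_init) and
 * starts the transmit queue.
 */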
static int __ei_open(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = netdev_priv(dev);

	if (dev->watchdog_timeo <= 0)
		dev->watchdog_timeo = TX_TIMEOUT;

	/*
	 * Grab the page lock so we own the register set, then call
	 * the init function.
	 */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 1);

	/* Start the queue before we drop the lock so the first IRQ
	 * finds the device fully ready. */
	netif_start_queue(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	ei_local->irqlock = 0;
	return 0;
}
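/*
 * __ei_close - shut down the device
 * @dev: device to close
 *
 * Stops the chip and the transmit queue.  Opposite of __ei_open().
 */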
static int __ei_close(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/*
	 * Hold the page lock while we stop the chip.
	 */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 0);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	netif_stop_queue(dev);
	return 0;
}
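/*
 * __ei_tx_timeout - handle a transmit timeout
 * @dev: device that timed out
 * @txqueue: index of the hung queue (unused: single-queue hardware)
 *
 * Called by the networking core when a transmit has not completed
 * within dev->watchdog_timeo.  Logs the transmitter and interrupt
 * status, then resets and reinitialises the 8390 and restarts the
 * queue.
 */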
static void __ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
	unsigned long flags;

	dev->stats.tx_errors++;

	spin_lock_irqsave(&ei_local->page_lock, flags);
	txsr = ei_inb(e8390_base+EN0_TSR);
	isr = ei_inb(e8390_base+EN0_ISR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
		   (txsr & ENTSR_ABT) ? "excess collisions." :
		   (isr) ? "lost interrupt?" : "cable problem?",
		   txsr, isr, tickssofar);

	if (!isr && !dev->stats.tx_packets) {
		/* The 8390 probably hasn't gotten on the cable yet. */
		ei_local->interface_num ^= 1;	/* Try a different xcvr. */
	}

	/* Ugly but a reset can be slow, yet must be protected. */

	disable_irq_nosync_lockdep(dev->irq);
	spin_lock(&ei_local->page_lock);

	/* Try to restart the card.  Perhaps the user has fixed something. */
	ei_reset_8390(dev);
	__NS8390_init(dev, 1);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep(dev->irq);
	netif_wake_queue(dev);
}
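/*
 * __ei_start_xmit - begin packet transmission
 * @skb: packet to send
 * @dev: network device
 *
 * Pads short frames to ETH_ZLEN, copies the frame into one of the two
 * on-card transmit buffers via ei_block_output(), and triggers the
 * send if the transmitter is idle.  The chip's interrupts are masked
 * around the buffer upload because the 8390's remote DMA cannot be
 * shared with the interrupt handler.
 */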
static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int send_length = skb->len, output_page;
	unsigned long flags;
	char buf[ETH_ZLEN];
	char *data = skb->data;

	if (skb->len < ETH_ZLEN) {
		memset(buf, 0, ETH_ZLEN);	/* more efficient than doing just the needed bits */
		memcpy(buf, data, skb->len);
		send_length = ETH_ZLEN;
		data = buf;
	}

	/*
	 * Mask interrupts from the ethercard.
	 *
	 * SMP: We have to grab the lock here otherwise the IRQ handler
	 * on another CPU can flip window and race the IRQ mask set.  We
	 * end up trashing the mcast filter, not disabling irqs, if we
	 * don't lock.
	 */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, e8390_base + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	/*
	 * Slow phase with lock held.
	 */
	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);

	spin_lock(&ei_local->page_lock);

	ei_local->irqlock = 1;

	/*
	 * We have two Tx slots available for use.  Find the first free
	 * slot, and then perform some sanity checks.  With two Tx bufs,
	 * you get very close to transmitting back-to-back packets.  With
	 * only one Tx buf, the transmitter sits idle while you reload the
	 * card, leaving a substantial gap between each transmitted packet.
	 */
	if (ei_local->tx1 == 0) {
		output_page = ei_local->tx_start_page;
		ei_local->tx1 = send_length;
		if ((netif_msg_tx_queued(ei_local)) &&
		    ei_local->tx2 > 0)
			netdev_dbg(dev,
				   "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx2, ei_local->lasttx, ei_local->txing);
	} else if (ei_local->tx2 == 0) {
		output_page = ei_local->tx_start_page + TX_PAGES/2;
		ei_local->tx2 = send_length;
		if ((netif_msg_tx_queued(ei_local)) &&
		    ei_local->tx1 > 0)
			netdev_dbg(dev,
				   "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx1, ei_local->lasttx, ei_local->txing);
	} else {			/* We should never get here. */
		netif_dbg(ei_local, tx_err, dev,
			  "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
			  ei_local->tx1, ei_local->tx2, ei_local->lasttx);
		ei_local->irqlock = 0;
		netif_stop_queue(dev);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		spin_unlock(&ei_local->page_lock);
		enable_irq_lockdep_irqrestore(dev->irq, &flags);
		dev->stats.tx_errors++;
		return NETDEV_TX_BUSY;
	}

	/*
	 * Okay, now upload the packet and trigger a send if the transmitter
	 * isn't already sending.  If it is busy, the interrupt handler will
	 * trigger the send later, upon receiving a Tx done interrupt.
	 */
	ei_block_output(dev, send_length, data, output_page);

	if (!ei_local->txing) {
		ei_local->txing = 1;
		NS8390_trigger_send(dev, send_length, output_page);
		if (output_page == ei_local->tx_start_page) {
			ei_local->tx1 = -1;
			ei_local->lasttx = -1;
		} else {
			ei_local->tx2 = -1;
			ei_local->lasttx = -2;
		}
	} else
		ei_local->txqueue++;

	if (ei_local->tx1 && ei_local->tx2)
		netif_stop_queue(dev);
	else
		netif_start_queue(dev);

	/* Turn 8390 interrupts back on. */
	ei_local->irqlock = 0;
	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);
	skb_tx_timestamp(skb);
	dev_consume_skb_any(skb);
	dev->stats.tx_bytes += send_length;

	return NETDEV_TX_OK;
}
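/*
 * __ei_interrupt - handle the interrupts from an 8390
 * @irq: interrupt number
 * @dev_id: the net_device that interrupted
 *
 * Services receive, transmit, error-counter and overrun events until
 * the interrupt status register is clear or MAX_SERVICE events have
 * been handled.
 */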
static irqreturn_t __ei_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned long e8390_base = dev->base_addr;
	int interrupts, nr_serviced = 0;
	struct ei_device *ei_local = netdev_priv(dev);

	/*
	 * Protect the irq test and the register pages from the rest of
	 * the driver.
	 */
	spin_lock(&ei_local->page_lock);

	if (ei_local->irqlock) {
		/*
		 * The transmit path has masked the 8390's interrupts and
		 * set ->irqlock, so this might just be an interrupt for a
		 * device sharing this line.
		 */
		netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
			   ei_inb_p(e8390_base + EN0_ISR),
			   ei_inb_p(e8390_base + EN0_IMR));
		spin_unlock(&ei_local->page_lock);
		return IRQ_NONE;
	}

	/* Change to page 0 and read the intr status reg. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
	netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
		  ei_inb_p(e8390_base + EN0_ISR));

	/* !!Assumption!! -- we stay in page 0.  Don't break this. */
	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
	       ++nr_serviced < MAX_SERVICE) {
		if (!netif_running(dev)) {
			netdev_warn(dev, "interrupt from stopped card\n");
			/* Acknowledge and ignore the interrupts. */
			ei_outb_p(interrupts, e8390_base + EN0_ISR);
			interrupts = 0;
			break;
		}
		if (interrupts & ENISR_OVER)
			ei_rx_overrun(dev);
		else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
			/* Got a good (?) packet. */
			ei_receive(dev);
		}
		/* Push the (possible) pending packet to the wire. */
		if (interrupts & ENISR_TX)
			ei_tx_intr(dev);
		else if (interrupts & ENISR_TX_ERR)
			ei_tx_err(dev);

		if (interrupts & ENISR_COUNTERS) {
			dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
			dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
			dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
		}

		/* Ignore any RDC interrupts that make it back to here. */
		if (interrupts & ENISR_RDC)
			ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);

		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
	}

	if (interrupts && (netif_msg_intr(ei_local))) {
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
		if (nr_serviced >= MAX_SERVICE) {
			/* 0xFF is valid for a card removal. */
			if (interrupts != 0xFF)
				netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
					    interrupts);
			ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
		} else {
			netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
			ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
		}
	}
	spin_unlock(&ei_local->page_lock);
	return IRQ_RETVAL(nr_serviced > 0);
}
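/* Polling path for netpoll/netconsole: run the ISR with the IRQ disabled. */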
#ifdef CONFIG_NET_POLL_CONTROLLER
static void __ei_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	__ei_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
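/*
 * ei_tx_err - handle transmitter error interrupts
 * @dev: network device
 *
 * Called when the transmitter signalled an error (ENISR_TX_ERR).  An
 * aborted or FIFO-underrun transmit still needs the normal completion
 * handling, so it is forwarded to ei_tx_intr(); other errors only
 * update the statistics.  Called with the page lock held.
 */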
static void ei_tx_err(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	/* ei_local is used on some platforms via the EI_SHIFT() macro. */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);

#ifdef VERBOSE_ERROR_DUMP
	netdev_dbg(dev, "transmitter error (%#2x):", txsr);
	if (txsr & ENTSR_ABT)
		pr_cont(" excess-collisions ");
	if (txsr & ENTSR_ND)
		pr_cont(" non-deferral ");
	if (txsr & ENTSR_CRS)
		pr_cont(" lost-carrier ");
	if (txsr & ENTSR_FU)
		pr_cont(" FIFO-underrun ");
	if (txsr & ENTSR_CDH)
		pr_cont(" lost-heartbeat ");
	pr_cont("\n");
#endif

	ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */

	if (tx_was_aborted)
		ei_tx_intr(dev);
	else {
		dev->stats.tx_errors++;
		if (txsr & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (txsr & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (txsr & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
}
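/*
 * ei_tx_intr - transmit-completed interrupt handler
 * @dev: network device
 *
 * Accounts for the finished transmit and, if the other ping-pong
 * buffer holds a queued frame, triggers its transmission.  Called
 * with the page lock held.
 */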
static void ei_tx_intr(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int status = ei_inb(e8390_base + EN0_TSR);

	ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */

	/*
	 * There are two Tx buffers; see which one finished, and trigger
	 * the send of the other one if it is queued.
	 */
	ei_local->txqueue--;

	if (ei_local->tx1 < 0) {
		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
			pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
			       ei_local->name, ei_local->lasttx, ei_local->tx1);
		ei_local->tx1 = 0;
		if (ei_local->tx2 > 0) {
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
			netif_trans_update(dev);
			ei_local->tx2 = -1;
			ei_local->lasttx = 2;
		} else {
			ei_local->lasttx = 20;
			ei_local->txing = 0;
		}
	} else if (ei_local->tx2 < 0) {
		if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
			pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
			       ei_local->name, ei_local->lasttx, ei_local->tx2);
		ei_local->tx2 = 0;
		if (ei_local->tx1 > 0) {
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
			netif_trans_update(dev);
			ei_local->tx1 = -1;
			ei_local->lasttx = 1;
		} else {
			ei_local->lasttx = 10;
			ei_local->txing = 0;
		}
	}

	/* Minimize Tx latency: update the statistics after we restart TXing. */
	if (status & ENTSR_COL)
		dev->stats.collisions++;
	if (status & ENTSR_PTX)
		dev->stats.tx_packets++;
	else {
		dev->stats.tx_errors++;
		if (status & ENTSR_ABT) {
			dev->stats.tx_aborted_errors++;
			dev->stats.collisions += 16;
		}
		if (status & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (status & ENTSR_FU)
			dev->stats.tx_fifo_errors++;
		if (status & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (status & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
	netif_wake_queue(dev);
}
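/*
 * ei_receive - pull new packets off the card
 * @dev: network device
 *
 * Walks the on-card receive ring from the boundary pointer to the
 * current page, validating each 8390 packet header before copying the
 * frame in with ei_block_input() and passing it to the stack.  Called
 * with the page lock held.
 */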
static void ei_receive(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned char rxing_page, this_frame, next_frame;
	unsigned short current_offset;
	int rx_pkt_count = 0;
	struct e8390_pkt_hdr rx_frame;
	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;

	while (++rx_pkt_count < 10) {
		int pkt_len, pkt_stat;

		/* Get the rx page (incoming packet pointer). */
		ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
		rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
		ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);

		/* Remove one frame from the ring.  Boundary is always a page behind. */
		this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
		if (this_frame >= ei_local->stop_page)
			this_frame = ei_local->rx_start_page;

		/*
		 * Someday we will omit this check, iff we never see this
		 * message.  (There is at least one start-up condition where
		 * the page pointers legitimately disagree.)
		 */
		if ((netif_msg_rx_status(ei_local)) &&
		    this_frame != ei_local->current_page &&
		    (this_frame != 0x0 || rxing_page != 0xFF))
			netdev_err(dev,
				   "mismatched read page pointers %2x vs %2x\n",
				   this_frame, ei_local->current_page);

		if (this_frame == rxing_page)	/* Read all the frames? */
			break;			/* Done for now */

		current_offset = this_frame << 8;
		ei_get_8390_hdr(dev, &rx_frame, this_frame);

		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
		pkt_stat = rx_frame.status;

		next_frame = this_frame + 1 + ((pkt_len+4)>>8);

		/*
		 * Sanity-check the header's next-frame pointer; a bogus value
		 * means the header was never written, so resynchronise the
		 * ring and drop the frame.
		 */
		if (rx_frame.next != next_frame &&
		    rx_frame.next != next_frame + 1 &&
		    rx_frame.next != next_frame - num_rx_pages &&
		    rx_frame.next != next_frame + 1 - num_rx_pages) {
			ei_local->current_page = rxing_page;
			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
			dev->stats.rx_errors++;
			continue;
		}

		if (pkt_len < 60 || pkt_len > 1518) {
			netif_dbg(ei_local, rx_status, dev,
				  "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
				  rx_frame.count, rx_frame.status,
				  rx_frame.next);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
		} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb(dev, pkt_len + 2);
			if (skb == NULL) {
				netif_err(ei_local, rx_err, dev,
					  "Couldn't allocate a sk_buff of size %d\n",
					  pkt_len);
				dev->stats.rx_dropped++;
				break;
			} else {
				skb_reserve(skb, 2);	/* IP headers on 16 byte boundaries */
				skb_put(skb, pkt_len);	/* Make room */
				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
				skb->protocol = eth_type_trans(skb, dev);
				if (!skb_defer_rx_timestamp(skb))
					netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				if (pkt_stat & ENRSR_PHY)
					dev->stats.multicast++;
			}
		} else {
			netif_err(ei_local, rx_err, dev,
				  "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
				  rx_frame.status, rx_frame.next,
				  rx_frame.count);
			dev->stats.rx_errors++;
			/* NB: The NIC counts CRC, frame and missed errors itself. */
			if (pkt_stat & ENRSR_FO)
				dev->stats.rx_fifo_errors++;
		}
		next_frame = rx_frame.next;

		/* This _should_ never happen: it's here to catch bad clones. */
		if (next_frame >= ei_local->stop_page) {
			netdev_notice(dev, "next frame inconsistency, %#2x\n",
				      next_frame);
			next_frame = ei_local->rx_start_page;
		}
		ei_local->current_page = next_frame;
		ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
	}

	/*
	 * Ack only the RX interrupts here; acking ENISR_OVER as well could
	 * mask a real overrun and leave the 8390 stopped with the receiver
	 * off.
	 */
	ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
}
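/*
 * ei_rx_overrun - handle a receive-ring overrun
 * @dev: network device
 *
 * Recovery sequence for an 8390 receiver overrun: stop the chip, wait
 * out any in-flight transmit, drain the receive ring with transmit in
 * loopback, then restore normal transmit configuration and, if needed,
 * resend the frame whose transmission was interrupted.
 */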
static void ei_rx_overrun(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	unsigned char was_txing, must_resend = 0;
	/* ei_local is used on some platforms via the EI_SHIFT() macro. */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);

	/*
	 * Record whether a Tx was in progress and then issue the
	 * stop command.
	 */
	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
	dev->stats.rx_over_errors++;

	/*
	 * Wait for any transmit to finish: a full Tx time (1.2ms) plus
	 * guard time.  We wait a generous 10ms here.
	 */
	mdelay(10);

	/*
	 * Reset RBCR[01] back to zero as per magic incantation.
	 */
	ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base+EN0_RCNTHI);

	/*
	 * See if any Tx was interrupted or not.  According to NS, this
	 * step is vital, and skipping it will cause no end of havoc.
	 */
	if (was_txing) {
		unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
		if (!tx_completed)
			must_resend = 1;
	}

	/*
	 * Have to enter loopback mode and then restart the NIC before
	 * you are allowed to slurp packets up off the ring.
	 */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);

	/*
	 * Clear the Rx ring of all the debris, and ack the interrupt.
	 */
	ei_receive(dev);
	ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);

	/*
	 * Leave loopback mode, and resend any packet that got stopped.
	 */
	ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
	if (must_resend)
		ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
}
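/*
 * __ei_get_stats - collect the device statistics
 * @dev: network device
 *
 * Folds the chip's on-board tally counters into dev->stats.  If the
 * interface is down the counters cannot be read, so the cached values
 * are returned unchanged.
 */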
static struct net_device_stats *__ei_get_stats(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/* If the card is stopped, just return the present stats. */
	if (!netif_running(dev))
		return &dev->stats;

	spin_lock_irqsave(&ei_local->page_lock, flags);
	/* Read the counter registers, assuming we are in page 0. */
	dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
	dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
	dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	return &dev->stats;
}
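/*
 * make_mc_bits - build the 64-bit multicast hash filter
 * @bits: 8-byte filter array to fill in
 * @dev: device whose multicast list is to be hashed
 *
 * The 8390 uses the top six bits of the Ethernet CRC of each multicast
 * address to select one of 64 filter bits.
 */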
static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		u32 crc = ether_crc(ETH_ALEN, ha->addr);
		/*
		 * The 8390 uses the 6 most significant bits of the
		 * CRC to index the multicast table.
		 */
		bits[crc>>29] |= (1<<((crc>>26)&7));
	}
}
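/*
 * do_set_multicast_list - program the multicast filter and RX mode
 * @dev: network device
 *
 * Loads the hash filter into the page-1 multicast registers and sets
 * the receive configuration for promiscuous, all-multicast or normal
 * operation.  Must be called with the page lock held.
 */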
static void do_set_multicast_list(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	int i;
	struct ei_device *ei_local = netdev_priv(dev);

	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
		memset(ei_local->mcfilter, 0, 8);
		if (!netdev_mc_empty(dev))
			make_mc_bits(ei_local->mcfilter, dev);
	} else
		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */

	/*
	 * The DP8390 manuals don't specify any magic sequence for altering
	 * the multicast regs on an already running card.  To be safe, we
	 * ensure multicast mode is off prior to loading up the new hash
	 * table.  If this proves to be not enough, we can always resort
	 * to stopping the NIC, loading the table and then restarting.
	 *
	 * Bug Alert!  The MC regs on the SMC 83C690 (SMC Elite and SMC
	 * Elite16) appear to be write-only.  The NS 8390 data sheet lists
	 * them as r/w, so this is a bug.  The SMC 83C790 (SMC Ultra and
	 * Ultra32 EISA) appears to have this bug fixed.
	 */
	if (netif_running(dev))
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
	for (i = 0; i < 8; i++) {
		ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
#ifndef BUG_83C690
		if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
			netdev_err(dev, "Multicast filter read/write mismatch %d\n",
				   i);
#endif
	}
	ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);

	if (dev->flags&IFF_PROMISC)
		ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
	else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
		ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
	else
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
}
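/*
 * __ei_set_multicast_list - locked entry point for RX-mode updates
 * @dev: network device
 *
 * Takes the page lock and calls do_set_multicast_list().
 */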
static void __ei_set_multicast_list(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = netdev_priv(dev);

	spin_lock_irqsave(&ei_local->page_lock, flags);
	do_set_multicast_list(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
}
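/*
 * ethdev_setup - common 8390 net_device initialisation
 * @dev: freshly allocated network device
 *
 * Standard Ethernet setup plus initialisation of the 8390 private
 * state (page lock and message level).
 */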
static void ethdev_setup(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);

	if ((msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
		pr_info("%s", version);

	ether_setup(dev);

	spin_lock_init(&ei_local->page_lock);

	ei_local->msg_enable = msg_enable;
}
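/*
 * ____alloc_ei_netdev - allocate an "eth%d" device with 8390 state
 * @size: extra bytes of board-private data to allocate after
 *        struct ei_device
 */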
static struct net_device *____alloc_ei_netdev(int size)
{
	return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
			    NET_NAME_UNKNOWN, ethdev_setup);
}
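/*
 * __NS8390_init - initialise (or stop) the 8390 hardware
 * @dev: network device
 * @startp: non-zero to start the chip, zero to leave it stopped
 *
 * Performs the full NS8390 initialisation sequence: data configuration,
 * transmit/receive ring setup, station address programming and, when
 * @startp is set, unmasking of interrupts and entry into normal
 * receive/transmit mode.  Must be called with the page lock held.
 */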
static void __NS8390_init(struct net_device *dev, int startp)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int i;
	int endcfg = ei_local->word16
		? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
		: 0x48;

	if (sizeof(struct e8390_pkt_hdr) != 4)
		panic("8390.c: header struct mispacked\n");
	/* Follow National Semi's recommendations for initialising the DP83902. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
	ei_outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */
	/* Clear the remote byte count registers. */
	ei_outb_p(0x00, e8390_base + EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base + EN0_RCNTHI);
	/* Set to monitor and loopback mode -- this is vital! */
	ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR);
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
	/* Set the transmit page and receive ring. */
	ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);
	ei_local->current_page = ei_local->rx_start_page;	/* assert boundary+1 */
	ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
	/* Clear the pending interrupts and mask. */
	ei_outb_p(0xFF, e8390_base + EN0_ISR);
	ei_outb_p(0x00, e8390_base + EN0_IMR);

	/* Copy the station address into the DS8390 registers. */
	ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD);
	for (i = 0; i < 6; i++) {
		ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
		if ((netif_msg_probe(ei_local)) &&
		    ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
			netdev_err(dev,
				   "Hw. address read/write mismatch %d\n", i);
	}

	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	ei_local->tx1 = ei_local->tx2 = 0;
	ei_local->txing = 0;

	if (startp) {
		ei_outb_p(0xff, e8390_base + EN0_ISR);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
		ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on */
		/* Set the RX configuration only after the NIC is started. */
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on */
		do_set_multicast_list(dev);	/* (re)load the mcast table */
	}
}
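/*
 * NS8390_trigger_send - kick off transmission of a prepared frame
 * @dev: network device
 * @length: frame length in bytes
 * @start_page: on-card page where the frame starts
 *
 * Called with the page lock held.
 */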
static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
				int start_page)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);

	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);

	if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
		netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
		return;
	}
	ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
	ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
	ei_outb_p(start_page, e8390_base + EN0_TPSR);
	ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
}