1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53#include <linux/module.h>
54#include <linux/kernel.h>
55#include <linux/jiffies.h>
56#include <linux/fs.h>
57#include <linux/types.h>
58#include <linux/string.h>
59#include <linux/bitops.h>
60#include <asm/system.h>
61#include <linux/uaccess.h>
62#include <linux/io.h>
63#include <asm/irq.h>
64#include <linux/delay.h>
65#include <linux/errno.h>
66#include <linux/fcntl.h>
67#include <linux/in.h>
68#include <linux/interrupt.h>
69#include <linux/init.h>
70#include <linux/crc32.h>
71
72#include <linux/netdevice.h>
73#include <linux/etherdevice.h>
74
75#define NS8390_CORE
76#include "8390.h"
77
78#define BUG_83C690
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
/* Convenience accessors: each board-specific driver plugs its own
 * low-level routines into struct ei_device; these macros let the core
 * call them as if they were ordinary functions (ei_local must be in
 * scope at every use site). */
#define ei_reset_8390 (ei_local->reset_8390)
#define ei_block_output (ei_local->block_output)
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)

/* Debug verbosity; a port may pre-define ei_debug to override this. */
#ifndef ei_debug
int ei_debug = 1;
#endif

/* Interrupt-side helpers (defined below). */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
static void ei_receive(struct net_device *dev);
static void ei_rx_overrun(struct net_device *dev);

/* Core helpers (defined below). */
static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
				int start_page);
static void do_set_multicast_list(struct net_device *dev);
static void __NS8390_init(struct net_device *dev, int startp);
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
/*
 * __ei_open - bring the 8390 up and start the transmit queue.
 *
 * Initializes the chip (startp=1) under page_lock so the interrupt
 * handler cannot interleave with the register setup.
 *
 * Returns 0 (always succeeds from this layer's point of view).
 */
static int __ei_open(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = netdev_priv(dev);

	/* Supply a default Tx watchdog period if the port driver set none. */
	if (dev->watchdog_timeo <= 0)
		dev->watchdog_timeo = TX_TIMEOUT;

	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 1);

	netif_start_queue(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	/* Clear irqlock only after dropping the lock: the interrupt handler
	 * bails out (IRQ_NONE) while irqlock is set, so this is the point
	 * where interrupts are allowed to be serviced again. */
	ei_local->irqlock = 0;
	return 0;
}
224
225
226
227
228
229
230
/*
 * __ei_close - shut the 8390 down and stop the transmit queue.
 *
 * Re-runs the init sequence with startp=0, which leaves the chip
 * stopped with interrupts masked.  Returns 0.
 */
static int __ei_close(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/* Hold page_lock so a late interrupt cannot race the shutdown
	 * register writes. */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 0);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	netif_stop_queue(dev);
	return 0;
}
246
247
248
249
250
251
252
253
254
/*
 * __ei_tx_timeout - net_device watchdog hook for a hung transmitter.
 *
 * Snapshots TSR/ISR for diagnostics, then resets and re-initializes
 * the chip with the device IRQ disabled, and finally wakes the queue.
 */
static void __ei_tx_timeout(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
	unsigned long flags;

	dev->stats.tx_errors++;

	/* Read the status registers under page_lock for a consistent pair. */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	txsr = ei_inb(e8390_base+EN0_TSR);
	isr = ei_inb(e8390_base+EN0_ISR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
		   (txsr & ENTSR_ABT) ? "excess collisions." :
		   (isr) ? "lost interrupt?" : "cable problem?",
		   txsr, isr, tickssofar);

	if (!isr && !dev->stats.tx_packets) {
		/* No interrupts seen and nothing ever sent: try the other
		 * interface/transceiver on dual-port boards. */
		ei_local->interface_num ^= 1;
	}

	/* Full reset: keep the IRQ handler out both via disable_irq and
	 * via page_lock while the chip is reprogrammed. */
	disable_irq_nosync_lockdep(dev->irq);
	spin_lock(&ei_local->page_lock);

	ei_reset_8390(dev);
	__NS8390_init(dev, 1);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep(dev->irq);
	netif_wake_queue(dev);
}
292
293
294
295
296
297
298
299
300
/*
 * __ei_start_xmit - queue a packet into one of the two on-card Tx buffers.
 *
 * The 8390 has a ping-pong pair of transmit buffers (tracked by
 * ei_local->tx1/tx2; 0 = free, >0 = loaded length, -1 = transmitting).
 * The frame is copied to a free buffer and, if the transmitter is idle,
 * started immediately; otherwise it waits for ei_tx_intr().
 *
 * Returns NETDEV_TX_OK on success, NETDEV_TX_BUSY when both buffers
 * are occupied (the skb is not consumed in that case).
 */
static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int send_length = skb->len, output_page;
	unsigned long flags;
	char buf[ETH_ZLEN];
	char *data = skb->data;

	/* Pad runt frames to the Ethernet minimum with zeroes so no
	 * stale stack/skb bytes leak onto the wire. */
	if (skb->len < ETH_ZLEN) {
		memset(buf, 0, ETH_ZLEN);
		memcpy(buf, data, skb->len);
		send_length = ETH_ZLEN;
		data = buf;
	}

	/* Step 1: mask the chip's interrupt sources (IMR=0) under the
	 * lock, so a new interrupt cannot fire after we proceed. */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, e8390_base + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	/* Step 2: disable the IRQ line itself and retake the lock; from
	 * here the remote-DMA transfer cannot be interrupted. */
	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);

	spin_lock(&ei_local->page_lock);

	/* Tell a spurious interrupt (e.g. shared IRQ) to back off. */
	ei_local->irqlock = 1;

	/* Pick a free half of the ping-pong buffer. */
	if (ei_local->tx1 == 0) {
		output_page = ei_local->tx_start_page;
		ei_local->tx1 = send_length;
		if (ei_debug && ei_local->tx2 > 0)
			netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx2, ei_local->lasttx, ei_local->txing);
	} else if (ei_local->tx2 == 0) {
		output_page = ei_local->tx_start_page + TX_PAGES/2;
		ei_local->tx2 = send_length;
		if (ei_debug && ei_local->tx1 > 0)
			netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx1, ei_local->lasttx, ei_local->txing);
	} else {
		/* Both buffers busy: undo the masking and ask the stack
		 * to requeue the skb. */
		if (ei_debug)
			netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
				   ei_local->tx1, ei_local->tx2, ei_local->lasttx);
		ei_local->irqlock = 0;
		netif_stop_queue(dev);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		spin_unlock(&ei_local->page_lock);
		enable_irq_lockdep_irqrestore(dev->irq, &flags);
		dev->stats.tx_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Copy the frame to card memory via the port's block_output. */
	ei_block_output(dev, send_length, data, output_page);

	if (!ei_local->txing) {
		/* Transmitter idle: fire this buffer now.  lasttx -1/-2
		 * records which half is on the wire. */
		ei_local->txing = 1;
		NS8390_trigger_send(dev, send_length, output_page);
		if (output_page == ei_local->tx_start_page) {
			ei_local->tx1 = -1;
			ei_local->lasttx = -1;
		} else {
			ei_local->tx2 = -1;
			ei_local->lasttx = -2;
		}
	} else
		ei_local->txqueue++;	/* ei_tx_intr() will start it. */

	if (ei_local->tx1 && ei_local->tx2)
		netif_stop_queue(dev);
	else
		netif_start_queue(dev);

	/* Re-enable chip interrupts and the IRQ line. */
	ei_local->irqlock = 0;
	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);
	skb_tx_timestamp(skb);
	dev_kfree_skb(skb);
	dev->stats.tx_bytes += send_length;

	return NETDEV_TX_OK;
}
409
410
411
412
413
414
415
416
417
418
419
420
421
/*
 * __ei_interrupt - the generic 8390 interrupt service routine.
 *
 * Loops reading the ISR and dispatching to the rx/tx/overrun/counter
 * handlers until the ISR is clear or MAX_SERVICE events have been
 * processed.  Returns IRQ_HANDLED if anything was serviced, IRQ_NONE
 * otherwise (shared-IRQ friendly).
 */
static irqreturn_t __ei_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned long e8390_base = dev->base_addr;
	int interrupts, nr_serviced = 0;
	struct ei_device *ei_local = netdev_priv(dev);

	/* Serialize against the transmit path and timer work. */
	spin_lock(&ei_local->page_lock);

	if (ei_local->irqlock) {
		/* __ei_start_xmit is juggling registers with the IMR
		 * masked; this should not happen, so shout about it and
		 * let a shared-IRQ sibling (if any) handle the line. */
		netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
			   ei_inb_p(e8390_base + EN0_ISR),
			   ei_inb_p(e8390_base + EN0_IMR));
		spin_unlock(&ei_local->page_lock);
		return IRQ_NONE;
	}

	/* Switch to page 0 so EN0_* register reads below are valid. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
	if (ei_debug > 3)
		netdev_dbg(dev, "interrupt(isr=%#2.2x)\n",
			   ei_inb_p(e8390_base + EN0_ISR));

	/* Service events until the ISR clears, bounded by MAX_SERVICE. */
	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
	       ++nr_serviced < MAX_SERVICE) {
		if (!netif_running(dev)) {
			netdev_warn(dev, "interrupt from stopped card\n");
			/* Ack whatever is pending and stop. */
			ei_outb_p(interrupts, e8390_base + EN0_ISR);
			interrupts = 0;
			break;
		}
		/* Overrun takes priority; it subsumes normal receive. */
		if (interrupts & ENISR_OVER)
			ei_rx_overrun(dev);
		else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
			ei_receive(dev);
		}

		if (interrupts & ENISR_TX)
			ei_tx_intr(dev);
		else if (interrupts & ENISR_TX_ERR)
			ei_tx_err(dev);

		if (interrupts & ENISR_COUNTERS) {
			/* Reading the tally counters also clears them. */
			dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
			dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
			dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR);
		}

		/* Remote-DMA-complete is not used here; just ack it. */
		if (interrupts & ENISR_RDC)
			ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);

		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
	}

	if (interrupts && ei_debug) {
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
		if (nr_serviced >= MAX_SERVICE) {
			/* 0xFF usually means the card is gone/unplugged;
			 * don't spam the log for that. */
			if (interrupts != 0xFF)
				netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
					    interrupts);
			ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR);
		} else {
			netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
			ei_outb_p(0xff, e8390_base + EN0_ISR);	/* Ack all. */
		}
	}
	spin_unlock(&ei_local->page_lock);
	return IRQ_RETVAL(nr_serviced > 0);
}
505
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * __ei_poll - netpoll hook: run the ISR with the IRQ line disabled so
 * e.g. netconsole can make progress without hardware interrupts.
 */
static void __ei_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	__ei_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
/*
 * ei_tx_err - handle a transmit-error interrupt (ENISR_TX_ERR).
 *
 * If the frame was aborted or the FIFO underran, a Tx buffer was still
 * freed, so fall through to the normal ei_tx_intr() completion path;
 * otherwise just account the error.  Called under page_lock from
 * __ei_interrupt.
 */
static void ei_tx_err(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	/* ei_local only needed by some ei_* accessor implementations. */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);

#ifdef VERBOSE_ERROR_DUMP
	netdev_dbg(dev, "transmitter error (%#2x):", txsr);
	if (txsr & ENTSR_ABT)
		pr_cont(" excess-collisions ");
	if (txsr & ENTSR_ND)
		pr_cont(" non-deferral ");
	if (txsr & ENTSR_CRS)
		pr_cont(" lost-carrier ");
	if (txsr & ENTSR_FU)
		pr_cont(" FIFO-underrun ");
	if (txsr & ENTSR_CDH)
		pr_cont(" lost-heartbeat ");
	pr_cont("\n");
#endif

	/* Ack the TX-error interrupt. */
	ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR);

	if (tx_was_aborted)
		ei_tx_intr(dev);
	else {
		dev->stats.tx_errors++;
		if (txsr & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (txsr & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (txsr & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
}
566
567
568
569
570
571
572
573
574
575static void ei_tx_intr(struct net_device *dev)
576{
577 unsigned long e8390_base = dev->base_addr;
578 struct ei_device *ei_local = netdev_priv(dev);
579 int status = ei_inb(e8390_base + EN0_TSR);
580
581 ei_outb_p(ENISR_TX, e8390_base + EN0_ISR);
582
583
584
585
586
587 ei_local->txqueue--;
588
589 if (ei_local->tx1 < 0) {
590 if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
591 pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
592 ei_local->name, ei_local->lasttx, ei_local->tx1);
593 ei_local->tx1 = 0;
594 if (ei_local->tx2 > 0) {
595 ei_local->txing = 1;
596 NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
597 dev->trans_start = jiffies;
598 ei_local->tx2 = -1,
599 ei_local->lasttx = 2;
600 } else
601 ei_local->lasttx = 20, ei_local->txing = 0;
602 } else if (ei_local->tx2 < 0) {
603 if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
604 pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
605 ei_local->name, ei_local->lasttx, ei_local->tx2);
606 ei_local->tx2 = 0;
607 if (ei_local->tx1 > 0) {
608 ei_local->txing = 1;
609 NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
610 dev->trans_start = jiffies;
611 ei_local->tx1 = -1;
612 ei_local->lasttx = 1;
613 } else
614 ei_local->lasttx = 10, ei_local->txing = 0;
615 }
616
617
618
619
620
621 if (status & ENTSR_COL)
622 dev->stats.collisions++;
623 if (status & ENTSR_PTX)
624 dev->stats.tx_packets++;
625 else {
626 dev->stats.tx_errors++;
627 if (status & ENTSR_ABT) {
628 dev->stats.tx_aborted_errors++;
629 dev->stats.collisions += 16;
630 }
631 if (status & ENTSR_CRS)
632 dev->stats.tx_carrier_errors++;
633 if (status & ENTSR_FU)
634 dev->stats.tx_fifo_errors++;
635 if (status & ENTSR_CDH)
636 dev->stats.tx_heartbeat_errors++;
637 if (status & ENTSR_OWC)
638 dev->stats.tx_window_errors++;
639 }
640 netif_wake_queue(dev);
641}
642
643
644
645
646
647
648
649
650
/*
 * ei_receive - drain received frames from the card's ring buffer.
 *
 * Walks the on-card receive ring from BOUNDARY+1 toward CURPAG,
 * copying each sane frame into an skb via block_input and advancing
 * the boundary pointer.  At most 10 packets per call so one busy
 * receiver cannot monopolize the ISR.  Called under page_lock.
 */
static void ei_receive(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned char rxing_page, this_frame, next_frame;
	unsigned short current_offset;
	int rx_pkt_count = 0;
	struct e8390_pkt_hdr rx_frame;
	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;

	while (++rx_pkt_count < 10) {
		int pkt_len, pkt_stat;

		/* CURPAG (where the chip is writing) lives on page 1. */
		ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
		rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
		ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);

		/* Oldest unread frame starts one page past BOUNDARY,
		 * wrapping at stop_page. */
		this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
		if (this_frame >= ei_local->stop_page)
			this_frame = ei_local->rx_start_page;

		/* Sanity check against our mirror of the read pointer.
		 * The (0x0, 0xFF) pair is tolerated; presumably a
		 * known pre-initialization state — TODO confirm. */
		if (ei_debug > 0 &&
		    this_frame != ei_local->current_page &&
		    (this_frame != 0x0 || rxing_page != 0xFF))
			netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
				   this_frame, ei_local->current_page);

		/* Caught up with the writer: ring is empty. */
		if (this_frame == rxing_page)
			break;

		/* Ring pages are 256 bytes; page number << 8 = byte offset. */
		current_offset = this_frame << 8;
		ei_get_8390_hdr(dev, &rx_frame, this_frame);

		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
		pkt_stat = rx_frame.status;

		/* Expected next-frame page from the length (+4 header). */
		next_frame = this_frame + 1 + ((pkt_len+4)>>8);

		/* If the header's next pointer disagrees badly (allowing
		 * for wrap and an off-by-one quirk), the ring is corrupt:
		 * resync to the writer and drop everything pending. */
		if (rx_frame.next != next_frame &&
		    rx_frame.next != next_frame + 1 &&
		    rx_frame.next != next_frame - num_rx_pages &&
		    rx_frame.next != next_frame + 1 - num_rx_pages) {
			ei_local->current_page = rxing_page;
			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
			dev->stats.rx_errors++;
			continue;
		}

		if (pkt_len < 60 || pkt_len > 1518) {
			/* Outside legal Ethernet frame sizes. */
			if (ei_debug)
				netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
					   rx_frame.count, rx_frame.status,
					   rx_frame.next);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
		} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len+2);
			if (skb == NULL) {
				if (ei_debug > 1)
					netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
						   pkt_len);
				dev->stats.rx_dropped++;
				/* Leave boundary alone; retry next IRQ. */
				break;
			} else {
				/* +2 reserve aligns the IP header. */
				skb_reserve(skb, 2);
				skb_put(skb, pkt_len);
				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
				skb->protocol = eth_type_trans(skb, dev);
				if (!skb_defer_rx_timestamp(skb))
					netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				if (pkt_stat & ENRSR_PHY)
					dev->stats.multicast++;
			}
		} else {
			if (ei_debug)
				netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
					   rx_frame.status, rx_frame.next,
					   rx_frame.count);
			dev->stats.rx_errors++;
			/* ENRSR_FO = FIFO overrun. */
			if (pkt_stat & ENRSR_FO)
				dev->stats.rx_fifo_errors++;
		}
		next_frame = rx_frame.next;

		/* Should never trigger given the checks above. */
		if (next_frame >= ei_local->stop_page) {
			netdev_notice(dev, "next frame inconsistency, %#2x\n",
				      next_frame);
			next_frame = ei_local->rx_start_page;
		}
		ei_local->current_page = next_frame;
		/* BOUNDARY trails the read pointer by one page. */
		ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
	}

	/* Ack both RX interrupt sources in one write. */
	ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
}
765
766
767
768
769
770
771
772
773
774
775
776
777
778
/*
 * ei_rx_overrun - recover from a receiver-ring overrun (ENISR_OVER).
 *
 * Implements the stop / wait / clear-DMA / drain / restart sequence,
 * remembering whether a transmit was in flight so it can be re-issued
 * afterwards.  Called under page_lock from __ei_interrupt.
 */
static void ei_rx_overrun(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	unsigned char was_txing, must_resend = 0;
	/* ei_local only needed by some ei_* accessor implementations. */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);

	/* Record whether the transmitter was running, then stop the chip
	 * (Tx and Rx both halt). */
	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	if (ei_debug > 1)
		netdev_dbg(dev, "Receiver overrun\n");
	dev->stats.rx_over_errors++;

	/* Give any in-progress frame time to finish draining into the
	 * ring before we touch the DMA machinery. */
	mdelay(10);

	/* Zero the remote byte count; required before restarting after
	 * a stop. */
	ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base+EN0_RCNTHI);

	/* If a transmit was in flight and has NOT completed (no TX/TX_ERR
	 * bit in the ISR), it was killed by the stop and must be resent. */
	if (was_txing) {
		unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
		if (!tx_completed)
			must_resend = 1;
	}

	/* Enter loopback mode (transmitter off the wire) while the ring
	 * is drained, then restart the chip. */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);

	/* Empty the ring and ack the overrun. */
	ei_receive(dev);
	ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);

	/* Back to normal transmit config; re-issue the killed transmit
	 * if needed. */
	ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
	if (must_resend)
		ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
}
843
844
845
846
847
848static struct net_device_stats *__ei_get_stats(struct net_device *dev)
849{
850 unsigned long ioaddr = dev->base_addr;
851 struct ei_device *ei_local = netdev_priv(dev);
852 unsigned long flags;
853
854
855 if (!netif_running(dev))
856 return &dev->stats;
857
858 spin_lock_irqsave(&ei_local->page_lock, flags);
859
860 dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
861 dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
862 dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
863 spin_unlock_irqrestore(&ei_local->page_lock, flags);
864
865 return &dev->stats;
866}
867
868
869
870
871
872
873static inline void make_mc_bits(u8 *bits, struct net_device *dev)
874{
875 struct netdev_hw_addr *ha;
876
877 netdev_for_each_mc_addr(ha, dev) {
878 u32 crc = ether_crc(ETH_ALEN, ha->addr);
879
880
881
882
883 bits[crc>>29] |= (1<<((crc>>26)&7));
884 }
885}
886
887
888
889
890
891
892
893
894
/*
 * do_set_multicast_list - program the multicast hash filter and RX mode.
 *
 * Builds the 8-byte hash filter (all-ones for promiscuous/allmulti),
 * writes it to the page-1 multicast registers, then sets EN0_RXCR to
 * the matching receive configuration.  Caller holds page_lock (see
 * __ei_set_multicast_list) or is the single-threaded init path.
 */
static void do_set_multicast_list(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	int i;
	struct ei_device *ei_local = netdev_priv(dev);

	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
		/* Normal mode: hash only the subscribed addresses. */
		memset(ei_local->mcfilter, 0, 8);
		if (!netdev_mc_empty(dev))
			make_mc_bits(ei_local->mcfilter, dev);
	} else
		/* Promiscuous/allmulti: accept every multicast hash. */
		memset(ei_local->mcfilter, 0xFF, 8);

	/* Drop to the known-safe RX config before flipping register
	 * pages, then write the filter on page 1. */
	if (netif_running(dev))
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
	for (i = 0; i < 8; i++) {
		ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
#ifndef BUG_83C690
		/* Read-back check; skipped because the 83C690 clone cannot
		 * reliably read these registers back (see BUG_83C690). */
		if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
			netdev_err(dev, "Multicast filter read/write mismap %d\n",
				   i);
#endif
	}
	ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);

	/* 0x18 = promiscuous + multicast, 0x08 = multicast only.
	 * NOTE(review): magic RXCR bits — presumably match the DP8390
	 * PRO/AM bits; verify against the datasheet. */
	if (dev->flags&IFF_PROMISC)
		ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
	else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
		ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
	else
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
}
941
942
943
944
945
946
947
/*
 * __ei_set_multicast_list - net_device hook; wraps
 * do_set_multicast_list() in page_lock so it cannot race the ISR or
 * the transmit path.
 */
static void __ei_set_multicast_list(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = netdev_priv(dev);

	spin_lock_irqsave(&ei_local->page_lock, flags);
	do_set_multicast_list(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
}
957
958
959
960
961
962
963
964
965
/*
 * ethdev_setup - alloc_netdev() setup callback: generic Ethernet init
 * plus our page_lock.  `version` is assumed to be a banner string
 * declared elsewhere in this file/driver — not visible here.
 */
static void ethdev_setup(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	if (ei_debug > 1)
		printk(version);

	ether_setup(dev);

	spin_lock_init(&ei_local->page_lock);
}
976
977
978
979
980
981
982
/*
 * ____alloc_ei_netdev - allocate an "eth%d" net_device whose private
 * area holds a struct ei_device plus `size` extra bytes for the port
 * driver's own state.  Returns NULL on allocation failure.
 */
static struct net_device *____alloc_ei_netdev(int size)
{
	return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
			    ethdev_setup);
}
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
/*
 * __NS8390_init - program the 8390 into a known state.
 *
 * @startp: nonzero to start the chip (interrupts enabled, transmitter
 *          on, multicast filter programmed); zero to leave it stopped
 *          with all interrupts masked.
 *
 * Caller must hold page_lock (or be single-threaded, e.g. probe).
 */
static void __NS8390_init(struct net_device *dev, int startp)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int i;
	/* Data-config: word-wide transfers (and byte order) only when the
	 * port driver declared a 16-bit card. */
	int endcfg = ei_local->word16
	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
	    : 0x48;

	/* The on-card ring header must be exactly 4 bytes or every
	 * offset calculation in ei_receive() breaks. */
	if (sizeof(struct e8390_pkt_hdr) != 4)
		panic("8390.c: header struct mispacked\n");

	/* Stop the chip, set data config, clear remote byte count. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
	ei_outb_p(endcfg, e8390_base + EN0_DCFG);

	ei_outb_p(0x00, e8390_base + EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base + EN0_RCNTHI);

	/* Receiver to monitor mode, transmitter to loopback while we
	 * program the ring boundaries. */
	ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR);
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);

	/* Set up the receive ring: START..STOP, boundary one page back. */
	ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);
	ei_local->current_page = ei_local->rx_start_page;
	ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);

	/* Ack any pending interrupts, mask everything for now. */
	ei_outb_p(0xFF, e8390_base + EN0_ISR);
	ei_outb_p(0x00, e8390_base + EN0_IMR);

	/* Station address lives on register page 1. */
	ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD);
	for (i = 0; i < 6; i++) {
		ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
		if (ei_debug > 1 &&
		    ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
			netdev_err(dev, "Hw. address read/write mismap %d\n", i);
	}

	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	ei_local->tx1 = ei_local->tx2 = 0;
	ei_local->txing = 0;

	if (startp) {
		/* Unmask interrupts, start the chip, take the transmitter
		 * out of loopback, and program the RX mode/filter. */
		ei_outb_p(0xff, e8390_base + EN0_ISR);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
		ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);

		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
		do_set_multicast_list(dev);
	}
}
1060
1061
1062
1063
/*
 * NS8390_trigger_send - fire a transmit of `length` bytes starting at
 * card page `start_page`.
 *
 * Caller holds page_lock.  If the transmitter is somehow still busy,
 * the request is dropped with a warning rather than corrupting the
 * in-flight frame.
 */
static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
				int start_page)
{
	unsigned long e8390_base = dev->base_addr;
	/* ei_local only needed by some ei_* accessor implementations. */
	struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);

	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);

	if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
		netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
		return;
	}
	/* Byte count, start page, then the TXP command bit. */
	ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
	ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
	ei_outb_p(start_page, e8390_base + EN0_TPSR);
	ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
}
1081