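/*
 * 8390 core: generic support library for National Semiconductor NS8390
 * (and compatible) Ethernet controllers.  Board-specific drivers include
 * this core and supply the low-level reset/block-I/O hooks declared in
 * 8390.h via struct ei_device.
 */

/*
 * Version banner printed by ethdev_setup() when ei_debug > 1; the text
 * below is the historical 8390 banner.
 */
static const char version[] =
	"8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";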
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/crc32.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#define NS8390_CORE
#include "8390.h"

#define BUG_83C690
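/*
 * These macros dispatch to the board-specific routines that each 8390
 * driver registers in its struct ei_device: chip reset, block output to
 * the transmit buffer, block input from the receive ring, and reading a
 * packet header out of the ring.
 */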
#define ei_reset_8390 (ei_local->reset_8390)
#define ei_block_output (ei_local->block_output)
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)
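/* Debug verbosity: 0 for production, 1 for light checking, >1 for noisy debugging. */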
#ifndef ei_debug
int ei_debug = 1;
#endif

static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
static void ei_receive(struct net_device *dev);
static void ei_rx_overrun(struct net_device *dev);

static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
				int start_page);
static void do_set_multicast_list(struct net_device *dev);
static void __NS8390_init(struct net_device *dev, int startp);
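/*
 * Locking rules this core relies on:
 *
 *  - ei_local->page_lock protects the 8390's banked register window; it is
 *    held around every multi-register sequence so the interrupt handler and
 *    the transmit/management paths never interleave register page switches.
 *  - ei_local->irqlock is set while the transmit path runs with the chip's
 *    interrupt mask cleared; __ei_interrupt backs off (returns IRQ_NONE)
 *    when it sees it, since no 8390 interrupt can legitimately arrive then.
 */

/**
 * __ei_open - Open/initialize the board
 * @dev: network device to initialize
 *
 * Programs the 8390 from scratch and starts the transmit queue.
 */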
static int __ei_open(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = netdev_priv(dev);

	if (dev->watchdog_timeo <= 0)
		dev->watchdog_timeo = TX_TIMEOUT;

	/*
	 * Grab the page lock so we own the register set, then call
	 * the init function.
	 */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 1);

	/*
	 * Start the queue before dropping the lock so that a wakeup from
	 * the interrupt handler cannot be missed.
	 */
	netif_start_queue(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	ei_local->irqlock = 0;
	return 0;
}
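/**
 * __ei_close - shut down the board
 * @dev: network device to close
 *
 * Stops the chip and the transmit queue.
 */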
static int __ei_close(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/*
	 * Hold the page lock while stopping the chip, then stop the queue.
	 */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 0);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	netif_stop_queue(dev);
	return 0;
}
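/**
 * __ei_tx_timeout - handle transmit timeout condition
 * @dev: network device which timed out
 *
 * Called by the networking layer when a transmit has not completed within
 * the watchdog period; dumps chip status, resets the 8390 and restarts
 * the queue.
 */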
static void __ei_tx_timeout(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
	unsigned long flags;

	dev->stats.tx_errors++;

	spin_lock_irqsave(&ei_local->page_lock, flags);
	txsr = ei_inb(e8390_base+EN0_TSR);
	isr = ei_inb(e8390_base+EN0_ISR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
		   (txsr & ENTSR_ABT) ? "excess collisions." :
		   (isr) ? "lost interrupt?" : "cable problem?",
		   txsr, isr, tickssofar);

	if (!isr && !dev->stats.tx_packets) {
		/* The 8390 probably hasn't gotten on the cable yet. */
		ei_local->interface_num ^= 1;	/* Try a different xcvr. */
	}

	/* Ugly, but a reset can be slow, yet must be protected. */

	disable_irq_nosync_lockdep(dev->irq);
	spin_lock(&ei_local->page_lock);

	/* Try to restart the card.  Perhaps the user has fixed something. */
	ei_reset_8390(dev);
	__NS8390_init(dev, 1);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep(dev->irq);
	netif_wake_queue(dev);
}
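/**
 * __ei_start_xmit - begin packet transmission
 * @skb: packet to be sent
 * @dev: network device to which packet is sent
 *
 * Copies the packet into one of the two on-chip transmit buffers and
 * triggers a send if the transmitter is idle; otherwise the send is
 * triggered later from the Tx-complete interrupt.
 */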
static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int send_length = skb->len, output_page;
	unsigned long flags;
	char buf[ETH_ZLEN];
	char *data = skb->data;

	/* Pad short frames up to the Ethernet minimum length. */
	if (skb->len < ETH_ZLEN) {
		memset(buf, 0, ETH_ZLEN);
		memcpy(buf, data, skb->len);
		send_length = ETH_ZLEN;
		data = buf;
	}

	/*
	 * Mask interrupts from the ethercard.  The page lock must be held
	 * here, otherwise the interrupt handler on another CPU could switch
	 * register pages and race the interrupt-mask write.
	 */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, e8390_base + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	/*
	 * Slow phase with the lock held.
	 */
	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);

	spin_lock(&ei_local->page_lock);

	ei_local->irqlock = 1;

	/*
	 * Two on-chip Tx buffers are available.  Pick the first free slot;
	 * with both in use the queue is stopped and the caller gets
	 * NETDEV_TX_BUSY.
	 */
	if (ei_local->tx1 == 0) {
		output_page = ei_local->tx_start_page;
		ei_local->tx1 = send_length;
		if (ei_debug && ei_local->tx2 > 0)
			netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx2, ei_local->lasttx, ei_local->txing);
	} else if (ei_local->tx2 == 0) {
		output_page = ei_local->tx_start_page + TX_PAGES/2;
		ei_local->tx2 = send_length;
		if (ei_debug && ei_local->tx1 > 0)
			netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx1, ei_local->lasttx, ei_local->txing);
	} else {			/* We should never get here. */
		if (ei_debug)
			netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
				   ei_local->tx1, ei_local->tx2, ei_local->lasttx);
		ei_local->irqlock = 0;
		netif_stop_queue(dev);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		spin_unlock(&ei_local->page_lock);
		enable_irq_lockdep_irqrestore(dev->irq, &flags);
		dev->stats.tx_errors++;
		return NETDEV_TX_BUSY;
	}

	/*
	 * Upload the packet and trigger a send if the transmitter is idle.
	 * If it is busy, the Tx-complete interrupt handler will trigger the
	 * send for us.
	 */
	ei_block_output(dev, send_length, data, output_page);

	if (!ei_local->txing) {
		ei_local->txing = 1;
		NS8390_trigger_send(dev, send_length, output_page);
		if (output_page == ei_local->tx_start_page) {
			ei_local->tx1 = -1;
			ei_local->lasttx = -1;
		} else {
			ei_local->tx2 = -1;
			ei_local->lasttx = -2;
		}
	} else
		ei_local->txqueue++;

	if (ei_local->tx1 && ei_local->tx2)
		netif_stop_queue(dev);
	else
		netif_start_queue(dev);

	/* Turn 8390 interrupts back on. */
	ei_local->irqlock = 0;
	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);
	skb_tx_timestamp(skb);
	dev_kfree_skb(skb);
	dev->stats.tx_bytes += send_length;

	return NETDEV_TX_OK;
}
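/**
 * __ei_interrupt - handle the interrupts from an 8390
 * @irq: interrupt number
 * @dev_id: a pointer to the net_device
 *
 * Services receive, transmit-complete, error and counter-overflow events
 * until the interrupt status register is clear or MAX_SERVICE events have
 * been handled.
 */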
static irqreturn_t __ei_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned long e8390_base = dev->base_addr;
	int interrupts, nr_serviced = 0;
	struct ei_device *ei_local = netdev_priv(dev);

	/*
	 * Protect the irq test and the register accesses below from the
	 * transmit path and from re-entry on another CPU.
	 */
	spin_lock(&ei_local->page_lock);

	if (ei_local->irqlock) {
		/*
		 * The transmit path has masked the 8390's interrupts, so this
		 * is most likely a shared interrupt from another device.
		 */
		netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
			   ei_inb_p(e8390_base + EN0_ISR),
			   ei_inb_p(e8390_base + EN0_IMR));
		spin_unlock(&ei_local->page_lock);
		return IRQ_NONE;
	}

	/* Change to page 0 and read the intr status reg. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
	if (ei_debug > 3)
		netdev_dbg(dev, "interrupt(isr=%#2.2x)\n",
			   ei_inb_p(e8390_base + EN0_ISR));

	/* The handlers below assume we stay on register page 0. */
	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
	       ++nr_serviced < MAX_SERVICE) {
		if (!netif_running(dev)) {
			netdev_warn(dev, "interrupt from stopped card\n");
			/* Acknowledge and clear whatever was pending. */
			ei_outb_p(interrupts, e8390_base + EN0_ISR);
			interrupts = 0;
			break;
		}
		if (interrupts & ENISR_OVER)
			ei_rx_overrun(dev);
		else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
			/* Got a good (?) packet. */
			ei_receive(dev);
		}
		/* Push the next to-transmit packet through. */
		if (interrupts & ENISR_TX)
			ei_tx_intr(dev);
		else if (interrupts & ENISR_TX_ERR)
			ei_tx_err(dev);

		if (interrupts & ENISR_COUNTERS) {
			dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
			dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
			dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR);
		}

		/* Ignore any RDC interrupts that make it back to here. */
		if (interrupts & ENISR_RDC)
			ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);

		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
	}

	if (interrupts && ei_debug) {
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
		if (nr_serviced >= MAX_SERVICE) {
			/* 0xFF is valid for a card removal. */
			if (interrupts != 0xFF)
				netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
					    interrupts);
			ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
		} else {
			netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
			ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
		}
	}
	spin_unlock(&ei_local->page_lock);
	return IRQ_RETVAL(nr_serviced > 0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling "interrupt" - used by netconsole and similar facilities that
 * need network I/O with interrupts disabled.
 */
static void __ei_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	__ei_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
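/**
 * ei_tx_err - handle transmitter error
 * @dev: network device which threw the exception
 *
 * A transmitter error has happened, most likely excess collisions (which
 * is a fairly normal condition).  If the error is one where the Tx was
 * aborted, try to send the next queued packet.
 */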
static void ei_tx_err(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	/* ei_local is referenced by the register access macros on some platforms */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);

#ifdef VERBOSE_ERROR_DUMP
	netdev_dbg(dev, "transmitter error (%#2x):", txsr);
	if (txsr & ENTSR_ABT)
		pr_cont(" excess-collisions ");
	if (txsr & ENTSR_ND)
		pr_cont(" non-deferral ");
	if (txsr & ENTSR_CRS)
		pr_cont(" lost-carrier ");
	if (txsr & ENTSR_FU)
		pr_cont(" FIFO-underrun ");
	if (txsr & ENTSR_CDH)
		pr_cont(" lost-heartbeat ");
	pr_cont("\n");
#endif

	ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */

	if (tx_was_aborted)
		ei_tx_intr(dev);
	else {
		dev->stats.tx_errors++;
		if (txsr & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (txsr & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (txsr & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
}
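/**
 * ei_tx_intr - transmit interrupt handler
 * @dev: network device for which tx intr is handled
 *
 * A transmit has finished: collect error statistics and trigger the send
 * of the next queued packet, if any.  Called with the page lock held.
 */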
static void ei_tx_intr(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int status = ei_inb(e8390_base + EN0_TSR);

	ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */

	/*
	 * There are two Tx buffers, see which one finished, and trigger
	 * the send of another one if it exists.
	 */
	ei_local->txqueue--;

	if (ei_local->tx1 < 0) {
		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
			pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
			       ei_local->name, ei_local->lasttx, ei_local->tx1);
		ei_local->tx1 = 0;
		if (ei_local->tx2 > 0) {
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
			dev->trans_start = jiffies;
			ei_local->tx2 = -1;
			ei_local->lasttx = 2;
		} else
			ei_local->lasttx = 20, ei_local->txing = 0;
	} else if (ei_local->tx2 < 0) {
		if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
			pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
			       ei_local->name, ei_local->lasttx, ei_local->tx2);
		ei_local->tx2 = 0;
		if (ei_local->tx1 > 0) {
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
			dev->trans_start = jiffies;
			ei_local->tx1 = -1;
			ei_local->lasttx = 1;
		} else
			ei_local->lasttx = 10, ei_local->txing = 0;
	}

	/* Minimize Tx latency: update the statistics after we restart TXing. */
	if (status & ENTSR_COL)
		dev->stats.collisions++;
	if (status & ENTSR_PTX)
		dev->stats.tx_packets++;
	else {
		dev->stats.tx_errors++;
		if (status & ENTSR_ABT) {
			dev->stats.tx_aborted_errors++;
			dev->stats.collisions += 16;
		}
		if (status & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (status & ENTSR_FU)
			dev->stats.tx_fifo_errors++;
		if (status & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (status & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
	netif_wake_queue(dev);
}
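/**
 * ei_receive - receive some packets
 * @dev: network device with which receive will be run
 *
 * We have a good packet(s); get it/them out of the ring buffer(s).
 * Called with the page lock held.
 */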
static void ei_receive(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned char rxing_page, this_frame, next_frame;
	unsigned short current_offset;
	int rx_pkt_count = 0;
	struct e8390_pkt_hdr rx_frame;
	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;

	while (++rx_pkt_count < 10) {
		int pkt_len, pkt_stat;

		/* Get the rx page (incoming packet pointer). */
		ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
		rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
		ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);

		/* Remove one frame from the ring.  Boundary is always a page behind. */
		this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
		if (this_frame >= ei_local->stop_page)
			this_frame = ei_local->rx_start_page;

		/*
		 * Sanity check on the read pointers; keep quiet if it looks
		 * like a card removal (0x0/0xFF), since some clones fail
		 * that way.
		 */
		if (ei_debug > 0 &&
		    this_frame != ei_local->current_page &&
		    (this_frame != 0x0 || rxing_page != 0xFF))
			netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
				   this_frame, ei_local->current_page);

		if (this_frame == rxing_page)	/* Read all the frames? */
			break;			/* Done for now */

		current_offset = this_frame << 8;
		ei_get_8390_hdr(dev, &rx_frame, this_frame);

		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
		pkt_stat = rx_frame.status;

		next_frame = this_frame + 1 + ((pkt_len+4)>>8);

		/*
		 * Sanity-check the chip's "next frame" pointer against the
		 * one computed from the packet length; resynchronize on a
		 * mismatch rather than trusting a bogus header.
		 */
		if (rx_frame.next != next_frame &&
		    rx_frame.next != next_frame + 1 &&
		    rx_frame.next != next_frame - num_rx_pages &&
		    rx_frame.next != next_frame + 1 - num_rx_pages) {
			ei_local->current_page = rxing_page;
			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
			dev->stats.rx_errors++;
			continue;
		}

		if (pkt_len < 60 || pkt_len > 1518) {
			if (ei_debug)
				netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
					   rx_frame.count, rx_frame.status,
					   rx_frame.next);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
		} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb(dev, pkt_len + 2);
			if (skb == NULL) {
				if (ei_debug > 1)
					netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
						   pkt_len);
				dev->stats.rx_dropped++;
				break;
			} else {
				skb_reserve(skb, 2);	/* IP headers on 16 byte boundaries */
				skb_put(skb, pkt_len);	/* Make room */
				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
				skb->protocol = eth_type_trans(skb, dev);
				if (!skb_defer_rx_timestamp(skb))
					netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				if (pkt_stat & ENRSR_PHY)
					dev->stats.multicast++;
			}
		} else {
			if (ei_debug)
				netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
					   rx_frame.status, rx_frame.next,
					   rx_frame.count);
			dev->stats.rx_errors++;
			/* NB: The NIC counts CRC, frame and missed errors. */
			if (pkt_stat & ENRSR_FO)
				dev->stats.rx_fifo_errors++;
		}
		next_frame = rx_frame.next;

		/* This _should_ never happen: it's here to catch bad clones. */
		if (next_frame >= ei_local->stop_page) {
			netdev_notice(dev, "next frame inconsistency, %#2x\n",
				      next_frame);
			next_frame = ei_local->rx_start_page;
		}
		ei_local->current_page = next_frame;
		ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
	}

	/*
	 * Acknowledge only the Rx interrupt sources handled here; acking
	 * ENISR_OVER as well could mask a real overrun.
	 */
	ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
}
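/**
 * ei_rx_overrun - handle receiver overrun
 * @dev: network device which threw the exception
 *
 * We have a receiver overrun: we have to kick the 8390 to get it started
 * again, following the recovery sequence National Semiconductor prescribes;
 * deviating from it can leave the NIC behaving unpredictably or deferring
 * indefinitely on a busy network.  Called with the page lock held.
 */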
static void ei_rx_overrun(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	unsigned char was_txing, must_resend = 0;
	/* ei_local is referenced by the register access macros on some platforms */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);

	/*
	 * Record whether a Tx was in progress and then issue the
	 * stop command.
	 */
	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	if (ei_debug > 1)
		netdev_dbg(dev, "Receiver overrun\n");
	dev->stats.rx_over_errors++;

	/*
	 * Wait for the chip to settle after the stop command.  The reset
	 * bit is not a reliable completion indicator, so simply delay for
	 * longer than a full transmit time.
	 */
	mdelay(10);

	/*
	 * Reset RBCR[01] back to zero as per magic incantation.
	 */
	ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base+EN0_RCNTHI);

	/*
	 * See if any Tx was interrupted or not.  According to NS, this
	 * step is vital, and skipping it will cause no end of havoc.
	 */
	if (was_txing) {
		unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
		if (!tx_completed)
			must_resend = 1;
	}

	/*
	 * Have to enter loopback mode and then restart the NIC before
	 * you are allowed to slurp packets up off the ring.
	 */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);

	/*
	 * Clear the Rx ring of all the debris, and ack the interrupt.
	 */
	ei_receive(dev);
	ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);

	/*
	 * Leave loopback mode, and resend any packet that got stopped.
	 */
	ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
	if (must_resend)
		ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
}
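/*
 * Collect the stats.  This is called unlocked and from several contexts.
 */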
static struct net_device_stats *__ei_get_stats(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/* If the card is stopped, just return the present stats. */
	if (!netif_running(dev))
		return &dev->stats;

	spin_lock_irqsave(&ei_local->page_lock, flags);
	/* Read the counter registers, assuming we are in page 0. */
	dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
	dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
	dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	return &dev->stats;
}
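/*
 * Form the 64 bit 8390 multicast table from the linked list of addresses
 * associated with this dev structure.
 */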
static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		u32 crc = ether_crc(ETH_ALEN, ha->addr);

		/*
		 * The 8390 uses the 6 most significant bits of the
		 * CRC to index the multicast table.
		 */
		bits[crc>>29] |= (1<<((crc>>26)&7));
	}
}
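/**
 * do_set_multicast_list - set/clear multicast filter
 * @dev: net device for which multicast filter is adjusted
 *
 * Set or clear the multicast filter for this adaptor.
 * Must be called with the page lock held.
 */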
static void do_set_multicast_list(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	int i;
	struct ei_device *ei_local = netdev_priv(dev);

	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
		memset(ei_local->mcfilter, 0, 8);
		if (!netdev_mc_empty(dev))
			make_mc_bits(ei_local->mcfilter, dev);
	} else
		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */

	/*
	 * The DP8390 manuals don't give a safe sequence for updating the
	 * multicast registers on a running card, so make sure multicast
	 * reception is disabled before loading the new hash table.
	 *
	 * Bug alert: the multicast registers on the SMC 83C690 appear to be
	 * write-only, so the read-back verification below is compiled out
	 * when BUG_83C690 is defined.
	 */
	if (netif_running(dev))
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
	for (i = 0; i < 8; i++) {
		ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
#ifndef BUG_83C690
		if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
			netdev_err(dev, "Multicast filter read/write mismap %d\n",
				   i);
#endif
	}
	ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);

	if (dev->flags&IFF_PROMISC)
		ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
	else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
		ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
	else
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
}
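/*
 * Called without the page lock held; grabs it and hands off to the real
 * worker, do_set_multicast_list().
 */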
static void __ei_set_multicast_list(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = netdev_priv(dev);

	spin_lock_irqsave(&ei_local->page_lock, flags);
	do_set_multicast_list(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
}
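/**
 * ethdev_setup - init rest of 8390 device struct
 * @dev: network device structure to init
 *
 * Initialize the rest of the 8390 device structure.  Do NOT __init
 * this, as it is used by 8390-based modular drivers too.
 */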
static void ethdev_setup(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);

	if (ei_debug > 1)
		printk(version);

	ether_setup(dev);

	spin_lock_init(&ei_local->page_lock);
}
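/**
 * ____alloc_ei_netdev - alloc_etherdev counterpart for 8390
 * @size: extra bytes to allocate
 *
 * Allocate an 8390-specific net_device.
 */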
static struct net_device *____alloc_ei_netdev(int size)
{
	return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
			    ethdev_setup);
}
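/*
 * Hard initialisation of the 8390 core registers, following National
 * Semiconductor's recommended sequence for the NIC.  With @startp set,
 * the receiver and transmitter are enabled afterwards.
 */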
static void __NS8390_init(struct net_device *dev, int startp)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int i;
	int endcfg = ei_local->word16
	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
	    : 0x48;

	if (sizeof(struct e8390_pkt_hdr) != 4)
		panic("8390.c: header struct mispacked\n");
	/* Follow National Semi's recommendations for initing the DP83902. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
	ei_outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */
	/* Clear the remote byte count registers. */
	ei_outb_p(0x00, e8390_base + EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base + EN0_RCNTHI);
	/* Set to monitor and loopback mode -- this is vital! */
	ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR);
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
	/* Set the transmit page and receive ring. */
	ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);
	ei_local->current_page = ei_local->rx_start_page;	/* assert boundary+1 */
	ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
	/* Clear the pending interrupts and mask. */
	ei_outb_p(0xFF, e8390_base + EN0_ISR);
	ei_outb_p(0x00, e8390_base + EN0_IMR);

	/* Copy the station address into the DS8390 registers. */
	ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD);
	for (i = 0; i < 6; i++) {
		ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
		if (ei_debug > 1 &&
		    ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
			netdev_err(dev, "Hw. address read/write mismap %d\n", i);
	}

	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	ei_local->tx1 = ei_local->tx2 = 0;
	ei_local->txing = 0;

	if (startp) {
		ei_outb_p(0xff, e8390_base + EN0_ISR);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
		ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on */
		/* 3c503 TechMan says rxconfig only after the NIC is started. */
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on */
		do_set_multicast_list(dev);	/* (re)load the mcast table */
	}
}
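/*
 * Trigger a transmit start, assuming the length is valid.
 * Always called with the page lock held.
 */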
static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
				int start_page)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);

	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);

	if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
		netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
		return;
	}
	ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
	ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
	ei_outb_p(start_page, e8390_base + EN0_TPSR);
	ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
}