1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53#include <linux/build_bug.h>
54#include <linux/module.h>
55#include <linux/kernel.h>
56#include <linux/jiffies.h>
57#include <linux/fs.h>
58#include <linux/types.h>
59#include <linux/string.h>
60#include <linux/bitops.h>
61#include <linux/uaccess.h>
62#include <linux/io.h>
63#include <asm/irq.h>
64#include <linux/delay.h>
65#include <linux/errno.h>
66#include <linux/fcntl.h>
67#include <linux/in.h>
68#include <linux/interrupt.h>
69#include <linux/init.h>
70#include <linux/crc32.h>
71
72#include <linux/netdevice.h>
73#include <linux/etherdevice.h>
74
75#define NS8390_CORE
76#include "8390.h"
77
78#define BUG_83C690
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
/*
 * Indirect through the per-device ei_device ops.  Each bus-specific
 * 8390 front-end driver (ISA, PCMCIA, etc.) fills these in at probe
 * time; the core below always goes through these hooks.
 */
#define ei_reset_8390 (ei_local->reset_8390)
#define ei_block_output (ei_local->block_output)
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)

/* Interrupt-time handlers; all are called with ei_local->page_lock held. */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
static void ei_receive(struct net_device *dev);
static void ei_rx_overrun(struct net_device *dev);

/* Internal helpers, also called under the page lock. */
static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
				int start_page);
static void do_set_multicast_list(struct net_device *dev);
static void __NS8390_init(struct net_device *dev, int startp);

static unsigned version_printed;	/* print the banner only once */
static int msg_enable;			/* module-supplied netif_msg bitmap */
static const int default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_RX_ERR |
				      NETIF_MSG_TX_ERR);
module_param(msg_enable, int, 0444);
MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
/**
 * __ei_open - open/initialize the board
 * @dev: network device to initialize
 *
 * Called when the interface is brought up.  Programs the 8390 and
 * starts the transmit queue.  Returns 0 (cannot fail here).
 */
static int __ei_open(struct net_device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = netdev_priv(dev);

	/* Supply a sane Tx watchdog timeout if the probe code did not. */
	if (dev->watchdog_timeo <= 0)
		dev->watchdog_timeo = TX_TIMEOUT;

	/*
	 * Grab the page lock so we own the register set, then initialize
	 * (and start, because of the 1) the chip.
	 */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 1);
	/* Start the queue before dropping the lock so that a freshly
	 * enabled IRQ cannot see a half-initialized queue state. */
	netif_start_queue(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	ei_local->irqlock = 0;	/* interrupts may now be serviced */
	return 0;
}
226
227
228
229
230
231
232
/**
 * __ei_close - shut down network device
 * @dev: network device to close
 *
 * Opposite of __ei_open(): stops the chip (__NS8390_init with
 * startp == 0) and halts the transmit queue.  Returns 0.
 */
static int __ei_close(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/*
	 * Hold the page lock during close so an interrupt cannot race
	 * with the register accesses done while stopping the chip.
	 */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	__NS8390_init(dev, 0);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	netif_stop_queue(dev);
	return 0;
}
248
249
250
251
252
253
254
255
256
/**
 * __ei_tx_timeout - handle transmit time out condition
 * @dev: network device which timed out
 * @txqueue: queue number that stalled (single-queue driver; unused)
 *
 * Called by the kernel when the transmitter has not made any progress
 * for dev->watchdog_timeo jiffies.  Logs the chip status, then resets
 * and reinitializes the 8390 to get the board going again.
 */
static void __ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
	unsigned long flags;

	dev->stats.tx_errors++;

	/* Snapshot the transmit status and interrupt status registers. */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	txsr = ei_inb(e8390_base+EN0_TSR);
	isr = ei_inb(e8390_base+EN0_ISR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
		   (txsr & ENTSR_ABT) ? "excess collisions." :
		   (isr) ? "lost interrupt?" : "cable problem?",
		   txsr, isr, tickssofar);

	if (!isr && !dev->stats.tx_packets) {
		/* No interrupts seen and nothing ever sent: the board may
		 * be on the wrong transceiver - toggle the interface. */
		ei_local->interface_num ^= 1;
	}

	/* Ugly but a reset can be slow, yet must be protected.  Disable
	 * the IRQ before taking the lock since the handler takes the
	 * same lock without disabling interrupts itself. */
	disable_irq_nosync_lockdep(dev->irq);
	spin_lock(&ei_local->page_lock);

	/* Try to restart the card.  Perhaps the user has fixed something. */
	ei_reset_8390(dev);
	__NS8390_init(dev, 1);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep(dev->irq);
	netif_wake_queue(dev);
}
294
295
296
297
298
299
300
301
302
/**
 * __ei_start_xmit - begin packet transmission
 * @skb: packet to be sent
 * @dev: network device to which packet is sent
 *
 * Copies the packet into one of the chip's two on-board Tx slots and
 * triggers the send if the transmitter is idle.  Returns NETDEV_TX_OK
 * on acceptance or NETDEV_TX_BUSY if both slots are full (should not
 * happen, since the queue is stopped when both slots are occupied).
 */
static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int send_length = skb->len, output_page;
	unsigned long flags;
	char buf[ETH_ZLEN];
	char *data = skb->data;

	/* Pad runt frames to the minimum Ethernet length with zeroes. */
	if (skb->len < ETH_ZLEN) {
		memset(buf, 0, ETH_ZLEN);
		memcpy(buf, data, skb->len);
		send_length = ETH_ZLEN;
		data = buf;
	}

	/*
	 * Mask interrupts from the ethercard.  The page lock must be held
	 * while writing IMR so an interrupt handler on another CPU cannot
	 * flip register pages under us.
	 */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, e8390_base + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	/*
	 * The IRQ line is now masked at the chip; additionally disable it
	 * at the interrupt controller for the (possibly slow) block copy
	 * below, then retake the lock for the register work.
	 */
	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);

	spin_lock(&ei_local->page_lock);

	ei_local->irqlock = 1;	/* tells __ei_interrupt we own the chip */

	/*
	 * The chip RAM holds two transmit buffers; tx1/tx2 record the
	 * queued length of each (0 = free, >0 = loaded, -1 = sending).
	 * Pick a free slot, or bail out if both are in use.
	 */
	if (ei_local->tx1 == 0) {
		output_page = ei_local->tx_start_page;
		ei_local->tx1 = send_length;
		if ((netif_msg_tx_queued(ei_local)) &&
		    ei_local->tx2 > 0)
			netdev_dbg(dev,
				   "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx2, ei_local->lasttx, ei_local->txing);
	} else if (ei_local->tx2 == 0) {
		/* Slot 2 lives TX_PAGES/2 pages above slot 1. */
		output_page = ei_local->tx_start_page + TX_PAGES/2;
		ei_local->tx2 = send_length;
		if ((netif_msg_tx_queued(ei_local)) &&
		    ei_local->tx1 > 0)
			netdev_dbg(dev,
				   "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
				   ei_local->tx1, ei_local->lasttx, ei_local->txing);
	} else {
		/* Both slots busy - should not occur; undo and retry later. */
		netif_dbg(ei_local, tx_err, dev,
			  "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
			  ei_local->tx1, ei_local->tx2, ei_local->lasttx);
		ei_local->irqlock = 0;
		netif_stop_queue(dev);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		spin_unlock(&ei_local->page_lock);
		enable_irq_lockdep_irqrestore(dev->irq, &flags);
		dev->stats.tx_errors++;
		return NETDEV_TX_BUSY;
	}

	/*
	 * Copy the frame into the chosen slot in board RAM.  This is
	 * the potentially slow part, done with chip interrupts masked.
	 */
	ei_block_output(dev, send_length, data, output_page);

	if (!ei_local->txing) {
		/* Transmitter idle: fire this frame immediately. */
		ei_local->txing = 1;
		NS8390_trigger_send(dev, send_length, output_page);
		if (output_page == ei_local->tx_start_page) {
			ei_local->tx1 = -1;	/* -1 = slot currently sending */
			ei_local->lasttx = -1;
		} else {
			ei_local->tx2 = -1;
			ei_local->lasttx = -2;
		}
	} else
		ei_local->txqueue++;	/* ei_tx_intr() will start it later */

	if (ei_local->tx1 && ei_local->tx2)
		netif_stop_queue(dev);	/* both slots occupied */
	else
		netif_start_queue(dev);

	/* Turn 8390 interrupts back on and release the chip. */
	ei_local->irqlock = 0;
	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);

	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);
	skb_tx_timestamp(skb);
	dev_consume_skb_any(skb);
	dev->stats.tx_bytes += send_length;

	return NETDEV_TX_OK;
}
415
416
417
418
419
420
421
422
423
424
425
426
427
/**
 * __ei_interrupt - handle the interrupts from an 8390
 * @irq: interrupt number
 * @dev_id: a pointer to the net_device
 *
 * Services up to MAX_SERVICE interrupt events per invocation: receive,
 * receive overrun, transmit done, transmit error and counter overflow.
 * Returns IRQ_HANDLED if any event was serviced, IRQ_NONE otherwise
 * (e.g. when the Tx path had the chip locked - shared IRQ case).
 */
static irqreturn_t __ei_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned long e8390_base = dev->base_addr;
	int interrupts, nr_serviced = 0;
	struct ei_device *ei_local = netdev_priv(dev);

	/*
	 * Own the register window for the whole service loop; the Tx
	 * path takes the same lock around its register accesses.
	 */
	spin_lock(&ei_local->page_lock);

	if (ei_local->irqlock) {
		/*
		 * The transmit path has masked the IMR and flagged
		 * irqlock; an interrupt arriving now cannot legitimately
		 * be ours (likely a shared-IRQ neighbour or flaky board).
		 */
		netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
			   ei_inb_p(e8390_base + EN0_ISR),
			   ei_inb_p(e8390_base + EN0_IMR));
		spin_unlock(&ei_local->page_lock);
		return IRQ_NONE;
	}

	/* Switch to page 0 and read the interrupt status register. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
	netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
		  ei_inb_p(e8390_base + EN0_ISR));

	/* Service events until the ISR clears or we hit the work limit. */
	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
	       ++nr_serviced < MAX_SERVICE) {
		if (!netif_running(dev)) {
			netdev_warn(dev, "interrupt from stopped card\n");
			/* rmk - acknowledge the interrupts anyway */
			ei_outb_p(interrupts, e8390_base + EN0_ISR);
			interrupts = 0;
			break;
		}
		if (interrupts & ENISR_OVER)
			ei_rx_overrun(dev);
		else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
			/* Got a good (?) packet. */
			ei_receive(dev);
		}
		/* Push the next to-transmit packet through. */
		if (interrupts & ENISR_TX)
			ei_tx_intr(dev);
		else if (interrupts & ENISR_TX_ERR)
			ei_tx_err(dev);

		if (interrupts & ENISR_COUNTERS) {
			/* Fold the on-chip error tallies into dev->stats
			 * before they overflow (they are 8-bit counters). */
			dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
			dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
			dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR);
		}

		/* Ignore any RDC interrupts that make it back to here. */
		if (interrupts & ENISR_RDC)
			ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);

		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
	}

	if (interrupts && (netif_msg_intr(ei_local))) {
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
		if (nr_serviced >= MAX_SERVICE) {
			/* 0xFF is a plausible "card removed" value - don't
			 * complain about it. */
			if (interrupts != 0xFF)
				netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
					    interrupts);
			ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR);
		} else {
			netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
			ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. it anyway */
		}
	}
	spin_unlock(&ei_local->page_lock);
	return IRQ_RETVAL(nr_serviced > 0);
}
510
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling "interrupt" - used by netpoll clients (e.g. netconsole) to
 * run the interrupt handler with the device IRQ disabled.
 */
static void __ei_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	__ei_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
/**
 * ei_tx_err - handle transmitter error
 * @dev: network device which threw the exception
 *
 * A transmitter error has happened.  Most likely excess collisions
 * (which is a fairly normal condition).  If the error is one where
 * the transmit will have been aborted (ENTSR_ABT or ENTSR_FU), we
 * must retrigger via ei_tx_intr(); otherwise just tally the error.
 * Called with the page lock held.
 */
static void ei_tx_err(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	/* ei_local is used on some platforms via the 8390 I/O macros. */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);

#ifdef VERBOSE_ERROR_DUMP
	netdev_dbg(dev, "transmitter error (%#2x):", txsr);
	if (txsr & ENTSR_ABT)
		pr_cont(" excess-collisions ");
	if (txsr & ENTSR_ND)
		pr_cont(" non-deferral ");
	if (txsr & ENTSR_CRS)
		pr_cont(" lost-carrier ");
	if (txsr & ENTSR_FU)
		pr_cont(" FIFO-underrun ");
	if (txsr & ENTSR_CDH)
		pr_cont(" lost-heartbeat ");
	pr_cont("\n");
#endif

	ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */

	if (tx_was_aborted)
		ei_tx_intr(dev);	/* frame never made it out: retrigger */
	else {
		dev->stats.tx_errors++;
		if (txsr & ENTSR_CRS)
			dev->stats.tx_carrier_errors++;
		if (txsr & ENTSR_CDH)
			dev->stats.tx_heartbeat_errors++;
		if (txsr & ENTSR_OWC)
			dev->stats.tx_window_errors++;
	}
}
571
572
573
574
575
576
577
578
579
580static void ei_tx_intr(struct net_device *dev)
581{
582 unsigned long e8390_base = dev->base_addr;
583 struct ei_device *ei_local = netdev_priv(dev);
584 int status = ei_inb(e8390_base + EN0_TSR);
585
586 ei_outb_p(ENISR_TX, e8390_base + EN0_ISR);
587
588
589
590
591
592 ei_local->txqueue--;
593
594 if (ei_local->tx1 < 0) {
595 if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
596 pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
597 ei_local->name, ei_local->lasttx, ei_local->tx1);
598 ei_local->tx1 = 0;
599 if (ei_local->tx2 > 0) {
600 ei_local->txing = 1;
601 NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
602 netif_trans_update(dev);
603 ei_local->tx2 = -1;
604 ei_local->lasttx = 2;
605 } else {
606 ei_local->lasttx = 20;
607 ei_local->txing = 0;
608 }
609 } else if (ei_local->tx2 < 0) {
610 if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
611 pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
612 ei_local->name, ei_local->lasttx, ei_local->tx2);
613 ei_local->tx2 = 0;
614 if (ei_local->tx1 > 0) {
615 ei_local->txing = 1;
616 NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
617 netif_trans_update(dev);
618 ei_local->tx1 = -1;
619 ei_local->lasttx = 1;
620 } else {
621 ei_local->lasttx = 10;
622 ei_local->txing = 0;
623 }
624 }
625
626
627
628
629
630 if (status & ENTSR_COL)
631 dev->stats.collisions++;
632 if (status & ENTSR_PTX)
633 dev->stats.tx_packets++;
634 else {
635 dev->stats.tx_errors++;
636 if (status & ENTSR_ABT) {
637 dev->stats.tx_aborted_errors++;
638 dev->stats.collisions += 16;
639 }
640 if (status & ENTSR_CRS)
641 dev->stats.tx_carrier_errors++;
642 if (status & ENTSR_FU)
643 dev->stats.tx_fifo_errors++;
644 if (status & ENTSR_CDH)
645 dev->stats.tx_heartbeat_errors++;
646 if (status & ENTSR_OWC)
647 dev->stats.tx_window_errors++;
648 }
649 netif_wake_queue(dev);
650}
651
652
653
654
655
656
657
658
659
/**
 * ei_receive - receive some packets
 * @dev: device with which receive occurred
 *
 * We have a good packet(s), get it/them out of the buffers.
 * Walks the chip's receive ring from BOUNDARY+1 to CURPAG, copying
 * each valid frame into an skb and handing it to the stack.  At most
 * ten frames are pulled per call to bound interrupt-time work.
 * Called with the page lock held.
 */
static void ei_receive(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned char rxing_page, this_frame, next_frame;
	unsigned short current_offset;
	int rx_pkt_count = 0;
	struct e8390_pkt_hdr rx_frame;
	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;

	while (++rx_pkt_count < 10) {
		int pkt_len, pkt_stat;

		/* Get the rx page (incoming packet pointer) from page 1. */
		ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
		rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
		ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);

		/* Remove one frame from the ring.  The boundary pointer
		 * always lags the read page by one, hence the +1. */
		this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
		if (this_frame >= ei_local->stop_page)
			this_frame = ei_local->rx_start_page;

		/*
		 * Sanity check: our notion of the read page should match
		 * the hardware's.  The (0x0, 0xFF) pair is excluded as it
		 * can appear transiently (e.g. card just reset/removed).
		 */
		if ((netif_msg_rx_status(ei_local)) &&
		    this_frame != ei_local->current_page &&
		    (this_frame != 0x0 || rxing_page != 0xFF))
			netdev_err(dev,
				   "mismatched read page pointers %2x vs %2x\n",
				   this_frame, ei_local->current_page);

		if (this_frame == rxing_page)	/* Read all the frames? */
			break;			/* Done for now */

		current_offset = this_frame << 8;
		ei_get_8390_hdr(dev, &rx_frame, this_frame);

		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
		pkt_stat = rx_frame.status;

		next_frame = this_frame + 1 + ((pkt_len+4)>>8);

		/*
		 * The chip's "next" pointer must land where we computed it
		 * (allowing for ring wrap and a one-page fudge).  If not,
		 * the header is corrupt: resync to the hardware pointer
		 * and drop everything up to there.
		 */
		if (rx_frame.next != next_frame &&
		    rx_frame.next != next_frame + 1 &&
		    rx_frame.next != next_frame - num_rx_pages &&
		    rx_frame.next != next_frame + 1 - num_rx_pages) {
			ei_local->current_page = rxing_page;
			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
			dev->stats.rx_errors++;
			continue;
		}

		if (pkt_len < 60 || pkt_len > 1518) {
			/* Outside legal Ethernet frame sizes - count and skip. */
			netif_dbg(ei_local, rx_status, dev,
				  "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
				  rx_frame.count, rx_frame.status,
				  rx_frame.next);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
		} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
			struct sk_buff *skb;

			/* +2 keeps the IP header word-aligned after the
			 * 14-byte Ethernet header. */
			skb = netdev_alloc_skb(dev, pkt_len + 2);
			if (skb == NULL) {
				netif_err(ei_local, rx_err, dev,
					  "Couldn't allocate a sk_buff of size %d\n",
					  pkt_len);
				dev->stats.rx_dropped++;
				break;	/* better luck next interrupt */
			} else {
				skb_reserve(skb, 2);	/* IP headers on 16 byte boundaries */
				skb_put(skb, pkt_len);	/* Make room */
				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
				skb->protocol = eth_type_trans(skb, dev);
				if (!skb_defer_rx_timestamp(skb))
					netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				if (pkt_stat & ENRSR_PHY)
					dev->stats.multicast++;
			}
		} else {
			netif_err(ei_local, rx_err, dev,
				  "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
				  rx_frame.status, rx_frame.next,
				  rx_frame.count);
			dev->stats.rx_errors++;
			/* NB: The NIC counts CRC, frame and missed errors. */
			if (pkt_stat & ENRSR_FO)
				dev->stats.rx_fifo_errors++;
		}
		next_frame = rx_frame.next;

		/* This _should_ never happen: it's here for avoiding bad clones. */
		if (next_frame >= ei_local->stop_page) {
			netdev_notice(dev, "next frame inconsistency, %#2x\n",
				      next_frame);
			next_frame = ei_local->rx_start_page;
		}
		ei_local->current_page = next_frame;
		ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
	}

	/* We used to also ack ENISR_OVER here, but that would sometimes mask
	 * a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
	ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
}
775
776
777
778
779
780
781
782
783
784
785
786
787
788
/**
 * ei_rx_overrun - handle receiver overrun
 * @dev: network device which threw exception
 *
 * We have a receiver overrun: we have to kick the 8390 to get it
 * started again.  The sequence below (stop, wait, clear DMA count,
 * loopback Tx, restart, drain, re-enable Tx) follows the chip
 * vendor's prescribed recovery procedure - do not reorder it.
 * Called with the page lock held.
 */
static void ei_rx_overrun(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	unsigned char was_txing, must_resend = 0;
	/* ei_local is used on some platforms via the 8390 I/O macros. */
	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);

	/*
	 * Record whether a Tx was in progress and then issue the
	 * stop command.
	 */
	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
	dev->stats.rx_over_errors++;

	/*
	 * Wait for the chip to settle after the stop command.  The
	 * original driver used a 1.6 ms minimum per the datasheet;
	 * 10 ms here is deliberately generous.  NOTE(review): busy-wait
	 * in interrupt context - inherited behavior, confirm acceptable.
	 */
	mdelay(10);

	/*
	 * Reset RBCR[01] back to zero as per magic incantation.
	 */
	ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base+EN0_RCNTHI);

	/*
	 * See if any Tx was interrupted or not.  According to NS, this
	 * step is vital, and skipping it will cause no end of havoc.
	 */
	if (was_txing) {
		unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
		if (!tx_completed)
			must_resend = 1;
	}

	/*
	 * Have to enter loopback mode and then restart the NIC before
	 * you are allowed to slurp packets up off the ring.
	 */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);

	/*
	 * Clear the Rx ring of all the debris, and ack the interrupt.
	 */
	ei_receive(dev);
	ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);

	/*
	 * Leave loopback mode, and resend any packet that got stopped.
	 */
	ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
	if (must_resend)
		ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
}
852
853
854
855
856
/*
 * Collect the stats.  This is called unlocked and from several contexts.
 * Folds the chip's three 8-bit error counters into dev->stats, but only
 * while the interface is up (the counters aren't valid on a stopped chip).
 */
static struct net_device_stats *__ei_get_stats(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/* If the card is stopped, just return the present stats. */
	if (!netif_running(dev))
		return &dev->stats;

	spin_lock_irqsave(&ei_local->page_lock, flags);
	/* Read the counter registers, assuming we are in page 0. */
	dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
	dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
	dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	return &dev->stats;
}
876
877
878
879
880
881
882static inline void make_mc_bits(u8 *bits, struct net_device *dev)
883{
884 struct netdev_hw_addr *ha;
885
886 netdev_for_each_mc_addr(ha, dev) {
887 u32 crc = ether_crc(ETH_ALEN, ha->addr);
888
889
890
891
892 bits[crc>>29] |= (1<<((crc>>26)&7));
893 }
894}
895
896
897
898
899
900
901
902
903
/*
 * do_set_multicast_list - set/clear multicast filter
 * @dev: net device for which multicast filter is adjusted
 *
 * Set or clear the multicast filter for this adaptor.  Must be called
 * with the page lock held (interrupts off); the interrupt handler
 * switches register pages and would otherwise corrupt the writes below.
 */
static void do_set_multicast_list(struct net_device *dev)
{
	unsigned long e8390_base = dev->base_addr;
	int i;
	struct ei_device *ei_local = netdev_priv(dev);

	/* Build the 8-byte hash filter: exact hash for normal multicast,
	 * all-ones for promiscuous/allmulti. */
	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
		memset(ei_local->mcfilter, 0, 8);
		if (!netdev_mc_empty(dev))
			make_mc_bits(ei_local->mcfilter, dev);
	} else
		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */

	/*
	 * NOTE(review): some 8390 clones (the BUG_83C690 define above
	 * suggests the 83C690) reportedly misbehave when the filter is
	 * read back, hence the readback check is compiled out by default.
	 *
	 * Switch the card off first if it's running, so the filter
	 * registers are written with the receiver quiescent.
	 */
	if (netif_running(dev))
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
	ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
	for (i = 0; i < 8; i++) {
		ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
#ifndef BUG_83C690
		if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
			netdev_err(dev, "Multicast filter read/write mismap %d\n",
				   i);
#endif
	}
	ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);

	/* 0x18 = promiscuous + multicast accept; 0x08 = multicast accept. */
	if (dev->flags&IFF_PROMISC)
		ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
	else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
		ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
	else
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
}
950
951
952
953
954
955
956
957static void __ei_set_multicast_list(struct net_device *dev)
958{
959 unsigned long flags;
960 struct ei_device *ei_local = netdev_priv(dev);
961
962 spin_lock_irqsave(&ei_local->page_lock, flags);
963 do_set_multicast_list(dev);
964 spin_unlock_irqrestore(&ei_local->page_lock, flags);
965}
966
967
968
969
970
971
972
973
974
/**
 * ethdev_setup - init rest of 8390 device struct
 * @dev: network device structure to init
 *
 * Initialize the rest of the 8390 device structure: Ethernet defaults,
 * the register page lock and the message-level mask.  Do NOT __init
 * this, as it is used by 8390-based modular drivers too.
 */
static void ethdev_setup(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);

	ether_setup(dev);

	spin_lock_init(&ei_local->page_lock);

	ei_local->msg_enable = netif_msg_init(msg_enable, default_msg_level);

	/* Print the version banner once only, for the first device. */
	if (netif_msg_drv(ei_local) && (version_printed++ == 0))
		pr_info("%s", version);
}
988
989
990
991
992
993
994
/*
 * Allocate an Ethernet device with an ei_device private area plus
 * @size extra bytes of bus-specific private data, set up via
 * ethdev_setup().  Returns NULL on allocation failure.
 */
static struct net_device *____alloc_ei_netdev(int size)
{
	return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
			    NET_NAME_UNKNOWN, ethdev_setup);
}
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
/*
 * __NS8390_init - initialize 8390 hardware
 * @dev: network device to initialize
 * @startp: boolean: nonzero = start the chip, zero = leave it stopped
 *
 * This follows the 8390 register-programming sequence; the order of
 * writes matters, so do not reorder.  Must be called with the page
 * lock held.
 */
static void __NS8390_init(struct net_device *dev, int startp)
{
	unsigned long e8390_base = dev->base_addr;
	struct ei_device *ei_local = netdev_priv(dev);
	int i;
	/* Data configuration: word-wide transfers (and byte order) only
	 * when the board is wired for 16-bit access. */
	int endcfg = ei_local->word16
	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
	    : 0x48;

	/* The rx ring arithmetic in ei_receive() relies on this. */
	BUILD_BUG_ON(sizeof(struct e8390_pkt_hdr) != 4);
	/* Follow National Semi's recommendations for initing the DP83902. */
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
	ei_outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */
	/* Clear the remote byte count registers. */
	ei_outb_p(0x00, e8390_base + EN0_RCNTLO);
	ei_outb_p(0x00, e8390_base + EN0_RCNTHI);
	/* Set to monitor and loopback mode -- this is vital! */
	ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
	/* Set the transmit page and receive ring. */
	ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);	/* 3c503 says 0x3f,NS0x26 */
	ei_local->current_page = ei_local->rx_start_page;	/* assert boundary+1 */
	ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
	/* Clear the pending interrupts and mask. */
	ei_outb_p(0xFF, e8390_base + EN0_ISR);
	ei_outb_p(0x00, e8390_base + EN0_IMR);

	/* Copy the station address into the DS8390 registers. */
	ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
	for (i = 0; i < 6; i++) {
		ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
		if ((netif_msg_probe(ei_local)) &&
		    ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
			netdev_err(dev,
				   "Hw. address read/write mismap %d\n", i);
	}

	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	/* Reset the software Tx-slot state (also done above; kept as-is). */
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_local->txing = 0;

	if (startp) {
		/* Clear all pending interrupts, unmask, and start the chip. */
		ei_outb_p(0xff, e8390_base + EN0_ISR);
		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
		ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
		/* 3c503 TechMan says rxconfig only after the NIC is started. */
		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */
		do_set_multicast_list(dev);	/* (re)load the mcast table */
	}
}
1072
1073
1074
1075
1076static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1077 int start_page)
1078{
1079 unsigned long e8390_base = dev->base_addr;
1080 struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
1081
1082 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1083
1084 if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
1085 netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
1086 return;
1087 }
1088 ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
1089 ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
1090 ei_outb_p(start_page, e8390_base + EN0_TPSR);
1091 ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
1092}
1093