1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#define DRV_NAME "de2104x"
33#define DRV_VERSION "0.7"
34#define DRV_RELDATE "Mar 17, 2004"
35
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/init.h>
41#include <linux/interrupt.h>
42#include <linux/pci.h>
43#include <linux/delay.h>
44#include <linux/ethtool.h>
45#include <linux/compiler.h>
46#include <linux/rtnetlink.h>
47#include <linux/crc32.h>
48#include <linux/slab.h>
49
50#include <asm/io.h>
51#include <asm/irq.h>
52#include <asm/uaccess.h>
53#include <asm/unaligned.h>
54
55
56static char version[] =
57"PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")";
58
59MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
60MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
61MODULE_LICENSE("GPL");
62MODULE_VERSION(DRV_VERSION);
63
64static int debug = -1;
65module_param (debug, int, 0);
66MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
67
68
69#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
70 defined(CONFIG_SPARC) || defined(__ia64__) || \
71 defined(__sh__) || defined(__mips__)
72static int rx_copybreak = 1518;
73#else
74static int rx_copybreak = 100;
75#endif
76module_param (rx_copybreak, int, 0);
77MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
78
79#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
80 NETIF_MSG_PROBE | \
81 NETIF_MSG_LINK | \
82 NETIF_MSG_IFDOWN | \
83 NETIF_MSG_IFUP | \
84 NETIF_MSG_RX_ERR | \
85 NETIF_MSG_TX_ERR)
86
87
88#ifndef CONFIG_DE2104X_DSL
89#define DSL 0
90#else
91#define DSL CONFIG_DE2104X_DSL
92#endif
93
94#define DE_RX_RING_SIZE 64
95#define DE_TX_RING_SIZE 64
96#define DE_RING_BYTES \
97 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
98 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
99#define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
100#define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
101#define TX_BUFFS_AVAIL(CP) \
102 (((CP)->tx_tail <= (CP)->tx_head) ? \
103 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
104 (CP)->tx_tail - (CP)->tx_head - 1)
105
106#define PKT_BUF_SZ 1536
107#define RX_OFFSET 2
108
109#define DE_SETUP_SKB ((struct sk_buff *) 1)
110#define DE_DUMMY_SKB ((struct sk_buff *) 2)
111#define DE_SETUP_FRAME_WORDS 96
112#define DE_EEPROM_WORDS 256
113#define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
114#define DE_MAX_MEDIA 5
115
116#define DE_MEDIA_TP_AUTO 0
117#define DE_MEDIA_BNC 1
118#define DE_MEDIA_AUI 2
119#define DE_MEDIA_TP 3
120#define DE_MEDIA_TP_FD 4
121#define DE_MEDIA_INVALID DE_MAX_MEDIA
122#define DE_MEDIA_FIRST 0
123#define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
124#define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
125
126#define DE_TIMER_LINK (60 * HZ)
127#define DE_TIMER_NO_LINK (5 * HZ)
128
129#define DE_NUM_REGS 16
130#define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
131#define DE_REGS_VER 1
132
133
134#define TX_TIMEOUT (6*HZ)
135
136
137
138
139
140#define FULL_DUPLEX_MAGIC 0x6969
141
enum {
	/* NIC register byte offsets (CSR0..CSR15, 8 bytes apart) */
	BusMode			= 0x00,		/* CSR0 */
	TxPoll			= 0x08,		/* CSR1 */
	RxPoll			= 0x10,		/* CSR2 */
	RxRingAddr		= 0x18,		/* CSR3 */
	TxRingAddr		= 0x20,		/* CSR4 */
	MacStatus		= 0x28,		/* CSR5 */
	MacMode			= 0x30,		/* CSR6 */
	IntrMask		= 0x38,		/* CSR7 */
	RxMissed		= 0x40,		/* CSR8 */
	ROMCmd			= 0x48,		/* CSR9 */
	CSR11			= 0x58,		/* full-duplex magic (21040) */
	SIAStatus		= 0x60,		/* CSR12 */
	CSR13			= 0x68,		/* SIA connectivity */
	CSR14			= 0x70,		/* SIA TX/RX */
	CSR15			= 0x78,		/* SIA general */
	PCIPM			= 0x40,		/* PCI config-space power mgmt */

	/* BusMode (CSR0) bits */
	CmdReset		= (1 << 0),
	CacheAlign16		= 0x00008000,
	BurstLen4		= 0x00000400,
	DescSkipLen		= (DSL << 2),

	/* Tx/Rx poll "go" values */
	NormalTxPoll		= (1 << 0),
	NormalRxPoll		= (1 << 0),

	/* Tx/Rx descriptor status bits (opts1/opts2) */
	DescOwn			= (1 << 31),	/* chip owns descriptor */
	RxError			= (1 << 15),	/* error summary */
	RxErrLong		= (1 << 7),
	RxErrCRC		= (1 << 1),
	RxErrFIFO		= (1 << 0),
	RxErrRunt		= (1 << 11),
	RxErrFrame		= (1 << 14),
	RingEnd			= (1 << 25),	/* last descriptor in ring */
	FirstFrag		= (1 << 29),
	LastFrag		= (1 << 30),
	TxError			= (1 << 15),	/* error summary */
	TxFIFOUnder		= (1 << 1),
	TxLinkFail		= (1 << 2) | (1 << 10) | (1 << 11),
	TxMaxCol		= (1 << 8),
	TxOWC			= (1 << 9),	/* out-of-window collision */
	TxJabber		= (1 << 14),
	SetupFrame		= (1 << 27),	/* descriptor carries a setup frame */
	TxSwInt			= (1 << 31),	/* request interrupt on completion */

	/* MacStatus (CSR5) interrupt/status bits */
	IntrOK			= (1 << 16),	/* normal interrupt summary */
	IntrErr			= (1 << 15),	/* abnormal interrupt summary */
	RxIntr			= (1 << 6),
	RxEmpty			= (1 << 7),	/* RX ring has no free buffers */
	TxIntr			= (1 << 0),
	TxEmpty			= (1 << 2),
	PciErr			= (1 << 13),
	TxState			= (1 << 22) | (1 << 21) | (1 << 20),
	RxState			= (1 << 19) | (1 << 18) | (1 << 17),
	LinkFail		= (1 << 12),
	LinkPass		= (1 << 4),
	RxStopped		= (1 << 8),
	TxStopped		= (1 << 1),

	/* MacMode (CSR6) bits */
	TxEnable		= (1 << 13),
	RxEnable		= (1 << 1),
	RxTx			= TxEnable | RxEnable,
	FullDuplex		= (1 << 9),
	AcceptAllMulticast	= (1 << 7),
	AcceptAllPhys		= (1 << 6),
	BOCnt			= (1 << 5),
	MacModeClear		= (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
				  RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,

	/* ROMCmd (CSR9) serial-EEPROM interface bits */
	EE_SHIFT_CLK		= 0x02,		/* EEPROM shift clock */
	EE_CS			= 0x01,		/* EEPROM chip select */
	EE_DATA_WRITE		= 0x04,		/* data to EEPROM */
	EE_WRITE_0		= 0x01,
	EE_WRITE_1		= 0x05,
	EE_DATA_READ		= 0x08,		/* data from EEPROM */
	EE_ENB			= (0x4800 | EE_CS),

	/* EEPROM read opcode (plus address) */
	EE_READ_CMD		= 6,

	/* RxMissed (CSR8) fields; register is cleared on read */
	RxMissedOver		= (1 << 16),	/* counter overflowed */
	RxMissedMask		= 0xffff,

	/* SROM media-info layout */
	SROMC0InfoLeaf		= 27,
	MediaBlockMask		= 0x3f,
	MediaCustomCSRs		= (1 << 6),

	/* PCIPM power-management bits */
	PM_Sleep		= (1 << 31),
	PM_Snooze		= (1 << 30),
	PM_Mask			= PM_Sleep | PM_Snooze,

	/* SIAStatus (CSR12) bits */
	NWayState		= (1 << 14) | (1 << 13) | (1 << 12),
	NWayRestart		= (1 << 12),
	NonselPortActive	= (1 << 9),	/* activity on non-selected port */
	SelPortActive		= (1 << 8),	/* activity on selected port */
	LinkFailStatus		= (1 << 2),
	NetCxnErr		= (1 << 1),
};
251
252static const u32 de_intr_mask =
253 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
254 LinkPass | LinkFail | PciErr;
255
256
257
258
259
260static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
261
/* One media block in the board's SROM info leaf (on-disk layout, packed). */
struct de_srom_media_block {
	u8			opts;
	u16			csr13;
	u16			csr14;
	u16			csr15;
} __packed;

/* Header of the SROM info leaf (on-disk layout, packed). */
struct de_srom_info_leaf {
	u16			default_media;
	u8			n_blocks;
	u8			unused;
} __packed;

/* Hardware DMA descriptor, shared with the chip (little-endian fields). */
struct de_desc {
	__le32			opts1;		/* status / DescOwn */
	__le32			opts2;		/* control flags + buffer length */
	__le32			addr1;		/* buffer bus address */
	__le32			addr2;		/* second buffer (unused here) */
#if DSL
	__le32			skip[DSL];	/* optional descriptor skip words */
#endif
};

/* CSR settings for one media type, taken from SROM or built-in tables. */
struct media_info {
	u16			type;		/* DE_MEDIA_xxx */
	u16			csr13;
	u16			csr14;
	u16			csr15;
};

/* Per-slot bookkeeping: the skb in the ring and its DMA mapping. */
struct ring_info {
	struct sk_buff		*skb;
	dma_addr_t		mapping;
};
296
/* Driver-private state, hung off the net_device. */
struct de_private {
	unsigned		tx_head;	/* next TX slot to fill */
	unsigned		tx_tail;	/* next TX slot to reclaim */
	unsigned		rx_tail;	/* next RX slot to examine */

	void __iomem		*regs;		/* mapped CSR base */
	struct net_device	*dev;
	spinlock_t		lock;		/* guards TX ring and media state */

	struct de_desc		*rx_ring;	/* one DMA block; TX follows RX */
	struct de_desc		*tx_ring;
	struct ring_info	tx_skb[DE_TX_RING_SIZE];
	struct ring_info	rx_skb[DE_RX_RING_SIZE];
	unsigned		rx_buf_sz;	/* RX buffer allocation size */
	dma_addr_t		ring_dma;	/* bus address of rx_ring */

	u32			msg_enable;	/* netif_msg bitmap */

	struct net_device_stats net_stats;

	struct pci_dev		*pdev;

	u16			setup_frame[DE_SETUP_FRAME_WORDS];	/* address filter image */

	u32			media_type;	/* current DE_MEDIA_xxx */
	u32			media_supported;	/* SUPPORTED_* bitmap */
	u32			media_advertise;	/* ADVERTISED_* bitmap */
	struct media_info	media[DE_MAX_MEDIA];	/* per-media CSR values */
	struct timer_list	media_timer;	/* link poll / media rotation */

	u8			*ee_data;	/* EEPROM contents (probe-time copy) */
	unsigned		board_idx;
	unsigned		de21040 : 1;	/* chip is a 21040 (else 21041) */
	unsigned		media_lock : 1;	/* user pinned the media choice */
};
332
333
334static void de_set_rx_mode (struct net_device *dev);
335static void de_tx (struct de_private *de);
336static void de_clean_rings (struct de_private *de);
337static void de_media_interrupt (struct de_private *de, u32 status);
338static void de21040_media_timer (unsigned long data);
339static void de21041_media_timer (unsigned long data);
340static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
341
342
343static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
344 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
345 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
346 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
347 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
348 { },
349};
350MODULE_DEVICE_TABLE(pci, de_pci_tbl);
351
352static const char * const media_name[DE_MAX_MEDIA] = {
353 "10baseT auto",
354 "BNC",
355 "AUI",
356 "10baseT-HD",
357 "10baseT-FD"
358};
359
360
361
/* 21040 SIA register values, indexed by DE_MEDIA_xxx
 * (TP_AUTO and BNC entries unused on this chip). */
static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };

/* 21041 SIA register values, indexed by DE_MEDIA_xxx */
static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
/* alternate CSR14 set used on some boards */
static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
372
373
/* MMIO register accessors; both expect a local `de` (struct de_private *)
 * to be in scope at the call site. */
#define dr32(reg)	ioread32(de->regs + (reg))
#define dw32(reg, val)	iowrite32((val), de->regs + (reg))
376
377
378static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
379 u32 status, u32 len)
380{
381 netif_dbg(de, rx_err, de->dev,
382 "rx err, slot %d status 0x%x len %d\n",
383 rx_tail, status, len);
384
385 if ((status & 0x38000300) != 0x0300) {
386
387 if ((status & 0xffff) != 0x7fff) {
388 netif_warn(de, rx_err, de->dev,
389 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
390 status);
391 de->net_stats.rx_length_errors++;
392 }
393 } else if (status & RxError) {
394
395 de->net_stats.rx_errors++;
396 if (status & 0x0890) de->net_stats.rx_length_errors++;
397 if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
398 if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
399 }
400}
401
/* Drain completed RX descriptors (at most DE_RX_RING_SIZE-1 per call).
 * Small frames (<= rx_copybreak) are copied into a fresh skb and the ring
 * buffer is kept; large frames are handed up whole and replaced by a new
 * buffer.  Each slot is then returned to the chip.  Runs in IRQ context,
 * without de->lock.
 */
static void de_rx (struct de_private *de)
{
	unsigned rx_tail = de->rx_tail;
	unsigned rx_work = DE_RX_RING_SIZE;
	unsigned drop = 0;
	int rc;

	while (--rx_work) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *copy_skb;
		unsigned copying_skb, buflen;

		skb = de->rx_skb[rx_tail].skb;
		BUG_ON(!skb);
		rmb();		/* order skb read before descriptor status read */
		status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
		if (status & DescOwn)
			break;	/* chip still owns this slot */

		/* frame length field, minus the 4-byte FCS */
		len = ((status >> 16) & 0x7ff) - 4;
		mapping = de->rx_skb[rx_tail].mapping;

		if (unlikely(drop)) {
			/* already in drop mode (earlier alloc failure or
			 * backpressure): just recycle the descriptor */
			de->net_stats.rx_dropped++;
			goto rx_next;
		}

		/* anything other than "first+last fragment, no error" */
		if (unlikely((status & 0x38008300) != 0x0300)) {
			de_rx_err_acct(de, rx_tail, status, len);
			goto rx_next;
		}

		copying_skb = (len <= rx_copybreak);

		netif_dbg(de, rx_status, de->dev,
			  "rx slot %d status 0x%x len %d copying? %d\n",
			  rx_tail, status, len, copying_skb);

		buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
		copy_skb = dev_alloc_skb (buflen);
		if (unlikely(!copy_skb)) {
			/* out of memory: drop this and following frames but
			 * keep recycling a bounded number of descriptors */
			de->net_stats.rx_dropped++;
			drop = 1;
			rx_work = 100;
			goto rx_next;
		}

		if (!copying_skb) {
			/* pass the full buffer up; the fresh skb becomes the
			 * new ring buffer for this slot */
			pci_unmap_single(de->pdev, mapping,
					 buflen, PCI_DMA_FROMDEVICE);
			skb_put(skb, len);

			mapping =
			de->rx_skb[rx_tail].mapping =
				pci_map_single(de->pdev, copy_skb->data,
					       buflen, PCI_DMA_FROMDEVICE);
			de->rx_skb[rx_tail].skb = copy_skb;
		} else {
			/* small frame: copy into the fresh skb and leave the
			 * original buffer mapped in the ring */
			pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
			skb_reserve(copy_skb, RX_OFFSET);
			skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
						  len);
			pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		skb->protocol = eth_type_trans (skb, de->dev);

		de->net_stats.rx_packets++;
		de->net_stats.rx_bytes += skb->len;
		rc = netif_rx (skb);
		if (rc == NET_RX_DROP)
			drop = 1;

rx_next:
		/* rewrite buffer length (plus wrap bit on the last slot) and
		 * give the descriptor back to the chip */
		if (rx_tail == (DE_RX_RING_SIZE - 1))
			de->rx_ring[rx_tail].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
		wmb();		/* descriptor body visible before DescOwn flips */
		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
		rx_tail = NEXT_RX(rx_tail);
	}

	if (!rx_work)
		netdev_warn(de->dev, "rx work limit reached\n");

	de->rx_tail = rx_tail;
}
496
/* Shared IRQ handler: ack MacStatus, then service RX, TX, link and PCI
 * error events in that order.  RX runs lock-free; TX and media handling
 * take de->lock.
 */
static irqreturn_t de_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct de_private *de = netdev_priv(dev);
	u32 status;

	status = dr32(MacStatus);
	/* NOTE(review): the 0xFFFF compare looks like a surprise-removal
	 * check, but a read from a removed PCI device returns 0xFFFFFFFF on
	 * this 32-bit register, which would not match here — confirm whether
	 * 0xFFFFFFFF was intended. */
	if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
		return IRQ_NONE;

	netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
		  status, dr32(MacMode),
		  de->rx_tail, de->tx_head, de->tx_tail);

	/* ack everything we saw */
	dw32(MacStatus, status);

	if (status & (RxIntr | RxEmpty)) {
		de_rx(de);
		if (status & RxEmpty)
			dw32(RxPoll, NormalRxPoll);	/* restart RX engine */
	}

	spin_lock(&de->lock);

	if (status & (TxIntr | TxEmpty))
		de_tx(de);

	if (status & (LinkPass | LinkFail))
		de_media_interrupt(de, status);

	spin_unlock(&de->lock);

	if (status & PciErr) {
		u16 pci_status;

		/* read and write back PCI_STATUS to clear the error bits */
		pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
		netdev_err(de->dev,
			   "PCI bus error, status=%08x, PCI status=%04x\n",
			   status, pci_status);
	}

	return IRQ_HANDLED;
}
541
/* Reclaim completed TX descriptors between tx_tail and tx_head, unmapping
 * buffers, updating stats, and freeing skbs.  Wakes the queue once a
 * quarter of the ring is free again.  Caller holds de->lock (IRQ context).
 */
static void de_tx (struct de_private *de)
{
	unsigned tx_head = de->tx_head;
	unsigned tx_tail = de->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		rmb();		/* order status read after head snapshot */
		status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
		if (status & DescOwn)
			break;	/* chip has not finished this slot */

		skb = de->tx_skb[tx_tail].skb;
		BUG_ON(!skb);
		/* DE_DUMMY_SKB marks a placeholder descriptor: nothing mapped */
		if (unlikely(skb == DE_DUMMY_SKB))
			goto next;

		/* DE_SETUP_SKB marks an address-filter setup frame */
		if (unlikely(skb == DE_SETUP_SKB)) {
			pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
					 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
			goto next;
		}

		pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
				 skb->len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & TxError) {
				netif_dbg(de, tx_err, de->dev,
					  "tx err, status 0x%x\n",
					  status);
				de->net_stats.tx_errors++;
				if (status & TxOWC)
					de->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					de->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					de->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					de->net_stats.tx_fifo_errors++;
			} else {
				de->net_stats.tx_packets++;
				de->net_stats.tx_bytes += skb->len;
				netif_dbg(de, tx_done, de->dev,
					  "tx done, slot %d\n", tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

next:
		de->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	de->tx_tail = tx_tail;

	/* restart the queue once enough slots have been reclaimed */
	if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
		netif_wake_queue(de->dev);
}
604
/* Queue one skb for transmission: map it, fill the next TX descriptor,
 * hand it to the chip (DescOwn after a write barrier), then kick TxPoll.
 * Returns NETDEV_TX_BUSY only when the ring is completely full.
 */
static netdev_tx_t de_start_xmit (struct sk_buff *skb,
					struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned int entry, tx_free;
	/* NOTE(review): mapping is u32, not dma_addr_t — assumes 32-bit DMA
	 * addressing; confirm against the device's DMA mask. */
	u32 mapping, len, flags = FirstFrag | LastFrag;
	struct de_desc *txd;

	spin_lock_irq(&de->lock);

	tx_free = TX_BUFFS_AVAIL(de);
	if (tx_free == 0) {
		netif_stop_queue(dev);
		spin_unlock_irq(&de->lock);
		return NETDEV_TX_BUSY;
	}
	tx_free--;

	entry = de->tx_head;

	txd = &de->tx_ring[entry];

	len = skb->len;
	mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (entry == (DE_TX_RING_SIZE - 1))
		flags |= RingEnd;
	/* request a completion interrupt on the last free slot and at the
	 * half-full mark, so the ring gets reclaimed in time */
	if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
		flags |= TxSwInt;
	flags |= len;
	txd->opts2 = cpu_to_le32(flags);
	txd->addr1 = cpu_to_le32(mapping);

	de->tx_skb[entry].skb = skb;
	de->tx_skb[entry].mapping = mapping;
	wmb();		/* descriptor body visible before ownership transfer */

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	de->tx_head = NEXT_TX(entry);
	netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
		  entry, skb->len);

	if (tx_free == 0)
		netif_stop_queue(dev);

	spin_unlock_irq(&de->lock);

	/* poke the chip to fetch the new descriptor */
	dw32(TxPoll, NormalTxPoll);

	return NETDEV_TX_OK;
}
658
659
660
661
662
663
/* Set bit @i in the little-endian bitmap at @p.
 * NOTE(review): macro evaluates @i twice — safe for the plain-int callers
 * below, but do not pass expressions with side effects. */
#undef set_bit_le
#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)

/* Build a 512-bit hash-filter setup frame for the chip.  Each 16-bit
 * hash-table word is written twice (hardware setup-frame layout), and the
 * station address goes in words 39..41 (offset 13*6), also duplicated.
 */
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	set_bit_le(255, hash_table);			/* broadcast entry */

	netdev_for_each_mc_addr(ha, dev) {
		/* 9-bit CRC hash selects the filter bit */
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		set_bit_le(index, hash_table);
	}

	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &de->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
696
/* Build a perfect-filter setup frame: up to 14 multicast addresses plus
 * our own, each 16-bit word duplicated (hardware layout); unused entries
 * are padded with 0xff.  Caller guarantees netdev_mc_count(dev) <= 14.
 */
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u16 *eaddrs;

	/* Copy each multicast address, one 16-bit word at a time, writing
	 * every word twice as the setup-frame format requires. */
	netdev_for_each_mc_addr(ha, dev) {
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}

	/* pad the remaining perfect-filter slots (12 bytes each) */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &de->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
721
722
/* Program the chip's RX filter.  Promiscuous and all-multi are pure
 * MacMode bits; otherwise a setup frame (hash or perfect filter) is queued
 * on the TX ring.  Caller holds de->lock.
 */
static void __de_set_rx_mode (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 macmode;
	unsigned int entry;
	u32 mapping;
	struct de_desc *txd;
	struct de_desc *dummy_txd = NULL;

	macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);

	if (dev->flags & IFF_PROMISC) {
		macmode |= AcceptAllMulticast | AcceptAllPhys;
		goto out;
	}

	if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* too many to filter well -- accept all multicasts */
		macmode |= AcceptAllMulticast;
		goto out;
	}

	/* more than 14 multicast addresses needs the 512-bit hash filter;
	 * otherwise a perfect filter fits */
	if (netdev_mc_count(dev) > 14)
		build_setup_frame_hash (de->setup_frame, dev);
	else
		build_setup_frame_perfect (de->setup_frame, dev);

	/* The setup frame must occupy the first descriptor the chip will
	 * fetch.  If tx_head is mid-ring, queue a dummy descriptor first so
	 * the setup descriptor is released atomically after it. */
	entry = de->tx_head;

	if (entry != 0) {
		de->tx_skb[entry].skb = DE_DUMMY_SKB;

		dummy_txd = &de->tx_ring[entry];
		dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
				   cpu_to_le32(RingEnd) : 0;
		dummy_txd->addr1 = 0;

		/* DescOwn for the dummy is set last, below */
		entry = NEXT_TX(entry);
	}

	de->tx_skb[entry].skb = DE_SETUP_SKB;
	de->tx_skb[entry].mapping = mapping =
	    pci_map_single (de->pdev, de->setup_frame,
			    sizeof (de->setup_frame), PCI_DMA_TODEVICE);

	/* Put the setup frame on the Tx list. */
	txd = &de->tx_ring[entry];
	if (entry == (DE_TX_RING_SIZE - 1))
		txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
	else
		txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
	txd->addr1 = cpu_to_le32(mapping);
	wmb();

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	/* release the dummy only after the setup descriptor is live, so the
	 * chip cannot race past it into a stale slot */
	if (dummy_txd) {
		dummy_txd->opts1 = cpu_to_le32(DescOwn);
		wmb();
	}

	de->tx_head = NEXT_TX(entry);

	if (TX_BUFFS_AVAIL(de) == 0)
		netif_stop_queue(dev);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

out:
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}
806
807static void de_set_rx_mode (struct net_device *dev)
808{
809 unsigned long flags;
810 struct de_private *de = netdev_priv(dev);
811
812 spin_lock_irqsave (&de->lock, flags);
813 __de_set_rx_mode(dev);
814 spin_unlock_irqrestore (&de->lock, flags);
815}
816
817static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
818{
819 if (unlikely(rx_missed & RxMissedOver))
820 de->net_stats.rx_missed_errors += RxMissedMask;
821 else
822 de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
823}
824
825static void __de_get_stats(struct de_private *de)
826{
827 u32 tmp = dr32(RxMissed);
828
829 de_rx_missed(de, tmp);
830}
831
832static struct net_device_stats *de_get_stats(struct net_device *dev)
833{
834 struct de_private *de = netdev_priv(dev);
835
836
837 spin_lock_irq(&de->lock);
838 if (netif_running(dev) && netif_device_present(dev))
839 __de_get_stats(de);
840 spin_unlock_irq(&de->lock);
841
842 return &de->net_stats;
843}
844
845static inline int de_is_running (struct de_private *de)
846{
847 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
848}
849
/* Disable the TX/RX engines and wait (up to ~1.3 ms) for the DMA state
 * machines to go idle; warn if they do not. */
static void de_stop_rxtx (struct de_private *de)
{
	u32 macmode;
	unsigned int i = 1300/100;	/* poll iterations at 100 us each */

	macmode = dr32(MacMode);
	if (macmode & RxTx) {
		dw32(MacMode, macmode & ~RxTx);
		dr32(MacMode);		/* posted-write flush */
	}

	/* Per the datasheet the engines may take up to 35 ms to settle in
	 * the worst case; in practice this short poll suffices. */
	while (--i) {
		if (!de_is_running(de))
			return;
		udelay(100);
	}

	netdev_warn(de->dev, "timeout expired, stopping DMA\n");
}
873
/* Enable both DMA engines if not already enabled. */
static inline void de_start_rxtx (struct de_private *de)
{
	u32 macmode;

	macmode = dr32(MacMode);
	if ((macmode & RxTx) != RxTx) {
		dw32(MacMode, macmode | RxTx);
		dr32(MacMode);		/* posted-write flush */
	}
}
884
/* Quiesce the chip: mask interrupts, stop DMA, ack any pending status,
 * and reset the software ring indices. */
static void de_stop_hw (struct de_private *de)
{
	udelay(5);
	dw32(IntrMask, 0);

	de_stop_rxtx(de);

	/* writing the status back acks all pending bits */
	dw32(MacStatus, dr32(MacStatus));

	udelay(10);

	de->rx_tail = 0;
	de->tx_head = de->tx_tail = 0;
}
900
901static void de_link_up(struct de_private *de)
902{
903 if (!netif_carrier_ok(de->dev)) {
904 netif_carrier_on(de->dev);
905 netif_info(de, link, de->dev, "link up, media %s\n",
906 media_name[de->media_type]);
907 }
908}
909
910static void de_link_down(struct de_private *de)
911{
912 if (netif_carrier_ok(de->dev)) {
913 netif_carrier_off(de->dev);
914 netif_info(de, link, de->dev, "link down\n");
915 }
916}
917
/* Program the SIA for de->media_type and set the duplex bit in MacMode.
 * The CSR13=0 write first takes the SIA out of operation before the new
 * CSR14/15/13 values are loaded; the DMA engines should be stopped. */
static void de_set_media (struct de_private *de)
{
	unsigned media = de->media_type;
	u32 macmode = dr32(MacMode);

	if (de_is_running(de))
		netdev_warn(de->dev, "chip is running while changing media!\n");

	if (de->de21040)
		dw32(CSR11, FULL_DUPLEX_MAGIC);
	dw32(CSR13, 0);		/* reset/idle the SIA */
	dw32(CSR14, de->media[media].csr14);
	dw32(CSR15, de->media[media].csr15);
	dw32(CSR13, de->media[media].csr13);

	/* give the SIA time to settle before touching MacMode */
	mdelay(10);

	if (media == DE_MEDIA_TP_FD)
		macmode |= FullDuplex;
	else
		macmode &= ~FullDuplex;

	netif_info(de, link, de->dev, "set link %s\n", media_name[media]);
	netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
		   dr32(MacMode), dr32(SIAStatus),
		   dr32(CSR13), dr32(CSR14), dr32(CSR15));
	netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
		   macmode, de->media[media].csr13,
		   de->media[media].csr14, de->media[media].csr15);
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}
953
954static void de_next_media (struct de_private *de, const u32 *media,
955 unsigned int n_media)
956{
957 unsigned int i;
958
959 for (i = 0; i < n_media; i++) {
960 if (de_ok_to_advertise(de, media[i])) {
961 de->media_type = media[i];
962 return;
963 }
964 }
965}
966
/* 21040 link-poll timer: if the SIA reports a connection, confirm/announce
 * link and re-arm at the slow rate; otherwise rotate between TP and AUI
 * (unless the user pinned the media) and retry at the fast rate. */
static void de21040_media_timer (unsigned long data)
{
	struct de_private *de = (struct de_private *) data;
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		/* TP also needs LinkFailStatus clear before we trust it */
		if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			netif_info(de, timer, dev, "%s link ok, status %x\n",
				   media_name[de->media_type], status);
		return;
	}

	de_link_down(de);

	if (de->media_lock)
		return;		/* user pinned the media: no rotation */

	/* alternate between the two 21040 ports */
	if (de->media_type == DE_MEDIA_AUI) {
		static const u32 next_state = DE_MEDIA_TP;
		de_next_media(de, &next_state, 1);
	} else {
		static const u32 next_state = DE_MEDIA_AUI;
		de_next_media(de, &next_state, 1);
	}

	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

	/* falls through: re-arm at the fast no-link poll rate */
no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
		   media_name[de->media_type], status);
}
1017
1018static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1019{
1020 switch (new_media) {
1021 case DE_MEDIA_TP_AUTO:
1022 if (!(de->media_advertise & ADVERTISED_Autoneg))
1023 return 0;
1024 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1025 return 0;
1026 break;
1027 case DE_MEDIA_BNC:
1028 if (!(de->media_advertise & ADVERTISED_BNC))
1029 return 0;
1030 break;
1031 case DE_MEDIA_AUI:
1032 if (!(de->media_advertise & ADVERTISED_AUI))
1033 return 0;
1034 break;
1035 case DE_MEDIA_TP:
1036 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1037 return 0;
1038 break;
1039 case DE_MEDIA_TP_FD:
1040 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1041 return 0;
1042 break;
1043 }
1044
1045 return 1;
1046}
1047
/* 21041 link-poll timer: confirm link at the slow rate when the SIA shows
 * a connection; otherwise prefer any port the SIA reports activity on,
 * else rotate through AUI/BNC/TP-auto, reprogram the SIA, and retry at
 * the fast rate. */
static void de21041_media_timer (unsigned long data)
{
	struct de_private *de = (struct de_private *) data;
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	/* clear port-activity latch bits */
	dw32(SIAStatus, NonselPortActive | SelPortActive);

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		/* TP media additionally need LinkFailStatus clear */
		if ((de->media_type == DE_MEDIA_TP_AUTO ||
		     de->media_type == DE_MEDIA_TP ||
		     de->media_type == DE_MEDIA_TP_FD) &&
		    (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			netif_info(de, timer, dev,
				   "%s link ok, mode %x status %x\n",
				   media_name[de->media_type],
				   dr32(MacMode), status);
		return;
	}

	de_link_down(de);

	/* if media is locked, simply reprogram the current media */
	if (de->media_lock)
		goto set_media;

	/* the SIA saw activity on the port we are not using */
	if (status & NonselPortActive) {
		unsigned int have_media = 1;

		/* if current media is AUI or BNC, the other port is TP */
		if (de->media_type == DE_MEDIA_AUI ||
		    de->media_type == DE_MEDIA_BNC) {
			if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
				de->media_type = DE_MEDIA_TP_AUTO;
			else
				have_media = 0;
		}

		/* current media is TP and the board only has BNC */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
			 de_ok_to_advertise(de, DE_MEDIA_BNC))
			de->media_type = DE_MEDIA_BNC;

		/* current media is TP and the board only has AUI */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
			 de_ok_to_advertise(de, DE_MEDIA_AUI))
			de->media_type = DE_MEDIA_AUI;

		/* nothing suitable to switch to */
		else
			have_media = 0;

		if (have_media)
			goto set_media;
	}

	/* No port activity hint: rotate to the next advertisable media,
	 * starting from whatever follows the current one. */
	if (de->media_type == DE_MEDIA_AUI) {
		static const u32 next_states[] = {
			DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else if (de->media_type == DE_MEDIA_BNC) {
		static const u32 next_states[] = {
			DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else {
		static const u32 next_states[] = {
			DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	}

set_media:
	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

	/* falls through: re-arm at the fast no-link poll rate */
no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
		   media_name[de->media_type], status);
}
1153
/* Handle LinkPass/LinkFail status bits from the IRQ handler (de->lock
 * held).  LinkPass on AUI/BNC means TP came alive: switch to TP-auto if
 * permitted.  LinkFail on TP media drops carrier and speeds up the poll
 * timer. */
static void de_media_interrupt (struct de_private *de, u32 status)
{
	if (status & LinkPass) {
		/* Ignore if current media is AUI or BNC and we can't
		 * switch (locked, or TP-auto not advertisable). */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC) &&
		    (de->media_lock ||
		     !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
			return;

		/* LinkPass implies the TP port is live: move over to it */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC)) {
			de->media_type = DE_MEDIA_TP_AUTO;
			de_stop_rxtx(de);
			de_set_media(de);
			de_start_rxtx(de);
		}
		de_link_up(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
		return;
	}

	BUG_ON(!(status & LinkFail));

	/* LinkFail is only meaningful on the TP media types */
	if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
	    de->media_type != DE_MEDIA_BNC) {
		de_link_down(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	}
}
1184
/* Software-reset the MAC and reload BusMode.
 * Returns 0 on success, -EBUSY if the chip won't go idle, -ENODEV if the
 * device appears absent (register reads all-ones). */
static int de_reset_mac (struct de_private *de)
{
	u32 status, tmp;

	/* bail early if the device has vanished from the bus */
	if (dr32(BusMode) == 0xffffffff)
		return -EBUSY;

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	dw32 (BusMode, CmdReset);
	mdelay (1);

	dw32 (BusMode, de_bus_mode);
	mdelay (1);

	/* a few extra reads to let the reset propagate */
	for (tmp = 0; tmp < 5; tmp++) {
		dr32 (BusMode);
		mdelay (1);
	}

	mdelay (1);

	status = dr32(MacStatus);
	if (status & (RxState | TxState))
		return -EBUSY;
	if (status == 0xffffffff)
		return -ENODEV;
	return 0;
}
1218
1219static void de_adapter_wake (struct de_private *de)
1220{
1221 u32 pmctl;
1222
1223 if (de->de21040)
1224 return;
1225
1226 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1227 if (pmctl & PM_Mask) {
1228 pmctl &= ~PM_Mask;
1229 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1230
1231
1232 msleep(10);
1233 }
1234}
1235
1236static void de_adapter_sleep (struct de_private *de)
1237{
1238 u32 pmctl;
1239
1240 if (de->de21040)
1241 return;
1242
1243 dw32(CSR13, 0);
1244 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1245 pmctl |= PM_Sleep;
1246 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1247}
1248
/* Full hardware bring-up: wake, reset, program media, load ring base
 * addresses, start TX/RX, unmask interrupts, and set the RX filter.
 * Returns 0 or the error from de_reset_mac(). */
static int de_init_hw (struct de_private *de)
{
	struct net_device *dev = de->dev;
	u32 macmode;
	int rc;

	de_adapter_wake(de);

	macmode = dr32(MacMode) & ~MacModeClear;

	rc = de_reset_mac(de);
	if (rc)
		return rc;

	de_set_media(de);	/* also sets the duplex bit in MacMode */

	dw32(RxRingAddr, de->ring_dma);
	/* TX ring immediately follows the RX ring in the DMA block */
	dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));

	dw32(MacMode, RxTx | macmode);

	dr32(RxMissed);		/* self-clearing: reset the missed counter */

	dw32(IntrMask, de_intr_mask);

	de_set_rx_mode(dev);

	return 0;
}
1278
/* Allocate and map an skb for every RX slot and hand all descriptors to
 * the chip.  On allocation failure the rings are cleaned and -ENOMEM is
 * returned. */
static int de_refill_rx (struct de_private *de)
{
	unsigned i;

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(de->rx_buf_sz);
		if (!skb)
			goto err_out;

		skb->dev = de->dev;

		/* NOTE(review): pci_map_single() result is not checked for
		 * mapping errors here — confirm whether that matters on the
		 * platforms this driver targets. */
		de->rx_skb[i].mapping = pci_map_single(de->pdev,
			skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
		de->rx_skb[i].skb = skb;

		de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
		if (i == (DE_RX_RING_SIZE - 1))
			de->rx_ring[i].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
		de->rx_ring[i].addr2 = 0;
	}

	return 0;

err_out:
	de_clean_rings(de);
	return -ENOMEM;
}
1312
1313static int de_init_rings (struct de_private *de)
1314{
1315 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1316 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1317
1318 de->rx_tail = 0;
1319 de->tx_head = de->tx_tail = 0;
1320
1321 return de_refill_rx (de);
1322}
1323
1324static int de_alloc_rings (struct de_private *de)
1325{
1326 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1327 if (!de->rx_ring)
1328 return -ENOMEM;
1329 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1330 return de_init_rings(de);
1331}
1332
/* Release every buffer attached to both rings and reset the descriptors
 * (keeping only the RingEnd wrap bits).  Pending TX packets are counted
 * as dropped; the sentinel DUMMY/SETUP markers hold no real skb. */
static void de_clean_rings (struct de_private *de)
{
	unsigned i;

	memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
	de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	wmb();
	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	wmb();

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		if (de->rx_skb[i].skb) {
			pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
					 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(de->rx_skb[i].skb);
		}
	}

	for (i = 0; i < DE_TX_RING_SIZE; i++) {
		struct sk_buff *skb = de->tx_skb[i].skb;
		if ((skb) && (skb != DE_DUMMY_SKB)) {
			if (skb != DE_SETUP_SKB) {
				/* real packet that never completed */
				de->net_stats.tx_dropped++;
				pci_unmap_single(de->pdev,
					de->tx_skb[i].mapping,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb(skb);
			} else {
				/* setup frame: unmap only, nothing to free */
				pci_unmap_single(de->pdev,
					de->tx_skb[i].mapping,
					sizeof(de->setup_frame),
					PCI_DMA_TODEVICE);
			}
		}
	}

	memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
	memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
}
1373
/* Release all ring buffers, then free the coherent descriptor memory
 * allocated by de_alloc_rings() and clear the ring pointers.
 */
static void de_free_rings (struct de_private *de)
{
	de_clean_rings(de);
	pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
	de->rx_ring = NULL;
	de->tx_ring = NULL;
}
1381
/* net_device_ops.ndo_open: bring the interface up.
 * Sizes RX buffers for the current MTU, allocates the rings, masks all
 * chip interrupts *before* installing the shared IRQ handler, then
 * initializes the hardware and arms the media-watch timer.
 */
static int de_open (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	int rc;

	netif_dbg(de, ifup, dev, "enabling interface\n");

	/* default buffer for standard MTU, otherwise MTU plus slack */
	de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	rc = de_alloc_rings(de);
	if (rc) {
		netdev_err(dev, "ring allocation failure, err=%d\n", rc);
		return rc;
	}

	/* mask everything before the shared handler can be entered */
	dw32(IntrMask, 0);

	rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "IRQ %d request failure, err=%d\n",
			   dev->irq, rc);
		goto err_out_free;
	}

	rc = de_init_hw(de);
	if (rc) {
		netdev_err(dev, "h/w init failure, err=%d\n", rc);
		goto err_out_free_irq;
	}

	netif_start_queue(dev);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);

	return 0;

err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_free:
	de_free_rings(de);
	return rc;
}
1423
/* net_device_ops.ndo_stop: take the interface down.
 * Stops the media timer, halts the hardware under the driver lock,
 * releases the IRQ and rings, then puts the adapter to sleep.
 */
static int de_close (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned long flags;

	netif_dbg(de, ifdown, dev, "disabling interface\n");

	del_timer_sync(&de->media_timer);

	spin_lock_irqsave(&de->lock, flags);
	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&de->lock, flags);

	/* hardware is quiesced; safe to drop the handler and buffers */
	free_irq(dev->irq, dev);

	de_free_rings(de);
	de_adapter_sleep(de);
	return 0;
}
1445
/* net_device_ops.ndo_tx_timeout: the watchdog fired because TX stalled.
 * Dumps chip state, fully stops the NIC (with the IRQ disabled around
 * the stop to keep the handler out), rebuilds the rings and restarts
 * the hardware, then re-enables the queue.
 */
static void de_tx_timeout (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);

	netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
		   dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
		   de->rx_tail, de->tx_head, de->tx_tail);

	del_timer_sync(&de->media_timer);

	disable_irq(dev->irq);
	spin_lock_irq(&de->lock);

	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	spin_unlock_irq(&de->lock);
	enable_irq(dev->irq);

	/* harvest the hardware counters before the rings are wiped */

	__de_get_stats(de);

	/* ensure no handler instance is still running, then rebuild */
	synchronize_irq(dev->irq);
	de_clean_rings(de);

	de_init_rings(de);

	de_init_hw(de);

	netif_wake_queue(dev);
}
1478
/* Copy the chip's CSR block into @buf for an ethtool register dump.
 * CSRs are spaced 8 bytes apart in MMIO space, hence dr32(i * 8).
 * Caller holds de->lock.
 */
static void __de_get_regs(struct de_private *de, u8 *buf)
{
	int i;
	u32 *rbuf = (u32 *)buf;


	for (i = 0; i < DE_NUM_REGS; i++)
		rbuf[i] = dr32(i * 8);


	/* reading CSR8 cleared the missed-frame counter; fold the
	 * captured value back into the driver's statistics */
	de_rx_missed(de, rbuf[8]);
}
1491
1492static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1493{
1494 ecmd->supported = de->media_supported;
1495 ecmd->transceiver = XCVR_INTERNAL;
1496 ecmd->phy_address = 0;
1497 ecmd->advertising = de->media_advertise;
1498
1499 switch (de->media_type) {
1500 case DE_MEDIA_AUI:
1501 ecmd->port = PORT_AUI;
1502 break;
1503 case DE_MEDIA_BNC:
1504 ecmd->port = PORT_BNC;
1505 break;
1506 default:
1507 ecmd->port = PORT_TP;
1508 break;
1509 }
1510
1511 ethtool_cmd_speed_set(ecmd, 10);
1512
1513 if (dr32(MacMode) & FullDuplex)
1514 ecmd->duplex = DUPLEX_FULL;
1515 else
1516 ecmd->duplex = DUPLEX_HALF;
1517
1518 if (de->media_lock)
1519 ecmd->autoneg = AUTONEG_DISABLE;
1520 else
1521 ecmd->autoneg = AUTONEG_ENABLE;
1522
1523
1524
1525 return 0;
1526}
1527
/* Apply a new link configuration from ethtool.  Validates that the
 * request is something this 10 Mbit chip can do, maps the requested
 * port/duplex/autoneg to an internal media type, and if anything
 * actually changed, restarts the link on the new media.
 * Caller holds de->lock.  Returns 0 or -EINVAL.
 */
static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
{
	u32 new_media;
	unsigned int media_lock;

	/* reject anything the hardware cannot do */
	if (ethtool_cmd_speed(ecmd) != 10)
		return -EINVAL;
	if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
		return -EINVAL;
	if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
		return -EINVAL;
	/* the 21040 has no BNC port */
	if (de->de21040 && ecmd->port == PORT_BNC)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_INTERNAL)
		return -EINVAL;
	if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	if (ecmd->advertising & ~de->media_supported)
		return -EINVAL;
	if (ecmd->autoneg == AUTONEG_ENABLE &&
	    (!(ecmd->advertising & ADVERTISED_Autoneg)))
		return -EINVAL;

	/* map the requested port to an internal media type, and check
	 * the advertising mask is consistent with that port */
	switch (ecmd->port) {
	case PORT_AUI:
		new_media = DE_MEDIA_AUI;
		if (!(ecmd->advertising & ADVERTISED_AUI))
			return -EINVAL;
		break;
	case PORT_BNC:
		new_media = DE_MEDIA_BNC;
		if (!(ecmd->advertising & ADVERTISED_BNC))
			return -EINVAL;
		break;
	default:
		if (ecmd->autoneg == AUTONEG_ENABLE)
			new_media = DE_MEDIA_TP_AUTO;
		else if (ecmd->duplex == DUPLEX_FULL)
			new_media = DE_MEDIA_TP_FD;
		else
			new_media = DE_MEDIA_TP;
		if (!(ecmd->advertising & ADVERTISED_TP))
			return -EINVAL;
		if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
			return -EINVAL;
		break;
	}

	media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;

	/* nothing changed: avoid a pointless link bounce */
	if ((new_media == de->media_type) &&
	    (media_lock == de->media_lock) &&
	    (ecmd->advertising == de->media_advertise))
		return 0;

	/* drop the link, stop DMA, switch media, then restart */
	de_link_down(de);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	de_stop_rxtx(de);

	de->media_type = new_media;
	de->media_lock = media_lock;
	de->media_advertise = ecmd->advertising;
	de_set_media(de);
	if (netif_running(de->dev))
		de_start_rxtx(de);

	return 0;
}
1596
1597static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1598{
1599 struct de_private *de = netdev_priv(dev);
1600
1601 strcpy (info->driver, DRV_NAME);
1602 strcpy (info->version, DRV_VERSION);
1603 strcpy (info->bus_info, pci_name(de->pdev));
1604 info->eedump_len = DE_EEPROM_SIZE;
1605}
1606
/* ethtool_ops.get_regs_len: size in bytes of the dump produced by
 * de_get_regs(). */
static int de_get_regs_len(struct net_device *dev)
{
	return DE_REGS_SIZE;
}
1611
1612static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1613{
1614 struct de_private *de = netdev_priv(dev);
1615 int rc;
1616
1617 spin_lock_irq(&de->lock);
1618 rc = __de_get_settings(de, ecmd);
1619 spin_unlock_irq(&de->lock);
1620
1621 return rc;
1622}
1623
1624static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1625{
1626 struct de_private *de = netdev_priv(dev);
1627 int rc;
1628
1629 spin_lock_irq(&de->lock);
1630 rc = __de_set_settings(de, ecmd);
1631 spin_unlock_irq(&de->lock);
1632
1633 return rc;
1634}
1635
1636static u32 de_get_msglevel(struct net_device *dev)
1637{
1638 struct de_private *de = netdev_priv(dev);
1639
1640 return de->msg_enable;
1641}
1642
1643static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1644{
1645 struct de_private *de = netdev_priv(dev);
1646
1647 de->msg_enable = msglvl;
1648}
1649
1650static int de_get_eeprom(struct net_device *dev,
1651 struct ethtool_eeprom *eeprom, u8 *data)
1652{
1653 struct de_private *de = netdev_priv(dev);
1654
1655 if (!de->ee_data)
1656 return -EOPNOTSUPP;
1657 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1658 (eeprom->len != DE_EEPROM_SIZE))
1659 return -EINVAL;
1660 memcpy(data, de->ee_data, eeprom->len);
1661
1662 return 0;
1663}
1664
/* ethtool_ops.nway_reset: restart TP autonegotiation by clearing the
 * NWay state bits in the SIA status register and setting NWayRestart.
 * Only valid when the current media is DE_MEDIA_TP_AUTO.
 */
static int de_nway_reset(struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 status;

	if (de->media_type != DE_MEDIA_TP_AUTO)
		return -EINVAL;
	if (netif_carrier_ok(de->dev))
		de_link_down(de);

	status = dr32(SIAStatus);
	dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
	netif_info(de, link, dev, "link nway restart, status %x,%x\n",
		   status, dr32(SIAStatus));
	return 0;
}
1681
/* ethtool_ops.get_regs: dump the CSR block under the driver lock.
 * The version word encodes the dump-format revision in the upper bits
 * and the chip flavor (21040 vs 21041) in the low bit.
 */
static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *data)
{
	struct de_private *de = netdev_priv(dev);

	regs->version = (DE_REGS_VER << 2) | de->de21040;

	spin_lock_irq(&de->lock);
	__de_get_regs(de, data);
	spin_unlock_irq(&de->lock);
}
1693
/* ethtool entry points exported by this driver */
static const struct ethtool_ops de_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= de_get_drvinfo,
	.get_regs_len		= de_get_regs_len,
	.get_settings		= de_get_settings,
	.set_settings		= de_set_settings,
	.get_msglevel		= de_get_msglevel,
	.set_msglevel		= de_set_msglevel,
	.get_eeprom		= de_get_eeprom,
	.nway_reset		= de_nway_reset,
	.get_regs		= de_get_regs,
};
1706
/* Read the six MAC address bytes out of the 21040's address ROM.
 * Writing 0 to ROMCmd starts the readout; each subsequent ROMCmd read
 * returns a negative value while the byte is not yet ready, so poll
 * (bounded by boguscnt) until the value goes non-negative.
 */
static void __devinit de21040_get_mac_address (struct de_private *de)
{
	unsigned i;

	dw32 (ROMCmd, 0);	/* reset the ROM read pointer */
	udelay(5);

	for (i = 0; i < 6; i++) {
		int value, boguscnt = 100000;
		do {
			value = dr32(ROMCmd);
			rmb();
		} while (value < 0 && --boguscnt > 0);
		de->dev->dev_addr[i] = value;
		udelay(1);
		if (boguscnt <= 0)
			pr_warn("timeout reading 21040 MAC address byte %u\n",
				i);
	}
}
1727
1728static void __devinit de21040_get_media_info(struct de_private *de)
1729{
1730 unsigned int i;
1731
1732 de->media_type = DE_MEDIA_TP;
1733 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1734 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1735 de->media_advertise = de->media_supported;
1736
1737 for (i = 0; i < DE_MAX_MEDIA; i++) {
1738 switch (i) {
1739 case DE_MEDIA_AUI:
1740 case DE_MEDIA_TP:
1741 case DE_MEDIA_TP_FD:
1742 de->media[i].type = i;
1743 de->media[i].csr13 = t21040_csr13[i];
1744 de->media[i].csr14 = t21040_csr14[i];
1745 de->media[i].csr15 = t21040_csr15[i];
1746 break;
1747 default:
1748 de->media[i].type = DE_MEDIA_INVALID;
1749 break;
1750 }
1751 }
1752}
1753
1754
/* Bit-bang one 16-bit word out of the serial (93C46-style) EEPROM
 * behind the ROMCmd register.  @location is the word address,
 * @addr_len the number of address bits (6 or 8 depending on part).
 * Each data bit is clocked by toggling EE_SHIFT_CLK; the readl()
 * after every writel() flushes posted writes and paces the bus.
 * The exact write/read sequence implements the EEPROM's serial
 * protocol — do not reorder.
 */
static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
{
	int i;
	unsigned retval = 0;
	void __iomem *ee_addr = regs + ROMCmd;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	/* assert chip select */
	writel(EE_ENB & ~EE_CS, ee_addr);
	writel(EE_ENB, ee_addr);

	/* shift out the read command and address, MSB first */
	for (i = 4 + addr_len; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writel(EE_ENB | dataval, ee_addr);
		readl(ee_addr);
		writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
	}
	writel(EE_ENB, ee_addr);
	readl(ee_addr);

	/* clock in the 16 data bits */
	for (i = 16; i > 0; i--) {
		writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		writel(EE_ENB, ee_addr);
		readl(ee_addr);
	}

	/* deselect the EEPROM */
	writel(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
1789
/* Read and parse the 21041's SROM: extract the MAC address, the
 * default media, and per-media CSR values from the info-leaf media
 * blocks.  Any structural inconsistency falls back to a conservative
 * "all media supported" default (bad_srom); media slots the SROM did
 * not fill get chip-revision-appropriate CSR defaults (fill_defaults).
 */
static void __devinit de21041_get_srom_info (struct de_private *de)
{
	unsigned i, sa_offset = 0, ofs;
	u8 ee_data[DE_EEPROM_SIZE + 6] = {};
	/* probe the EEPROM's address width: 8-bit parts echo a marker bit */
	unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
	struct de_srom_info_leaf *il;
	void *bufp;

	/* pull the whole SROM into ee_data, word by word */
	for (i = 0; i < DE_EEPROM_WORDS; i++)
		((__le16 *)ee_data)[i] =
			cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));

	/* Old-format SROMs store the station address at offset 0 and
	 * repeat it at 16; if the two copies differ this is the newer
	 * layout, where the address lives at offset 20 instead.
	 * (Skipped on Cobalt boards — presumably their SROMs break this
	 * heuristic; TODO confirm.)
	 */
#ifndef CONFIG_MIPS_COBALT

	for (i = 0; i < 8; i ++)
		if (ee_data[i] != ee_data[16+i])
			sa_offset = 20;

#endif

	/* station address */
	for (i = 0; i < 6; i ++)
		de->dev->dev_addr[i] = ee_data[i + sa_offset];

	/* locate the info leaf and sanity-check that it fits */
	ofs = ee_data[SROMC0InfoLeaf];
	if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
		goto bad_srom;


	il = (struct de_srom_info_leaf *) &ee_data[ofs];

	/* all declared media blocks must lie inside the image */
	if (il->n_blocks == 0)
		goto bad_srom;
	if ((sizeof(ee_data) - ofs) <
	    (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
		goto bad_srom;

	/* translate the SROM's default-media code to our media type */
	switch (get_unaligned(&il->default_media)) {
	case 0x0001: de->media_type = DE_MEDIA_BNC; break;
	case 0x0002: de->media_type = DE_MEDIA_AUI; break;
	case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
	default: de->media_type = DE_MEDIA_TP_AUTO; break;
	}

	if (netif_msg_probe(de))
		pr_info("de%d: SROM leaf offset %u, default media %s\n",
			de->board_idx, ofs, media_name[de->media_type]);

	/* start with every media slot invalid; 0xffff CSR values mark
	 * "use default" for the fill_defaults pass below */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		de->media[i].type = DE_MEDIA_INVALID;
		de->media[i].csr13 = 0xffff;
		de->media[i].csr14 = 0xffff;
		de->media[i].csr15 = 0xffff;
	}

	/* walk the variable-length media blocks following the leaf */
	bufp = ((void *)il) + sizeof(*il);
	for (i = 0; i < il->n_blocks; i++) {
		struct de_srom_media_block *ib = bufp;
		unsigned idx;

		/* media code → supported mask and media-table slot */
		switch(ib->opts & MediaBlockMask) {
		case 0:
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
					       | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		case 1:
			de->media_supported |= SUPPORTED_BNC;
			idx = DE_MEDIA_BNC;
			break;
		case 2:
			de->media_supported |= SUPPORTED_AUI;
			idx = DE_MEDIA_AUI;
			break;
		case 4:
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
					       | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP_FD;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		default:
			goto bad_srom;
		}

		de->media[idx].type = idx;

		if (netif_msg_probe(de))
			pr_info("de%d: media block #%u: %s",
				de->board_idx, i,
				media_name[de->media[idx].type]);

		bufp += sizeof (ib->opts);

		/* blocks with custom CSRs carry three extra words */
		if (ib->opts & MediaCustomCSRs) {
			de->media[idx].csr13 = get_unaligned(&ib->csr13);
			de->media[idx].csr14 = get_unaligned(&ib->csr14);
			de->media[idx].csr15 = get_unaligned(&ib->csr15);
			bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
				sizeof(ib->csr15);

			if (netif_msg_probe(de))
				pr_cont(" (%x,%x,%x)\n",
					de->media[idx].csr13,
					de->media[idx].csr14,
					de->media[idx].csr15);

		} else {
			if (netif_msg_probe(de))
				pr_cont("\n");
		}

		/* stop before walking off the end of the image */
		if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
			break;
	}

	de->media_advertise = de->media_supported;

fill_defaults:
	/* supply default CSR values for any slot the SROM left at
	 * 0xffff; pre-rev-0x20 chips need the workaround table */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		if (de->media[i].csr13 == 0xffff)
			de->media[i].csr13 = t21041_csr13[i];
		if (de->media[i].csr14 == 0xffff) {


			if (de->pdev->revision < 0x20)
				de->media[i].csr14 = t21041_csr14_brk[i];
			else
				de->media[i].csr14 = t21041_csr14[i];
		}
		if (de->media[i].csr15 == 0xffff)
			de->media[i].csr15 = t21041_csr15[i];
	}

	/* cache the SROM image for ethtool; NULL on OOM is tolerated
	 * (de_get_eeprom then reports -EOPNOTSUPP) */
	de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);

	return;

bad_srom:
	/* unparsable SROM: assume every media type is available */
	for (i = 0; i < DE_MAX_MEDIA; i++)
		de->media[i].type = i;
	de->media_supported =
		SUPPORTED_10baseT_Half |
		SUPPORTED_10baseT_Full |
		SUPPORTED_Autoneg |
		SUPPORTED_TP |
		SUPPORTED_AUI |
		SUPPORTED_BNC;
	goto fill_defaults;
}
1955
/* net_device entry points; address/MTU handling uses the generic
 * ethernet helpers */
static const struct net_device_ops de_netdev_ops = {
	.ndo_open		= de_open,
	.ndo_stop		= de_close,
	.ndo_set_rx_mode	= de_set_rx_mode,
	.ndo_start_xmit		= de_start_xmit,
	.ndo_get_stats		= de_get_stats,
	.ndo_tx_timeout 	= de_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1967
/* PCI probe: allocate the net_device, enable and map the PCI device,
 * reset the MAC, read the MAC address and media info from ROM/SROM,
 * and register with the network stack.  The adapter is left asleep
 * until de_open().  Errors unwind in strict reverse order of setup.
 */
static int __devinit de_init_one (struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct de_private *de;
	int rc;
	void __iomem *regs;
	unsigned long pciaddr;
	static int board_idx = -1;	/* counts probed boards */

	board_idx++;

#ifndef MODULE
	if (board_idx == 0)
		pr_info("%s\n", version);
#endif


	dev = alloc_etherdev(sizeof(struct de_private));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &de_netdev_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->ethtool_ops = &de_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	de = netdev_priv(dev);
	/* driver_data 0 in the PCI id table marks a 21040 */
	de->de21040 = ent->driver_data == 0 ? 1 : 0;
	de->pdev = pdev;
	de->dev = dev;
	de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
	de->board_idx = board_idx;
	spin_lock_init (&de->lock);
	init_timer(&de->media_timer);
	if (de->de21040)
		de->media_timer.function = de21040_media_timer;
	else
		de->media_timer.function = de21041_media_timer;
	de->media_timer.data = (unsigned long) de;

	netif_carrier_off(dev);

	/* wake up the PCI device and claim its regions */
	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;


	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable;

	/* IRQs 0 and 1 are never valid device interrupts */
	if (pdev->irq < 2) {
		rc = -EIO;
		pr_err("invalid irq (%d) for pci dev %s\n",
		       pdev->irq, pci_name(pdev));
		goto err_out_res;
	}

	dev->irq = pdev->irq;

	/* BAR 1 holds the memory-mapped CSRs */
	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev));
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
		rc = -EIO;
		pr_err("MMIO resource (%llx) too small on pci dev %s\n",
		       (unsigned long long)pci_resource_len(pdev, 1),
		       pci_name(pdev));
		goto err_out_res;
	}


	regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
		       (unsigned long long)pci_resource_len(pdev, 1),
		       pciaddr, pci_name(pdev));
		goto err_out_res;
	}
	dev->base_addr = (unsigned long) regs;
	de->regs = regs;

	de_adapter_wake(de);

	/* reset the chip before touching its ROM/SROM */
	rc = de_reset_mac(de);
	if (rc) {
		pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev));
		goto err_out_iomap;
	}


	/* read MAC address and media capabilities; the 21040 uses a
	 * simple address ROM, the 21041 a structured SROM */

	if (de->de21040) {
		de21040_get_mac_address(de);
		de21040_get_media_info(de);
	} else {
		de21041_get_srom_info(de);
	}


	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;


	netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
		    de->de21040 ? "21040" : "21041",
		    dev->base_addr,
		    dev->dev_addr,
		    dev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable bus mastering for descriptor DMA */
	pci_set_master(pdev);

	/* power down until the interface is opened */
	de_adapter_sleep(de);

	return 0;

err_out_iomap:
	kfree(de->ee_data);
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}
2110
/* PCI remove: unregister from the stack first (which closes the
 * device if open), then free resources in reverse order of probe.
 */
static void __devexit de_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct de_private *de = netdev_priv(dev);

	BUG_ON(!dev);
	unregister_netdev(dev);
	kfree(de->ee_data);
	iounmap(de->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
2125
2126#ifdef CONFIG_PM
2127
/* PM suspend: if the interface is running, stop the hardware under
 * the lock with the IRQ disabled, detach from the stack, harvest
 * statistics, drop all ring buffers and power the adapter down.
 * rtnl_lock() keeps the interface state stable throughout.
 */
static int de_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct de_private *de = netdev_priv(dev);

	rtnl_lock();
	if (netif_running (dev)) {
		del_timer_sync(&de->media_timer);

		disable_irq(dev->irq);
		spin_lock_irq(&de->lock);

		de_stop_hw(de);
		netif_stop_queue(dev);
		netif_device_detach(dev);
		netif_carrier_off(dev);

		spin_unlock_irq(&de->lock);
		enable_irq(dev->irq);

		/* capture counters before the rings go away */
		__de_get_stats(de);

		/* make sure no handler instance is still in flight */
		synchronize_irq(dev->irq);
		de_clean_rings(de);

		de_adapter_sleep(de);
		pci_disable_device(pdev);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}
2162
/* PM resume: re-enable the PCI device and restart the NIC if the
 * interface was running at suspend time; otherwise just re-attach.
 * Returns 0 on success or the pci_enable_device() error.
 *
 * Bug fix: the function previously computed @retval but always
 * returned 0, so a failed pci_enable_device() was reported to the PM
 * core as a successful resume.  Return @retval instead.
 */
static int de_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct de_private *de = netdev_priv(dev);
	int retval = 0;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out;		/* never detached: nothing to do */
	if (!netif_running(dev))
		goto out_attach;	/* was closed: just re-attach */
	if ((retval = pci_enable_device(pdev))) {
		netdev_err(dev, "pci_enable_device failed in resume\n");
		goto out;
	}
	pci_set_master(pdev);
	de_init_rings(de);
	de_init_hw(de);
out_attach:
	netif_device_attach(dev);
out:
	rtnl_unlock();
	return retval;
}
2187
2188#endif
2189
/* PCI driver glue; suspend/resume only when power management is on */
static struct pci_driver de_driver = {
	.name		= DRV_NAME,
	.id_table	= de_pci_tbl,
	.probe		= de_init_one,
	.remove		= __devexit_p(de_remove_one),
#ifdef CONFIG_PM
	.suspend	= de_suspend,
	.resume		= de_resume,
#endif
};
2200
/* Module init: announce the driver (modular builds only; built-in
 * builds print from the first probe) and register with the PCI core.
 */
static int __init de_init (void)
{
#ifdef MODULE
	pr_info("%s\n", version);
#endif
	return pci_register_driver(&de_driver);
}
2208
/* Module exit: unregister the PCI driver, detaching all boards. */
static void __exit de_exit (void)
{
	pci_unregister_driver (&de_driver);
}
2213
2214module_init(de_init);
2215module_exit(de_exit);
2216