/* de2104x.c: Intel/Digital 21040/1 series PCI Ethernet driver. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME    "de2104x"
#define DRV_VERSION "0.7"
#define DRV_RELDATE "Mar 17, 2004"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/compiler.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

static char version[] =
"PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = -1;
module_param (debug, int, 0);
MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
    defined(CONFIG_SPARC) || defined(__ia64__) || \
    defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif
module_param (rx_copybreak, int, 0);
MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");

#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
                           NETIF_MSG_PROBE | \
                           NETIF_MSG_LINK | \
                           NETIF_MSG_IFDOWN | \
                           NETIF_MSG_IFUP | \
                           NETIF_MSG_RX_ERR | \
                           NETIF_MSG_TX_ERR)

/* Descriptor skip length, in 32-bit longwords. */
#ifndef CONFIG_DE2104X_DSL
#define DSL 0
#else
#define DSL CONFIG_DE2104X_DSL
#endif

#define DE_RX_RING_SIZE 64
#define DE_TX_RING_SIZE 64
#define DE_RING_BYTES \
    ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
     (sizeof(struct de_desc) * DE_TX_RING_SIZE))
#define NEXT_TX(N)  (((N) + 1) & (DE_TX_RING_SIZE - 1))
#define NEXT_RX(N)  (((N) + 1) & (DE_RX_RING_SIZE - 1))
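/* Number of free TX descriptors, given head and tail.  One slot is always
 * kept empty so that tx_head == tx_tail unambiguously means "ring empty". */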
#define TX_BUFFS_AVAIL(CP)                                         \
    (((CP)->tx_tail <= (CP)->tx_head) ?                            \
      (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head :      \
      (CP)->tx_tail - (CP)->tx_head - 1)

#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
#define RX_OFFSET  2

#define DE_SETUP_SKB         ((struct sk_buff *) 1)
#define DE_DUMMY_SKB         ((struct sk_buff *) 2)
#define DE_SETUP_FRAME_WORDS 96
#define DE_EEPROM_WORDS      256
#define DE_EEPROM_SIZE       (DE_EEPROM_WORDS * sizeof(u16))
#define DE_MAX_MEDIA         5

#define DE_MEDIA_TP_AUTO     0
#define DE_MEDIA_BNC         1
#define DE_MEDIA_AUI         2
#define DE_MEDIA_TP          3
#define DE_MEDIA_TP_FD       4
#define DE_MEDIA_INVALID     DE_MAX_MEDIA
#define DE_MEDIA_FIRST       0
#define DE_MEDIA_LAST        (DE_MAX_MEDIA - 1)
#define DE_AUI_BNC           (SUPPORTED_AUI | SUPPORTED_BNC)

#define DE_TIMER_LINK        (60 * HZ)
#define DE_TIMER_NO_LINK     (5 * HZ)

#define DE_NUM_REGS          16
#define DE_REGS_SIZE         (DE_NUM_REGS * sizeof(u32))
#define DE_REGS_VER          1

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT           (6*HZ)

/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
   to support a pre-NWay full-duplex signaling mechanism using short frames.
   No one knows what it should be, but if left at its default value some
   10base2(!) packets trigger a full-duplex-request interrupt. */
#define FULL_DUPLEX_MAGIC    0x6969

enum {
    /* NIC registers */
    BusMode       = 0x00,
    TxPoll        = 0x08,
    RxPoll        = 0x10,
    RxRingAddr    = 0x18,
    TxRingAddr    = 0x20,
    MacStatus     = 0x28,
    MacMode       = 0x30,
    IntrMask      = 0x38,
    RxMissed      = 0x40,
    ROMCmd        = 0x48,
    CSR11         = 0x58,
    SIAStatus     = 0x60,
    CSR13         = 0x68,
    CSR14         = 0x70,
    CSR15         = 0x78,
    PCIPM         = 0x40,

    /* BusMode bits */
    CmdReset      = (1 << 0),
    CacheAlign16  = 0x00008000,
    BurstLen4     = 0x00000400,
    DescSkipLen   = (DSL << 2),

    /* Rx/TxPoll bits */
    NormalTxPoll  = (1 << 0),
    NormalRxPoll  = (1 << 0),

    /* Tx/Rx descriptor status bits */
    DescOwn       = (1 << 31),
    RxError       = (1 << 15),
    RxErrLong     = (1 << 7),
    RxErrCRC      = (1 << 1),
    RxErrFIFO     = (1 << 0),
    RxErrRunt     = (1 << 11),
    RxErrFrame    = (1 << 14),
    RingEnd       = (1 << 25),
    FirstFrag     = (1 << 29),
    LastFrag      = (1 << 30),
    TxError       = (1 << 15),
    TxFIFOUnder   = (1 << 1),
    TxLinkFail    = (1 << 2) | (1 << 10) | (1 << 11),
    TxMaxCol      = (1 << 8),
    TxOWC         = (1 << 9),
    TxJabber      = (1 << 14),
    SetupFrame    = (1 << 27),
    TxSwInt       = (1 << 31),

    /* MacStatus bits */
    IntrOK        = (1 << 16),
    IntrErr       = (1 << 15),
    RxIntr        = (1 << 6),
    RxEmpty       = (1 << 7),
    TxIntr        = (1 << 0),
    TxEmpty       = (1 << 2),
    PciErr        = (1 << 13),
    TxState       = (1 << 22) | (1 << 21) | (1 << 20),
    RxState       = (1 << 19) | (1 << 18) | (1 << 17),
    LinkFail      = (1 << 12),
    LinkPass      = (1 << 4),
    RxStopped     = (1 << 8),
    TxStopped     = (1 << 1),

    /* MacMode bits */
    TxEnable      = (1 << 13),
    RxEnable      = (1 << 1),
    RxTx          = TxEnable | RxEnable,
    FullDuplex    = (1 << 9),
    AcceptAllMulticast = (1 << 7),
    AcceptAllPhys = (1 << 6),
    BOCnt         = (1 << 5),
    MacModeClear  = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
                    RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,

    /* ROMCmd bits */
    EE_SHIFT_CLK  = 0x02, /* EEPROM shift clock. */
    EE_CS         = 0x01, /* EEPROM chip select. */
    EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
    EE_WRITE_0    = 0x01,
    EE_WRITE_1    = 0x05,
    EE_DATA_READ  = 0x08, /* Data from the EEPROM chip. */
    EE_ENB        = (0x4800 | EE_CS),

    /* The EEPROM commands include the always-set leading bit. */
    EE_READ_CMD   = 6,

    /* RxMissed bits */
    RxMissedOver  = (1 << 16),
    RxMissedMask  = 0xffff,

    /* SROM-related bits */
    SROMC0InfoLeaf = 27,
    MediaBlockMask = 0x3f,
    MediaCustomCSRs = (1 << 6),

    /* PCIPM bits */
    PM_Sleep      = (1 << 31),
    PM_Snooze     = (1 << 30),
    PM_Mask       = PM_Sleep | PM_Snooze,

    /* SIAStatus bits */
    NWayState     = (1 << 14) | (1 << 13) | (1 << 12),
    NWayRestart   = (1 << 12),
    NonselPortActive = (1 << 9),
    SelPortActive = (1 << 8),
    LinkFailStatus = (1 << 2),
    NetCxnErr     = (1 << 1),
};

static const u32 de_intr_mask =
    IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
    LinkPass | LinkFail | PciErr;

/*
 * Set the programmable burst length to 4 longwords for all:
 * DMA errors result without these values. Cache align 16 long.
 */
static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;

struct de_srom_media_block {
    u8  opts;
    u16 csr13;
    u16 csr14;
    u16 csr15;
} __packed;

struct de_srom_info_leaf {
    u16 default_media;
    u8  n_blocks;
    u8  unused;
} __packed;

struct de_desc {
    __le32 opts1;
    __le32 opts2;
    __le32 addr1;
    __le32 addr2;
#if DSL
    __le32 skip[DSL];
#endif
};

struct media_info {
    u16 type; /* DE_MEDIA_xxx */
    u16 csr13;
    u16 csr14;
    u16 csr15;
};

struct ring_info {
    struct sk_buff *skb;
    dma_addr_t     mapping;
};

struct de_private {
    unsigned tx_head;
    unsigned tx_tail;
    unsigned rx_tail;

    void __iomem *regs;
    struct net_device *dev;
    spinlock_t lock;

    struct de_desc *rx_ring;
    struct de_desc *tx_ring;
    struct ring_info tx_skb[DE_TX_RING_SIZE];
    struct ring_info rx_skb[DE_RX_RING_SIZE];
    unsigned rx_buf_sz;
    dma_addr_t ring_dma;

    u32 msg_enable;

    struct net_device_stats net_stats;

    struct pci_dev *pdev;

    u16 setup_frame[DE_SETUP_FRAME_WORDS];

    u32 media_type;
    u32 media_supported;
    u32 media_advertise;
    struct media_info media[DE_MAX_MEDIA];
    struct timer_list media_timer;

    u8 *ee_data;
    unsigned board_idx;
    unsigned de21040 : 1;
    unsigned media_lock : 1;
};

static void de_set_rx_mode (struct net_device *dev);
static void de_tx (struct de_private *de);
static void de_clean_rings (struct de_private *de);
static void de_media_interrupt (struct de_private *de, u32 status);
static void de21040_media_timer (unsigned long data);
static void de21041_media_timer (unsigned long data);
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);

static const struct pci_device_id de_pci_tbl[] = {
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
    { },
};
MODULE_DEVICE_TABLE(pci, de_pci_tbl);

static const char * const media_name[DE_MAX_MEDIA] = {
    "10baseT auto",
    "BNC",
    "AUI",
    "10baseT-HD",
    "10baseT-FD"
};

/* 21040 transceiver register settings:
 * TP AUTO (unused), BNC (unused), AUI, TP, TP FD */
static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };

/* 21041 transceiver register settings:
 * TP AUTO, BNC, AUI, TP, TP FD */
static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
/* CSR14 values for chip revisions with broken autonegotiation */
static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };

#define dr32(reg)      ioread32(de->regs + (reg))
#define dw32(reg, val) iowrite32((val), de->regs + (reg))

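/* Account for a receive descriptor that completed with an error.  A frame
 * that did not fit within a single buffer is counted as a length error
 * (unless the length field reads all-ones); otherwise the individual
 * RxErr* status bits are tallied into the appropriate counters. */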
static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
                            u32 status, u32 len)
{
    netif_dbg(de, rx_err, de->dev,
              "rx err, slot %d status 0x%x len %d\n",
              rx_tail, status, len);

    if ((status & 0x38000300) != 0x0300) {
        /* Ignore earlier buffers. */
        if ((status & 0xffff) != 0x7fff) {
            netif_warn(de, rx_err, de->dev,
                       "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
                       status);
            de->net_stats.rx_length_errors++;
        }
    } else if (status & RxError) {
        /* There was a fatal error. */
        de->net_stats.rx_errors++;
        if (status & 0x0890) de->net_stats.rx_length_errors++;
        if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
        if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
    }
}

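/* Receive up to DE_RX_RING_SIZE-1 frames per call.  Small frames
 * (<= rx_copybreak) are copied into a freshly allocated skb so the DMA
 * buffer can stay in the ring; larger frames are handed up directly and
 * the ring slot is refilled with a new buffer.  Each serviced slot is
 * handed back to the chip by setting DescOwn after a write barrier. */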
static void de_rx (struct de_private *de)
{
    unsigned rx_tail = de->rx_tail;
    unsigned rx_work = DE_RX_RING_SIZE;
    unsigned drop = 0;
    int rc;

    while (--rx_work) {
        u32 status, len;
        dma_addr_t mapping;
        struct sk_buff *skb, *copy_skb;
        unsigned copying_skb, buflen;

        skb = de->rx_skb[rx_tail].skb;
        BUG_ON(!skb);
        rmb();
        status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
        if (status & DescOwn)
            break;

        len = ((status >> 16) & 0x7ff) - 4;
        mapping = de->rx_skb[rx_tail].mapping;

        if (unlikely(drop)) {
            de->net_stats.rx_dropped++;
            goto rx_next;
        }

        if (unlikely((status & 0x38008300) != 0x0300)) {
            de_rx_err_acct(de, rx_tail, status, len);
            goto rx_next;
        }

        copying_skb = (len <= rx_copybreak);

        netif_dbg(de, rx_status, de->dev,
                  "rx slot %d status 0x%x len %d copying? %d\n",
                  rx_tail, status, len, copying_skb);

        buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
        copy_skb = netdev_alloc_skb(de->dev, buflen);
        if (unlikely(!copy_skb)) {
            de->net_stats.rx_dropped++;
            drop = 1;
            rx_work = 100;
            goto rx_next;
        }

        if (!copying_skb) {
            pci_unmap_single(de->pdev, mapping,
                             buflen, PCI_DMA_FROMDEVICE);
            skb_put(skb, len);

            mapping =
            de->rx_skb[rx_tail].mapping =
                pci_map_single(de->pdev, copy_skb->data,
                               buflen, PCI_DMA_FROMDEVICE);
            de->rx_skb[rx_tail].skb = copy_skb;
        } else {
            pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
            skb_reserve(copy_skb, RX_OFFSET);
            skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
                                      len);
            pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);

            /* We'll reuse the original ring buffer. */
            skb = copy_skb;
        }

        skb->protocol = eth_type_trans (skb, de->dev);

        de->net_stats.rx_packets++;
        de->net_stats.rx_bytes += skb->len;
        rc = netif_rx (skb);
        if (rc == NET_RX_DROP)
            drop = 1;

rx_next:
        if (rx_tail == (DE_RX_RING_SIZE - 1))
            de->rx_ring[rx_tail].opts2 =
                cpu_to_le32(RingEnd | de->rx_buf_sz);
        else
            de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
        de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
        wmb();
        de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
        rx_tail = NEXT_RX(rx_tail);
    }

    if (!rx_work)
        netdev_warn(de->dev, "rx work limit reached\n");

    de->rx_tail = rx_tail;
}

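/* Interrupt handler: acknowledge the status word, run the RX and TX
 * completion paths, and service link-change and PCI-error events.
 * Shared-IRQ safe: returns IRQ_NONE when the chip shows nothing pending. */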
static irqreturn_t de_interrupt (int irq, void *dev_instance)
{
    struct net_device *dev = dev_instance;
    struct de_private *de = netdev_priv(dev);
    u32 status;

    status = dr32(MacStatus);
    if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
        return IRQ_NONE;

    netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
              status, dr32(MacMode),
              de->rx_tail, de->tx_head, de->tx_tail);

    dw32(MacStatus, status);

    if (status & (RxIntr | RxEmpty)) {
        de_rx(de);
        if (status & RxEmpty)
            dw32(RxPoll, NormalRxPoll);
    }

    spin_lock(&de->lock);

    if (status & (TxIntr | TxEmpty))
        de_tx(de);

    if (status & (LinkPass | LinkFail))
        de_media_interrupt(de, status);

    spin_unlock(&de->lock);

    if (status & PciErr) {
        u16 pci_status;

        pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
        pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
        netdev_err(de->dev,
                   "PCI bus error, status=%08x, PCI status=%04x\n",
                   status, pci_status);
    }

    return IRQ_HANDLED;
}

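/* Reclaim completed TX descriptors: unmap the buffers, update the error
 * and byte counters on the last fragment of each frame, free the skbs,
 * and wake the queue once at least a quarter of the ring is free again.
 * Called under de->lock from the interrupt handler. */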
static void de_tx (struct de_private *de)
{
    unsigned tx_head = de->tx_head;
    unsigned tx_tail = de->tx_tail;

    while (tx_tail != tx_head) {
        struct sk_buff *skb;
        u32 status;

        rmb();
        status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
        if (status & DescOwn)
            break;

        skb = de->tx_skb[tx_tail].skb;
        BUG_ON(!skb);
        if (unlikely(skb == DE_DUMMY_SKB))
            goto next;

        if (unlikely(skb == DE_SETUP_SKB)) {
            pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
                             sizeof(de->setup_frame), PCI_DMA_TODEVICE);
            goto next;
        }

        pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
                         skb->len, PCI_DMA_TODEVICE);

        if (status & LastFrag) {
            if (status & TxError) {
                netif_dbg(de, tx_err, de->dev,
                          "tx err, status 0x%x\n",
                          status);
                de->net_stats.tx_errors++;
                if (status & TxOWC)
                    de->net_stats.tx_window_errors++;
                if (status & TxMaxCol)
                    de->net_stats.tx_aborted_errors++;
                if (status & TxLinkFail)
                    de->net_stats.tx_carrier_errors++;
                if (status & TxFIFOUnder)
                    de->net_stats.tx_fifo_errors++;
            } else {
                de->net_stats.tx_packets++;
                de->net_stats.tx_bytes += skb->len;
                netif_dbg(de, tx_done, de->dev,
                          "tx done, slot %d\n", tx_tail);
            }
            dev_kfree_skb_irq(skb);
        }

next:
        de->tx_skb[tx_tail].skb = NULL;

        tx_tail = NEXT_TX(tx_tail);
    }

    de->tx_tail = tx_tail;

    if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
        netif_wake_queue(de->dev);
}

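/* Queue one frame for transmission.  Each frame occupies exactly one
 * descriptor (FirstFrag | LastFrag); ownership is handed to the chip only
 * after a write barrier, and a TX poll demand is then issued. */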
static netdev_tx_t de_start_xmit (struct sk_buff *skb,
                                  struct net_device *dev)
{
    struct de_private *de = netdev_priv(dev);
    unsigned int entry, tx_free;
    u32 mapping, len, flags = FirstFrag | LastFrag;
    struct de_desc *txd;

    spin_lock_irq(&de->lock);

    tx_free = TX_BUFFS_AVAIL(de);
    if (tx_free == 0) {
        netif_stop_queue(dev);
        spin_unlock_irq(&de->lock);
        return NETDEV_TX_BUSY;
    }
    tx_free--;

    entry = de->tx_head;

    txd = &de->tx_ring[entry];

    len = skb->len;
    mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
    if (entry == (DE_TX_RING_SIZE - 1))
        flags |= RingEnd;
    if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
        flags |= TxSwInt;
    flags |= len;
    txd->opts2 = cpu_to_le32(flags);
    txd->addr1 = cpu_to_le32(mapping);

    de->tx_skb[entry].skb = skb;
    de->tx_skb[entry].mapping = mapping;
    wmb();

    txd->opts1 = cpu_to_le32(DescOwn);
    wmb();

    de->tx_head = NEXT_TX(entry);
    netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
              entry, skb->len);

    if (tx_free == 0)
        netif_stop_queue(dev);

    spin_unlock_irq(&de->lock);

    /* Trigger an immediate transmit demand. */
    dw32(TxPoll, NormalTxPoll);

    return NETDEV_TX_OK;
}

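/* The 21041 programs its receive filter with a 192-byte "setup frame"
 * sent through the TX ring.  Two layouts exist: a 512-bit multicast hash
 * table plus one physical address (used with more than 14 multicast
 * groups), and a list of up to 16 perfect-match addresses. */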
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
    struct de_private *de = netdev_priv(dev);
    u16 hash_table[32];
    struct netdev_hw_addr *ha;
    int i;
    u16 *eaddrs;

    memset(hash_table, 0, sizeof(hash_table));
    __set_bit_le(255, hash_table);  /* Broadcast entry */

    netdev_for_each_mc_addr(ha, dev) {
        int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

        __set_bit_le(index, hash_table);
    }

    for (i = 0; i < 32; i++) {
        *setup_frm++ = hash_table[i];
        *setup_frm++ = hash_table[i];
    }
    setup_frm = &de->setup_frame[13*6];

    /* Fill the final entry with our physical address. */
    eaddrs = (u16 *)dev->dev_addr;
    *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
    *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
    *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
    struct de_private *de = netdev_priv(dev);
    struct netdev_hw_addr *ha;
    u16 *eaddrs;

    /* We have <= 14 addresses so we can use the wonderful
       16-address perfect filtering of the Tulip. */
    netdev_for_each_mc_addr(ha, dev) {
        eaddrs = (u16 *) ha->addr;
        *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
        *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
        *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
    }
    /* Fill the unused entries with the broadcast address. */
    memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
    setup_frm = &de->setup_frame[15*6];

    /* Fill the final entry with our physical address. */
    eaddrs = (u16 *)dev->dev_addr;
    *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
    *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
    *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

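/* Program the receive filter.  Caller must hold de->lock.  Promiscuous and
 * all-multicast modes are handled purely through MacMode bits; otherwise a
 * setup frame is queued on the TX ring, preceded by a dummy descriptor
 * when the queue head is not at slot 0.  The dummy is handed to the chip
 * only after the setup descriptor itself is ready, so the chip never
 * races past it into an unfinished setup frame. */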
static void __de_set_rx_mode (struct net_device *dev)
{
    struct de_private *de = netdev_priv(dev);
    u32 macmode;
    unsigned int entry;
    u32 mapping;
    struct de_desc *txd;
    struct de_desc *dummy_txd = NULL;

    macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);

    if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
        macmode |= AcceptAllMulticast | AcceptAllPhys;
        goto out;
    }

    if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
        /* Too many to filter well -- accept all multicasts. */
        macmode |= AcceptAllMulticast;
        goto out;
    }

    /* Note that only the low shortword of each setup_frame entry is valid. */
    if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
        build_setup_frame_hash (de->setup_frame, dev);
    else
        build_setup_frame_perfect (de->setup_frame, dev);

    /*
     * Now add this frame to the Tx list.
     */

    entry = de->tx_head;

    /* Avoid a chip errata by prefixing a dummy entry. */
    if (entry != 0) {
        de->tx_skb[entry].skb = DE_DUMMY_SKB;

        dummy_txd = &de->tx_ring[entry];
        dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
                           cpu_to_le32(RingEnd) : 0;
        dummy_txd->addr1 = 0;

        /* Must set DescOwn later, to avoid a race with the chip. */

        entry = NEXT_TX(entry);
    }

    de->tx_skb[entry].skb = DE_SETUP_SKB;
    de->tx_skb[entry].mapping = mapping =
        pci_map_single (de->pdev, de->setup_frame,
                        sizeof (de->setup_frame), PCI_DMA_TODEVICE);

    /* Put the setup frame on the Tx list. */
    txd = &de->tx_ring[entry];
    if (entry == (DE_TX_RING_SIZE - 1))
        txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
    else
        txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
    txd->addr1 = cpu_to_le32(mapping);
    wmb();

    txd->opts1 = cpu_to_le32(DescOwn);
    wmb();

    if (dummy_txd) {
        dummy_txd->opts1 = cpu_to_le32(DescOwn);
        wmb();
    }

    de->tx_head = NEXT_TX(entry);

    if (TX_BUFFS_AVAIL(de) == 0)
        netif_stop_queue(dev);

    /* Trigger an immediate transmit demand. */
    dw32(TxPoll, NormalTxPoll);

out:
    if (macmode != dr32(MacMode))
        dw32(MacMode, macmode);
}

static void de_set_rx_mode (struct net_device *dev)
{
    unsigned long flags;
    struct de_private *de = netdev_priv(dev);

    spin_lock_irqsave (&de->lock, flags);
    __de_set_rx_mode(dev);
    spin_unlock_irqrestore (&de->lock, flags);
}

static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
{
    if (unlikely(rx_missed & RxMissedOver))
        de->net_stats.rx_missed_errors += RxMissedMask;
    else
        de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
}

static void __de_get_stats(struct de_private *de)
{
    u32 tmp = dr32(RxMissed); /* self-clearing */

    de_rx_missed(de, tmp);
}

static struct net_device_stats *de_get_stats(struct net_device *dev)
{
    struct de_private *de = netdev_priv(dev);

    /* The chip only reports frames it silently dropped. */
    spin_lock_irq(&de->lock);
    if (netif_running(dev) && netif_device_present(dev))
        __de_get_stats(de);
    spin_unlock_irq(&de->lock);

    return &de->net_stats;
}

static inline int de_is_running (struct de_private *de)
{
    return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
}

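/* Stop the transmit and receive processes and wait, in 100 usec steps, up
 * to ~1.3 ms for any in-flight DMA to drain before reporting a timeout. */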
static void de_stop_rxtx (struct de_private *de)
{
    u32 macmode;
    unsigned int i = 1300/100;

    macmode = dr32(MacMode);
    if (macmode & RxTx) {
        dw32(MacMode, macmode & ~RxTx);
        dr32(MacMode);
    }

    /* Wait until any in-flight frame completes.
     * Max time @ 10BT: 1500*8 b / 10 Mbps == 1200 us (+ 100 us margin).
     */
    while (--i) {
        if (!de_is_running(de))
            return;
        udelay(100);
    }

    netdev_warn(de->dev, "timeout expired, stopping DMA\n");
}

static inline void de_start_rxtx (struct de_private *de)
{
    u32 macmode;

    macmode = dr32(MacMode);
    if ((macmode & RxTx) != RxTx) {
        dw32(MacMode, macmode | RxTx);
        dr32(MacMode);
    }
}

static void de_stop_hw (struct de_private *de)
{

    udelay(5);
    dw32(IntrMask, 0);

    de_stop_rxtx(de);

    dw32(MacStatus, dr32(MacStatus));

    udelay(10);

    de->rx_tail = 0;
    de->tx_head = de->tx_tail = 0;
}

static void de_link_up(struct de_private *de)
{
    if (!netif_carrier_ok(de->dev)) {
        netif_carrier_on(de->dev);
        netif_info(de, link, de->dev, "link up, media %s\n",
                   media_name[de->media_type]);
    }
}

static void de_link_down(struct de_private *de)
{
    if (netif_carrier_ok(de->dev)) {
        netif_carrier_off(de->dev);
        netif_info(de, link, de->dev, "link down\n");
    }
}

static void de_set_media (struct de_private *de)
{
    unsigned media = de->media_type;
    u32 macmode = dr32(MacMode);

    if (de_is_running(de))
        netdev_warn(de->dev, "chip is running while changing media!\n");

    if (de->de21040)
        dw32(CSR11, FULL_DUPLEX_MAGIC);
    dw32(CSR13, 0); /* Reset phy */
    dw32(CSR14, de->media[media].csr14);
    dw32(CSR15, de->media[media].csr15);
    dw32(CSR13, de->media[media].csr13);

    /* must delay 10ms before writing to other registers,
     * especially CSR6
     */
    mdelay(10);

    if (media == DE_MEDIA_TP_FD)
        macmode |= FullDuplex;
    else
        macmode &= ~FullDuplex;

    netif_info(de, link, de->dev, "set link %s\n", media_name[media]);
    netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
               dr32(MacMode), dr32(SIAStatus),
               dr32(CSR13), dr32(CSR14), dr32(CSR15));
    netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
               macmode, de->media[media].csr13,
               de->media[media].csr14, de->media[media].csr15);
    if (macmode != dr32(MacMode))
        dw32(MacMode, macmode);
}

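/* Switch media_type to the first entry in @media that we are allowed to
 * advertise; media_type is left unchanged if none qualifies. */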
static void de_next_media (struct de_private *de, const u32 *media,
                           unsigned int n_media)
{
    unsigned int i;

    for (i = 0; i < n_media; i++) {
        if (de_ok_to_advertise(de, media[i])) {
            de->media_type = media[i];
            return;
        }
    }
}

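/* 21040 link poll: while searching for a live medium, alternate between
 * TP and AUI and reschedule quickly (DE_TIMER_NO_LINK); once carrier is
 * established, poll slowly (DE_TIMER_LINK). */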
static void de21040_media_timer (unsigned long data)
{
    struct de_private *de = (struct de_private *) data;
    struct net_device *dev = de->dev;
    u32 status = dr32(SIAStatus);
    unsigned int carrier;
    unsigned long flags;

    carrier = (status & NetCxnErr) ? 0 : 1;

    if (carrier) {
        if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
            goto no_link_yet;

        de->media_timer.expires = jiffies + DE_TIMER_LINK;
        add_timer(&de->media_timer);
        if (!netif_carrier_ok(dev))
            de_link_up(de);
        else
            netif_info(de, timer, dev, "%s link ok, status %x\n",
                       media_name[de->media_type], status);
        return;
    }

    de_link_down(de);

    if (de->media_lock)
        return;

    if (de->media_type == DE_MEDIA_AUI) {
        static const u32 next_state = DE_MEDIA_TP;
        de_next_media(de, &next_state, 1);
    } else {
        static const u32 next_state = DE_MEDIA_AUI;
        de_next_media(de, &next_state, 1);
    }

    spin_lock_irqsave(&de->lock, flags);
    de_stop_rxtx(de);
    spin_unlock_irqrestore(&de->lock, flags);
    de_set_media(de);
    de_start_rxtx(de);

no_link_yet:
    de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
    add_timer(&de->media_timer);

    netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
               media_name[de->media_type], status);
}

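/* Return non-zero if @new_media may be selected, given the capabilities
 * currently advertised in de->media_advertise. */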
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
{
    switch (new_media) {
    case DE_MEDIA_TP_AUTO:
        if (!(de->media_advertise & ADVERTISED_Autoneg))
            return 0;
        if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
            return 0;
        break;
    case DE_MEDIA_BNC:
        if (!(de->media_advertise & ADVERTISED_BNC))
            return 0;
        break;
    case DE_MEDIA_AUI:
        if (!(de->media_advertise & ADVERTISED_AUI))
            return 0;
        break;
    case DE_MEDIA_TP:
        if (!(de->media_advertise & ADVERTISED_10baseT_Half))
            return 0;
        break;
    case DE_MEDIA_TP_FD:
        if (!(de->media_advertise & ADVERTISED_10baseT_Full))
            return 0;
        break;
    }

    return 1;
}

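/* 21041 link poll: like the 21040 variant, but the SIA also reports
 * activity on the non-selected port, which is used as a hint to jump
 * directly to a medium that is actually seeing traffic. */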
static void de21041_media_timer (unsigned long data)
{
    struct de_private *de = (struct de_private *) data;
    struct net_device *dev = de->dev;
    u32 status = dr32(SIAStatus);
    unsigned int carrier;
    unsigned long flags;

    /* clear port active bits */
    dw32(SIAStatus, NonselPortActive | SelPortActive);

    carrier = (status & NetCxnErr) ? 0 : 1;

    if (carrier) {
        if ((de->media_type == DE_MEDIA_TP_AUTO ||
             de->media_type == DE_MEDIA_TP ||
             de->media_type == DE_MEDIA_TP_FD) &&
            (status & LinkFailStatus))
            goto no_link_yet;

        de->media_timer.expires = jiffies + DE_TIMER_LINK;
        add_timer(&de->media_timer);
        if (!netif_carrier_ok(dev))
            de_link_up(de);
        else
            netif_info(de, timer, dev,
                       "%s link ok, mode %x status %x\n",
                       media_name[de->media_type],
                       dr32(MacMode), status);
        return;
    }

    de_link_down(de);

    /* if media type locked, don't switch media */
    if (de->media_lock)
        goto set_media;

    /* if activity detected, use that as hint for new media type */
    if (status & NonselPortActive) {
        unsigned int have_media = 1;

        /* if AUI/BNC selected, then activity is on TP port */
        if (de->media_type == DE_MEDIA_AUI ||
            de->media_type == DE_MEDIA_BNC) {
            if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
                de->media_type = DE_MEDIA_TP_AUTO;
            else
                have_media = 0;
        }

        /* TP selected.  If there is only TP and BNC, then it's BNC */
        else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
                 de_ok_to_advertise(de, DE_MEDIA_BNC))
            de->media_type = DE_MEDIA_BNC;

        /* TP selected.  If there is only TP and AUI, then it's AUI */
        else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
                 de_ok_to_advertise(de, DE_MEDIA_AUI))
            de->media_type = DE_MEDIA_AUI;

        /* otherwise, ignore the hint */
        else
            have_media = 0;

        if (have_media)
            goto set_media;
    }

    /*
     * Absent or ambiguous activity hint, move to next advertised
     * media state.  If de->media_type is left unchanged, this
     * simply resets the PHY and reloads the current media settings.
     */
    if (de->media_type == DE_MEDIA_AUI) {
        static const u32 next_states[] = {
            DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
        };
        de_next_media(de, next_states, ARRAY_SIZE(next_states));
    } else if (de->media_type == DE_MEDIA_BNC) {
        static const u32 next_states[] = {
            DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
        };
        de_next_media(de, next_states, ARRAY_SIZE(next_states));
    } else {
        static const u32 next_states[] = {
            DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
        };
        de_next_media(de, next_states, ARRAY_SIZE(next_states));
    }

set_media:
    spin_lock_irqsave(&de->lock, flags);
    de_stop_rxtx(de);
    spin_unlock_irqrestore(&de->lock, flags);
    de_set_media(de);
    de_start_rxtx(de);

no_link_yet:
    de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
    add_timer(&de->media_timer);

    netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
               media_name[de->media_type], status);
}

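/* Handle LinkPass/LinkFail interrupts.  On LinkPass while an AUI/BNC
 * medium is selected, fall back to TP autonegotiation if permitted. */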
static void de_media_interrupt (struct de_private *de, u32 status)
{
    if (status & LinkPass) {
        /* Ignore if current media is AUI or BNC and we can't use TP */
        if ((de->media_type == DE_MEDIA_AUI ||
             de->media_type == DE_MEDIA_BNC) &&
            (de->media_lock ||
             !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
            return;

        if ((de->media_type == DE_MEDIA_AUI ||
             de->media_type == DE_MEDIA_BNC)) {
            de->media_type = DE_MEDIA_TP_AUTO;
            de_stop_rxtx(de);
            de_set_media(de);
            de_start_rxtx(de);
        }
        de_link_up(de);
        mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
        return;
    }

    BUG_ON(!(status & LinkFail));

    if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
        de->media_type != DE_MEDIA_BNC) {
        de_link_down(de);
        mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
    }
}

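/* Soft-reset the MAC and program the bus mode.  Returns -EBUSY if the DMA
 * engines fail to go idle, and -ENODEV if the chip has vanished from the
 * bus (registers read as all-ones). */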
static int de_reset_mac (struct de_private *de)
{
    u32 status, tmp;

    /*
     * Reset MAC.  de4x5.c and tulip.c examined for "advice"
     * on this.
     */

    if (dr32(BusMode) == 0xffffffff)
        return -EBUSY;

    /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
    dw32 (BusMode, CmdReset);
    mdelay (1);

    dw32 (BusMode, de_bus_mode);
    mdelay (1);

    for (tmp = 0; tmp < 5; tmp++) {
        dr32 (BusMode);
        mdelay (1);
    }

    mdelay (1);

    status = dr32(MacStatus);
    if (status & (RxState | TxState))
        return -EBUSY;
    if (status == 0xffffffff)
        return -ENODEV;
    return 0;
}

static void de_adapter_wake (struct de_private *de)
{
    u32 pmctl;

    if (de->de21040)
        return;

    pci_read_config_dword(de->pdev, PCIPM, &pmctl);
    if (pmctl & PM_Mask) {
        pmctl &= ~PM_Mask;
        pci_write_config_dword(de->pdev, PCIPM, pmctl);

        /* de4x5.c delays, so we do too */
        msleep(10);
    }
}

static void de_adapter_sleep (struct de_private *de)
{
    u32 pmctl;

    if (de->de21040)
        return;

    dw32(CSR13, 0); /* Reset phy */
    pci_read_config_dword(de->pdev, PCIPM, &pmctl);
    pmctl |= PM_Sleep;
    pci_write_config_dword(de->pdev, PCIPM, pmctl);
}

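/* Bring the chip from reset to a running state: wake the adapter, reset
 * the MAC, program the media, the ring base addresses, MacMode, the
 * interrupt mask, and finally the RX filter. */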
static int de_init_hw (struct de_private *de)
{
    struct net_device *dev = de->dev;
    u32 macmode;
    int rc;

    de_adapter_wake(de);

    macmode = dr32(MacMode) & ~MacModeClear;

    rc = de_reset_mac(de);
    if (rc)
        return rc;

    de_set_media(de); /* reset phy */

    dw32(RxRingAddr, de->ring_dma);
    dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));

    dw32(MacMode, RxTx | macmode);

    dr32(RxMissed); /* self-clearing */

    dw32(IntrMask, de_intr_mask);

    de_set_rx_mode(dev);

    return 0;
}

static int de_refill_rx (struct de_private *de)
{
    unsigned i;

    for (i = 0; i < DE_RX_RING_SIZE; i++) {
        struct sk_buff *skb;

        skb = netdev_alloc_skb(de->dev, de->rx_buf_sz);
        if (!skb)
            goto err_out;

        de->rx_skb[i].mapping = pci_map_single(de->pdev,
            skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
        de->rx_skb[i].skb = skb;

        de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
        if (i == (DE_RX_RING_SIZE - 1))
            de->rx_ring[i].opts2 =
                cpu_to_le32(RingEnd | de->rx_buf_sz);
        else
            de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
        de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
        de->rx_ring[i].addr2 = 0;
    }

    return 0;

err_out:
    de_clean_rings(de);
    return -ENOMEM;
}

static int de_init_rings (struct de_private *de)
{
    memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
    de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);

    de->rx_tail = 0;
    de->tx_head = de->tx_tail = 0;

    return de_refill_rx (de);
}

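/* The RX and TX rings share one coherent DMA allocation; the TX ring
 * starts immediately after the DE_RX_RING_SIZE RX descriptors. */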
static int de_alloc_rings (struct de_private *de)
{
    de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
    if (!de->rx_ring)
        return -ENOMEM;
    de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
    return de_init_rings(de);
}

static void de_clean_rings (struct de_private *de)
{
    unsigned i;

    memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
    de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
    wmb();
    memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
    de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
    wmb();

    for (i = 0; i < DE_RX_RING_SIZE; i++) {
        if (de->rx_skb[i].skb) {
            pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
                             de->rx_buf_sz, PCI_DMA_FROMDEVICE);
            dev_kfree_skb(de->rx_skb[i].skb);
        }
    }

    for (i = 0; i < DE_TX_RING_SIZE; i++) {
        struct sk_buff *skb = de->tx_skb[i].skb;
        if ((skb) && (skb != DE_DUMMY_SKB)) {
            if (skb != DE_SETUP_SKB) {
                de->net_stats.tx_dropped++;
                pci_unmap_single(de->pdev,
                    de->tx_skb[i].mapping,
                    skb->len, PCI_DMA_TODEVICE);
                dev_kfree_skb(skb);
            } else {
                pci_unmap_single(de->pdev,
                    de->tx_skb[i].mapping,
                    sizeof(de->setup_frame),
                    PCI_DMA_TODEVICE);
            }
        }
    }

    memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
    memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
}

static void de_free_rings (struct de_private *de)
{
    de_clean_rings(de);
    pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
    de->rx_ring = NULL;
    de->tx_ring = NULL;
}

static int de_open (struct net_device *dev)
{
    struct de_private *de = netdev_priv(dev);
    const int irq = de->pdev->irq;
    int rc;

    netif_dbg(de, ifup, dev, "enabling interface\n");

    de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

    rc = de_alloc_rings(de);
    if (rc) {
        netdev_err(dev, "ring allocation failure, err=%d\n", rc);
        return rc;
    }

    dw32(IntrMask, 0);

    rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev);
    if (rc) {
        netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc);
        goto err_out_free;
    }

    rc = de_init_hw(de);
    if (rc) {
        netdev_err(dev, "h/w init failure, err=%d\n", rc);
        goto err_out_free_irq;
    }

    netif_start_queue(dev);
    mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);

    return 0;

err_out_free_irq:
    free_irq(irq, dev);
err_out_free:
    de_free_rings(de);
    return rc;
}

static int de_close (struct net_device *dev)
{
    struct de_private *de = netdev_priv(dev);
    unsigned long flags;

    netif_dbg(de, ifdown, dev, "disabling interface\n");

    del_timer_sync(&de->media_timer);

    spin_lock_irqsave(&de->lock, flags);
    de_stop_hw(de);
    netif_stop_queue(dev);
    netif_carrier_off(dev);
    spin_unlock_irqrestore(&de->lock, flags);

    free_irq(de->pdev->irq, dev);

    de_free_rings(de);
    de_adapter_sleep(de);
    return 0;
}

static void de_tx_timeout (struct net_device *dev)
{
    struct de_private *de = netdev_priv(dev);
    const int irq = de->pdev->irq;

    netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
               dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
               de->rx_tail, de->tx_head, de->tx_tail);

    del_timer_sync(&de->media_timer);

    disable_irq(irq);
    spin_lock_irq(&de->lock);

    de_stop_hw(de);
    netif_stop_queue(dev);
    netif_carrier_off(dev);

    spin_unlock_irq(&de->lock);
    enable_irq(irq);

    /* Update the error counts. */
    __de_get_stats(de);

    synchronize_irq(irq);
    de_clean_rings(de);

    de_init_rings(de);

    de_init_hw(de);

    netif_wake_queue(dev);
}

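/* ethtool support: register dump, media get/set, message level, EEPROM
 * dump and NWay restart. */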
static void __de_get_regs(struct de_private *de, u8 *buf)
{
    int i;
    u32 *rbuf = (u32 *)buf;

    /* read all CSRs */
    for (i = 0; i < DE_NUM_REGS; i++)
        rbuf[i] = dr32(i * 8);

    /* handle self-clearing RxMissed counter, CSR8 */
    de_rx_missed(de, rbuf[8]);
}

static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
{
    ecmd->supported = de->media_supported;
    ecmd->transceiver = XCVR_INTERNAL;
    ecmd->phy_address = 0;
    ecmd->advertising = de->media_advertise;

    switch (de->media_type) {
    case DE_MEDIA_AUI:
        ecmd->port = PORT_AUI;
        break;
    case DE_MEDIA_BNC:
        ecmd->port = PORT_BNC;
        break;
    default:
        ecmd->port = PORT_TP;
        break;
    }

    ethtool_cmd_speed_set(ecmd, 10);

    if (dr32(MacMode) & FullDuplex)
        ecmd->duplex = DUPLEX_FULL;
    else
        ecmd->duplex = DUPLEX_HALF;

    if (de->media_lock)
        ecmd->autoneg = AUTONEG_DISABLE;
    else
        ecmd->autoneg = AUTONEG_ENABLE;

    /* ignore maxtxpkt, maxrxpkt for now */

    return 0;
}

static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
{
    u32 new_media;
    unsigned int media_lock;

    if (ethtool_cmd_speed(ecmd) != 10)
        return -EINVAL;
    if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
        return -EINVAL;
    if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
        return -EINVAL;
    if (de->de21040 && ecmd->port == PORT_BNC)
        return -EINVAL;
    if (ecmd->transceiver != XCVR_INTERNAL)
        return -EINVAL;
    if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
        return -EINVAL;
    if (ecmd->advertising & ~de->media_supported)
        return -EINVAL;
    if (ecmd->autoneg == AUTONEG_ENABLE &&
        (!(ecmd->advertising & ADVERTISED_Autoneg)))
        return -EINVAL;

    switch (ecmd->port) {
    case PORT_AUI:
        new_media = DE_MEDIA_AUI;
        if (!(ecmd->advertising & ADVERTISED_AUI))
            return -EINVAL;
        break;
    case PORT_BNC:
        new_media = DE_MEDIA_BNC;
        if (!(ecmd->advertising & ADVERTISED_BNC))
            return -EINVAL;
        break;
    default:
        if (ecmd->autoneg == AUTONEG_ENABLE)
            new_media = DE_MEDIA_TP_AUTO;
        else if (ecmd->duplex == DUPLEX_FULL)
            new_media = DE_MEDIA_TP_FD;
        else
            new_media = DE_MEDIA_TP;
        if (!(ecmd->advertising & ADVERTISED_TP))
            return -EINVAL;
        if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
            return -EINVAL;
        break;
    }

    media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;

    if ((new_media == de->media_type) &&
        (media_lock == de->media_lock) &&
        (ecmd->advertising == de->media_advertise))
        return 0; /* nothing to change */

    de_link_down(de);
    mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
    de_stop_rxtx(de);

    de->media_type = new_media;
    de->media_lock = media_lock;
    de->media_advertise = ecmd->advertising;
    de_set_media(de);
    if (netif_running(de->dev))
        de_start_rxtx(de);

    return 0;
}

static void de_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
    struct de_private *de = netdev_priv(dev);

    strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
    strlcpy(info->version, DRV_VERSION, sizeof(info->version));
    strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
}

static int de_get_regs_len(struct net_device *dev)
{
    return DE_REGS_SIZE;
}

static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
    struct de_private *de = netdev_priv(dev);
    int rc;

    spin_lock_irq(&de->lock);
    rc = __de_get_settings(de, ecmd);
    spin_unlock_irq(&de->lock);

    return rc;
}

static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
    struct de_private *de = netdev_priv(dev);
    int rc;

    spin_lock_irq(&de->lock);
    rc = __de_set_settings(de, ecmd);
    spin_unlock_irq(&de->lock);

    return rc;
}

static u32 de_get_msglevel(struct net_device *dev)
{
    struct de_private *de = netdev_priv(dev);

    return de->msg_enable;
}

static void de_set_msglevel(struct net_device *dev, u32 msglvl)
{
    struct de_private *de = netdev_priv(dev);

    de->msg_enable = msglvl;
}

static int de_get_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data)
{
    struct de_private *de = netdev_priv(dev);

    if (!de->ee_data)
        return -EOPNOTSUPP;
    if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
        (eeprom->len != DE_EEPROM_SIZE))
        return -EINVAL;
    memcpy(data, de->ee_data, eeprom->len);

    return 0;
}

static int de_nway_reset(struct net_device *dev)
{
    struct de_private *de = netdev_priv(dev);
    u32 status;

    if (de->media_type != DE_MEDIA_TP_AUTO)
        return -EINVAL;
    if (netif_carrier_ok(de->dev))
        de_link_down(de);

    status = dr32(SIAStatus);
    dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
    netif_info(de, link, dev, "link nway restart, status %x,%x\n",
               status, dr32(SIAStatus));
    return 0;
}

static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                        void *data)
{
    struct de_private *de = netdev_priv(dev);

    regs->version = (DE_REGS_VER << 2) | de->de21040;

    spin_lock_irq(&de->lock);
    __de_get_regs(de, data);
    spin_unlock_irq(&de->lock);
}

static const struct ethtool_ops de_ethtool_ops = {
    .get_link       = ethtool_op_get_link,
    .get_drvinfo    = de_get_drvinfo,
    .get_regs_len   = de_get_regs_len,
    .get_settings   = de_get_settings,
    .set_settings   = de_set_settings,
    .get_msglevel   = de_get_msglevel,
    .set_msglevel   = de_set_msglevel,
    .get_eeprom     = de_get_eeprom,
    .nway_reset     = de_nway_reset,
    .get_regs       = de_get_regs,
};

static void de21040_get_mac_address(struct de_private *de)
{
    unsigned i;

    dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
    udelay(5);

    for (i = 0; i < 6; i++) {
        int value, boguscnt = 100000;
        do {
            value = dr32(ROMCmd);
            rmb();
        } while (value < 0 && --boguscnt > 0);
        de->dev->dev_addr[i] = value;
        udelay(1);
        if (boguscnt <= 0)
            pr_warn("timeout reading 21040 MAC address byte %u\n",
                    i);
    }
}

static void de21040_get_media_info(struct de_private *de)
{
    unsigned int i;

    de->media_type = DE_MEDIA_TP;
    de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
                           SUPPORTED_10baseT_Half | SUPPORTED_AUI;
    de->media_advertise = de->media_supported;

    for (i = 0; i < DE_MAX_MEDIA; i++) {
        switch (i) {
        case DE_MEDIA_AUI:
        case DE_MEDIA_TP:
        case DE_MEDIA_TP_FD:
            de->media[i].type = i;
            de->media[i].csr13 = t21040_csr13[i];
            de->media[i].csr14 = t21040_csr14[i];
            de->media[i].csr15 = t21040_csr15[i];
            break;
        default:
            de->media[i].type = DE_MEDIA_INVALID;
            break;
        }
    }
}

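/* Read a 16-bit word from the bit-banged serial EEPROM through the ROMCmd
 * register.  Note: the routine clocks out extra data bits ahead of the
 * word, which callers use to detect the EEPROM's address width. */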
static unsigned tulip_read_eeprom(void __iomem *regs, int location,
                                  int addr_len)
{
    int i;
    unsigned retval = 0;
    void __iomem *ee_addr = regs + ROMCmd;
    int read_cmd = location | (EE_READ_CMD << addr_len);

    writel(EE_ENB & ~EE_CS, ee_addr);
    writel(EE_ENB, ee_addr);

    /* Shift the read command bits out. */
    for (i = 4 + addr_len; i >= 0; i--) {
        short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
        writel(EE_ENB | dataval, ee_addr);
        readl(ee_addr);
        writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
        readl(ee_addr);
        retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
    }
    writel(EE_ENB, ee_addr);
    readl(ee_addr);

    /* Shift the 16 data bits in. */
    for (i = 16; i > 0; i--) {
        writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
        readl(ee_addr);
        retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
        writel(EE_ENB, ee_addr);
        readl(ee_addr);
    }

    /* Terminate the EEPROM access. */
    writel(EE_ENB & ~EE_CS, ee_addr);
    return retval;
}

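/* Parse the 21041 SROM: extract the station address, locate the
 * controller-0 info leaf, record the default medium, and pull per-medium
 * CSR13/14/15 overrides from the media blocks.  On a malformed SROM,
 * fall back to conservative defaults via the bad_srom path. */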
static void de21041_get_srom_info(struct de_private *de)
{
    unsigned i, sa_offset = 0, ofs;
    u8 ee_data[DE_EEPROM_SIZE + 6] = {};
    unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
    struct de_srom_info_leaf *il;
    void *bufp;

    /* download entire eeprom */
    for (i = 0; i < DE_EEPROM_WORDS; i++)
        ((__le16 *)ee_data)[i] =
            cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));

    /* DEC now has a specification, but early board makers just put the
       address in the first EEPROM locations.  Detect the old layout by
       comparing ee_data[0..7] against ee_data[16..23]. */

#ifndef CONFIG_MIPS_COBALT

    for (i = 0; i < 8; i++)
        if (ee_data[i] != ee_data[16+i])
            sa_offset = 20;

#endif

    /* store MAC address */
    for (i = 0; i < 6; i++)
        de->dev->dev_addr[i] = ee_data[i + sa_offset];

    /* get offset of controller 0 info leaf */
    ofs = ee_data[SROMC0InfoLeaf];
    if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
        goto bad_srom;

    /* get pointer to info leaf */
    il = (struct de_srom_info_leaf *) &ee_data[ofs];

    /* paranoia checks */
    if (il->n_blocks == 0)
        goto bad_srom;
    if ((sizeof(ee_data) - ofs) <
        (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
        goto bad_srom;

    /* get default media type */
    switch (get_unaligned(&il->default_media)) {
    case 0x0001: de->media_type = DE_MEDIA_BNC; break;
    case 0x0002: de->media_type = DE_MEDIA_AUI; break;
    case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
    default: de->media_type = DE_MEDIA_TP_AUTO; break;
    }

    if (netif_msg_probe(de))
        pr_info("de%d: SROM leaf offset %u, default media %s\n",
                de->board_idx, ofs, media_name[de->media_type]);

    /* init SIA register values to defaults */
    for (i = 0; i < DE_MAX_MEDIA; i++) {
        de->media[i].type = DE_MEDIA_INVALID;
        de->media[i].csr13 = 0xffff;
        de->media[i].csr14 = 0xffff;
        de->media[i].csr15 = 0xffff;
    }

    /* parse media blocks to see what medias are supported,
     * and if any custom CSR values are provided
     */
    bufp = ((void *)il) + sizeof(*il);
    for (i = 0; i < il->n_blocks; i++) {
        struct de_srom_media_block *ib = bufp;
        unsigned idx;

        /* index based on media type in media block */
        switch(ib->opts & MediaBlockMask) {
        case 0: /* 10baseT */
            de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
                              | SUPPORTED_Autoneg;
            idx = DE_MEDIA_TP;
            de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
            break;
        case 1: /* BNC */
            de->media_supported |= SUPPORTED_BNC;
            idx = DE_MEDIA_BNC;
            break;
        case 2: /* AUI */
            de->media_supported |= SUPPORTED_AUI;
            idx = DE_MEDIA_AUI;
            break;
        case 4: /* 10baseT-FD */
            de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
                              | SUPPORTED_Autoneg;
            idx = DE_MEDIA_TP_FD;
            de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
            break;
        default:
            goto bad_srom;
        }

        de->media[idx].type = idx;

        if (netif_msg_probe(de))
            pr_info("de%d: media block #%u: %s",
                    de->board_idx, i,
                    media_name[de->media[idx].type]);

        bufp += sizeof (ib->opts);

        if (ib->opts & MediaCustomCSRs) {
            de->media[idx].csr13 = get_unaligned(&ib->csr13);
            de->media[idx].csr14 = get_unaligned(&ib->csr14);
            de->media[idx].csr15 = get_unaligned(&ib->csr15);
            bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
                    sizeof(ib->csr15);

            if (netif_msg_probe(de))
                pr_cont(" (%x,%x,%x)\n",
                        de->media[idx].csr13,
                        de->media[idx].csr14,
                        de->media[idx].csr15);

        } else {
            if (netif_msg_probe(de))
                pr_cont("\n");
        }

        if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
            break;
    }

    de->media_advertise = de->media_supported;

fill_defaults:
    /* fill in defaults, for cases where custom CSRs not used */
    for (i = 0; i < DE_MAX_MEDIA; i++) {
        if (de->media[i].csr13 == 0xffff)
            de->media[i].csr13 = t21041_csr13[i];
        if (de->media[i].csr14 == 0xffff) {
            /* autonegotiation is broken at least on some chip
               revisions - rev. 0x21 works, 0x11 does not */
            if (de->pdev->revision < 0x20)
                de->media[i].csr14 = t21041_csr14_brk[i];
            else
                de->media[i].csr14 = t21041_csr14[i];
        }
        if (de->media[i].csr15 == 0xffff)
            de->media[i].csr15 = t21041_csr15[i];
    }

    de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);

    return;

bad_srom:
    /* for error cases, it's ok to assume we support all these */
    for (i = 0; i < DE_MAX_MEDIA; i++)
        de->media[i].type = i;
    de->media_supported =
        SUPPORTED_10baseT_Half |
        SUPPORTED_10baseT_Full |
        SUPPORTED_Autoneg |
        SUPPORTED_TP |
        SUPPORTED_AUI |
        SUPPORTED_BNC;
    goto fill_defaults;
}

static const struct net_device_ops de_netdev_ops = {
    .ndo_open           = de_open,
    .ndo_stop           = de_close,
    .ndo_set_rx_mode    = de_set_rx_mode,
    .ndo_start_xmit     = de_start_xmit,
    .ndo_get_stats      = de_get_stats,
    .ndo_tx_timeout     = de_tx_timeout,
    .ndo_change_mtu     = eth_change_mtu,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_validate_addr  = eth_validate_addr,
};

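/* One-time probe: map the chip, reset it, read the MAC address and media
 * tables (from the 21040 ROM or the 21041 SROM), register the netdev, and
 * put the adapter back to sleep until it is opened. */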
static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct net_device *dev;
    struct de_private *de;
    int rc;
    void __iomem *regs;
    unsigned long pciaddr;
    static int board_idx = -1;

    board_idx++;

#ifndef MODULE
    if (board_idx == 0)
        pr_info("%s\n", version);
#endif

    /* allocate a new ethernet device structure, and fill in defaults */
    dev = alloc_etherdev(sizeof(struct de_private));
    if (!dev)
        return -ENOMEM;

    dev->netdev_ops = &de_netdev_ops;
    SET_NETDEV_DEV(dev, &pdev->dev);
    dev->ethtool_ops = &de_ethtool_ops;
    dev->watchdog_timeo = TX_TIMEOUT;

    de = netdev_priv(dev);
    de->de21040 = ent->driver_data == 0 ? 1 : 0;
    de->pdev = pdev;
    de->dev = dev;
    de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
    de->board_idx = board_idx;
    spin_lock_init (&de->lock);
    init_timer(&de->media_timer);
    if (de->de21040)
        de->media_timer.function = de21040_media_timer;
    else
        de->media_timer.function = de21041_media_timer;
    de->media_timer.data = (unsigned long) de;

    netif_carrier_off(dev);

    /* wake up device, assign resources */
    rc = pci_enable_device(pdev);
    if (rc)
        goto err_out_free;

    /* reserve PCI resources to ensure driver atomicity */
    rc = pci_request_regions(pdev, DRV_NAME);
    if (rc)
        goto err_out_disable;

    /* check for invalid IRQ value */
    if (pdev->irq < 2) {
        rc = -EIO;
        pr_err("invalid irq (%d) for pci dev %s\n",
               pdev->irq, pci_name(pdev));
        goto err_out_res;
    }

    /* obtain and check validity of PCI MMIO address */
    pciaddr = pci_resource_start(pdev, 1);
    if (!pciaddr) {
        rc = -EIO;
        pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev));
        goto err_out_res;
    }
    if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
        rc = -EIO;
        pr_err("MMIO resource (%llx) too small on pci dev %s\n",
               (unsigned long long)pci_resource_len(pdev, 1),
               pci_name(pdev));
        goto err_out_res;
    }

    /* remap CSR registers */
    regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
    if (!regs) {
        rc = -EIO;
        pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
               (unsigned long long)pci_resource_len(pdev, 1),
               pciaddr, pci_name(pdev));
        goto err_out_res;
    }
    de->regs = regs;

    de_adapter_wake(de);

    /* make sure hardware is not running */
    rc = de_reset_mac(de);
    if (rc) {
        pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev));
        goto err_out_iomap;
    }

    /* get MAC address, initialize default media type and
     * get list of supported media
     */
    if (de->de21040) {
        de21040_get_mac_address(de);
        de21040_get_media_info(de);
    } else {
        de21041_get_srom_info(de);
    }

    /* register new network interface with kernel */
    rc = register_netdev(dev);
    if (rc)
        goto err_out_iomap;

    /* print info about board and interface just registered */
    netdev_info(dev, "%s at %p, %pM, IRQ %d\n",
                de->de21040 ? "21040" : "21041",
                regs, dev->dev_addr, pdev->irq);

    pci_set_drvdata(pdev, dev);

    /* enable busmastering */
    pci_set_master(pdev);

    /* put adapter to sleep */
    de_adapter_sleep(de);

    return 0;

err_out_iomap:
    kfree(de->ee_data);
    iounmap(regs);
err_out_res:
    pci_release_regions(pdev);
err_out_disable:
    pci_disable_device(pdev);
err_out_free:
    free_netdev(dev);
    return rc;
}

static void de_remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct de_private *de = netdev_priv(dev);

    BUG_ON(!dev);
    unregister_netdev(dev);
    kfree(de->ee_data);
    iounmap(de->regs);
    pci_release_regions(pdev);
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);
    free_netdev(dev);
}

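/* Power management: stop the chip and detach the interface on suspend,
 * then re-enable the device, refill the rings and reprogram the hardware
 * on resume. */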
#ifdef CONFIG_PM

static int de_suspend (struct pci_dev *pdev, pm_message_t state)
{
    struct net_device *dev = pci_get_drvdata (pdev);
    struct de_private *de = netdev_priv(dev);

    rtnl_lock();
    if (netif_running (dev)) {
        const int irq = pdev->irq;

        del_timer_sync(&de->media_timer);

        disable_irq(irq);
        spin_lock_irq(&de->lock);

        de_stop_hw(de);
        netif_stop_queue(dev);
        netif_device_detach(dev);
        netif_carrier_off(dev);

        spin_unlock_irq(&de->lock);
        enable_irq(irq);

        /* Update the error counts. */
        __de_get_stats(de);

        synchronize_irq(irq);
        de_clean_rings(de);

        de_adapter_sleep(de);
        pci_disable_device(pdev);
    } else {
        netif_device_detach(dev);
    }
    rtnl_unlock();
    return 0;
}

static int de_resume (struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata (pdev);
    struct de_private *de = netdev_priv(dev);
    int retval = 0;

    rtnl_lock();
    if (netif_device_present(dev))
        goto out;
    if (!netif_running(dev))
        goto out_attach;
    if ((retval = pci_enable_device(pdev))) {
        netdev_err(dev, "pci_enable_device failed in resume\n");
        goto out;
    }
    pci_set_master(pdev);
    de_init_rings(de);
    de_init_hw(de);
out_attach:
    netif_device_attach(dev);
out:
    rtnl_unlock();
    return 0;
}

#endif /* CONFIG_PM */

static struct pci_driver de_driver = {
    .name     = DRV_NAME,
    .id_table = de_pci_tbl,
    .probe    = de_init_one,
    .remove   = de_remove_one,
#ifdef CONFIG_PM
    .suspend  = de_suspend,
    .resume   = de_resume,
#endif
};

static int __init de_init (void)
{
#ifdef MODULE
    pr_info("%s\n", version);
#endif
    return pci_register_driver(&de_driver);
}

static void __exit de_exit (void)
{
    pci_unregister_driver (&de_driver);
}

module_init(de_init);
module_exit(de_exit);