1
2
3
4
5
6
7
8
9
10
11
12#define pr_fmt(fmt) "tulip: " fmt
13
14#define DRV_NAME "tulip"
15#ifdef CONFIG_TULIP_NAPI
16#define DRV_VERSION "1.1.15-NAPI"
17#else
18#define DRV_VERSION "1.1.15"
19#endif
20#define DRV_RELDATE "Feb 27, 2007"
21
22
23#include <linux/module.h>
24#include <linux/pci.h>
25#include <linux/slab.h>
26#include "tulip.h"
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/etherdevice.h>
30#include <linux/delay.h>
31#include <linux/mii.h>
32#include <linux/crc32.h>
33#include <asm/unaligned.h>
34#include <asm/uaccess.h>
35
36#ifdef CONFIG_SPARC
37#include <asm/prom.h>
38#endif
39
/* One-shot banner printed at probe time (__devinitdata: may be discarded
 * after init on non-hotplug kernels). */
static char version[] __devinitdata =
	"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";




/* Maximum events (Rx packets, etc.) to handle per interrupt. */
static unsigned int max_interrupt_work = 25;

/* Number of boards for which per-unit module parameters are accepted. */
#define MAX_UNITS 8

/* Per-board settings, filled in from the module_param_array()s below. */
static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
static int mtu[MAX_UNITS];


/* Human-readable names indexed by the 5-bit media code used throughout
 * the driver (shared with the other tulip source files). */
const char * const medianame[32] = {
	"10baseT", "10base2", "AUI", "100baseTx",
	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
	"","","","", "","","","", "","","","Transceiver reset",
};

/* Copy received frames smaller than this into a fresh skb instead of
 * handing up the ring buffer.  On architectures where unaligned DMA is
 * costly, copy everything (1518 = max ethernet frame). */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif
72
73
74
75
76
77
78
79
80
81
82
83
84
85
/* Default CSR0 (bus mode) value, selected per architecture.  The low
 * bits tune PCI burst length / cache alignment; presumably chosen from
 * historical per-platform testing — confirm against the 21143 manual
 * before changing. */
#if defined(__alpha__) || defined(__ia64__)
static int csr0 = 0x01A00000 | 0xE000;
#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
static int csr0 = 0x01A00000 | 0x8000;
#elif defined(CONFIG_SPARC) || defined(__hppa__)




static int csr0 = 0x01A00000 | 0x9000;
#elif defined(__arm__) || defined(__sh__)
static int csr0 = 0x01A00000 | 0x4800;
#elif defined(__mips__)
static int csr0 = 0x00200000 | 0x4000;
#else
#warning Processor architecture undefined!
static int csr0 = 0x00A00000 | 0x4800;
#endif

/* Time in jiffies before the netdev watchdog declares a Tx timeout. */
#define TX_TIMEOUT  (4*HZ)
108
109
110MODULE_AUTHOR("The Linux Kernel Team");
111MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
112MODULE_LICENSE("GPL");
113MODULE_VERSION(DRV_VERSION);
114module_param(tulip_debug, int, 0);
115module_param(max_interrupt_work, int, 0);
116module_param(rx_copybreak, int, 0);
117module_param(csr0, int, 0);
118module_param_array(options, int, NULL, 0);
119module_param_array(full_duplex, int, NULL, 0);
120
121#ifdef TULIP_DEBUG
122int tulip_debug = TULIP_DEBUG;
123#else
124int tulip_debug = 1;
125#endif
126
127static void tulip_timer(unsigned long data)
128{
129 struct net_device *dev = (struct net_device *)data;
130 struct tulip_private *tp = netdev_priv(dev);
131
132 if (netif_running(dev))
133 schedule_work(&tp->media_work);
134}
135
136
137
138
139
140
141
/*
 * Per-chip capability table, indexed by the chip_id enum (the same value
 * stored as driver_data in tulip_pci_tbl below).  Fields per entry:
 * chip name, I/O region size, interrupt mask written to CSR5/CSR7,
 * feature flags, media timer callback, and optional media task.
 * The two empty leading slots keep indices aligned with chip IDs that
 * are serviced by other drivers.
 */
struct tulip_chip_table tulip_tbl[] = {
	{ },	/* placeholder */
	{ },	/* placeholder */

	/* DC21140 */
	{ "Digital DS21140 Tulip", 128, 0x0001ebef,
	  HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
	  tulip_media_task },

	/* DC21142/DC21143 */
	{ "Digital DS21142/43 Tulip", 128, 0x0801fbff,
	  HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
	  | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },

	/* LC82C168 */
	{ "Lite-On 82c168 PNIC", 256, 0x0001fbef,
	  HAS_MII | HAS_PNICNWAY, pnic_timer, },

	/* MX98713 */
	{ "Macronix 98713 PMAC", 128, 0x0001ebef,
	  HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },

	/* MX98715 */
	{ "Macronix 98715 PMAC", 256, 0x0001ebef,
	  HAS_MEDIA_TABLE, mxic_timer, },

	/* MX98725 */
	{ "Macronix 98725 PMAC", 256, 0x0001ebef,
	  HAS_MEDIA_TABLE, mxic_timer, },

	/* AX88140 */
	{ "ASIX AX88140", 128, 0x0001fbff,
	  HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
	  | IS_ASIX, tulip_timer, tulip_media_task },

	/* PNIC2 */
	{ "Lite-On PNIC-II", 256, 0x0801fbff,
	  HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },

	/* COMET */
	{ "ADMtek Comet", 256, 0x0001abef,
	  HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },

	/* COMPEX9881 */
	{ "Compex 9881 PMAC", 128, 0x0001ebef,
	  HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },

	/* I21145 */
	{ "Intel DS21145 Tulip", 128, 0x0801fbff,
	  HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
	  | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },

	/* DM910X */
#ifdef CONFIG_TULIP_DM910X
	{ "Davicom DM9102/DM9102A", 128, 0x0001ebef,
	  HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
	  tulip_timer, tulip_media_task },
#else
	{ NULL },
#endif

	/* CONEXANT */
	{ "Conexant LANfinity", 256, 0x0001ebef,
	  HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },

};
208
209
/* PCI IDs this driver binds to.  The final field (driver_data) is the
 * chip_id index into tulip_tbl[].  Many entries are third-party clones
 * of the ADMtek Comet. */
static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },	/* DEC */
	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },	/* DEC */
	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },

	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },	/* Intel */
#ifdef CONFIG_TULIP_DM910X
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },	/* Davicom */
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },	/* Davicom */
#endif
	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
253
254
255
/* Capability flags for each media code in medianame[]; the values encode
 * MediaIsMII / MediaIs100 / MediaIsFD style bits tested throughout the
 * driver (bit meanings are defined in tulip.h — confirm there before
 * editing this table). */
const char tulip_media_cap[32] =
{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };

/* Forward declarations for the netdev/ethtool/ioctl entry points below. */
static void tulip_tx_timeout(struct net_device *dev);
static void tulip_init_ring(struct net_device *dev);
static void tulip_free_ring(struct net_device *dev);
static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
					struct net_device *dev);
static int tulip_open(struct net_device *dev);
static int tulip_close(struct net_device *dev);
static void tulip_up(struct net_device *dev);
static void tulip_down(struct net_device *dev);
static struct net_device_stats *tulip_get_stats(struct net_device *dev);
static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_tulip(struct net_device *dev);
#endif
275
276static void tulip_set_power_state (struct tulip_private *tp,
277 int sleep, int snooze)
278{
279 if (tp->flags & HAS_ACPI) {
280 u32 tmp, newtmp;
281 pci_read_config_dword (tp->pdev, CFDD, &tmp);
282 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
283 if (sleep)
284 newtmp |= CFDD_Sleep;
285 else if (snooze)
286 newtmp |= CFDD_Snooze;
287 if (tmp != newtmp)
288 pci_write_config_dword (tp->pdev, CFDD, newtmp);
289 }
290
291}
292
293
294static void tulip_up(struct net_device *dev)
295{
296 struct tulip_private *tp = netdev_priv(dev);
297 void __iomem *ioaddr = tp->base_addr;
298 int next_tick = 3*HZ;
299 u32 reg;
300 int i;
301
302#ifdef CONFIG_TULIP_NAPI
303 napi_enable(&tp->napi);
304#endif
305
306
307 tulip_set_power_state (tp, 0, 0);
308
309
310 pci_enable_wake(tp->pdev, PCI_D3hot, 0);
311 pci_enable_wake(tp->pdev, PCI_D3cold, 0);
312 tulip_set_wolopts(tp->pdev, 0);
313
314
315 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
316 iowrite32(0x00040000, ioaddr + CSR6);
317
318
319 iowrite32(0x00000001, ioaddr + CSR0);
320 pci_read_config_dword(tp->pdev, PCI_COMMAND, ®);
321 udelay(100);
322
323
324
325
326 iowrite32(tp->csr0, ioaddr + CSR0);
327 pci_read_config_dword(tp->pdev, PCI_COMMAND, ®);
328 udelay(100);
329
330 if (tulip_debug > 1)
331 netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
332
333 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
334 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
335 tp->cur_rx = tp->cur_tx = 0;
336 tp->dirty_rx = tp->dirty_tx = 0;
337
338 if (tp->flags & MC_HASH_ONLY) {
339 u32 addr_low = get_unaligned_le32(dev->dev_addr);
340 u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
341 if (tp->chip_id == AX88140) {
342 iowrite32(0, ioaddr + CSR13);
343 iowrite32(addr_low, ioaddr + CSR14);
344 iowrite32(1, ioaddr + CSR13);
345 iowrite32(addr_high, ioaddr + CSR14);
346 } else if (tp->flags & COMET_MAC_ADDR) {
347 iowrite32(addr_low, ioaddr + 0xA4);
348 iowrite32(addr_high, ioaddr + 0xA8);
349 iowrite32(0, ioaddr + CSR27);
350 iowrite32(0, ioaddr + CSR28);
351 }
352 } else {
353
354 u16 *eaddrs = (u16 *)dev->dev_addr;
355 u16 *setup_frm = &tp->setup_frame[15*6];
356 dma_addr_t mapping;
357
358
359 memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
360
361 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
362 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
363 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
364
365 mapping = pci_map_single(tp->pdev, tp->setup_frame,
366 sizeof(tp->setup_frame),
367 PCI_DMA_TODEVICE);
368 tp->tx_buffers[tp->cur_tx].skb = NULL;
369 tp->tx_buffers[tp->cur_tx].mapping = mapping;
370
371
372 tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
373 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
374 tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
375
376 tp->cur_tx++;
377 }
378
379 tp->saved_if_port = dev->if_port;
380 if (dev->if_port == 0)
381 dev->if_port = tp->default_port;
382
383
384 i = 0;
385 if (tp->mtable == NULL)
386 goto media_picked;
387 if (dev->if_port) {
388 int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
389 (dev->if_port == 12 ? 0 : dev->if_port);
390 for (i = 0; i < tp->mtable->leafcount; i++)
391 if (tp->mtable->mleaf[i].media == looking_for) {
392 dev_info(&dev->dev,
393 "Using user-specified media %s\n",
394 medianame[dev->if_port]);
395 goto media_picked;
396 }
397 }
398 if ((tp->mtable->defaultmedia & 0x0800) == 0) {
399 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
400 for (i = 0; i < tp->mtable->leafcount; i++)
401 if (tp->mtable->mleaf[i].media == looking_for) {
402 dev_info(&dev->dev,
403 "Using EEPROM-set media %s\n",
404 medianame[looking_for]);
405 goto media_picked;
406 }
407 }
408
409 for (i = tp->mtable->leafcount - 1;
410 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
411 ;
412media_picked:
413
414 tp->csr6 = 0;
415 tp->cur_index = i;
416 tp->nwayset = 0;
417
418 if (dev->if_port) {
419 if (tp->chip_id == DC21143 &&
420 (tulip_media_cap[dev->if_port] & MediaIsMII)) {
421
422 iowrite32(0x0000, ioaddr + CSR13);
423 iowrite32(0x0000, ioaddr + CSR14);
424 iowrite32(0x0008, ioaddr + CSR15);
425 }
426 tulip_select_media(dev, 1);
427 } else if (tp->chip_id == DC21142) {
428 if (tp->mii_cnt) {
429 tulip_select_media(dev, 1);
430 if (tulip_debug > 1)
431 dev_info(&dev->dev,
432 "Using MII transceiver %d, status %04x\n",
433 tp->phys[0],
434 tulip_mdio_read(dev, tp->phys[0], 1));
435 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
436 tp->csr6 = csr6_mask_hdcap;
437 dev->if_port = 11;
438 iowrite32(0x0000, ioaddr + CSR13);
439 iowrite32(0x0000, ioaddr + CSR14);
440 } else
441 t21142_start_nway(dev);
442 } else if (tp->chip_id == PNIC2) {
443
444 tp->sym_advertise = 0x01E0;
445
446 iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
447 iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
448 pnic2_start_nway(dev);
449 } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
450 if (tp->mii_cnt) {
451 dev->if_port = 11;
452 tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
453 iowrite32(0x0001, ioaddr + CSR15);
454 } else if (ioread32(ioaddr + CSR5) & TPLnkPass)
455 pnic_do_nway(dev);
456 else {
457
458 iowrite32(0x32, ioaddr + CSR12);
459 tp->csr6 = 0x00420000;
460 iowrite32(0x0001B078, ioaddr + 0xB8);
461 iowrite32(0x0201B078, ioaddr + 0xB8);
462 next_tick = 1*HZ;
463 }
464 } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
465 ! tp->medialock) {
466 dev->if_port = 0;
467 tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
468 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
469 } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
470
471 dev->if_port = 0;
472 tp->csr6 = 0x01a80200;
473 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
474 iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
475 } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
476
477 iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
478 dev->if_port = tp->mii_cnt ? 11 : 0;
479 tp->csr6 = 0x00040000;
480 } else if (tp->chip_id == AX88140) {
481 tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
482 } else
483 tulip_select_media(dev, 1);
484
485
486 tulip_stop_rxtx(tp);
487 barrier();
488 udelay(5);
489 iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
490
491
492 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
493 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
494 tulip_start_rxtx(tp);
495 iowrite32(0, ioaddr + CSR2);
496
497 if (tulip_debug > 2) {
498 netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
499 ioread32(ioaddr + CSR0),
500 ioread32(ioaddr + CSR5),
501 ioread32(ioaddr + CSR6));
502 }
503
504
505
506 tp->timer.expires = RUN_AT(next_tick);
507 add_timer(&tp->timer);
508#ifdef CONFIG_TULIP_NAPI
509 init_timer(&tp->oom_timer);
510 tp->oom_timer.data = (unsigned long)dev;
511 tp->oom_timer.function = oom_timer;
512#endif
513}
514
515static int
516tulip_open(struct net_device *dev)
517{
518 struct tulip_private *tp = netdev_priv(dev);
519 int retval;
520
521 tulip_init_ring (dev);
522
523 retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
524 dev->name, dev);
525 if (retval)
526 goto free_ring;
527
528 tulip_up (dev);
529
530 netif_start_queue (dev);
531
532 return 0;
533
534free_ring:
535 tulip_free_ring (dev);
536 return retval;
537}
538
539
/* ndo_tx_timeout handler: log chip state and start recovery.  For MII
 * media the media monitor is expected to fix things; for the 21140-class
 * chips the media work item performs the reset; other chips are reset
 * synchronously via tulip_tx_timeout_complete(). */
static void tulip_tx_timeout(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	spin_lock_irqsave (&tp->lock, flags);

	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
		/* Do nothing -- the media monitor should adjust for this. */
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "Transmit timeout using MII device\n");
	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
		   tp->chip_id == DM910X) {
		dev_warn(&dev->dev,
			 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
			 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
			 ioread32(ioaddr + CSR15));
		/* Defer the reset to the media task in process context. */
		tp->timeout_recovery = 1;
		schedule_work(&tp->media_work);
		goto out_unlock;
	} else if (tp->chip_id == PNIC2) {
		dev_warn(&dev->dev,
			 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
			 (int)ioread32(ioaddr + CSR5),
			 (int)ioread32(ioaddr + CSR6),
			 (int)ioread32(ioaddr + CSR7),
			 (int)ioread32(ioaddr + CSR12));
	} else {
		dev_warn(&dev->dev,
			 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
		dev->if_port = 0;	/* restart media selection */
	}

#if defined(way_too_many_messages)
	/* Verbose Rx/Tx ring dump; compiled out by default. */
	if (tulip_debug > 3) {
		int i;
		for (i = 0; i < RX_RING_SIZE; i++) {
			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
			int j;
			printk(KERN_DEBUG
			       "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
			       i,
			       (unsigned int)tp->rx_ring[i].status,
			       (unsigned int)tp->rx_ring[i].length,
			       (unsigned int)tp->rx_ring[i].buffer1,
			       (unsigned int)tp->rx_ring[i].buffer2,
			       buf[0], buf[1], buf[2]);
			for (j = 0; buf[j] != 0xee && j < 1600; j++)
				if (j < 100)
					pr_cont(" %02x", buf[j]);
			pr_cont(" j=%d\n", j);
		}
		printk(KERN_DEBUG "  Rx ring %p: ", tp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
		printk(KERN_DEBUG "  Tx ring %p: ", tp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
		pr_cont("\n");
	}
#endif

	tulip_tx_timeout_complete(tp, ioaddr);

out_unlock:
	spin_unlock_irqrestore (&tp->lock, flags);
	dev->trans_start = jiffies;	/* prevent an immediate re-timeout */
	netif_wake_queue (dev);
}
614
615
616
617static void tulip_init_ring(struct net_device *dev)
618{
619 struct tulip_private *tp = netdev_priv(dev);
620 int i;
621
622 tp->susp_rx = 0;
623 tp->ttimer = 0;
624 tp->nir = 0;
625
626 for (i = 0; i < RX_RING_SIZE; i++) {
627 tp->rx_ring[i].status = 0x00000000;
628 tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
629 tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
630 tp->rx_buffers[i].skb = NULL;
631 tp->rx_buffers[i].mapping = 0;
632 }
633
634 tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
635 tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
636
637 for (i = 0; i < RX_RING_SIZE; i++) {
638 dma_addr_t mapping;
639
640
641
642
643 struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
644 tp->rx_buffers[i].skb = skb;
645 if (skb == NULL)
646 break;
647 mapping = pci_map_single(tp->pdev, skb->data,
648 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
649 tp->rx_buffers[i].mapping = mapping;
650 tp->rx_ring[i].status = cpu_to_le32(DescOwned);
651 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
652 }
653 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
654
655
656
657 for (i = 0; i < TX_RING_SIZE; i++) {
658 tp->tx_buffers[i].skb = NULL;
659 tp->tx_buffers[i].mapping = 0;
660 tp->tx_ring[i].status = 0x00000000;
661 tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
662 }
663 tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
664}
665
/* ndo_start_xmit: place one skb on the Tx ring and issue a Tx poll
 * demand.  The ownership bit (DescOwned) is written last so the chip
 * never sees a half-built descriptor. */
static netdev_tx_t
tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	u32 flag;
	dma_addr_t mapping;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % TX_RING_SIZE;

	tp->tx_buffers[entry].skb = skb;
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb->len, PCI_DMA_TODEVICE);
	tp->tx_buffers[entry].mapping = mapping;
	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);

	/* Request a Tx-done interrupt only periodically (ring half full)
	 * or when the ring is nearly exhausted. */
	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {
		flag = 0x60000000; /* No interrupt */
	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
		flag = 0xe0000000; /* Tx-done interrupt. */
	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
		flag = 0x60000000; /* No interrupt */
	} else {
		/* Ring nearly full: leave room for set_rx_mode() entries
		 * and stop the queue until some are reclaimed. */
		flag = 0xe0000000;
		netif_stop_queue(dev);
	}
	if (entry == TX_RING_SIZE-1)
		flag = 0xe0000000 | DESC_RING_WRAP;

	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);

	/* Pass ownership to the chip; wmb() orders the descriptor writes
	 * before the poll-demand MMIO write below. */
	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
	wmb();

	tp->cur_tx++;

	/* Trigger an immediate transmit demand. */
	iowrite32(0, tp->base_addr + CSR1);

	spin_unlock_irqrestore(&tp->lock, flags);

	return NETDEV_TX_OK;
}
714
/* Reclaim every outstanding Tx descriptor.  Called from tulip_down()
 * with the chip stopped, so descriptors still owned by the hardware
 * (status sign bit set) were never transmitted and count as errors. */
static void tulip_clean_tx_ring(struct tulip_private *tp)
{
	unsigned int dirty_tx;

	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
		dirty_tx++) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(tp->tx_ring[entry].status);

		if (status < 0) {
			/* Chip still owned it: it was never sent. */
			tp->dev->stats.tx_errors++;
			tp->tx_ring[entry].status = 0;
		}

		/* A NULL skb marks a setup frame, which was mapped with
		 * sizeof(tp->setup_frame) rather than an skb length. */
		if (tp->tx_buffers[entry].skb == NULL) {
			/* test because dummy frames have mapping == 0 */
			if (tp->tx_buffers[entry].mapping)
				pci_unmap_single(tp->pdev,
					tp->tx_buffers[entry].mapping,
					sizeof(tp->setup_frame),
					PCI_DMA_TODEVICE);
			continue;
		}

		pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
				tp->tx_buffers[entry].skb->len,
				PCI_DMA_TODEVICE);

		/* Free the original skb. */
		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
		tp->tx_buffers[entry].skb = NULL;
		tp->tx_buffers[entry].mapping = 0;
	}
}
750
/* Quiesce the interface: stop deferred work and timers, mask interrupts,
 * halt the DMA engines, reclaim the rings, and put the chip in snooze
 * mode.  The timer is re-initialized so a later tulip_up() can rearm it. */
static void tulip_down (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	/* Must finish before we touch hardware it might also touch. */
	cancel_work_sync(&tp->media_work);

#ifdef CONFIG_TULIP_NAPI
	napi_disable(&tp->napi);
#endif

	del_timer_sync (&tp->timer);
#ifdef CONFIG_TULIP_NAPI
	del_timer_sync (&tp->oom_timer);
#endif
	spin_lock_irqsave (&tp->lock, flags);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32 (0x00000000, ioaddr + CSR7);

	/* Stop the Tx and Rx processes. */
	tulip_stop_rxtx(tp);

	/* prepare receive buffers */
	tulip_refill_rx(dev);

	/* release any unconsumed transmit buffers */
	tulip_clean_tx_ring(tp);

	/* Pick up the missed-frame count unless the chip is absent
	 * (reads as all-ones). */
	if (ioread32(ioaddr + CSR6) != 0xffffffff)
		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;

	spin_unlock_irqrestore (&tp->lock, flags);

	/* Re-init the media timer for the next tulip_up(). */
	init_timer(&tp->timer);
	tp->timer.data = (unsigned long)dev;
	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;

	dev->if_port = tp->saved_if_port;

	/* Leave the driver in snooze, not sleep, mode. */
	tulip_set_power_state (tp, 0, 1);
}
795
796static void tulip_free_ring (struct net_device *dev)
797{
798 struct tulip_private *tp = netdev_priv(dev);
799 int i;
800
801
802 for (i = 0; i < RX_RING_SIZE; i++) {
803 struct sk_buff *skb = tp->rx_buffers[i].skb;
804 dma_addr_t mapping = tp->rx_buffers[i].mapping;
805
806 tp->rx_buffers[i].skb = NULL;
807 tp->rx_buffers[i].mapping = 0;
808
809 tp->rx_ring[i].status = 0;
810 tp->rx_ring[i].length = 0;
811
812 tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
813 if (skb) {
814 pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
815 PCI_DMA_FROMDEVICE);
816 dev_kfree_skb (skb);
817 }
818 }
819
820 for (i = 0; i < TX_RING_SIZE; i++) {
821 struct sk_buff *skb = tp->tx_buffers[i].skb;
822
823 if (skb != NULL) {
824 pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
825 skb->len, PCI_DMA_TODEVICE);
826 dev_kfree_skb (skb);
827 }
828 tp->tx_buffers[i].skb = NULL;
829 tp->tx_buffers[i].mapping = 0;
830 }
831}
832
833static int tulip_close (struct net_device *dev)
834{
835 struct tulip_private *tp = netdev_priv(dev);
836 void __iomem *ioaddr = tp->base_addr;
837
838 netif_stop_queue (dev);
839
840 tulip_down (dev);
841
842 if (tulip_debug > 1)
843 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
844 ioread32 (ioaddr + CSR5));
845
846 free_irq (tp->pdev->irq, dev);
847
848 tulip_free_ring (dev);
849
850 return 0;
851}
852
853static struct net_device_stats *tulip_get_stats(struct net_device *dev)
854{
855 struct tulip_private *tp = netdev_priv(dev);
856 void __iomem *ioaddr = tp->base_addr;
857
858 if (netif_running(dev)) {
859 unsigned long flags;
860
861 spin_lock_irqsave (&tp->lock, flags);
862
863 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
864
865 spin_unlock_irqrestore(&tp->lock, flags);
866 }
867
868 return &dev->stats;
869}
870
871
872static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
873{
874 struct tulip_private *np = netdev_priv(dev);
875 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
876 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
877 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
878}
879
880
881static int tulip_ethtool_set_wol(struct net_device *dev,
882 struct ethtool_wolinfo *wolinfo)
883{
884 struct tulip_private *tp = netdev_priv(dev);
885
886 if (wolinfo->wolopts & (~tp->wolinfo.supported))
887 return -EOPNOTSUPP;
888
889 tp->wolinfo.wolopts = wolinfo->wolopts;
890 device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
891 return 0;
892}
893
894static void tulip_ethtool_get_wol(struct net_device *dev,
895 struct ethtool_wolinfo *wolinfo)
896{
897 struct tulip_private *tp = netdev_priv(dev);
898
899 wolinfo->supported = tp->wolinfo.supported;
900 wolinfo->wolopts = tp->wolinfo.wolopts;
901 return;
902}
903
904
/* ethtool operations: driver info plus Wake-on-LAN get/set. */
static const struct ethtool_ops ops = {
	.get_drvinfo = tulip_get_drvinfo,
	.set_wol = tulip_ethtool_set_wol,
	.get_wol = tulip_ethtool_get_wol,
};
910
911
/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).  On chips
 * with an internal NWay transceiver (HAS_NWAY), phy_id 32 selects a
 * pseudo-PHY whose registers are synthesized from the SIA CSRs. */
static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	const unsigned int phy_idx = 0;
	int phy = tp->phys[phy_idx] & 0x1f;
	unsigned int regnum = data->reg_num;

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get the address of the PHY in use. */
		if (tp->mii_cnt)
			data->phy_id = phy;
		else if (tp->flags & HAS_NWAY)
			data->phy_id = 32;
		else if (tp->chip_id == COMET)
			data->phy_id = 1;
		else
			return -ENODEV;
		/* Fall through: also return the current register value. */
	case SIOCGMIIREG:		/* Read the specified MII register. */
		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			int csr12 = ioread32 (ioaddr + CSR12);
			int csr14 = ioread32 (ioaddr + CSR14);
			switch (regnum) {
			case 0:		/* BMCR: synthesize autoneg/speed/duplex. */
				if (((csr14<<5) & 0x1000) ||
					(dev->if_port == 5 && tp->nwayset))
					data->val_out = 0x1000;
				else
					data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
						| (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
				break;
			case 1:		/* BMSR: link/negotiation status from CSR12. */
				data->val_out =
					0x1848 +
					((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
					((csr12&0x06) == 6 ? 0 : 4);
				data->val_out |= 0x6048;
				break;
			case 4:		/* ANAR: advertised abilities from CSR6/CSR14. */
				data->val_out =
					((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
					((csr14 >> 1) & 0x20) + 1;
				data->val_out |= ((csr14 >> 9) & 0x03C0);
				break;
			case 5: data->val_out = tp->lpar; break;	/* ANLPAR */
			default: data->val_out = 0; break;
			}
		} else {
			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
		}
		return 0;

	case SIOCSMIIREG:		/* Write the specified MII register. */
		if (regnum & ~0x1f)
			return -EINVAL;
		if (data->phy_id == phy) {
			u16 value = data->val_in;
			switch (regnum) {
			case 0:	/* Track forced-duplex requests from userspace. */
				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
				if (tp->full_duplex_lock)
					tp->full_duplex = (value & 0x0100) ? 1 : 0;
				break;
			case 4:	/* Remember the new advertisement. */
				tp->advertising[phy_idx] =
				tp->mii_advertise = data->val_in;
				break;
			}
		}
		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			u16 value = data->val_in;
			if (regnum == 0) {
				/* Restart autonegotiation on request. */
				if ((value & 0x1200) == 0x1200) {
					if (tp->chip_id == PNIC2) {
						pnic2_start_nway (dev);
					} else {
						t21142_start_nway (dev);
					}
				}
			} else if (regnum == 4)
				tp->sym_advertise = value;
		} else {
			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}

	return -EOPNOTSUPP;
}
1006
1007
1008
1009
1010
1011
1012
/* Set bit i in the little-endian bit array p; used to build the 512-bit
 * multicast hash below.  #undef first in case an arch header defines
 * its own set_bit_le. */
#undef set_bit_le
#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
1015
/* Build a hash-filter setup frame: a 512-bit multicast hash table
 * (CRC-32 of each address, low 9 bits) followed by our own station
 * address in the frame's perfect-filter slot. */
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	/* Bit 255 corresponds to the broadcast address. */
	set_bit_le(255, hash_table);

	netdev_for_each_mc_addr(ha, dev) {
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		set_bit_le(index, hash_table);
	}
	/* Each 16-bit word is written twice, per the setup frame format. */
	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &tp->setup_frame[13*6];

	/* Fill in our own physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
1044
/* Build a perfect-filter setup frame: up to 15 multicast addresses
 * (each 16-bit word duplicated), unused slots padded with 0xff, and our
 * own station address in the final slot.  Caller ensures the multicast
 * list has at most 14 entries. */
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u16 *eaddrs;

	/* Copy each multicast address into the filter, word-doubled. */
	netdev_for_each_mc_addr(ha, dev) {
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &tp->setup_frame[15*6];

	/* Fill in our own physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
1069
1070
/* ndo_set_rx_mode: program the Rx filter.  Depending on chip flags and
 * list size this either sets promiscuous/all-multicast bits in CSR6,
 * loads a hardware hash filter via CSR13/14 or CSR27/28, or queues a
 * setup frame on the Tx ring (classic 21x4x chips). */
static void set_rx_mode(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr6;

	/* Clear the filter-mode bits before choosing a new mode. */
	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;

	tp->csr6 &= ~0x00D5;
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
		csr6 |= AcceptAllMulticast | AcceptAllPhys;
	} else if ((netdev_mc_count(dev) > 1000) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		tp->csr6 |= AcceptAllMulticast;
		csr6 |= AcceptAllMulticast;
	} else	if (tp->flags & MC_HASH_ONLY) {
		/* Chips with a register-loaded hash filter (no setup frame). */
		struct netdev_hw_addr *ha;
		if (netdev_mc_count(dev) > 64) {
			/* Arbitrary cut-off: accept all multicasts. */
			tp->csr6 |= AcceptAllMulticast;
			csr6 |= AcceptAllMulticast;
		} else {
			u32 mc_filter[2] = {0, 0};	 /* 64-bit hash table */
			int filterbit;
			netdev_for_each_mc_addr(ha, dev) {
				/* Comet uses little-endian CRC; others the
				 * top 6 bits of the big-endian CRC. */
				if (tp->flags & COMET_MAC_ADDR)
					filterbit = ether_crc_le(ETH_ALEN,
								 ha->addr);
				else
					filterbit = ether_crc(ETH_ALEN,
							      ha->addr) >> 26;
				filterbit &= 0x3f;
				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
				if (tulip_debug > 2)
					dev_info(&dev->dev,
						 "Added filter for %pM  %08x bit %d\n",
						 ha->addr,
						 ether_crc(ETH_ALEN, ha->addr),
						 filterbit);
			}
			/* Only touch the hardware if the table changed. */
			if (mc_filter[0] == tp->mc_filter[0] &&
			    mc_filter[1] == tp->mc_filter[1])
				;				/* No change. */
			else if (tp->flags & IS_ASIX) {
				iowrite32(2, ioaddr + CSR13);
				iowrite32(mc_filter[0], ioaddr + CSR14);
				iowrite32(3, ioaddr + CSR13);
				iowrite32(mc_filter[1], ioaddr + CSR14);
			} else if (tp->flags & COMET_MAC_ADDR) {
				iowrite32(mc_filter[0], ioaddr + CSR27);
				iowrite32(mc_filter[1], ioaddr + CSR28);
			}
			tp->mc_filter[0] = mc_filter[0];
			tp->mc_filter[1] = mc_filter[1];
		}
	} else {
		/* Classic chips: load the filter by queuing a setup frame
		 * on the Tx ring. */
		unsigned long flags;
		u32 tx_flags = 0x08000000 | 192;

		/* Perfect filter for up to 14 addresses, hash filter
		 * beyond that. */
		if (netdev_mc_count(dev) > 14) {
			/* 0x0040_0000 selects the hash-filter frame type. */
			build_setup_frame_hash(tp->setup_frame, dev);
			tx_flags = 0x08400000 | 192;
		} else {
			build_setup_frame_perfect(tp->setup_frame, dev);
		}

		spin_lock_irqsave(&tp->lock, flags);

		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
			/* Same setup recently queued, do nothing: avoid
			 * overflowing the ring. */
		} else {
			unsigned int entry;
			int dummy = -1;

			/* The setup frame must go at a ring boundary
			 * understood by the chip; pad with a dummy
			 * descriptor when not at slot 0. */
			entry = tp->cur_tx++ % TX_RING_SIZE;

			if (entry != 0) {
				/* Avoid a chip errata by prefixing a dummy entry. */
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tp->tx_ring[entry].length =
					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
				tp->tx_ring[entry].buffer1 = 0;
				/* Must set DescOwned later to avoid race with chip */
				dummy = entry;
				entry = tp->cur_tx++ % TX_RING_SIZE;

			}

			tp->tx_buffers[entry].skb = NULL;
			tp->tx_buffers[entry].mapping =
				pci_map_single(tp->pdev, tp->setup_frame,
					       sizeof(tp->setup_frame),
					       PCI_DMA_TODEVICE);
			/* Put the setup frame on the Tx list. */
			if (entry == TX_RING_SIZE-1)
				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
			tp->tx_ring[entry].buffer1 =
				cpu_to_le32(tp->tx_buffers[entry].mapping);
			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
			if (dummy >= 0)
				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
				netif_stop_queue(dev);

			/* Trigger an immediate transmit demand. */
			iowrite32(0, ioaddr + CSR1);
		}

		spin_unlock_irqrestore(&tp->lock, flags);
	}

	iowrite32(csr6, ioaddr + CSR6);
}
1195
#ifdef CONFIG_TULIP_MWI
/*
 * Pick CSR0 bus-access parameters (Memory-Write-and-Invalidate,
 * Memory-Read-Line/Multiple, cache alignment, burst length) from the
 * PCI cache line size, falling back to conservative defaults when MWI
 * cannot be used.  The chosen value is stored in tp->csr0.
 */
static void __devinit tulip_mwi_config (struct pci_dev *pdev,
					struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u8 cache;
	u16 pci_command;
	u32 csr0;

	if (tulip_debug > 3)
		netdev_dbg(dev, "tulip_mwi_config()\n");

	tp->csr0 = csr0 = 0;

	/* if we have any cache, MWI is useful */
	csr0 |= MRM | MWI;

	/* Enable MWI in the standard PCI command bit.
	 * Check for the case where MWI is desired but not available
	 */
	pci_try_set_mwi(pdev);

	/* read result from hardware (in case bit refused to enable) */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
		csr0 &= ~MWI;

	/* if cache line size hardwired to zero, no MWI */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
	if ((csr0 & MWI) && (cache == 0)) {
		csr0 &= ~MWI;
		pci_clear_mwi(pdev);
	}

	/* assign per-cacheline-size cache alignment and
	 * burst length values
	 */
	switch (cache) {
	case 8:
		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
		break;
	case 16:
		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
		break;
	case 32:
		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
		break;
	default:
		/* unsupported cache line size: fall through to safe defaults */
		cache = 0;
		break;
	}

	/* if we have a good cache line size, we by now have a good
	 * csr0, so save it and exit
	 */
	if (cache)
		goto out;

	/* we don't have a good csr0 or cache line size, disable MWI */
	if (csr0 & MWI) {
		pci_clear_mwi(pdev);
		csr0 &= ~MWI;
	}

	/* sane defaults for burst length and cache alignment:
	 * burst 8 longwords, cache alignment 1
	 */
	csr0 |= (8 << BurstLenShift) | (1 << CALShift);

out:
	tp->csr0 = csr0;
	if (tulip_debug > 2)
		netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
			   cache, csr0);
}
#endif
1272
1273
1274
1275
1276
1277
1278static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1279{
1280 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1281 return 1;
1282 return 0;
1283}
1284
/* net_device callbacks shared by every board this driver handles. */
static const struct net_device_ops tulip_netdev_ops = {
	.ndo_open		= tulip_open,
	.ndo_start_xmit		= tulip_start_xmit,
	.ndo_tx_timeout		= tulip_tx_timeout,
	.ndo_stop		= tulip_close,
	.ndo_get_stats		= tulip_get_stats,
	.ndo_do_ioctl 		= private_ioctl,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = poll_tulip,
#endif
};
1300
/* Early 486-era host chipsets on which the Tulip's default CSR0 bus
 * settings are reported to hang the machine; the probe routine forces
 * conservative CSR0 values when one of these is present. */
DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
	{ },
};
1306
/*
 * PCI probe: bring up one Tulip board.  Applies per-chip CSR0 quirks,
 * enables the device, maps its registers, allocates the DMA descriptor
 * rings, reads the station address from EEPROM (with several per-vendor
 * workarounds), configures media/MII, registers the netdev, and finally
 * resets the transceiver and puts the chip to sleep until open().
 */
static int __devinit tulip_init_one (struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct tulip_private *tp;
	/* See note below on the multiport cards. */
	static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
	static int last_irq;
	static int multiport_cnt;	/* For four-port boards w/one EEPROM */
	int i, irq;
	unsigned short sum;
	unsigned char *ee_data;
	struct net_device *dev;
	void __iomem *ioaddr;
	static int board_idx = -1;
	int chip_idx = ent->driver_data;
	const char *chip_name = tulip_tbl[chip_idx].chip_name;
	unsigned int eeprom_missing = 0;
	unsigned int force_csr0 = 0;

#ifndef MODULE
	if (tulip_debug > 0)
		printk_once(KERN_INFO "%s", version);
#endif

	board_idx++;

	/*
	 *	Some boards put a Tulip behind a WAN framer (LMC, SBE T3E3);
	 *	those must be driven by their own drivers, so refuse them here.
	 */
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
		pr_err("skipping LMC card\n");
		return -ENODEV;
	} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
		   (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
		pr_err("skipping SBE T3E3 port\n");
		return -ENODEV;
	}

	/*
	 *	DM910x chips are normally handled by the dmfe driver; this
	 *	driver only takes on-board chips that carry an OF
	 *	"local-mac-address" property, and never the early DM9100
	 *	revisions with the CRC bug.
	 */
#ifdef CONFIG_TULIP_DM910X
	if (chip_idx == DM910X) {
		struct device_node *dp;

		if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
		    pdev->revision < 0x30) {
			pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
			return -ENODEV;
		}

		dp = pci_device_to_OF_node(pdev);
		if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
			pr_info("skipping DM910x expansion card (use dmfe)\n");
			return -ENODEV;
		}
	}
#endif

	/*
	 *	Early 486 host chipsets hang with the default CSR0 bus
	 *	settings; force conservative values (MRL/MRM on, burst 8,
	 *	cache alignment 1) and skip the MWI tuning below.
	 */
	if (pci_dev_present(early_486_chipsets)) {
		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
		force_csr0 = 1;
	}

	/* The AX88140 needs a non-zero burst length in CSR0. */
	if (chip_idx == AX88140) {
		if ((csr0 & 0x3f00) == 0)
			csr0 |= 0x2000;
	}

	/* PNIC doesn't have MWI/MRL/MRM; clear those CSR0 bits. */
	if (chip_idx == LC82C168)
		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */

	/* DM9102A has troubles with MRM; clear the affected CSR0 bits. */
	if (tulip_uli_dm_quirk(pdev)) {
		csr0 &= ~0x01f100ff;
#if defined(CONFIG_SPARC)
		csr0 = (csr0 & ~0xff00) | 0xe000;
#endif
	}

	/*
	 *	And back to business: enable the device proper.
	 */
	i = pci_enable_device(pdev);
	if (i) {
		pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
		return i;
	}

	/* The chip will fail to enter a low-power state later unless
	 * first explicitly commanded into D0 */
	if (pci_set_power_state(pdev, PCI_D0)) {
		pr_notice("Failed to set power state to D0\n");
	}

	irq = pdev->irq;

	/* alloc_etherdev ensures aligned and zeroed private structures */
	dev = alloc_etherdev (sizeof (*tp));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
		pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
		       pci_name(pdev),
		       (unsigned long long)pci_resource_len (pdev, 0),
		       (unsigned long long)pci_resource_start (pdev, 0));
		goto err_out_free_netdev;
	}

	/* grab all resources from both PIO and MMIO regions, as we
	 * don't want anyone else messing around with our hardware */
	if (pci_request_regions (pdev, DRV_NAME))
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);

	if (!ioaddr)
		goto err_out_free_res;

	/*
	 * initialize private data structure 'tp'
	 * it is zeroed and aligned in alloc_etherdev
	 */
	tp = netdev_priv(dev);
	tp->dev = dev;

	/* Rx and Tx descriptor rings share one coherent DMA allocation. */
	tp->rx_ring = pci_alloc_consistent(pdev,
					   sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
					   sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
					   &tp->rx_ring_dma);
	if (!tp->rx_ring)
		goto err_out_mtable;
	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;

	tp->chip_id = chip_idx;
	tp->flags = tulip_tbl[chip_idx].flags;

	tp->wolinfo.supported = 0;
	tp->wolinfo.wolopts = 0;
	/* COMET: enable WOL/power management only for the AN983B
	 * (identified by the 0x09811317 signature at config offset 0x80). */
	if (chip_idx == COMET ) {
		u32 sig;
		pci_read_config_dword (pdev, 0x80, &sig);
		if (sig == 0x09811317) {
			tp->flags |= COMET_PM;
			tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
			pr_info("%s: Enabled WOL support for AN983B\n",
				__func__);
		}
	}
	tp->pdev = pdev;
	tp->base_addr = ioaddr;
	tp->revision = pdev->revision;
	tp->csr0 = csr0;
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->mii_lock);
	init_timer(&tp->timer);
	tp->timer.data = (unsigned long)dev;
	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;

	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);

#ifdef CONFIG_TULIP_MWI
	/* Tune CSR0 for MWI unless quirks above forced a fixed value. */
	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
		tulip_mwi_config (pdev, dev);
#endif

	/* Stop the chip's Tx and Rx processes. */
	tulip_stop_rxtx(tp);

	pci_set_master(pdev);

#ifdef CONFIG_GSC
	/* HP GSC boards carry a byte-swapped SEEPROM and need a faked
	 * media table. */
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
		switch (pdev->subsystem_device) {
		default:
			break;
		case 0x1061:
		case 0x1062:
		case 0x1063:
		case 0x1098:
		case 0x1099:
		case 0x10EE:
			tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
			chip_name = "GSC DS21140 Tulip";
		}
	}
#endif

	/* Clear the missed-packet counter. */
	ioread32(ioaddr + CSR8);

	/*
	 * Read the station address from the EEPROM (chip-specific
	 * method), accumulating a byte sum so we can detect a missing
	 * or blank EEPROM below.
	 */
	ee_data = tp->eeprom;
	memset(ee_data, 0, sizeof(tp->eeprom));
	sum = 0;
	if (chip_idx == LC82C168) {
		/* PNIC: the boot ROM interface is polled 16 bits at a time;
		 * the register reads negative while the read is in flight. */
		for (i = 0; i < 3; i++) {
			int value, boguscnt = 100000;
			iowrite32(0x600 | i, ioaddr + 0x98);
			do {
				value = ioread32(ioaddr + CSR9);
			} while (value < 0 && --boguscnt > 0);
			put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
			sum += value & 0xffff;
		}
	} else if (chip_idx == COMET) {
		/* No need to read the EEPROM: the MAC is register-visible. */
		put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
		put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
		for (i = 0; i < 6; i ++)
			sum += dev->dev_addr[i];
	} else {
		/* A serial EEPROM interface, we read now and sort it out later. */
		int sa_offset = 0;
		int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
		int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);

		if (ee_max_addr > sizeof(tp->eeprom))
			ee_max_addr = sizeof(tp->eeprom);

		for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
			u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
			ee_data[i] = data & 0xff;
			ee_data[i + 1] = data >> 8;
		}

		/* DEC has a specification for the format, but early board
		 * makers just put the address in the first EEPROM locations.
		 * This does  memcmp(ee_data, ee_data+16, 8).
		 */
		for (i = 0; i < 8; i ++)
			if (ee_data[i] != ee_data[16+i])
				sa_offset = 20;
		if (chip_idx == CONEXANT) {
			/* Check that the tuple type and length is correct. */
			if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
				sa_offset = 0x19A;
		} else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
			   ee_data[2] == 0) {
			sa_offset = 2;		/* Grrr, damn Matrox boards. */
			multiport_cnt = 4;
		}
#ifdef CONFIG_MIPS_COBALT
		if ((pdev->bus->number == 0) &&
		    ((PCI_SLOT(pdev->devfn) == 7) ||
		     (PCI_SLOT(pdev->devfn) == 12))) {
			/* Cobalt MAC address in first EEPROM locations. */
			sa_offset = 0;
			/* Ensure our media table fixup gets applied */
			memcpy(ee_data + 16, ee_data, 8);
		}
#endif
#ifdef CONFIG_GSC
		/* Check to see if we have a broken srom */
		if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
			/* pci_vendor_id and subsystem_id are swapped */
			ee_data[0] = ee_data[2];
			ee_data[1] = ee_data[3];
			ee_data[2] = 0x61;
			ee_data[3] = 0x10;

			/* HSC-PCI boards need to be byte-swapped and shifted
			 * up 1 word.  This shift needs to happen at the end
			 * of the MAC first because of the 2 byte overlap.
			 */
			for (i = 4; i >= 0; i -= 2) {
				ee_data[17 + i + 3] = ee_data[17 + i];
				ee_data[16 + i + 5] = ee_data[16 + i];
			}
		}
#endif

		for (i = 0; i < 6; i ++) {
			dev->dev_addr[i] = ee_data[i + sa_offset];
			sum += ee_data[i + sa_offset];
		}
	}
	/* Lite-On boards have the address byte-swapped. */
	if ((dev->dev_addr[0] == 0xA0 ||
	     dev->dev_addr[0] == 0xC0 ||
	     dev->dev_addr[0] == 0x02) &&
	    dev->dev_addr[1] == 0x00)
		for (i = 0; i < 6; i+=2) {
			char tmp = dev->dev_addr[i];
			dev->dev_addr[i] = dev->dev_addr[i+1];
			dev->dev_addr[i+1] = tmp;
		}
	/* On multiport boards only the first Tulip has an EEPROM; the
	 * addresses of the other ports are derived from the first one.
	 * (On SPARC a "local-mac-address" OF property is used instead.)
	 * A blank EEPROM sums to 0 or 6*0xff.  Some x86 BIOSes also
	 * misreport the IRQ line; reuse the previous port's IRQ then. */
	if (sum == 0 || sum == 6*0xff) {
#if defined(CONFIG_SPARC)
		struct device_node *dp = pci_device_to_OF_node(pdev);
		const unsigned char *addr;
		int len;
#endif
		eeprom_missing = 1;
		for (i = 0; i < 5; i++)
			dev->dev_addr[i] = last_phys_addr[i];
		dev->dev_addr[i] = last_phys_addr[i] + 1;
#if defined(CONFIG_SPARC)
		addr = of_get_property(dp, "local-mac-address", &len);
		if (addr && len == 6)
			memcpy(dev->dev_addr, addr, 6);
#endif
#if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
		if (last_irq)
			irq = last_irq;
#endif
	}

	for (i = 0; i < 6; i++)
		last_phys_addr[i] = dev->dev_addr[i];
	last_irq = irq;

	/* The lower four bits are the media type. */
	if (board_idx >= 0 && board_idx < MAX_UNITS) {
		if (options[board_idx] & MEDIA_MASK)
			tp->default_port = options[board_idx] & MEDIA_MASK;
		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
			tp->full_duplex = 1;
		if (mtu[board_idx] > 0)
			dev->mtu = mtu[board_idx];
	}
	if (dev->mem_start & MEDIA_MASK)
		tp->default_port = dev->mem_start & MEDIA_MASK;
	if (tp->default_port) {
		pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
			board_idx, medianame[tp->default_port & MEDIA_MASK]);
		tp->medialock = 1;
		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
			tp->full_duplex = 1;
	}
	if (tp->full_duplex)
		tp->full_duplex_lock = 1;

	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
		static const u16 media2advert[] = {
			0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
		};
		tp->mii_advertise = media2advert[tp->default_port - 9];
		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
	}

	if (tp->flags & HAS_MEDIA_TABLE) {
		sprintf(dev->name, DRV_NAME "%d", board_idx);	/* hack */
		tulip_parse_eeprom(dev);
		strcpy(dev->name, "eth%d");			/* un-hack */
	}

	if ((tp->flags & ALWAYS_CHECK_MII) ||
	    (tp->mtable && tp->mtable->has_mii) ||
	    ( ! tp->mtable && (tp->flags & HAS_MII))) {
		if (tp->mtable && tp->mtable->has_mii) {
			for (i = 0; i < tp->mtable->leafcount; i++)
				if (tp->mtable->mleaf[i].media == 11) {
					tp->cur_index = i;
					tp->saved_if_port = dev->if_port;
					tulip_select_media(dev, 2);
					dev->if_port = tp->saved_if_port;
					break;
				}
		}

		/* Find the connected MII xcvrs.
		   Doing this in open() would allow detecting external xcvrs
		   later, but takes much time. */
		tulip_find_mii (dev, board_idx);
	}

	/* The Tulip-specific entries in the device structure. */
	dev->netdev_ops = &tulip_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_TULIP_NAPI
	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
#endif
	SET_ETHTOOL_OPS(dev, &ops);

	if (register_netdev(dev))
		goto err_out_free_ring;

	pci_set_drvdata(pdev, dev);

	dev_info(&dev->dev,
#ifdef CONFIG_TULIP_MMIO
		 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
#else
		 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
#endif
		 chip_name, pdev->revision,
		 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
		 eeprom_missing ? " EEPROM not present," : "",
		 dev->dev_addr, irq);

	/* Select the link-change handler matching the chip's NWay support. */
	if (tp->chip_id == PNIC2)
		tp->link_change = pnic2_lnk_change;
	else if (tp->flags & HAS_NWAY)
		tp->link_change = t21142_lnk_change;
	else if (tp->flags & HAS_PNICNWAY)
		tp->link_change = pnic_lnk_change;

	/* Reset the xcvr interface and turn on heartbeat. */
	switch (chip_idx) {
	case DC21140:
	case DM910X:
	default:
		if (tp->mtable)
			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
		break;
	case DC21142:
		if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
			iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
		} else
			t21142_start_nway(dev);
		break;
	case PNIC2:
	        /* just do a reset for sanity sake */
		iowrite32(0x0000, ioaddr + CSR13);
		iowrite32(0x0000, ioaddr + CSR14);
		break;
	case LC82C168:
		if ( ! tp->mii_cnt) {
			tp->nway = 1;
			tp->nwayset = 0;
			iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
			iowrite32(0x30, ioaddr + CSR12);
			iowrite32(0x0001F078, ioaddr + CSR6);
			iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
		}
		break;
	case MX98713:
	case COMPEX9881:
		iowrite32(0x00000000, ioaddr + CSR6);
		iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
		iowrite32(0x00000001, ioaddr + CSR13);
		break;
	case MX98715:
	case MX98725:
		iowrite32(0x01a80000, ioaddr + CSR6);
		iowrite32(0xFFFFFFFF, ioaddr + CSR14);
		iowrite32(0x00001000, ioaddr + CSR12);
		break;
	case COMET:
		/* No initialization necessary. */
		break;
	}

	/* put the chip in snooze mode until opened */
	tulip_set_power_state (tp, 0, 1);

	return 0;

err_out_free_ring:
	pci_free_consistent (pdev,
			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
			     tp->rx_ring, tp->rx_ring_dma);

err_out_mtable:
	kfree (tp->mtable);
	pci_iounmap(pdev, ioaddr);

err_out_free_res:
	pci_release_regions (pdev);

err_out_free_netdev:
	free_netdev (dev);
	return -ENODEV;
}
1811
1812
1813
/*
 * Program the COMET (ADMtek AN983B) wake-on-LAN hardware according to
 * @wolopts (WAKE_MAGIC / WAKE_PHY bits).  No-op on chips without the
 * COMET_PM capability flag.
 */
static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	if (tp->flags & COMET_PM) {
		/* Select PM mode, clearing APM mode and the sticky/D3a bits. */
		unsigned int tmp;

		tmp = ioread32(ioaddr + CSR18);
		tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
		tmp |= comet_csr18_pm_mode;
		iowrite32(tmp, ioaddr + CSR18);

		/* Set the Wake-up Control/Status Register to the given WOL options */
		tmp = ioread32(ioaddr + CSR13);
		tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
		if (wolopts & WAKE_MAGIC)
			tmp |= comet_csr13_mpre;
		if (wolopts & WAKE_PHY)
			tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
		/* Clear the event flags */
		tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
		iowrite32(tmp, ioaddr + CSR13);
	}
}
1841
1842#ifdef CONFIG_PM
1843
1844
1845static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1846{
1847 pci_power_t pstate;
1848 struct net_device *dev = pci_get_drvdata(pdev);
1849 struct tulip_private *tp = netdev_priv(dev);
1850
1851 if (!dev)
1852 return -EINVAL;
1853
1854 if (!netif_running(dev))
1855 goto save_state;
1856
1857 tulip_down(dev);
1858
1859 netif_device_detach(dev);
1860
1861 free_irq(tp->pdev->irq, dev);
1862
1863save_state:
1864 pci_save_state(pdev);
1865 pci_disable_device(pdev);
1866 pstate = pci_choose_state(pdev, state);
1867 if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
1868 int rc;
1869
1870 tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1871 rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1872 if (rc)
1873 pr_err("pci_enable_wake failed (%d)\n", rc);
1874 }
1875 pci_set_power_state(pdev, pstate);
1876
1877 return 0;
1878}
1879
1880
1881static int tulip_resume(struct pci_dev *pdev)
1882{
1883 struct net_device *dev = pci_get_drvdata(pdev);
1884 struct tulip_private *tp = netdev_priv(dev);
1885 void __iomem *ioaddr = tp->base_addr;
1886 int retval;
1887 unsigned int tmp;
1888
1889 if (!dev)
1890 return -EINVAL;
1891
1892 pci_set_power_state(pdev, PCI_D0);
1893 pci_restore_state(pdev);
1894
1895 if (!netif_running(dev))
1896 return 0;
1897
1898 if ((retval = pci_enable_device(pdev))) {
1899 pr_err("pci_enable_device failed in resume\n");
1900 return retval;
1901 }
1902
1903 retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1904 dev->name, dev);
1905 if (retval) {
1906 pr_err("request_irq failed in resume\n");
1907 return retval;
1908 }
1909
1910 if (tp->flags & COMET_PM) {
1911 pci_enable_wake(pdev, PCI_D3hot, 0);
1912 pci_enable_wake(pdev, PCI_D3cold, 0);
1913
1914
1915 tmp = ioread32(ioaddr + CSR20);
1916 tmp |= comet_csr20_pmes;
1917 iowrite32(tmp, ioaddr + CSR20);
1918
1919
1920 tulip_set_wolopts(pdev, 0);
1921 }
1922 netif_device_attach(dev);
1923
1924 if (netif_running(dev))
1925 tulip_up(dev);
1926
1927 return 0;
1928}
1929
1930#endif
1931
1932
/*
 * Tear down one board on hot-unplug or driver unload: unregister the
 * netdev first so no new I/O can start, then release the descriptor
 * ring DMA memory, media table, MMIO mapping, netdev, and PCI regions.
 */
static void __devexit tulip_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct tulip_private *tp;

	if (!dev)
		return;

	tp = netdev_priv(dev);
	unregister_netdev(dev);
	pci_free_consistent (pdev,
			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
			     tp->rx_ring, tp->rx_ring_dma);
	kfree (tp->mtable);
	pci_iounmap(pdev, tp->base_addr);
	free_netdev (dev);
	pci_release_regions (pdev);
	pci_set_drvdata (pdev, NULL);

	/* NOTE(review): the device is intentionally left enabled here —
	 * presumably matching the original driver's behavior; confirm
	 * before adding pci_disable_device(). */
}
1955
1956#ifdef CONFIG_NET_POLL_CONTROLLER
1957
1958
1959
1960
1961
1962
1963static void poll_tulip (struct net_device *dev)
1964{
1965 struct tulip_private *tp = netdev_priv(dev);
1966 const int irq = tp->pdev->irq;
1967
1968
1969
1970 disable_irq(irq);
1971 tulip_interrupt (irq, dev);
1972 enable_irq(irq);
1973}
1974#endif
1975
/* PCI driver glue: probe/remove plus optional suspend/resume hooks. */
static struct pci_driver tulip_driver = {
	.name		= DRV_NAME,
	.id_table	= tulip_pci_tbl,
	.probe		= tulip_init_one,
	.remove		= __devexit_p(tulip_remove_one),
#ifdef CONFIG_PM
	.suspend	= tulip_suspend,
	.resume		= tulip_resume,
#endif
};
1986
1987
/* Module init: copy module parameters into the shared tulip globals,
 * then register the PCI driver (which probes all matching boards). */
static int __init tulip_init (void)
{
#ifdef MODULE
	pr_info("%s", version);
#endif

	/* copy module parms into globals */
	tulip_rx_copybreak = rx_copybreak;
	tulip_max_interrupt_work = max_interrupt_work;

	/* probe for and init boards */
	return pci_register_driver(&tulip_driver);
}
2001
2002
/* Module exit: unregister the PCI driver, detaching all boards. */
static void __exit tulip_cleanup (void)
{
	pci_unregister_driver (&tulip_driver);
}
2007
2008
/* Module entry and exit points. */
module_init(tulip_init);
module_exit(tulip_cleanup);
2011