#define pr_fmt(fmt) "tulip: " fmt

#define DRV_NAME	"tulip"
#ifdef CONFIG_TULIP_NAPI
#define DRV_VERSION	"1.1.15-NAPI"
#else
#define DRV_VERSION	"1.1.15"
#endif
#define DRV_RELDATE	"Feb 27, 2007"

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "tulip.h"
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

static char version[] =
	"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";

static unsigned int max_interrupt_work = 25;

#define MAX_UNITS 8

static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
static int mtu[MAX_UNITS];

const char * const medianame[32] = {
	"10baseT", "10base2", "AUI", "100baseTx",
	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
	"","","","", "","","","", "","","","Transceiver reset",
};

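/*
 * Copy-break threshold for received frames: packets shorter than
 * rx_copybreak are copied into a freshly allocated skb so the DMA
 * buffer can be reused.  Architectures where unaligned DMA buffers
 * are expensive default to always copying (1518).
 */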
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif

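/*
 * Default CSR0 (PCI bus mode) value.  The per-architecture settings
 * select cache alignment, burst length and, where supported, the
 * Memory-Read-Line/Memory-Read-Multiple/Memory-Write-and-Invalidate
 * transaction types.
 */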
#if defined(__alpha__) || defined(__ia64__)
static int csr0 = 0x01A00000 | 0xE000;
#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
static int csr0 = 0x01A00000 | 0x8000;
#elif defined(CONFIG_SPARC) || defined(__hppa__)
static int csr0 = 0x01A00000 | 0x9000;
#elif defined(__arm__) || defined(__sh__)
static int csr0 = 0x01A00000 | 0x4800;
#elif defined(__mips__)
static int csr0 = 0x00200000 | 0x4000;
#else
#warning Processor architecture undefined!
static int csr0 = 0x00A00000 | 0x4800;
#endif

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)

MODULE_AUTHOR("The Linux Kernel Team");
MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_param(tulip_debug, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(rx_copybreak, int, 0);
module_param(csr0, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);

#ifdef TULIP_DEBUG
int tulip_debug = TULIP_DEBUG;
#else
int tulip_debug = 1;
#endif

static void tulip_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);

	if (netif_running(dev))
		schedule_work(&tp->media_work);
}

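/*
 * Per-chip feature table.  Entries are indexed by the chip_id values
 * used as driver_data in tulip_pci_tbl below (the first two slots are
 * unused placeholders), and supply the chip name, CSR region size,
 * valid interrupt mask, feature flags and media timer/task.
 */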
struct tulip_chip_table tulip_tbl[] = {
	{ },
	{ },

	{ "Digital DS21140 Tulip", 128, 0x0001ebef,
	  HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
	  tulip_media_task },

	{ "Digital DS21142/43 Tulip", 128, 0x0801fbff,
	  HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
	  | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },

	{ "Lite-On 82c168 PNIC", 256, 0x0001fbef,
	  HAS_MII | HAS_PNICNWAY, pnic_timer, },

	{ "Macronix 98713 PMAC", 128, 0x0001ebef,
	  HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },

	{ "Macronix 98715 PMAC", 256, 0x0001ebef,
	  HAS_MEDIA_TABLE, mxic_timer, },

	{ "Macronix 98725 PMAC", 256, 0x0001ebef,
	  HAS_MEDIA_TABLE, mxic_timer, },

	{ "ASIX AX88140", 128, 0x0001fbff,
	  HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
	  | IS_ASIX, tulip_timer, tulip_media_task },

	{ "Lite-On PNIC-II", 256, 0x0801fbff,
	  HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },

	{ "ADMtek Comet", 256, 0x0001abef,
	  HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },

	{ "Compex 9881 PMAC", 128, 0x0001ebef,
	  HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },

	{ "Intel DS21145 Tulip", 128, 0x0801fbff,
	  HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
	  | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },

#ifdef CONFIG_TULIP_DM910X
	{ "Davicom DM9102/DM9102A", 128, 0x0001ebef,
	  HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
	  tulip_timer, tulip_media_task },
#else
	{ NULL },
#endif

	{ "Conexant LANfinity", 256, 0x0001ebef,
	  HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
};

static const struct pci_device_id tulip_pci_tbl[] = {
	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },

	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
#ifdef CONFIG_TULIP_DM910X
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
#endif
	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ }
};
MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);

const char tulip_media_cap[32] =
{0,0,0,16,  3,19,16,24,  27,4,7,5,  0,20,23,20,  28,31,0,0, };

static void tulip_tx_timeout(struct net_device *dev);
static void tulip_init_ring(struct net_device *dev);
static void tulip_free_ring(struct net_device *dev);
static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static int tulip_open(struct net_device *dev);
static int tulip_close(struct net_device *dev);
static void tulip_up(struct net_device *dev);
static void tulip_down(struct net_device *dev);
static struct net_device_stats *tulip_get_stats(struct net_device *dev);
static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_tulip(struct net_device *dev);
#endif

static void tulip_set_power_state (struct tulip_private *tp,
				   int sleep, int snooze)
{
	if (tp->flags & HAS_ACPI) {
		u32 tmp, newtmp;
		pci_read_config_dword (tp->pdev, CFDD, &tmp);
		newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
		if (sleep)
			newtmp |= CFDD_Sleep;
		else if (snooze)
			newtmp |= CFDD_Snooze;
		if (tmp != newtmp)
			pci_write_config_dword (tp->pdev, CFDD, newtmp);
	}

}

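/*
 * Bring the interface up: wake the chip out of snooze/sleep, reset it,
 * load the Rx/Tx descriptor ring addresses and the station address,
 * select the transmit medium, unmask interrupts and start the
 * transmitter/receiver, then arm the media timer.
 */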
static void tulip_up(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int next_tick = 3*HZ;
	u32 reg;
	int i;

#ifdef CONFIG_TULIP_NAPI
	napi_enable(&tp->napi);
#endif

	tulip_set_power_state (tp, 0, 0);

	pci_enable_wake(tp->pdev, PCI_D3hot, 0);
	pci_enable_wake(tp->pdev, PCI_D3cold, 0);
	tulip_set_wolopts(tp->pdev, 0);

	if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
		iowrite32(0x00040000, ioaddr + CSR6);

	iowrite32(0x00000001, ioaddr + CSR0);
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
	udelay(100);

	iowrite32(tp->csr0, ioaddr + CSR0);
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
	udelay(100);

	if (tulip_debug > 1)
		netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);

	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
	tp->cur_rx = tp->cur_tx = 0;
	tp->dirty_rx = tp->dirty_tx = 0;

	if (tp->flags & MC_HASH_ONLY) {
		u32 addr_low = get_unaligned_le32(dev->dev_addr);
		u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
		if (tp->chip_id == AX88140) {
			iowrite32(0, ioaddr + CSR13);
			iowrite32(addr_low, ioaddr + CSR14);
			iowrite32(1, ioaddr + CSR13);
			iowrite32(addr_high, ioaddr + CSR14);
		} else if (tp->flags & COMET_MAC_ADDR) {
			iowrite32(addr_low, ioaddr + 0xA4);
			iowrite32(addr_high, ioaddr + 0xA8);
			iowrite32(0, ioaddr + CSR27);
			iowrite32(0, ioaddr + CSR28);
		}
	} else {
		u16 *eaddrs = (u16 *)dev->dev_addr;
		u16 *setup_frm = &tp->setup_frame[15*6];
		dma_addr_t mapping;

		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));

		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];

		mapping = pci_map_single(tp->pdev, tp->setup_frame,
					 sizeof(tp->setup_frame),
					 PCI_DMA_TODEVICE);
		tp->tx_buffers[tp->cur_tx].skb = NULL;
		tp->tx_buffers[tp->cur_tx].mapping = mapping;

		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);

		tp->cur_tx++;
	}

	tp->saved_if_port = dev->if_port;
	if (dev->if_port == 0)
		dev->if_port = tp->default_port;

	i = 0;
	if (tp->mtable == NULL)
		goto media_picked;
	if (dev->if_port) {
		int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
			(dev->if_port == 12 ? 0 : dev->if_port);
		for (i = 0; i < tp->mtable->leafcount; i++)
			if (tp->mtable->mleaf[i].media == looking_for) {
				dev_info(&dev->dev,
					 "Using user-specified media %s\n",
					 medianame[dev->if_port]);
				goto media_picked;
			}
	}
	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
		for (i = 0; i < tp->mtable->leafcount; i++)
			if (tp->mtable->mleaf[i].media == looking_for) {
				dev_info(&dev->dev,
					 "Using EEPROM-set media %s\n",
					 medianame[looking_for]);
				goto media_picked;
			}
	}

	for (i = tp->mtable->leafcount - 1;
	     (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
		;
media_picked:

	tp->csr6 = 0;
	tp->cur_index = i;
	tp->nwayset = 0;

	if (dev->if_port) {
		if (tp->chip_id == DC21143 &&
		    (tulip_media_cap[dev->if_port] & MediaIsMII)) {
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
			iowrite32(0x0008, ioaddr + CSR15);
		}
		tulip_select_media(dev, 1);
	} else if (tp->chip_id == DC21142) {
		if (tp->mii_cnt) {
			tulip_select_media(dev, 1);
			if (tulip_debug > 1)
				dev_info(&dev->dev,
					 "Using MII transceiver %d, status %04x\n",
					 tp->phys[0],
					 tulip_mdio_read(dev, tp->phys[0], 1));
			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
			tp->csr6 = csr6_mask_hdcap;
			dev->if_port = 11;
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
		} else
			t21142_start_nway(dev);
	} else if (tp->chip_id == PNIC2) {
		tp->sym_advertise = 0x01E0;

		iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
		iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
		pnic2_start_nway(dev);
	} else if (tp->chip_id == LC82C168 && ! tp->medialock) {
		if (tp->mii_cnt) {
			dev->if_port = 11;
			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
			iowrite32(0x0001, ioaddr + CSR15);
		} else if (ioread32(ioaddr + CSR5) & TPLnkPass)
			pnic_do_nway(dev);
		else {
			iowrite32(0x32, ioaddr + CSR12);
			tp->csr6 = 0x00420000;
			iowrite32(0x0001B078, ioaddr + 0xB8);
			iowrite32(0x0201B078, ioaddr + 0xB8);
			next_tick = 1*HZ;
		}
	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
		   ! tp->medialock) {
		dev->if_port = 0;
		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
		dev->if_port = 0;
		tp->csr6 = 0x01a80200;
		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
		iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
	} else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
		iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
		dev->if_port = tp->mii_cnt ? 11 : 0;
		tp->csr6 = 0x00040000;
	} else if (tp->chip_id == AX88140) {
		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
	} else
		tulip_select_media(dev, 1);

	tulip_stop_rxtx(tp);
	barrier();
	udelay(5);
	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);

	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
	tulip_start_rxtx(tp);
	iowrite32(0, ioaddr + CSR2);

	if (tulip_debug > 2) {
		netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
			   ioread32(ioaddr + CSR0),
			   ioread32(ioaddr + CSR5),
			   ioread32(ioaddr + CSR6));
	}

	tp->timer.expires = RUN_AT(next_tick);
	add_timer(&tp->timer);
#ifdef CONFIG_TULIP_NAPI
	init_timer(&tp->oom_timer);
	tp->oom_timer.data = (unsigned long)dev;
	tp->oom_timer.function = oom_timer;
#endif
}

static int
tulip_open(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int retval;

	tulip_init_ring (dev);

	retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
			     dev->name, dev);
	if (retval)
		goto free_ring;

	tulip_up (dev);

	netif_start_queue (dev);

	return 0;

free_ring:
	tulip_free_ring (dev);
	return retval;
}

static void tulip_tx_timeout(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	spin_lock_irqsave (&tp->lock, flags);

	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "Transmit timeout using MII device\n");
	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
		   tp->chip_id == DM910X) {
		dev_warn(&dev->dev,
			 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
			 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
			 ioread32(ioaddr + CSR15));
		tp->timeout_recovery = 1;
		schedule_work(&tp->media_work);
		goto out_unlock;
	} else if (tp->chip_id == PNIC2) {
		dev_warn(&dev->dev,
			 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
			 (int)ioread32(ioaddr + CSR5),
			 (int)ioread32(ioaddr + CSR6),
			 (int)ioread32(ioaddr + CSR7),
			 (int)ioread32(ioaddr + CSR12));
	} else {
		dev_warn(&dev->dev,
			 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
		dev->if_port = 0;
	}

#if defined(way_too_many_messages)
	if (tulip_debug > 3) {
		int i;
		for (i = 0; i < RX_RING_SIZE; i++) {
			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
			int j;
			printk(KERN_DEBUG
			       "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
			       i,
			       (unsigned int)tp->rx_ring[i].status,
			       (unsigned int)tp->rx_ring[i].length,
			       (unsigned int)tp->rx_ring[i].buffer1,
			       (unsigned int)tp->rx_ring[i].buffer2,
			       buf[0], buf[1], buf[2]);
			for (j = 0; buf[j] != 0xee && j < 1600; j++)
				if (j < 100)
					pr_cont(" %02x", buf[j]);
			pr_cont(" j=%d\n", j);
		}
		printk(KERN_DEBUG "  Rx ring %p: ", tp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
		printk(KERN_DEBUG "  Tx ring %p: ", tp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
		pr_cont("\n");
	}
#endif

	tulip_tx_timeout_complete(tp, ioaddr);

out_unlock:
	spin_unlock_irqrestore (&tp->lock, flags);
	netif_trans_update(dev);
	netif_wake_queue (dev);
}

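/*
 * Initialise the Rx and Tx descriptor rings: chain the descriptors
 * through their buffer2 pointers (wrapping at the end), allocate and
 * map an skb for every Rx slot, and hand the Rx descriptors to the
 * chip by setting DescOwned.
 */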
static void tulip_init_ring(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int i;

	tp->susp_rx = 0;
	tp->ttimer = 0;
	tp->nir = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		tp->rx_ring[i].status = 0x00000000;
		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
		tp->rx_buffers[i].skb = NULL;
		tp->rx_buffers[i].mapping = 0;
	}

	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);

	for (i = 0; i < RX_RING_SIZE; i++) {
		dma_addr_t mapping;

		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
		tp->rx_buffers[i].skb = skb;
		if (skb == NULL)
			break;
		mapping = pci_map_single(tp->pdev, skb->data,
					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
		tp->rx_buffers[i].mapping = mapping;
		tp->rx_ring[i].status = cpu_to_le32(DescOwned);
		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
	}
	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_buffers[i].skb = NULL;
		tp->tx_buffers[i].mapping = 0;
		tp->tx_ring[i].status = 0x00000000;
		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
	}
	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
}

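/*
 * Queue a packet for transmission: map the skb for DMA, fill the next
 * Tx descriptor, set the interrupt-on-completion bit as the ring fills,
 * hand the descriptor to the chip and issue a Tx poll demand via CSR1.
 */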
static netdev_tx_t
tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	u32 flag;
	dma_addr_t mapping;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	entry = tp->cur_tx % TX_RING_SIZE;

	tp->tx_buffers[entry].skb = skb;
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb->len, PCI_DMA_TODEVICE);
	tp->tx_buffers[entry].mapping = mapping;
	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);

	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {
		flag = 0x60000000;
	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
		flag = 0xe0000000;
	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
		flag = 0x60000000;
	} else {
		flag = 0xe0000000;
		netif_stop_queue(dev);
	}
	if (entry == TX_RING_SIZE-1)
		flag = 0xe0000000 | DESC_RING_WRAP;

	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);

	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
	wmb();

	tp->cur_tx++;

	iowrite32(0, tp->base_addr + CSR1);

	spin_unlock_irqrestore(&tp->lock, flags);

	return NETDEV_TX_OK;
}

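/*
 * Release every Tx descriptor still outstanding when the interface is
 * shut down.  Setup frames (skb == NULL) only need their DMA mapping
 * released; normal packets are unmapped and freed.
 */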
static void tulip_clean_tx_ring(struct tulip_private *tp)
{
	unsigned int dirty_tx;

	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
	     dirty_tx++) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(tp->tx_ring[entry].status);

		if (status < 0) {
			tp->dev->stats.tx_errors++;
			tp->tx_ring[entry].status = 0;
		}

		if (tp->tx_buffers[entry].skb == NULL) {
			if (tp->tx_buffers[entry].mapping)
				pci_unmap_single(tp->pdev,
						 tp->tx_buffers[entry].mapping,
						 sizeof(tp->setup_frame),
						 PCI_DMA_TODEVICE);
			continue;
		}

		pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
				 tp->tx_buffers[entry].skb->len,
				 PCI_DMA_TODEVICE);

		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
		tp->tx_buffers[entry].skb = NULL;
		tp->tx_buffers[entry].mapping = 0;
	}
}

static void tulip_down (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	cancel_work_sync(&tp->media_work);

#ifdef CONFIG_TULIP_NAPI
	napi_disable(&tp->napi);
#endif

	del_timer_sync (&tp->timer);
#ifdef CONFIG_TULIP_NAPI
	del_timer_sync (&tp->oom_timer);
#endif
	spin_lock_irqsave (&tp->lock, flags);

	iowrite32 (0x00000000, ioaddr + CSR7);

	tulip_stop_rxtx(tp);

	tulip_refill_rx(dev);

	tulip_clean_tx_ring(tp);

	if (ioread32(ioaddr + CSR6) != 0xffffffff)
		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;

	spin_unlock_irqrestore (&tp->lock, flags);

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long)dev;
	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;

	dev->if_port = tp->saved_if_port;

	tulip_set_power_state (tp, 0, 1);
}

static void tulip_free_ring (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = tp->rx_buffers[i].skb;
		dma_addr_t mapping = tp->rx_buffers[i].mapping;

		tp->rx_buffers[i].skb = NULL;
		tp->rx_buffers[i].mapping = 0;

		tp->rx_ring[i].status = 0;
		tp->rx_ring[i].length = 0;

		tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
		if (skb) {
			pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb (skb);
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = tp->tx_buffers[i].skb;

		if (skb != NULL) {
			pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb (skb);
		}
		tp->tx_buffers[i].skb = NULL;
		tp->tx_buffers[i].mapping = 0;
	}
}

static int tulip_close (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	netif_stop_queue (dev);

	tulip_down (dev);

	if (tulip_debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
			   ioread32 (ioaddr + CSR5));

	free_irq (tp->pdev->irq, dev);

	tulip_free_ring (dev);

	return 0;
}

static struct net_device_stats *tulip_get_stats(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	if (netif_running(dev)) {
		unsigned long flags;

		spin_lock_irqsave (&tp->lock, flags);

		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;

		spin_unlock_irqrestore(&tp->lock, flags);
	}

	return &dev->stats;
}

static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tulip_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

static int tulip_ethtool_set_wol(struct net_device *dev,
				 struct ethtool_wolinfo *wolinfo)
{
	struct tulip_private *tp = netdev_priv(dev);

	if (wolinfo->wolopts & (~tp->wolinfo.supported))
		return -EOPNOTSUPP;

	tp->wolinfo.wolopts = wolinfo->wolopts;
	device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
	return 0;
}

static void tulip_ethtool_get_wol(struct net_device *dev,
				  struct ethtool_wolinfo *wolinfo)
{
	struct tulip_private *tp = netdev_priv(dev);

	wolinfo->supported = tp->wolinfo.supported;
	wolinfo->wolopts = tp->wolinfo.wolopts;
	return;
}

static const struct ethtool_ops ops = {
	.get_drvinfo = tulip_get_drvinfo,
	.set_wol     = tulip_ethtool_set_wol,
	.get_wol     = tulip_ethtool_get_wol,
};

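/*
 * MII ioctl interface (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).  Chips
 * with built-in NWay pseudo-PHYs are exposed as PHY address 32 and
 * their registers are synthesised from the CSRs.
 */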
static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	const unsigned int phy_idx = 0;
	int phy = tp->phys[phy_idx] & 0x1f;
	unsigned int regnum = data->reg_num;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (tp->mii_cnt)
			data->phy_id = phy;
		else if (tp->flags & HAS_NWAY)
			data->phy_id = 32;
		else if (tp->chip_id == COMET)
			data->phy_id = 1;
		else
			return -ENODEV;
		/* fall through */

	case SIOCGMIIREG:
		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			int csr12 = ioread32 (ioaddr + CSR12);
			int csr14 = ioread32 (ioaddr + CSR14);
			switch (regnum) {
			case 0:
				if (((csr14<<5) & 0x1000) ||
				    (dev->if_port == 5 && tp->nwayset))
					data->val_out = 0x1000;
				else
					data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
						| (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
				break;
			case 1:
				data->val_out =
					0x1848 +
					((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
					((csr12&0x06) == 6 ? 0 : 4);
				data->val_out |= 0x6048;
				break;
			case 4:
				data->val_out =
					((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
					((csr14 >> 1) & 0x20) + 1;
				data->val_out |= ((csr14 >> 9) & 0x03C0);
				break;
			case 5: data->val_out = tp->lpar; break;
			default: data->val_out = 0; break;
			}
		} else {
			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
		}
		return 0;

	case SIOCSMIIREG:
		if (regnum & ~0x1f)
			return -EINVAL;
		if (data->phy_id == phy) {
			u16 value = data->val_in;
			switch (regnum) {
			case 0:
				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
				if (tp->full_duplex_lock)
					tp->full_duplex = (value & 0x0100) ? 1 : 0;
				break;
			case 4:
				tp->advertising[phy_idx] =
					tp->mii_advertise = data->val_in;
				break;
			}
		}
		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			u16 value = data->val_in;
			if (regnum == 0) {
				if ((value & 0x1200) == 0x1200) {
					if (tp->chip_id == PNIC2) {
						pnic2_start_nway (dev);
					} else {
						t21142_start_nway (dev);
					}
				}
			} else if (regnum == 4)
				tp->sym_advertise = value;
		} else {
			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}

	return -EOPNOTSUPP;
}

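/*
 * The chip's Rx address filter is loaded with a 192-byte "setup frame"
 * queued on the Tx ring: either 16 perfect-match entries (each 16-bit
 * word of the address stored twice), or a 512-bit multicast hash table
 * plus the station address.  The helpers below build both layouts in
 * tp->setup_frame.
 */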
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	__set_bit_le(255, hash_table);

	netdev_for_each_mc_addr(ha, dev) {
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		__set_bit_le(index, hash_table);
	}
	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &tp->setup_frame[13*6];

	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u16 *eaddrs;

	netdev_for_each_mc_addr(ha, dev) {
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}

	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &tp->setup_frame[15*6];

	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

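/*
 * Update the receive filter.  Promiscuous and all-multicast modes are
 * handled purely through CSR6 bits; MC_HASH_ONLY chips program their
 * on-chip hash/filter registers; everything else gets a new setup
 * frame queued on the Tx ring.
 */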
static void set_rx_mode(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr6;

	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;

	tp->csr6 &= ~0x00D5;
	if (dev->flags & IFF_PROMISC) {
		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
		csr6 |= AcceptAllMulticast | AcceptAllPhys;
	} else if ((netdev_mc_count(dev) > 1000) ||
		   (dev->flags & IFF_ALLMULTI)) {
		tp->csr6 |= AcceptAllMulticast;
		csr6 |= AcceptAllMulticast;
	} else if (tp->flags & MC_HASH_ONLY) {
		struct netdev_hw_addr *ha;
		if (netdev_mc_count(dev) > 64) {
			tp->csr6 |= AcceptAllMulticast;
			csr6 |= AcceptAllMulticast;
		} else {
			u32 mc_filter[2] = {0, 0};
			int filterbit;
			netdev_for_each_mc_addr(ha, dev) {
				if (tp->flags & COMET_MAC_ADDR)
					filterbit = ether_crc_le(ETH_ALEN,
								 ha->addr);
				else
					filterbit = ether_crc(ETH_ALEN,
							      ha->addr) >> 26;
				filterbit &= 0x3f;
				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
				if (tulip_debug > 2)
					dev_info(&dev->dev,
						 "Added filter for %pM  %08x bit %d\n",
						 ha->addr,
						 ether_crc(ETH_ALEN, ha->addr),
						 filterbit);
			}
			if (mc_filter[0] == tp->mc_filter[0] &&
			    mc_filter[1] == tp->mc_filter[1])
				;
			else if (tp->flags & IS_ASIX) {
				iowrite32(2, ioaddr + CSR13);
				iowrite32(mc_filter[0], ioaddr + CSR14);
				iowrite32(3, ioaddr + CSR13);
				iowrite32(mc_filter[1], ioaddr + CSR14);
			} else if (tp->flags & COMET_MAC_ADDR) {
				iowrite32(mc_filter[0], ioaddr + CSR27);
				iowrite32(mc_filter[1], ioaddr + CSR28);
			}
			tp->mc_filter[0] = mc_filter[0];
			tp->mc_filter[1] = mc_filter[1];
		}
	} else {
		unsigned long flags;
		u32 tx_flags = 0x08000000 | 192;

		if (netdev_mc_count(dev) > 14) {
			build_setup_frame_hash(tp->setup_frame, dev);
			tx_flags = 0x08400000 | 192;
		} else {
			build_setup_frame_perfect(tp->setup_frame, dev);
		}

		spin_lock_irqsave(&tp->lock, flags);

		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
			/* Ring is full: a setup frame cannot be queued now. */
		} else {
			unsigned int entry;
			int dummy = -1;

			entry = tp->cur_tx++ % TX_RING_SIZE;

			if (entry != 0) {
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tp->tx_ring[entry].length =
					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
				tp->tx_ring[entry].buffer1 = 0;

				dummy = entry;
				entry = tp->cur_tx++ % TX_RING_SIZE;
			}

			tp->tx_buffers[entry].skb = NULL;
			tp->tx_buffers[entry].mapping =
				pci_map_single(tp->pdev, tp->setup_frame,
					       sizeof(tp->setup_frame),
					       PCI_DMA_TODEVICE);

			if (entry == TX_RING_SIZE-1)
				tx_flags |= DESC_RING_WRAP;
			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
			tp->tx_ring[entry].buffer1 =
				cpu_to_le32(tp->tx_buffers[entry].mapping);
			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
			if (dummy >= 0)
				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
				netif_stop_queue(dev);

			iowrite32(0, ioaddr + CSR1);
		}

		spin_unlock_irqrestore(&tp->lock, flags);
	}

	iowrite32(csr6, ioaddr + CSR6);
}

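/*
 * For chips with HAS_PCI_MWI, derive a CSR0 value that enables
 * Memory-Read-Line/Memory-Read-Multiple and Memory-Write-and-Invalidate
 * based on the PCI cache line size, falling back to a conservative
 * burst configuration when MWI cannot be used.
 */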
#ifdef CONFIG_TULIP_MWI
static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u8 cache;
	u16 pci_command;
	u32 csr0;

	if (tulip_debug > 3)
		netdev_dbg(dev, "tulip_mwi_config()\n");

	tp->csr0 = csr0 = 0;

	csr0 |= MRM | MWI;

	pci_try_set_mwi(pdev);

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
		csr0 &= ~MWI;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
	if ((csr0 & MWI) && (cache == 0)) {
		csr0 &= ~MWI;
		pci_clear_mwi(pdev);
	}

	switch (cache) {
	case 8:
		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
		break;
	case 16:
		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
		break;
	case 32:
		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
		break;
	default:
		cache = 0;
		break;
	}

	if (cache)
		goto out;

	if (csr0 & MWI) {
		pci_clear_mwi(pdev);
		csr0 &= ~MWI;
	}

	csr0 |= (8 << BurstLenShift) | (1 << CALShift);

out:
	tp->csr0 = csr0;
	if (tulip_debug > 2)
		netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
			   cache, csr0);
}
#endif

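/*
 * The Davicom DM9102 (also found integrated in ULi chipsets) needs a
 * more conservative CSR0 setting; tulip_init_one() uses this quirk
 * check to mask the default value.
 */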
static int tulip_uli_dm_quirk(struct pci_dev *pdev)
{
	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
		return 1;
	return 0;
}

static const struct net_device_ops tulip_netdev_ops = {
	.ndo_open		= tulip_open,
	.ndo_start_xmit		= tulip_start_xmit,
	.ndo_tx_timeout		= tulip_tx_timeout,
	.ndo_stop		= tulip_close,
	.ndo_get_stats		= tulip_get_stats,
	.ndo_do_ioctl		= private_ioctl,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_change_mtu_rh74	= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_tulip,
#endif
};

const struct pci_device_id early_486_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
	{ },
};

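/*
 * PCI probe: reject boards handled by other drivers, fix up CSR0 for
 * quirky chips, map the CSR region, allocate the descriptor rings,
 * read the station address from the serial EEPROM (or chip registers),
 * parse the media table, locate any MII transceivers and register the
 * net device.
 */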
static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct tulip_private *tp;

	static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
	static int last_irq;
	static int multiport_cnt;
	int i, irq;
	unsigned short sum;
	unsigned char *ee_data;
	struct net_device *dev;
	void __iomem *ioaddr;
	static int board_idx = -1;
	int chip_idx = ent->driver_data;
	const char *chip_name = tulip_tbl[chip_idx].chip_name;
	unsigned int eeprom_missing = 0;
	unsigned int force_csr0 = 0;

#ifndef MODULE
	if (tulip_debug > 0)
		printk_once(KERN_INFO "%s", version);
#endif

	board_idx++;

	if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
		pr_err("skipping LMC card\n");
		return -ENODEV;
	} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
		   (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
		pr_err("skipping SBE T3E3 port\n");
		return -ENODEV;
	}

#ifdef CONFIG_TULIP_DM910X
	if (chip_idx == DM910X) {
		struct device_node *dp;

		if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
		    pdev->revision < 0x30) {
			pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
			return -ENODEV;
		}

		dp = pci_device_to_OF_node(pdev);
		if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
			pr_info("skipping DM910x expansion card (use dmfe)\n");
			return -ENODEV;
		}
	}
#endif

	if (pci_dev_present(early_486_chipsets)) {
		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
		force_csr0 = 1;
	}

	if (chip_idx == AX88140) {
		if ((csr0 & 0x3f00) == 0)
			csr0 |= 0x2000;
	}

	if (chip_idx == LC82C168)
		csr0 &= ~0xfff10000;

	if (tulip_uli_dm_quirk(pdev)) {
		csr0 &= ~0x01f100ff;
#if defined(CONFIG_SPARC)
		csr0 = (csr0 & ~0xff00) | 0xe000;
#endif
	}

	i = pci_enable_device(pdev);
	if (i) {
		pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
		return i;
	}

	if (pci_set_power_state(pdev, PCI_D0)) {
		pr_notice("Failed to set power state to D0\n");
	}

	irq = pdev->irq;

	dev = alloc_etherdev (sizeof (*tp));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
		pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
		       pci_name(pdev),
		       (unsigned long long)pci_resource_len (pdev, 0),
		       (unsigned long long)pci_resource_start (pdev, 0));
		goto err_out_free_netdev;
	}

	if (pci_request_regions (pdev, DRV_NAME))
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);

	if (!ioaddr)
		goto err_out_free_res;

	tp = netdev_priv(dev);
	tp->dev = dev;

	tp->rx_ring = pci_alloc_consistent(pdev,
					   sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
					   sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
					   &tp->rx_ring_dma);
	if (!tp->rx_ring)
		goto err_out_mtable;
	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;

	tp->chip_id = chip_idx;
	tp->flags = tulip_tbl[chip_idx].flags;

	tp->wolinfo.supported = 0;
	tp->wolinfo.wolopts = 0;

	if (chip_idx == COMET ) {
		u32 sig;
		pci_read_config_dword (pdev, 0x80, &sig);
		if (sig == 0x09811317) {
			tp->flags |= COMET_PM;
			tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
			pr_info("%s: Enabled WOL support for AN983B\n",
				__func__);
		}
	}
	tp->pdev = pdev;
	tp->base_addr = ioaddr;
	tp->revision = pdev->revision;
	tp->csr0 = csr0;
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->mii_lock);
	init_timer(&tp->timer);
	tp->timer.data = (unsigned long)dev;
	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;

	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);

#ifdef CONFIG_TULIP_MWI
	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
		tulip_mwi_config (pdev, dev);
#endif

	tulip_stop_rxtx(tp);

	pci_set_master(pdev);

#ifdef CONFIG_GSC
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
		switch (pdev->subsystem_device) {
		default:
			break;
		case 0x1061:
		case 0x1062:
		case 0x1063:
		case 0x1098:
		case 0x1099:
		case 0x10EE:
			tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
			chip_name = "GSC DS21140 Tulip";
		}
	}
#endif

	ioread32(ioaddr + CSR8);

	ee_data = tp->eeprom;
	memset(ee_data, 0, sizeof(tp->eeprom));
	sum = 0;
	if (chip_idx == LC82C168) {
		for (i = 0; i < 3; i++) {
			int value, boguscnt = 100000;
			iowrite32(0x600 | i, ioaddr + 0x98);
			do {
				value = ioread32(ioaddr + CSR9);
			} while (value < 0  && --boguscnt > 0);
			put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
			sum += value & 0xffff;
		}
	} else if (chip_idx == COMET) {
		put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
		put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
		for (i = 0; i < 6; i ++)
			sum += dev->dev_addr[i];
	} else {
		int sa_offset = 0;
		int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
		int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);

		if (ee_max_addr > sizeof(tp->eeprom))
			ee_max_addr = sizeof(tp->eeprom);

		for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
			u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
			ee_data[i] = data & 0xff;
			ee_data[i + 1] = data >> 8;
		}

		for (i = 0; i < 8; i ++)
			if (ee_data[i] != ee_data[16+i])
				sa_offset = 20;
		if (chip_idx == CONEXANT) {
			if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
				sa_offset = 0x19A;
		} else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
			   ee_data[2] == 0) {
			sa_offset = 2;
			multiport_cnt = 4;
		}
#ifdef CONFIG_MIPS_COBALT
		if ((pdev->bus->number == 0) &&
		    ((PCI_SLOT(pdev->devfn) == 7) ||
		     (PCI_SLOT(pdev->devfn) == 12))) {
			sa_offset = 0;

			memcpy(ee_data + 16, ee_data, 8);
		}
#endif
#ifdef CONFIG_GSC
		if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
			ee_data[0] = ee_data[2];
			ee_data[1] = ee_data[3];
			ee_data[2] = 0x61;
			ee_data[3] = 0x10;

			for (i = 4; i >= 0; i -= 2) {
				ee_data[17 + i + 3] = ee_data[17 + i];
				ee_data[16 + i + 5] = ee_data[16 + i];
			}
		}
#endif

		for (i = 0; i < 6; i ++) {
			dev->dev_addr[i] = ee_data[i + sa_offset];
			sum += ee_data[i + sa_offset];
		}
	}

	if ((dev->dev_addr[0] == 0xA0 ||
	     dev->dev_addr[0] == 0xC0 ||
	     dev->dev_addr[0] == 0x02) &&
	    dev->dev_addr[1] == 0x00)
		for (i = 0; i < 6; i+=2) {
			char tmp = dev->dev_addr[i];
			dev->dev_addr[i] = dev->dev_addr[i+1];
			dev->dev_addr[i+1] = tmp;
		}

	if (sum == 0 || sum == 6*0xff) {
#if defined(CONFIG_SPARC)
		struct device_node *dp = pci_device_to_OF_node(pdev);
		const unsigned char *addr;
		int len;
#endif
		eeprom_missing = 1;
		for (i = 0; i < 5; i++)
			dev->dev_addr[i] = last_phys_addr[i];
		dev->dev_addr[i] = last_phys_addr[i] + 1;
#if defined(CONFIG_SPARC)
		addr = of_get_property(dp, "local-mac-address", &len);
		if (addr && len == 6)
			memcpy(dev->dev_addr, addr, 6);
#endif
#if defined(__i386__) || defined(__x86_64__)
		if (last_irq)
			irq = last_irq;
#endif
	}

	for (i = 0; i < 6; i++)
		last_phys_addr[i] = dev->dev_addr[i];
	last_irq = irq;

	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
		if (options[board_idx] & MEDIA_MASK)
			tp->default_port = options[board_idx] & MEDIA_MASK;
		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
			tp->full_duplex = 1;
		if (mtu[board_idx] > 0)
			dev->mtu = mtu[board_idx];
	}
	if (dev->mem_start & MEDIA_MASK)
		tp->default_port = dev->mem_start & MEDIA_MASK;
	if (tp->default_port) {
		pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
			board_idx, medianame[tp->default_port & MEDIA_MASK]);
		tp->medialock = 1;
		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
			tp->full_duplex = 1;
	}
	if (tp->full_duplex)
		tp->full_duplex_lock = 1;

	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
		static const u16 media2advert[] = {
			0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
		};
		tp->mii_advertise = media2advert[tp->default_port - 9];
		tp->mii_advertise |= (tp->flags & HAS_8023X);
	}

	if (tp->flags & HAS_MEDIA_TABLE) {
		sprintf(dev->name, DRV_NAME "%d", board_idx);
		tulip_parse_eeprom(dev);
		strcpy(dev->name, "eth%d");
	}

	if ((tp->flags & ALWAYS_CHECK_MII) ||
	    (tp->mtable && tp->mtable->has_mii) ||
	    ( ! tp->mtable && (tp->flags & HAS_MII))) {
		if (tp->mtable && tp->mtable->has_mii) {
			for (i = 0; i < tp->mtable->leafcount; i++)
				if (tp->mtable->mleaf[i].media == 11) {
					tp->cur_index = i;
					tp->saved_if_port = dev->if_port;
					tulip_select_media(dev, 2);
					dev->if_port = tp->saved_if_port;
					break;
				}
		}

		tulip_find_mii (dev, board_idx);
	}

	dev->netdev_ops = &tulip_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_TULIP_NAPI
	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
#endif
	SET_ETHTOOL_OPS(dev, &ops);

	if (register_netdev(dev))
		goto err_out_free_ring;

	pci_set_drvdata(pdev, dev);

	dev_info(&dev->dev,
#ifdef CONFIG_TULIP_MMIO
		 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
#else
		 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
#endif
		 chip_name, pdev->revision,
		 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
		 eeprom_missing ? " EEPROM not present," : "",
		 dev->dev_addr, irq);

	if (tp->chip_id == PNIC2)
		tp->link_change = pnic2_lnk_change;
	else if (tp->flags & HAS_NWAY)
		tp->link_change = t21142_lnk_change;
	else if (tp->flags & HAS_PNICNWAY)
		tp->link_change = pnic_lnk_change;

	switch (chip_idx) {
	case DC21140:
	case DM910X:
	default:
		if (tp->mtable)
			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
		break;
	case DC21142:
		if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
			iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
		} else
			t21142_start_nway(dev);
		break;
	case PNIC2:
		iowrite32(0x0000, ioaddr + CSR13);
		iowrite32(0x0000, ioaddr + CSR14);
		break;
	case LC82C168:
		if ( ! tp->mii_cnt) {
			tp->nway = 1;
			tp->nwayset = 0;
			iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
			iowrite32(0x30, ioaddr + CSR12);
			iowrite32(0x0001F078, ioaddr + CSR6);
			iowrite32(0x0201F078, ioaddr + CSR6);
		}
		break;
	case MX98713:
	case COMPEX9881:
		iowrite32(0x00000000, ioaddr + CSR6);
		iowrite32(0x000711C0, ioaddr + CSR14);
		iowrite32(0x00000001, ioaddr + CSR13);
		break;
	case MX98715:
	case MX98725:
		iowrite32(0x01a80000, ioaddr + CSR6);
		iowrite32(0xFFFFFFFF, ioaddr + CSR14);
		iowrite32(0x00001000, ioaddr + CSR12);
		break;
	case COMET:
		break;
	}

	tulip_set_power_state (tp, 0, 1);

	return 0;

err_out_free_ring:
	pci_free_consistent (pdev,
			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
			     tp->rx_ring, tp->rx_ring_dma);

err_out_mtable:
	kfree (tp->mtable);
	pci_iounmap(pdev, ioaddr);

err_out_free_res:
	pci_release_regions (pdev);

err_out_free_netdev:
	free_netdev (dev);
	return -ENODEV;
}

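/*
 * Program the Comet (ADMtek AN983B) wake-up logic: CSR18 selects the
 * power-management mode and CSR13 enables magic-packet and/or link
 * change wake events.  Chips without COMET_PM are left untouched.
 */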
static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	if (tp->flags & COMET_PM) {
		unsigned int tmp;

		tmp = ioread32(ioaddr + CSR18);
		tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
		tmp |= comet_csr18_pm_mode;
		iowrite32(tmp, ioaddr + CSR18);

		tmp = ioread32(ioaddr + CSR13);
		tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
		if (wolopts & WAKE_MAGIC)
			tmp |= comet_csr13_mpre;
		if (wolopts & WAKE_PHY)
			tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;

		tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
		iowrite32(tmp, ioaddr + CSR13);
	}
}

#ifdef CONFIG_PM

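/*
 * PCI power-management hooks: suspend tears the interface down and,
 * when Wake-on-LAN is configured, arms the wake events before entering
 * the low-power state; resume restores PCI state, re-requests the IRQ
 * and brings the interface back up.
 */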
static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
{
	pci_power_t pstate;
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tulip_private *tp = netdev_priv(dev);

	if (!dev)
		return -EINVAL;

	if (!netif_running(dev))
		goto save_state;

	tulip_down(dev);

	netif_device_detach(dev);

	free_irq(tp->pdev->irq, dev);

save_state:
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pstate = pci_choose_state(pdev, state);
	if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
		int rc;

		tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
		rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
		if (rc)
			pr_err("pci_enable_wake failed (%d)\n", rc);
	}
	pci_set_power_state(pdev, pstate);

	return 0;
}

static int tulip_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int retval;
	unsigned int tmp;

	if (!dev)
		return -EINVAL;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (!netif_running(dev))
		return 0;

	if ((retval = pci_enable_device(pdev))) {
		pr_err("pci_enable_device failed in resume\n");
		return retval;
	}

	retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
			     dev->name, dev);
	if (retval) {
		pr_err("request_irq failed in resume\n");
		return retval;
	}

	if (tp->flags & COMET_PM) {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		tmp = ioread32(ioaddr + CSR20);
		tmp |= comet_csr20_pmes;
		iowrite32(tmp, ioaddr + CSR20);

		tulip_set_wolopts(pdev, 0);
	}
	netif_device_attach(dev);

	if (netif_running(dev))
		tulip_up(dev);

	return 0;
}

#endif

static void tulip_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct tulip_private *tp;

	if (!dev)
		return;

	tp = netdev_priv(dev);
	unregister_netdev(dev);
	pci_free_consistent (pdev,
			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
			     tp->rx_ring, tp->rx_ring_dma);
	kfree (tp->mtable);
	pci_iounmap(pdev, tp->base_addr);
	free_netdev (dev);
	pci_release_regions (pdev);
	pci_set_drvdata (pdev, NULL);
}

#ifdef CONFIG_NET_POLL_CONTROLLER

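/*
 * Polling "interrupt" used by netconsole and friends to drive the
 * driver without interrupts: temporarily disable the IRQ and invoke
 * the interrupt handler directly.
 */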
static void poll_tulip (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	const int irq = tp->pdev->irq;

	disable_irq(irq);
	tulip_interrupt (irq, dev);
	enable_irq(irq);
}
#endif

static struct pci_driver tulip_driver = {
	.name		= DRV_NAME,
	.id_table	= tulip_pci_tbl,
	.probe		= tulip_init_one,
	.remove		= tulip_remove_one,
#ifdef CONFIG_PM
	.suspend	= tulip_suspend,
	.resume		= tulip_resume,
#endif
};

static int __init tulip_init (void)
{
#ifdef MODULE
	pr_info("%s", version);
#endif

	tulip_rx_copybreak = rx_copybreak;
	tulip_max_interrupt_work = max_interrupt_work;

	return pci_register_driver(&tulip_driver);
}

static void __exit tulip_cleanup (void)
{
	pci_unregister_driver (&tulip_driver);
}

module_init(tulip_init);
module_exit(tulip_cleanup);