/* 8139cp.c: RealTek RTL-8139C+ series 10/100 PCI Ethernet driver */
49#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
50
51#define DRV_NAME "8139cp"
52#define DRV_VERSION "1.3"
53#define DRV_RELDATE "Mar 22, 2004"
54
55
56#include <linux/module.h>
57#include <linux/moduleparam.h>
58#include <linux/kernel.h>
59#include <linux/compiler.h>
60#include <linux/netdevice.h>
61#include <linux/etherdevice.h>
62#include <linux/init.h>
63#include <linux/interrupt.h>
64#include <linux/pci.h>
65#include <linux/dma-mapping.h>
66#include <linux/delay.h>
67#include <linux/ethtool.h>
68#include <linux/gfp.h>
69#include <linux/mii.h>
70#include <linux/if_vlan.h>
71#include <linux/crc32.h>
72#include <linux/in.h>
73#include <linux/ip.h>
74#include <linux/tcp.h>
75#include <linux/udp.h>
76#include <linux/cache.h>
77#include <asm/io.h>
78#include <asm/irq.h>
79#include <linux/uaccess.h>
80
81
82static char version[] =
83DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
84
85MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
86MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
87MODULE_VERSION(DRV_VERSION);
88MODULE_LICENSE("GPL");
89
90static int debug = -1;
91module_param(debug, int, 0);
92MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
93
94
95
96static int multicast_filter_limit = 32;
97module_param(multicast_filter_limit, int, 0);
98MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
99
100#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
101 NETIF_MSG_PROBE | \
102 NETIF_MSG_LINK)
103#define CP_NUM_STATS 14
104#define CP_STATS_SIZE 64
105#define CP_REGS_SIZE (0xff + 1)
106#define CP_REGS_VER 1
107#define CP_RX_RING_SIZE 64
108#define CP_TX_RING_SIZE 64
109#define CP_RING_BYTES \
110 ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
111 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
112 CP_STATS_SIZE)
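/* ring indices wrap with a power-of-two mask; TX_BUFFS_AVAIL keeps one
 * descriptor unused so a completely full ring can be told apart from an
 * empty one
 */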
113#define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
114#define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
115#define TX_BUFFS_AVAIL(CP) \
116 (((CP)->tx_tail <= (CP)->tx_head) ? \
117 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
118 (CP)->tx_tail - (CP)->tx_head - 1)
119
120#define PKT_BUF_SZ 1536
121#define CP_INTERNAL_PHY 32
122
123
124#define RX_FIFO_THRESH 5
125#define RX_DMA_BURST 4
126#define TX_DMA_BURST 6
127#define TX_EARLY_THRESH 256
128
/* Time in jiffies before concluding the transmitter is hung */
130#define TX_TIMEOUT (6*HZ)
131
132
133#define CP_MIN_MTU 60
134#define CP_MAX_MTU 4096
135
136enum {
 /* NIC register offsets */
138 MAC0 = 0x00,
139 MAR0 = 0x08,
140 StatsAddr = 0x10,
141 TxRingAddr = 0x20,
142 HiTxRingAddr = 0x28,
143 Cmd = 0x37,
144 IntrMask = 0x3C,
145 IntrStatus = 0x3E,
146 TxConfig = 0x40,
147 ChipVersion = 0x43,
148 RxConfig = 0x44,
149 RxMissed = 0x4C,
150 Cfg9346 = 0x50,
151 Config1 = 0x52,
152 Config3 = 0x59,
153 Config4 = 0x5A,
154 MultiIntr = 0x5C,
155 BasicModeCtrl = 0x62,
156 BasicModeStatus = 0x64,
157 NWayAdvert = 0x66,
158 NWayLPAR = 0x68,
159 NWayExpansion = 0x6A,
160 TxDmaOkLowDesc = 0x82,
161 Config5 = 0xD8,
162 TxPoll = 0xD9,
163 RxMaxSize = 0xDA,
164 CpCmd = 0xE0,
165 IntrMitigate = 0xE2,
166 RxRingAddr = 0xE4,
167 TxThresh = 0xEC,
168 OldRxBufAddr = 0x30,
169 OldTSD0 = 0x10,
 /* Tx and Rx status descriptor bits */
172 DescOwn = (1 << 31),
173 RingEnd = (1 << 30),
174 FirstFrag = (1 << 29),
175 LastFrag = (1 << 28),
176 LargeSend = (1 << 27),
177 MSSShift = 16,
178 MSSMask = 0x7ff,
179 TxError = (1 << 23),
180 RxError = (1 << 20),
181 IPCS = (1 << 18),
182 UDPCS = (1 << 17),
183 TCPCS = (1 << 16),
184 TxVlanTag = (1 << 17),
185 RxVlanTagged = (1 << 16),
186 IPFail = (1 << 15),
187 UDPFail = (1 << 14),
188 TCPFail = (1 << 13),
189 NormalTxPoll = (1 << 6),
190 PID1 = (1 << 17),
191 PID0 = (1 << 16),
192 RxProtoTCP = 1,
193 RxProtoUDP = 2,
194 RxProtoIP = 3,
195 TxFIFOUnder = (1 << 25),
196 TxOWC = (1 << 22),
197 TxLinkFail = (1 << 21),
198 TxMaxCol = (1 << 20),
199 TxColCntShift = 16,
200 TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08,
201 RxErrFrame = (1 << 27),
202 RxMcast = (1 << 26),
203 RxErrCRC = (1 << 18),
204 RxErrRunt = (1 << 19),
205 RxErrLong = (1 << 21),
206 RxErrFIFO = (1 << 22),
 /* StatsAddr register */
209 DumpStats = (1 << 3),
 /* RxConfig register */
212 RxCfgFIFOShift = 13,
213 RxCfgDMAShift = 8,
214 AcceptErr = 0x20,
215 AcceptRunt = 0x10,
216 AcceptBroadcast = 0x08,
217 AcceptMulticast = 0x04,
218 AcceptMyPhys = 0x02,
219 AcceptAllPhys = 0x01,
 /* IntrMask / IntrStatus registers */
222 PciErr = (1 << 15),
223 TimerIntr = (1 << 14),
224 LenChg = (1 << 13),
225 SWInt = (1 << 8),
226 TxEmpty = (1 << 7),
227 RxFIFOOvr = (1 << 6),
228 LinkChg = (1 << 5),
229 RxEmpty = (1 << 4),
230 TxErr = (1 << 3),
231 TxOK = (1 << 2),
232 RxErr = (1 << 1),
233 RxOK = (1 << 0),
234 IntrResvd = (1 << 10),
235
236
237 IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
238 RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
239 RxErr | RxOK | IntrResvd,
 /* Cmd register */
242 CmdReset = (1 << 4),
243 RxOn = (1 << 3),
244 TxOn = (1 << 2),
 /* C+ mode command register (CpCmd) */
247 RxVlanOn = (1 << 6),
248 RxChkSum = (1 << 5),
249 PCIDAC = (1 << 4),
250 PCIMulRW = (1 << 3),
251 CpRxOn = (1 << 1),
252 CpTxOn = (1 << 0),
 /* Cfg9346 EEPROM/config write-enable register values */
255 Cfg9346_Lock = 0x00,
256 Cfg9346_Unlock = 0xC0,
 /* TxConfig register */
259 IFG = (1 << 25) | (1 << 24),
260 TxDMAShift = 8,
 /* Early Tx threshold (TxThresh) register */
263 TxThreshMask = 0x3f,
264 TxThreshMax = 2048,
 /* Config1 register */
267 DriverLoaded = (1 << 5),
268 LWACT = (1 << 4),
269 PMEnable = (1 << 0),
 /* Config3 register */
272 PARMEnable = (1 << 6),
273 MagicPacket = (1 << 5),
274 LinkUp = (1 << 4),
 /* Config4 register */
277 LWPTN = (1 << 1),
278 LWPME = (1 << 4),
 /* Config5 register */
281 BWF = (1 << 6),
282 MWF = (1 << 5),
283 UWF = (1 << 4),
284 LANWake = (1 << 1),
285 PMEStatus = (1 << 0),
286
287 cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
288 cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
289 cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
290};
291
292static const unsigned int cp_rx_config =
293 (RX_FIFO_THRESH << RxCfgFIFOShift) |
294 (RX_DMA_BURST << RxCfgDMAShift);
/* hardware DMA descriptor: opts1 = control/status + length, opts2 = VLAN tag,
 * addr = buffer bus address
 */
296struct cp_desc {
297 __le32 opts1;
298 __le32 opts2;
299 __le64 addr;
300};
/* layout of the hardware statistics dump written via StatsAddr/DumpStats */
302struct cp_dma_stats {
303 __le64 tx_ok;
304 __le64 rx_ok;
305 __le64 tx_err;
306 __le32 rx_err;
307 __le16 rx_fifo;
308 __le16 frame_align;
309 __le32 tx_ok_1col;
310 __le32 tx_ok_mcol;
311 __le64 rx_ok_phys;
312 __le64 rx_ok_bcast;
313 __le32 rx_ok_mcast;
314 __le16 tx_abort;
315 __le16 tx_underrun;
316} __packed;
317
318struct cp_extra_stats {
319 unsigned long rx_frags;
320};
321
322struct cp_private {
323 void __iomem *regs;
324 struct net_device *dev;
325 spinlock_t lock;
326 u32 msg_enable;
327
328 struct napi_struct napi;
329
330 struct pci_dev *pdev;
331 u32 rx_config;
332 u16 cpcmd;
333
334 struct cp_extra_stats cp_stats;
335
336 unsigned rx_head ____cacheline_aligned;
337 unsigned rx_tail;
338 struct cp_desc *rx_ring;
339 struct sk_buff *rx_skb[CP_RX_RING_SIZE];
340
341 unsigned tx_head ____cacheline_aligned;
342 unsigned tx_tail;
343 struct cp_desc *tx_ring;
344 struct sk_buff *tx_skb[CP_TX_RING_SIZE];
345 u32 tx_opts[CP_TX_RING_SIZE];
346
347 unsigned rx_buf_sz;
348 unsigned wol_enabled : 1;
349
350 dma_addr_t ring_dma;
351
352 struct mii_if_info mii_if;
353};
/* MMIO register accessors; the _f variants read back to flush posted writes */
355#define cpr8(reg) readb(cp->regs + (reg))
356#define cpr16(reg) readw(cp->regs + (reg))
357#define cpr32(reg) readl(cp->regs + (reg))
358#define cpw8(reg,val) writeb((val), cp->regs + (reg))
359#define cpw16(reg,val) writew((val), cp->regs + (reg))
360#define cpw32(reg,val) writel((val), cp->regs + (reg))
361#define cpw8_f(reg,val) do { \
362 writeb((val), cp->regs + (reg)); \
363 readb(cp->regs + (reg)); \
364 } while (0)
365#define cpw16_f(reg,val) do { \
366 writew((val), cp->regs + (reg)); \
367 readw(cp->regs + (reg)); \
368 } while (0)
369#define cpw32_f(reg,val) do { \
370 writel((val), cp->regs + (reg)); \
371 readl(cp->regs + (reg)); \
372 } while (0)
373
374
375static void __cp_set_rx_mode (struct net_device *dev);
376static void cp_tx (struct cp_private *cp);
377static void cp_clean_rings (struct cp_private *cp);
378#ifdef CONFIG_NET_POLL_CONTROLLER
379static void cp_poll_controller(struct net_device *dev);
380#endif
381static int cp_get_eeprom_len(struct net_device *dev);
382static int cp_get_eeprom(struct net_device *dev,
383 struct ethtool_eeprom *eeprom, u8 *data);
384static int cp_set_eeprom(struct net_device *dev,
385 struct ethtool_eeprom *eeprom, u8 *data);
386
387static struct {
388 const char str[ETH_GSTRING_LEN];
389} ethtool_stats_keys[] = {
390 { "tx_ok" },
391 { "rx_ok" },
392 { "tx_err" },
393 { "rx_err" },
394 { "rx_fifo" },
395 { "frame_align" },
396 { "tx_ok_1col" },
397 { "tx_ok_mcol" },
398 { "rx_ok_phys" },
399 { "rx_ok_bcast" },
400 { "rx_ok_mcast" },
401 { "tx_abort" },
402 { "tx_underrun" },
403 { "rx_frags" },
404};
405
406
407static inline void cp_set_rxbufsize (struct cp_private *cp)
408{
409 unsigned int mtu = cp->dev->mtu;
410
411 if (mtu > ETH_DATA_LEN)
 /* MTU + ethernet header + FCS + optional VLAN tag */
413 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
414 else
415 cp->rx_buf_sz = PKT_BUF_SZ;
416}
417
418static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
419 struct cp_desc *desc)
420{
421 u32 opts2 = le32_to_cpu(desc->opts2);
422
423 skb->protocol = eth_type_trans (skb, cp->dev);
424
425 cp->dev->stats.rx_packets++;
426 cp->dev->stats.rx_bytes += skb->len;
427
428 if (opts2 & RxVlanTagged)
429 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
430
431 napi_gro_receive(&cp->napi, skb);
432}
433
434static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
435 u32 status, u32 len)
436{
437 netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
438 rx_tail, status, len);
439 cp->dev->stats.rx_errors++;
440 if (status & RxErrFrame)
441 cp->dev->stats.rx_frame_errors++;
442 if (status & RxErrCRC)
443 cp->dev->stats.rx_crc_errors++;
444 if ((status & RxErrRunt) || (status & RxErrLong))
445 cp->dev->stats.rx_length_errors++;
446 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
447 cp->dev->stats.rx_length_errors++;
448 if (status & RxErrFIFO)
449 cp->dev->stats.rx_fifo_errors++;
450}
451
452static inline unsigned int cp_rx_csum_ok (u32 status)
453{
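 /* bits 17:16 of the Rx status (PID1:PID0) encode the parsed protocol,
 * matching the RxProtoTCP/RxProtoUDP/RxProtoIP values above
 */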
454 unsigned int protocol = (status >> 16) & 0x3;
455
456 if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
457 ((protocol == RxProtoUDP) && !(status & UDPFail)))
458 return 1;
459 else
460 return 0;
461}
462
463static int cp_rx_poll(struct napi_struct *napi, int budget)
464{
465 struct cp_private *cp = container_of(napi, struct cp_private, napi);
466 struct net_device *dev = cp->dev;
467 unsigned int rx_tail = cp->rx_tail;
468 int rx = 0;
469
470 cpw16(IntrStatus, cp_rx_intr_mask);
471
472 while (rx < budget) {
473 u32 status, len;
474 dma_addr_t mapping, new_mapping;
475 struct sk_buff *skb, *new_skb;
476 struct cp_desc *desc;
477 const unsigned buflen = cp->rx_buf_sz;
478
479 skb = cp->rx_skb[rx_tail];
480 BUG_ON(!skb);
481
482 desc = &cp->rx_ring[rx_tail];
483 status = le32_to_cpu(desc->opts1);
484 if (status & DescOwn)
485 break;
 /* the reported length includes the 4-byte FCS */
487 len = (status & 0x1fff) - 4;
488 mapping = le64_to_cpu(desc->addr);
489
490 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
 /* we don't support incoming fragmented frames; the pre-allocated
 * Rx buffers are sized so that fragments should never occur
 */
496 cp_rx_err_acct(cp, rx_tail, status, len);
497 dev->stats.rx_dropped++;
498 cp->cp_stats.rx_frags++;
499 goto rx_next;
500 }
501
502 if (status & (RxError | RxErrFIFO)) {
503 cp_rx_err_acct(cp, rx_tail, status, len);
504 goto rx_next;
505 }
506
507 netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
508 rx_tail, status, len);
509
510 new_skb = napi_alloc_skb(napi, buflen);
511 if (!new_skb) {
512 dev->stats.rx_dropped++;
513 goto rx_next;
514 }
515
516 new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
517 PCI_DMA_FROMDEVICE);
518 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
519 dev->stats.rx_dropped++;
520 kfree_skb(new_skb);
521 goto rx_next;
522 }
523
524 dma_unmap_single(&cp->pdev->dev, mapping,
525 buflen, PCI_DMA_FROMDEVICE);
 /* handle checksum offloading for incoming packets */
528 if (cp_rx_csum_ok(status))
529 skb->ip_summed = CHECKSUM_UNNECESSARY;
530 else
531 skb_checksum_none_assert(skb);
532
533 skb_put(skb, len);
534
535 cp->rx_skb[rx_tail] = new_skb;
536
537 cp_rx_skb(cp, skb, desc);
538 rx++;
539 mapping = new_mapping;
540
541rx_next:
542 cp->rx_ring[rx_tail].opts2 = 0;
543 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
544 if (rx_tail == (CP_RX_RING_SIZE - 1))
545 desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
546 cp->rx_buf_sz);
547 else
548 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
549 rx_tail = NEXT_RX(rx_tail);
550 }
551
552 cp->rx_tail = rx_tail;
 /* if we did not reach the work limit, this round of polling is done:
 * re-enable the Rx interrupts
 */
557 if (rx < budget && napi_complete_done(napi, rx)) {
558 unsigned long flags;
559
560 spin_lock_irqsave(&cp->lock, flags);
561 cpw16_f(IntrMask, cp_intr_mask);
562 spin_unlock_irqrestore(&cp->lock, flags);
563 }
564
565 return rx;
566}
567
568static irqreturn_t cp_interrupt (int irq, void *dev_instance)
569{
570 struct net_device *dev = dev_instance;
571 struct cp_private *cp;
572 int handled = 0;
573 u16 status;
574
575 if (unlikely(dev == NULL))
576 return IRQ_NONE;
577 cp = netdev_priv(dev);
578
579 spin_lock(&cp->lock);
580
581 status = cpr16(IntrStatus);
582 if (!status || (status == 0xFFFF))
583 goto out_unlock;
584
585 handled = 1;
586
587 netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
588 status, cpr8(Cmd), cpr16(CpCmd));
589
590 cpw16(IntrStatus, status & ~cp_rx_intr_mask);
 /* close possible race with dev_close */
593 if (unlikely(!netif_running(dev))) {
594 cpw16(IntrMask, 0);
595 goto out_unlock;
596 }
597
598 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
599 if (napi_schedule_prep(&cp->napi)) {
600 cpw16_f(IntrMask, cp_norx_intr_mask);
601 __napi_schedule(&cp->napi);
602 }
603
604 if (status & (TxOK | TxErr | TxEmpty | SWInt))
605 cp_tx(cp);
606 if (status & LinkChg)
607 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
608
609
610 if (status & PciErr) {
611 u16 pci_status;
612
613 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
614 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
615 netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
616 status, pci_status);
617
618
619 }
620
621out_unlock:
622 spin_unlock(&cp->lock);
623
624 return IRQ_RETVAL(handled);
625}
626
627#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by netconsole and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
632static void cp_poll_controller(struct net_device *dev)
633{
634 struct cp_private *cp = netdev_priv(dev);
635 const int irq = cp->pdev->irq;
636
637 disable_irq(irq);
638 cp_interrupt(irq, dev);
639 enable_irq(irq);
640}
641#endif
642
643static void cp_tx (struct cp_private *cp)
644{
645 unsigned tx_head = cp->tx_head;
646 unsigned tx_tail = cp->tx_tail;
647 unsigned bytes_compl = 0, pkts_compl = 0;
648
649 while (tx_tail != tx_head) {
650 struct cp_desc *txd = cp->tx_ring + tx_tail;
651 struct sk_buff *skb;
652 u32 status;
653
654 rmb();
655 status = le32_to_cpu(txd->opts1);
656 if (status & DescOwn)
657 break;
658
659 skb = cp->tx_skb[tx_tail];
660 BUG_ON(!skb);
661
662 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
663 cp->tx_opts[tx_tail] & 0xffff,
664 PCI_DMA_TODEVICE);
665
666 if (status & LastFrag) {
667 if (status & (TxError | TxFIFOUnder)) {
668 netif_dbg(cp, tx_err, cp->dev,
669 "tx err, status 0x%x\n", status);
670 cp->dev->stats.tx_errors++;
671 if (status & TxOWC)
672 cp->dev->stats.tx_window_errors++;
673 if (status & TxMaxCol)
674 cp->dev->stats.tx_aborted_errors++;
675 if (status & TxLinkFail)
676 cp->dev->stats.tx_carrier_errors++;
677 if (status & TxFIFOUnder)
678 cp->dev->stats.tx_fifo_errors++;
679 } else {
680 cp->dev->stats.collisions +=
681 ((status >> TxColCntShift) & TxColCntMask);
682 cp->dev->stats.tx_packets++;
683 cp->dev->stats.tx_bytes += skb->len;
684 netif_dbg(cp, tx_done, cp->dev,
685 "tx done, slot %d\n", tx_tail);
686 }
687 bytes_compl += skb->len;
688 pkts_compl++;
689 dev_kfree_skb_irq(skb);
690 }
691
692 cp->tx_skb[tx_tail] = NULL;
693
694 tx_tail = NEXT_TX(tx_tail);
695 }
696
697 cp->tx_tail = tx_tail;
698
699 netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
700 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
701 netif_wake_queue(cp->dev);
702}
703
704static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
705{
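 /* the chip expects the VLAN tag byte-swapped in the descriptor; the Rx
 * path undoes this with swab16() as well
 */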
706 return skb_vlan_tag_present(skb) ?
707 TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
708}
709
710static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
711 int first, int entry_last)
712{
713 int frag, index;
714 struct cp_desc *txd;
715 skb_frag_t *this_frag;
716 for (frag = 0; frag+first < entry_last; frag++) {
717 index = first+frag;
718 cp->tx_skb[index] = NULL;
719 txd = &cp->tx_ring[index];
720 this_frag = &skb_shinfo(skb)->frags[frag];
721 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
722 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
723 }
724}
725
726static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
727 struct net_device *dev)
728{
729 struct cp_private *cp = netdev_priv(dev);
730 unsigned entry;
731 u32 eor, opts1;
732 unsigned long intr_flags;
733 __le32 opts2;
734 int mss = 0;
735
736 spin_lock_irqsave(&cp->lock, intr_flags);
 /* This is a hard error, log it. */
739 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
740 netif_stop_queue(dev);
741 spin_unlock_irqrestore(&cp->lock, intr_flags);
742 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
743 return NETDEV_TX_BUSY;
744 }
745
746 entry = cp->tx_head;
747 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
748 mss = skb_shinfo(skb)->gso_size;
749
750 if (mss > MSSMask) {
751 netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n",
752 mss);
753 goto out_dma_error;
754 }
755
756 opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
757 opts1 = DescOwn;
758 if (mss)
759 opts1 |= LargeSend | (mss << MSSShift);
760 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
761 const struct iphdr *ip = ip_hdr(skb);
762 if (ip->protocol == IPPROTO_TCP)
763 opts1 |= IPCS | TCPCS;
764 else if (ip->protocol == IPPROTO_UDP)
765 opts1 |= IPCS | UDPCS;
766 else {
767 WARN_ONCE(1,
768 "Net bug: asked to checksum invalid Legacy IP packet\n");
769 goto out_dma_error;
770 }
771 }
772
773 if (skb_shinfo(skb)->nr_frags == 0) {
774 struct cp_desc *txd = &cp->tx_ring[entry];
775 u32 len;
776 dma_addr_t mapping;
777
778 len = skb->len;
779 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
780 if (dma_mapping_error(&cp->pdev->dev, mapping))
781 goto out_dma_error;
782
783 txd->opts2 = opts2;
784 txd->addr = cpu_to_le64(mapping);
785 wmb();
786
787 opts1 |= eor | len | FirstFrag | LastFrag;
788
789 txd->opts1 = cpu_to_le32(opts1);
790 wmb();
791
792 cp->tx_skb[entry] = skb;
793 cp->tx_opts[entry] = opts1;
794 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
795 entry, skb->len);
796 } else {
797 struct cp_desc *txd;
798 u32 first_len, first_eor, ctrl;
799 dma_addr_t first_mapping;
800 int frag, first_entry = entry;
 /* the first chunk is handed to the device last; otherwise the
 * hardware could start on an incomplete chain
 */
805 first_eor = eor;
806 first_len = skb_headlen(skb);
807 first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
808 first_len, PCI_DMA_TODEVICE);
809 if (dma_mapping_error(&cp->pdev->dev, first_mapping))
810 goto out_dma_error;
811
812 cp->tx_skb[entry] = skb;
813
814 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
815 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
816 u32 len;
817 dma_addr_t mapping;
818
819 entry = NEXT_TX(entry);
820
821 len = skb_frag_size(this_frag);
822 mapping = dma_map_single(&cp->pdev->dev,
823 skb_frag_address(this_frag),
824 len, PCI_DMA_TODEVICE);
825 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
826 unwind_tx_frag_mapping(cp, skb, first_entry, entry);
827 goto out_dma_error;
828 }
829
830 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
831
832 ctrl = opts1 | eor | len;
833
834 if (frag == skb_shinfo(skb)->nr_frags - 1)
835 ctrl |= LastFrag;
836
837 txd = &cp->tx_ring[entry];
838 txd->opts2 = opts2;
839 txd->addr = cpu_to_le64(mapping);
840 wmb();
841
842 txd->opts1 = cpu_to_le32(ctrl);
843 wmb();
844
845 cp->tx_opts[entry] = ctrl;
846 cp->tx_skb[entry] = skb;
847 }
848
849 txd = &cp->tx_ring[first_entry];
850 txd->opts2 = opts2;
851 txd->addr = cpu_to_le64(first_mapping);
852 wmb();
853
854 ctrl = opts1 | first_eor | first_len | FirstFrag;
855 txd->opts1 = cpu_to_le32(ctrl);
856 wmb();
857
858 cp->tx_opts[first_entry] = ctrl;
859 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
860 first_entry, entry, skb->len);
861 }
862 cp->tx_head = NEXT_TX(entry);
863
864 netdev_sent_queue(dev, skb->len);
865 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
866 netif_stop_queue(dev);
867
868out_unlock:
869 spin_unlock_irqrestore(&cp->lock, intr_flags);
870
871 cpw8(TxPoll, NormalTxPoll);
872
873 return NETDEV_TX_OK;
874out_dma_error:
875 dev_kfree_skb_any(skb);
876 cp->dev->stats.tx_dropped++;
877 goto out_unlock;
878}
/* Program the RxConfig accept bits and the multicast hash filter. */
883static void __cp_set_rx_mode (struct net_device *dev)
884{
885 struct cp_private *cp = netdev_priv(dev);
886 u32 mc_filter[2];
887 int rx_mode;
888
889
890 if (dev->flags & IFF_PROMISC) {
891
892 rx_mode =
893 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
894 AcceptAllPhys;
895 mc_filter[1] = mc_filter[0] = 0xffffffff;
896 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
897 (dev->flags & IFF_ALLMULTI)) {
 /* too many addresses to filter perfectly -- accept all multicasts */
899 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
900 mc_filter[1] = mc_filter[0] = 0xffffffff;
901 } else {
902 struct netdev_hw_addr *ha;
903 rx_mode = AcceptBroadcast | AcceptMyPhys;
904 mc_filter[1] = mc_filter[0] = 0;
905 netdev_for_each_mc_addr(ha, dev) {
906 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
907
908 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
909 rx_mode |= AcceptMulticast;
910 }
911 }
912
913
914 cp->rx_config = cp_rx_config | rx_mode;
915 cpw32_f(RxConfig, cp->rx_config);
916
917 cpw32_f (MAR0 + 0, mc_filter[0]);
918 cpw32_f (MAR0 + 4, mc_filter[1]);
919}
920
921static void cp_set_rx_mode (struct net_device *dev)
922{
923 unsigned long flags;
924 struct cp_private *cp = netdev_priv(dev);
925
926 spin_lock_irqsave (&cp->lock, flags);
927 __cp_set_rx_mode(dev);
928 spin_unlock_irqrestore (&cp->lock, flags);
929}
930
931static void __cp_get_stats(struct cp_private *cp)
932{
 /* only the low 24 bits of RxMissed are valid; writing clears the counter */
934 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
935 cpw32 (RxMissed, 0);
936}
937
938static struct net_device_stats *cp_get_stats(struct net_device *dev)
939{
940 struct cp_private *cp = netdev_priv(dev);
941 unsigned long flags;
942
943
944 spin_lock_irqsave(&cp->lock, flags);
945 if (netif_running(dev) && netif_device_present(dev))
946 __cp_get_stats(cp);
947 spin_unlock_irqrestore(&cp->lock, flags);
948
949 return &dev->stats;
950}
951
952static void cp_stop_hw (struct cp_private *cp)
953{
954 cpw16(IntrStatus, ~(cpr16(IntrStatus)));
955 cpw16_f(IntrMask, 0);
956 cpw8(Cmd, 0);
957 cpw16_f(CpCmd, 0);
958 cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
959
960 cp->rx_tail = 0;
961 cp->tx_head = cp->tx_tail = 0;
962
963 netdev_reset_queue(cp->dev);
964}
965
966static void cp_reset_hw (struct cp_private *cp)
967{
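 /* issue a soft reset and poll until the chip clears the CmdReset bit */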
968 unsigned work = 1000;
969
970 cpw8(Cmd, CmdReset);
971
972 while (work--) {
973 if (!(cpr8(Cmd) & CmdReset))
974 return;
975
976 schedule_timeout_uninterruptible(10);
977 }
978
979 netdev_err(cp->dev, "hardware reset timeout\n");
980}
981
982static inline void cp_start_hw (struct cp_private *cp)
983{
984 dma_addr_t ring_dma;
985
986 cpw16(CpCmd, cp->cpcmd);
 /*
 * Ring addresses must be programmed after CpCmd: on some chips the
 * Tx ring address is reset when C+ mode Tx is enabled, so write
 * HiTxRingAddr/RxRingAddr/TxRingAddr only after CpCmd is set up.
 */
996 cpw32_f(HiTxRingAddr, 0);
997 cpw32_f(HiTxRingAddr + 4, 0);
998
999 ring_dma = cp->ring_dma;
1000 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1001 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1002
1003 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1004 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1005 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1006
 /* now enable the receiver and transmitter */
1013 cpw8(Cmd, RxOn | TxOn);
1014
1015 netdev_reset_queue(cp->dev);
1016}
1017
1018static void cp_enable_irq(struct cp_private *cp)
1019{
1020 cpw16_f(IntrMask, cp_intr_mask);
1021}
1022
1023static void cp_init_hw (struct cp_private *cp)
1024{
1025 struct net_device *dev = cp->dev;
1026
1027 cp_reset_hw(cp);
1028
1029 cpw8_f (Cfg9346, Cfg9346_Unlock);
 /* restore our idea of the MAC address */
1032 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1033 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1034
1035 cp_start_hw(cp);
1036 cpw8(TxThresh, 0x06);
1037
1038 __cp_set_rx_mode(dev);
1039 cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1040
1041 cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
 /* disable Wake-on-LAN by default; it can be re-enabled via ethtool */
1043 cpw8(Config3, PARMEnable);
1044 cp->wol_enabled = 0;
1045
1046 cpw8(Config5, cpr8(Config5) & PMEStatus);
1047
1048 cpw16(MultiIntr, 0);
1049
1050 cpw8_f(Cfg9346, Cfg9346_Lock);
1051}
1052
1053static int cp_refill_rx(struct cp_private *cp)
1054{
1055 struct net_device *dev = cp->dev;
1056 unsigned i;
1057
1058 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1059 struct sk_buff *skb;
1060 dma_addr_t mapping;
1061
1062 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1063 if (!skb)
1064 goto err_out;
1065
1066 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1067 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1068 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
1069 kfree_skb(skb);
1070 goto err_out;
1071 }
1072 cp->rx_skb[i] = skb;
1073
1074 cp->rx_ring[i].opts2 = 0;
1075 cp->rx_ring[i].addr = cpu_to_le64(mapping);
1076 if (i == (CP_RX_RING_SIZE - 1))
1077 cp->rx_ring[i].opts1 =
1078 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1079 else
1080 cp->rx_ring[i].opts1 =
1081 cpu_to_le32(DescOwn | cp->rx_buf_sz);
1082 }
1083
1084 return 0;
1085
1086err_out:
1087 cp_clean_rings(cp);
1088 return -ENOMEM;
1089}
1090
1091static void cp_init_rings_index (struct cp_private *cp)
1092{
1093 cp->rx_tail = 0;
1094 cp->tx_head = cp->tx_tail = 0;
1095}
1096
1097static int cp_init_rings (struct cp_private *cp)
1098{
1099 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1100 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1101 memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1102
1103 cp_init_rings_index(cp);
1104
1105 return cp_refill_rx (cp);
1106}
1107
1108static int cp_alloc_rings (struct cp_private *cp)
1109{
1110 struct device *d = &cp->pdev->dev;
1111 void *mem;
1112 int rc;
1113
1114 mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
1115 if (!mem)
1116 return -ENOMEM;
1117
1118 cp->rx_ring = mem;
1119 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1120
1121 rc = cp_init_rings(cp);
1122 if (rc < 0)
1123 dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1124
1125 return rc;
1126}
1127
1128static void cp_clean_rings (struct cp_private *cp)
1129{
1130 struct cp_desc *desc;
1131 unsigned i;
1132
1133 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1134 if (cp->rx_skb[i]) {
1135 desc = cp->rx_ring + i;
1136 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1137 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1138 dev_kfree_skb_any(cp->rx_skb[i]);
1139 }
1140 }
1141
1142 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1143 if (cp->tx_skb[i]) {
1144 struct sk_buff *skb = cp->tx_skb[i];
1145
1146 desc = cp->tx_ring + i;
1147 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1148 le32_to_cpu(desc->opts1) & 0xffff,
1149 PCI_DMA_TODEVICE);
1150 if (le32_to_cpu(desc->opts1) & LastFrag)
1151 dev_kfree_skb_any(skb);
1152 cp->dev->stats.tx_dropped++;
1153 }
1154 }
1155 netdev_reset_queue(cp->dev);
1156
1157 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1158 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1159 memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1160
1161 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1162 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1163}
1164
1165static void cp_free_rings (struct cp_private *cp)
1166{
1167 cp_clean_rings(cp);
1168 dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1169 cp->ring_dma);
1170 cp->rx_ring = NULL;
1171 cp->tx_ring = NULL;
1172}
1173
1174static int cp_open (struct net_device *dev)
1175{
1176 struct cp_private *cp = netdev_priv(dev);
1177 const int irq = cp->pdev->irq;
1178 int rc;
1179
1180 netif_dbg(cp, ifup, dev, "enabling interface\n");
1181
1182 rc = cp_alloc_rings(cp);
1183 if (rc)
1184 return rc;
1185
1186 napi_enable(&cp->napi);
1187
1188 cp_init_hw(cp);
1189
1190 rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1191 if (rc)
1192 goto err_out_hw;
1193
1194 cp_enable_irq(cp);
1195
1196 netif_carrier_off(dev);
1197 mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1198 netif_start_queue(dev);
1199
1200 return 0;
1201
1202err_out_hw:
1203 napi_disable(&cp->napi);
1204 cp_stop_hw(cp);
1205 cp_free_rings(cp);
1206 return rc;
1207}
1208
1209static int cp_close (struct net_device *dev)
1210{
1211 struct cp_private *cp = netdev_priv(dev);
1212 unsigned long flags;
1213
1214 napi_disable(&cp->napi);
1215
1216 netif_dbg(cp, ifdown, dev, "disabling interface\n");
1217
1218 spin_lock_irqsave(&cp->lock, flags);
1219
1220 netif_stop_queue(dev);
1221 netif_carrier_off(dev);
1222
1223 cp_stop_hw(cp);
1224
1225 spin_unlock_irqrestore(&cp->lock, flags);
1226
1227 free_irq(cp->pdev->irq, dev);
1228
1229 cp_free_rings(cp);
1230 return 0;
1231}
1232
1233static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
1234{
1235 struct cp_private *cp = netdev_priv(dev);
1236 unsigned long flags;
1237 int rc, i;
1238
1239 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1240 cpr8(Cmd), cpr16(CpCmd),
1241 cpr16(IntrStatus), cpr16(IntrMask));
1242
1243 spin_lock_irqsave(&cp->lock, flags);
1244
1245 netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
1246 cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
1247 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1248 netif_dbg(cp, tx_err, cp->dev,
1249 "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
1250 i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
1251 cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
1252 le64_to_cpu(cp->tx_ring[i].addr),
1253 cp->tx_skb[i]);
1254 }
1255
1256 cp_stop_hw(cp);
1257 cp_clean_rings(cp);
1258 rc = cp_init_rings(cp);
1259 cp_start_hw(cp);
1260 __cp_set_rx_mode(dev);
1261 cpw16_f(IntrMask, cp_norx_intr_mask);
1262
1263 netif_wake_queue(dev);
1264 napi_schedule_irqoff(&cp->napi);
1265
1266 spin_unlock_irqrestore(&cp->lock, flags);
1267}
1268
1269static int cp_change_mtu(struct net_device *dev, int new_mtu)
1270{
1271 struct cp_private *cp = netdev_priv(dev);
 /* if the interface is not up, just record the new MTU */
1274 if (!netif_running(dev)) {
1275 dev->mtu = new_mtu;
1276 cp_set_rxbufsize(cp);
1277 return 0;
1278 }
 /* the interface is up: close it, change the MTU, and bring it back up */
1281 cp_close(dev);
1282 dev->mtu = new_mtu;
1283 cp_set_rxbufsize(cp);
1284 return cp_open(dev);
1285}
1286
1287static const char mii_2_8139_map[8] = {
1288 BasicModeCtrl,
1289 BasicModeStatus,
1290 0,
1291 0,
1292 NWayAdvert,
1293 NWayLPAR,
1294 NWayExpansion,
1295 0
1296};
1297
1298static int mdio_read(struct net_device *dev, int phy_id, int location)
1299{
1300 struct cp_private *cp = netdev_priv(dev);
1301
1302 return location < 8 && mii_2_8139_map[location] ?
1303 readw(cp->regs + mii_2_8139_map[location]) : 0;
1304}
1305
1306
1307static void mdio_write(struct net_device *dev, int phy_id, int location,
1308 int value)
1309{
1310 struct cp_private *cp = netdev_priv(dev);
1311
1312 if (location == 0) {
1313 cpw8(Cfg9346, Cfg9346_Unlock);
1314 cpw16(BasicModeCtrl, value);
1315 cpw8(Cfg9346, Cfg9346_Lock);
1316 } else if (location < 8 && mii_2_8139_map[location])
1317 cpw16(mii_2_8139_map[location], value);
1318}
1319
1320
1321static int netdev_set_wol (struct cp_private *cp,
1322 const struct ethtool_wolinfo *wol)
1323{
1324 u8 options;
1325
1326 options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1327
1328 if (wol->wolopts) {
1329 if (wol->wolopts & WAKE_PHY) options |= LinkUp;
1330 if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
1331 }
1332
1333 cpw8 (Cfg9346, Cfg9346_Unlock);
1334 cpw8 (Config3, options);
1335 cpw8 (Cfg9346, Cfg9346_Lock);
1336
1337 options = 0;
1338 options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1339
1340 if (wol->wolopts) {
1341 if (wol->wolopts & WAKE_UCAST) options |= UWF;
1342 if (wol->wolopts & WAKE_BCAST) options |= BWF;
1343 if (wol->wolopts & WAKE_MCAST) options |= MWF;
1344 }
1345
1346 cpw8 (Config5, options);
1347
1348 cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1349
1350 return 0;
1351}
1352
1353
1354static void netdev_get_wol (struct cp_private *cp,
1355 struct ethtool_wolinfo *wol)
1356{
1357 u8 options;
1358
1359 wol->wolopts = 0;
1360 wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
1361 WAKE_MCAST | WAKE_UCAST;
1362
1363 if (!cp->wol_enabled) return;
1364
1365 options = cpr8 (Config3);
1366 if (options & LinkUp) wol->wolopts |= WAKE_PHY;
1367 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
1368
1369 options = 0;
1370 options = cpr8 (Config5);
1371 if (options & UWF) wol->wolopts |= WAKE_UCAST;
1372 if (options & BWF) wol->wolopts |= WAKE_BCAST;
1373 if (options & MWF) wol->wolopts |= WAKE_MCAST;
1374}
1375
1376static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1377{
1378 struct cp_private *cp = netdev_priv(dev);
1379
1380 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1381 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1382 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
1383}
1384
1385static void cp_get_ringparam(struct net_device *dev,
1386 struct ethtool_ringparam *ring)
1387{
1388 ring->rx_max_pending = CP_RX_RING_SIZE;
1389 ring->tx_max_pending = CP_TX_RING_SIZE;
1390 ring->rx_pending = CP_RX_RING_SIZE;
1391 ring->tx_pending = CP_TX_RING_SIZE;
1392}
1393
1394static int cp_get_regs_len(struct net_device *dev)
1395{
1396 return CP_REGS_SIZE;
1397}
1398
1399static int cp_get_sset_count (struct net_device *dev, int sset)
1400{
1401 switch (sset) {
1402 case ETH_SS_STATS:
1403 return CP_NUM_STATS;
1404 default:
1405 return -EOPNOTSUPP;
1406 }
1407}
1408
1409static int cp_get_link_ksettings(struct net_device *dev,
1410 struct ethtool_link_ksettings *cmd)
1411{
1412 struct cp_private *cp = netdev_priv(dev);
1413 unsigned long flags;
1414
1415 spin_lock_irqsave(&cp->lock, flags);
1416 mii_ethtool_get_link_ksettings(&cp->mii_if, cmd);
1417 spin_unlock_irqrestore(&cp->lock, flags);
1418
1419 return 0;
1420}
1421
1422static int cp_set_link_ksettings(struct net_device *dev,
1423 const struct ethtool_link_ksettings *cmd)
1424{
1425 struct cp_private *cp = netdev_priv(dev);
1426 int rc;
1427 unsigned long flags;
1428
1429 spin_lock_irqsave(&cp->lock, flags);
1430 rc = mii_ethtool_set_link_ksettings(&cp->mii_if, cmd);
1431 spin_unlock_irqrestore(&cp->lock, flags);
1432
1433 return rc;
1434}
1435
1436static int cp_nway_reset(struct net_device *dev)
1437{
1438 struct cp_private *cp = netdev_priv(dev);
1439 return mii_nway_restart(&cp->mii_if);
1440}
1441
1442static u32 cp_get_msglevel(struct net_device *dev)
1443{
1444 struct cp_private *cp = netdev_priv(dev);
1445 return cp->msg_enable;
1446}
1447
1448static void cp_set_msglevel(struct net_device *dev, u32 value)
1449{
1450 struct cp_private *cp = netdev_priv(dev);
1451 cp->msg_enable = value;
1452}
1453
1454static int cp_set_features(struct net_device *dev, netdev_features_t features)
1455{
1456 struct cp_private *cp = netdev_priv(dev);
1457 unsigned long flags;
1458
1459 if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1460 return 0;
1461
1462 spin_lock_irqsave(&cp->lock, flags);
1463
1464 if (features & NETIF_F_RXCSUM)
1465 cp->cpcmd |= RxChkSum;
1466 else
1467 cp->cpcmd &= ~RxChkSum;
1468
1469 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1470 cp->cpcmd |= RxVlanOn;
1471 else
1472 cp->cpcmd &= ~RxVlanOn;
1473
1474 cpw16_f(CpCmd, cp->cpcmd);
1475 spin_unlock_irqrestore(&cp->lock, flags);
1476
1477 return 0;
1478}
1479
1480static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1481 void *p)
1482{
1483 struct cp_private *cp = netdev_priv(dev);
1484 unsigned long flags;
1485
1486 if (regs->len < CP_REGS_SIZE)
 return;
1488
1489 regs->version = CP_REGS_VER;
1490
1491 spin_lock_irqsave(&cp->lock, flags);
1492 memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1493 spin_unlock_irqrestore(&cp->lock, flags);
1494}
1495
1496static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1497{
1498 struct cp_private *cp = netdev_priv(dev);
1499 unsigned long flags;
1500
1501 spin_lock_irqsave (&cp->lock, flags);
1502 netdev_get_wol (cp, wol);
1503 spin_unlock_irqrestore (&cp->lock, flags);
1504}
1505
1506static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1507{
1508 struct cp_private *cp = netdev_priv(dev);
1509 unsigned long flags;
1510 int rc;
1511
1512 spin_lock_irqsave (&cp->lock, flags);
1513 rc = netdev_set_wol (cp, wol);
1514 spin_unlock_irqrestore (&cp->lock, flags);
1515
1516 return rc;
1517}
1518
1519static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1520{
1521 switch (stringset) {
1522 case ETH_SS_STATS:
 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1524 break;
1525 default:
1526 BUG();
1527 break;
1528 }
1529}
1530
1531static void cp_get_ethtool_stats (struct net_device *dev,
1532 struct ethtool_stats *estats, u64 *tmp_stats)
1533{
1534 struct cp_private *cp = netdev_priv(dev);
1535 struct cp_dma_stats *nic_stats;
1536 dma_addr_t dma;
1537 int i;
1538
1539 nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1540 &dma, GFP_KERNEL);
1541 if (!nic_stats)
1542 return;
 /* start the NIC statistics dump */
1545 cpw32(StatsAddr + 4, (u64)dma >> 32);
1546 cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1547 cpr32(StatsAddr);
1548
1549 for (i = 0; i < 1000; i++) {
1550 if ((cpr32(StatsAddr) & DumpStats) == 0)
1551 break;
1552 udelay(10);
1553 }
1554 cpw32(StatsAddr, 0);
1555 cpw32(StatsAddr + 4, 0);
1556 cpr32(StatsAddr);
1557
1558 i = 0;
1559 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1560 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1561 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1562 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1563 tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1564 tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1565 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1566 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1567 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1568 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1569 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1570 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1571 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1572 tmp_stats[i++] = cp->cp_stats.rx_frags;
1573 BUG_ON(i != CP_NUM_STATS);
1574
1575 dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1576}
1577
1578static const struct ethtool_ops cp_ethtool_ops = {
1579 .get_drvinfo = cp_get_drvinfo,
1580 .get_regs_len = cp_get_regs_len,
1581 .get_sset_count = cp_get_sset_count,
1582 .nway_reset = cp_nway_reset,
1583 .get_link = ethtool_op_get_link,
1584 .get_msglevel = cp_get_msglevel,
1585 .set_msglevel = cp_set_msglevel,
1586 .get_regs = cp_get_regs,
1587 .get_wol = cp_get_wol,
1588 .set_wol = cp_set_wol,
1589 .get_strings = cp_get_strings,
1590 .get_ethtool_stats = cp_get_ethtool_stats,
1591 .get_eeprom_len = cp_get_eeprom_len,
1592 .get_eeprom = cp_get_eeprom,
1593 .set_eeprom = cp_set_eeprom,
1594 .get_ringparam = cp_get_ringparam,
1595 .get_link_ksettings = cp_get_link_ksettings,
1596 .set_link_ksettings = cp_set_link_ksettings,
1597};
1598
1599static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1600{
1601 struct cp_private *cp = netdev_priv(dev);
1602 int rc;
1603 unsigned long flags;
1604
1605 if (!netif_running(dev))
1606 return -EINVAL;
1607
1608 spin_lock_irqsave(&cp->lock, flags);
1609 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1610 spin_unlock_irqrestore(&cp->lock, flags);
1611 return rc;
1612}
1613
1614static int cp_set_mac_address(struct net_device *dev, void *p)
1615{
1616 struct cp_private *cp = netdev_priv(dev);
1617 struct sockaddr *addr = p;
1618
1619 if (!is_valid_ether_addr(addr->sa_data))
1620 return -EADDRNOTAVAIL;
1621
1622 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1623
1624 spin_lock_irq(&cp->lock);
1625
1626 cpw8_f(Cfg9346, Cfg9346_Unlock);
1627 cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1628 cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1629 cpw8_f(Cfg9346, Cfg9346_Lock);
1630
1631 spin_unlock_irq(&cp->lock);
1632
1633 return 0;
1634}
1635
/* Serial EEPROM section */
1639#define EE_SHIFT_CLK 0x04
1640#define EE_CS 0x08
1641#define EE_DATA_WRITE 0x02
1642#define EE_WRITE_0 0x00
1643#define EE_WRITE_1 0x02
1644#define EE_DATA_READ 0x01
1645#define EE_ENB (0x80 | EE_CS)
/* Delay between EEPROM clock transitions.
 * No extra delay is needed with 33MHz PCI, but 66MHz may change this.
 */
1651#define eeprom_delay() readb(ee_addr)
/* The EEPROM commands include the always-set leading bit. */
1654#define EE_EXTEND_CMD (4)
1655#define EE_WRITE_CMD (5)
1656#define EE_READ_CMD (6)
1657#define EE_ERASE_CMD (7)
1658
1659#define EE_EWDS_ADDR (0)
1660#define EE_WRAL_ADDR (1)
1661#define EE_ERAL_ADDR (2)
1662#define EE_EWEN_ADDR (3)
1663
1664#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1665
1666static void eeprom_cmd_start(void __iomem *ee_addr)
1667{
1668 writeb (EE_ENB & ~EE_CS, ee_addr);
1669 writeb (EE_ENB, ee_addr);
1670 eeprom_delay ();
1671}
1672
1673static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1674{
1675 int i;
 /* shift the command bits out */
1678 for (i = cmd_len - 1; i >= 0; i--) {
1679 int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1680 writeb (EE_ENB | dataval, ee_addr);
1681 eeprom_delay ();
1682 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1683 eeprom_delay ();
1684 }
1685 writeb (EE_ENB, ee_addr);
1686 eeprom_delay ();
1687}
1688
1689static void eeprom_cmd_end(void __iomem *ee_addr)
1690{
1691 writeb(0, ee_addr);
1692 eeprom_delay ();
1693}
1694
1695static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1696 int addr_len)
1697{
1698 int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1699
1700 eeprom_cmd_start(ee_addr);
1701 eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1702 eeprom_cmd_end(ee_addr);
1703}
1704
1705static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1706{
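 /* bit-bang a read command on the serial EEPROM and clock in 16 data bits */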
1707 int i;
1708 u16 retval = 0;
1709 void __iomem *ee_addr = ioaddr + Cfg9346;
1710 int read_cmd = location | (EE_READ_CMD << addr_len);
1711
1712 eeprom_cmd_start(ee_addr);
1713 eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1714
1715 for (i = 16; i > 0; i--) {
1716 writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1717 eeprom_delay ();
1718 retval =
1719 (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1720 0);
1721 writeb (EE_ENB, ee_addr);
1722 eeprom_delay ();
1723 }
1724
1725 eeprom_cmd_end(ee_addr);
1726
1727 return retval;
1728}
1729
1730static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1731 int addr_len)
1732{
1733 int i;
1734 void __iomem *ee_addr = ioaddr + Cfg9346;
1735 int write_cmd = location | (EE_WRITE_CMD << addr_len);
1736
1737 eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1738
1739 eeprom_cmd_start(ee_addr);
1740 eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1741 eeprom_cmd(ee_addr, val, 16);
1742 eeprom_cmd_end(ee_addr);
1743
1744 eeprom_cmd_start(ee_addr);
1745 for (i = 0; i < 20000; i++)
1746 if (readb(ee_addr) & EE_DATA_READ)
1747 break;
1748 eeprom_cmd_end(ee_addr);
1749
1750 eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1751}
1752
1753static int cp_get_eeprom_len(struct net_device *dev)
1754{
1755 struct cp_private *cp = netdev_priv(dev);
1756 int size;
1757
1758 spin_lock_irq(&cp->lock);
1759 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1760 spin_unlock_irq(&cp->lock);
1761
1762 return size;
1763}
1764
1765static int cp_get_eeprom(struct net_device *dev,
1766 struct ethtool_eeprom *eeprom, u8 *data)
1767{
1768 struct cp_private *cp = netdev_priv(dev);
1769 unsigned int addr_len;
1770 u16 val;
1771 u32 offset = eeprom->offset >> 1;
1772 u32 len = eeprom->len;
1773 u32 i = 0;
1774
1775 eeprom->magic = CP_EEPROM_MAGIC;
1776
1777 spin_lock_irq(&cp->lock);
1778
1779 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1780
1781 if (eeprom->offset & 1) {
1782 val = read_eeprom(cp->regs, offset, addr_len);
1783 data[i++] = (u8)(val >> 8);
1784 offset++;
1785 }
1786
1787 while (i < len - 1) {
1788 val = read_eeprom(cp->regs, offset, addr_len);
1789 data[i++] = (u8)val;
1790 data[i++] = (u8)(val >> 8);
1791 offset++;
1792 }
1793
1794 if (i < len) {
1795 val = read_eeprom(cp->regs, offset, addr_len);
1796 data[i] = (u8)val;
1797 }
1798
1799 spin_unlock_irq(&cp->lock);
1800 return 0;
1801}
1802
1803static int cp_set_eeprom(struct net_device *dev,
1804 struct ethtool_eeprom *eeprom, u8 *data)
1805{
1806 struct cp_private *cp = netdev_priv(dev);
1807 unsigned int addr_len;
1808 u16 val;
1809 u32 offset = eeprom->offset >> 1;
1810 u32 len = eeprom->len;
1811 u32 i = 0;
1812
1813 if (eeprom->magic != CP_EEPROM_MAGIC)
1814 return -EINVAL;
1815
1816 spin_lock_irq(&cp->lock);
1817
1818 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1819
1820 if (eeprom->offset & 1) {
1821 val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1822 val |= (u16)data[i++] << 8;
1823 write_eeprom(cp->regs, offset, val, addr_len);
1824 offset++;
1825 }
1826
1827 while (i < len - 1) {
1828 val = (u16)data[i++];
1829 val |= (u16)data[i++] << 8;
1830 write_eeprom(cp->regs, offset, val, addr_len);
1831 offset++;
1832 }
1833
1834 if (i < len) {
1835 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1836 val |= (u16)data[i];
1837 write_eeprom(cp->regs, offset, val, addr_len);
1838 }
1839
1840 spin_unlock_irq(&cp->lock);
1841 return 0;
1842}
1843
/* enable PME and put the board into a low-power state */
1845static void cp_set_d3_state (struct cp_private *cp)
1846{
1847 pci_enable_wake(cp->pdev, PCI_D0, 1);
1848 pci_set_power_state (cp->pdev, PCI_D3hot);
1849}
1850
1851static netdev_features_t cp_features_check(struct sk_buff *skb,
1852 struct net_device *dev,
1853 netdev_features_t features)
1854{
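 /* the hardware MSS field is only 11 bits wide (MSSMask), so larger GSO
 * sizes must fall back to software segmentation
 */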
1855 if (skb_shinfo(skb)->gso_size > MSSMask)
1856 features &= ~NETIF_F_TSO;
1857
1858 return vlan_features_check(skb, features);
1859}
1860static const struct net_device_ops cp_netdev_ops = {
1861 .ndo_open = cp_open,
1862 .ndo_stop = cp_close,
1863 .ndo_validate_addr = eth_validate_addr,
1864 .ndo_set_mac_address = cp_set_mac_address,
1865 .ndo_set_rx_mode = cp_set_rx_mode,
1866 .ndo_get_stats = cp_get_stats,
1867 .ndo_do_ioctl = cp_ioctl,
1868 .ndo_start_xmit = cp_start_xmit,
1869 .ndo_tx_timeout = cp_tx_timeout,
1870 .ndo_set_features = cp_set_features,
1871 .ndo_change_mtu = cp_change_mtu,
1872 .ndo_features_check = cp_features_check,
1873
1874#ifdef CONFIG_NET_POLL_CONTROLLER
1875 .ndo_poll_controller = cp_poll_controller,
1876#endif
1877};
1878
1879static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1880{
1881 struct net_device *dev;
1882 struct cp_private *cp;
1883 int rc;
1884 void __iomem *regs;
1885 resource_size_t pciaddr;
1886 unsigned int addr_len, i, pci_using_dac;
1887
1888 pr_info_once("%s", version);
1889
1890 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1891 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1892 dev_info(&pdev->dev,
1893 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1894 pdev->vendor, pdev->device, pdev->revision);
1895 return -ENODEV;
1896 }
1897
1898 dev = alloc_etherdev(sizeof(struct cp_private));
1899 if (!dev)
1900 return -ENOMEM;
1901 SET_NETDEV_DEV(dev, &pdev->dev);
1902
1903 cp = netdev_priv(dev);
1904 cp->pdev = pdev;
1905 cp->dev = dev;
1906 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1907 spin_lock_init (&cp->lock);
1908 cp->mii_if.dev = dev;
1909 cp->mii_if.mdio_read = mdio_read;
1910 cp->mii_if.mdio_write = mdio_write;
1911 cp->mii_if.phy_id = CP_INTERNAL_PHY;
1912 cp->mii_if.phy_id_mask = 0x1f;
1913 cp->mii_if.reg_num_mask = 0x1f;
1914 cp_set_rxbufsize(cp);
1915
1916 rc = pci_enable_device(pdev);
1917 if (rc)
1918 goto err_out_free;
1919
1920 rc = pci_set_mwi(pdev);
1921 if (rc)
1922 goto err_out_disable;
1923
1924 rc = pci_request_regions(pdev, DRV_NAME);
1925 if (rc)
1926 goto err_out_mwi;
1927
1928 pciaddr = pci_resource_start(pdev, 1);
1929 if (!pciaddr) {
1930 rc = -EIO;
1931 dev_err(&pdev->dev, "no MMIO resource\n");
1932 goto err_out_res;
1933 }
1934 if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1935 rc = -EIO;
1936 dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1937 (unsigned long long)pci_resource_len(pdev, 1));
1938 goto err_out_res;
1939 }
1940
 /* configure DMA attributes */
1942 if ((sizeof(dma_addr_t) > 4) &&
1943 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1944 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1945 pci_using_dac = 1;
1946 } else {
1947 pci_using_dac = 0;
1948
1949 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1950 if (rc) {
1951 dev_err(&pdev->dev,
1952 "No usable DMA configuration, aborting\n");
1953 goto err_out_res;
1954 }
1955 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1956 if (rc) {
1957 dev_err(&pdev->dev,
1958 "No usable consistent DMA configuration, aborting\n");
1959 goto err_out_res;
1960 }
1961 }
1962
1963 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1964 PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1965
1966 dev->features |= NETIF_F_RXCSUM;
1967 dev->hw_features |= NETIF_F_RXCSUM;
1968
1969 regs = ioremap(pciaddr, CP_REGS_SIZE);
1970 if (!regs) {
1971 rc = -EIO;
1972 dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1973 (unsigned long long)pci_resource_len(pdev, 1),
1974 (unsigned long long)pciaddr);
1975 goto err_out_res;
1976 }
1977 cp->regs = regs;
1978
1979 cp_stop_hw(cp);
1980
 /* read the MAC address from the EEPROM */
1982 addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1983 for (i = 0; i < 3; i++)
1984 ((__le16 *) (dev->dev_addr))[i] =
1985 cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1986
1987 dev->netdev_ops = &cp_netdev_ops;
1988 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1989 dev->ethtool_ops = &cp_ethtool_ops;
1990 dev->watchdog_timeo = TX_TIMEOUT;
1991
1992 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1993 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1994
1995 if (pci_using_dac)
1996 dev->features |= NETIF_F_HIGHDMA;
1997
1998 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1999 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2000 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2001 NETIF_F_HIGHDMA;
2002
 /* MTU range: 60 - 4096 */
2004 dev->min_mtu = CP_MIN_MTU;
2005 dev->max_mtu = CP_MAX_MTU;
2006
2007 rc = register_netdev(dev);
2008 if (rc)
2009 goto err_out_iomap;
2010
2011 netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
2012 regs, dev->dev_addr, pdev->irq);
2013
2014 pci_set_drvdata(pdev, dev);
2015
 /* enable PCI bus-mastering */
2017 pci_set_master(pdev);
2018
2019 if (cp->wol_enabled)
2020 cp_set_d3_state (cp);
2021
2022 return 0;
2023
2024err_out_iomap:
2025 iounmap(regs);
2026err_out_res:
2027 pci_release_regions(pdev);
2028err_out_mwi:
2029 pci_clear_mwi(pdev);
2030err_out_disable:
2031 pci_disable_device(pdev);
2032err_out_free:
2033 free_netdev(dev);
2034 return rc;
2035}
2036
2037static void cp_remove_one (struct pci_dev *pdev)
2038{
2039 struct net_device *dev = pci_get_drvdata(pdev);
2040 struct cp_private *cp = netdev_priv(dev);
2041
2042 unregister_netdev(dev);
2043 iounmap(cp->regs);
2044 if (cp->wol_enabled)
2045 pci_set_power_state (pdev, PCI_D0);
2046 pci_release_regions(pdev);
2047 pci_clear_mwi(pdev);
2048 pci_disable_device(pdev);
2049 free_netdev(dev);
2050}
2051
2052#ifdef CONFIG_PM
2053static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2054{
2055 struct net_device *dev = pci_get_drvdata(pdev);
2056 struct cp_private *cp = netdev_priv(dev);
2057 unsigned long flags;
2058
2059 if (!netif_running(dev))
2060 return 0;
2061
2062 netif_device_detach (dev);
2063 netif_stop_queue (dev);
2064
2065 spin_lock_irqsave (&cp->lock, flags);
 /* disable interrupts and stop Rx/Tx */
2068 cpw16 (IntrMask, 0);
 cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2070
2071 spin_unlock_irqrestore (&cp->lock, flags);
2072
2073 pci_save_state(pdev);
2074 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2075 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2076
2077 return 0;
2078}
2079
2080static int cp_resume (struct pci_dev *pdev)
2081{
2082 struct net_device *dev = pci_get_drvdata (pdev);
2083 struct cp_private *cp = netdev_priv(dev);
2084 unsigned long flags;
2085
2086 if (!netif_running(dev))
2087 return 0;
2088
2089 netif_device_attach (dev);
2090
2091 pci_set_power_state(pdev, PCI_D0);
2092 pci_restore_state(pdev);
2093 pci_enable_wake(pdev, PCI_D0, 0);
 /* re-initialize the rings and hardware state after resume */
2096 cp_init_rings_index (cp);
2097 cp_init_hw (cp);
2098 cp_enable_irq(cp);
2099 netif_start_queue (dev);
2100
2101 spin_lock_irqsave (&cp->lock, flags);
2102
2103 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2104
2105 spin_unlock_irqrestore (&cp->lock, flags);
2106
2107 return 0;
2108}
2109#endif
2110
2111static const struct pci_device_id cp_pci_tbl[] = {
2112 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
2113 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
2114 { },
2115};
2116MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
2117
2118static struct pci_driver cp_driver = {
2119 .name = DRV_NAME,
2120 .id_table = cp_pci_tbl,
2121 .probe = cp_init_one,
2122 .remove = cp_remove_one,
2123#ifdef CONFIG_PM
2124 .resume = cp_resume,
2125 .suspend = cp_suspend,
2126#endif
2127};
2128
2129module_pci_driver(cp_driver);
2130