29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/module.h>
32#include <linux/types.h>
33#include <linux/init.h>
34#include <linux/pci.h>
35#include <linux/vmalloc.h>
36#include <linux/pagemap.h>
37#include <linux/delay.h>
38#include <linux/netdevice.h>
39#include <linux/interrupt.h>
40#include <linux/tcp.h>
41#include <linux/ipv6.h>
42#include <linux/slab.h>
43#include <net/checksum.h>
44#include <net/ip6_checksum.h>
45#include <linux/ethtool.h>
46#include <linux/if_vlan.h>
47#include <linux/cpu.h>
48#include <linux/smp.h>
49#include <linux/pm_qos.h>
50#include <linux/pm_runtime.h>
51#include <linux/aer.h>
52#include <linux/prefetch.h>
53
54#include "e1000.h"
55
56#define DRV_EXTRAVERSION "-k"
57
58#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
59char e1000e_driver_name[] = "e1000e";
60const char e1000e_driver_version[] = DRV_VERSION;
61
62#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
63static int debug = -1;
64module_param(debug, int, 0);
65MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
66
67static const struct e1000_info *e1000_info_tbl[] = {
68 [board_82571] = &e1000_82571_info,
69 [board_82572] = &e1000_82572_info,
70 [board_82573] = &e1000_82573_info,
71 [board_82574] = &e1000_82574_info,
72 [board_82583] = &e1000_82583_info,
73 [board_80003es2lan] = &e1000_es2_info,
74 [board_ich8lan] = &e1000_ich8_info,
75 [board_ich9lan] = &e1000_ich9_info,
76 [board_ich10lan] = &e1000_ich10_info,
77 [board_pchlan] = &e1000_pch_info,
78 [board_pch2lan] = &e1000_pch2_info,
79 [board_pch_lpt] = &e1000_pch_lpt_info,
80};
81
82struct e1000_reg_info {
83 u32 ofs;
84 char *name;
85};
86
87static const struct e1000_reg_info e1000_reg_info_tbl[] = {
88
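 /* General Registers */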
89 {E1000_CTRL, "CTRL"},
90 {E1000_STATUS, "STATUS"},
91 {E1000_CTRL_EXT, "CTRL_EXT"},
92
93
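 /* Interrupt Registers */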
94 {E1000_ICR, "ICR"},
95
96
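 /* Rx Registers */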
97 {E1000_RCTL, "RCTL"},
98 {E1000_RDLEN(0), "RDLEN"},
99 {E1000_RDH(0), "RDH"},
100 {E1000_RDT(0), "RDT"},
101 {E1000_RDTR, "RDTR"},
102 {E1000_RXDCTL(0), "RXDCTL"},
103 {E1000_ERT, "ERT"},
104 {E1000_RDBAL(0), "RDBAL"},
105 {E1000_RDBAH(0), "RDBAH"},
106 {E1000_RDFH, "RDFH"},
107 {E1000_RDFT, "RDFT"},
108 {E1000_RDFHS, "RDFHS"},
109 {E1000_RDFTS, "RDFTS"},
110 {E1000_RDFPC, "RDFPC"},
111
112
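 /* Tx Registers */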
113 {E1000_TCTL, "TCTL"},
114 {E1000_TDBAL(0), "TDBAL"},
115 {E1000_TDBAH(0), "TDBAH"},
116 {E1000_TDLEN(0), "TDLEN"},
117 {E1000_TDH(0), "TDH"},
118 {E1000_TDT(0), "TDT"},
119 {E1000_TIDV, "TIDV"},
120 {E1000_TXDCTL(0), "TXDCTL"},
121 {E1000_TADV, "TADV"},
122 {E1000_TARC(0), "TARC"},
123 {E1000_TDFH, "TDFH"},
124 {E1000_TDFT, "TDFT"},
125 {E1000_TDFHS, "TDFHS"},
126 {E1000_TDFTS, "TDFTS"},
127 {E1000_TDFPC, "TDFPC"},
128
129
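 /* List Terminator */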
130 {0, NULL}
131};
132
133
134
135
136
137
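/* e1000_regdump - register printout routine */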
138static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
139{
140 int n = 0;
141 char rname[16];
142 u32 regs[8];
143
144 switch (reginfo->ofs) {
145 case E1000_RXDCTL(0):
146 for (n = 0; n < 2; n++)
147 regs[n] = __er32(hw, E1000_RXDCTL(n));
148 break;
149 case E1000_TXDCTL(0):
150 for (n = 0; n < 2; n++)
151 regs[n] = __er32(hw, E1000_TXDCTL(n));
152 break;
153 case E1000_TARC(0):
154 for (n = 0; n < 2; n++)
155 regs[n] = __er32(hw, E1000_TARC(n));
156 break;
157 default:
158 pr_info("%-15s %08x\n",
159 reginfo->name, __er32(hw, reginfo->ofs));
160 return;
161 }
162
163 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
164 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
165}
166
167static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
168 struct e1000_buffer *bi)
169{
170 int i;
171 struct e1000_ps_page *ps_page;
172
173 for (i = 0; i < adapter->rx_ps_pages; i++) {
174 ps_page = &bi->ps_pages[i];
175
176 if (ps_page->page) {
177 pr_info("packet dump for ps_page %d:\n", i);
178 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
179 16, 1, page_address(ps_page->page),
180 PAGE_SIZE, true);
181 }
182 }
183}
184
185
186
187
188
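/* e1000e_dump - Print registers, Tx-ring and Rx-ring */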
189static void e1000e_dump(struct e1000_adapter *adapter)
190{
191 struct net_device *netdev = adapter->netdev;
192 struct e1000_hw *hw = &adapter->hw;
193 struct e1000_reg_info *reginfo;
194 struct e1000_ring *tx_ring = adapter->tx_ring;
195 struct e1000_tx_desc *tx_desc;
196 struct my_u0 {
197 __le64 a;
198 __le64 b;
199 } *u0;
200 struct e1000_buffer *buffer_info;
201 struct e1000_ring *rx_ring = adapter->rx_ring;
202 union e1000_rx_desc_packet_split *rx_desc_ps;
203 union e1000_rx_desc_extended *rx_desc;
204 struct my_u1 {
205 __le64 a;
206 __le64 b;
207 __le64 c;
208 __le64 d;
209 } *u1;
210 u32 staterr;
211 int i = 0;
212
213 if (!netif_msg_hw(adapter))
214 return;
215
216
217 if (netdev) {
218 dev_info(&adapter->pdev->dev, "Net device Info\n");
219 pr_info("Device Name state trans_start last_rx\n");
220 pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
221 netdev->state, netdev->trans_start, netdev->last_rx);
222 }
223
224
225 dev_info(&adapter->pdev->dev, "Register Dump\n");
226 pr_info(" Register Name Value\n");
227 for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
228 reginfo->name; reginfo++) {
229 e1000_regdump(hw, reginfo);
230 }
231
232
233 if (!netdev || !netif_running(netdev))
234 return;
235
236 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
237 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
238 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
239 pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
240 0, tx_ring->next_to_use, tx_ring->next_to_clean,
241 (unsigned long long)buffer_info->dma,
242 buffer_info->length,
243 buffer_info->next_to_watch,
244 (unsigned long long)buffer_info->time_stamp);
245
246
247 if (!netif_msg_tx_done(adapter))
248 goto rx_ring_summary;
249
250 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
 /* Transmit Descriptor Formats - DEXT[29] selects Legacy (0) or
  * Extended (1) descriptors; the legend lines below summarize the
  * legacy, extended context and extended data layouts.
  */
279 pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
280 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
281 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
282 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
283 const char *next_desc;
284 tx_desc = E1000_TX_DESC(*tx_ring, i);
285 buffer_info = &tx_ring->buffer_info[i];
286 u0 = (struct my_u0 *)tx_desc;
287 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
288 next_desc = " NTC/U";
289 else if (i == tx_ring->next_to_use)
290 next_desc = " NTU";
291 else if (i == tx_ring->next_to_clean)
292 next_desc = " NTC";
293 else
294 next_desc = "";
295 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
296 (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
297 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
298 i,
299 (unsigned long long)le64_to_cpu(u0->a),
300 (unsigned long long)le64_to_cpu(u0->b),
301 (unsigned long long)buffer_info->dma,
302 buffer_info->length, buffer_info->next_to_watch,
303 (unsigned long long)buffer_info->time_stamp,
304 buffer_info->skb, next_desc);
305
306 if (netif_msg_pktdata(adapter) && buffer_info->skb)
307 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
308 16, 1, buffer_info->skb->data,
309 buffer_info->skb->len, true);
310 }
311
312
313rx_ring_summary:
314 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
315 pr_info("Queue [NTU] [NTC]\n");
316 pr_info(" %5d %5X %5X\n",
317 0, rx_ring->next_to_use, rx_ring->next_to_clean);
318
319
320 if (!netif_msg_rx_status(adapter))
321 return;
322
323 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
324 switch (adapter->rx_ps_pages) {
325 case 1:
326 case 2:
327 case 3:
 /* [Extended] Packet Split Receive Descriptor (Read) format: one
  * buffer address per packet-split buffer (up to four per descriptor).
  */
340 pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
 /* [Extended] Packet Split Receive Descriptor (Write-Back) format:
  * checksum/ip-id/mrq, status/error, length and VLAN fields.
  */
352 pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
353 for (i = 0; i < rx_ring->count; i++) {
354 const char *next_desc;
355 buffer_info = &rx_ring->buffer_info[i];
356 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
357 u1 = (struct my_u1 *)rx_desc_ps;
358 staterr =
359 le32_to_cpu(rx_desc_ps->wb.middle.status_error);
360
361 if (i == rx_ring->next_to_use)
362 next_desc = " NTU";
363 else if (i == rx_ring->next_to_clean)
364 next_desc = " NTC";
365 else
366 next_desc = "";
367
368 if (staterr & E1000_RXD_STAT_DD) {
369
370 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
371 "RWB", i,
372 (unsigned long long)le64_to_cpu(u1->a),
373 (unsigned long long)le64_to_cpu(u1->b),
374 (unsigned long long)le64_to_cpu(u1->c),
375 (unsigned long long)le64_to_cpu(u1->d),
376 buffer_info->skb, next_desc);
377 } else {
378 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
379 "R ", i,
380 (unsigned long long)le64_to_cpu(u1->a),
381 (unsigned long long)le64_to_cpu(u1->b),
382 (unsigned long long)le64_to_cpu(u1->c),
383 (unsigned long long)le64_to_cpu(u1->d),
384 (unsigned long long)buffer_info->dma,
385 buffer_info->skb, next_desc);
386
387 if (netif_msg_pktdata(adapter))
388 e1000e_dump_ps_pages(adapter,
389 buffer_info);
390 }
391 }
392 break;
393 default:
394 case 0:
 /* Extended Receive Descriptor (Read) format: buffer address and a
  * reserved field.
  */
403 pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
 /* Extended Receive Descriptor (Write-Back) format: checksum/ip-id/mrq,
  * VLAN, length and status/error fields.
  */
417 pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");
418
419 for (i = 0; i < rx_ring->count; i++) {
420 const char *next_desc;
421
422 buffer_info = &rx_ring->buffer_info[i];
423 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
424 u1 = (struct my_u1 *)rx_desc;
425 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
426
427 if (i == rx_ring->next_to_use)
428 next_desc = " NTU";
429 else if (i == rx_ring->next_to_clean)
430 next_desc = " NTC";
431 else
432 next_desc = "";
433
434 if (staterr & E1000_RXD_STAT_DD) {
435
436 pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
437 "RWB", i,
438 (unsigned long long)le64_to_cpu(u1->a),
439 (unsigned long long)le64_to_cpu(u1->b),
440 buffer_info->skb, next_desc);
441 } else {
442 pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
443 "R ", i,
444 (unsigned long long)le64_to_cpu(u1->a),
445 (unsigned long long)le64_to_cpu(u1->b),
446 (unsigned long long)buffer_info->dma,
447 buffer_info->skb, next_desc);
448
449 if (netif_msg_pktdata(adapter) &&
450 buffer_info->skb)
451 print_hex_dump(KERN_INFO, "",
452 DUMP_PREFIX_ADDRESS, 16,
453 1,
454 buffer_info->skb->data,
455 adapter->rx_buffer_len,
456 true);
457 }
458 }
459 }
460}
461
462
463
464
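/* e1000_desc_unused - calculate the number of unused descriptors in a ring */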
465static int e1000_desc_unused(struct e1000_ring *ring)
466{
467 if (ring->next_to_clean > ring->next_to_use)
468 return ring->next_to_clean - ring->next_to_use - 1;
469
470 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
471}
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
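/**
 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
 * @adapter: board private structure
 * @hwtstamps: time stamp structure to update
 * @systim: unsigned 64bit system time value
 *
 * Convert the system time value stored in the Rx/Tx STMP registers into
 * a hwtstamp that can be used by the upper-level time stamping functions.
 */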
487static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
488 struct skb_shared_hwtstamps *hwtstamps,
489 u64 systim)
490{
491 u64 ns;
492 unsigned long flags;
493
494 spin_lock_irqsave(&adapter->systim_lock, flags);
495 ns = timecounter_cyc2time(&adapter->tc, systim);
496 spin_unlock_irqrestore(&adapter->systim_lock, flags);
497
498 memset(hwtstamps, 0, sizeof(*hwtstamps));
499 hwtstamps->hwtstamp = ns_to_ktime(ns);
500}
501
502
503
504
505
506
507
508
509
510
511
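/**
 * e1000e_rx_hwtstamp - check for an Rx hardware time stamp
 * @adapter: board private structure
 * @status: descriptor extended error and status field
 * @skb: skb to receive the time stamp
 *
 * If the time stamp is valid, convert it into the timecounter ns value
 * and store the result in the shhwtstamps structure passed up the stack.
 */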
512static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
513 struct sk_buff *skb)
514{
515 struct e1000_hw *hw = &adapter->hw;
516 u64 rxstmp;
517
518 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
519 !(status & E1000_RXDEXT_STATERR_TST) ||
520 !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
521 return;
522
523
524
525
526
527
528
529
530 rxstmp = (u64)er32(RXSTMPL);
531 rxstmp |= (u64)er32(RXSTMPH) << 32;
532 e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);
533
534 adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
535}
536
537
538
539
540
541
542
543
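/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to netdev struct
 * @skb: pointer to sk_buff to be indicated to stack
 * @staterr: descriptor extended error and status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 */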
544static void e1000_receive_skb(struct e1000_adapter *adapter,
545 struct net_device *netdev, struct sk_buff *skb,
546 u32 staterr, __le16 vlan)
547{
548 u16 tag = le16_to_cpu(vlan);
549
550 e1000e_rx_hwtstamp(adapter, staterr, skb);
551
552 skb->protocol = eth_type_trans(skb, netdev);
553
554 if (staterr & E1000_RXD_STAT_VP)
555 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
556
557 napi_gro_receive(&adapter->napi, skb);
558}
559
560
561
562
563
564
565
566
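/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 */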
567static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
568 struct sk_buff *skb)
569{
570 u16 status = (u16)status_err;
571 u8 errors = (u8)(status_err >> 24);
572
573 skb_checksum_none_assert(skb);
574
575
576 if (!(adapter->netdev->features & NETIF_F_RXCSUM))
577 return;
578
579
580 if (status & E1000_RXD_STAT_IXSM)
581 return;
582
583
584 if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
585
586 adapter->hw_csum_err++;
587 return;
588 }
589
590
591 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
592 return;
593
594
595 skb->ip_summed = CHECKSUM_UNNECESSARY;
596 adapter->hw_csum_good++;
597}
598
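/* Write the Rx ring tail while working around possible ME firmware
 * interference: if the value written does not read back, disable receives
 * and schedule a reset of the adapter.
 */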
599static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
600{
601 struct e1000_adapter *adapter = rx_ring->adapter;
602 struct e1000_hw *hw = &adapter->hw;
603 s32 ret_val = __ew32_prepare(hw);
604
605 writel(i, rx_ring->tail);
606
607 if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
608 u32 rctl = er32(RCTL);
609 ew32(RCTL, rctl & ~E1000_RCTL_EN);
610 e_err("ME firmware caused invalid RDT - resetting\n");
611 schedule_work(&adapter->reset_task);
612 }
613}
614
615static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
616{
617 struct e1000_adapter *adapter = tx_ring->adapter;
618 struct e1000_hw *hw = &adapter->hw;
619 s32 ret_val = __ew32_prepare(hw);
620
621 writel(i, tx_ring->tail);
622
623 if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
624 u32 tctl = er32(TCTL);
625 ew32(TCTL, tctl & ~E1000_TCTL_EN);
626 e_err("ME firmware caused invalid TDT - resetting\n");
627 schedule_work(&adapter->reset_task);
628 }
629}
630
631
632
633
634
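/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate
 * @gfp: flags for allocation
 */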
635static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
636 int cleaned_count, gfp_t gfp)
637{
638 struct e1000_adapter *adapter = rx_ring->adapter;
639 struct net_device *netdev = adapter->netdev;
640 struct pci_dev *pdev = adapter->pdev;
641 union e1000_rx_desc_extended *rx_desc;
642 struct e1000_buffer *buffer_info;
643 struct sk_buff *skb;
644 unsigned int i;
645 unsigned int bufsz = adapter->rx_buffer_len;
646
647 i = rx_ring->next_to_use;
648 buffer_info = &rx_ring->buffer_info[i];
649
650 while (cleaned_count--) {
651 skb = buffer_info->skb;
652 if (skb) {
653 skb_trim(skb, 0);
654 goto map_skb;
655 }
656
657 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
658 if (!skb) {
659
660 adapter->alloc_rx_buff_failed++;
661 break;
662 }
663
664 buffer_info->skb = skb;
665map_skb:
666 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
667 adapter->rx_buffer_len,
668 DMA_FROM_DEVICE);
669 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
670 dev_err(&pdev->dev, "Rx DMA map failed\n");
671 adapter->rx_dma_failed++;
672 break;
673 }
674
675 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
676 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
677
678 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
679
680
681
682
683
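 /* Force memory writes to complete before letting h/w
  * know there are new descriptors to fetch. (Only
  * applicable for weak-ordered memory model archs,
  * such as IA-64).
  */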
684 wmb();
685 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
686 e1000e_update_rdt_wa(rx_ring, i);
687 else
688 writel(i, rx_ring->tail);
689 }
690 i++;
691 if (i == rx_ring->count)
692 i = 0;
693 buffer_info = &rx_ring->buffer_info[i];
694 }
695
696 rx_ring->next_to_use = i;
697}
698
699
700
701
702
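/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate
 * @gfp: flags for allocation
 */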
703static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
704 int cleaned_count, gfp_t gfp)
705{
706 struct e1000_adapter *adapter = rx_ring->adapter;
707 struct net_device *netdev = adapter->netdev;
708 struct pci_dev *pdev = adapter->pdev;
709 union e1000_rx_desc_packet_split *rx_desc;
710 struct e1000_buffer *buffer_info;
711 struct e1000_ps_page *ps_page;
712 struct sk_buff *skb;
713 unsigned int i, j;
714
715 i = rx_ring->next_to_use;
716 buffer_info = &rx_ring->buffer_info[i];
717
718 while (cleaned_count--) {
719 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
720
721 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
722 ps_page = &buffer_info->ps_pages[j];
723 if (j >= adapter->rx_ps_pages) {
724
725 rx_desc->read.buffer_addr[j + 1] =
726 ~cpu_to_le64(0);
727 continue;
728 }
729 if (!ps_page->page) {
730 ps_page->page = alloc_page(gfp);
731 if (!ps_page->page) {
732 adapter->alloc_rx_buff_failed++;
733 goto no_buffers;
734 }
735 ps_page->dma = dma_map_page(&pdev->dev,
736 ps_page->page,
737 0, PAGE_SIZE,
738 DMA_FROM_DEVICE);
739 if (dma_mapping_error(&pdev->dev,
740 ps_page->dma)) {
741 dev_err(&adapter->pdev->dev,
742 "Rx DMA page map failed\n");
743 adapter->rx_dma_failed++;
744 goto no_buffers;
745 }
746 }
747
748
749
750
751 rx_desc->read.buffer_addr[j + 1] =
752 cpu_to_le64(ps_page->dma);
753 }
754
755 skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
756 gfp);
757
758 if (!skb) {
759 adapter->alloc_rx_buff_failed++;
760 break;
761 }
762
763 buffer_info->skb = skb;
764 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
765 adapter->rx_ps_bsize0,
766 DMA_FROM_DEVICE);
767 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
768 dev_err(&pdev->dev, "Rx DMA map failed\n");
769 adapter->rx_dma_failed++;
770
771 dev_kfree_skb_any(skb);
772 buffer_info->skb = NULL;
773 break;
774 }
775
776 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
777
778 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
779
780
781
782
783
784 wmb();
785 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
786 e1000e_update_rdt_wa(rx_ring, i << 1);
787 else
788 writel(i << 1, rx_ring->tail);
789 }
790
791 i++;
792 if (i == rx_ring->count)
793 i = 0;
794 buffer_info = &rx_ring->buffer_info[i];
795 }
796
797no_buffers:
798 rx_ring->next_to_use = i;
799}
800
801
802
803
804
805
806
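/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate
 * @gfp: flags for allocation
 */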
807static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
808 int cleaned_count, gfp_t gfp)
809{
810 struct e1000_adapter *adapter = rx_ring->adapter;
811 struct net_device *netdev = adapter->netdev;
812 struct pci_dev *pdev = adapter->pdev;
813 union e1000_rx_desc_extended *rx_desc;
814 struct e1000_buffer *buffer_info;
815 struct sk_buff *skb;
816 unsigned int i;
817 unsigned int bufsz = 256 - 16;
818
819 i = rx_ring->next_to_use;
820 buffer_info = &rx_ring->buffer_info[i];
821
822 while (cleaned_count--) {
823 skb = buffer_info->skb;
824 if (skb) {
825 skb_trim(skb, 0);
826 goto check_page;
827 }
828
829 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
830 if (unlikely(!skb)) {
831
832 adapter->alloc_rx_buff_failed++;
833 break;
834 }
835
836 buffer_info->skb = skb;
837check_page:
838
839 if (!buffer_info->page) {
840 buffer_info->page = alloc_page(gfp);
841 if (unlikely(!buffer_info->page)) {
842 adapter->alloc_rx_buff_failed++;
843 break;
844 }
845 }
846
847 if (!buffer_info->dma) {
848 buffer_info->dma = dma_map_page(&pdev->dev,
849 buffer_info->page, 0,
850 PAGE_SIZE,
851 DMA_FROM_DEVICE);
852 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
853 adapter->alloc_rx_buff_failed++;
854 break;
855 }
856 }
857
858 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
859 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
860
861 if (unlikely(++i == rx_ring->count))
862 i = 0;
863 buffer_info = &rx_ring->buffer_info[i];
864 }
865
866 if (likely(rx_ring->next_to_use != i)) {
867 rx_ring->next_to_use = i;
868 if (unlikely(i-- == 0))
869 i = (rx_ring->count - 1);
870
871
872
873
874
875
876 wmb();
877 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
878 e1000e_update_rdt_wa(rx_ring, i);
879 else
880 writel(i, rx_ring->tail);
881 }
882}
883
884static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
885 struct sk_buff *skb)
886{
887 if (netdev->features & NETIF_F_RXHASH)
888 skb->rxhash = le32_to_cpu(rss);
889}
890
891
892
893
894
895
896
897
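/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * The return value indicates whether actual cleaning was done; there is
 * no guarantee that everything was cleaned.
 */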
898static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
899 int work_to_do)
900{
901 struct e1000_adapter *adapter = rx_ring->adapter;
902 struct net_device *netdev = adapter->netdev;
903 struct pci_dev *pdev = adapter->pdev;
904 struct e1000_hw *hw = &adapter->hw;
905 union e1000_rx_desc_extended *rx_desc, *next_rxd;
906 struct e1000_buffer *buffer_info, *next_buffer;
907 u32 length, staterr;
908 unsigned int i;
909 int cleaned_count = 0;
910 bool cleaned = false;
911 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
912
913 i = rx_ring->next_to_clean;
914 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
915 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
916 buffer_info = &rx_ring->buffer_info[i];
917
918 while (staterr & E1000_RXD_STAT_DD) {
919 struct sk_buff *skb;
920
921 if (*work_done >= work_to_do)
922 break;
923 (*work_done)++;
924 rmb();
925
926 skb = buffer_info->skb;
927 buffer_info->skb = NULL;
928
929 prefetch(skb->data - NET_IP_ALIGN);
930
931 i++;
932 if (i == rx_ring->count)
933 i = 0;
934 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
935 prefetch(next_rxd);
936
937 next_buffer = &rx_ring->buffer_info[i];
938
939 cleaned = true;
940 cleaned_count++;
941 dma_unmap_single(&pdev->dev, buffer_info->dma,
942 adapter->rx_buffer_len, DMA_FROM_DEVICE);
943 buffer_info->dma = 0;
944
945 length = le16_to_cpu(rx_desc->wb.upper.length);
946
947
948
949
950
951
952
953 if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
954 adapter->flags2 |= FLAG2_IS_DISCARDING;
955
956 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
957
958 e_dbg("Receive packet consumed multiple buffers\n");
959
960 buffer_info->skb = skb;
961 if (staterr & E1000_RXD_STAT_EOP)
962 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
963 goto next_desc;
964 }
965
966 if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
967 !(netdev->features & NETIF_F_RXALL))) {
968
969 buffer_info->skb = skb;
970 goto next_desc;
971 }
972
973
974 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
975
976
977
978
979 if (netdev->features & NETIF_F_RXFCS)
980 total_rx_bytes -= 4;
981 else
982 length -= 4;
983 }
984
985 total_rx_bytes += length;
986 total_rx_packets++;
987
988
989
990
991
992 if (length < copybreak) {
993 struct sk_buff *new_skb =
994 netdev_alloc_skb_ip_align(netdev, length);
995 if (new_skb) {
996 skb_copy_to_linear_data_offset(new_skb,
997 -NET_IP_ALIGN,
998 (skb->data -
999 NET_IP_ALIGN),
1000 (length +
1001 NET_IP_ALIGN));
1002
1003 buffer_info->skb = skb;
1004 skb = new_skb;
1005 }
1006
1007 }
1008
1009 skb_put(skb, length);
1010
1011
1012 e1000_rx_checksum(adapter, staterr, skb);
1013
1014 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1015
1016 e1000_receive_skb(adapter, netdev, skb, staterr,
1017 rx_desc->wb.upper.vlan);
1018
1019next_desc:
1020 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
1021
1022
1023 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1024 adapter->alloc_rx_buf(rx_ring, cleaned_count,
1025 GFP_ATOMIC);
1026 cleaned_count = 0;
1027 }
1028
1029
1030 rx_desc = next_rxd;
1031 buffer_info = next_buffer;
1032
1033 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1034 }
1035 rx_ring->next_to_clean = i;
1036
1037 cleaned_count = e1000_desc_unused(rx_ring);
1038 if (cleaned_count)
1039 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1040
1041 adapter->total_rx_bytes += total_rx_bytes;
1042 adapter->total_rx_packets += total_rx_packets;
1043 return cleaned;
1044}
1045
1046static void e1000_put_txbuf(struct e1000_ring *tx_ring,
1047 struct e1000_buffer *buffer_info)
1048{
1049 struct e1000_adapter *adapter = tx_ring->adapter;
1050
1051 if (buffer_info->dma) {
1052 if (buffer_info->mapped_as_page)
1053 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1054 buffer_info->length, DMA_TO_DEVICE);
1055 else
1056 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1057 buffer_info->length, DMA_TO_DEVICE);
1058 buffer_info->dma = 0;
1059 }
1060 if (buffer_info->skb) {
1061 dev_kfree_skb_any(buffer_info->skb);
1062 buffer_info->skb = NULL;
1063 }
1064 buffer_info->time_stamp = 0;
1065}
1066
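/**
 * e1000_print_hw_hang - Print the diagnosis of a detected Tx hang
 * @work: pointer to work struct with private info
 */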
1067static void e1000_print_hw_hang(struct work_struct *work)
1068{
1069 struct e1000_adapter *adapter = container_of(work,
1070 struct e1000_adapter,
1071 print_hang_task);
1072 struct net_device *netdev = adapter->netdev;
1073 struct e1000_ring *tx_ring = adapter->tx_ring;
1074 unsigned int i = tx_ring->next_to_clean;
1075 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
1076 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
1077 struct e1000_hw *hw = &adapter->hw;
1078 u16 phy_status, phy_1000t_status, phy_ext_status;
1079 u16 pci_status;
1080
1081 if (test_bit(__E1000_DOWN, &adapter->state))
1082 return;
1083
1084 if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
1085
1086
1087
1088 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1089
1090 e1e_flush();
1091
1092
1093
1094 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1095
1096 e1e_flush();
1097 adapter->tx_hang_recheck = true;
1098 return;
1099 }
1100
1101 adapter->tx_hang_recheck = false;
1102 netif_stop_queue(netdev);
1103
1104 e1e_rphy(hw, MII_BMSR, &phy_status);
1105 e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
1106 e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);
1107
1108 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
1109
1110
1111 e_err("Detected Hardware Unit Hang:\n"
1112 " TDH <%x>\n"
1113 " TDT <%x>\n"
1114 " next_to_use <%x>\n"
1115 " next_to_clean <%x>\n"
1116 "buffer_info[next_to_clean]:\n"
1117 " time_stamp <%lx>\n"
1118 " next_to_watch <%x>\n"
1119 " jiffies <%lx>\n"
1120 " next_to_watch.status <%x>\n"
1121 "MAC Status <%x>\n"
1122 "PHY Status <%x>\n"
1123 "PHY 1000BASE-T Status <%x>\n"
1124 "PHY Extended Status <%x>\n"
1125 "PCI Status <%x>\n",
1126 readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
1127 tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
1128 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
1129 phy_status, phy_1000t_status, phy_ext_status, pci_status);
1130
1131
1132 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
1133 e_err("Try turning off Tx pause (flow control) via ethtool\n");
1134}
1135
1136
1137
1138
1139
1140
1141
1142
1143
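/**
 * e1000e_tx_hwtstamp_work - check for Tx time stamp
 * @work: pointer to work struct
 *
 * This work function polls the TSYNCTXCTL valid bit to determine when a
 * timestamp has been taken for the stored skb, then passes it up the stack.
 */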
1144static void e1000e_tx_hwtstamp_work(struct work_struct *work)
1145{
1146 struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
1147 tx_hwtstamp_work);
1148 struct e1000_hw *hw = &adapter->hw;
1149
1150 if (!adapter->tx_hwtstamp_skb)
1151 return;
1152
1153 if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
1154 struct skb_shared_hwtstamps shhwtstamps;
1155 u64 txstmp;
1156
1157 txstmp = er32(TXSTMPL);
1158 txstmp |= (u64)er32(TXSTMPH) << 32;
1159
1160 e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
1161
1162 skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
1163 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
1164 adapter->tx_hwtstamp_skb = NULL;
1165 } else {
1166
1167 schedule_work(&adapter->tx_hwtstamp_work);
1168 }
1169}
1170
1171
1172
1173
1174
1175
1176
1177
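/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * The return value indicates whether actual cleaning was done; there is
 * no guarantee that everything was cleaned.
 */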
1178static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
1179{
1180 struct e1000_adapter *adapter = tx_ring->adapter;
1181 struct net_device *netdev = adapter->netdev;
1182 struct e1000_hw *hw = &adapter->hw;
1183 struct e1000_tx_desc *tx_desc, *eop_desc;
1184 struct e1000_buffer *buffer_info;
1185 unsigned int i, eop;
1186 unsigned int count = 0;
1187 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
1188 unsigned int bytes_compl = 0, pkts_compl = 0;
1189
1190 i = tx_ring->next_to_clean;
1191 eop = tx_ring->buffer_info[i].next_to_watch;
1192 eop_desc = E1000_TX_DESC(*tx_ring, eop);
1193
1194 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
1195 (count < tx_ring->count)) {
1196 bool cleaned = false;
1197 rmb();
1198 for (; !cleaned; count++) {
1199 tx_desc = E1000_TX_DESC(*tx_ring, i);
1200 buffer_info = &tx_ring->buffer_info[i];
1201 cleaned = (i == eop);
1202
1203 if (cleaned) {
1204 total_tx_packets += buffer_info->segs;
1205 total_tx_bytes += buffer_info->bytecount;
1206 if (buffer_info->skb) {
1207 bytes_compl += buffer_info->skb->len;
1208 pkts_compl++;
1209 }
1210 }
1211
1212 e1000_put_txbuf(tx_ring, buffer_info);
1213 tx_desc->upper.data = 0;
1214
1215 i++;
1216 if (i == tx_ring->count)
1217 i = 0;
1218 }
1219
1220 if (i == tx_ring->next_to_use)
1221 break;
1222 eop = tx_ring->buffer_info[i].next_to_watch;
1223 eop_desc = E1000_TX_DESC(*tx_ring, eop);
1224 }
1225
1226 tx_ring->next_to_clean = i;
1227
1228 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
1229
1230#define TX_WAKE_THRESHOLD 32
1231 if (count && netif_carrier_ok(netdev) &&
1232 e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
1233
1234
1235
1236 smp_mb();
1237
1238 if (netif_queue_stopped(netdev) &&
1239 !(test_bit(__E1000_DOWN, &adapter->state))) {
1240 netif_wake_queue(netdev);
1241 ++adapter->restart_queue;
1242 }
1243 }
1244
1245 if (adapter->detect_tx_hung) {
1246
1247
1248
1249 adapter->detect_tx_hung = false;
1250 if (tx_ring->buffer_info[i].time_stamp &&
1251 time_after(jiffies, tx_ring->buffer_info[i].time_stamp
1252 + (adapter->tx_timeout_factor * HZ)) &&
1253 !(er32(STATUS) & E1000_STATUS_TXOFF))
1254 schedule_work(&adapter->print_hang_task);
1255 else
1256 adapter->tx_hang_recheck = false;
1257 }
1258 adapter->total_tx_bytes += total_tx_bytes;
1259 adapter->total_tx_packets += total_tx_packets;
1260 return count < tx_ring->count;
1261}
1262
1263
1264
1265
1266
1267
1268
1269
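/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * The return value indicates whether actual cleaning was done; there is
 * no guarantee that everything was cleaned.
 */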
1270static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
1271 int work_to_do)
1272{
1273 struct e1000_adapter *adapter = rx_ring->adapter;
1274 struct e1000_hw *hw = &adapter->hw;
1275 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
1276 struct net_device *netdev = adapter->netdev;
1277 struct pci_dev *pdev = adapter->pdev;
1278 struct e1000_buffer *buffer_info, *next_buffer;
1279 struct e1000_ps_page *ps_page;
1280 struct sk_buff *skb;
1281 unsigned int i, j;
1282 u32 length, staterr;
1283 int cleaned_count = 0;
1284 bool cleaned = false;
1285 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1286
1287 i = rx_ring->next_to_clean;
1288 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
1289 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1290 buffer_info = &rx_ring->buffer_info[i];
1291
1292 while (staterr & E1000_RXD_STAT_DD) {
1293 if (*work_done >= work_to_do)
1294 break;
1295 (*work_done)++;
1296 skb = buffer_info->skb;
1297 rmb();
1298
1299
1300 prefetch(skb->data - NET_IP_ALIGN);
1301
1302 i++;
1303 if (i == rx_ring->count)
1304 i = 0;
1305 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
1306 prefetch(next_rxd);
1307
1308 next_buffer = &rx_ring->buffer_info[i];
1309
1310 cleaned = true;
1311 cleaned_count++;
1312 dma_unmap_single(&pdev->dev, buffer_info->dma,
1313 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
1314 buffer_info->dma = 0;
1315
1316
1317 if (!(staterr & E1000_RXD_STAT_EOP))
1318 adapter->flags2 |= FLAG2_IS_DISCARDING;
1319
1320 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
1321 e_dbg("Packet Split buffers didn't pick up the full packet\n");
1322 dev_kfree_skb_irq(skb);
1323 if (staterr & E1000_RXD_STAT_EOP)
1324 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1325 goto next_desc;
1326 }
1327
1328 if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
1329 !(netdev->features & NETIF_F_RXALL))) {
1330 dev_kfree_skb_irq(skb);
1331 goto next_desc;
1332 }
1333
1334 length = le16_to_cpu(rx_desc->wb.middle.length0);
1335
1336 if (!length) {
1337 e_dbg("Last part of the packet spanning multiple descriptors\n");
1338 dev_kfree_skb_irq(skb);
1339 goto next_desc;
1340 }
1341
1342
1343 skb_put(skb, length);
1344
1345 {
1346
1347
1348
1349 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
1350
1351
1352
1353
1354
1355
1356 if (l1 && (l1 <= copybreak) &&
1357 ((length + l1) <= adapter->rx_ps_bsize0)) {
1358 u8 *vaddr;
1359
1360 ps_page = &buffer_info->ps_pages[0];
1361
1362
1363
1364
1365
1366 dma_sync_single_for_cpu(&pdev->dev,
1367 ps_page->dma,
1368 PAGE_SIZE,
1369 DMA_FROM_DEVICE);
1370 vaddr = kmap_atomic(ps_page->page);
1371 memcpy(skb_tail_pointer(skb), vaddr, l1);
1372 kunmap_atomic(vaddr);
1373 dma_sync_single_for_device(&pdev->dev,
1374 ps_page->dma,
1375 PAGE_SIZE,
1376 DMA_FROM_DEVICE);
1377
1378
1379 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1380 if (!(netdev->features & NETIF_F_RXFCS))
1381 l1 -= 4;
1382 }
1383
1384 skb_put(skb, l1);
1385 goto copydone;
1386 }
1387 }
1388
1389 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1390 length = le16_to_cpu(rx_desc->wb.upper.length[j]);
1391 if (!length)
1392 break;
1393
1394 ps_page = &buffer_info->ps_pages[j];
1395 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1396 DMA_FROM_DEVICE);
1397 ps_page->dma = 0;
1398 skb_fill_page_desc(skb, j, ps_page->page, 0, length);
1399 ps_page->page = NULL;
1400 skb->len += length;
1401 skb->data_len += length;
1402 skb->truesize += PAGE_SIZE;
1403 }
1404
1405
1406
1407
1408 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1409 if (!(netdev->features & NETIF_F_RXFCS))
1410 pskb_trim(skb, skb->len - 4);
1411 }
1412
1413copydone:
1414 total_rx_bytes += skb->len;
1415 total_rx_packets++;
1416
1417 e1000_rx_checksum(adapter, staterr, skb);
1418
1419 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1420
1421 if (rx_desc->wb.upper.header_status &
1422 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
1423 adapter->rx_hdr_split++;
1424
1425 e1000_receive_skb(adapter, netdev, skb, staterr,
1426 rx_desc->wb.middle.vlan);
1427
1428next_desc:
1429 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
1430 buffer_info->skb = NULL;
1431
1432
1433 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1434 adapter->alloc_rx_buf(rx_ring, cleaned_count,
1435 GFP_ATOMIC);
1436 cleaned_count = 0;
1437 }
1438
1439
1440 rx_desc = next_rxd;
1441 buffer_info = next_buffer;
1442
1443 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1444 }
1445 rx_ring->next_to_clean = i;
1446
1447 cleaned_count = e1000_desc_unused(rx_ring);
1448 if (cleaned_count)
1449 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1450
1451 adapter->total_rx_bytes += total_rx_bytes;
1452 adapter->total_rx_packets += total_rx_packets;
1453 return cleaned;
1454}
1455
1456
1457
1458
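/* e1000_consume_page - account a page consumed into a receive skb */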
1459static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
1460 u16 length)
1461{
1462 bi->page = NULL;
1463 skb->len += length;
1464 skb->data_len += length;
1465 skb->truesize += PAGE_SIZE;
1466}
1467
1468
1469
1470
1471
1472
1473
1474
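/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack (jumbo buffers)
 * @rx_ring: Rx descriptor ring
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * The return value indicates whether actual cleaning was done; there is
 * no guarantee that everything was cleaned.
 */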
1475static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1476 int work_to_do)
1477{
1478 struct e1000_adapter *adapter = rx_ring->adapter;
1479 struct net_device *netdev = adapter->netdev;
1480 struct pci_dev *pdev = adapter->pdev;
1481 union e1000_rx_desc_extended *rx_desc, *next_rxd;
1482 struct e1000_buffer *buffer_info, *next_buffer;
1483 u32 length, staterr;
1484 unsigned int i;
1485 int cleaned_count = 0;
1486 bool cleaned = false;
1487 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1488 struct skb_shared_info *shinfo;
1489
1490 i = rx_ring->next_to_clean;
1491 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
1492 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1493 buffer_info = &rx_ring->buffer_info[i];
1494
1495 while (staterr & E1000_RXD_STAT_DD) {
1496 struct sk_buff *skb;
1497
1498 if (*work_done >= work_to_do)
1499 break;
1500 (*work_done)++;
1501 rmb();
1502
1503 skb = buffer_info->skb;
1504 buffer_info->skb = NULL;
1505
1506 ++i;
1507 if (i == rx_ring->count)
1508 i = 0;
1509 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
1510 prefetch(next_rxd);
1511
1512 next_buffer = &rx_ring->buffer_info[i];
1513
1514 cleaned = true;
1515 cleaned_count++;
1516 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
1517 DMA_FROM_DEVICE);
1518 buffer_info->dma = 0;
1519
1520 length = le16_to_cpu(rx_desc->wb.upper.length);
1521
1522
1523 if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
1524 ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
1525 !(netdev->features & NETIF_F_RXALL)))) {
1526
1527 buffer_info->skb = skb;
1528
1529 if (rx_ring->rx_skb_top)
1530 dev_kfree_skb_irq(rx_ring->rx_skb_top);
1531 rx_ring->rx_skb_top = NULL;
1532 goto next_desc;
1533 }
1534#define rxtop (rx_ring->rx_skb_top)
1535 if (!(staterr & E1000_RXD_STAT_EOP)) {
1536
1537 if (!rxtop) {
1538
1539 rxtop = skb;
1540 skb_fill_page_desc(rxtop, 0, buffer_info->page,
1541 0, length);
1542 } else {
1543
1544 shinfo = skb_shinfo(rxtop);
1545 skb_fill_page_desc(rxtop, shinfo->nr_frags,
1546 buffer_info->page, 0,
1547 length);
1548
1549 buffer_info->skb = skb;
1550 }
1551 e1000_consume_page(buffer_info, rxtop, length);
1552 goto next_desc;
1553 } else {
1554 if (rxtop) {
1555
1556 shinfo = skb_shinfo(rxtop);
1557 skb_fill_page_desc(rxtop, shinfo->nr_frags,
1558 buffer_info->page, 0,
1559 length);
1560
1561
1562
1563 buffer_info->skb = skb;
1564 skb = rxtop;
1565 rxtop = NULL;
1566 e1000_consume_page(buffer_info, skb, length);
1567 } else {
1568
1569
1570
1571 if (length <= copybreak &&
1572 skb_tailroom(skb) >= length) {
1573 u8 *vaddr;
1574 vaddr = kmap_atomic(buffer_info->page);
1575 memcpy(skb_tail_pointer(skb), vaddr,
1576 length);
1577 kunmap_atomic(vaddr);
1578
1579
1580
1581 skb_put(skb, length);
1582 } else {
1583 skb_fill_page_desc(skb, 0,
1584 buffer_info->page, 0,
1585 length);
1586 e1000_consume_page(buffer_info, skb,
1587 length);
1588 }
1589 }
1590 }
1591
1592
1593 e1000_rx_checksum(adapter, staterr, skb);
1594
1595 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1596
1597
1598 total_rx_bytes += skb->len;
1599 total_rx_packets++;
1600
1601
1602 if (!pskb_may_pull(skb, ETH_HLEN)) {
1603 e_err("pskb_may_pull failed.\n");
1604 dev_kfree_skb_irq(skb);
1605 goto next_desc;
1606 }
1607
1608 e1000_receive_skb(adapter, netdev, skb, staterr,
1609 rx_desc->wb.upper.vlan);
1610
1611next_desc:
1612 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
1613
1614
1615 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
1616 adapter->alloc_rx_buf(rx_ring, cleaned_count,
1617 GFP_ATOMIC);
1618 cleaned_count = 0;
1619 }
1620
1621
1622 rx_desc = next_rxd;
1623 buffer_info = next_buffer;
1624
1625 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1626 }
1627 rx_ring->next_to_clean = i;
1628
1629 cleaned_count = e1000_desc_unused(rx_ring);
1630 if (cleaned_count)
1631 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1632
1633 adapter->total_rx_bytes += total_rx_bytes;
1634 adapter->total_rx_packets += total_rx_packets;
1635 return cleaned;
1636}
1637
1638
1639
1640
1641
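/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: Rx descriptor ring
 */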
1642static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
1643{
1644 struct e1000_adapter *adapter = rx_ring->adapter;
1645 struct e1000_buffer *buffer_info;
1646 struct e1000_ps_page *ps_page;
1647 struct pci_dev *pdev = adapter->pdev;
1648 unsigned int i, j;
1649
1650
1651 for (i = 0; i < rx_ring->count; i++) {
1652 buffer_info = &rx_ring->buffer_info[i];
1653 if (buffer_info->dma) {
1654 if (adapter->clean_rx == e1000_clean_rx_irq)
1655 dma_unmap_single(&pdev->dev, buffer_info->dma,
1656 adapter->rx_buffer_len,
1657 DMA_FROM_DEVICE);
1658 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1659 dma_unmap_page(&pdev->dev, buffer_info->dma,
1660 PAGE_SIZE, DMA_FROM_DEVICE);
1661 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1662 dma_unmap_single(&pdev->dev, buffer_info->dma,
1663 adapter->rx_ps_bsize0,
1664 DMA_FROM_DEVICE);
1665 buffer_info->dma = 0;
1666 }
1667
1668 if (buffer_info->page) {
1669 put_page(buffer_info->page);
1670 buffer_info->page = NULL;
1671 }
1672
1673 if (buffer_info->skb) {
1674 dev_kfree_skb(buffer_info->skb);
1675 buffer_info->skb = NULL;
1676 }
1677
1678 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1679 ps_page = &buffer_info->ps_pages[j];
1680 if (!ps_page->page)
1681 break;
1682 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1683 DMA_FROM_DEVICE);
1684 ps_page->dma = 0;
1685 put_page(ps_page->page);
1686 ps_page->page = NULL;
1687 }
1688 }
1689
1690
1691 if (rx_ring->rx_skb_top) {
1692 dev_kfree_skb(rx_ring->rx_skb_top);
1693 rx_ring->rx_skb_top = NULL;
1694 }
1695
1696
1697 memset(rx_ring->desc, 0, rx_ring->size);
1698
1699 rx_ring->next_to_clean = 0;
1700 rx_ring->next_to_use = 0;
1701 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1702
1703 writel(0, rx_ring->head);
1704 if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
1705 e1000e_update_rdt_wa(rx_ring, 0);
1706 else
1707 writel(0, rx_ring->tail);
1708}
1709
1710static void e1000e_downshift_workaround(struct work_struct *work)
1711{
1712 struct e1000_adapter *adapter = container_of(work,
1713 struct e1000_adapter,
1714 downshift_task);
1715
1716 if (test_bit(__E1000_DOWN, &adapter->state))
1717 return;
1718
1719 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1720}
1721
1722
1723
1724
1725
1726
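/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */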
1727static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
1728{
1729 struct net_device *netdev = data;
1730 struct e1000_adapter *adapter = netdev_priv(netdev);
1731 struct e1000_hw *hw = &adapter->hw;
1732 u32 icr = er32(ICR);
1733
1734
1735 if (icr & E1000_ICR_LSC) {
1736 hw->mac.get_link_status = true;
1737
1738
1739
1740 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1741 (!(er32(STATUS) & E1000_STATUS_LU)))
1742 schedule_work(&adapter->downshift_task);
1743
1744
1745
1746
1747
1748 if (netif_carrier_ok(netdev) &&
1749 adapter->flags & FLAG_RX_NEEDS_RESTART) {
1750
1751 u32 rctl = er32(RCTL);
1752 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1753 adapter->flags |= FLAG_RESTART_NOW;
1754 }
1755
1756 if (!test_bit(__E1000_DOWN, &adapter->state))
1757 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1758 }
1759
1760
1761 if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
1762 u32 pbeccsts = er32(PBECCSTS);
1763
1764 adapter->corr_errors +=
1765 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1766 adapter->uncorr_errors +=
1767 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1768 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1769
1770
1771 schedule_work(&adapter->reset_task);
1772
1773
1774 return IRQ_HANDLED;
1775 }
1776
1777 if (napi_schedule_prep(&adapter->napi)) {
1778 adapter->total_tx_bytes = 0;
1779 adapter->total_tx_packets = 0;
1780 adapter->total_rx_bytes = 0;
1781 adapter->total_rx_packets = 0;
1782 __napi_schedule(&adapter->napi);
1783 }
1784
1785 return IRQ_HANDLED;
1786}
1787
1788
1789
1790
1791
1792
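/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */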
1793static irqreturn_t e1000_intr(int __always_unused irq, void *data)
1794{
1795 struct net_device *netdev = data;
1796 struct e1000_adapter *adapter = netdev_priv(netdev);
1797 struct e1000_hw *hw = &adapter->hw;
1798 u32 rctl, icr = er32(ICR);
1799
1800 if (!icr || test_bit(__E1000_DOWN, &adapter->state))
1801 return IRQ_NONE;
1802
1803
1804
1805
1806 if (!(icr & E1000_ICR_INT_ASSERTED))
1807 return IRQ_NONE;
1808
1809
1810
1811
1812
1813
1814 if (icr & E1000_ICR_LSC) {
1815 hw->mac.get_link_status = true;
1816
1817
1818
1819 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1820 (!(er32(STATUS) & E1000_STATUS_LU)))
1821 schedule_work(&adapter->downshift_task);
1822
1823
1824
1825
1826
1827
1828 if (netif_carrier_ok(netdev) &&
1829 (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1830
1831 rctl = er32(RCTL);
1832 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1833 adapter->flags |= FLAG_RESTART_NOW;
1834 }
1835
1836 if (!test_bit(__E1000_DOWN, &adapter->state))
1837 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1838 }
1839
1840
1841 if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
1842 u32 pbeccsts = er32(PBECCSTS);
1843
1844 adapter->corr_errors +=
1845 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1846 adapter->uncorr_errors +=
1847 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1848 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1849
1850
1851 schedule_work(&adapter->reset_task);
1852
1853
1854 return IRQ_HANDLED;
1855 }
1856
1857 if (napi_schedule_prep(&adapter->napi)) {
1858 adapter->total_tx_bytes = 0;
1859 adapter->total_tx_packets = 0;
1860 adapter->total_rx_bytes = 0;
1861 adapter->total_rx_packets = 0;
1862 __napi_schedule(&adapter->napi);
1863 }
1864
1865 return IRQ_HANDLED;
1866}
1867
1868static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
1869{
1870 struct net_device *netdev = data;
1871 struct e1000_adapter *adapter = netdev_priv(netdev);
1872 struct e1000_hw *hw = &adapter->hw;
1873 u32 icr = er32(ICR);
1874
1875 if (!(icr & E1000_ICR_INT_ASSERTED)) {
1876 if (!test_bit(__E1000_DOWN, &adapter->state))
1877 ew32(IMS, E1000_IMS_OTHER);
1878 return IRQ_NONE;
1879 }
1880
1881 if (icr & adapter->eiac_mask)
1882 ew32(ICS, (icr & adapter->eiac_mask));
1883
1884 if (icr & E1000_ICR_OTHER) {
1885 if (!(icr & E1000_ICR_LSC))
1886 goto no_link_interrupt;
1887 hw->mac.get_link_status = true;
1888
1889 if (!test_bit(__E1000_DOWN, &adapter->state))
1890 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1891 }
1892
1893no_link_interrupt:
1894 if (!test_bit(__E1000_DOWN, &adapter->state))
1895 ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
1896
1897 return IRQ_HANDLED;
1898}
1899
1900static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
1901{
1902 struct net_device *netdev = data;
1903 struct e1000_adapter *adapter = netdev_priv(netdev);
1904 struct e1000_hw *hw = &adapter->hw;
1905 struct e1000_ring *tx_ring = adapter->tx_ring;
1906
1907 adapter->total_tx_bytes = 0;
1908 adapter->total_tx_packets = 0;
1909
1910 if (!e1000_clean_tx_irq(tx_ring))
1911
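 /* Ring was not completely cleaned, so fire another interrupt */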
1912 ew32(ICS, tx_ring->ims_val);
1913
1914 return IRQ_HANDLED;
1915}
1916
1917static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
1918{
1919 struct net_device *netdev = data;
1920 struct e1000_adapter *adapter = netdev_priv(netdev);
1921 struct e1000_ring *rx_ring = adapter->rx_ring;
1922
1923
1924
1925
1926 if (rx_ring->set_itr) {
1927 writel(1000000000 / (rx_ring->itr_val * 256),
1928 rx_ring->itr_register);
1929 rx_ring->set_itr = 0;
1930 }
1931
1932 if (napi_schedule_prep(&adapter->napi)) {
1933 adapter->total_rx_bytes = 0;
1934 adapter->total_rx_packets = 0;
1935 __napi_schedule(&adapter->napi);
1936 }
1937 return IRQ_HANDLED;
1938}
1939
1940
1941
1942
1943
1944
1945
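/**
 * e1000_configure_msix - Configure MSI-X hardware
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 */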
1946static void e1000_configure_msix(struct e1000_adapter *adapter)
1947{
1948 struct e1000_hw *hw = &adapter->hw;
1949 struct e1000_ring *rx_ring = adapter->rx_ring;
1950 struct e1000_ring *tx_ring = adapter->tx_ring;
1951 int vector = 0;
1952 u32 ctrl_ext, ivar = 0;
1953
1954 adapter->eiac_mask = 0;
1955
1956
1957 if (hw->mac.type == e1000_82574) {
1958 u32 rfctl = er32(RFCTL);
1959 rfctl |= E1000_RFCTL_ACK_DIS;
1960 ew32(RFCTL, rfctl);
1961 }
1962
1963
1964 rx_ring->ims_val = E1000_IMS_RXQ0;
1965 adapter->eiac_mask |= rx_ring->ims_val;
1966 if (rx_ring->itr_val)
1967 writel(1000000000 / (rx_ring->itr_val * 256),
1968 rx_ring->itr_register);
1969 else
1970 writel(1, rx_ring->itr_register);
1971 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1972
1973
1974 tx_ring->ims_val = E1000_IMS_TXQ0;
1975 vector++;
1976 if (tx_ring->itr_val)
1977 writel(1000000000 / (tx_ring->itr_val * 256),
1978 tx_ring->itr_register);
1979 else
1980 writel(1, tx_ring->itr_register);
1981 adapter->eiac_mask |= tx_ring->ims_val;
1982 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1983
1984
1985 vector++;
1986 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
1987 if (rx_ring->itr_val)
1988 writel(1000000000 / (rx_ring->itr_val * 256),
1989 hw->hw_addr + E1000_EITR_82574(vector));
1990 else
1991 writel(1, hw->hw_addr + E1000_EITR_82574(vector));
1992
1993
1994 ivar |= (1 << 31);
1995
1996 ew32(IVAR, ivar);
1997
1998
1999 ctrl_ext = er32(CTRL_EXT);
2000 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
2001
2002
2003 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
2004 ctrl_ext |= E1000_CTRL_EXT_EIAME;
2005 ew32(CTRL_EXT, ctrl_ext);
2006 e1e_flush();
2007}
2008
2009void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
2010{
2011 if (adapter->msix_entries) {
2012 pci_disable_msix(adapter->pdev);
2013 kfree(adapter->msix_entries);
2014 adapter->msix_entries = NULL;
2015 } else if (adapter->flags & FLAG_MSI_ENABLED) {
2016 pci_disable_msi(adapter->pdev);
2017 adapter->flags &= ~FLAG_MSI_ENABLED;
2018 }
2019}
2020
2021
2022
2023
2024
2025
2026
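/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 */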
2027void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
2028{
2029 int err;
2030 int i;
2031
2032 switch (adapter->int_mode) {
2033 case E1000E_INT_MODE_MSIX:
2034 if (adapter->flags & FLAG_HAS_MSIX) {
2035 adapter->num_vectors = 3;
2036 adapter->msix_entries = kcalloc(adapter->num_vectors,
2037 sizeof(struct
2038 msix_entry),
2039 GFP_KERNEL);
2040 if (adapter->msix_entries) {
2041 for (i = 0; i < adapter->num_vectors; i++)
2042 adapter->msix_entries[i].entry = i;
2043
2044 err = pci_enable_msix(adapter->pdev,
2045 adapter->msix_entries,
2046 adapter->num_vectors);
2047 if (err == 0)
2048 return;
2049 }
2050
2051 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
2052 e1000e_reset_interrupt_capability(adapter);
2053 }
2054 adapter->int_mode = E1000E_INT_MODE_MSI;
2055
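 /* Fall through */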
2056 case E1000E_INT_MODE_MSI:
2057 if (!pci_enable_msi(adapter->pdev)) {
2058 adapter->flags |= FLAG_MSI_ENABLED;
2059 } else {
2060 adapter->int_mode = E1000E_INT_MODE_LEGACY;
2061 e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
2062 }
2063
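 /* Fall through */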
2064 case E1000E_INT_MODE_LEGACY:
2065
2066 break;
2067 }
2068
2069
2070 adapter->num_vectors = 1;
2071}
2072
2073
2074
2075
2076
2077
2078
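/**
 * e1000_request_msix - Initialize MSI-X interrupts
 *
 * e1000_request_msix allocates MSI-X vectors and requests
 * interrupts from the kernel.
 */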
2079static int e1000_request_msix(struct e1000_adapter *adapter)
2080{
2081 struct net_device *netdev = adapter->netdev;
2082 int err = 0, vector = 0;
2083
2084 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2085 snprintf(adapter->rx_ring->name,
2086 sizeof(adapter->rx_ring->name) - 1,
2087 "%s-rx-0", netdev->name);
2088 else
2089 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
2090 err = request_irq(adapter->msix_entries[vector].vector,
2091 e1000_intr_msix_rx, 0, adapter->rx_ring->name,
2092 netdev);
2093 if (err)
2094 return err;
2095 adapter->rx_ring->itr_register = adapter->hw.hw_addr +
2096 E1000_EITR_82574(vector);
2097 adapter->rx_ring->itr_val = adapter->itr;
2098 vector++;
2099
2100 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2101 snprintf(adapter->tx_ring->name,
2102 sizeof(adapter->tx_ring->name) - 1,
2103 "%s-tx-0", netdev->name);
2104 else
2105 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
2106 err = request_irq(adapter->msix_entries[vector].vector,
2107 e1000_intr_msix_tx, 0, adapter->tx_ring->name,
2108 netdev);
2109 if (err)
2110 return err;
2111 adapter->tx_ring->itr_register = adapter->hw.hw_addr +
2112 E1000_EITR_82574(vector);
2113 adapter->tx_ring->itr_val = adapter->itr;
2114 vector++;
2115
2116 err = request_irq(adapter->msix_entries[vector].vector,
2117 e1000_msix_other, 0, netdev->name, netdev);
2118 if (err)
2119 return err;
2120
2121 e1000_configure_msix(adapter);
2122
2123 return 0;
2124}
2125
2126
2127
2128
2129
2130
2131
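/**
 * e1000_request_irq - initialize interrupts
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 */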
2132static int e1000_request_irq(struct e1000_adapter *adapter)
2133{
2134 struct net_device *netdev = adapter->netdev;
2135 int err;
2136
2137 if (adapter->msix_entries) {
2138 err = e1000_request_msix(adapter);
2139 if (!err)
2140 return err;
2141
2142 e1000e_reset_interrupt_capability(adapter);
2143 adapter->int_mode = E1000E_INT_MODE_MSI;
2144 e1000e_set_interrupt_capability(adapter);
2145 }
2146 if (adapter->flags & FLAG_MSI_ENABLED) {
2147 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
2148 netdev->name, netdev);
2149 if (!err)
2150 return err;
2151
2152
2153 e1000e_reset_interrupt_capability(adapter);
2154 adapter->int_mode = E1000E_INT_MODE_LEGACY;
2155 }
2156
2157 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
2158 netdev->name, netdev);
2159 if (err)
2160 e_err("Unable to allocate interrupt, Error: %d\n", err);
2161
2162 return err;
2163}
2164
2165static void e1000_free_irq(struct e1000_adapter *adapter)
2166{
2167 struct net_device *netdev = adapter->netdev;
2168
2169 if (adapter->msix_entries) {
2170 int vector = 0;
2171
2172 free_irq(adapter->msix_entries[vector].vector, netdev);
2173 vector++;
2174
2175 free_irq(adapter->msix_entries[vector].vector, netdev);
2176 vector++;
2177
2178
2179 free_irq(adapter->msix_entries[vector].vector, netdev);
2180 return;
2181 }
2182
2183 free_irq(adapter->pdev->irq, netdev);
2184}
2185
2186
2187
2188
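/* e1000_irq_disable - Mask off interrupt generation on the NIC */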
2189static void e1000_irq_disable(struct e1000_adapter *adapter)
2190{
2191 struct e1000_hw *hw = &adapter->hw;
2192
2193 ew32(IMC, ~0);
2194 if (adapter->msix_entries)
2195 ew32(EIAC_82574, 0);
2196 e1e_flush();
2197
2198 if (adapter->msix_entries) {
2199 int i;
2200 for (i = 0; i < adapter->num_vectors; i++)
2201 synchronize_irq(adapter->msix_entries[i].vector);
2202 } else {
2203 synchronize_irq(adapter->pdev->irq);
2204 }
2205}
2206
2207
2208
2209
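/* e1000_irq_enable - Enable default interrupt generation settings */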
2210static void e1000_irq_enable(struct e1000_adapter *adapter)
2211{
2212 struct e1000_hw *hw = &adapter->hw;
2213
2214 if (adapter->msix_entries) {
2215 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2216 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
2217 } else if (hw->mac.type == e1000_pch_lpt) {
2218 ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
2219 } else {
2220 ew32(IMS, IMS_ENABLE_MASK);
2221 }
2222 e1e_flush();
2223}
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
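/**
 * e1000e_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * Sets {CTRL_EXT|SWSM}:DRV_LOAD bit to let the firmware know that
 * the driver has taken control of the hardware.
 */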
2234void e1000e_get_hw_control(struct e1000_adapter *adapter)
2235{
2236 struct e1000_hw *hw = &adapter->hw;
2237 u32 ctrl_ext;
2238 u32 swsm;
2239
2240
2241 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2242 swsm = er32(SWSM);
2243 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2244 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2245 ctrl_ext = er32(CTRL_EXT);
2246 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2247 }
2248}
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
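/**
 * e1000e_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * Clears {CTRL_EXT|SWSM}:DRV_LOAD bit to let the firmware know that
 * the driver has released control of the hardware.
 */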
2260void e1000e_release_hw_control(struct e1000_adapter *adapter)
2261{
2262 struct e1000_hw *hw = &adapter->hw;
2263 u32 ctrl_ext;
2264 u32 swsm;
2265
2266
2267 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2268 swsm = er32(SWSM);
2269 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2270 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2271 ctrl_ext = er32(CTRL_EXT);
2272 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2273 }
2274}
2275
2276
2277
2278
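/* e1000_alloc_ring_dma - allocate coherent DMA memory for a descriptor ring */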
2279static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2280 struct e1000_ring *ring)
2281{
2282 struct pci_dev *pdev = adapter->pdev;
2283
2284 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2285 GFP_KERNEL);
2286 if (!ring->desc)
2287 return -ENOMEM;
2288
2289 return 0;
2290}
2291
2292
2293
2294
2295
2296
2297
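/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring
 *
 * Return 0 on success, negative on failure
 */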
2298int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
2299{
2300 struct e1000_adapter *adapter = tx_ring->adapter;
2301 int err = -ENOMEM, size;
2302
2303 size = sizeof(struct e1000_buffer) * tx_ring->count;
2304 tx_ring->buffer_info = vzalloc(size);
2305 if (!tx_ring->buffer_info)
2306 goto err;
2307
2308
2309 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2310 tx_ring->size = ALIGN(tx_ring->size, 4096);
2311
2312 err = e1000_alloc_ring_dma(adapter, tx_ring);
2313 if (err)
2314 goto err;
2315
2316 tx_ring->next_to_use = 0;
2317 tx_ring->next_to_clean = 0;
2318
2319 return 0;
2320err:
2321 vfree(tx_ring->buffer_info);
2322 e_err("Unable to allocate memory for the transmit descriptor ring\n");
2323 return err;
2324}
2325
2326
2327
2328
2329
2330
2331
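/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: Rx descriptor ring
 *
 * Returns 0 on success, negative on failure
 */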
2332int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
2333{
2334 struct e1000_adapter *adapter = rx_ring->adapter;
2335 struct e1000_buffer *buffer_info;
2336 int i, size, desc_len, err = -ENOMEM;
2337
2338 size = sizeof(struct e1000_buffer) * rx_ring->count;
2339 rx_ring->buffer_info = vzalloc(size);
2340 if (!rx_ring->buffer_info)
2341 goto err;
2342
2343 for (i = 0; i < rx_ring->count; i++) {
2344 buffer_info = &rx_ring->buffer_info[i];
2345 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2346 sizeof(struct e1000_ps_page),
2347 GFP_KERNEL);
2348 if (!buffer_info->ps_pages)
2349 goto err_pages;
2350 }
2351
2352 desc_len = sizeof(union e1000_rx_desc_packet_split);
2353
2354
2355 rx_ring->size = rx_ring->count * desc_len;
2356 rx_ring->size = ALIGN(rx_ring->size, 4096);
2357
2358 err = e1000_alloc_ring_dma(adapter, rx_ring);
2359 if (err)
2360 goto err_pages;
2361
2362 rx_ring->next_to_clean = 0;
2363 rx_ring->next_to_use = 0;
2364 rx_ring->rx_skb_top = NULL;
2365
2366 return 0;
2367
2368err_pages:
2369 for (i = 0; i < rx_ring->count; i++) {
2370 buffer_info = &rx_ring->buffer_info[i];
2371 kfree(buffer_info->ps_pages);
2372 }
2373err:
2374 vfree(rx_ring->buffer_info);
2375 e_err("Unable to allocate memory for the receive descriptor ring\n");
2376 return err;
2377}
2378
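/**
 * e1000_clean_tx_ring - free all Tx buffers and reset the Tx ring
 * @tx_ring: Tx descriptor ring
 **/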
2383static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2384{
2385 struct e1000_adapter *adapter = tx_ring->adapter;
2386 struct e1000_buffer *buffer_info;
2387 unsigned long size;
2388 unsigned int i;
2389
2390 for (i = 0; i < tx_ring->count; i++) {
2391 buffer_info = &tx_ring->buffer_info[i];
2392 e1000_put_txbuf(tx_ring, buffer_info);
2393 }
2394
2395 netdev_reset_queue(adapter->netdev);
2396 size = sizeof(struct e1000_buffer) * tx_ring->count;
2397 memset(tx_ring->buffer_info, 0, size);
2398
2399 memset(tx_ring->desc, 0, tx_ring->size);
2400
2401 tx_ring->next_to_use = 0;
2402 tx_ring->next_to_clean = 0;
2403
2404 writel(0, tx_ring->head);
2405 if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
2406 e1000e_update_tdt_wa(tx_ring, 0);
2407 else
2408 writel(0, tx_ring->tail);
2409}
2410
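/**
 * e1000e_free_tx_resources - free Tx resources per queue
 * @tx_ring: Tx descriptor ring
 *
 * Free all transmit software resources
 **/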
2417void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
2418{
2419 struct e1000_adapter *adapter = tx_ring->adapter;
2420 struct pci_dev *pdev = adapter->pdev;
2421
2422 e1000_clean_tx_ring(tx_ring);
2423
2424 vfree(tx_ring->buffer_info);
2425 tx_ring->buffer_info = NULL;
2426
2427 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2428 tx_ring->dma);
2429 tx_ring->desc = NULL;
2430}
2431
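/**
 * e1000e_free_rx_resources - free Rx resources
 * @rx_ring: Rx descriptor ring
 *
 * Free all receive software resources
 **/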
2438void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
2439{
2440 struct e1000_adapter *adapter = rx_ring->adapter;
2441 struct pci_dev *pdev = adapter->pdev;
2442 int i;
2443
2444 e1000_clean_rx_ring(rx_ring);
2445
2446 for (i = 0; i < rx_ring->count; i++)
2447 kfree(rx_ring->buffer_info[i].ps_pages);
2448
2449 vfree(rx_ring->buffer_info);
2450 rx_ring->buffer_info = NULL;
2451
2452 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2453 rx_ring->dma);
2454 rx_ring->desc = NULL;
2455}
2456
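/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @itr_setting: current latency class (lowest_latency/low_latency/bulk_latency)
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Returns a new latency class based on packet and byte counts seen since
 * the last interrupt.  Small, infrequent packets push the result toward
 * lowest_latency, while large sustained transfers push it toward
 * bulk_latency, trading response time against bulk throughput.
 **/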
2473static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2474{
2475 unsigned int retval = itr_setting;
2476
2477 if (packets == 0)
2478 return itr_setting;
2479
2480 switch (itr_setting) {
2481 case lowest_latency:
2482
2483 if (bytes / packets > 8000)
2484 retval = bulk_latency;
2485 else if ((packets < 5) && (bytes > 512))
2486 retval = low_latency;
2487 break;
2488 case low_latency:
2489 if (bytes > 10000) {
2490
2491 if (bytes / packets > 8000)
2492 retval = bulk_latency;
2493 else if ((packets < 10) || ((bytes / packets) > 1200))
2494 retval = bulk_latency;
2495 else if ((packets > 35))
2496 retval = lowest_latency;
2497 } else if (bytes / packets > 2000) {
2498 retval = bulk_latency;
2499 } else if (packets <= 2 && bytes < 512) {
2500 retval = lowest_latency;
2501 }
2502 break;
2503 case bulk_latency:
2504 if (bytes > 25000) {
2505 if (packets > 35)
2506 retval = low_latency;
2507 } else if (bytes < 6000) {
2508 retval = low_latency;
2509 }
2510 break;
2511 }
2512
2513 return retval;
2514}
2515
2516static void e1000_set_itr(struct e1000_adapter *adapter)
2517{
2518 u16 current_itr;
2519 u32 new_itr = adapter->itr;
2520
2521
2522 if (adapter->link_speed != SPEED_1000) {
2523 current_itr = 0;
2524 new_itr = 4000;
2525 goto set_itr_now;
2526 }
2527
2528 if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2529 new_itr = 0;
2530 goto set_itr_now;
2531 }
2532
2533 adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
2534 adapter->total_tx_packets,
2535 adapter->total_tx_bytes);
2536
2537 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2538 adapter->tx_itr = low_latency;
2539
2540 adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
2541 adapter->total_rx_packets,
2542 adapter->total_rx_bytes);
2543
2544 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2545 adapter->rx_itr = low_latency;
2546
2547 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2548
2549
2550 switch (current_itr) {
2551 case lowest_latency:
2552 new_itr = 70000;
2553 break;
2554 case low_latency:
2555 new_itr = 20000;
2556 break;
2557 case bulk_latency:
2558 new_itr = 4000;
2559 break;
2560 default:
2561 break;
2562 }
2563
2564set_itr_now:
2565 if (new_itr != adapter->itr) {
2566
2567
2568
2569
2570 new_itr = new_itr > adapter->itr ?
2571 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
2572 adapter->itr = new_itr;
2573 adapter->rx_ring->itr_val = new_itr;
2574 if (adapter->msix_entries)
2575 adapter->rx_ring->set_itr = 1;
2576 else
2577 e1000e_write_itr(adapter, new_itr);
2578 }
2579}
2580
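/**
 * e1000e_write_itr - write the ITR value to the appropriate registers
 * @adapter: address of board private structure
 * @itr: new ITR value to program, in interrupts/sec (0 disables throttling)
 *
 * Converts the requested interrupt rate into the hardware's 256 ns interval
 * format and writes it to the EITR register of every MSI-X vector, or to
 * the single ITR register when MSI-X is not in use.
 **/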
2590void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
2591{
2592 struct e1000_hw *hw = &adapter->hw;
2593 u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
2594
2595 if (adapter->msix_entries) {
2596 int vector;
2597
2598 for (vector = 0; vector < adapter->num_vectors; vector++)
2599 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
2600 } else {
2601 ew32(ITR, new_itr);
2602 }
2603}
2604
2605
2606
2607
2608
2609static int e1000_alloc_queues(struct e1000_adapter *adapter)
2610{
2611 int size = sizeof(struct e1000_ring);
2612
2613 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
2614 if (!adapter->tx_ring)
2615 goto err;
2616 adapter->tx_ring->count = adapter->tx_ring_count;
2617 adapter->tx_ring->adapter = adapter;
2618
2619 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
2620 if (!adapter->rx_ring)
2621 goto err;
2622 adapter->rx_ring->count = adapter->rx_ring_count;
2623 adapter->rx_ring->adapter = adapter;
2624
2625 return 0;
2626err:
2627 e_err("Unable to allocate memory for queues\n");
2628 kfree(adapter->rx_ring);
2629 kfree(adapter->tx_ring);
2630 return -ENOMEM;
2631}
2632
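/**
 * e1000e_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @weight: number of packets driver is allowed to process this poll
 **/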
2638static int e1000e_poll(struct napi_struct *napi, int weight)
2639{
2640 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2641 napi);
2642 struct e1000_hw *hw = &adapter->hw;
2643 struct net_device *poll_dev = adapter->netdev;
2644 int tx_cleaned = 1, work_done = 0;
2645
2646 adapter = netdev_priv(poll_dev);
2647
2648 if (!adapter->msix_entries ||
2649 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2650 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2651
2652 adapter->clean_rx(adapter->rx_ring, &work_done, weight);
2653
2654 if (!tx_cleaned)
2655 work_done = weight;
2656
2657
2658 if (work_done < weight) {
2659 if (adapter->itr_setting & 3)
2660 e1000_set_itr(adapter);
2661 napi_complete(napi);
2662 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2663 if (adapter->msix_entries)
2664 ew32(IMS, adapter->rx_ring->ims_val);
2665 else
2666 e1000_irq_enable(adapter);
2667 }
2668 }
2669
2670 return work_done;
2671}
2672
2673static int e1000_vlan_rx_add_vid(struct net_device *netdev,
2674 __always_unused __be16 proto, u16 vid)
2675{
2676 struct e1000_adapter *adapter = netdev_priv(netdev);
2677 struct e1000_hw *hw = &adapter->hw;
2678 u32 vfta, index;
2679
2680
2681 if ((adapter->hw.mng_cookie.status &
2682 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2683 (vid == adapter->mng_vlan_id))
2684 return 0;
2685
2686
2687 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2688 index = (vid >> 5) & 0x7F;
2689 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2690 vfta |= (1 << (vid & 0x1F));
2691 hw->mac.ops.write_vfta(hw, index, vfta);
2692 }
2693
2694 set_bit(vid, adapter->active_vlans);
2695
2696 return 0;
2697}
2698
2699static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
2700 __always_unused __be16 proto, u16 vid)
2701{
2702 struct e1000_adapter *adapter = netdev_priv(netdev);
2703 struct e1000_hw *hw = &adapter->hw;
2704 u32 vfta, index;
2705
2706 if ((adapter->hw.mng_cookie.status &
2707 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2708 (vid == adapter->mng_vlan_id)) {
2709
2710 e1000e_release_hw_control(adapter);
2711 return 0;
2712 }
2713
2714
2715 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2716 index = (vid >> 5) & 0x7F;
2717 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2718 vfta &= ~(1 << (vid & 0x1F));
2719 hw->mac.ops.write_vfta(hw, index, vfta);
2720 }
2721
2722 clear_bit(vid, adapter->active_vlans);
2723
2724 return 0;
2725}
2726
2727
2728
2729
2730
2731static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2732{
2733 struct net_device *netdev = adapter->netdev;
2734 struct e1000_hw *hw = &adapter->hw;
2735 u32 rctl;
2736
2737 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2738
2739 rctl = er32(RCTL);
2740 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2741 ew32(RCTL, rctl);
2742
2743 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2744 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
2745 adapter->mng_vlan_id);
2746 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2747 }
2748 }
2749}
2750
2751
2752
2753
2754
2755static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2756{
2757 struct e1000_hw *hw = &adapter->hw;
2758 u32 rctl;
2759
2760 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2761
2762 rctl = er32(RCTL);
2763 rctl |= E1000_RCTL_VFE;
2764 rctl &= ~E1000_RCTL_CFIEN;
2765 ew32(RCTL, rctl);
2766 }
2767}
2768
2769
2770
2771
2772
2773static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
2774{
2775 struct e1000_hw *hw = &adapter->hw;
2776 u32 ctrl;
2777
2778
2779 ctrl = er32(CTRL);
2780 ctrl &= ~E1000_CTRL_VME;
2781 ew32(CTRL, ctrl);
2782}
2783
2784
2785
2786
2787
2788static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2789{
2790 struct e1000_hw *hw = &adapter->hw;
2791 u32 ctrl;
2792
2793
2794 ctrl = er32(CTRL);
2795 ctrl |= E1000_CTRL_VME;
2796 ew32(CTRL, ctrl);
2797}
2798
2799static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2800{
2801 struct net_device *netdev = adapter->netdev;
2802 u16 vid = adapter->hw.mng_cookie.vlan_id;
2803 u16 old_vid = adapter->mng_vlan_id;
2804
2805 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2806 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
2807 adapter->mng_vlan_id = vid;
2808 }
2809
2810 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2811 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
2812}
2813
2814static void e1000_restore_vlan(struct e1000_adapter *adapter)
2815{
2816 u16 vid;
2817
2818 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
2819
2820 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2821 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2822}
2823
2824static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2825{
2826 struct e1000_hw *hw = &adapter->hw;
2827 u32 manc, manc2h, mdef, i, j;
2828
2829 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2830 return;
2831
2832 manc = er32(MANC);
2833
2834
2835
2836
2837
2838 manc |= E1000_MANC_EN_MNG2HOST;
2839 manc2h = er32(MANC2H);
2840
2841 switch (hw->mac.type) {
2842 default:
2843 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2844 break;
2845 case e1000_82574:
2846 case e1000_82583:
2847
2848
2849
2850 for (i = 0, j = 0; i < 8; i++) {
2851 mdef = er32(MDEF(i));
2852
2853
2854 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2855 continue;
2856
2857
2858 if (mdef)
2859 manc2h |= (1 << i);
2860
2861 j |= mdef;
2862 }
2863
2864 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2865 break;
2866
2867
2868 for (i = 0, j = 0; i < 8; i++)
2869 if (er32(MDEF(i)) == 0) {
2870 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2871 E1000_MDEF_PORT_664));
2872 manc2h |= (1 << 1);
2873 j++;
2874 break;
2875 }
2876
2877 if (!j)
2878 e_warn("Unable to create IPMI pass-through filter\n");
2879 break;
2880 }
2881
2882 ew32(MANC2H, manc2h);
2883 ew32(MANC, manc);
2884}
2885
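/**
 * e1000_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/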
2892static void e1000_configure_tx(struct e1000_adapter *adapter)
2893{
2894 struct e1000_hw *hw = &adapter->hw;
2895 struct e1000_ring *tx_ring = adapter->tx_ring;
2896 u64 tdba;
2897 u32 tdlen, tarc;
2898
2899
2900 tdba = tx_ring->dma;
2901 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2902 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2903 ew32(TDBAH(0), (tdba >> 32));
2904 ew32(TDLEN(0), tdlen);
2905 ew32(TDH(0), 0);
2906 ew32(TDT(0), 0);
2907 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2908 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
2909
2910
2911 ew32(TIDV, adapter->tx_int_delay);
2912
2913 ew32(TADV, adapter->tx_abs_int_delay);
2914
2915 if (adapter->flags2 & FLAG2_DMA_BURST) {
2916 u32 txdctl = er32(TXDCTL(0));
2917 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2918 E1000_TXDCTL_WTHRESH);
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2929 ew32(TXDCTL(0), txdctl);
2930 }
2931
2932 ew32(TXDCTL(1), er32(TXDCTL(0)));
2933
2934 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2935 tarc = er32(TARC(0));
2936
2937
2938
2939#define SPEED_MODE_BIT (1 << 21)
2940 tarc |= SPEED_MODE_BIT;
2941 ew32(TARC(0), tarc);
2942 }
2943
2944
2945 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2946 tarc = er32(TARC(0));
2947 tarc |= 1;
2948 ew32(TARC(0), tarc);
2949 tarc = er32(TARC(1));
2950 tarc |= 1;
2951 ew32(TARC(1), tarc);
2952 }
2953
2954
2955 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2956
2957
2958 if (adapter->tx_int_delay)
2959 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2960
2961
2962 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2963
2964 hw->mac.ops.config_collision_dist(hw);
2965}
2966
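/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/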
2971#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2972 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
2973static void e1000_setup_rctl(struct e1000_adapter *adapter)
2974{
2975 struct e1000_hw *hw = &adapter->hw;
2976 u32 rctl, rfctl;
2977 u32 pages = 0;
2978
2979
2980 if ((hw->mac.type >= e1000_pch2lan) &&
2981 (adapter->netdev->mtu > ETH_DATA_LEN) &&
2982 e1000_lv_jumbo_workaround_ich8lan(hw, true))
2983 e_dbg("failed to enable jumbo frame workaround mode\n");
2984
2985
2986 rctl = er32(RCTL);
2987 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2988 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2989 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2990 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2991
2992
2993 rctl &= ~E1000_RCTL_SBP;
2994
2995
2996 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2997 rctl &= ~E1000_RCTL_LPE;
2998 else
2999 rctl |= E1000_RCTL_LPE;
3000
3001
3002
3003
3004
3005 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
3006 rctl |= E1000_RCTL_SECRC;
3007
3008
3009 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
3010 u16 phy_data;
3011
3012 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
3013 phy_data &= 0xfff8;
3014 phy_data |= (1 << 2);
3015 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
3016
3017 e1e_rphy(hw, 22, &phy_data);
3018 phy_data &= 0x0fff;
3019 phy_data |= (1 << 14);
3020 e1e_wphy(hw, 0x10, 0x2823);
3021 e1e_wphy(hw, 0x11, 0x0003);
3022 e1e_wphy(hw, 22, phy_data);
3023 }
3024
3025
3026 rctl &= ~E1000_RCTL_SZ_4096;
3027 rctl |= E1000_RCTL_BSEX;
3028 switch (adapter->rx_buffer_len) {
3029 case 2048:
3030 default:
3031 rctl |= E1000_RCTL_SZ_2048;
3032 rctl &= ~E1000_RCTL_BSEX;
3033 break;
3034 case 4096:
3035 rctl |= E1000_RCTL_SZ_4096;
3036 break;
3037 case 8192:
3038 rctl |= E1000_RCTL_SZ_8192;
3039 break;
3040 case 16384:
3041 rctl |= E1000_RCTL_SZ_16384;
3042 break;
3043 }
3044
3045
3046 rfctl = er32(RFCTL);
3047 rfctl |= E1000_RFCTL_EXTEN;
3048 ew32(RFCTL, rfctl);
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
3065 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
3066 adapter->rx_ps_pages = pages;
3067 else
3068 adapter->rx_ps_pages = 0;
3069
3070 if (adapter->rx_ps_pages) {
3071 u32 psrctl = 0;
3072
3073
3074 rctl |= E1000_RCTL_DTYP_PS;
3075
3076 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
3077
3078 switch (adapter->rx_ps_pages) {
3079 case 3:
3080 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
3081
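		/* fall-through */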
3082 case 2:
3083 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
3084
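		/* fall-through */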
3085 case 1:
3086 psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
3087 break;
3088 }
3089
3090 ew32(PSRCTL, psrctl);
3091 }
3092
3093
3094 if (adapter->netdev->features & NETIF_F_RXALL) {
3095
3096
3097
3098 rctl |= (E1000_RCTL_SBP |
3099 E1000_RCTL_BAM |
3100 E1000_RCTL_PMCF);
3101
3102 rctl &= ~(E1000_RCTL_VFE |
3103 E1000_RCTL_DPF |
3104 E1000_RCTL_CFIEN);
3105
3106
3107
3108 }
3109
3110 ew32(RCTL, rctl);
3111
3112 adapter->flags &= ~FLAG_RESTART_NOW;
3113}
3114
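/**
 * e1000_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/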
3121static void e1000_configure_rx(struct e1000_adapter *adapter)
3122{
3123 struct e1000_hw *hw = &adapter->hw;
3124 struct e1000_ring *rx_ring = adapter->rx_ring;
3125 u64 rdba;
3126 u32 rdlen, rctl, rxcsum, ctrl_ext;
3127
3128 if (adapter->rx_ps_pages) {
3129
3130 rdlen = rx_ring->count *
3131 sizeof(union e1000_rx_desc_packet_split);
3132 adapter->clean_rx = e1000_clean_rx_irq_ps;
3133 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
3134 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
3135 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3136 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3137 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
3138 } else {
3139 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3140 adapter->clean_rx = e1000_clean_rx_irq;
3141 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3142 }
3143
3144
3145 rctl = er32(RCTL);
3146 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3147 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3148 e1e_flush();
3149 usleep_range(10000, 20000);
3150
3151 if (adapter->flags2 & FLAG2_DMA_BURST) {
3152
3153
3154
3155
3156
3157
3158
3159
3160 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3161 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3162
3163
3164
3165
3166 if (adapter->rx_int_delay == DEFAULT_RDTR)
3167 adapter->rx_int_delay = BURST_RDTR;
3168 if (adapter->rx_abs_int_delay == DEFAULT_RADV)
3169 adapter->rx_abs_int_delay = BURST_RADV;
3170 }
3171
3172
3173 ew32(RDTR, adapter->rx_int_delay);
3174
3175
3176 ew32(RADV, adapter->rx_abs_int_delay);
3177 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
3178 e1000e_write_itr(adapter, adapter->itr);
3179
3180 ctrl_ext = er32(CTRL_EXT);
3181
3182 ctrl_ext |= E1000_CTRL_EXT_IAME;
3183 ew32(IAM, 0xffffffff);
3184 ew32(CTRL_EXT, ctrl_ext);
3185 e1e_flush();
3186
3187
3188
3189
3190 rdba = rx_ring->dma;
3191 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3192 ew32(RDBAH(0), (rdba >> 32));
3193 ew32(RDLEN(0), rdlen);
3194 ew32(RDH(0), 0);
3195 ew32(RDT(0), 0);
3196 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3197 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
3198
3199
3200 rxcsum = er32(RXCSUM);
3201 if (adapter->netdev->features & NETIF_F_RXCSUM)
3202 rxcsum |= E1000_RXCSUM_TUOFL;
3203 else
3204 rxcsum &= ~E1000_RXCSUM_TUOFL;
3205 ew32(RXCSUM, rxcsum);
3206
3207
3208
3209
3210 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3211 u32 lat =
3212 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 -
3213 adapter->max_frame_size) * 8 / 1000;
3214
3215 if (adapter->flags & FLAG_IS_ICH) {
3216 u32 rxdctl = er32(RXDCTL(0));
3217 ew32(RXDCTL(0), rxdctl | 0x3);
3218 }
3219
3220 pm_qos_update_request(&adapter->netdev->pm_qos_req, lat);
3221 } else {
3222 pm_qos_update_request(&adapter->netdev->pm_qos_req,
3223 PM_QOS_DEFAULT_VALUE);
3224 }
3225
3226
3227 ew32(RCTL, rctl);
3228}
3229
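/**
 * e1000e_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes the multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 if no addresses were written
 *          the number of addresses written otherwise
 **/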
3239static int e1000e_write_mc_addr_list(struct net_device *netdev)
3240{
3241 struct e1000_adapter *adapter = netdev_priv(netdev);
3242 struct e1000_hw *hw = &adapter->hw;
3243 struct netdev_hw_addr *ha;
3244 u8 *mta_list;
3245 int i;
3246
3247 if (netdev_mc_empty(netdev)) {
3248
3249 hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
3250 return 0;
3251 }
3252
3253 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3254 if (!mta_list)
3255 return -ENOMEM;
3256
3257
3258 i = 0;
3259 netdev_for_each_mc_addr(ha, netdev)
3260 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3261
3262 hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3263 kfree(mta_list);
3264
3265 return netdev_mc_count(netdev);
3266}
3267
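/**
 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes the unicast address list to the RAR table.
 * Returns: -ENOMEM if there are more addresses than available RAR entries
 *          0 if no addresses were written
 *          the number of addresses written otherwise
 **/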
3277static int e1000e_write_uc_addr_list(struct net_device *netdev)
3278{
3279 struct e1000_adapter *adapter = netdev_priv(netdev);
3280 struct e1000_hw *hw = &adapter->hw;
3281 unsigned int rar_entries = hw->mac.rar_entry_count;
3282 int count = 0;
3283
3284
3285 rar_entries--;
3286
3287
3288 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
3289 rar_entries--;
3290
3291
3292 if (netdev_uc_count(netdev) > rar_entries)
3293 return -ENOMEM;
3294
3295 if (!netdev_uc_empty(netdev) && rar_entries) {
3296 struct netdev_hw_addr *ha;
3297
3298
3299
3300
3301 netdev_for_each_uc_addr(ha, netdev) {
3302 if (!rar_entries)
3303 break;
3304 hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3305 count++;
3306 }
3307 }
3308
3309
3310 for (; rar_entries > 0; rar_entries--) {
3311 ew32(RAH(rar_entries), 0);
3312 ew32(RAL(rar_entries), 0);
3313 }
3314 e1e_flush();
3315
3316 return count;
3317}
3318
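/**
 * e1000e_set_rx_mode - secondary unicast, multicast and promiscuous mode set
 * @netdev: network interface device structure
 *
 * The ndo_set_rx_mode entry point is called whenever the unicast or
 * multicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * unicast, multicast, promiscuous mode and VLAN filtering/stripping.
 **/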
3328static void e1000e_set_rx_mode(struct net_device *netdev)
3329{
3330 struct e1000_adapter *adapter = netdev_priv(netdev);
3331 struct e1000_hw *hw = &adapter->hw;
3332 u32 rctl;
3333
3334
3335 rctl = er32(RCTL);
3336
3337
3338 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3339
3340 if (netdev->flags & IFF_PROMISC) {
3341 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3342
3343 e1000e_vlan_filter_disable(adapter);
3344 } else {
3345 int count;
3346
3347 if (netdev->flags & IFF_ALLMULTI) {
3348 rctl |= E1000_RCTL_MPE;
3349 } else {
3350
3351
3352
3353
3354 count = e1000e_write_mc_addr_list(netdev);
3355 if (count < 0)
3356 rctl |= E1000_RCTL_MPE;
3357 }
3358 e1000e_vlan_filter_enable(adapter);
3359
3360
3361
3362
3363 count = e1000e_write_uc_addr_list(netdev);
3364 if (count < 0)
3365 rctl |= E1000_RCTL_UPE;
3366 }
3367
3368 ew32(RCTL, rctl);
3369
3370 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3371 e1000e_vlan_strip_enable(adapter);
3372 else
3373 e1000e_vlan_strip_disable(adapter);
3374}
3375
3376static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3377{
3378 struct e1000_hw *hw = &adapter->hw;
3379 u32 mrqc, rxcsum;
3380 int i;
3381 static const u32 rsskey[10] = {
3382 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
3383 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
3384 };
3385
3386
3387 for (i = 0; i < 10; i++)
3388 ew32(RSSRK(i), rsskey[i]);
3389
3390
3391 for (i = 0; i < 32; i++)
3392 ew32(RETA(i), 0);
3393
3394
3395
3396
3397 rxcsum = er32(RXCSUM);
3398 rxcsum |= E1000_RXCSUM_PCSD;
3399
3400 ew32(RXCSUM, rxcsum);
3401
3402 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
3403 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3404 E1000_MRQC_RSS_FIELD_IPV6 |
3405 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3406 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
3407
3408 ew32(MRQC, mrqc);
3409}
3410
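/**
 * e1000e_get_base_timinca - get default SYSTIM time increment attributes
 * @adapter: board private structure
 * @timinca: pointer to returned time increment attributes
 *
 * Get attributes for incrementing the System Time Register SYSTIML/H at
 * the default base frequency, and set the cyclecounter shift value.
 **/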
3419s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
3420{
3421 struct e1000_hw *hw = &adapter->hw;
3422 u32 incvalue, incperiod, shift;
3423
3424
3425 if ((hw->mac.type == e1000_pch_lpt) &&
3426 !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
3427 !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
3428 u32 fextnvm7 = er32(FEXTNVM7);
3429
3430 if (!(fextnvm7 & (1 << 0))) {
3431 ew32(FEXTNVM7, fextnvm7 | (1 << 0));
3432 e1e_flush();
3433 }
3434 }
3435
3436 switch (hw->mac.type) {
3437 case e1000_pch2lan:
3438 case e1000_pch_lpt:
3439
3440
3441
3442 if ((hw->mac.type != e1000_pch_lpt) ||
3443 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
3444
3445 incperiod = INCPERIOD_96MHz;
3446 incvalue = INCVALUE_96MHz;
3447 shift = INCVALUE_SHIFT_96MHz;
3448 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
3449 break;
3450 }
3451
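		/* fall-through */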
3452 case e1000_82574:
3453 case e1000_82583:
3454
3455 incperiod = INCPERIOD_25MHz;
3456 incvalue = INCVALUE_25MHz;
3457 shift = INCVALUE_SHIFT_25MHz;
3458 adapter->cc.shift = shift;
3459 break;
3460 default:
3461 return -EINVAL;
3462 }
3463
3464 *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
3465 ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK));
3466
3467 return 0;
3468}
3469
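/**
 * e1000e_config_hwtstamp - configure the hardware time stamping registers
 * @adapter: board private structure
 *
 * Enables/disables Tx time stamping and programs the Rx time stamp filters
 * according to adapter->hwtstamp_config.  Incoming time stamping has to be
 * configured via the hardware filters; not every requested filter type is
 * supported directly, so some requests are widened (e.g. to all PTP v2
 * events) and the effective filter is written back into the config.
 **/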
3485static int e1000e_config_hwtstamp(struct e1000_adapter *adapter)
3486{
3487 struct e1000_hw *hw = &adapter->hw;
3488 struct hwtstamp_config *config = &adapter->hwtstamp_config;
3489 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
3490 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
3491 u32 rxmtrl = 0;
3492 u16 rxudp = 0;
3493 bool is_l4 = false;
3494 bool is_l2 = false;
3495 u32 regval;
3496 s32 ret_val;
3497
3498 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
3499 return -EINVAL;
3500
3501
3502 if (config->flags)
3503 return -EINVAL;
3504
3505 switch (config->tx_type) {
3506 case HWTSTAMP_TX_OFF:
3507 tsync_tx_ctl = 0;
3508 break;
3509 case HWTSTAMP_TX_ON:
3510 break;
3511 default:
3512 return -ERANGE;
3513 }
3514
3515 switch (config->rx_filter) {
3516 case HWTSTAMP_FILTER_NONE:
3517 tsync_rx_ctl = 0;
3518 break;
3519 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3520 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3521 rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE;
3522 is_l4 = true;
3523 break;
3524 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3525 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3526 rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE;
3527 is_l4 = true;
3528 break;
3529 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3530
3531 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3532 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3533 is_l2 = true;
3534 break;
3535 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3536
3537 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3538 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3539 is_l2 = true;
3540 break;
3541 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3542
3543
3544
3545 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3546
3547 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3548 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3549 is_l2 = true;
3550 is_l4 = true;
3551 break;
3552 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3553
3554
3555
3556 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3557
3558 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3559 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3560 is_l2 = true;
3561 is_l4 = true;
3562 break;
3563 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3564 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3565
3566
3567
3568 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3569 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
3570 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
3571 is_l2 = true;
3572 is_l4 = true;
3573 break;
3574 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3575
3576
3577
3578
3579 case HWTSTAMP_FILTER_ALL:
3580 is_l2 = true;
3581 is_l4 = true;
3582 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
3583 config->rx_filter = HWTSTAMP_FILTER_ALL;
3584 break;
3585 default:
3586 return -ERANGE;
3587 }
3588
3589
3590 regval = er32(TSYNCTXCTL);
3591 regval &= ~E1000_TSYNCTXCTL_ENABLED;
3592 regval |= tsync_tx_ctl;
3593 ew32(TSYNCTXCTL, regval);
3594 if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
3595 (regval & E1000_TSYNCTXCTL_ENABLED)) {
3596 e_err("Timesync Tx Control register not set as expected\n");
3597 return -EAGAIN;
3598 }
3599
3600
3601 regval = er32(TSYNCRXCTL);
3602 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
3603 regval |= tsync_rx_ctl;
3604 ew32(TSYNCRXCTL, regval);
3605 if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED |
3606 E1000_TSYNCRXCTL_TYPE_MASK)) !=
3607 (regval & (E1000_TSYNCRXCTL_ENABLED |
3608 E1000_TSYNCRXCTL_TYPE_MASK))) {
3609 e_err("Timesync Rx Control register not set as expected\n");
3610 return -EAGAIN;
3611 }
3612
3613
3614 if (is_l2)
3615 rxmtrl |= ETH_P_1588;
3616
3617
3618 ew32(RXMTRL, rxmtrl);
3619
3620
3621 if (is_l4) {
3622 rxudp = PTP_EV_PORT;
3623 cpu_to_be16s(&rxudp);
3624 }
3625 ew32(RXUDP, rxudp);
3626
3627 e1e_flush();
3628
3629
3630 er32(RXSTMPH);
3631 er32(TXSTMPH);
3632
3633
	ret_val = e1000e_get_base_timinca(adapter, &regval);
3635 if (ret_val)
3636 return ret_val;
3637 ew32(TIMINCA, regval);
3638
3639
3640 timecounter_init(&adapter->tc, &adapter->cc,
3641 ktime_to_ns(ktime_get_real()));
3642
3643 return 0;
3644}
3645
3646
3647
3648
3649
3650static void e1000_configure(struct e1000_adapter *adapter)
3651{
3652 struct e1000_ring *rx_ring = adapter->rx_ring;
3653
3654 e1000e_set_rx_mode(adapter->netdev);
3655
3656 e1000_restore_vlan(adapter);
3657 e1000_init_manageability_pt(adapter);
3658
3659 e1000_configure_tx(adapter);
3660
3661 if (adapter->netdev->features & NETIF_F_RXHASH)
3662 e1000e_setup_rss_hash(adapter);
3663 e1000_setup_rctl(adapter);
3664 e1000_configure_rx(adapter);
3665 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
3666}
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676void e1000e_power_up_phy(struct e1000_adapter *adapter)
3677{
3678 if (adapter->hw.phy.ops.power_up)
3679 adapter->hw.phy.ops.power_up(&adapter->hw);
3680
3681 adapter->hw.mac.ops.setup_link(&adapter->hw);
3682}
3683
3684
3685
3686
3687
3688
3689
3690static void e1000_power_down_phy(struct e1000_adapter *adapter)
3691{
3692
3693 if (adapter->wol)
3694 return;
3695
3696 if (adapter->hw.phy.ops.power_down)
3697 adapter->hw.phy.ops.power_down(&adapter->hw);
3698}
3699
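/**
 * e1000e_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * This function boots the hardware and enables settings that require a
 * full configuration cycle: it sizes the Rx/Tx packet buffers, programs
 * the flow control thresholds for the MAC type in use, resets and
 * re-initializes the MAC, and restores EEE and SmartSpeed settings.
 **/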
3708void e1000e_reset(struct e1000_adapter *adapter)
3709{
3710 struct e1000_mac_info *mac = &adapter->hw.mac;
3711 struct e1000_fc_info *fc = &adapter->hw.fc;
3712 struct e1000_hw *hw = &adapter->hw;
3713 u32 tx_space, min_tx_space, min_rx_space;
3714 u32 pba = adapter->pba;
3715 u16 hwm;
3716
3717
3718 ew32(PBA, pba);
3719
3720 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3721
3722
3723
3724
3725
3726
3727
3728 pba = er32(PBA);
3729
3730 tx_space = pba >> 16;
3731
3732 pba &= 0xffff;
3733
3734
3735
3736 min_tx_space = (adapter->max_frame_size +
3737 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
3738 min_tx_space = ALIGN(min_tx_space, 1024);
3739 min_tx_space >>= 10;
3740
3741 min_rx_space = adapter->max_frame_size;
3742 min_rx_space = ALIGN(min_rx_space, 1024);
3743 min_rx_space >>= 10;
3744
3745
3746
3747
3748
3749 if ((tx_space < min_tx_space) &&
3750 ((min_tx_space - tx_space) < pba)) {
3751 pba -= min_tx_space - tx_space;
3752
3753
3754
3755
3756 if (pba < min_rx_space)
3757 pba = min_rx_space;
3758 }
3759
3760 ew32(PBA, pba);
3761 }
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3772 fc->pause_time = 0xFFFF;
3773 else
3774 fc->pause_time = E1000_FC_PAUSE_TIME;
3775 fc->send_xon = true;
3776 fc->current_mode = fc->requested_mode;
3777
3778 switch (hw->mac.type) {
3779 case e1000_ich9lan:
3780 case e1000_ich10lan:
3781 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3782 pba = 14;
3783 ew32(PBA, pba);
3784 fc->high_water = 0x2800;
3785 fc->low_water = fc->high_water - 8;
3786 break;
3787 }
3788
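		/* fall-through */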
3789 default:
3790 hwm = min(((pba << 10) * 9 / 10),
3791 ((pba << 10) - adapter->max_frame_size));
3792
3793 fc->high_water = hwm & E1000_FCRTH_RTH;
3794 fc->low_water = fc->high_water - 8;
3795 break;
3796 case e1000_pchlan:
3797
3798
3799
3800 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3801 fc->high_water = 0x3500;
3802 fc->low_water = 0x1500;
3803 } else {
3804 fc->high_water = 0x5000;
3805 fc->low_water = 0x3000;
3806 }
3807 fc->refresh_time = 0x1000;
3808 break;
3809 case e1000_pch2lan:
3810 case e1000_pch_lpt:
3811 fc->refresh_time = 0x0400;
3812
3813 if (adapter->netdev->mtu <= ETH_DATA_LEN) {
3814 fc->high_water = 0x05C20;
3815 fc->low_water = 0x05048;
3816 fc->pause_time = 0x0650;
3817 break;
3818 }
3819
3820 pba = 14;
3821 ew32(PBA, pba);
3822 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
3823 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
3824 break;
3825 }
3826
3827
3828
3829
3830
3831
3832 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3833 24 << 10);
3834
3835
3836
3837
3838 if (adapter->itr_setting & 0x3) {
3839 if ((adapter->max_frame_size * 2) > (pba << 10)) {
3840 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3841 dev_info(&adapter->pdev->dev,
3842 "Interrupt Throttle Rate off\n");
3843 adapter->flags2 |= FLAG2_DISABLE_AIM;
3844 e1000e_write_itr(adapter, 0);
3845 }
3846 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3847 dev_info(&adapter->pdev->dev,
3848 "Interrupt Throttle Rate on\n");
3849 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3850 adapter->itr = 20000;
3851 e1000e_write_itr(adapter, adapter->itr);
3852 }
3853 }
3854
3855
3856 mac->ops.reset_hw(hw);
3857
3858
3859
3860
3861 if (adapter->flags & FLAG_HAS_AMT)
3862 e1000e_get_hw_control(adapter);
3863
3864 ew32(WUC, 0);
3865
3866 if (mac->ops.init_hw(hw))
3867 e_err("Hardware Error\n");
3868
3869 e1000_update_mng_vlan(adapter);
3870
3871
3872 ew32(VET, ETH_P_8021Q);
3873
3874 e1000e_reset_adaptive(hw);
3875
3876
3877 e1000e_config_hwtstamp(adapter);
3878
3879
3880 if (adapter->flags2 & FLAG2_HAS_EEE) {
3881 s32 ret_val;
3882 u16 adv_addr;
3883
3884 switch (hw->phy.type) {
3885 case e1000_phy_82579:
3886 adv_addr = I82579_EEE_ADVERTISEMENT;
3887 break;
3888 case e1000_phy_i217:
3889 adv_addr = I217_EEE_ADVERTISEMENT;
3890 break;
3891 default:
3892 dev_err(&adapter->pdev->dev,
3893 "Invalid PHY type setting EEE advertisement\n");
3894 return;
3895 }
3896
3897 ret_val = hw->phy.ops.acquire(hw);
3898 if (ret_val) {
3899 dev_err(&adapter->pdev->dev,
3900 "EEE advertisement - unable to acquire PHY\n");
3901 return;
3902 }
3903
3904 e1000_write_emi_reg_locked(hw, adv_addr,
3905 hw->dev_spec.ich8lan.eee_disable ?
3906 0 : adapter->eee_advert);
3907
3908 hw->phy.ops.release(hw);
3909 }
3910
3911 if (!netif_running(adapter->netdev) &&
3912 !test_bit(__E1000_TESTING, &adapter->state)) {
3913 e1000_power_down_phy(adapter);
3914 return;
3915 }
3916
3917 e1000_get_phy_info(hw);
3918
3919 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3920 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3921 u16 phy_data = 0;
3922
3923
3924
3925
3926 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3927 phy_data &= ~IGP02E1000_PM_SPD;
3928 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3929 }
3930}
3931
3932int e1000e_up(struct e1000_adapter *adapter)
3933{
3934 struct e1000_hw *hw = &adapter->hw;
3935
3936
3937 e1000_configure(adapter);
3938
3939 clear_bit(__E1000_DOWN, &adapter->state);
3940
3941 if (adapter->msix_entries)
3942 e1000_configure_msix(adapter);
3943 e1000_irq_enable(adapter);
3944
3945 netif_start_queue(adapter->netdev);
3946
3947
3948 if (adapter->msix_entries)
3949 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3950 else
3951 ew32(ICS, E1000_ICS_LSC);
3952
3953 return 0;
3954}
3955
3956static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3957{
3958 struct e1000_hw *hw = &adapter->hw;
3959
3960 if (!(adapter->flags2 & FLAG2_DMA_BURST))
3961 return;
3962
3963
3964 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3965 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3966
3967
3968 e1e_flush();
3969
3970
3971
3972
3973 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3974 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3975
3976
3977 e1e_flush();
3978}
3979
3980static void e1000e_update_stats(struct e1000_adapter *adapter);
3981
3982void e1000e_down(struct e1000_adapter *adapter)
3983{
3984 struct net_device *netdev = adapter->netdev;
3985 struct e1000_hw *hw = &adapter->hw;
3986 u32 tctl, rctl;
3987
3988
3989
3990
3991 set_bit(__E1000_DOWN, &adapter->state);
3992
3993
3994 rctl = er32(RCTL);
3995 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3996 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3997
3998
3999 netif_stop_queue(netdev);
4000
4001
4002 tctl = er32(TCTL);
4003 tctl &= ~E1000_TCTL_EN;
4004 ew32(TCTL, tctl);
4005
4006
4007 e1e_flush();
4008 usleep_range(10000, 20000);
4009
4010 e1000_irq_disable(adapter);
4011
4012 napi_synchronize(&adapter->napi);
4013
4014 del_timer_sync(&adapter->watchdog_timer);
4015 del_timer_sync(&adapter->phy_info_timer);
4016
4017 netif_carrier_off(netdev);
4018
4019 spin_lock(&adapter->stats64_lock);
4020 e1000e_update_stats(adapter);
4021 spin_unlock(&adapter->stats64_lock);
4022
4023 e1000e_flush_descriptors(adapter);
4024 e1000_clean_tx_ring(adapter->tx_ring);
4025 e1000_clean_rx_ring(adapter->rx_ring);
4026
4027 adapter->link_speed = 0;
4028 adapter->link_duplex = 0;
4029
4030
4031 if ((hw->mac.type >= e1000_pch2lan) &&
4032 (adapter->netdev->mtu > ETH_DATA_LEN) &&
4033 e1000_lv_jumbo_workaround_ich8lan(hw, false))
4034 e_dbg("failed to disable jumbo frame workaround mode\n");
4035
4036 if (!pci_channel_offline(adapter->pdev))
4037 e1000e_reset(adapter);
4038
4039
4040
4041
4042}
4043
4044void e1000e_reinit_locked(struct e1000_adapter *adapter)
4045{
4046 might_sleep();
4047 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4048 usleep_range(1000, 2000);
4049 e1000e_down(adapter);
4050 e1000e_up(adapter);
4051 clear_bit(__E1000_RESETTING, &adapter->state);
4052}
4053
4054
4055
4056
4057
4058static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
4059{
4060 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
4061 cc);
4062 struct e1000_hw *hw = &adapter->hw;
4063 cycle_t systim;
4064
4065
4066 systim = (cycle_t)er32(SYSTIML);
4067 systim |= (cycle_t)er32(SYSTIMH) << 32;
4068
4069 return systim;
4070}
4071
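/**
 * e1000_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/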
4080static int e1000_sw_init(struct e1000_adapter *adapter)
4081{
4082 struct net_device *netdev = adapter->netdev;
4083
4084 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
4085 adapter->rx_ps_bsize0 = 128;
4086 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4087 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
4088 adapter->tx_ring_count = E1000_DEFAULT_TXD;
4089 adapter->rx_ring_count = E1000_DEFAULT_RXD;
4090
4091 spin_lock_init(&adapter->stats64_lock);
4092
4093 e1000e_set_interrupt_capability(adapter);
4094
4095 if (e1000_alloc_queues(adapter))
4096 return -ENOMEM;
4097
4098
4099 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
4100 adapter->cc.read = e1000e_cyclecounter_read;
4101 adapter->cc.mask = CLOCKSOURCE_MASK(64);
4102 adapter->cc.mult = 1;
4103
4104
4105 spin_lock_init(&adapter->systim_lock);
4106 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work);
4107 }
4108
4109
4110 e1000_irq_disable(adapter);
4111
4112 set_bit(__E1000_DOWN, &adapter->state);
4113 return 0;
4114}
4115
4116
4117
4118
4119
4120
4121static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data)
4122{
4123 struct net_device *netdev = data;
4124 struct e1000_adapter *adapter = netdev_priv(netdev);
4125 struct e1000_hw *hw = &adapter->hw;
4126 u32 icr = er32(ICR);
4127
4128 e_dbg("icr is %08X\n", icr);
4129 if (icr & E1000_ICR_RXSEQ) {
4130 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
4131
4132
4133
4134 wmb();
4135 }
4136
4137 return IRQ_HANDLED;
4138}
4139
4140
4141
4142
4143
4144
4145
4146static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
4147{
4148 struct net_device *netdev = adapter->netdev;
4149 struct e1000_hw *hw = &adapter->hw;
4150 int err;
4151
4152
4153
4154 er32(ICR);
4155
4156
4157 e1000_free_irq(adapter);
4158 e1000e_reset_interrupt_capability(adapter);
4159
4160
4161
4162
4163 adapter->flags |= FLAG_MSI_TEST_FAILED;
4164
4165 err = pci_enable_msi(adapter->pdev);
4166 if (err)
4167 goto msi_test_failed;
4168
4169 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
4170 netdev->name, netdev);
4171 if (err) {
4172 pci_disable_msi(adapter->pdev);
4173 goto msi_test_failed;
4174 }
4175
4176
4177
4178
4179 wmb();
4180
4181 e1000_irq_enable(adapter);
4182
4183
4184 ew32(ICS, E1000_ICS_RXSEQ);
4185 e1e_flush();
4186 msleep(100);
4187
4188 e1000_irq_disable(adapter);
4189
4190 rmb();
4191
4192 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
4193 adapter->int_mode = E1000E_INT_MODE_LEGACY;
4194 e_info("MSI interrupt test failed, using legacy interrupt.\n");
4195 } else {
4196 e_dbg("MSI interrupt test succeeded!\n");
4197 }
4198
4199 free_irq(adapter->pdev->irq, netdev);
4200 pci_disable_msi(adapter->pdev);
4201
4202msi_test_failed:
4203 e1000e_set_interrupt_capability(adapter);
4204 return e1000_request_irq(adapter);
4205}
4206
4207
4208
4209
4210
4211
4212
4213static int e1000_test_msi(struct e1000_adapter *adapter)
4214{
4215 int err;
4216 u16 pci_cmd;
4217
4218 if (!(adapter->flags & FLAG_MSI_ENABLED))
4219 return 0;
4220
4221
4222 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4223 if (pci_cmd & PCI_COMMAND_SERR)
4224 pci_write_config_word(adapter->pdev, PCI_COMMAND,
4225 pci_cmd & ~PCI_COMMAND_SERR);
4226
4227 err = e1000_test_msi_interrupt(adapter);
4228
4229
4230 if (pci_cmd & PCI_COMMAND_SERR) {
4231 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4232 pci_cmd |= PCI_COMMAND_SERR;
4233 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
4234 }
4235
4236 return err;
4237}
4238
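/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, and the stack is notified that
 * the interface is ready.
 **/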
4251static int e1000_open(struct net_device *netdev)
4252{
4253 struct e1000_adapter *adapter = netdev_priv(netdev);
4254 struct e1000_hw *hw = &adapter->hw;
4255 struct pci_dev *pdev = adapter->pdev;
4256 int err;
4257
4258
4259 if (test_bit(__E1000_TESTING, &adapter->state))
4260 return -EBUSY;
4261
4262 pm_runtime_get_sync(&pdev->dev);
4263
4264 netif_carrier_off(netdev);
4265
4266
4267 err = e1000e_setup_tx_resources(adapter->tx_ring);
4268 if (err)
4269 goto err_setup_tx;
4270
4271
4272 err = e1000e_setup_rx_resources(adapter->rx_ring);
4273 if (err)
4274 goto err_setup_rx;
4275
4276
4277
4278
4279 if (adapter->flags & FLAG_HAS_AMT) {
4280 e1000e_get_hw_control(adapter);
4281 e1000e_reset(adapter);
4282 }
4283
4284 e1000e_power_up_phy(adapter);
4285
4286 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4287 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
4288 e1000_update_mng_vlan(adapter);
4289
4290
4291 pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
4292 PM_QOS_DEFAULT_VALUE);
4293
4294
4295
4296
4297
4298
4299 e1000_configure(adapter);
4300
4301 err = e1000_request_irq(adapter);
4302 if (err)
4303 goto err_req_irq;
4304
4305
4306
4307
4308
4309 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
4310 err = e1000_test_msi(adapter);
4311 if (err) {
4312 e_err("Interrupt allocation failed\n");
4313 goto err_req_irq;
4314 }
4315 }
4316
4317
4318 clear_bit(__E1000_DOWN, &adapter->state);
4319
4320 napi_enable(&adapter->napi);
4321
4322 e1000_irq_enable(adapter);
4323
4324 adapter->tx_hang_recheck = false;
4325 netif_start_queue(netdev);
4326
4327 adapter->idle_check = true;
4328 hw->mac.get_link_status = true;
4329 pm_runtime_put(&pdev->dev);
4330
4331
4332 if (adapter->msix_entries)
4333 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
4334 else
4335 ew32(ICS, E1000_ICS_LSC);
4336
4337 return 0;
4338
4339err_req_irq:
4340 e1000e_release_hw_control(adapter);
4341 e1000_power_down_phy(adapter);
4342 e1000e_free_rx_resources(adapter->rx_ring);
4343err_setup_rx:
4344 e1000e_free_tx_resources(adapter->tx_ring);
4345err_setup_tx:
4346 e1000e_reset(adapter);
4347 pm_runtime_put_sync(&pdev->dev);
4348
4349 return err;
4350}
4351
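/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled: the adapter is brought down and all transmit
 * and receive resources are freed.
 **/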
4363static int e1000_close(struct net_device *netdev)
4364{
4365 struct e1000_adapter *adapter = netdev_priv(netdev);
4366 struct pci_dev *pdev = adapter->pdev;
4367 int count = E1000_CHECK_RESET_COUNT;
4368
4369 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
4370 usleep_range(10000, 20000);
4371
4372 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
4373
4374 pm_runtime_get_sync(&pdev->dev);
4375
4376 if (!test_bit(__E1000_DOWN, &adapter->state)) {
4377 e1000e_down(adapter);
4378 e1000_free_irq(adapter);
4379 }
4380
4381 napi_disable(&adapter->napi);
4382
4383 e1000_power_down_phy(adapter);
4384
4385 e1000e_free_tx_resources(adapter->tx_ring);
4386 e1000e_free_rx_resources(adapter->rx_ring);
4387
4388
4389
4390
4391 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
4392 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
4393 adapter->mng_vlan_id);
4394
4395
4396
4397
4398 if ((adapter->flags & FLAG_HAS_AMT) &&
4399 !test_bit(__E1000_TESTING, &adapter->state))
4400 e1000e_release_hw_control(adapter);
4401
4402 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
4403
4404 pm_runtime_put_sync(&pdev->dev);
4405
4406 return 0;
4407}
4408
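/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/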
4416static int e1000_set_mac(struct net_device *netdev, void *p)
4417{
4418 struct e1000_adapter *adapter = netdev_priv(netdev);
4419 struct e1000_hw *hw = &adapter->hw;
4420 struct sockaddr *addr = p;
4421
4422 if (!is_valid_ether_addr(addr->sa_data))
4423 return -EADDRNOTAVAIL;
4424
4425 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4426 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4427
4428 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
4429
4430 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4431
4432 e1000e_set_laa_state_82571(&adapter->hw, 1);
4433
4434
4435
4436
4437
4438
4439
4440
4441 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4442 adapter->hw.mac.rar_entry_count - 1);
4443 }
4444
4445 return 0;
4446}
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456static void e1000e_update_phy_task(struct work_struct *work)
4457{
4458 struct e1000_adapter *adapter = container_of(work,
4459 struct e1000_adapter,
4460 update_phy_task);
4461
4462 if (test_bit(__E1000_DOWN, &adapter->state))
4463 return;
4464
4465 e1000_get_phy_info(&adapter->hw);
4466}
4467
4468
4469
4470
4471
4472
4473
4474
4475static void e1000_update_phy_info(unsigned long data)
4476{
4477 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
4478
4479 if (test_bit(__E1000_DOWN, &adapter->state))
4480 return;
4481
4482 schedule_work(&adapter->update_phy_task);
4483}
4484
4485
4486
4487
4488
4489
4490
4491static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4492{
4493 struct e1000_hw *hw = &adapter->hw;
4494 s32 ret_val;
4495 u16 phy_data;
4496
4497 ret_val = hw->phy.ops.acquire(hw);
4498 if (ret_val)
4499 return;
4500
4501
4502
4503
4504 hw->phy.addr = 1;
4505 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4506 &phy_data);
4507 if (ret_val)
4508 goto release;
4509 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
4510 ret_val = hw->phy.ops.set_page(hw,
4511 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4512 if (ret_val)
4513 goto release;
4514 }
4515
4516
4517 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4518 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4519 if (!ret_val)
4520 adapter->stats.scc += phy_data;
4521
4522
4523 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4524 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4525 if (!ret_val)
4526 adapter->stats.ecol += phy_data;
4527
4528
4529 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4530 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4531 if (!ret_val)
4532 adapter->stats.mcc += phy_data;
4533
4534
4535 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4536 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4537 if (!ret_val)
4538 adapter->stats.latecol += phy_data;
4539
4540
4541 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4542 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4543 if (!ret_val)
4544 hw->mac.collision_delta = phy_data;
4545
4546
4547 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4548 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4549 if (!ret_val)
4550 adapter->stats.dc += phy_data;
4551
4552
4553 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4554 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4555 if (!ret_val)
4556 adapter->stats.tncrs += phy_data;
4557
4558release:
4559 hw->phy.ops.release(hw);
4560}
4561
4562
4563
4564
4565
4566static void e1000e_update_stats(struct e1000_adapter *adapter)
4567{
4568 struct net_device *netdev = adapter->netdev;
4569 struct e1000_hw *hw = &adapter->hw;
4570 struct pci_dev *pdev = adapter->pdev;
4571
4572
4573
4574
4575 if (adapter->link_speed == 0)
4576 return;
4577 if (pci_channel_offline(pdev))
4578 return;
4579
4580 adapter->stats.crcerrs += er32(CRCERRS);
4581 adapter->stats.gprc += er32(GPRC);
4582 adapter->stats.gorc += er32(GORCL);
4583 er32(GORCH);
4584 adapter->stats.bprc += er32(BPRC);
4585 adapter->stats.mprc += er32(MPRC);
4586 adapter->stats.roc += er32(ROC);
4587
4588 adapter->stats.mpc += er32(MPC);
4589
4590
4591 if (adapter->link_duplex == HALF_DUPLEX) {
4592 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4593 e1000e_update_phy_stats(adapter);
4594 } else {
4595 adapter->stats.scc += er32(SCC);
4596 adapter->stats.ecol += er32(ECOL);
4597 adapter->stats.mcc += er32(MCC);
4598 adapter->stats.latecol += er32(LATECOL);
4599 adapter->stats.dc += er32(DC);
4600
4601 hw->mac.collision_delta = er32(COLC);
4602
4603 if ((hw->mac.type != e1000_82574) &&
4604 (hw->mac.type != e1000_82583))
4605 adapter->stats.tncrs += er32(TNCRS);
4606 }
4607 adapter->stats.colc += hw->mac.collision_delta;
4608 }
4609
4610 adapter->stats.xonrxc += er32(XONRXC);
4611 adapter->stats.xontxc += er32(XONTXC);
4612 adapter->stats.xoffrxc += er32(XOFFRXC);
4613 adapter->stats.xofftxc += er32(XOFFTXC);
4614 adapter->stats.gptc += er32(GPTC);
4615 adapter->stats.gotc += er32(GOTCL);
4616 er32(GOTCH);
4617 adapter->stats.rnbc += er32(RNBC);
4618 adapter->stats.ruc += er32(RUC);
4619
4620 adapter->stats.mptc += er32(MPTC);
4621 adapter->stats.bptc += er32(BPTC);
4622
4623
4624
4625 hw->mac.tx_packet_delta = er32(TPT);
4626 adapter->stats.tpt += hw->mac.tx_packet_delta;
4627
4628 adapter->stats.algnerrc += er32(ALGNERRC);
4629 adapter->stats.rxerrc += er32(RXERRC);
4630 adapter->stats.cexterr += er32(CEXTERR);
4631 adapter->stats.tsctc += er32(TSCTC);
4632 adapter->stats.tsctfc += er32(TSCTFC);
4633
4634
4635 netdev->stats.multicast = adapter->stats.mprc;
4636 netdev->stats.collisions = adapter->stats.colc;
4637
4638
4639
4640
4641
4642
4643 netdev->stats.rx_errors = adapter->stats.rxerrc +
4644 adapter->stats.crcerrs + adapter->stats.algnerrc +
4645 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
4646 netdev->stats.rx_length_errors = adapter->stats.ruc +
4647 adapter->stats.roc;
4648 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4649 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4650 netdev->stats.rx_missed_errors = adapter->stats.mpc;
4651
4652
4653 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
4654 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4655 netdev->stats.tx_window_errors = adapter->stats.latecol;
4656 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
4657
4658
4659
4660
4661 adapter->stats.mgptc += er32(MGTPTC);
4662 adapter->stats.mgprc += er32(MGTPRC);
4663 adapter->stats.mgpdc += er32(MGTPDC);
4664
4665
4666 if (hw->mac.type == e1000_pch_lpt) {
4667 u32 pbeccsts = er32(PBECCSTS);
4668 adapter->corr_errors +=
4669 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
4670 adapter->uncorr_errors +=
4671 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
4672 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
4673 }
4674}
4675
4676
4677
4678
4679
4680static void e1000_phy_read_status(struct e1000_adapter *adapter)
4681{
4682 struct e1000_hw *hw = &adapter->hw;
4683 struct e1000_phy_regs *phy = &adapter->phy_regs;
4684
4685 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
4686 (er32(STATUS) & E1000_STATUS_LU) &&
4687 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4688 int ret_val;
4689
4690 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
4691 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
4692 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
4693 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa);
4694 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion);
4695 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000);
4696 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000);
4697 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
4698 if (ret_val)
4699 e_warn("Error reading PHY register\n");
4700 } else {
4701
4702
4703
4704 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
4705 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
4706 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
4707 BMSR_ERCAP);
4708 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
4709 ADVERTISE_ALL | ADVERTISE_CSMA);
4710 phy->lpa = 0;
4711 phy->expansion = EXPANSION_ENABLENPAGE;
4712 phy->ctrl1000 = ADVERTISE_1000FULL;
4713 phy->stat1000 = 0;
4714 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
4715 }
4716}
4717
4718static void e1000_print_link_info(struct e1000_adapter *adapter)
4719{
4720 struct e1000_hw *hw = &adapter->hw;
4721 u32 ctrl = er32(CTRL);
4722
4723
4724 pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4725 adapter->netdev->name, adapter->link_speed,
4726 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
4727 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
4728 (ctrl & E1000_CTRL_RFCE) ? "Rx" :
4729 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
4730}
4731
4732static bool e1000e_has_link(struct e1000_adapter *adapter)
4733{
4734 struct e1000_hw *hw = &adapter->hw;
4735 bool link_active = false;
4736 s32 ret_val = 0;
4737
4738
4739
4740
4741
4742
4743 switch (hw->phy.media_type) {
4744 case e1000_media_type_copper:
4745 if (hw->mac.get_link_status) {
4746 ret_val = hw->mac.ops.check_for_link(hw);
4747 link_active = !hw->mac.get_link_status;
4748 } else {
4749 link_active = true;
4750 }
4751 break;
4752 case e1000_media_type_fiber:
4753 ret_val = hw->mac.ops.check_for_link(hw);
4754 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4755 break;
4756 case e1000_media_type_internal_serdes:
4757 ret_val = hw->mac.ops.check_for_link(hw);
4758 link_active = adapter->hw.mac.serdes_has_link;
4759 break;
4760 default:
4761 case e1000_media_type_unknown:
4762 break;
4763 }
4764
4765 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4766 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4767
4768 e_info("Gigabit has been disabled, downgrading speed\n");
4769 }
4770
4771 return link_active;
4772}
4773
4774static void e1000e_enable_receives(struct e1000_adapter *adapter)
4775{
4776
4777 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4778 (adapter->flags & FLAG_RESTART_NOW)) {
4779 struct e1000_hw *hw = &adapter->hw;
4780 u32 rctl = er32(RCTL);
4781 ew32(RCTL, rctl | E1000_RCTL_EN);
4782 adapter->flags &= ~FLAG_RESTART_NOW;
4783 }
4784}
4785
4786static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4787{
4788 struct e1000_hw *hw = &adapter->hw;
4789
4790
4791
4792
4793 if (e1000_check_phy_82574(hw))
4794 adapter->phy_hang_count++;
4795 else
4796 adapter->phy_hang_count = 0;
4797
4798 if (adapter->phy_hang_count > 1) {
4799 adapter->phy_hang_count = 0;
4800 schedule_work(&adapter->reset_task);
4801 }
4802}
4803
4804
4805
4806
4807
4808static void e1000_watchdog(unsigned long data)
4809{
4810 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
4811
4812
4813 schedule_work(&adapter->watchdog_task);
4814
4815
4816}
4817
4818static void e1000_watchdog_task(struct work_struct *work)
4819{
4820 struct e1000_adapter *adapter = container_of(work,
4821 struct e1000_adapter,
4822 watchdog_task);
4823 struct net_device *netdev = adapter->netdev;
4824 struct e1000_mac_info *mac = &adapter->hw.mac;
4825 struct e1000_phy_info *phy = &adapter->hw.phy;
4826 struct e1000_ring *tx_ring = adapter->tx_ring;
4827 struct e1000_hw *hw = &adapter->hw;
4828 u32 link, tctl;
4829
4830 if (test_bit(__E1000_DOWN, &adapter->state))
4831 return;
4832
4833 link = e1000e_has_link(adapter);
4834 if ((netif_carrier_ok(netdev)) && link) {
4835
4836 pm_runtime_resume(netdev->dev.parent);
4837
4838 e1000e_enable_receives(adapter);
4839 goto link_up;
4840 }
4841
4842 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4843 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4844 e1000_update_mng_vlan(adapter);
4845
4846 if (link) {
4847 if (!netif_carrier_ok(netdev)) {
4848 bool txb2b = true;
4849
4850
4851 pm_runtime_resume(netdev->dev.parent);
4852
4853
4854 e1000_phy_read_status(adapter);
4855 mac->ops.get_link_up_info(&adapter->hw,
4856 &adapter->link_speed,
4857 &adapter->link_duplex);
4858 e1000_print_link_info(adapter);
4859
4860
4861 e1000e_check_downshift(hw);
4862 if (phy->speed_downgraded)
4863 netdev_warn(netdev,
4864 "Link Speed was downgraded by SmartSpeed\n");
4865
4866
4867
4868
4869 if ((hw->phy.type == e1000_phy_igp_3 ||
4870 hw->phy.type == e1000_phy_bm) &&
4871 hw->mac.autoneg &&
4872 (adapter->link_speed == SPEED_10 ||
4873 adapter->link_speed == SPEED_100) &&
4874 (adapter->link_duplex == HALF_DUPLEX)) {
4875 u16 autoneg_exp;
4876
4877 e1e_rphy(hw, MII_EXPANSION, &autoneg_exp);
4878
4879 if (!(autoneg_exp & EXPANSION_NWAY))
4880 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
4881 }
 /* adjust timeout factor according to speed/duplex */
4884 adapter->tx_timeout_factor = 1;
4885 switch (adapter->link_speed) {
4886 case SPEED_10:
4887 txb2b = false;
4888 adapter->tx_timeout_factor = 16;
4889 break;
4890 case SPEED_100:
4891 txb2b = false;
4892 adapter->tx_timeout_factor = 10;
4893 break;
4894 }
4895
 /* workaround: re-program speed mode bit after
  * link-up event
  */
4899 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4900 !txb2b) {
4901 u32 tarc0;
4902 tarc0 = er32(TARC(0));
4903 tarc0 &= ~SPEED_MODE_BIT;
4904 ew32(TARC(0), tarc0);
4905 }
4906
 /* disable TSO for 10/100 speeds, to avoid
  * some hardware issues
  */
4910 if (!(adapter->flags & FLAG_TSO_FORCE)) {
4911 switch (adapter->link_speed) {
4912 case SPEED_10:
4913 case SPEED_100:
4914 e_info("10/100 speed: disabling TSO\n");
4915 netdev->features &= ~NETIF_F_TSO;
4916 netdev->features &= ~NETIF_F_TSO6;
4917 break;
4918 case SPEED_1000:
4919 netdev->features |= NETIF_F_TSO;
4920 netdev->features |= NETIF_F_TSO6;
4921 break;
4922 default:
4923
4924 break;
4925 }
4926 }
4927
 /* enable transmits in the hardware, need to do this
  * after setting TARC(0)
  */
4931 tctl = er32(TCTL);
4932 tctl |= E1000_TCTL_EN;
4933 ew32(TCTL, tctl);
4934
 /* Perform any post-link-up configuration before
  * reporting link up.
  */
4938 if (phy->ops.cfg_on_link_up)
4939 phy->ops.cfg_on_link_up(hw);
4940
4941 netif_carrier_on(netdev);
4942
4943 if (!test_bit(__E1000_DOWN, &adapter->state))
4944 mod_timer(&adapter->phy_info_timer,
4945 round_jiffies(jiffies + 2 * HZ));
4946 }
4947 } else {
4948 if (netif_carrier_ok(netdev)) {
4949 adapter->link_speed = 0;
4950 adapter->link_duplex = 0;
 /* Link status message must follow this format */
4952 pr_info("%s NIC Link is Down\n", adapter->netdev->name);
4953 netif_carrier_off(netdev);
4954 if (!test_bit(__E1000_DOWN, &adapter->state))
4955 mod_timer(&adapter->phy_info_timer,
4956 round_jiffies(jiffies + 2 * HZ));
4957
 /* 8000ES2LAN requires a Rx packet buffer work-around
  * on link down event; reset the controller to flush
  * the Rx packet buffer.
  *
  * If the link is lost the controller stops DMA, but
  * if there is queued Tx work it cannot be done, so
  * reset the controller to flush the Tx packet buffers.
  */
4965 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
4966 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
4967 adapter->flags |= FLAG_RESTART_NOW;
4968 else
4969 pm_schedule_suspend(netdev->dev.parent,
4970 LINK_TIMEOUT);
4971 }
4972 }
4973
4974link_up:
4975 spin_lock(&adapter->stats64_lock);
4976 e1000e_update_stats(adapter);
4977
4978 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4979 adapter->tpt_old = adapter->stats.tpt;
4980 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
4981 adapter->colc_old = adapter->stats.colc;
4982
4983 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
4984 adapter->gorc_old = adapter->stats.gorc;
4985 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
4986 adapter->gotc_old = adapter->stats.gotc;
4987 spin_unlock(&adapter->stats64_lock);
4988
4989 if (adapter->flags & FLAG_RESTART_NOW) {
4990 schedule_work(&adapter->reset_task);
4991
4992 return;
4993 }
4994
4995 e1000e_update_adaptive(&adapter->hw);
4996
 /* Simple mode for Interrupt Throttle Rate (ITR) */
4998 if (adapter->itr_setting == 4) {
 /* Symmetric Tx/Rx gets a reduced ITR=2000;
  * Total asymmetry of Tx/Rx gets ITR=8000;
  * everyone else is between 2000-8000
  */
5003 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
5004 u32 dif = (adapter->gotc > adapter->gorc ?
5005 adapter->gotc - adapter->gorc :
5006 adapter->gorc - adapter->gotc) / 10000;
5007 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
5008
5009 e1000e_write_itr(adapter, itr);
5010 }
5011
5012
5013 if (adapter->msix_entries)
5014 ew32(ICS, adapter->rx_ring->ims_val);
5015 else
5016 ew32(ICS, E1000_ICS_RXDMT0);
5017
5018
5019 e1000e_flush_descriptors(adapter);
5020
5021
5022 adapter->detect_tx_hung = true;
5023
 /* With 82571 controllers, LAA may be overwritten due to controller
  * reset from the other port.  Set the appropriate LAA in RAR[0]
  */
5027 if (e1000e_get_laa_state_82571(hw))
5028 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
5029
5030 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
5031 e1000e_check_82574_phy_workaround(adapter);
5032
5033
5034 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
5035 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) &&
5036 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) {
5037 er32(RXSTMPH);
5038 adapter->rx_hwtstamp_cleared++;
5039 } else {
5040 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP;
5041 }
5042 }
5043
5044
5045 if (!test_bit(__E1000_DOWN, &adapter->state))
5046 mod_timer(&adapter->watchdog_timer,
5047 round_jiffies(jiffies + 2 * HZ));
5048}
5049
5050#define E1000_TX_FLAGS_CSUM 0x00000001
5051#define E1000_TX_FLAGS_VLAN 0x00000002
5052#define E1000_TX_FLAGS_TSO 0x00000004
5053#define E1000_TX_FLAGS_IPV4 0x00000008
5054#define E1000_TX_FLAGS_NO_FCS 0x00000010
5055#define E1000_TX_FLAGS_HWTSTAMP 0x00000020
5056#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
5057#define E1000_TX_FLAGS_VLAN_SHIFT 16
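
/* The low E1000_TX_FLAGS_* bits above are per-packet attributes; the upper
 * 16 bits (E1000_TX_FLAGS_VLAN_MASK) carry the 802.1Q tag itself, shifted in
 * by E1000_TX_FLAGS_VLAN_SHIFT in e1000_xmit_frame() and copied into the
 * descriptor's VLAN field by e1000_tx_queue().
 */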
5058
5059static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
5060{
5061 struct e1000_context_desc *context_desc;
5062 struct e1000_buffer *buffer_info;
5063 unsigned int i;
5064 u32 cmd_length = 0;
5065 u16 ipcse = 0, mss;
5066 u8 ipcss, ipcso, tucss, tucso, hdr_len;
5067
5068 if (!skb_is_gso(skb))
5069 return 0;
5070
5071 if (skb_header_cloned(skb)) {
5072 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5073
5074 if (err)
5075 return err;
5076 }
5077
5078 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5079 mss = skb_shinfo(skb)->gso_size;
5080 if (skb->protocol == htons(ETH_P_IP)) {
5081 struct iphdr *iph = ip_hdr(skb);
5082 iph->tot_len = 0;
5083 iph->check = 0;
5084 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
5085 0, IPPROTO_TCP, 0);
5086 cmd_length = E1000_TXD_CMD_IP;
5087 ipcse = skb_transport_offset(skb) - 1;
5088 } else if (skb_is_gso_v6(skb)) {
5089 ipv6_hdr(skb)->payload_len = 0;
5090 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5091 &ipv6_hdr(skb)->daddr,
5092 0, IPPROTO_TCP, 0);
5093 ipcse = 0;
5094 }
5095 ipcss = skb_network_offset(skb);
5096 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
5097 tucss = skb_transport_offset(skb);
5098 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
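 /* The values above are byte offsets from the start of the frame and
  * program the offload context descriptor's checksum engine:
  *   ipcss/ipcso/ipcse - start, store location and end of the IP checksum
  *   tucss/tucso       - start and store location of the TCP checksum
  * (tucse is left 0 below, meaning "checksum to the end of the packet").
  */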
5099
5100 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
5101 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
5102
5103 i = tx_ring->next_to_use;
5104 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5105 buffer_info = &tx_ring->buffer_info[i];
5106
5107 context_desc->lower_setup.ip_fields.ipcss = ipcss;
5108 context_desc->lower_setup.ip_fields.ipcso = ipcso;
5109 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
5110 context_desc->upper_setup.tcp_fields.tucss = tucss;
5111 context_desc->upper_setup.tcp_fields.tucso = tucso;
5112 context_desc->upper_setup.tcp_fields.tucse = 0;
5113 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
5114 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
5115 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
5116
5117 buffer_info->time_stamp = jiffies;
5118 buffer_info->next_to_watch = i;
5119
5120 i++;
5121 if (i == tx_ring->count)
5122 i = 0;
5123 tx_ring->next_to_use = i;
5124
5125 return 1;
5126}
5127
5128static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
5129{
5130 struct e1000_adapter *adapter = tx_ring->adapter;
5131 struct e1000_context_desc *context_desc;
5132 struct e1000_buffer *buffer_info;
5133 unsigned int i;
5134 u8 css;
5135 u32 cmd_len = E1000_TXD_CMD_DEXT;
5136 __be16 protocol;
5137
5138 if (skb->ip_summed != CHECKSUM_PARTIAL)
5139 return 0;
5140
5141 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
5142 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
5143 else
5144 protocol = skb->protocol;
5145
5146 switch (protocol) {
5147 case cpu_to_be16(ETH_P_IP):
5148 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
5149 cmd_len |= E1000_TXD_CMD_TCP;
5150 break;
5151 case cpu_to_be16(ETH_P_IPV6):
5152
5153 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
5154 cmd_len |= E1000_TXD_CMD_TCP;
5155 break;
5156 default:
5157 if (unlikely(net_ratelimit()))
5158 e_warn("checksum_partial proto=%x!\n",
5159 be16_to_cpu(protocol));
5160 break;
5161 }
5162
5163 css = skb_checksum_start_offset(skb);
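 /* css is where the stack asked checksumming to start; the hardware will
  * insert the result at css + skb->csum_offset (tucso below) and, with
  * tucse == 0, checksum from css through the end of the frame.
  */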
5164
5165 i = tx_ring->next_to_use;
5166 buffer_info = &tx_ring->buffer_info[i];
5167 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5168
5169 context_desc->lower_setup.ip_config = 0;
5170 context_desc->upper_setup.tcp_fields.tucss = css;
5171 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
5172 context_desc->upper_setup.tcp_fields.tucse = 0;
5173 context_desc->tcp_seg_setup.data = 0;
5174 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
5175
5176 buffer_info->time_stamp = jiffies;
5177 buffer_info->next_to_watch = i;
5178
5179 i++;
5180 if (i == tx_ring->count)
5181 i = 0;
5182 tx_ring->next_to_use = i;
5183
5184 return 1;
5185}
5186
5187static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
5188 unsigned int first, unsigned int max_per_txd,
5189 unsigned int nr_frags)
5190{
5191 struct e1000_adapter *adapter = tx_ring->adapter;
5192 struct pci_dev *pdev = adapter->pdev;
5193 struct e1000_buffer *buffer_info;
5194 unsigned int len = skb_headlen(skb);
5195 unsigned int offset = 0, size, count = 0, i;
5196 unsigned int f, bytecount, segs;
5197
5198 i = tx_ring->next_to_use;
5199
5200 while (len) {
5201 buffer_info = &tx_ring->buffer_info[i];
5202 size = min(len, max_per_txd);
5203
5204 buffer_info->length = size;
5205 buffer_info->time_stamp = jiffies;
5206 buffer_info->next_to_watch = i;
5207 buffer_info->dma = dma_map_single(&pdev->dev,
5208 skb->data + offset,
5209 size, DMA_TO_DEVICE);
5210 buffer_info->mapped_as_page = false;
5211 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5212 goto dma_error;
5213
5214 len -= size;
5215 offset += size;
5216 count++;
5217
5218 if (len) {
5219 i++;
5220 if (i == tx_ring->count)
5221 i = 0;
5222 }
5223 }
5224
5225 for (f = 0; f < nr_frags; f++) {
5226 const struct skb_frag_struct *frag;
5227
5228 frag = &skb_shinfo(skb)->frags[f];
5229 len = skb_frag_size(frag);
5230 offset = 0;
5231
5232 while (len) {
5233 i++;
5234 if (i == tx_ring->count)
5235 i = 0;
5236
5237 buffer_info = &tx_ring->buffer_info[i];
5238 size = min(len, max_per_txd);
5239
5240 buffer_info->length = size;
5241 buffer_info->time_stamp = jiffies;
5242 buffer_info->next_to_watch = i;
5243 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
5244 offset, size,
5245 DMA_TO_DEVICE);
5246 buffer_info->mapped_as_page = true;
5247 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5248 goto dma_error;
5249
5250 len -= size;
5251 offset += size;
5252 count++;
5253 }
5254 }
5255
5256 segs = skb_shinfo(skb)->gso_segs ? : 1;
5257
5258 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
5259
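 /* Record per-packet accounting on the last buffer used and point the
  * first buffer's next_to_watch at it, so the Tx cleanup path knows which
  * descriptor completes the whole packet (not just one fragment).
  */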
5260 tx_ring->buffer_info[i].skb = skb;
5261 tx_ring->buffer_info[i].segs = segs;
5262 tx_ring->buffer_info[i].bytecount = bytecount;
5263 tx_ring->buffer_info[first].next_to_watch = i;
5264
5265 return count;
5266
5267dma_error:
5268 dev_err(&pdev->dev, "Tx DMA map failed\n");
5269 buffer_info->dma = 0;
5270 if (count)
5271 count--;
5272
5273 while (count--) {
5274 if (i == 0)
5275 i += tx_ring->count;
5276 i--;
5277 buffer_info = &tx_ring->buffer_info[i];
5278 e1000_put_txbuf(tx_ring, buffer_info);
5279 }
5280
5281 return 0;
5282}
5283
5284static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5285{
5286 struct e1000_adapter *adapter = tx_ring->adapter;
5287 struct e1000_tx_desc *tx_desc = NULL;
5288 struct e1000_buffer *buffer_info;
5289 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
5290 unsigned int i;
5291
5292 if (tx_flags & E1000_TX_FLAGS_TSO) {
5293 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
5294 E1000_TXD_CMD_TSE;
5295 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5296
5297 if (tx_flags & E1000_TX_FLAGS_IPV4)
5298 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
5299 }
5300
5301 if (tx_flags & E1000_TX_FLAGS_CSUM) {
5302 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5303 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5304 }
5305
5306 if (tx_flags & E1000_TX_FLAGS_VLAN) {
5307 txd_lower |= E1000_TXD_CMD_VLE;
5308 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
5309 }
5310
5311 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5312 txd_lower &= ~(E1000_TXD_CMD_IFCS);
5313
5314 if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) {
5315 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5316 txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
5317 }
5318
5319 i = tx_ring->next_to_use;
5320
5321 do {
5322 buffer_info = &tx_ring->buffer_info[i];
5323 tx_desc = E1000_TX_DESC(*tx_ring, i);
5324 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
5325 tx_desc->lower.data = cpu_to_le32(txd_lower |
5326 buffer_info->length);
5327 tx_desc->upper.data = cpu_to_le32(txd_upper);
5328
5329 i++;
5330 if (i == tx_ring->count)
5331 i = 0;
5332 } while (--count > 0);
5333
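 /* Only the last descriptor gets adapter->txd_cmd OR'd in: that mask,
  * set up at init time, typically carries the end-of-packet and
  * report-status/interrupt-delay bits, so the hardware writes back status
  * once per packet rather than once per buffer.
  */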
5334 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
5335
 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
5337 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5338 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
5339
 /* Force memory writes to complete before letting h/w
  * know there are new descriptors to fetch.  (Only
  * applicable for weak-ordered memory model archs,
  * such as IA-64).
  */
5345 wmb();
5346
5347 tx_ring->next_to_use = i;
5348
5349 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5350 e1000e_update_tdt_wa(tx_ring, i);
5351 else
5352 writel(i, tx_ring->tail);
5353
 /* we need this if more than one processor can write to our tail
  * at a time, it synchronizes IO on IA64/Altix systems
  */
5357 mmiowb();
5358}
5359
5360#define MINIMUM_DHCP_PACKET_SIZE 282
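/* 282 is the smallest frame that can carry a BOOTP/DHCP request:
 * 14 (Ethernet) + 20 (IPv4) + 8 (UDP) + 240 (fixed BOOTP fields plus the
 * DHCP magic cookie).
 */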
5361static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5362 struct sk_buff *skb)
5363{
5364 struct e1000_hw *hw = &adapter->hw;
5365 u16 length, offset;
5366
5367 if (vlan_tx_tag_present(skb) &&
5368 !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
5369 (adapter->hw.mng_cookie.status &
5370 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
5371 return 0;
5372
5373 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
5374 return 0;
5375
5376 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
5377 return 0;
5378
5379 {
5380 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
5381 struct udphdr *udp;
5382
5383 if (ip->protocol != IPPROTO_UDP)
5384 return 0;
5385
5386 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
5387 if (ntohs(udp->dest) != 67)
5388 return 0;
5389
5390 offset = (u8 *)udp + 8 - skb->data;
5391 length = skb->len - offset;
5392 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
5393 }
5394
5395 return 0;
5396}
5397
5398static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5399{
5400 struct e1000_adapter *adapter = tx_ring->adapter;
5401
5402 netif_stop_queue(adapter->netdev);
5403
 /* Herbert's original patch had:
  *  smp_mb__after_netif_stop_queue();
  * but since that doesn't exist yet, just open code it.
  */
5407 smp_mb();
5408
 /* We need to check again in case another CPU has just
  * made room available.
  */
5412 if (e1000_desc_unused(tx_ring) < size)
5413 return -EBUSY;
5414
5415
5416 netif_start_queue(adapter->netdev);
5417 ++adapter->restart_queue;
5418 return 0;
5419}
5420
5421static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5422{
5423 BUG_ON(size > tx_ring->count);
5424
5425 if (e1000_desc_unused(tx_ring) >= size)
5426 return 0;
5427 return __e1000_maybe_stop_tx(tx_ring, size);
5428}
5429
5430static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5431 struct net_device *netdev)
5432{
5433 struct e1000_adapter *adapter = netdev_priv(netdev);
5434 struct e1000_ring *tx_ring = adapter->tx_ring;
5435 unsigned int first;
5436 unsigned int tx_flags = 0;
5437 unsigned int len = skb_headlen(skb);
5438 unsigned int nr_frags;
5439 unsigned int mss;
5440 int count = 0;
5441 int tso;
5442 unsigned int f;
5443
5444 if (test_bit(__E1000_DOWN, &adapter->state)) {
5445 dev_kfree_skb_any(skb);
5446 return NETDEV_TX_OK;
5447 }
5448
5449 if (skb->len <= 0) {
5450 dev_kfree_skb_any(skb);
5451 return NETDEV_TX_OK;
5452 }
5453
 /* The minimum packet size with TCTL.PSP set is 17 bytes so
  * pad skb in order to meet this minimum size requirement
  */
5457 if (unlikely(skb->len < 17)) {
5458 if (skb_pad(skb, 17 - skb->len))
5459 return NETDEV_TX_OK;
5460 skb->len = 17;
5461 skb_set_tail_pointer(skb, 17);
5462 }
5463
5464 mss = skb_shinfo(skb)->gso_size;
5465 if (mss) {
5466 u8 hdr_len;
5467
 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
  * points to just header, pull a few bytes of payload from
  * frags into skb->data
  */
5472 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 /* we do this workaround for ES2LAN, but it is unnecessary,
  * avoiding it could save a lot of cycles
  */
5476 if (skb->data_len && (hdr_len == len)) {
5477 unsigned int pull_size;
5478
5479 pull_size = min_t(unsigned int, 4, skb->data_len);
5480 if (!__pskb_pull_tail(skb, pull_size)) {
5481 e_err("__pskb_pull_tail failed.\n");
5482 dev_kfree_skb_any(skb);
5483 return NETDEV_TX_OK;
5484 }
5485 len = skb_headlen(skb);
5486 }
5487 }
5488
 /* reserve a descriptor for the offload context */
5490 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5491 count++;
5492 count++;
5493
5494 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
5495
5496 nr_frags = skb_shinfo(skb)->nr_frags;
5497 for (f = 0; f < nr_frags; f++)
5498 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5499 adapter->tx_fifo_limit);
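
 /* count is now a worst-case estimate of the descriptors this skb needs:
  * one for an offload context, one extra reserved slot (both counted
  * above), plus one data descriptor per tx_fifo_limit-sized chunk of the
  * linear area and of each fragment.
  */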
5500
5501 if (adapter->hw.mac.tx_pkt_filtering)
5502 e1000_transfer_dhcp_info(adapter, skb);
5503
 /* need: count + 2 desc gap to keep tail from touching
  * head, otherwise try next time
  */
5507 if (e1000_maybe_stop_tx(tx_ring, count + 2))
5508 return NETDEV_TX_BUSY;
5509
5510 if (vlan_tx_tag_present(skb)) {
5511 tx_flags |= E1000_TX_FLAGS_VLAN;
5512 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
5513 }
5514
5515 first = tx_ring->next_to_use;
5516
5517 tso = e1000_tso(tx_ring, skb);
5518 if (tso < 0) {
5519 dev_kfree_skb_any(skb);
5520 return NETDEV_TX_OK;
5521 }
5522
5523 if (tso)
5524 tx_flags |= E1000_TX_FLAGS_TSO;
5525 else if (e1000_tx_csum(tx_ring, skb))
5526 tx_flags |= E1000_TX_FLAGS_CSUM;
5527
 /* Old method was to assume IPv4 packet by default if TSO was enabled.
  * 82571 hardware supports TSO capabilities for IPv6 as well, so we can
  * no longer assume, we must check.
  */
5532 if (skb->protocol == htons(ETH_P_IP))
5533 tx_flags |= E1000_TX_FLAGS_IPV4;
5534
5535 if (unlikely(skb->no_fcs))
5536 tx_flags |= E1000_TX_FLAGS_NO_FCS;
5537
 /* if count is 0 then a DMA mapping error has occurred */
5539 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5540 nr_frags);
5541 if (count) {
5542 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
5543 !adapter->tx_hwtstamp_skb)) {
5544 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5545 tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
5546 adapter->tx_hwtstamp_skb = skb_get(skb);
5547 schedule_work(&adapter->tx_hwtstamp_work);
5548 } else {
5549 skb_tx_timestamp(skb);
5550 }
5551
5552 netdev_sent_queue(netdev, skb->len);
5553 e1000_tx_queue(tx_ring, tx_flags, count);
 /* Make sure there is space in the ring for the next send. */
5555 e1000_maybe_stop_tx(tx_ring,
5556 (MAX_SKB_FRAGS *
5557 DIV_ROUND_UP(PAGE_SIZE,
5558 adapter->tx_fifo_limit) + 2));
5559 } else {
5560 dev_kfree_skb_any(skb);
5561 tx_ring->buffer_info[first].time_stamp = 0;
5562 tx_ring->next_to_use = first;
5563 }
5564
5565 return NETDEV_TX_OK;
5566}
5567
/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
5572static void e1000_tx_timeout(struct net_device *netdev)
5573{
5574 struct e1000_adapter *adapter = netdev_priv(netdev);
5575
5576
5577 adapter->tx_timeout_count++;
5578 schedule_work(&adapter->reset_task);
5579}
5580
5581static void e1000_reset_task(struct work_struct *work)
5582{
5583 struct e1000_adapter *adapter;
5584 adapter = container_of(work, struct e1000_adapter, reset_task);
5585
5586
5587 if (test_bit(__E1000_DOWN, &adapter->state))
5588 return;
5589
5590 if (!(adapter->flags & FLAG_RESTART_NOW)) {
5591 e1000e_dump(adapter);
5592 e_err("Reset adapter unexpectedly\n");
5593 }
5594 e1000e_reinit_locked(adapter);
5595}
5596
/**
 * e1000e_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 *
 * Returns the address of the device statistics structure.
 **/
5604struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5605 struct rtnl_link_stats64 *stats)
5606{
5607 struct e1000_adapter *adapter = netdev_priv(netdev);
5608
5609 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5610 spin_lock(&adapter->stats64_lock);
5611 e1000e_update_stats(adapter);
5612
5613 stats->rx_bytes = adapter->stats.gorc;
5614 stats->rx_packets = adapter->stats.gprc;
5615 stats->tx_bytes = adapter->stats.gotc;
5616 stats->tx_packets = adapter->stats.gptc;
5617 stats->multicast = adapter->stats.mprc;
5618 stats->collisions = adapter->stats.colc;
 /* Rx Errors */

 /* RLEC on some newer hardware can be incorrect so build
  * our own version based on RUC and ROC
  */
5625 stats->rx_errors = adapter->stats.rxerrc +
5626 adapter->stats.crcerrs + adapter->stats.algnerrc +
5627 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
5628 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
5629 stats->rx_crc_errors = adapter->stats.crcerrs;
5630 stats->rx_frame_errors = adapter->stats.algnerrc;
5631 stats->rx_missed_errors = adapter->stats.mpc;
5632
5633
5634 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
5635 stats->tx_aborted_errors = adapter->stats.ecol;
5636 stats->tx_window_errors = adapter->stats.latecol;
5637 stats->tx_carrier_errors = adapter->stats.tncrs;
5638
5639
5640
5641 spin_unlock(&adapter->stats64_lock);
5642 return stats;
5643}
5644
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
5652static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5653{
5654 struct e1000_adapter *adapter = netdev_priv(netdev);
5655 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5656
5657
5658 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
5659 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5660 e_err("Jumbo Frames not supported.\n");
5661 return -EINVAL;
5662 }
5663
5664
5665 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
5666 (max_frame > adapter->max_hw_frame_size)) {
5667 e_err("Unsupported MTU setting\n");
5668 return -EINVAL;
5669 }
5670
5671
5672 if ((adapter->hw.mac.type >= e1000_pch2lan) &&
5673 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5674 (new_mtu > ETH_DATA_LEN)) {
5675 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
5676 return -EINVAL;
5677 }
5678
5679 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5680 usleep_range(1000, 2000);
5681
5682 adapter->max_frame_size = max_frame;
5683 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5684 netdev->mtu = new_mtu;
5685 if (netif_running(netdev))
5686 e1000e_down(adapter);
5687
 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
  * means we reserve 2 more, this pushes us to allocate from the next
  * larger slab size.
  * i.e. RXBUFFER_2048 --> size-4096 slab
  * However with the new *_jumbo_rx* routines, jumbo receives will use
  * fragmented skbs
  */
5696 if (max_frame <= 2048)
5697 adapter->rx_buffer_len = 2048;
5698 else
5699 adapter->rx_buffer_len = 4096;
5700
5701
5702 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
5703 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
5704 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
5705 + ETH_FCS_LEN;
5706
5707 if (netif_running(netdev))
5708 e1000e_up(adapter);
5709 else
5710 e1000e_reset(adapter);
5711
5712 clear_bit(__E1000_RESETTING, &adapter->state);
5713
5714 return 0;
5715}
5716
5717static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
5718 int cmd)
5719{
5720 struct e1000_adapter *adapter = netdev_priv(netdev);
5721 struct mii_ioctl_data *data = if_mii(ifr);
5722
5723 if (adapter->hw.phy.media_type != e1000_media_type_copper)
5724 return -EOPNOTSUPP;
5725
5726 switch (cmd) {
5727 case SIOCGMIIPHY:
5728 data->phy_id = adapter->hw.phy.addr;
5729 break;
5730 case SIOCGMIIREG:
5731 e1000_phy_read_status(adapter);
5732
5733 switch (data->reg_num & 0x1F) {
5734 case MII_BMCR:
5735 data->val_out = adapter->phy_regs.bmcr;
5736 break;
5737 case MII_BMSR:
5738 data->val_out = adapter->phy_regs.bmsr;
5739 break;
5740 case MII_PHYSID1:
5741 data->val_out = (adapter->hw.phy.id >> 16);
5742 break;
5743 case MII_PHYSID2:
5744 data->val_out = (adapter->hw.phy.id & 0xFFFF);
5745 break;
5746 case MII_ADVERTISE:
5747 data->val_out = adapter->phy_regs.advertise;
5748 break;
5749 case MII_LPA:
5750 data->val_out = adapter->phy_regs.lpa;
5751 break;
5752 case MII_EXPANSION:
5753 data->val_out = adapter->phy_regs.expansion;
5754 break;
5755 case MII_CTRL1000:
5756 data->val_out = adapter->phy_regs.ctrl1000;
5757 break;
5758 case MII_STAT1000:
5759 data->val_out = adapter->phy_regs.stat1000;
5760 break;
5761 case MII_ESTATUS:
5762 data->val_out = adapter->phy_regs.estatus;
5763 break;
5764 default:
5765 return -EIO;
5766 }
5767 break;
5768 case SIOCSMIIREG:
5769 default:
5770 return -EOPNOTSUPP;
5771 }
5772 return 0;
5773}
5774
/**
 * e1000e_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware filters.
 * Not all combinations are supported, in particular event type has to be
 * specified. Matching the kind of event packet is not supported, with the
 * exception of "all V2 events regardless of level 2 or 4".
 **/
5791static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
5792{
5793 struct e1000_adapter *adapter = netdev_priv(netdev);
5794 struct hwtstamp_config config;
5795 int ret_val;
5796
5797 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5798 return -EFAULT;
5799
5800 adapter->hwtstamp_config = config;
5801
5802 ret_val = e1000e_config_hwtstamp(adapter);
5803 if (ret_val)
5804 return ret_val;
5805
5806 config = adapter->hwtstamp_config;
5807
5808 switch (config.rx_filter) {
5809 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
5810 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
5811 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5812 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5813 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5814 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 /* With V2 type filters which specify a Sync or Delay Request,
  * Path Delay Request/Response messages are also time stamped
  * by hardware so notify the caller the requested packets plus
  * some others are time stamped.
  */
5820 config.rx_filter = HWTSTAMP_FILTER_SOME;
5821 break;
5822 default:
5823 break;
5824 }
5825
5826 return copy_to_user(ifr->ifr_data, &config,
5827 sizeof(config)) ? -EFAULT : 0;
5828}
5829
5830static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5831{
5832 switch (cmd) {
5833 case SIOCGMIIPHY:
5834 case SIOCGMIIREG:
5835 case SIOCSMIIREG:
5836 return e1000_mii_ioctl(netdev, ifr, cmd);
5837 case SIOCSHWTSTAMP:
5838 return e1000e_hwtstamp_ioctl(netdev, ifr);
5839 default:
5840 return -EOPNOTSUPP;
5841 }
5842}
5843
5844static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5845{
5846 struct e1000_hw *hw = &adapter->hw;
5847 u32 i, mac_reg;
5848 u16 phy_reg, wuc_enable;
5849 int retval;
5850
5851
5852 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
5853
5854 retval = hw->phy.ops.acquire(hw);
5855 if (retval) {
5856 e_err("Could not acquire PHY\n");
5857 return retval;
5858 }
5859
5860
5861 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5862 if (retval)
5863 goto release;
5864
5865
5866 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5867 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5868 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
5869 (u16)(mac_reg & 0xFFFF));
5870 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
5871 (u16)((mac_reg >> 16) & 0xFFFF));
5872 }
5873
 /* configure PHY Rx Control register from the MAC RCTL settings */
5875 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
5876 mac_reg = er32(RCTL);
5877 if (mac_reg & E1000_RCTL_UPE)
5878 phy_reg |= BM_RCTL_UPE;
5879 if (mac_reg & E1000_RCTL_MPE)
5880 phy_reg |= BM_RCTL_MPE;
5881 phy_reg &= ~(BM_RCTL_MO_MASK);
5882 if (mac_reg & E1000_RCTL_MO_3)
5883 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5884 << BM_RCTL_MO_SHIFT);
5885 if (mac_reg & E1000_RCTL_BAM)
5886 phy_reg |= BM_RCTL_BAM;
5887 if (mac_reg & E1000_RCTL_PMCF)
5888 phy_reg |= BM_RCTL_PMCF;
5889 mac_reg = er32(CTRL);
5890 if (mac_reg & E1000_CTRL_RFCE)
5891 phy_reg |= BM_RCTL_RFCE;
5892 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
5893
5894
5895 ew32(WUFC, wufc);
5896 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5897
5898
5899 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
5900 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5901
5902
5903 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5904 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5905 if (retval)
5906 e_err("Could not set PHY Host Wakeup bit\n");
5907release:
5908 hw->phy.ops.release(hw);
5909
5910 return retval;
5911}
5912
5913static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5914{
5915 struct net_device *netdev = pci_get_drvdata(pdev);
5916 struct e1000_adapter *adapter = netdev_priv(netdev);
5917 struct e1000_hw *hw = &adapter->hw;
5918 u32 ctrl, ctrl_ext, rctl, status;
5919
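 /* Runtime suspend arms only wake-on-link-change; a full system suspend
  * uses whatever Wake-on-LAN filters the user configured (adapter->wol).
  */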
5920 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
5921 int retval = 0;
5922
5923 netif_device_detach(netdev);
5924
5925 if (netif_running(netdev)) {
5926 int count = E1000_CHECK_RESET_COUNT;
5927
5928 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
5929 usleep_range(10000, 20000);
5930
5931 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5932 e1000e_down(adapter);
5933 e1000_free_irq(adapter);
5934 }
5935 e1000e_reset_interrupt_capability(adapter);
5936
5937 status = er32(STATUS);
5938 if (status & E1000_STATUS_LU)
5939 wufc &= ~E1000_WUFC_LNKC;
5940
5941 if (wufc) {
5942 e1000_setup_rctl(adapter);
5943 e1000e_set_rx_mode(netdev);
5944
5945
5946 if (wufc & E1000_WUFC_MC) {
5947 rctl = er32(RCTL);
5948 rctl |= E1000_RCTL_MPE;
5949 ew32(RCTL, rctl);
5950 }
5951
5952 ctrl = er32(CTRL);
5953 ctrl |= E1000_CTRL_ADVD3WUC;
5954 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
5955 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
5956 ew32(CTRL, ctrl);
5957
5958 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
5959 adapter->hw.phy.media_type ==
5960 e1000_media_type_internal_serdes) {
5961
5962 ctrl_ext = er32(CTRL_EXT);
5963 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
5964 ew32(CTRL_EXT, ctrl_ext);
5965 }
5966
5967 if (adapter->flags & FLAG_IS_ICH)
5968 e1000_suspend_workarounds_ich8lan(&adapter->hw);
5969
5970
5971 e1000e_disable_pcie_master(&adapter->hw);
5972
5973 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5974
5975 retval = e1000_init_phy_wakeup(adapter, wufc);
5976 if (retval)
5977 return retval;
5978 } else {
5979
5980 ew32(WUFC, wufc);
5981 ew32(WUC, E1000_WUC_PME_EN);
5982 }
5983 } else {
5984 ew32(WUC, 0);
5985 ew32(WUFC, 0);
5986 }
5987
5988 if (adapter->hw.phy.type == e1000_phy_igp_3)
5989 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5990
 /* Release control of h/w to f/w */
5994 e1000e_release_hw_control(adapter);
5995
5996 pci_clear_master(pdev);
5997
 /* The pci-e switch on some quad port adapters will report a
  * correctable error when the MAC transitions from D0 to D3.  To
  * prevent this we need to mask off the correctable errors on the
  * downstream port of the pci-e switch.
  *
  * We don't have the associated upstream bridge while assigning
  * the PCI device into guest.  For example, the KVM on power is
  * one of the cases.
  */
6007 if (adapter->flags & FLAG_IS_QUAD_PORT) {
6008 struct pci_dev *us_dev = pdev->bus->self;
6009 u16 devctl;
6010
6011 if (!us_dev)
6012 return 0;
6013
6014 pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
6015 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
6016 (devctl & ~PCI_EXP_DEVCTL_CERE));
6017
6018 pci_save_state(pdev);
6019 pci_prepare_to_sleep(pdev);
6020
6021 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
6022 }
6023
6024 return 0;
6025}
6026
/**
 * e1000e_disable_aspm - Disable ASPM states
 * @pdev: pointer to PCI device struct
 * @state: bit-mask of ASPM states to disable
 *
 * Some devices *must* have certain ASPM states disabled per hardware errata.
 **/
6034static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6035{
6036 struct pci_dev *parent = pdev->bus->self;
6037 u16 aspm_dis_mask = 0;
6038 u16 pdev_aspmc, parent_aspmc;
6039
6040 switch (state) {
6041 case PCIE_LINK_STATE_L0S:
6042 case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
6043 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
 /* fall-through - can't have L1 without L0s */
6045 case PCIE_LINK_STATE_L1:
6046 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
6047 break;
6048 default:
6049 return;
6050 }
6051
6052 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6053 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6054
6055 if (parent) {
6056 pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
6057 &parent_aspmc);
6058 parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6059 }
6060
 /* Nothing to do if the ASPM states to be disabled already are */
6062 if (!(pdev_aspmc & aspm_dis_mask) &&
6063 (!parent || !(parent_aspmc & aspm_dis_mask)))
6064 return;
6065
6066 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
6067 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
6068 "L0s" : "",
6069 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
6070 "L1" : "");
6071
6072#ifdef CONFIG_PCIEASPM
6073 pci_disable_link_state_locked(pdev, state);
 /* Double-check ASPM control.  If not disabled by the above, the
  * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
  * not enabled); override by writing PCI config space directly.
  */
6079 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6080 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6081
6082 if (!(aspm_dis_mask & pdev_aspmc))
6083 return;
6084#endif
6085
 /* Both device and parent should have the same ASPM setting.
  * Disable ASPM in downstream component first and then upstream.
  */
6089 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
6090
6091 if (parent)
6092 pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
6093 aspm_dis_mask);
6094}
6095
6096#ifdef CONFIG_PM
6097static bool e1000e_pm_ready(struct e1000_adapter *adapter)
6098{
6099 return !!adapter->tx_ring->buffer_info;
6100}
6101
6102static int __e1000_resume(struct pci_dev *pdev)
6103{
6104 struct net_device *netdev = pci_get_drvdata(pdev);
6105 struct e1000_adapter *adapter = netdev_priv(netdev);
6106 struct e1000_hw *hw = &adapter->hw;
6107 u16 aspm_disable_flag = 0;
6108 u32 err;
6109
6110 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6111 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6112 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
6113 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6114 if (aspm_disable_flag)
6115 e1000e_disable_aspm(pdev, aspm_disable_flag);
6116
6117 pci_set_master(pdev);
6118
6119 e1000e_set_interrupt_capability(adapter);
6120 if (netif_running(netdev)) {
6121 err = e1000_request_irq(adapter);
6122 if (err)
6123 return err;
6124 }
6125
6126 if (hw->mac.type >= e1000_pch2lan)
6127 e1000_resume_workarounds_pchlan(&adapter->hw);
6128
6129 e1000e_power_up_phy(adapter);
6130
6131
6132 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
6133 u16 phy_data;
6134
6135 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
6136 if (phy_data) {
6137 e_info("PHY Wakeup cause - %s\n",
6138 phy_data & E1000_WUS_EX ? "Unicast Packet" :
6139 phy_data & E1000_WUS_MC ? "Multicast Packet" :
6140 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
6141 phy_data & E1000_WUS_MAG ? "Magic Packet" :
6142 phy_data & E1000_WUS_LNKC ?
6143 "Link Status Change" : "other");
6144 }
6145 e1e_wphy(&adapter->hw, BM_WUS, ~0);
6146 } else {
6147 u32 wus = er32(WUS);
6148 if (wus) {
6149 e_info("MAC Wakeup cause - %s\n",
6150 wus & E1000_WUS_EX ? "Unicast Packet" :
6151 wus & E1000_WUS_MC ? "Multicast Packet" :
6152 wus & E1000_WUS_BC ? "Broadcast Packet" :
6153 wus & E1000_WUS_MAG ? "Magic Packet" :
6154 wus & E1000_WUS_LNKC ? "Link Status Change" :
6155 "other");
6156 }
6157 ew32(WUS, ~0);
6158 }
6159
6160 e1000e_reset(adapter);
6161
6162 e1000_init_manageability_pt(adapter);
6163
6164 if (netif_running(netdev))
6165 e1000e_up(adapter);
6166
6167 netif_device_attach(netdev);
6168
 /* If the controller has AMT, do not set DRV_LOAD until the interface
  * is up.  For all other cases, let the f/w know that the h/w is now
  * under the control of the driver.
  */
6173 if (!(adapter->flags & FLAG_HAS_AMT))
6174 e1000e_get_hw_control(adapter);
6175
6176 return 0;
6177}
6178
6179#ifdef CONFIG_PM_SLEEP
6180static int e1000_suspend(struct device *dev)
6181{
6182 struct pci_dev *pdev = to_pci_dev(dev);
6183
6184 return __e1000_shutdown(pdev, false);
6185}
6186
6187static int e1000_resume(struct device *dev)
6188{
6189 struct pci_dev *pdev = to_pci_dev(dev);
6190 struct net_device *netdev = pci_get_drvdata(pdev);
6191 struct e1000_adapter *adapter = netdev_priv(netdev);
6192
6193 if (e1000e_pm_ready(adapter))
6194 adapter->idle_check = true;
6195
6196 return __e1000_resume(pdev);
6197}
6198#endif
6199
6200#ifdef CONFIG_PM_RUNTIME
6201static int e1000_runtime_suspend(struct device *dev)
6202{
6203 struct pci_dev *pdev = to_pci_dev(dev);
6204 struct net_device *netdev = pci_get_drvdata(pdev);
6205 struct e1000_adapter *adapter = netdev_priv(netdev);
6206
6207 if (!e1000e_pm_ready(adapter))
6208 return 0;
6209
6210 return __e1000_shutdown(pdev, true);
6211}
6212
6213static int e1000_idle(struct device *dev)
6214{
6215 struct pci_dev *pdev = to_pci_dev(dev);
6216 struct net_device *netdev = pci_get_drvdata(pdev);
6217 struct e1000_adapter *adapter = netdev_priv(netdev);
6218
6219 if (!e1000e_pm_ready(adapter))
6220 return 0;
6221
6222 if (adapter->idle_check) {
6223 adapter->idle_check = false;
6224 if (!e1000e_has_link(adapter))
6225 pm_schedule_suspend(dev, MSEC_PER_SEC);
6226 }
6227
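 /* Returning -EBUSY keeps runtime PM from suspending immediately; if the
  * idle check above found no link, the delayed suspend scheduled there
  * (MSEC_PER_SEC) will put the device to sleep instead.
  */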
6228 return -EBUSY;
6229}
6230
6231static int e1000_runtime_resume(struct device *dev)
6232{
6233 struct pci_dev *pdev = to_pci_dev(dev);
6234 struct net_device *netdev = pci_get_drvdata(pdev);
6235 struct e1000_adapter *adapter = netdev_priv(netdev);
6236
6237 if (!e1000e_pm_ready(adapter))
6238 return 0;
6239
6240 adapter->idle_check = !dev->power.runtime_auto;
6241 return __e1000_resume(pdev);
6242}
6243#endif
6244#endif
6245
6246static void e1000_shutdown(struct pci_dev *pdev)
6247{
6248 __e1000_shutdown(pdev, false);
6249}
6250
6251#ifdef CONFIG_NET_POLL_CONTROLLER
6252
6253static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
6254{
6255 struct net_device *netdev = data;
6256 struct e1000_adapter *adapter = netdev_priv(netdev);
6257
6258 if (adapter->msix_entries) {
6259 int vector, msix_irq;
6260
6261 vector = 0;
6262 msix_irq = adapter->msix_entries[vector].vector;
6263 disable_irq(msix_irq);
6264 e1000_intr_msix_rx(msix_irq, netdev);
6265 enable_irq(msix_irq);
6266
6267 vector++;
6268 msix_irq = adapter->msix_entries[vector].vector;
6269 disable_irq(msix_irq);
6270 e1000_intr_msix_tx(msix_irq, netdev);
6271 enable_irq(msix_irq);
6272
6273 vector++;
6274 msix_irq = adapter->msix_entries[vector].vector;
6275 disable_irq(msix_irq);
6276 e1000_msix_other(msix_irq, netdev);
6277 enable_irq(msix_irq);
6278 }
6279
6280 return IRQ_HANDLED;
6281}
6282
/**
 * e1000_netpoll
 * @netdev: network interface device structure
 *
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
6291static void e1000_netpoll(struct net_device *netdev)
6292{
6293 struct e1000_adapter *adapter = netdev_priv(netdev);
6294
6295 switch (adapter->int_mode) {
6296 case E1000E_INT_MODE_MSIX:
6297 e1000_intr_msix(adapter->pdev->irq, netdev);
6298 break;
6299 case E1000E_INT_MODE_MSI:
6300 disable_irq(adapter->pdev->irq);
6301 e1000_intr_msi(adapter->pdev->irq, netdev);
6302 enable_irq(adapter->pdev->irq);
6303 break;
6304 default:
6305 disable_irq(adapter->pdev->irq);
6306 e1000_intr(adapter->pdev->irq, netdev);
6307 enable_irq(adapter->pdev->irq);
6308 break;
6309 }
6310}
6311#endif
6312
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI channel state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
6321static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
6322 pci_channel_state_t state)
6323{
6324 struct net_device *netdev = pci_get_drvdata(pdev);
6325 struct e1000_adapter *adapter = netdev_priv(netdev);
6326
6327 netif_device_detach(netdev);
6328
6329 if (state == pci_channel_io_perm_failure)
6330 return PCI_ERS_RESULT_DISCONNECT;
6331
6332 if (netif_running(netdev))
6333 e1000e_down(adapter);
6334 pci_disable_device(pdev);
6335
6336
6337 return PCI_ERS_RESULT_NEED_RESET;
6338}
6339
/**
 * e1000_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first half of the resume routine.
 */
6347static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
6348{
6349 struct net_device *netdev = pci_get_drvdata(pdev);
6350 struct e1000_adapter *adapter = netdev_priv(netdev);
6351 struct e1000_hw *hw = &adapter->hw;
6352 u16 aspm_disable_flag = 0;
6353 int err;
6354 pci_ers_result_t result;
6355
6356 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6357 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6358 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
6359 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6360 if (aspm_disable_flag)
6361 e1000e_disable_aspm(pdev, aspm_disable_flag);
6362
6363 err = pci_enable_device_mem(pdev);
6364 if (err) {
6365 dev_err(&pdev->dev,
6366 "Cannot re-enable PCI device after reset.\n");
6367 result = PCI_ERS_RESULT_DISCONNECT;
6368 } else {
6369 pdev->state_saved = true;
6370 pci_restore_state(pdev);
6371 pci_set_master(pdev);
6372
6373 pci_enable_wake(pdev, PCI_D3hot, 0);
6374 pci_enable_wake(pdev, PCI_D3cold, 0);
6375
6376 e1000e_reset(adapter);
6377 ew32(WUS, ~0);
6378 result = PCI_ERS_RESULT_RECOVERED;
6379 }
6380
6381 pci_cleanup_aer_uncorrect_error_status(pdev);
6382
6383 return result;
6384}
6385
/**
 * e1000_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.  Implementation resembles the
 * second half of the resume routine.
 */
6394static void e1000_io_resume(struct pci_dev *pdev)
6395{
6396 struct net_device *netdev = pci_get_drvdata(pdev);
6397 struct e1000_adapter *adapter = netdev_priv(netdev);
6398
6399 e1000_init_manageability_pt(adapter);
6400
6401 if (netif_running(netdev)) {
6402 if (e1000e_up(adapter)) {
6403 dev_err(&pdev->dev,
6404 "can't bring device back up after reset\n");
6405 return;
6406 }
6407 }
6408
6409 netif_device_attach(netdev);
6410
 /* If the controller has AMT, do not set DRV_LOAD until the interface
  * is up.  For all other cases, let the f/w know that the h/w is now
  * under the control of the driver.
  */
6415 if (!(adapter->flags & FLAG_HAS_AMT))
6416 e1000e_get_hw_control(adapter);
6417}
6418
6419static void e1000_print_device_info(struct e1000_adapter *adapter)
6420{
6421 struct e1000_hw *hw = &adapter->hw;
6422 struct net_device *netdev = adapter->netdev;
6423 u32 ret_val;
6424 u8 pba_str[E1000_PBANUM_LENGTH];
6425
6426
6427 e_info("(PCI Express:2.5GT/s:%s) %pM\n",
6428
6429 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
6430 "Width x1"),
6431
6432 netdev->dev_addr);
6433 e_info("Intel(R) PRO/%s Network Connection\n",
6434 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
6435 ret_val = e1000_read_pba_string_generic(hw, pba_str,
6436 E1000_PBANUM_LENGTH);
6437 if (ret_val)
6438 strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
6439 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
6440 hw->mac.type, hw->phy.type, pba_str);
6441}
6442
6443static void e1000_eeprom_checks(struct e1000_adapter *adapter)
6444{
6445 struct e1000_hw *hw = &adapter->hw;
6446 int ret_val;
6447 u16 buf = 0;
6448
6449 if (hw->mac.type != e1000_82573)
6450 return;
6451
6452 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
6453 le16_to_cpus(&buf);
6454 if (!ret_val && (!(buf & (1 << 0)))) {
6455
6456 dev_warn(&adapter->pdev->dev,
6457 "Warning: detected DSPD enabled in EEPROM\n");
6458 }
6459}
6460
6461static int e1000_set_features(struct net_device *netdev,
6462 netdev_features_t features)
6463{
6464 struct e1000_adapter *adapter = netdev_priv(netdev);
6465 netdev_features_t changed = features ^ netdev->features;
6466
6467 if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
6468 adapter->flags |= FLAG_TSO_FORCE;
6469
6470 if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
6471 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
6472 NETIF_F_RXALL)))
6473 return 0;
6474
6475 if (changed & NETIF_F_RXFCS) {
6476 if (features & NETIF_F_RXFCS) {
6477 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6478 } else {
 /* We need to take it back to defaults, which might mean
  * stripping is still disabled at the adapter level.
  */
6482 if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
6483 adapter->flags2 |= FLAG2_CRC_STRIPPING;
6484 else
6485 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6486 }
6487 }
6488
6489 netdev->features = features;
6490
6491 if (netif_running(netdev))
6492 e1000e_reinit_locked(adapter);
6493 else
6494 e1000e_reset(adapter);
6495
6496 return 0;
6497}
6498
6499static const struct net_device_ops e1000e_netdev_ops = {
6500 .ndo_open = e1000_open,
6501 .ndo_stop = e1000_close,
6502 .ndo_start_xmit = e1000_xmit_frame,
6503 .ndo_get_stats64 = e1000e_get_stats64,
6504 .ndo_set_rx_mode = e1000e_set_rx_mode,
6505 .ndo_set_mac_address = e1000_set_mac,
6506 .ndo_change_mtu = e1000_change_mtu,
6507 .ndo_do_ioctl = e1000_ioctl,
6508 .ndo_tx_timeout = e1000_tx_timeout,
6509 .ndo_validate_addr = eth_validate_addr,
6510
6511 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
6512 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
6513#ifdef CONFIG_NET_POLL_CONTROLLER
6514 .ndo_poll_controller = e1000_netpoll,
6515#endif
6516 .ndo_set_features = e1000_set_features,
6517};
6518
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
6530static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6531{
6532 struct net_device *netdev;
6533 struct e1000_adapter *adapter;
6534 struct e1000_hw *hw;
6535 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
6536 resource_size_t mmio_start, mmio_len;
6537 resource_size_t flash_start, flash_len;
6538 static int cards_found;
6539 u16 aspm_disable_flag = 0;
6540 int bars, i, err, pci_using_dac;
6541 u16 eeprom_data = 0;
6542 u16 eeprom_apme_mask = E1000_EEPROM_APME;
6543
6544 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
6545 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6546 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
6547 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6548 if (aspm_disable_flag)
6549 e1000e_disable_aspm(pdev, aspm_disable_flag);
6550
6551 err = pci_enable_device_mem(pdev);
6552 if (err)
6553 return err;
6554
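 /* Try a 64-bit DMA mask first (streaming and coherent) and fall back to
  * 32-bit; pci_using_dac is consulted later to decide whether
  * NETIF_F_HIGHDMA can be advertised on the netdev.
  */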
6555 pci_using_dac = 0;
6556 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6557 if (!err) {
6558 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
6559 if (!err)
6560 pci_using_dac = 1;
6561 } else {
6562 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6563 if (err) {
6564 err = dma_set_coherent_mask(&pdev->dev,
6565 DMA_BIT_MASK(32));
6566 if (err) {
6567 dev_err(&pdev->dev,
6568 "No usable DMA configuration, aborting\n");
6569 goto err_dma;
6570 }
6571 }
6572 }
6573
6574 bars = pci_select_bars(pdev, IORESOURCE_MEM);
6575 err = pci_request_selected_regions_exclusive(pdev, bars,
6576 e1000e_driver_name);
6577 if (err)
6578 goto err_pci_reg;
6579
6580
6581 pci_enable_pcie_error_reporting(pdev);
6582
6583 pci_set_master(pdev);
6584
6585 err = pci_save_state(pdev);
6586 if (err)
6587 goto err_alloc_etherdev;
6588
6589 err = -ENOMEM;
6590 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
6591 if (!netdev)
6592 goto err_alloc_etherdev;
6593
6594 SET_NETDEV_DEV(netdev, &pdev->dev);
6595
6596 netdev->irq = pdev->irq;
6597
6598 pci_set_drvdata(pdev, netdev);
6599 adapter = netdev_priv(netdev);
6600 hw = &adapter->hw;
6601 adapter->netdev = netdev;
6602 adapter->pdev = pdev;
6603 adapter->ei = ei;
6604 adapter->pba = ei->pba;
6605 adapter->flags = ei->flags;
6606 adapter->flags2 = ei->flags2;
6607 adapter->hw.adapter = adapter;
6608 adapter->hw.mac.type = ei->mac;
6609 adapter->max_hw_frame_size = ei->max_hw_frame_size;
6610 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6611
6612 mmio_start = pci_resource_start(pdev, 0);
6613 mmio_len = pci_resource_len(pdev, 0);
6614
6615 err = -EIO;
6616 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
6617 if (!adapter->hw.hw_addr)
6618 goto err_ioremap;
6619
6620 if ((adapter->flags & FLAG_HAS_FLASH) &&
6621 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
6622 flash_start = pci_resource_start(pdev, 1);
6623 flash_len = pci_resource_len(pdev, 1);
6624 adapter->hw.flash_address = ioremap(flash_start, flash_len);
6625 if (!adapter->hw.flash_address)
6626 goto err_flashmap;
6627 }
6628
6629
6630 if (adapter->flags2 & FLAG2_HAS_EEE)
6631 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
6632
6633
6634 netdev->netdev_ops = &e1000e_netdev_ops;
6635 e1000e_set_ethtool_ops(netdev);
6636 netdev->watchdog_timeo = 5 * HZ;
6637 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
6638 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
6639
6640 netdev->mem_start = mmio_start;
6641 netdev->mem_end = mmio_start + mmio_len;
6642
6643 adapter->bd_number = cards_found++;
6644
6645 e1000e_check_options(adapter);
6646
6647
6648 err = e1000_sw_init(adapter);
6649 if (err)
6650 goto err_sw_init;
6651
6652 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6653 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
6654 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6655
6656 err = ei->get_variants(adapter);
6657 if (err)
6658 goto err_hw_init;
6659
6660 if ((adapter->flags & FLAG_IS_ICH) &&
6661 (adapter->flags & FLAG_READ_ONLY_NVM))
6662 e1000e_write_protect_nvm_ich8lan(&adapter->hw);
6663
6664 hw->mac.ops.get_bus_info(&adapter->hw);
6665
6666 adapter->hw.phy.autoneg_wait_to_complete = 0;
6667
6668
6669 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
6670 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6671 adapter->hw.phy.disable_polarity_correction = 0;
6672 adapter->hw.phy.ms_type = e1000_ms_hw_default;
6673 }
6674
6675 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
6676 dev_info(&pdev->dev,
6677 "PHY reset is blocked due to SOL/IDER session.\n");
6678
6679
6680 netdev->features = (NETIF_F_SG |
6681 NETIF_F_HW_VLAN_CTAG_RX |
6682 NETIF_F_HW_VLAN_CTAG_TX |
6683 NETIF_F_TSO |
6684 NETIF_F_TSO6 |
6685 NETIF_F_RXHASH |
6686 NETIF_F_RXCSUM |
6687 NETIF_F_HW_CSUM);
6688
6689
6690 netdev->hw_features = netdev->features;
6691 netdev->hw_features |= NETIF_F_RXFCS;
6692 netdev->priv_flags |= IFF_SUPP_NOFCS;
6693 netdev->hw_features |= NETIF_F_RXALL;
6694
6695 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
6696 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6697
6698 netdev->vlan_features |= (NETIF_F_SG |
6699 NETIF_F_TSO |
6700 NETIF_F_TSO6 |
6701 NETIF_F_HW_CSUM);
6702
6703 netdev->priv_flags |= IFF_UNICAST_FLT;
6704
6705 if (pci_using_dac) {
6706 netdev->features |= NETIF_F_HIGHDMA;
6707 netdev->vlan_features |= NETIF_F_HIGHDMA;
6708 }
6709
6710 if (e1000e_enable_mng_pass_thru(&adapter->hw))
6711 adapter->flags |= FLAG_MNG_PT_ENABLED;
6712
 /* before reading the NVM, reset the controller to
  * put the device in a known good starting state
  */
6716 adapter->hw.mac.ops.reset_hw(&adapter->hw);
6717
 /* systems with ASPM and others may see the checksum fail on the
  * first attempt; let's give it a few tries
  */
6721 for (i = 0;; i++) {
6722 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
6723 break;
6724 if (i == 2) {
6725 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
6726 err = -EIO;
6727 goto err_eeprom;
6728 }
6729 }
6730
6731 e1000_eeprom_checks(adapter);
6732
6733
6734 if (e1000e_read_mac_addr(&adapter->hw))
6735 dev_err(&pdev->dev,
6736 "NVM Read Error while reading MAC address\n");
6737
6738 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
6739
6740 if (!is_valid_ether_addr(netdev->dev_addr)) {
6741 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
6742 netdev->dev_addr);
6743 err = -EIO;
6744 goto err_eeprom;
6745 }
6746
6747 init_timer(&adapter->watchdog_timer);
6748 adapter->watchdog_timer.function = e1000_watchdog;
6749 adapter->watchdog_timer.data = (unsigned long)adapter;
6750
6751 init_timer(&adapter->phy_info_timer);
6752 adapter->phy_info_timer.function = e1000_update_phy_info;
6753 adapter->phy_info_timer.data = (unsigned long)adapter;
6754
6755 INIT_WORK(&adapter->reset_task, e1000_reset_task);
6756 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
6757 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
6758 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
6759 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
6760
 /* Initialize link parameters.  User can change them with ethtool */
6762 adapter->hw.mac.autoneg = 1;
6763 adapter->fc_autoneg = true;
6764 adapter->hw.fc.requested_mode = e1000_fc_default;
6765 adapter->hw.fc.current_mode = e1000_fc_default;
6766 adapter->hw.phy.autoneg_advertised = 0x2f;
6767
 /* Initial Wake on LAN setting - If APM wake is enabled in
  * the EEPROM, enable the ACPI Magic Packet filter
  */
6771 if (adapter->flags & FLAG_APME_IN_WUC) {
6772
6773 eeprom_data = er32(WUC);
6774 eeprom_apme_mask = E1000_WUC_APME;
6775 if ((hw->mac.type > e1000_ich10lan) &&
6776 (eeprom_data & E1000_WUC_PHY_WAKE))
6777 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
6778 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
6779 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
6780 (adapter->hw.bus.func == 1))
6781 e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
6782 1, &eeprom_data);
6783 else
6784 e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
6785 1, &eeprom_data);
6786 }
6787
6788
6789 if (eeprom_data & eeprom_apme_mask)
6790 adapter->eeprom_wol |= E1000_WUFC_MAG;
6791
 /* now that we have the eeprom settings, apply the special cases
  * where the eeprom may be wrong or the board simply won't support
  * wake on lan on a particular port
  */
6796 if (!(adapter->flags & FLAG_HAS_WOL))
6797 adapter->eeprom_wol = 0;
6798
 /* initialize the wol settings based on the eeprom settings */
6800 adapter->wol = adapter->eeprom_wol;
6801
 /* make sure adapter isn't asleep if manageability is enabled */
6803 if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
6804 (hw->mac.ops.check_mng_mode(hw)))
6805 device_wakeup_enable(&pdev->dev);
6806
6807
6808 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
6809
6810
6811 e1000e_reset(adapter);
6812
 /* If the controller has AMT, do not set DRV_LOAD until the interface
  * is up.  For all other cases, let the f/w know that the h/w is now
  * under the control of the driver.
  */
6817 if (!(adapter->flags & FLAG_HAS_AMT))
6818 e1000e_get_hw_control(adapter);
6819
6820 strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
6821 err = register_netdev(netdev);
6822 if (err)
6823 goto err_register;
6824
6825
6826 netif_carrier_off(netdev);
6827
6828
6829 e1000e_ptp_init(adapter);
6830
6831 e1000_print_device_info(adapter);
6832
6833 if (pci_dev_run_wake(pdev))
6834 pm_runtime_put_noidle(&pdev->dev);
6835
6836 return 0;
6837
6838err_register:
6839 if (!(adapter->flags & FLAG_HAS_AMT))
6840 e1000e_release_hw_control(adapter);
6841err_eeprom:
6842 if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
6843 e1000_phy_hw_reset(&adapter->hw);
6844err_hw_init:
6845 kfree(adapter->tx_ring);
6846 kfree(adapter->rx_ring);
6847err_sw_init:
6848 if (adapter->hw.flash_address)
6849 iounmap(adapter->hw.flash_address);
6850 e1000e_reset_interrupt_capability(adapter);
6851err_flashmap:
6852 iounmap(adapter->hw.hw_addr);
6853err_ioremap:
6854 free_netdev(netdev);
6855err_alloc_etherdev:
6856 pci_release_selected_regions(pdev,
6857 pci_select_bars(pdev, IORESOURCE_MEM));
6858err_pci_reg:
6859err_dma:
6860 pci_disable_device(pdev);
6861 return err;
6862}
6863
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
6873static void e1000_remove(struct pci_dev *pdev)
6874{
6875 struct net_device *netdev = pci_get_drvdata(pdev);
6876 struct e1000_adapter *adapter = netdev_priv(netdev);
6877 bool down = test_bit(__E1000_DOWN, &adapter->state);
6878
6879 e1000e_ptp_remove(adapter);
6880
 /* The timers may be rescheduled, so explicitly disable them
  * from being rescheduled.
  */
6884 if (!down)
6885 set_bit(__E1000_DOWN, &adapter->state);
6886 del_timer_sync(&adapter->watchdog_timer);
6887 del_timer_sync(&adapter->phy_info_timer);
6888
6889 cancel_work_sync(&adapter->reset_task);
6890 cancel_work_sync(&adapter->watchdog_task);
6891 cancel_work_sync(&adapter->downshift_task);
6892 cancel_work_sync(&adapter->update_phy_task);
6893 cancel_work_sync(&adapter->print_hang_task);
6894
6895 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
6896 cancel_work_sync(&adapter->tx_hwtstamp_work);
6897 if (adapter->tx_hwtstamp_skb) {
6898 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
6899 adapter->tx_hwtstamp_skb = NULL;
6900 }
6901 }
6902
6903 if (!(netdev->flags & IFF_UP))
6904 e1000_power_down_phy(adapter);
6905
 /* Don't lie to e1000_close() down the road. */
6907 if (!down)
6908 clear_bit(__E1000_DOWN, &adapter->state);
6909 unregister_netdev(netdev);
6910
6911 if (pci_dev_run_wake(pdev))
6912 pm_runtime_get_noresume(&pdev->dev);
6913
 /* Release control of h/w to f/w */
6917 e1000e_release_hw_control(adapter);
6918
6919 e1000e_reset_interrupt_capability(adapter);
6920 kfree(adapter->tx_ring);
6921 kfree(adapter->rx_ring);
6922
6923 iounmap(adapter->hw.hw_addr);
6924 if (adapter->hw.flash_address)
6925 iounmap(adapter->hw.flash_address);
6926 pci_release_selected_regions(pdev,
6927 pci_select_bars(pdev, IORESOURCE_MEM));
6928
6929 free_netdev(netdev);
6930
6931
6932 pci_disable_pcie_error_reporting(pdev);
6933
6934 pci_disable_device(pdev);
6935}
6936
6937
6938static const struct pci_error_handlers e1000_err_handler = {
6939 .error_detected = e1000_io_error_detected,
6940 .slot_reset = e1000_io_slot_reset,
6941 .resume = e1000_io_resume,
6942};
6943
6944static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6945 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
6946 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
6947 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
6948 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
6949 board_82571 },
6950 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
6951 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
6952 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
6953 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
6954 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
6955
6956 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
6957 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
6958 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
6959 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
6960
6961 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
6962 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
6963 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
6964
6965 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
6966 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
6967 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
6968
6969 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
6970 board_80003es2lan },
6971 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
6972 board_80003es2lan },
6973 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
6974 board_80003es2lan },
6975 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
6976 board_80003es2lan },
6977
6978 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
6979 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
6980 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
6981 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
6982 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
6983 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
6984 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
6985 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
6986
6987 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
6988 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
6989 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
6990 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
6991 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
6992 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
6993 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
6994 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
6995 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
6996
6997 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
6998 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
6999 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
7000
7001 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
7002 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
7003 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
7004
7005 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
7006 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
7007 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
7008 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
7009
7010 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
7011 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
7012
7013 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
7014 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
7015 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
7016 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
7017 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
7018 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
7019 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
7020 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
7021
7022 { 0, 0, 0, 0, 0, 0, 0 }
7023};
7024MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
7025
7026#ifdef CONFIG_PM
7027static const struct dev_pm_ops e1000_pm_ops = {
7028 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
7029 SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
7030 e1000_idle)
7031};
7032#endif
7033
7034
7035static struct pci_driver e1000_driver = {
7036 .name = e1000e_driver_name,
7037 .id_table = e1000_pci_tbl,
7038 .probe = e1000_probe,
7039 .remove = e1000_remove,
7040#ifdef CONFIG_PM
7041 .driver = {
7042 .pm = &e1000_pm_ops,
7043 },
7044#endif
7045 .shutdown = e1000_shutdown,
7046 .err_handler = &e1000_err_handler
7047};
7048
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
7055static int __init e1000_init_module(void)
7056{
7057 int ret;
7058 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
7059 e1000e_driver_version);
7060 pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n");
7061 ret = pci_register_driver(&e1000_driver);
7062
7063 return ret;
7064}
7065module_init(e1000_init_module);
7066
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
7073static void __exit e1000_exit_module(void)
7074{
7075 pci_unregister_driver(&e1000_driver);
7076}
7077module_exit(e1000_exit_module);
7078
7079MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
7080MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
7081MODULE_LICENSE("GPL");
7082MODULE_VERSION(DRV_VERSION);
7083
7084
7085