1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/module.h>
32#include <linux/types.h>
33#include <linux/init.h>
34#include <linux/pci.h>
35#include <linux/vmalloc.h>
36#include <linux/pagemap.h>
37#include <linux/delay.h>
38#include <linux/netdevice.h>
39#include <linux/interrupt.h>
40#include <linux/tcp.h>
41#include <linux/ipv6.h>
42#include <linux/slab.h>
43#include <net/checksum.h>
44#include <net/ip6_checksum.h>
45#include <linux/ethtool.h>
46#include <linux/if_vlan.h>
47#include <linux/cpu.h>
48#include <linux/smp.h>
49#include <linux/pm_qos.h>
50#include <linux/pm_runtime.h>
51#include <linux/aer.h>
52#include <linux/prefetch.h>
53
54#include "e1000.h"
55
56#define DRV_EXTRAVERSION "-k"
57
58#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
59char e1000e_driver_name[] = "e1000e";
60const char e1000e_driver_version[] = DRV_VERSION;
61
62#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
63static int debug = -1;
64module_param(debug, int, 0);
65MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
66
67static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
68
69static const struct e1000_info *e1000_info_tbl[] = {
70 [board_82571] = &e1000_82571_info,
71 [board_82572] = &e1000_82572_info,
72 [board_82573] = &e1000_82573_info,
73 [board_82574] = &e1000_82574_info,
74 [board_82583] = &e1000_82583_info,
75 [board_80003es2lan] = &e1000_es2_info,
76 [board_ich8lan] = &e1000_ich8_info,
77 [board_ich9lan] = &e1000_ich9_info,
78 [board_ich10lan] = &e1000_ich10_info,
79 [board_pchlan] = &e1000_pch_info,
80 [board_pch2lan] = &e1000_pch2_info,
81 [board_pch_lpt] = &e1000_pch_lpt_info,
82};
83
84struct e1000_reg_info {
85 u32 ofs;
86 char *name;
87};
88
89static const struct e1000_reg_info e1000_reg_info_tbl[] = {
90
91 {E1000_CTRL, "CTRL"},
92 {E1000_STATUS, "STATUS"},
93 {E1000_CTRL_EXT, "CTRL_EXT"},
94
95
96 {E1000_ICR, "ICR"},
97
98
99 {E1000_RCTL, "RCTL"},
100 {E1000_RDLEN(0), "RDLEN"},
101 {E1000_RDH(0), "RDH"},
102 {E1000_RDT(0), "RDT"},
103 {E1000_RDTR, "RDTR"},
104 {E1000_RXDCTL(0), "RXDCTL"},
105 {E1000_ERT, "ERT"},
106 {E1000_RDBAL(0), "RDBAL"},
107 {E1000_RDBAH(0), "RDBAH"},
108 {E1000_RDFH, "RDFH"},
109 {E1000_RDFT, "RDFT"},
110 {E1000_RDFHS, "RDFHS"},
111 {E1000_RDFTS, "RDFTS"},
112 {E1000_RDFPC, "RDFPC"},
113
114
115 {E1000_TCTL, "TCTL"},
116 {E1000_TDBAL(0), "TDBAL"},
117 {E1000_TDBAH(0), "TDBAH"},
118 {E1000_TDLEN(0), "TDLEN"},
119 {E1000_TDH(0), "TDH"},
120 {E1000_TDT(0), "TDT"},
121 {E1000_TIDV, "TIDV"},
122 {E1000_TXDCTL(0), "TXDCTL"},
123 {E1000_TADV, "TADV"},
124 {E1000_TARC(0), "TARC"},
125 {E1000_TDFH, "TDFH"},
126 {E1000_TDFT, "TDFT"},
127 {E1000_TDFHS, "TDFHS"},
128 {E1000_TDFTS, "TDFTS"},
129 {E1000_TDFPC, "TDFPC"},
130
131
132 {0, NULL}
133};
134
135
136
137
138
139
140static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
141{
142 int n = 0;
143 char rname[16];
144 u32 regs[8];
145
146 switch (reginfo->ofs) {
147 case E1000_RXDCTL(0):
148 for (n = 0; n < 2; n++)
149 regs[n] = __er32(hw, E1000_RXDCTL(n));
150 break;
151 case E1000_TXDCTL(0):
152 for (n = 0; n < 2; n++)
153 regs[n] = __er32(hw, E1000_TXDCTL(n));
154 break;
155 case E1000_TARC(0):
156 for (n = 0; n < 2; n++)
157 regs[n] = __er32(hw, E1000_TARC(n));
158 break;
159 default:
160 pr_info("%-15s %08x\n",
161 reginfo->name, __er32(hw, reginfo->ofs));
162 return;
163 }
164
165 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
166 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
167}
168
169static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
170 struct e1000_buffer *bi)
171{
172 int i;
173 struct e1000_ps_page *ps_page;
174
175 for (i = 0; i < adapter->rx_ps_pages; i++) {
176 ps_page = &bi->ps_pages[i];
177
178 if (ps_page->page) {
179 pr_info("packet dump for ps_page %d:\n", i);
180 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
181 16, 1, page_address(ps_page->page),
182 PAGE_SIZE, true);
183 }
184 }
185}
186
187
188
189
190
191static void e1000e_dump(struct e1000_adapter *adapter)
192{
193 struct net_device *netdev = adapter->netdev;
194 struct e1000_hw *hw = &adapter->hw;
195 struct e1000_reg_info *reginfo;
196 struct e1000_ring *tx_ring = adapter->tx_ring;
197 struct e1000_tx_desc *tx_desc;
198 struct my_u0 {
199 __le64 a;
200 __le64 b;
201 } *u0;
202 struct e1000_buffer *buffer_info;
203 struct e1000_ring *rx_ring = adapter->rx_ring;
204 union e1000_rx_desc_packet_split *rx_desc_ps;
205 union e1000_rx_desc_extended *rx_desc;
206 struct my_u1 {
207 __le64 a;
208 __le64 b;
209 __le64 c;
210 __le64 d;
211 } *u1;
212 u32 staterr;
213 int i = 0;
214
215 if (!netif_msg_hw(adapter))
216 return;
217
218
219 if (netdev) {
220 dev_info(&adapter->pdev->dev, "Net device Info\n");
221 pr_info("Device Name state trans_start last_rx\n");
222 pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
223 netdev->state, netdev->trans_start, netdev->last_rx);
224 }
225
226
227 dev_info(&adapter->pdev->dev, "Register Dump\n");
228 pr_info(" Register Name Value\n");
229 for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
230 reginfo->name; reginfo++) {
231 e1000_regdump(hw, reginfo);
232 }
233
234
235 if (!netdev || !netif_running(netdev))
236 return;
237
238 dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
239 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
240 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
241 pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
242 0, tx_ring->next_to_use, tx_ring->next_to_clean,
243 (unsigned long long)buffer_info->dma,
244 buffer_info->length,
245 buffer_info->next_to_watch,
246 (unsigned long long)buffer_info->time_stamp);
247
248
249 if (!netif_msg_tx_done(adapter))
250 goto rx_ring_summary;
251
252 dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281 pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
282 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
283 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
284 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
285 const char *next_desc;
286 tx_desc = E1000_TX_DESC(*tx_ring, i);
287 buffer_info = &tx_ring->buffer_info[i];
288 u0 = (struct my_u0 *)tx_desc;
289 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
290 next_desc = " NTC/U";
291 else if (i == tx_ring->next_to_use)
292 next_desc = " NTU";
293 else if (i == tx_ring->next_to_clean)
294 next_desc = " NTC";
295 else
296 next_desc = "";
297 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
298 (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
299 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
300 i,
301 (unsigned long long)le64_to_cpu(u0->a),
302 (unsigned long long)le64_to_cpu(u0->b),
303 (unsigned long long)buffer_info->dma,
304 buffer_info->length, buffer_info->next_to_watch,
305 (unsigned long long)buffer_info->time_stamp,
306 buffer_info->skb, next_desc);
307
308 if (netif_msg_pktdata(adapter) && buffer_info->skb)
309 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
310 16, 1, buffer_info->skb->data,
311 buffer_info->skb->len, true);
312 }
313
314
315rx_ring_summary:
316 dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
317 pr_info("Queue [NTU] [NTC]\n");
318 pr_info(" %5d %5X %5X\n",
319 0, rx_ring->next_to_use, rx_ring->next_to_clean);
320
321
322 if (!netif_msg_rx_status(adapter))
323 return;
324
325 dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
326 switch (adapter->rx_ps_pages) {
327 case 1:
328 case 2:
329 case 3:
330
331
332
333
334
335
336
337
338
339
340
341
342 pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
343
344
345
346
347
348
349
350
351
352
353
354 pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
355 for (i = 0; i < rx_ring->count; i++) {
356 const char *next_desc;
357 buffer_info = &rx_ring->buffer_info[i];
358 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
359 u1 = (struct my_u1 *)rx_desc_ps;
360 staterr =
361 le32_to_cpu(rx_desc_ps->wb.middle.status_error);
362
363 if (i == rx_ring->next_to_use)
364 next_desc = " NTU";
365 else if (i == rx_ring->next_to_clean)
366 next_desc = " NTC";
367 else
368 next_desc = "";
369
370 if (staterr & E1000_RXD_STAT_DD) {
371
372 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
373 "RWB", i,
374 (unsigned long long)le64_to_cpu(u1->a),
375 (unsigned long long)le64_to_cpu(u1->b),
376 (unsigned long long)le64_to_cpu(u1->c),
377 (unsigned long long)le64_to_cpu(u1->d),
378 buffer_info->skb, next_desc);
379 } else {
380 pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
381 "R ", i,
382 (unsigned long long)le64_to_cpu(u1->a),
383 (unsigned long long)le64_to_cpu(u1->b),
384 (unsigned long long)le64_to_cpu(u1->c),
385 (unsigned long long)le64_to_cpu(u1->d),
386 (unsigned long long)buffer_info->dma,
387 buffer_info->skb, next_desc);
388
389 if (netif_msg_pktdata(adapter))
390 e1000e_dump_ps_pages(adapter,
391 buffer_info);
392 }
393 }
394 break;
395 default:
396 case 0:
397
398
399
400
401
402
403
404
405 pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
406
407
408
409
410
411
412
413
414
415
416
417
418
419 pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");
420
421 for (i = 0; i < rx_ring->count; i++) {
422 const char *next_desc;
423
424 buffer_info = &rx_ring->buffer_info[i];
425 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
426 u1 = (struct my_u1 *)rx_desc;
427 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
428
429 if (i == rx_ring->next_to_use)
430 next_desc = " NTU";
431 else if (i == rx_ring->next_to_clean)
432 next_desc = " NTC";
433 else
434 next_desc = "";
435
436 if (staterr & E1000_RXD_STAT_DD) {
437
438 pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
439 "RWB", i,
440 (unsigned long long)le64_to_cpu(u1->a),
441 (unsigned long long)le64_to_cpu(u1->b),
442 buffer_info->skb, next_desc);
443 } else {
444 pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
445 "R ", i,
446 (unsigned long long)le64_to_cpu(u1->a),
447 (unsigned long long)le64_to_cpu(u1->b),
448 (unsigned long long)buffer_info->dma,
449 buffer_info->skb, next_desc);
450
451 if (netif_msg_pktdata(adapter) &&
452 buffer_info->skb)
453 print_hex_dump(KERN_INFO, "",
454 DUMP_PREFIX_ADDRESS, 16,
455 1,
456 buffer_info->skb->data,
457 adapter->rx_buffer_len,
458 true);
459 }
460 }
461 }
462}
463
464
465
466
467static int e1000_desc_unused(struct e1000_ring *ring)
468{
469 if (ring->next_to_clean > ring->next_to_use)
470 return ring->next_to_clean - ring->next_to_use - 1;
471
472 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
473}
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
490 struct skb_shared_hwtstamps *hwtstamps,
491 u64 systim)
492{
493 u64 ns;
494 unsigned long flags;
495
496 spin_lock_irqsave(&adapter->systim_lock, flags);
497 ns = timecounter_cyc2time(&adapter->tc, systim);
498 spin_unlock_irqrestore(&adapter->systim_lock, flags);
499
500 memset(hwtstamps, 0, sizeof(*hwtstamps));
501 hwtstamps->hwtstamp = ns_to_ktime(ns);
502}
503
504
505
506
507
508
509
510
511
512
513
514static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
515 struct sk_buff *skb)
516{
517 struct e1000_hw *hw = &adapter->hw;
518 u64 rxstmp;
519
520 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
521 !(status & E1000_RXDEXT_STATERR_TST) ||
522 !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
523 return;
524
525
526
527
528
529
530
531
532 rxstmp = (u64)er32(RXSTMPL);
533 rxstmp |= (u64)er32(RXSTMPH) << 32;
534 e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);
535
536 adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
537}
538
539
540
541
542
543
544
545
546static void e1000_receive_skb(struct e1000_adapter *adapter,
547 struct net_device *netdev, struct sk_buff *skb,
548 u32 staterr, __le16 vlan)
549{
550 u16 tag = le16_to_cpu(vlan);
551
552 e1000e_rx_hwtstamp(adapter, staterr, skb);
553
554 skb->protocol = eth_type_trans(skb, netdev);
555
556 if (staterr & E1000_RXD_STAT_VP)
557 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
558
559 napi_gro_receive(&adapter->napi, skb);
560}
561
562
563
564
565
566
567
568
569static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
570 struct sk_buff *skb)
571{
572 u16 status = (u16)status_err;
573 u8 errors = (u8)(status_err >> 24);
574
575 skb_checksum_none_assert(skb);
576
577
578 if (!(adapter->netdev->features & NETIF_F_RXCSUM))
579 return;
580
581
582 if (status & E1000_RXD_STAT_IXSM)
583 return;
584
585
586 if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
587
588 adapter->hw_csum_err++;
589 return;
590 }
591
592
593 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
594 return;
595
596
597 skb->ip_summed = CHECKSUM_UNNECESSARY;
598 adapter->hw_csum_good++;
599}
600
601static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
602{
603 struct e1000_adapter *adapter = rx_ring->adapter;
604 struct e1000_hw *hw = &adapter->hw;
605 s32 ret_val = __ew32_prepare(hw);
606
607 writel(i, rx_ring->tail);
608
609 if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
610 u32 rctl = er32(RCTL);
611 ew32(RCTL, rctl & ~E1000_RCTL_EN);
612 e_err("ME firmware caused invalid RDT - resetting\n");
613 schedule_work(&adapter->reset_task);
614 }
615}
616
617static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
618{
619 struct e1000_adapter *adapter = tx_ring->adapter;
620 struct e1000_hw *hw = &adapter->hw;
621 s32 ret_val = __ew32_prepare(hw);
622
623 writel(i, tx_ring->tail);
624
625 if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
626 u32 tctl = er32(TCTL);
627 ew32(TCTL, tctl & ~E1000_TCTL_EN);
628 e_err("ME firmware caused invalid TDT - resetting\n");
629 schedule_work(&adapter->reset_task);
630 }
631}
632
633
634
635
636
637static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
638 int cleaned_count, gfp_t gfp)
639{
640 struct e1000_adapter *adapter = rx_ring->adapter;
641 struct net_device *netdev = adapter->netdev;
642 struct pci_dev *pdev = adapter->pdev;
643 union e1000_rx_desc_extended *rx_desc;
644 struct e1000_buffer *buffer_info;
645 struct sk_buff *skb;
646 unsigned int i;
647 unsigned int bufsz = adapter->rx_buffer_len;
648
649 i = rx_ring->next_to_use;
650 buffer_info = &rx_ring->buffer_info[i];
651
652 while (cleaned_count--) {
653 skb = buffer_info->skb;
654 if (skb) {
655 skb_trim(skb, 0);
656 goto map_skb;
657 }
658
659 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
660 if (!skb) {
661
662 adapter->alloc_rx_buff_failed++;
663 break;
664 }
665
666 buffer_info->skb = skb;
667map_skb:
668 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
669 adapter->rx_buffer_len,
670 DMA_FROM_DEVICE);
671 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
672 dev_err(&pdev->dev, "Rx DMA map failed\n");
673 adapter->rx_dma_failed++;
674 break;
675 }
676
677 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
678 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
679
680 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
681
682
683
684
685
686 wmb();
687 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
688 e1000e_update_rdt_wa(rx_ring, i);
689 else
690 writel(i, rx_ring->tail);
691 }
692 i++;
693 if (i == rx_ring->count)
694 i = 0;
695 buffer_info = &rx_ring->buffer_info[i];
696 }
697
698 rx_ring->next_to_use = i;
699}
700
701
702
703
704
705static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
706 int cleaned_count, gfp_t gfp)
707{
708 struct e1000_adapter *adapter = rx_ring->adapter;
709 struct net_device *netdev = adapter->netdev;
710 struct pci_dev *pdev = adapter->pdev;
711 union e1000_rx_desc_packet_split *rx_desc;
712 struct e1000_buffer *buffer_info;
713 struct e1000_ps_page *ps_page;
714 struct sk_buff *skb;
715 unsigned int i, j;
716
717 i = rx_ring->next_to_use;
718 buffer_info = &rx_ring->buffer_info[i];
719
720 while (cleaned_count--) {
721 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
722
723 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
724 ps_page = &buffer_info->ps_pages[j];
725 if (j >= adapter->rx_ps_pages) {
726
727 rx_desc->read.buffer_addr[j + 1] =
728 ~cpu_to_le64(0);
729 continue;
730 }
731 if (!ps_page->page) {
732 ps_page->page = alloc_page(gfp);
733 if (!ps_page->page) {
734 adapter->alloc_rx_buff_failed++;
735 goto no_buffers;
736 }
737 ps_page->dma = dma_map_page(&pdev->dev,
738 ps_page->page,
739 0, PAGE_SIZE,
740 DMA_FROM_DEVICE);
741 if (dma_mapping_error(&pdev->dev,
742 ps_page->dma)) {
743 dev_err(&adapter->pdev->dev,
744 "Rx DMA page map failed\n");
745 adapter->rx_dma_failed++;
746 goto no_buffers;
747 }
748 }
749
750
751
752
753 rx_desc->read.buffer_addr[j + 1] =
754 cpu_to_le64(ps_page->dma);
755 }
756
757 skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
758 gfp);
759
760 if (!skb) {
761 adapter->alloc_rx_buff_failed++;
762 break;
763 }
764
765 buffer_info->skb = skb;
766 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
767 adapter->rx_ps_bsize0,
768 DMA_FROM_DEVICE);
769 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
770 dev_err(&pdev->dev, "Rx DMA map failed\n");
771 adapter->rx_dma_failed++;
772
773 dev_kfree_skb_any(skb);
774 buffer_info->skb = NULL;
775 break;
776 }
777
778 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
779
780 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
781
782
783
784
785
786 wmb();
787 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
788 e1000e_update_rdt_wa(rx_ring, i << 1);
789 else
790 writel(i << 1, rx_ring->tail);
791 }
792
793 i++;
794 if (i == rx_ring->count)
795 i = 0;
796 buffer_info = &rx_ring->buffer_info[i];
797 }
798
799no_buffers:
800 rx_ring->next_to_use = i;
801}
802
803
804
805
806
807
808
809static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
810 int cleaned_count, gfp_t gfp)
811{
812 struct e1000_adapter *adapter = rx_ring->adapter;
813 struct net_device *netdev = adapter->netdev;
814 struct pci_dev *pdev = adapter->pdev;
815 union e1000_rx_desc_extended *rx_desc;
816 struct e1000_buffer *buffer_info;
817 struct sk_buff *skb;
818 unsigned int i;
819 unsigned int bufsz = 256 - 16;
820
821 i = rx_ring->next_to_use;
822 buffer_info = &rx_ring->buffer_info[i];
823
824 while (cleaned_count--) {
825 skb = buffer_info->skb;
826 if (skb) {
827 skb_trim(skb, 0);
828 goto check_page;
829 }
830
831 skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
832 if (unlikely(!skb)) {
833
834 adapter->alloc_rx_buff_failed++;
835 break;
836 }
837
838 buffer_info->skb = skb;
839check_page:
840
841 if (!buffer_info->page) {
842 buffer_info->page = alloc_page(gfp);
843 if (unlikely(!buffer_info->page)) {
844 adapter->alloc_rx_buff_failed++;
845 break;
846 }
847 }
848
849 if (!buffer_info->dma) {
850 buffer_info->dma = dma_map_page(&pdev->dev,
851 buffer_info->page, 0,
852 PAGE_SIZE,
853 DMA_FROM_DEVICE);
854 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
855 adapter->alloc_rx_buff_failed++;
856 break;
857 }
858 }
859
860 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
861 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
862
863 if (unlikely(++i == rx_ring->count))
864 i = 0;
865 buffer_info = &rx_ring->buffer_info[i];
866 }
867
868 if (likely(rx_ring->next_to_use != i)) {
869 rx_ring->next_to_use = i;
870 if (unlikely(i-- == 0))
871 i = (rx_ring->count - 1);
872
873
874
875
876
877
878 wmb();
879 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
880 e1000e_update_rdt_wa(rx_ring, i);
881 else
882 writel(i, rx_ring->tail);
883 }
884}
885
886static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
887 struct sk_buff *skb)
888{
889 if (netdev->features & NETIF_F_RXHASH)
890 skb->rxhash = le32_to_cpu(rss);
891}
892
893
894
895
896
897
898
899
900static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
901 int work_to_do)
902{
903 struct e1000_adapter *adapter = rx_ring->adapter;
904 struct net_device *netdev = adapter->netdev;
905 struct pci_dev *pdev = adapter->pdev;
906 struct e1000_hw *hw = &adapter->hw;
907 union e1000_rx_desc_extended *rx_desc, *next_rxd;
908 struct e1000_buffer *buffer_info, *next_buffer;
909 u32 length, staterr;
910 unsigned int i;
911 int cleaned_count = 0;
912 bool cleaned = false;
913 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
914
915 i = rx_ring->next_to_clean;
916 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
917 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
918 buffer_info = &rx_ring->buffer_info[i];
919
920 while (staterr & E1000_RXD_STAT_DD) {
921 struct sk_buff *skb;
922
923 if (*work_done >= work_to_do)
924 break;
925 (*work_done)++;
926 rmb();
927
928 skb = buffer_info->skb;
929 buffer_info->skb = NULL;
930
931 prefetch(skb->data - NET_IP_ALIGN);
932
933 i++;
934 if (i == rx_ring->count)
935 i = 0;
936 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
937 prefetch(next_rxd);
938
939 next_buffer = &rx_ring->buffer_info[i];
940
941 cleaned = true;
942 cleaned_count++;
943 dma_unmap_single(&pdev->dev, buffer_info->dma,
944 adapter->rx_buffer_len, DMA_FROM_DEVICE);
945 buffer_info->dma = 0;
946
947 length = le16_to_cpu(rx_desc->wb.upper.length);
948
949
950
951
952
953
954
955 if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
956 adapter->flags2 |= FLAG2_IS_DISCARDING;
957
958 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
959
960 e_dbg("Receive packet consumed multiple buffers\n");
961
962 buffer_info->skb = skb;
963 if (staterr & E1000_RXD_STAT_EOP)
964 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
965 goto next_desc;
966 }
967
968 if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
969 !(netdev->features & NETIF_F_RXALL))) {
970
971 buffer_info->skb = skb;
972 goto next_desc;
973 }
974
975
976 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
977
978
979
980
981 if (netdev->features & NETIF_F_RXFCS)
982 total_rx_bytes -= 4;
983 else
984 length -= 4;
985 }
986
987 total_rx_bytes += length;
988 total_rx_packets++;
989
990
991
992
993
994 if (length < copybreak) {
995 struct sk_buff *new_skb =
996 netdev_alloc_skb_ip_align(netdev, length);
997 if (new_skb) {
998 skb_copy_to_linear_data_offset(new_skb,
999 -NET_IP_ALIGN,
1000 (skb->data -
1001 NET_IP_ALIGN),
1002 (length +
1003 NET_IP_ALIGN));
1004
1005 buffer_info->skb = skb;
1006 skb = new_skb;
1007 }
1008
1009 }
1010
1011 skb_put(skb, length);
1012
1013
1014 e1000_rx_checksum(adapter, staterr, skb);
1015
1016 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1017
1018 e1000_receive_skb(adapter, netdev, skb, staterr,
1019 rx_desc->wb.upper.vlan);
1020
1021next_desc:
1022 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
1023
1024
1025 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1026 adapter->alloc_rx_buf(rx_ring, cleaned_count,
1027 GFP_ATOMIC);
1028 cleaned_count = 0;
1029 }
1030
1031
1032 rx_desc = next_rxd;
1033 buffer_info = next_buffer;
1034
1035 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1036 }
1037 rx_ring->next_to_clean = i;
1038
1039 cleaned_count = e1000_desc_unused(rx_ring);
1040 if (cleaned_count)
1041 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1042
1043 adapter->total_rx_bytes += total_rx_bytes;
1044 adapter->total_rx_packets += total_rx_packets;
1045 return cleaned;
1046}
1047
1048static void e1000_put_txbuf(struct e1000_ring *tx_ring,
1049 struct e1000_buffer *buffer_info)
1050{
1051 struct e1000_adapter *adapter = tx_ring->adapter;
1052
1053 if (buffer_info->dma) {
1054 if (buffer_info->mapped_as_page)
1055 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1056 buffer_info->length, DMA_TO_DEVICE);
1057 else
1058 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1059 buffer_info->length, DMA_TO_DEVICE);
1060 buffer_info->dma = 0;
1061 }
1062 if (buffer_info->skb) {
1063 dev_kfree_skb_any(buffer_info->skb);
1064 buffer_info->skb = NULL;
1065 }
1066 buffer_info->time_stamp = 0;
1067}
1068
1069static void e1000_print_hw_hang(struct work_struct *work)
1070{
1071 struct e1000_adapter *adapter = container_of(work,
1072 struct e1000_adapter,
1073 print_hang_task);
1074 struct net_device *netdev = adapter->netdev;
1075 struct e1000_ring *tx_ring = adapter->tx_ring;
1076 unsigned int i = tx_ring->next_to_clean;
1077 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
1078 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
1079 struct e1000_hw *hw = &adapter->hw;
1080 u16 phy_status, phy_1000t_status, phy_ext_status;
1081 u16 pci_status;
1082
1083 if (test_bit(__E1000_DOWN, &adapter->state))
1084 return;
1085
1086 if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
1087
1088
1089
1090 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1091
1092 e1e_flush();
1093
1094
1095
1096 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1097
1098 e1e_flush();
1099 adapter->tx_hang_recheck = true;
1100 return;
1101 }
1102
1103 adapter->tx_hang_recheck = false;
1104 netif_stop_queue(netdev);
1105
1106 e1e_rphy(hw, MII_BMSR, &phy_status);
1107 e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
1108 e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);
1109
1110 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
1111
1112
1113 e_err("Detected Hardware Unit Hang:\n"
1114 " TDH <%x>\n"
1115 " TDT <%x>\n"
1116 " next_to_use <%x>\n"
1117 " next_to_clean <%x>\n"
1118 "buffer_info[next_to_clean]:\n"
1119 " time_stamp <%lx>\n"
1120 " next_to_watch <%x>\n"
1121 " jiffies <%lx>\n"
1122 " next_to_watch.status <%x>\n"
1123 "MAC Status <%x>\n"
1124 "PHY Status <%x>\n"
1125 "PHY 1000BASE-T Status <%x>\n"
1126 "PHY Extended Status <%x>\n"
1127 "PCI Status <%x>\n",
1128 readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
1129 tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
1130 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
1131 phy_status, phy_1000t_status, phy_ext_status, pci_status);
1132
1133
1134 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
1135 e_err("Try turning off Tx pause (flow control) via ethtool\n");
1136}
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146static void e1000e_tx_hwtstamp_work(struct work_struct *work)
1147{
1148 struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
1149 tx_hwtstamp_work);
1150 struct e1000_hw *hw = &adapter->hw;
1151
1152 if (!adapter->tx_hwtstamp_skb)
1153 return;
1154
1155 if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
1156 struct skb_shared_hwtstamps shhwtstamps;
1157 u64 txstmp;
1158
1159 txstmp = er32(TXSTMPL);
1160 txstmp |= (u64)er32(TXSTMPH) << 32;
1161
1162 e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
1163
1164 skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
1165 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
1166 adapter->tx_hwtstamp_skb = NULL;
1167 } else {
1168
1169 schedule_work(&adapter->tx_hwtstamp_work);
1170 }
1171}
1172
1173
1174
1175
1176
1177
1178
1179
1180static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
1181{
1182 struct e1000_adapter *adapter = tx_ring->adapter;
1183 struct net_device *netdev = adapter->netdev;
1184 struct e1000_hw *hw = &adapter->hw;
1185 struct e1000_tx_desc *tx_desc, *eop_desc;
1186 struct e1000_buffer *buffer_info;
1187 unsigned int i, eop;
1188 unsigned int count = 0;
1189 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
1190 unsigned int bytes_compl = 0, pkts_compl = 0;
1191
1192 i = tx_ring->next_to_clean;
1193 eop = tx_ring->buffer_info[i].next_to_watch;
1194 eop_desc = E1000_TX_DESC(*tx_ring, eop);
1195
1196 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
1197 (count < tx_ring->count)) {
1198 bool cleaned = false;
1199 rmb();
1200 for (; !cleaned; count++) {
1201 tx_desc = E1000_TX_DESC(*tx_ring, i);
1202 buffer_info = &tx_ring->buffer_info[i];
1203 cleaned = (i == eop);
1204
1205 if (cleaned) {
1206 total_tx_packets += buffer_info->segs;
1207 total_tx_bytes += buffer_info->bytecount;
1208 if (buffer_info->skb) {
1209 bytes_compl += buffer_info->skb->len;
1210 pkts_compl++;
1211 }
1212 }
1213
1214 e1000_put_txbuf(tx_ring, buffer_info);
1215 tx_desc->upper.data = 0;
1216
1217 i++;
1218 if (i == tx_ring->count)
1219 i = 0;
1220 }
1221
1222 if (i == tx_ring->next_to_use)
1223 break;
1224 eop = tx_ring->buffer_info[i].next_to_watch;
1225 eop_desc = E1000_TX_DESC(*tx_ring, eop);
1226 }
1227
1228 tx_ring->next_to_clean = i;
1229
1230 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
1231
1232#define TX_WAKE_THRESHOLD 32
1233 if (count && netif_carrier_ok(netdev) &&
1234 e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
1235
1236
1237
1238 smp_mb();
1239
1240 if (netif_queue_stopped(netdev) &&
1241 !(test_bit(__E1000_DOWN, &adapter->state))) {
1242 netif_wake_queue(netdev);
1243 ++adapter->restart_queue;
1244 }
1245 }
1246
1247 if (adapter->detect_tx_hung) {
1248
1249
1250
1251 adapter->detect_tx_hung = false;
1252 if (tx_ring->buffer_info[i].time_stamp &&
1253 time_after(jiffies, tx_ring->buffer_info[i].time_stamp
1254 + (adapter->tx_timeout_factor * HZ)) &&
1255 !(er32(STATUS) & E1000_STATUS_TXOFF))
1256 schedule_work(&adapter->print_hang_task);
1257 else
1258 adapter->tx_hang_recheck = false;
1259 }
1260 adapter->total_tx_bytes += total_tx_bytes;
1261 adapter->total_tx_packets += total_tx_packets;
1262 return count < tx_ring->count;
1263}
1264
1265
1266
1267
1268
1269
1270
1271
1272static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
1273 int work_to_do)
1274{
1275 struct e1000_adapter *adapter = rx_ring->adapter;
1276 struct e1000_hw *hw = &adapter->hw;
1277 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
1278 struct net_device *netdev = adapter->netdev;
1279 struct pci_dev *pdev = adapter->pdev;
1280 struct e1000_buffer *buffer_info, *next_buffer;
1281 struct e1000_ps_page *ps_page;
1282 struct sk_buff *skb;
1283 unsigned int i, j;
1284 u32 length, staterr;
1285 int cleaned_count = 0;
1286 bool cleaned = false;
1287 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1288
1289 i = rx_ring->next_to_clean;
1290 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
1291 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1292 buffer_info = &rx_ring->buffer_info[i];
1293
1294 while (staterr & E1000_RXD_STAT_DD) {
1295 if (*work_done >= work_to_do)
1296 break;
1297 (*work_done)++;
1298 skb = buffer_info->skb;
1299 rmb();
1300
1301
1302 prefetch(skb->data - NET_IP_ALIGN);
1303
1304 i++;
1305 if (i == rx_ring->count)
1306 i = 0;
1307 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
1308 prefetch(next_rxd);
1309
1310 next_buffer = &rx_ring->buffer_info[i];
1311
1312 cleaned = true;
1313 cleaned_count++;
1314 dma_unmap_single(&pdev->dev, buffer_info->dma,
1315 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
1316 buffer_info->dma = 0;
1317
1318
1319 if (!(staterr & E1000_RXD_STAT_EOP))
1320 adapter->flags2 |= FLAG2_IS_DISCARDING;
1321
1322 if (adapter->flags2 & FLAG2_IS_DISCARDING) {
1323 e_dbg("Packet Split buffers didn't pick up the full packet\n");
1324 dev_kfree_skb_irq(skb);
1325 if (staterr & E1000_RXD_STAT_EOP)
1326 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1327 goto next_desc;
1328 }
1329
1330 if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
1331 !(netdev->features & NETIF_F_RXALL))) {
1332 dev_kfree_skb_irq(skb);
1333 goto next_desc;
1334 }
1335
1336 length = le16_to_cpu(rx_desc->wb.middle.length0);
1337
1338 if (!length) {
1339 e_dbg("Last part of the packet spanning multiple descriptors\n");
1340 dev_kfree_skb_irq(skb);
1341 goto next_desc;
1342 }
1343
1344
1345 skb_put(skb, length);
1346
1347 {
1348
1349
1350
1351 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
1352
1353
1354
1355
1356
1357
1358 if (l1 && (l1 <= copybreak) &&
1359 ((length + l1) <= adapter->rx_ps_bsize0)) {
1360 u8 *vaddr;
1361
1362 ps_page = &buffer_info->ps_pages[0];
1363
1364
1365
1366
1367
1368 dma_sync_single_for_cpu(&pdev->dev,
1369 ps_page->dma,
1370 PAGE_SIZE,
1371 DMA_FROM_DEVICE);
1372 vaddr = kmap_atomic(ps_page->page);
1373 memcpy(skb_tail_pointer(skb), vaddr, l1);
1374 kunmap_atomic(vaddr);
1375 dma_sync_single_for_device(&pdev->dev,
1376 ps_page->dma,
1377 PAGE_SIZE,
1378 DMA_FROM_DEVICE);
1379
1380
1381 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1382 if (!(netdev->features & NETIF_F_RXFCS))
1383 l1 -= 4;
1384 }
1385
1386 skb_put(skb, l1);
1387 goto copydone;
1388 }
1389 }
1390
1391 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1392 length = le16_to_cpu(rx_desc->wb.upper.length[j]);
1393 if (!length)
1394 break;
1395
1396 ps_page = &buffer_info->ps_pages[j];
1397 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1398 DMA_FROM_DEVICE);
1399 ps_page->dma = 0;
1400 skb_fill_page_desc(skb, j, ps_page->page, 0, length);
1401 ps_page->page = NULL;
1402 skb->len += length;
1403 skb->data_len += length;
1404 skb->truesize += PAGE_SIZE;
1405 }
1406
1407
1408
1409
1410 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1411 if (!(netdev->features & NETIF_F_RXFCS))
1412 pskb_trim(skb, skb->len - 4);
1413 }
1414
1415copydone:
1416 total_rx_bytes += skb->len;
1417 total_rx_packets++;
1418
1419 e1000_rx_checksum(adapter, staterr, skb);
1420
1421 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1422
1423 if (rx_desc->wb.upper.header_status &
1424 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
1425 adapter->rx_hdr_split++;
1426
1427 e1000_receive_skb(adapter, netdev, skb, staterr,
1428 rx_desc->wb.middle.vlan);
1429
1430next_desc:
1431 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
1432 buffer_info->skb = NULL;
1433
1434
1435 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1436 adapter->alloc_rx_buf(rx_ring, cleaned_count,
1437 GFP_ATOMIC);
1438 cleaned_count = 0;
1439 }
1440
1441
1442 rx_desc = next_rxd;
1443 buffer_info = next_buffer;
1444
1445 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1446 }
1447 rx_ring->next_to_clean = i;
1448
1449 cleaned_count = e1000_desc_unused(rx_ring);
1450 if (cleaned_count)
1451 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1452
1453 adapter->total_rx_bytes += total_rx_bytes;
1454 adapter->total_rx_packets += total_rx_packets;
1455 return cleaned;
1456}
1457
1458
1459
1460
1461static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
1462 u16 length)
1463{
1464 bi->page = NULL;
1465 skb->len += length;
1466 skb->data_len += length;
1467 skb->truesize += PAGE_SIZE;
1468}
1469
1470
1471
1472
1473
1474
1475
1476
1477static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1478 int work_to_do)
1479{
1480 struct e1000_adapter *adapter = rx_ring->adapter;
1481 struct net_device *netdev = adapter->netdev;
1482 struct pci_dev *pdev = adapter->pdev;
1483 union e1000_rx_desc_extended *rx_desc, *next_rxd;
1484 struct e1000_buffer *buffer_info, *next_buffer;
1485 u32 length, staterr;
1486 unsigned int i;
1487 int cleaned_count = 0;
1488 bool cleaned = false;
1489 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1490 struct skb_shared_info *shinfo;
1491
1492 i = rx_ring->next_to_clean;
1493 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
1494 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1495 buffer_info = &rx_ring->buffer_info[i];
1496
1497 while (staterr & E1000_RXD_STAT_DD) {
1498 struct sk_buff *skb;
1499
1500 if (*work_done >= work_to_do)
1501 break;
1502 (*work_done)++;
1503 rmb();
1504
1505 skb = buffer_info->skb;
1506 buffer_info->skb = NULL;
1507
1508 ++i;
1509 if (i == rx_ring->count)
1510 i = 0;
1511 next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
1512 prefetch(next_rxd);
1513
1514 next_buffer = &rx_ring->buffer_info[i];
1515
1516 cleaned = true;
1517 cleaned_count++;
1518 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
1519 DMA_FROM_DEVICE);
1520 buffer_info->dma = 0;
1521
1522 length = le16_to_cpu(rx_desc->wb.upper.length);
1523
1524
1525 if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
1526 ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
1527 !(netdev->features & NETIF_F_RXALL)))) {
1528
1529 buffer_info->skb = skb;
1530
1531 if (rx_ring->rx_skb_top)
1532 dev_kfree_skb_irq(rx_ring->rx_skb_top);
1533 rx_ring->rx_skb_top = NULL;
1534 goto next_desc;
1535 }
1536#define rxtop (rx_ring->rx_skb_top)
1537 if (!(staterr & E1000_RXD_STAT_EOP)) {
1538
1539 if (!rxtop) {
1540
1541 rxtop = skb;
1542 skb_fill_page_desc(rxtop, 0, buffer_info->page,
1543 0, length);
1544 } else {
1545
1546 shinfo = skb_shinfo(rxtop);
1547 skb_fill_page_desc(rxtop, shinfo->nr_frags,
1548 buffer_info->page, 0,
1549 length);
1550
1551 buffer_info->skb = skb;
1552 }
1553 e1000_consume_page(buffer_info, rxtop, length);
1554 goto next_desc;
1555 } else {
1556 if (rxtop) {
1557
1558 shinfo = skb_shinfo(rxtop);
1559 skb_fill_page_desc(rxtop, shinfo->nr_frags,
1560 buffer_info->page, 0,
1561 length);
1562
1563
1564
1565 buffer_info->skb = skb;
1566 skb = rxtop;
1567 rxtop = NULL;
1568 e1000_consume_page(buffer_info, skb, length);
1569 } else {
1570
1571
1572
1573 if (length <= copybreak &&
1574 skb_tailroom(skb) >= length) {
1575 u8 *vaddr;
1576 vaddr = kmap_atomic(buffer_info->page);
1577 memcpy(skb_tail_pointer(skb), vaddr,
1578 length);
1579 kunmap_atomic(vaddr);
1580
1581
1582
1583 skb_put(skb, length);
1584 } else {
1585 skb_fill_page_desc(skb, 0,
1586 buffer_info->page, 0,
1587 length);
1588 e1000_consume_page(buffer_info, skb,
1589 length);
1590 }
1591 }
1592 }
1593
1594
1595 e1000_rx_checksum(adapter, staterr, skb);
1596
1597 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1598
1599
1600 total_rx_bytes += skb->len;
1601 total_rx_packets++;
1602
1603
1604 if (!pskb_may_pull(skb, ETH_HLEN)) {
1605 e_err("pskb_may_pull failed.\n");
1606 dev_kfree_skb_irq(skb);
1607 goto next_desc;
1608 }
1609
1610 e1000_receive_skb(adapter, netdev, skb, staterr,
1611 rx_desc->wb.upper.vlan);
1612
1613next_desc:
1614 rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
1615
1616
1617 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
1618 adapter->alloc_rx_buf(rx_ring, cleaned_count,
1619 GFP_ATOMIC);
1620 cleaned_count = 0;
1621 }
1622
1623
1624 rx_desc = next_rxd;
1625 buffer_info = next_buffer;
1626
1627 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1628 }
1629 rx_ring->next_to_clean = i;
1630
1631 cleaned_count = e1000_desc_unused(rx_ring);
1632 if (cleaned_count)
1633 adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1634
1635 adapter->total_rx_bytes += total_rx_bytes;
1636 adapter->total_rx_packets += total_rx_packets;
1637 return cleaned;
1638}
1639
1640
1641
1642
1643
1644static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
1645{
1646 struct e1000_adapter *adapter = rx_ring->adapter;
1647 struct e1000_buffer *buffer_info;
1648 struct e1000_ps_page *ps_page;
1649 struct pci_dev *pdev = adapter->pdev;
1650 unsigned int i, j;
1651
1652
1653 for (i = 0; i < rx_ring->count; i++) {
1654 buffer_info = &rx_ring->buffer_info[i];
1655 if (buffer_info->dma) {
1656 if (adapter->clean_rx == e1000_clean_rx_irq)
1657 dma_unmap_single(&pdev->dev, buffer_info->dma,
1658 adapter->rx_buffer_len,
1659 DMA_FROM_DEVICE);
1660 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1661 dma_unmap_page(&pdev->dev, buffer_info->dma,
1662 PAGE_SIZE, DMA_FROM_DEVICE);
1663 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1664 dma_unmap_single(&pdev->dev, buffer_info->dma,
1665 adapter->rx_ps_bsize0,
1666 DMA_FROM_DEVICE);
1667 buffer_info->dma = 0;
1668 }
1669
1670 if (buffer_info->page) {
1671 put_page(buffer_info->page);
1672 buffer_info->page = NULL;
1673 }
1674
1675 if (buffer_info->skb) {
1676 dev_kfree_skb(buffer_info->skb);
1677 buffer_info->skb = NULL;
1678 }
1679
1680 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1681 ps_page = &buffer_info->ps_pages[j];
1682 if (!ps_page->page)
1683 break;
1684 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1685 DMA_FROM_DEVICE);
1686 ps_page->dma = 0;
1687 put_page(ps_page->page);
1688 ps_page->page = NULL;
1689 }
1690 }
1691
1692
1693 if (rx_ring->rx_skb_top) {
1694 dev_kfree_skb(rx_ring->rx_skb_top);
1695 rx_ring->rx_skb_top = NULL;
1696 }
1697
1698
1699 memset(rx_ring->desc, 0, rx_ring->size);
1700
1701 rx_ring->next_to_clean = 0;
1702 rx_ring->next_to_use = 0;
1703 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1704
1705 writel(0, rx_ring->head);
1706 if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
1707 e1000e_update_rdt_wa(rx_ring, 0);
1708 else
1709 writel(0, rx_ring->tail);
1710}
1711
1712static void e1000e_downshift_workaround(struct work_struct *work)
1713{
1714 struct e1000_adapter *adapter = container_of(work,
1715 struct e1000_adapter,
1716 downshift_task);
1717
1718 if (test_bit(__E1000_DOWN, &adapter->state))
1719 return;
1720
1721 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1722}
1723
1724
1725
1726
1727
1728
1729static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
1730{
1731 struct net_device *netdev = data;
1732 struct e1000_adapter *adapter = netdev_priv(netdev);
1733 struct e1000_hw *hw = &adapter->hw;
1734 u32 icr = er32(ICR);
1735
1736
1737 if (icr & E1000_ICR_LSC) {
1738 hw->mac.get_link_status = true;
1739
1740
1741
1742 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1743 (!(er32(STATUS) & E1000_STATUS_LU)))
1744 schedule_work(&adapter->downshift_task);
1745
1746
1747
1748
1749
1750 if (netif_carrier_ok(netdev) &&
1751 adapter->flags & FLAG_RX_NEEDS_RESTART) {
1752
1753 u32 rctl = er32(RCTL);
1754 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1755 adapter->flags |= FLAG_RESTART_NOW;
1756 }
1757
1758 if (!test_bit(__E1000_DOWN, &adapter->state))
1759 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1760 }
1761
1762
1763 if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
1764 u32 pbeccsts = er32(PBECCSTS);
1765
1766 adapter->corr_errors +=
1767 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1768 adapter->uncorr_errors +=
1769 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1770 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1771
1772
1773 schedule_work(&adapter->reset_task);
1774
1775
1776 return IRQ_HANDLED;
1777 }
1778
1779 if (napi_schedule_prep(&adapter->napi)) {
1780 adapter->total_tx_bytes = 0;
1781 adapter->total_tx_packets = 0;
1782 adapter->total_rx_bytes = 0;
1783 adapter->total_rx_packets = 0;
1784 __napi_schedule(&adapter->napi);
1785 }
1786
1787 return IRQ_HANDLED;
1788}
1789
1790
1791
1792
1793
1794
1795static irqreturn_t e1000_intr(int __always_unused irq, void *data)
1796{
1797 struct net_device *netdev = data;
1798 struct e1000_adapter *adapter = netdev_priv(netdev);
1799 struct e1000_hw *hw = &adapter->hw;
1800 u32 rctl, icr = er32(ICR);
1801
1802 if (!icr || test_bit(__E1000_DOWN, &adapter->state))
1803 return IRQ_NONE;
1804
1805
1806
1807
1808 if (!(icr & E1000_ICR_INT_ASSERTED))
1809 return IRQ_NONE;
1810
1811
1812
1813
1814
1815
1816 if (icr & E1000_ICR_LSC) {
1817 hw->mac.get_link_status = true;
1818
1819
1820
1821 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1822 (!(er32(STATUS) & E1000_STATUS_LU)))
1823 schedule_work(&adapter->downshift_task);
1824
1825
1826
1827
1828
1829
1830 if (netif_carrier_ok(netdev) &&
1831 (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1832
1833 rctl = er32(RCTL);
1834 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1835 adapter->flags |= FLAG_RESTART_NOW;
1836 }
1837
1838 if (!test_bit(__E1000_DOWN, &adapter->state))
1839 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1840 }
1841
1842
1843 if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
1844 u32 pbeccsts = er32(PBECCSTS);
1845
1846 adapter->corr_errors +=
1847 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1848 adapter->uncorr_errors +=
1849 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1850 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1851
1852
1853 schedule_work(&adapter->reset_task);
1854
1855
1856 return IRQ_HANDLED;
1857 }
1858
1859 if (napi_schedule_prep(&adapter->napi)) {
1860 adapter->total_tx_bytes = 0;
1861 adapter->total_tx_packets = 0;
1862 adapter->total_rx_bytes = 0;
1863 adapter->total_rx_packets = 0;
1864 __napi_schedule(&adapter->napi);
1865 }
1866
1867 return IRQ_HANDLED;
1868}
1869
1870static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
1871{
1872 struct net_device *netdev = data;
1873 struct e1000_adapter *adapter = netdev_priv(netdev);
1874 struct e1000_hw *hw = &adapter->hw;
1875 u32 icr = er32(ICR);
1876
1877 if (!(icr & E1000_ICR_INT_ASSERTED)) {
1878 if (!test_bit(__E1000_DOWN, &adapter->state))
1879 ew32(IMS, E1000_IMS_OTHER);
1880 return IRQ_NONE;
1881 }
1882
1883 if (icr & adapter->eiac_mask)
1884 ew32(ICS, (icr & adapter->eiac_mask));
1885
1886 if (icr & E1000_ICR_OTHER) {
1887 if (!(icr & E1000_ICR_LSC))
1888 goto no_link_interrupt;
1889 hw->mac.get_link_status = true;
1890
1891 if (!test_bit(__E1000_DOWN, &adapter->state))
1892 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1893 }
1894
1895no_link_interrupt:
1896 if (!test_bit(__E1000_DOWN, &adapter->state))
1897 ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
1898
1899 return IRQ_HANDLED;
1900}
1901
1902static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
1903{
1904 struct net_device *netdev = data;
1905 struct e1000_adapter *adapter = netdev_priv(netdev);
1906 struct e1000_hw *hw = &adapter->hw;
1907 struct e1000_ring *tx_ring = adapter->tx_ring;
1908
1909 adapter->total_tx_bytes = 0;
1910 adapter->total_tx_packets = 0;
1911
1912 if (!e1000_clean_tx_irq(tx_ring))
1913
1914 ew32(ICS, tx_ring->ims_val);
1915
1916 return IRQ_HANDLED;
1917}
1918
1919static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
1920{
1921 struct net_device *netdev = data;
1922 struct e1000_adapter *adapter = netdev_priv(netdev);
1923 struct e1000_ring *rx_ring = adapter->rx_ring;
1924
1925
1926
1927
1928 if (rx_ring->set_itr) {
1929 writel(1000000000 / (rx_ring->itr_val * 256),
1930 rx_ring->itr_register);
1931 rx_ring->set_itr = 0;
1932 }
1933
1934 if (napi_schedule_prep(&adapter->napi)) {
1935 adapter->total_rx_bytes = 0;
1936 adapter->total_rx_packets = 0;
1937 __napi_schedule(&adapter->napi);
1938 }
1939 return IRQ_HANDLED;
1940}
1941
1942
1943
1944
1945
1946
1947
1948static void e1000_configure_msix(struct e1000_adapter *adapter)
1949{
1950 struct e1000_hw *hw = &adapter->hw;
1951 struct e1000_ring *rx_ring = adapter->rx_ring;
1952 struct e1000_ring *tx_ring = adapter->tx_ring;
1953 int vector = 0;
1954 u32 ctrl_ext, ivar = 0;
1955
1956 adapter->eiac_mask = 0;
1957
1958
1959 if (hw->mac.type == e1000_82574) {
1960 u32 rfctl = er32(RFCTL);
1961 rfctl |= E1000_RFCTL_ACK_DIS;
1962 ew32(RFCTL, rfctl);
1963 }
1964
1965
1966 rx_ring->ims_val = E1000_IMS_RXQ0;
1967 adapter->eiac_mask |= rx_ring->ims_val;
1968 if (rx_ring->itr_val)
1969 writel(1000000000 / (rx_ring->itr_val * 256),
1970 rx_ring->itr_register);
1971 else
1972 writel(1, rx_ring->itr_register);
1973 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1974
1975
1976 tx_ring->ims_val = E1000_IMS_TXQ0;
1977 vector++;
1978 if (tx_ring->itr_val)
1979 writel(1000000000 / (tx_ring->itr_val * 256),
1980 tx_ring->itr_register);
1981 else
1982 writel(1, tx_ring->itr_register);
1983 adapter->eiac_mask |= tx_ring->ims_val;
1984 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1985
1986
1987 vector++;
1988 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
1989 if (rx_ring->itr_val)
1990 writel(1000000000 / (rx_ring->itr_val * 256),
1991 hw->hw_addr + E1000_EITR_82574(vector));
1992 else
1993 writel(1, hw->hw_addr + E1000_EITR_82574(vector));
1994
1995
1996 ivar |= (1 << 31);
1997
1998 ew32(IVAR, ivar);
1999
2000
2001 ctrl_ext = er32(CTRL_EXT);
2002 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
2003
2004
2005 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
2006 ctrl_ext |= E1000_CTRL_EXT_EIAME;
2007 ew32(CTRL_EXT, ctrl_ext);
2008 e1e_flush();
2009}
2010
2011void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
2012{
2013 if (adapter->msix_entries) {
2014 pci_disable_msix(adapter->pdev);
2015 kfree(adapter->msix_entries);
2016 adapter->msix_entries = NULL;
2017 } else if (adapter->flags & FLAG_MSI_ENABLED) {
2018 pci_disable_msi(adapter->pdev);
2019 adapter->flags &= ~FLAG_MSI_ENABLED;
2020 }
2021}
2022
2023
2024
2025
2026
2027
2028
2029void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
2030{
2031 int err;
2032 int i;
2033
2034 switch (adapter->int_mode) {
2035 case E1000E_INT_MODE_MSIX:
2036 if (adapter->flags & FLAG_HAS_MSIX) {
2037 adapter->num_vectors = 3;
2038 adapter->msix_entries = kcalloc(adapter->num_vectors,
2039 sizeof(struct
2040 msix_entry),
2041 GFP_KERNEL);
2042 if (adapter->msix_entries) {
2043 for (i = 0; i < adapter->num_vectors; i++)
2044 adapter->msix_entries[i].entry = i;
2045
2046 err = pci_enable_msix(adapter->pdev,
2047 adapter->msix_entries,
2048 adapter->num_vectors);
2049 if (err == 0)
2050 return;
2051 }
2052
2053 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
2054 e1000e_reset_interrupt_capability(adapter);
2055 }
2056 adapter->int_mode = E1000E_INT_MODE_MSI;
2057
2058 case E1000E_INT_MODE_MSI:
2059 if (!pci_enable_msi(adapter->pdev)) {
2060 adapter->flags |= FLAG_MSI_ENABLED;
2061 } else {
2062 adapter->int_mode = E1000E_INT_MODE_LEGACY;
2063 e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
2064 }
2065
2066 case E1000E_INT_MODE_LEGACY:
2067
2068 break;
2069 }
2070
2071
2072 adapter->num_vectors = 1;
2073}
2074
2075
2076
2077
2078
2079
2080
2081static int e1000_request_msix(struct e1000_adapter *adapter)
2082{
2083 struct net_device *netdev = adapter->netdev;
2084 int err = 0, vector = 0;
2085
2086 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2087 snprintf(adapter->rx_ring->name,
2088 sizeof(adapter->rx_ring->name) - 1,
2089 "%s-rx-0", netdev->name);
2090 else
2091 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
2092 err = request_irq(adapter->msix_entries[vector].vector,
2093 e1000_intr_msix_rx, 0, adapter->rx_ring->name,
2094 netdev);
2095 if (err)
2096 return err;
2097 adapter->rx_ring->itr_register = adapter->hw.hw_addr +
2098 E1000_EITR_82574(vector);
2099 adapter->rx_ring->itr_val = adapter->itr;
2100 vector++;
2101
2102 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2103 snprintf(adapter->tx_ring->name,
2104 sizeof(adapter->tx_ring->name) - 1,
2105 "%s-tx-0", netdev->name);
2106 else
2107 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
2108 err = request_irq(adapter->msix_entries[vector].vector,
2109 e1000_intr_msix_tx, 0, adapter->tx_ring->name,
2110 netdev);
2111 if (err)
2112 return err;
2113 adapter->tx_ring->itr_register = adapter->hw.hw_addr +
2114 E1000_EITR_82574(vector);
2115 adapter->tx_ring->itr_val = adapter->itr;
2116 vector++;
2117
2118 err = request_irq(adapter->msix_entries[vector].vector,
2119 e1000_msix_other, 0, netdev->name, netdev);
2120 if (err)
2121 return err;
2122
2123 e1000_configure_msix(adapter);
2124
2125 return 0;
2126}
2127
2128
2129
2130
2131
2132
2133
2134static int e1000_request_irq(struct e1000_adapter *adapter)
2135{
2136 struct net_device *netdev = adapter->netdev;
2137 int err;
2138
2139 if (adapter->msix_entries) {
2140 err = e1000_request_msix(adapter);
2141 if (!err)
2142 return err;
2143
2144 e1000e_reset_interrupt_capability(adapter);
2145 adapter->int_mode = E1000E_INT_MODE_MSI;
2146 e1000e_set_interrupt_capability(adapter);
2147 }
2148 if (adapter->flags & FLAG_MSI_ENABLED) {
2149 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
2150 netdev->name, netdev);
2151 if (!err)
2152 return err;
2153
2154
2155 e1000e_reset_interrupt_capability(adapter);
2156 adapter->int_mode = E1000E_INT_MODE_LEGACY;
2157 }
2158
2159 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
2160 netdev->name, netdev);
2161 if (err)
2162 e_err("Unable to allocate interrupt, Error: %d\n", err);
2163
2164 return err;
2165}
2166
2167static void e1000_free_irq(struct e1000_adapter *adapter)
2168{
2169 struct net_device *netdev = adapter->netdev;
2170
2171 if (adapter->msix_entries) {
2172 int vector = 0;
2173
2174 free_irq(adapter->msix_entries[vector].vector, netdev);
2175 vector++;
2176
2177 free_irq(adapter->msix_entries[vector].vector, netdev);
2178 vector++;
2179
2180
2181 free_irq(adapter->msix_entries[vector].vector, netdev);
2182 return;
2183 }
2184
2185 free_irq(adapter->pdev->irq, netdev);
2186}
2187
2188
2189
2190
2191static void e1000_irq_disable(struct e1000_adapter *adapter)
2192{
2193 struct e1000_hw *hw = &adapter->hw;
2194
2195 ew32(IMC, ~0);
2196 if (adapter->msix_entries)
2197 ew32(EIAC_82574, 0);
2198 e1e_flush();
2199
2200 if (adapter->msix_entries) {
2201 int i;
2202 for (i = 0; i < adapter->num_vectors; i++)
2203 synchronize_irq(adapter->msix_entries[i].vector);
2204 } else {
2205 synchronize_irq(adapter->pdev->irq);
2206 }
2207}
2208
2209
2210
2211
2212static void e1000_irq_enable(struct e1000_adapter *adapter)
2213{
2214 struct e1000_hw *hw = &adapter->hw;
2215
2216 if (adapter->msix_entries) {
2217 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2218 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
2219 } else if (hw->mac.type == e1000_pch_lpt) {
2220 ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
2221 } else {
2222 ew32(IMS, IMS_ENABLE_MASK);
2223 }
2224 e1e_flush();
2225}
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236void e1000e_get_hw_control(struct e1000_adapter *adapter)
2237{
2238 struct e1000_hw *hw = &adapter->hw;
2239 u32 ctrl_ext;
2240 u32 swsm;
2241
2242
2243 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2244 swsm = er32(SWSM);
2245 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2246 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2247 ctrl_ext = er32(CTRL_EXT);
2248 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2249 }
2250}
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262void e1000e_release_hw_control(struct e1000_adapter *adapter)
2263{
2264 struct e1000_hw *hw = &adapter->hw;
2265 u32 ctrl_ext;
2266 u32 swsm;
2267
2268
2269 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2270 swsm = er32(SWSM);
2271 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2272 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2273 ctrl_ext = er32(CTRL_EXT);
2274 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2275 }
2276}
2277
2278
2279
2280
2281static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2282 struct e1000_ring *ring)
2283{
2284 struct pci_dev *pdev = adapter->pdev;
2285
2286 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2287 GFP_KERNEL);
2288 if (!ring->desc)
2289 return -ENOMEM;
2290
2291 return 0;
2292}
2293
2294
2295
2296
2297
2298
2299
2300int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
2301{
2302 struct e1000_adapter *adapter = tx_ring->adapter;
2303 int err = -ENOMEM, size;
2304
2305 size = sizeof(struct e1000_buffer) * tx_ring->count;
2306 tx_ring->buffer_info = vzalloc(size);
2307 if (!tx_ring->buffer_info)
2308 goto err;
2309
2310
2311 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2312 tx_ring->size = ALIGN(tx_ring->size, 4096);
2313
2314 err = e1000_alloc_ring_dma(adapter, tx_ring);
2315 if (err)
2316 goto err;
2317
2318 tx_ring->next_to_use = 0;
2319 tx_ring->next_to_clean = 0;
2320
2321 return 0;
2322err:
2323 vfree(tx_ring->buffer_info);
2324 e_err("Unable to allocate memory for the transmit descriptor ring\n");
2325 return err;
2326}
2327
2328
2329
2330
2331
2332
2333
2334int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
2335{
2336 struct e1000_adapter *adapter = rx_ring->adapter;
2337 struct e1000_buffer *buffer_info;
2338 int i, size, desc_len, err = -ENOMEM;
2339
2340 size = sizeof(struct e1000_buffer) * rx_ring->count;
2341 rx_ring->buffer_info = vzalloc(size);
2342 if (!rx_ring->buffer_info)
2343 goto err;
2344
2345 for (i = 0; i < rx_ring->count; i++) {
2346 buffer_info = &rx_ring->buffer_info[i];
2347 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2348 sizeof(struct e1000_ps_page),
2349 GFP_KERNEL);
2350 if (!buffer_info->ps_pages)
2351 goto err_pages;
2352 }
2353
2354 desc_len = sizeof(union e1000_rx_desc_packet_split);
2355
2356
2357 rx_ring->size = rx_ring->count * desc_len;
2358 rx_ring->size = ALIGN(rx_ring->size, 4096);
2359
2360 err = e1000_alloc_ring_dma(adapter, rx_ring);
2361 if (err)
2362 goto err_pages;
2363
2364 rx_ring->next_to_clean = 0;
2365 rx_ring->next_to_use = 0;
2366 rx_ring->rx_skb_top = NULL;
2367
2368 return 0;
2369
2370err_pages:
2371 for (i = 0; i < rx_ring->count; i++) {
2372 buffer_info = &rx_ring->buffer_info[i];
2373 kfree(buffer_info->ps_pages);
2374 }
2375err:
2376 vfree(rx_ring->buffer_info);
2377 e_err("Unable to allocate memory for the receive descriptor ring\n");
2378 return err;
2379}
2380
2381
2382
2383
2384
2385static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2386{
2387 struct e1000_adapter *adapter = tx_ring->adapter;
2388 struct e1000_buffer *buffer_info;
2389 unsigned long size;
2390 unsigned int i;
2391
2392 for (i = 0; i < tx_ring->count; i++) {
2393 buffer_info = &tx_ring->buffer_info[i];
2394 e1000_put_txbuf(tx_ring, buffer_info);
2395 }
2396
2397 netdev_reset_queue(adapter->netdev);
2398 size = sizeof(struct e1000_buffer) * tx_ring->count;
2399 memset(tx_ring->buffer_info, 0, size);
2400
2401 memset(tx_ring->desc, 0, tx_ring->size);
2402
2403 tx_ring->next_to_use = 0;
2404 tx_ring->next_to_clean = 0;
2405
2406 writel(0, tx_ring->head);
2407 if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
2408 e1000e_update_tdt_wa(tx_ring, 0);
2409 else
2410 writel(0, tx_ring->tail);
2411}
2412
2413
2414
2415
2416
2417
2418
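/**
 * e1000e_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring
 *
 * Free all transmit software resources
 **/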
2419void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
2420{
2421 struct e1000_adapter *adapter = tx_ring->adapter;
2422 struct pci_dev *pdev = adapter->pdev;
2423
2424 e1000_clean_tx_ring(tx_ring);
2425
2426 vfree(tx_ring->buffer_info);
2427 tx_ring->buffer_info = NULL;
2428
2429 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2430 tx_ring->dma);
2431 tx_ring->desc = NULL;
2432}
2433
2434
2435
2436
2437
2438
2439
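/**
 * e1000e_free_rx_resources - Free Rx Resources
 * @rx_ring: Rx descriptor ring
 *
 * Free all receive software resources
 **/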
2440void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
2441{
2442 struct e1000_adapter *adapter = rx_ring->adapter;
2443 struct pci_dev *pdev = adapter->pdev;
2444 int i;
2445
2446 e1000_clean_rx_ring(rx_ring);
2447
2448 for (i = 0; i < rx_ring->count; i++)
2449 kfree(rx_ring->buffer_info[i].ps_pages);
2450
2451 vfree(rx_ring->buffer_info);
2452 rx_ring->buffer_info = NULL;
2453
2454 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2455 rx_ring->dma);
2456 rx_ring->desc = NULL;
2457}
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
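/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packet and byte counts seen since the
 * last interrupt.  Per-interrupt computation gives faster updates and a
 * more accurate ITR for the current traffic pattern; the thresholds below
 * trade interrupt response time against bulk throughput.
 **/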
2475static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2476{
2477 unsigned int retval = itr_setting;
2478
2479 if (packets == 0)
2480 return itr_setting;
2481
2482 switch (itr_setting) {
2483 case lowest_latency:
2484
2485 if (bytes / packets > 8000)
2486 retval = bulk_latency;
2487 else if ((packets < 5) && (bytes > 512))
2488 retval = low_latency;
2489 break;
2490 case low_latency:
2491 if (bytes > 10000) {
2492
2493 if (bytes / packets > 8000)
2494 retval = bulk_latency;
2495 else if ((packets < 10) || ((bytes / packets) > 1200))
2496 retval = bulk_latency;
2497 else if ((packets > 35))
2498 retval = lowest_latency;
2499 } else if (bytes / packets > 2000) {
2500 retval = bulk_latency;
2501 } else if (packets <= 2 && bytes < 512) {
2502 retval = lowest_latency;
2503 }
2504 break;
2505 case bulk_latency:
2506 if (bytes > 25000) {
2507 if (packets > 35)
2508 retval = low_latency;
2509 } else if (bytes < 6000) {
2510 retval = low_latency;
2511 }
2512 break;
2513 }
2514
2515 return retval;
2516}
2517
2518static void e1000_set_itr(struct e1000_adapter *adapter)
2519{
2520 u16 current_itr;
2521 u32 new_itr = adapter->itr;
2522
2523
2524 if (adapter->link_speed != SPEED_1000) {
2525 current_itr = 0;
2526 new_itr = 4000;
2527 goto set_itr_now;
2528 }
2529
2530 if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2531 new_itr = 0;
2532 goto set_itr_now;
2533 }
2534
2535 adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
2536 adapter->total_tx_packets,
2537 adapter->total_tx_bytes);
2538
2539 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2540 adapter->tx_itr = low_latency;
2541
2542 adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
2543 adapter->total_rx_packets,
2544 adapter->total_rx_bytes);
2545
2546 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2547 adapter->rx_itr = low_latency;
2548
2549 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2550
2551
2552 switch (current_itr) {
2553 case lowest_latency:
2554 new_itr = 70000;
2555 break;
2556 case low_latency:
2557 new_itr = 20000;
2558 break;
2559 case bulk_latency:
2560 new_itr = 4000;
2561 break;
2562 default:
2563 break;
2564 }
2565
2566set_itr_now:
2567 if (new_itr != adapter->itr) {
2568
2569
2570
2571
2572 new_itr = new_itr > adapter->itr ?
2573 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
2574 adapter->itr = new_itr;
2575 adapter->rx_ring->itr_val = new_itr;
2576 if (adapter->msix_entries)
2577 adapter->rx_ring->set_itr = 1;
2578 else
2579 e1000e_write_itr(adapter, new_itr);
2580 }
2581}
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
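/**
 * e1000e_write_itr - write the ITR value to the appropriate registers
 * @adapter: address of board private structure
 * @itr: new ITR value to program
 *
 * Converts the requested ITR (interrupts per second) into a register value
 * and writes it to the 82574 EITR registers when MSI-X is in use, or to
 * the legacy ITR register otherwise.
 **/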
2592void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
2593{
2594 struct e1000_hw *hw = &adapter->hw;
2595 u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
2596
2597 if (adapter->msix_entries) {
2598 int vector;
2599
2600 for (vector = 0; vector < adapter->num_vectors; vector++)
2601 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
2602 } else {
2603 ew32(ITR, new_itr);
2604 }
2605}
2606
2607
2608
2609
2610
2611static int e1000_alloc_queues(struct e1000_adapter *adapter)
2612{
2613 int size = sizeof(struct e1000_ring);
2614
2615 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
2616 if (!adapter->tx_ring)
2617 goto err;
2618 adapter->tx_ring->count = adapter->tx_ring_count;
2619 adapter->tx_ring->adapter = adapter;
2620
2621 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
2622 if (!adapter->rx_ring)
2623 goto err;
2624 adapter->rx_ring->count = adapter->rx_ring_count;
2625 adapter->rx_ring->adapter = adapter;
2626
2627 return 0;
2628err:
2629 e_err("Unable to allocate memory for queues\n");
2630 kfree(adapter->rx_ring);
2631 kfree(adapter->tx_ring);
2632 return -ENOMEM;
2633}
2634
2635
2636
2637
2638
2639
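/**
 * e1000e_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @weight: number of packets driver is allowed to process this poll
 **/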
2640static int e1000e_poll(struct napi_struct *napi, int weight)
2641{
2642 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2643 napi);
2644 struct e1000_hw *hw = &adapter->hw;
2645 struct net_device *poll_dev = adapter->netdev;
2646 int tx_cleaned = 1, work_done = 0;
2647
2648 adapter = netdev_priv(poll_dev);
2649
2650 if (!adapter->msix_entries ||
2651 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2652 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2653
2654 adapter->clean_rx(adapter->rx_ring, &work_done, weight);
2655
2656 if (!tx_cleaned)
2657 work_done = weight;
2658
2659
2660 if (work_done < weight) {
2661 if (adapter->itr_setting & 3)
2662 e1000_set_itr(adapter);
2663 napi_complete(napi);
2664 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2665 if (adapter->msix_entries)
2666 ew32(IMS, adapter->rx_ring->ims_val);
2667 else
2668 e1000_irq_enable(adapter);
2669 }
2670 }
2671
2672 return work_done;
2673}
2674
2675static int e1000_vlan_rx_add_vid(struct net_device *netdev,
2676 __always_unused __be16 proto, u16 vid)
2677{
2678 struct e1000_adapter *adapter = netdev_priv(netdev);
2679 struct e1000_hw *hw = &adapter->hw;
2680 u32 vfta, index;
2681
2682
2683 if ((adapter->hw.mng_cookie.status &
2684 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2685 (vid == adapter->mng_vlan_id))
2686 return 0;
2687
2688
2689 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2690 index = (vid >> 5) & 0x7F;
2691 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2692 vfta |= (1 << (vid & 0x1F));
2693 hw->mac.ops.write_vfta(hw, index, vfta);
2694 }
2695
2696 set_bit(vid, adapter->active_vlans);
2697
2698 return 0;
2699}
2700
2701static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
2702 __always_unused __be16 proto, u16 vid)
2703{
2704 struct e1000_adapter *adapter = netdev_priv(netdev);
2705 struct e1000_hw *hw = &adapter->hw;
2706 u32 vfta, index;
2707
2708 if ((adapter->hw.mng_cookie.status &
2709 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2710 (vid == adapter->mng_vlan_id)) {
2711
2712 e1000e_release_hw_control(adapter);
2713 return 0;
2714 }
2715
2716
2717 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2718 index = (vid >> 5) & 0x7F;
2719 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2720 vfta &= ~(1 << (vid & 0x1F));
2721 hw->mac.ops.write_vfta(hw, index, vfta);
2722 }
2723
2724 clear_bit(vid, adapter->active_vlans);
2725
2726 return 0;
2727}
2728
2729
2730
2731
2732
2733static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2734{
2735 struct net_device *netdev = adapter->netdev;
2736 struct e1000_hw *hw = &adapter->hw;
2737 u32 rctl;
2738
2739 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2740
2741 rctl = er32(RCTL);
2742 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2743 ew32(RCTL, rctl);
2744
2745 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2746 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
2747 adapter->mng_vlan_id);
2748 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2749 }
2750 }
2751}
2752
2753
2754
2755
2756
2757static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2758{
2759 struct e1000_hw *hw = &adapter->hw;
2760 u32 rctl;
2761
2762 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2763
2764 rctl = er32(RCTL);
2765 rctl |= E1000_RCTL_VFE;
2766 rctl &= ~E1000_RCTL_CFIEN;
2767 ew32(RCTL, rctl);
2768 }
2769}
2770
2771
2772
2773
2774
2775static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
2776{
2777 struct e1000_hw *hw = &adapter->hw;
2778 u32 ctrl;
2779
2780
2781 ctrl = er32(CTRL);
2782 ctrl &= ~E1000_CTRL_VME;
2783 ew32(CTRL, ctrl);
2784}
2785
2786
2787
2788
2789
2790static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2791{
2792 struct e1000_hw *hw = &adapter->hw;
2793 u32 ctrl;
2794
2795
2796 ctrl = er32(CTRL);
2797 ctrl |= E1000_CTRL_VME;
2798 ew32(CTRL, ctrl);
2799}
2800
2801static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2802{
2803 struct net_device *netdev = adapter->netdev;
2804 u16 vid = adapter->hw.mng_cookie.vlan_id;
2805 u16 old_vid = adapter->mng_vlan_id;
2806
2807 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2808 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
2809 adapter->mng_vlan_id = vid;
2810 }
2811
2812 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2813 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
2814}
2815
2816static void e1000_restore_vlan(struct e1000_adapter *adapter)
2817{
2818 u16 vid;
2819
2820 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
2821
2822 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2823 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2824}
2825
2826static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2827{
2828 struct e1000_hw *hw = &adapter->hw;
2829 u32 manc, manc2h, mdef, i, j;
2830
2831 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2832 return;
2833
2834 manc = er32(MANC);
2835
2836
2837
2838
2839
2840 manc |= E1000_MANC_EN_MNG2HOST;
2841 manc2h = er32(MANC2H);
2842
2843 switch (hw->mac.type) {
2844 default:
2845 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2846 break;
2847 case e1000_82574:
2848 case e1000_82583:
2849
2850
2851
2852 for (i = 0, j = 0; i < 8; i++) {
2853 mdef = er32(MDEF(i));
2854
2855
2856 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2857 continue;
2858
2859
2860 if (mdef)
2861 manc2h |= (1 << i);
2862
2863 j |= mdef;
2864 }
2865
2866 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2867 break;
2868
2869
2870 for (i = 0, j = 0; i < 8; i++)
2871 if (er32(MDEF(i)) == 0) {
2872 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2873 E1000_MDEF_PORT_664));
2874 manc2h |= (1 << 1);
2875 j++;
2876 break;
2877 }
2878
2879 if (!j)
2880 e_warn("Unable to create IPMI pass-through filter\n");
2881 break;
2882 }
2883
2884 ew32(MANC2H, manc2h);
2885 ew32(MANC, manc);
2886}
2887
2888
2889
2890
2891
2892
2893
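/**
 * e1000_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/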
2894static void e1000_configure_tx(struct e1000_adapter *adapter)
2895{
2896 struct e1000_hw *hw = &adapter->hw;
2897 struct e1000_ring *tx_ring = adapter->tx_ring;
2898 u64 tdba;
2899 u32 tdlen, tarc;
2900
2901
2902 tdba = tx_ring->dma;
2903 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2904 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2905 ew32(TDBAH(0), (tdba >> 32));
2906 ew32(TDLEN(0), tdlen);
2907 ew32(TDH(0), 0);
2908 ew32(TDT(0), 0);
2909 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2910 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
2911
2912
2913 ew32(TIDV, adapter->tx_int_delay);
2914
2915 ew32(TADV, adapter->tx_abs_int_delay);
2916
2917 if (adapter->flags2 & FLAG2_DMA_BURST) {
2918 u32 txdctl = er32(TXDCTL(0));
2919 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2920 E1000_TXDCTL_WTHRESH);
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2931 ew32(TXDCTL(0), txdctl);
2932 }
2933
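	/* erratum work around: set txdctl the same for both queues */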
2934 ew32(TXDCTL(1), er32(TXDCTL(0)));
2935
2936 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2937 tarc = er32(TARC(0));
2938
2939
2940
2941#define SPEED_MODE_BIT (1 << 21)
2942 tarc |= SPEED_MODE_BIT;
2943 ew32(TARC(0), tarc);
2944 }
2945
2946
2947 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2948 tarc = er32(TARC(0));
2949 tarc |= 1;
2950 ew32(TARC(0), tarc);
2951 tarc = er32(TARC(1));
2952 tarc |= 1;
2953 ew32(TARC(1), tarc);
2954 }
2955
2956
2957 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2958
2959
2960 if (adapter->tx_int_delay)
2961 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2962
2963
2964 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2965
2966 hw->mac.ops.config_collision_dist(hw);
2967}
2968
2969
2970
2971
2972
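/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/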
2973#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2974 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
2975static void e1000_setup_rctl(struct e1000_adapter *adapter)
2976{
2977 struct e1000_hw *hw = &adapter->hw;
2978 u32 rctl, rfctl;
2979 u32 pages = 0;
2980
2981
2982 if (hw->mac.type >= e1000_pch2lan) {
2983 s32 ret_val;
2984
2985 if (adapter->netdev->mtu > ETH_DATA_LEN)
2986 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2987 else
2988 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2989
2990 if (ret_val)
2991 e_dbg("failed to enable jumbo frame workaround mode\n");
2992 }
2993
2994
2995 rctl = er32(RCTL);
2996 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2997 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2998 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2999 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3000
3001
3002 rctl &= ~E1000_RCTL_SBP;
3003
3004
3005 if (adapter->netdev->mtu <= ETH_DATA_LEN)
3006 rctl &= ~E1000_RCTL_LPE;
3007 else
3008 rctl |= E1000_RCTL_LPE;
3009
3010
3011
3012
3013
3014 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
3015 rctl |= E1000_RCTL_SECRC;
3016
3017
3018 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
3019 u16 phy_data;
3020
3021 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
3022 phy_data &= 0xfff8;
3023 phy_data |= (1 << 2);
3024 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
3025
3026 e1e_rphy(hw, 22, &phy_data);
3027 phy_data &= 0x0fff;
3028 phy_data |= (1 << 14);
3029 e1e_wphy(hw, 0x10, 0x2823);
3030 e1e_wphy(hw, 0x11, 0x0003);
3031 e1e_wphy(hw, 22, phy_data);
3032 }
3033
3034
3035 rctl &= ~E1000_RCTL_SZ_4096;
3036 rctl |= E1000_RCTL_BSEX;
3037 switch (adapter->rx_buffer_len) {
3038 case 2048:
3039 default:
3040 rctl |= E1000_RCTL_SZ_2048;
3041 rctl &= ~E1000_RCTL_BSEX;
3042 break;
3043 case 4096:
3044 rctl |= E1000_RCTL_SZ_4096;
3045 break;
3046 case 8192:
3047 rctl |= E1000_RCTL_SZ_8192;
3048 break;
3049 case 16384:
3050 rctl |= E1000_RCTL_SZ_16384;
3051 break;
3052 }
3053
3054
3055 rfctl = er32(RFCTL);
3056 rfctl |= E1000_RFCTL_EXTEN;
3057 ew32(RFCTL, rfctl);
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
3074 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
3075 adapter->rx_ps_pages = pages;
3076 else
3077 adapter->rx_ps_pages = 0;
3078
3079 if (adapter->rx_ps_pages) {
3080 u32 psrctl = 0;
3081
3082
3083 rctl |= E1000_RCTL_DTYP_PS;
3084
3085 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
3086
3087 switch (adapter->rx_ps_pages) {
3088 case 3:
3089 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
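			/* fall through */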
3090
3091 case 2:
3092 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
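			/* fall through */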
3093
3094 case 1:
3095 psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
3096 break;
3097 }
3098
3099 ew32(PSRCTL, psrctl);
3100 }
3101
3102
3103 if (adapter->netdev->features & NETIF_F_RXALL) {
3104
3105
3106
3107 rctl |= (E1000_RCTL_SBP |
3108 E1000_RCTL_BAM |
3109 E1000_RCTL_PMCF);
3110
3111 rctl &= ~(E1000_RCTL_VFE |
3112 E1000_RCTL_DPF |
3113 E1000_RCTL_CFIEN);
3114
3115
3116
3117 }
3118
3119 ew32(RCTL, rctl);
3120
3121 adapter->flags &= ~FLAG_RESTART_NOW;
3122}
3123
3124
3125
3126
3127
3128
3129
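/**
 * e1000_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/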
3130static void e1000_configure_rx(struct e1000_adapter *adapter)
3131{
3132 struct e1000_hw *hw = &adapter->hw;
3133 struct e1000_ring *rx_ring = adapter->rx_ring;
3134 u64 rdba;
3135 u32 rdlen, rctl, rxcsum, ctrl_ext;
3136
3137 if (adapter->rx_ps_pages) {
3138
3139 rdlen = rx_ring->count *
3140 sizeof(union e1000_rx_desc_packet_split);
3141 adapter->clean_rx = e1000_clean_rx_irq_ps;
3142 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
3143 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
3144 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3145 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3146 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
3147 } else {
3148 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3149 adapter->clean_rx = e1000_clean_rx_irq;
3150 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3151 }
3152
3153
3154 rctl = er32(RCTL);
3155 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3156 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3157 e1e_flush();
3158 usleep_range(10000, 20000);
3159
3160 if (adapter->flags2 & FLAG2_DMA_BURST) {
3161
3162
3163
3164
3165
3166
3167
3168
3169 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3170 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3171
3172
3173
3174
3175 if (adapter->rx_int_delay == DEFAULT_RDTR)
3176 adapter->rx_int_delay = BURST_RDTR;
3177 if (adapter->rx_abs_int_delay == DEFAULT_RADV)
3178 adapter->rx_abs_int_delay = BURST_RADV;
3179 }
3180
3181
3182 ew32(RDTR, adapter->rx_int_delay);
3183
3184
3185 ew32(RADV, adapter->rx_abs_int_delay);
3186 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
3187 e1000e_write_itr(adapter, adapter->itr);
3188
3189 ctrl_ext = er32(CTRL_EXT);
3190
3191 ctrl_ext |= E1000_CTRL_EXT_IAME;
3192 ew32(IAM, 0xffffffff);
3193 ew32(CTRL_EXT, ctrl_ext);
3194 e1e_flush();
3195
3196
3197
3198
3199 rdba = rx_ring->dma;
3200 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3201 ew32(RDBAH(0), (rdba >> 32));
3202 ew32(RDLEN(0), rdlen);
3203 ew32(RDH(0), 0);
3204 ew32(RDT(0), 0);
3205 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3206 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
3207
3208
3209 rxcsum = er32(RXCSUM);
3210 if (adapter->netdev->features & NETIF_F_RXCSUM)
3211 rxcsum |= E1000_RXCSUM_TUOFL;
3212 else
3213 rxcsum &= ~E1000_RXCSUM_TUOFL;
3214 ew32(RXCSUM, rxcsum);
3215
3216
3217
3218
3219 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3220 u32 lat =
3221 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 -
3222 adapter->max_frame_size) * 8 / 1000;
3223
3224 if (adapter->flags & FLAG_IS_ICH) {
3225 u32 rxdctl = er32(RXDCTL(0));
3226 ew32(RXDCTL(0), rxdctl | 0x3);
3227 }
3228
3229 pm_qos_update_request(&adapter->netdev->pm_qos_req, lat);
3230 } else {
3231 pm_qos_update_request(&adapter->netdev->pm_qos_req,
3232 PM_QOS_DEFAULT_VALUE);
3233 }
3234
3235
3236 ew32(RCTL, rctl);
3237}
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
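/**
 * e1000e_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/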
3248static int e1000e_write_mc_addr_list(struct net_device *netdev)
3249{
3250 struct e1000_adapter *adapter = netdev_priv(netdev);
3251 struct e1000_hw *hw = &adapter->hw;
3252 struct netdev_hw_addr *ha;
3253 u8 *mta_list;
3254 int i;
3255
3256 if (netdev_mc_empty(netdev)) {
3257
3258 hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
3259 return 0;
3260 }
3261
3262 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3263 if (!mta_list)
3264 return -ENOMEM;
3265
3266
3267 i = 0;
3268 netdev_for_each_mc_addr(ha, netdev)
3269 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3270
3271 hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3272 kfree(mta_list);
3273
3274 return netdev_mc_count(netdev);
3275}
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
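/**
 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/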
3286static int e1000e_write_uc_addr_list(struct net_device *netdev)
3287{
3288 struct e1000_adapter *adapter = netdev_priv(netdev);
3289 struct e1000_hw *hw = &adapter->hw;
3290 unsigned int rar_entries = hw->mac.rar_entry_count;
3291 int count = 0;
3292
3293
3294 rar_entries--;
3295
3296
3297 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
3298 rar_entries--;
3299
3300
3301 if (netdev_uc_count(netdev) > rar_entries)
3302 return -ENOMEM;
3303
3304 if (!netdev_uc_empty(netdev) && rar_entries) {
3305 struct netdev_hw_addr *ha;
3306
3307
3308
3309
3310 netdev_for_each_uc_addr(ha, netdev) {
3311 if (!rar_entries)
3312 break;
3313 hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3314 count++;
3315 }
3316 }
3317
3318
3319 for (; rar_entries > 0; rar_entries--) {
3320 ew32(RAH(rar_entries), 0);
3321 ew32(RAL(rar_entries), 0);
3322 }
3323 e1e_flush();
3324
3325 return count;
3326}
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
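/**
 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/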
3337static void e1000e_set_rx_mode(struct net_device *netdev)
3338{
3339 struct e1000_adapter *adapter = netdev_priv(netdev);
3340 struct e1000_hw *hw = &adapter->hw;
3341 u32 rctl;
3342
3343
3344 rctl = er32(RCTL);
3345
3346
3347 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3348
3349 if (netdev->flags & IFF_PROMISC) {
3350 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3351
3352 e1000e_vlan_filter_disable(adapter);
3353 } else {
3354 int count;
3355
3356 if (netdev->flags & IFF_ALLMULTI) {
3357 rctl |= E1000_RCTL_MPE;
3358 } else {
3359
3360
3361
3362
3363 count = e1000e_write_mc_addr_list(netdev);
3364 if (count < 0)
3365 rctl |= E1000_RCTL_MPE;
3366 }
3367 e1000e_vlan_filter_enable(adapter);
3368
3369
3370
3371
3372 count = e1000e_write_uc_addr_list(netdev);
3373 if (count < 0)
3374 rctl |= E1000_RCTL_UPE;
3375 }
3376
3377 ew32(RCTL, rctl);
3378
3379 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3380 e1000e_vlan_strip_enable(adapter);
3381 else
3382 e1000e_vlan_strip_disable(adapter);
3383}
3384
3385static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3386{
3387 struct e1000_hw *hw = &adapter->hw;
3388 u32 mrqc, rxcsum;
3389 int i;
3390 static const u32 rsskey[10] = {
3391 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
3392 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
3393 };
3394
3395
3396 for (i = 0; i < 10; i++)
3397 ew32(RSSRK(i), rsskey[i]);
3398
3399
3400 for (i = 0; i < 32; i++)
3401 ew32(RETA(i), 0);
3402
3403
3404
3405
3406 rxcsum = er32(RXCSUM);
3407 rxcsum |= E1000_RXCSUM_PCSD;
3408
3409 ew32(RXCSUM, rxcsum);
3410
3411 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
3412 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3413 E1000_MRQC_RSS_FIELD_IPV6 |
3414 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3415 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
3416
3417 ew32(MRQC, mrqc);
3418}
3419
3420
3421
3422
3423
3424
3425
3426
3427
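/**
 * e1000e_get_base_timinca - get default SYSTIM time increment attributes
 * @adapter: board private structure
 * @timinca: pointer to returned time increment attributes
 *
 * Get attributes for incrementing the System Time Register SYSTIML/H at
 * the default base frequency, and set the cyclecounter shift value.
 **/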
3428s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
3429{
3430 struct e1000_hw *hw = &adapter->hw;
3431 u32 incvalue, incperiod, shift;
3432
3433
3434 if ((hw->mac.type == e1000_pch_lpt) &&
3435 !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
3436 !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
3437 u32 fextnvm7 = er32(FEXTNVM7);
3438
3439 if (!(fextnvm7 & (1 << 0))) {
3440 ew32(FEXTNVM7, fextnvm7 | (1 << 0));
3441 e1e_flush();
3442 }
3443 }
3444
3445 switch (hw->mac.type) {
3446 case e1000_pch2lan:
3447 case e1000_pch_lpt:
3448
3449
3450
3451 if ((hw->mac.type != e1000_pch_lpt) ||
3452 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
3453
3454 incperiod = INCPERIOD_96MHz;
3455 incvalue = INCVALUE_96MHz;
3456 shift = INCVALUE_SHIFT_96MHz;
3457 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
3458 break;
3459 }
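		/* fall through */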
3460
3461 case e1000_82574:
3462 case e1000_82583:
3463
3464 incperiod = INCPERIOD_25MHz;
3465 incvalue = INCVALUE_25MHz;
3466 shift = INCVALUE_SHIFT_25MHz;
3467 adapter->cc.shift = shift;
3468 break;
3469 default:
3470 return -EINVAL;
3471 }
3472
3473 *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
3474 ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK));
3475
3476 return 0;
3477}
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
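/**
 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
 * @adapter: board private structure
 *
 * Outgoing time stamping can be enabled and disabled.  Incoming time
 * stamping has to be configured via the hardware filters; not all
 * combinations are supported, in particular the event type has to be
 * specified.  Filters the hardware cannot match exactly are widened
 * (e.g. PTP v1 layer-4 events are time stamped via the catch-all filter).
 **/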
3494static int e1000e_config_hwtstamp(struct e1000_adapter *adapter)
3495{
3496 struct e1000_hw *hw = &adapter->hw;
3497 struct hwtstamp_config *config = &adapter->hwtstamp_config;
3498 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
3499 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
3500 u32 rxmtrl = 0;
3501 u16 rxudp = 0;
3502 bool is_l4 = false;
3503 bool is_l2 = false;
3504 u32 regval;
3505 s32 ret_val;
3506
3507 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
3508 return -EINVAL;
3509
3510
3511 if (config->flags)
3512 return -EINVAL;
3513
3514 switch (config->tx_type) {
3515 case HWTSTAMP_TX_OFF:
3516 tsync_tx_ctl = 0;
3517 break;
3518 case HWTSTAMP_TX_ON:
3519 break;
3520 default:
3521 return -ERANGE;
3522 }
3523
3524 switch (config->rx_filter) {
3525 case HWTSTAMP_FILTER_NONE:
3526 tsync_rx_ctl = 0;
3527 break;
3528 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3529 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3530 rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE;
3531 is_l4 = true;
3532 break;
3533 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3534 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3535 rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE;
3536 is_l4 = true;
3537 break;
3538 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3539
3540 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3541 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3542 is_l2 = true;
3543 break;
3544 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3545
3546 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3547 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3548 is_l2 = true;
3549 break;
3550 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3551
3552
3553
3554 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3555
3556 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3557 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3558 is_l2 = true;
3559 is_l4 = true;
3560 break;
3561 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3562
3563
3564
3565 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3566
3567 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3568 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3569 is_l2 = true;
3570 is_l4 = true;
3571 break;
3572 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3573 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3574
3575
3576
3577 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3578 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
3579 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
3580 is_l2 = true;
3581 is_l4 = true;
3582 break;
3583 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3584
3585
3586
3587
3588 case HWTSTAMP_FILTER_ALL:
3589 is_l2 = true;
3590 is_l4 = true;
3591 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
3592 config->rx_filter = HWTSTAMP_FILTER_ALL;
3593 break;
3594 default:
3595 return -ERANGE;
3596 }
3597
3598
3599 regval = er32(TSYNCTXCTL);
3600 regval &= ~E1000_TSYNCTXCTL_ENABLED;
3601 regval |= tsync_tx_ctl;
3602 ew32(TSYNCTXCTL, regval);
3603 if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
3604 (regval & E1000_TSYNCTXCTL_ENABLED)) {
3605 e_err("Timesync Tx Control register not set as expected\n");
3606 return -EAGAIN;
3607 }
3608
3609
3610 regval = er32(TSYNCRXCTL);
3611 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
3612 regval |= tsync_rx_ctl;
3613 ew32(TSYNCRXCTL, regval);
3614 if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED |
3615 E1000_TSYNCRXCTL_TYPE_MASK)) !=
3616 (regval & (E1000_TSYNCRXCTL_ENABLED |
3617 E1000_TSYNCRXCTL_TYPE_MASK))) {
3618 e_err("Timesync Rx Control register not set as expected\n");
3619 return -EAGAIN;
3620 }
3621
3622
3623 if (is_l2)
3624 rxmtrl |= ETH_P_1588;
3625
3626
3627 ew32(RXMTRL, rxmtrl);
3628
3629
3630 if (is_l4) {
3631 rxudp = PTP_EV_PORT;
3632 cpu_to_be16s(&rxudp);
3633 }
3634 ew32(RXUDP, rxudp);
3635
3636 e1e_flush();
3637
3638
3639 er32(RXSTMPH);
3640 er32(TXSTMPH);
3641
3642
	ret_val = e1000e_get_base_timinca(adapter, &regval);
3644 if (ret_val)
3645 return ret_val;
3646 ew32(TIMINCA, regval);
3647
3648
3649 timecounter_init(&adapter->tc, &adapter->cc,
3650 ktime_to_ns(ktime_get_real()));
3651
3652 return 0;
3653}
3654
3655
3656
3657
3658
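/**
 * e1000_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/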
3659static void e1000_configure(struct e1000_adapter *adapter)
3660{
3661 struct e1000_ring *rx_ring = adapter->rx_ring;
3662
3663 e1000e_set_rx_mode(adapter->netdev);
3664
3665 e1000_restore_vlan(adapter);
3666 e1000_init_manageability_pt(adapter);
3667
3668 e1000_configure_tx(adapter);
3669
3670 if (adapter->netdev->features & NETIF_F_RXHASH)
3671 e1000e_setup_rss_hash(adapter);
3672 e1000_setup_rctl(adapter);
3673 e1000_configure_rx(adapter);
3674 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
3675}
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685void e1000e_power_up_phy(struct e1000_adapter *adapter)
3686{
3687 if (adapter->hw.phy.ops.power_up)
3688 adapter->hw.phy.ops.power_up(&adapter->hw);
3689
3690 adapter->hw.mac.ops.setup_link(&adapter->hw);
3691}
3692
3693
3694
3695
3696
3697
3698
3699static void e1000_power_down_phy(struct e1000_adapter *adapter)
3700{
3701
3702 if (adapter->wol)
3703 return;
3704
3705 if (adapter->hw.phy.ops.power_down)
3706 adapter->hw.phy.ops.power_down(&adapter->hw);
3707}
3708
3709
3710
3711
3712
3713
3714
3715
3716
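/**
 * e1000e_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime.  After reset the device needs to be
 * properly configured for Rx, Tx etc.
 **/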
3717void e1000e_reset(struct e1000_adapter *adapter)
3718{
3719 struct e1000_mac_info *mac = &adapter->hw.mac;
3720 struct e1000_fc_info *fc = &adapter->hw.fc;
3721 struct e1000_hw *hw = &adapter->hw;
3722 u32 tx_space, min_tx_space, min_rx_space;
3723 u32 pba = adapter->pba;
3724 u16 hwm;
3725
3726
3727 ew32(PBA, pba);
3728
3729 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3730
3731
3732
3733
3734
3735
3736
3737 pba = er32(PBA);
3738
3739 tx_space = pba >> 16;
3740
3741 pba &= 0xffff;
3742
3743
3744
3745 min_tx_space = (adapter->max_frame_size +
3746 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
3747 min_tx_space = ALIGN(min_tx_space, 1024);
3748 min_tx_space >>= 10;
3749
3750 min_rx_space = adapter->max_frame_size;
3751 min_rx_space = ALIGN(min_rx_space, 1024);
3752 min_rx_space >>= 10;
3753
3754
3755
3756
3757
3758 if ((tx_space < min_tx_space) &&
3759 ((min_tx_space - tx_space) < pba)) {
3760 pba -= min_tx_space - tx_space;
3761
3762
3763
3764
3765 if (pba < min_rx_space)
3766 pba = min_rx_space;
3767 }
3768
3769 ew32(PBA, pba);
3770 }
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3781 fc->pause_time = 0xFFFF;
3782 else
3783 fc->pause_time = E1000_FC_PAUSE_TIME;
3784 fc->send_xon = true;
3785 fc->current_mode = fc->requested_mode;
3786
3787 switch (hw->mac.type) {
3788 case e1000_ich9lan:
3789 case e1000_ich10lan:
3790 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3791 pba = 14;
3792 ew32(PBA, pba);
3793 fc->high_water = 0x2800;
3794 fc->low_water = fc->high_water - 8;
3795 break;
3796 }
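		/* fall through */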
3797
3798 default:
3799 hwm = min(((pba << 10) * 9 / 10),
3800 ((pba << 10) - adapter->max_frame_size));
3801
3802 fc->high_water = hwm & E1000_FCRTH_RTH;
3803 fc->low_water = fc->high_water - 8;
3804 break;
3805 case e1000_pchlan:
3806
3807
3808
3809 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3810 fc->high_water = 0x3500;
3811 fc->low_water = 0x1500;
3812 } else {
3813 fc->high_water = 0x5000;
3814 fc->low_water = 0x3000;
3815 }
3816 fc->refresh_time = 0x1000;
3817 break;
3818 case e1000_pch2lan:
3819 case e1000_pch_lpt:
3820 fc->refresh_time = 0x0400;
3821
3822 if (adapter->netdev->mtu <= ETH_DATA_LEN) {
3823 fc->high_water = 0x05C20;
3824 fc->low_water = 0x05048;
3825 fc->pause_time = 0x0650;
3826 break;
3827 }
3828
3829 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
3830 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
3831 break;
3832 }
3833
3834
3835
3836
3837
3838
3839 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3840 24 << 10);
3841
3842
3843
3844
3845 if (adapter->itr_setting & 0x3) {
3846 if ((adapter->max_frame_size * 2) > (pba << 10)) {
3847 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3848 dev_info(&adapter->pdev->dev,
3849 "Interrupt Throttle Rate off\n");
3850 adapter->flags2 |= FLAG2_DISABLE_AIM;
3851 e1000e_write_itr(adapter, 0);
3852 }
3853 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3854 dev_info(&adapter->pdev->dev,
3855 "Interrupt Throttle Rate on\n");
3856 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3857 adapter->itr = 20000;
3858 e1000e_write_itr(adapter, adapter->itr);
3859 }
3860 }
3861
3862
3863 mac->ops.reset_hw(hw);
3864
3865
3866
3867
3868 if (adapter->flags & FLAG_HAS_AMT)
3869 e1000e_get_hw_control(adapter);
3870
3871 ew32(WUC, 0);
3872
3873 if (mac->ops.init_hw(hw))
3874 e_err("Hardware Error\n");
3875
3876 e1000_update_mng_vlan(adapter);
3877
3878
3879 ew32(VET, ETH_P_8021Q);
3880
3881 e1000e_reset_adaptive(hw);
3882
3883
3884 e1000e_config_hwtstamp(adapter);
3885
3886
3887 if (adapter->flags2 & FLAG2_HAS_EEE) {
3888 s32 ret_val;
3889 u16 adv_addr;
3890
3891 switch (hw->phy.type) {
3892 case e1000_phy_82579:
3893 adv_addr = I82579_EEE_ADVERTISEMENT;
3894 break;
3895 case e1000_phy_i217:
3896 adv_addr = I217_EEE_ADVERTISEMENT;
3897 break;
3898 default:
3899 dev_err(&adapter->pdev->dev,
3900 "Invalid PHY type setting EEE advertisement\n");
3901 return;
3902 }
3903
3904 ret_val = hw->phy.ops.acquire(hw);
3905 if (ret_val) {
3906 dev_err(&adapter->pdev->dev,
3907 "EEE advertisement - unable to acquire PHY\n");
3908 return;
3909 }
3910
3911 e1000_write_emi_reg_locked(hw, adv_addr,
3912 hw->dev_spec.ich8lan.eee_disable ?
3913 0 : adapter->eee_advert);
3914
3915 hw->phy.ops.release(hw);
3916 }
3917
3918 if (!netif_running(adapter->netdev) &&
3919 !test_bit(__E1000_TESTING, &adapter->state)) {
3920 e1000_power_down_phy(adapter);
3921 return;
3922 }
3923
3924 e1000_get_phy_info(hw);
3925
3926 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3927 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3928 u16 phy_data = 0;
3929
3930
3931
3932
3933 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3934 phy_data &= ~IGP02E1000_PM_SPD;
3935 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3936 }
3937}
3938
3939int e1000e_up(struct e1000_adapter *adapter)
3940{
3941 struct e1000_hw *hw = &adapter->hw;
3942
3943
3944 e1000_configure(adapter);
3945
3946 clear_bit(__E1000_DOWN, &adapter->state);
3947
3948 if (adapter->msix_entries)
3949 e1000_configure_msix(adapter);
3950 e1000_irq_enable(adapter);
3951
3952 netif_start_queue(adapter->netdev);
3953
3954
3955 if (adapter->msix_entries)
3956 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3957 else
3958 ew32(ICS, E1000_ICS_LSC);
3959
3960 return 0;
3961}
3962
3963static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3964{
3965 struct e1000_hw *hw = &adapter->hw;
3966
3967 if (!(adapter->flags2 & FLAG2_DMA_BURST))
3968 return;
3969
3970
3971 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3972 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3973
3974
3975 e1e_flush();
3976
3977
3978
3979
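	/* due to rare timing issues, write to TIDV/RDTR again to ensure the
	 * write is successful
	 */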
3980 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3981 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3982
3983
3984 e1e_flush();
3985}
3986
3987static void e1000e_update_stats(struct e1000_adapter *adapter);
3988
3989void e1000e_down(struct e1000_adapter *adapter)
3990{
3991 struct net_device *netdev = adapter->netdev;
3992 struct e1000_hw *hw = &adapter->hw;
3993 u32 tctl, rctl;
3994
3995
3996
3997
3998 set_bit(__E1000_DOWN, &adapter->state);
3999
4000
4001 rctl = er32(RCTL);
4002 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
4003 ew32(RCTL, rctl & ~E1000_RCTL_EN);
4004
4005
4006 netif_stop_queue(netdev);
4007
4008
4009 tctl = er32(TCTL);
4010 tctl &= ~E1000_TCTL_EN;
4011 ew32(TCTL, tctl);
4012
4013
4014 e1e_flush();
4015 usleep_range(10000, 20000);
4016
4017 e1000_irq_disable(adapter);
4018
4019 napi_synchronize(&adapter->napi);
4020
4021 del_timer_sync(&adapter->watchdog_timer);
4022 del_timer_sync(&adapter->phy_info_timer);
4023
4024 netif_carrier_off(netdev);
4025
4026 spin_lock(&adapter->stats64_lock);
4027 e1000e_update_stats(adapter);
4028 spin_unlock(&adapter->stats64_lock);
4029
4030 e1000e_flush_descriptors(adapter);
4031 e1000_clean_tx_ring(adapter->tx_ring);
4032 e1000_clean_rx_ring(adapter->rx_ring);
4033
4034 adapter->link_speed = 0;
4035 adapter->link_duplex = 0;
4036
4037 if (!pci_channel_offline(adapter->pdev))
4038 e1000e_reset(adapter);
4039
4040
4041
4042
4043}
4044
4045void e1000e_reinit_locked(struct e1000_adapter *adapter)
4046{
4047 might_sleep();
4048 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4049 usleep_range(1000, 2000);
4050 e1000e_down(adapter);
4051 e1000e_up(adapter);
4052 clear_bit(__E1000_RESETTING, &adapter->state);
4053}
4054
4055
4056
4057
4058
4059static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
4060{
4061 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
4062 cc);
4063 struct e1000_hw *hw = &adapter->hw;
4064 cycle_t systim;
4065
4066
4067 systim = (cycle_t)er32(SYSTIML);
4068 systim |= (cycle_t)er32(SYSTIMH) << 32;
4069
4070 return systim;
4071}
4072
4073
4074
4075
4076
4077
4078
4079
4080
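/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/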
4081static int e1000_sw_init(struct e1000_adapter *adapter)
4082{
4083 struct net_device *netdev = adapter->netdev;
4084
4085 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
4086 adapter->rx_ps_bsize0 = 128;
4087 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4088 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
4089 adapter->tx_ring_count = E1000_DEFAULT_TXD;
4090 adapter->rx_ring_count = E1000_DEFAULT_RXD;
4091
4092 spin_lock_init(&adapter->stats64_lock);
4093
4094 e1000e_set_interrupt_capability(adapter);
4095
4096 if (e1000_alloc_queues(adapter))
4097 return -ENOMEM;
4098
4099
4100 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
4101 adapter->cc.read = e1000e_cyclecounter_read;
4102 adapter->cc.mask = CLOCKSOURCE_MASK(64);
4103 adapter->cc.mult = 1;
4104
4105
4106 spin_lock_init(&adapter->systim_lock);
4107 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work);
4108 }
4109
4110
4111 e1000_irq_disable(adapter);
4112
4113 set_bit(__E1000_DOWN, &adapter->state);
4114 return 0;
4115}
4116
4117
4118
4119
4120
4121
4122static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data)
4123{
4124 struct net_device *netdev = data;
4125 struct e1000_adapter *adapter = netdev_priv(netdev);
4126 struct e1000_hw *hw = &adapter->hw;
4127 u32 icr = er32(ICR);
4128
4129 e_dbg("icr is %08X\n", icr);
4130 if (icr & E1000_ICR_RXSEQ) {
4131 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
4132
4133
4134
4135 wmb();
4136 }
4137
4138 return IRQ_HANDLED;
4139}
4140
4141
4142
4143
4144
4145
4146
4147static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
4148{
4149 struct net_device *netdev = adapter->netdev;
4150 struct e1000_hw *hw = &adapter->hw;
4151 int err;
4152
4153
4154
4155 er32(ICR);
4156
4157
4158 e1000_free_irq(adapter);
4159 e1000e_reset_interrupt_capability(adapter);
4160
4161
4162
4163
4164 adapter->flags |= FLAG_MSI_TEST_FAILED;
4165
4166 err = pci_enable_msi(adapter->pdev);
4167 if (err)
4168 goto msi_test_failed;
4169
4170 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
4171 netdev->name, netdev);
4172 if (err) {
4173 pci_disable_msi(adapter->pdev);
4174 goto msi_test_failed;
4175 }
4176
4177
4178
4179
4180 wmb();
4181
4182 e1000_irq_enable(adapter);
4183
4184
4185 ew32(ICS, E1000_ICS_RXSEQ);
4186 e1e_flush();
4187 msleep(100);
4188
4189 e1000_irq_disable(adapter);
4190
4191 rmb();
4192
4193 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
4194 adapter->int_mode = E1000E_INT_MODE_LEGACY;
4195 e_info("MSI interrupt test failed, using legacy interrupt.\n");
4196 } else {
4197 e_dbg("MSI interrupt test succeeded!\n");
4198 }
4199
4200 free_irq(adapter->pdev->irq, netdev);
4201 pci_disable_msi(adapter->pdev);
4202
4203msi_test_failed:
4204 e1000e_set_interrupt_capability(adapter);
4205 return e1000_request_irq(adapter);
4206}
4207
4208
4209
4210
4211
4212
4213
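/**
 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
 * @adapter: board private struct
 *
 * Temporarily disables SERR reporting in the PCI command word (some
 * chipsets falsely assert SERR during the MSI test), runs the MSI
 * interrupt test, then restores the original command word.
 **/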
4214static int e1000_test_msi(struct e1000_adapter *adapter)
4215{
4216 int err;
4217 u16 pci_cmd;
4218
4219 if (!(adapter->flags & FLAG_MSI_ENABLED))
4220 return 0;
4221
4222
4223 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4224 if (pci_cmd & PCI_COMMAND_SERR)
4225 pci_write_config_word(adapter->pdev, PCI_COMMAND,
4226 pci_cmd & ~PCI_COMMAND_SERR);
4227
4228 err = e1000_test_msi_interrupt(adapter);
4229
4230
4231 if (pci_cmd & PCI_COMMAND_SERR) {
4232 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4233 pci_cmd |= PCI_COMMAND_SERR;
4234 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
4235 }
4236
4237 return err;
4238}
4239
4240
4241
4242
4243
4244
4245
4246
4247
4248
4249
4250
4251
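/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/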
4252static int e1000_open(struct net_device *netdev)
4253{
4254 struct e1000_adapter *adapter = netdev_priv(netdev);
4255 struct e1000_hw *hw = &adapter->hw;
4256 struct pci_dev *pdev = adapter->pdev;
4257 int err;
4258
4259
4260 if (test_bit(__E1000_TESTING, &adapter->state))
4261 return -EBUSY;
4262
4263 pm_runtime_get_sync(&pdev->dev);
4264
4265 netif_carrier_off(netdev);
4266
4267
4268 err = e1000e_setup_tx_resources(adapter->tx_ring);
4269 if (err)
4270 goto err_setup_tx;
4271
4272
4273 err = e1000e_setup_rx_resources(adapter->rx_ring);
4274 if (err)
4275 goto err_setup_rx;
4276
4277
4278
4279
4280 if (adapter->flags & FLAG_HAS_AMT) {
4281 e1000e_get_hw_control(adapter);
4282 e1000e_reset(adapter);
4283 }
4284
4285 e1000e_power_up_phy(adapter);
4286
4287 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4288 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
4289 e1000_update_mng_vlan(adapter);
4290
4291
4292 pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
4293 PM_QOS_DEFAULT_VALUE);
4294
4295
4296
4297
4298
4299
4300 e1000_configure(adapter);
4301
4302 err = e1000_request_irq(adapter);
4303 if (err)
4304 goto err_req_irq;
4305
4306
4307
4308
4309
4310 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
4311 err = e1000_test_msi(adapter);
4312 if (err) {
4313 e_err("Interrupt allocation failed\n");
4314 goto err_req_irq;
4315 }
4316 }
4317
4318
4319 clear_bit(__E1000_DOWN, &adapter->state);
4320
4321 napi_enable(&adapter->napi);
4322
4323 e1000_irq_enable(adapter);
4324
4325 adapter->tx_hang_recheck = false;
4326 netif_start_queue(netdev);
4327
4328 adapter->idle_check = true;
4329 hw->mac.get_link_status = true;
4330 pm_runtime_put(&pdev->dev);
4331
4332
4333 if (adapter->msix_entries)
4334 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
4335 else
4336 ew32(ICS, E1000_ICS_LSC);
4337
4338 return 0;
4339
4340err_req_irq:
4341 e1000e_release_hw_control(adapter);
4342 e1000_power_down_phy(adapter);
4343 e1000e_free_rx_resources(adapter->rx_ring);
4344err_setup_rx:
4345 e1000e_free_tx_resources(adapter->tx_ring);
4346err_setup_tx:
4347 e1000e_reset(adapter);
4348 pm_runtime_put_sync(&pdev->dev);
4349
4350 return err;
4351}
4352
4353
4354
4355
4356
4357
4358
4359
4360
4361
4362
4363
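/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/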
4364static int e1000_close(struct net_device *netdev)
4365{
4366 struct e1000_adapter *adapter = netdev_priv(netdev);
4367 struct pci_dev *pdev = adapter->pdev;
4368 int count = E1000_CHECK_RESET_COUNT;
4369
4370 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
4371 usleep_range(10000, 20000);
4372
4373 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
4374
4375 pm_runtime_get_sync(&pdev->dev);
4376
4377 if (!test_bit(__E1000_DOWN, &adapter->state)) {
4378 e1000e_down(adapter);
4379 e1000_free_irq(adapter);
4380 }
4381
4382 napi_disable(&adapter->napi);
4383
4384 e1000_power_down_phy(adapter);
4385
4386 e1000e_free_tx_resources(adapter->tx_ring);
4387 e1000e_free_rx_resources(adapter->rx_ring);
4388
4389
4390
4391
4392 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
4393 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
4394 adapter->mng_vlan_id);
4395
4396
4397
4398
4399 if ((adapter->flags & FLAG_HAS_AMT) &&
4400 !test_bit(__E1000_TESTING, &adapter->state))
4401 e1000e_release_hw_control(adapter);
4402
4403 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
4404
4405 pm_runtime_put_sync(&pdev->dev);
4406
4407 return 0;
4408}
4409
4410
4411
4412
4413
4414
4415
4416
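/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/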
4417static int e1000_set_mac(struct net_device *netdev, void *p)
4418{
4419 struct e1000_adapter *adapter = netdev_priv(netdev);
4420 struct e1000_hw *hw = &adapter->hw;
4421 struct sockaddr *addr = p;
4422
4423 if (!is_valid_ether_addr(addr->sa_data))
4424 return -EADDRNOTAVAIL;
4425
4426 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4427 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4428
4429 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
4430
4431 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4432
4433 e1000e_set_laa_state_82571(&adapter->hw, 1);
4434
4435
4436
4437
4438
4439
4440
4441
4442 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4443 adapter->hw.mac.rar_entry_count - 1);
4444 }
4445
4446 return 0;
4447}
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457static void e1000e_update_phy_task(struct work_struct *work)
4458{
4459 struct e1000_adapter *adapter = container_of(work,
4460 struct e1000_adapter,
4461 update_phy_task);
4462
4463 if (test_bit(__E1000_DOWN, &adapter->state))
4464 return;
4465
4466 e1000_get_phy_info(&adapter->hw);
4467}
4468
4469
4470
4471
4472
4473
4474
4475
4476static void e1000_update_phy_info(unsigned long data)
4477{
4478 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
4479
4480 if (test_bit(__E1000_DOWN, &adapter->state))
4481 return;
4482
4483 schedule_work(&adapter->update_phy_task);
4484}
4485
4486
4487
4488
4489
4490
4491
4492static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4493{
4494 struct e1000_hw *hw = &adapter->hw;
4495 s32 ret_val;
4496 u16 phy_data;
4497
4498 ret_val = hw->phy.ops.acquire(hw);
4499 if (ret_val)
4500 return;
4501
4502
4503
4504
4505 hw->phy.addr = 1;
4506 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4507 &phy_data);
4508 if (ret_val)
4509 goto release;
4510 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
4511 ret_val = hw->phy.ops.set_page(hw,
4512 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4513 if (ret_val)
4514 goto release;
4515 }
4516
4517
4518 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4519 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4520 if (!ret_val)
4521 adapter->stats.scc += phy_data;
4522
4523
4524 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4525 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4526 if (!ret_val)
4527 adapter->stats.ecol += phy_data;
4528
4529
4530 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4531 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4532 if (!ret_val)
4533 adapter->stats.mcc += phy_data;
4534
4535
4536 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4537 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4538 if (!ret_val)
4539 adapter->stats.latecol += phy_data;
4540
4541
4542 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4543 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4544 if (!ret_val)
4545 hw->mac.collision_delta = phy_data;
4546
4547
4548 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4549 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4550 if (!ret_val)
4551 adapter->stats.dc += phy_data;
4552
4553
4554 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4555 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4556 if (!ret_val)
4557 adapter->stats.tncrs += phy_data;
4558
4559release:
4560 hw->phy.ops.release(hw);
4561}
4562
4563
4564
4565
4566
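/**
 * e1000e_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/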
4567static void e1000e_update_stats(struct e1000_adapter *adapter)
4568{
4569 struct net_device *netdev = adapter->netdev;
4570 struct e1000_hw *hw = &adapter->hw;
4571 struct pci_dev *pdev = adapter->pdev;
4572
4573
4574
4575
4576 if (adapter->link_speed == 0)
4577 return;
4578 if (pci_channel_offline(pdev))
4579 return;
4580
4581 adapter->stats.crcerrs += er32(CRCERRS);
4582 adapter->stats.gprc += er32(GPRC);
4583 adapter->stats.gorc += er32(GORCL);
4584 er32(GORCH);
4585 adapter->stats.bprc += er32(BPRC);
4586 adapter->stats.mprc += er32(MPRC);
4587 adapter->stats.roc += er32(ROC);
4588
4589 adapter->stats.mpc += er32(MPC);
4590
4591
4592 if (adapter->link_duplex == HALF_DUPLEX) {
4593 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4594 e1000e_update_phy_stats(adapter);
4595 } else {
4596 adapter->stats.scc += er32(SCC);
4597 adapter->stats.ecol += er32(ECOL);
4598 adapter->stats.mcc += er32(MCC);
4599 adapter->stats.latecol += er32(LATECOL);
4600 adapter->stats.dc += er32(DC);
4601
4602 hw->mac.collision_delta = er32(COLC);
4603
4604 if ((hw->mac.type != e1000_82574) &&
4605 (hw->mac.type != e1000_82583))
4606 adapter->stats.tncrs += er32(TNCRS);
4607 }
4608 adapter->stats.colc += hw->mac.collision_delta;
4609 }
4610
4611 adapter->stats.xonrxc += er32(XONRXC);
4612 adapter->stats.xontxc += er32(XONTXC);
4613 adapter->stats.xoffrxc += er32(XOFFRXC);
4614 adapter->stats.xofftxc += er32(XOFFTXC);
4615 adapter->stats.gptc += er32(GPTC);
4616 adapter->stats.gotc += er32(GOTCL);
4617 er32(GOTCH);
4618 adapter->stats.rnbc += er32(RNBC);
4619 adapter->stats.ruc += er32(RUC);
4620
4621 adapter->stats.mptc += er32(MPTC);
4622 adapter->stats.bptc += er32(BPTC);
4623
4624
4625
4626 hw->mac.tx_packet_delta = er32(TPT);
4627 adapter->stats.tpt += hw->mac.tx_packet_delta;
4628
4629 adapter->stats.algnerrc += er32(ALGNERRC);
4630 adapter->stats.rxerrc += er32(RXERRC);
4631 adapter->stats.cexterr += er32(CEXTERR);
4632 adapter->stats.tsctc += er32(TSCTC);
4633 adapter->stats.tsctfc += er32(TSCTFC);
4634
4635
4636 netdev->stats.multicast = adapter->stats.mprc;
4637 netdev->stats.collisions = adapter->stats.colc;
4638
4639
4640
4641
4642
4643
4644 netdev->stats.rx_errors = adapter->stats.rxerrc +
4645 adapter->stats.crcerrs + adapter->stats.algnerrc +
4646 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
4647 netdev->stats.rx_length_errors = adapter->stats.ruc +
4648 adapter->stats.roc;
4649 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4650 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4651 netdev->stats.rx_missed_errors = adapter->stats.mpc;
4652
4653
4654 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
4655 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4656 netdev->stats.tx_window_errors = adapter->stats.latecol;
4657 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
4658
4659
4660
4661
4662 adapter->stats.mgptc += er32(MGTPTC);
4663 adapter->stats.mgprc += er32(MGTPRC);
4664 adapter->stats.mgpdc += er32(MGTPDC);
4665
4666
4667 if (hw->mac.type == e1000_pch_lpt) {
4668 u32 pbeccsts = er32(PBECCSTS);
4669 adapter->corr_errors +=
4670 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
4671 adapter->uncorr_errors +=
4672 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
4673 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
4674 }
4675}
4676
4677
4678
4679
4680
4681static void e1000_phy_read_status(struct e1000_adapter *adapter)
4682{
4683 struct e1000_hw *hw = &adapter->hw;
4684 struct e1000_phy_regs *phy = &adapter->phy_regs;
4685
4686 if ((er32(STATUS) & E1000_STATUS_LU) &&
4687 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4688 int ret_val;
4689
4690 pm_runtime_get_sync(&adapter->pdev->dev);
4691 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
4692 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
4693 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
4694 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa);
4695 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion);
4696 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000);
4697 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000);
4698 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
4699 if (ret_val)
4700 e_warn("Error reading PHY register\n");
4701 pm_runtime_put_sync(&adapter->pdev->dev);
4702 } else {
4703
4704
4705
4706 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
4707 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
4708 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
4709 BMSR_ERCAP);
4710 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
4711 ADVERTISE_ALL | ADVERTISE_CSMA);
4712 phy->lpa = 0;
4713 phy->expansion = EXPANSION_ENABLENPAGE;
4714 phy->ctrl1000 = ADVERTISE_1000FULL;
4715 phy->stat1000 = 0;
4716 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
4717 }
4718}
4719
4720static void e1000_print_link_info(struct e1000_adapter *adapter)
4721{
4722 struct e1000_hw *hw = &adapter->hw;
4723 u32 ctrl = er32(CTRL);
4724
4725
4726 pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4727 adapter->netdev->name, adapter->link_speed,
4728 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
4729 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
4730 (ctrl & E1000_CTRL_RFCE) ? "Rx" :
4731 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
4732}
4733
4734static bool e1000e_has_link(struct e1000_adapter *adapter)
4735{
4736 struct e1000_hw *hw = &adapter->hw;
4737 bool link_active = false;
4738 s32 ret_val = 0;
4739
4740
4741
4742
4743
4744
4745 switch (hw->phy.media_type) {
4746 case e1000_media_type_copper:
4747 if (hw->mac.get_link_status) {
4748 ret_val = hw->mac.ops.check_for_link(hw);
4749 link_active = !hw->mac.get_link_status;
4750 } else {
4751 link_active = true;
4752 }
4753 break;
4754 case e1000_media_type_fiber:
4755 ret_val = hw->mac.ops.check_for_link(hw);
4756 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4757 break;
4758 case e1000_media_type_internal_serdes:
4759 ret_val = hw->mac.ops.check_for_link(hw);
4760 link_active = adapter->hw.mac.serdes_has_link;
4761 break;
4762 default:
4763 case e1000_media_type_unknown:
4764 break;
4765 }
4766
4767 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4768 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4769
4770 e_info("Gigabit has been disabled, downgrading speed\n");
4771 }
4772
4773 return link_active;
4774}
4775
4776static void e1000e_enable_receives(struct e1000_adapter *adapter)
4777{
4778
4779 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4780 (adapter->flags & FLAG_RESTART_NOW)) {
4781 struct e1000_hw *hw = &adapter->hw;
4782 u32 rctl = er32(RCTL);
4783 ew32(RCTL, rctl | E1000_RCTL_EN);
4784 adapter->flags &= ~FLAG_RESTART_NOW;
4785 }
4786}
4787
4788static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4789{
4790 struct e1000_hw *hw = &adapter->hw;
4791
4792
4793
4794
4795 if (e1000_check_phy_82574(hw))
4796 adapter->phy_hang_count++;
4797 else
4798 adapter->phy_hang_count = 0;
4799
4800 if (adapter->phy_hang_count > 1) {
4801 adapter->phy_hang_count = 0;
4802 schedule_work(&adapter->reset_task);
4803 }
4804}
4805
4806
4807
4808
4809
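/**
 * e1000_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/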
4810static void e1000_watchdog(unsigned long data)
4811{
4812 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
4813
4814
4815 schedule_work(&adapter->watchdog_task);
4816
4817
4818}
4819
4820static void e1000_watchdog_task(struct work_struct *work)
4821{
4822 struct e1000_adapter *adapter = container_of(work,
4823 struct e1000_adapter,
4824 watchdog_task);
4825 struct net_device *netdev = adapter->netdev;
4826 struct e1000_mac_info *mac = &adapter->hw.mac;
4827 struct e1000_phy_info *phy = &adapter->hw.phy;
4828 struct e1000_ring *tx_ring = adapter->tx_ring;
4829 struct e1000_hw *hw = &adapter->hw;
4830 u32 link, tctl;
4831
4832 if (test_bit(__E1000_DOWN, &adapter->state))
4833 return;
4834
4835 link = e1000e_has_link(adapter);
4836 if ((netif_carrier_ok(netdev)) && link) {
4837
4838 pm_runtime_resume(netdev->dev.parent);
4839
4840 e1000e_enable_receives(adapter);
4841 goto link_up;
4842 }
4843
4844 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4845 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4846 e1000_update_mng_vlan(adapter);
4847
4848 if (link) {
4849 if (!netif_carrier_ok(netdev)) {
4850 bool txb2b = true;
4851
4852
4853 pm_runtime_resume(netdev->dev.parent);
4854
4855
4856 e1000_phy_read_status(adapter);
4857 mac->ops.get_link_up_info(&adapter->hw,
4858 &adapter->link_speed,
4859 &adapter->link_duplex);
4860 e1000_print_link_info(adapter);
4861
4862
4863 e1000e_check_downshift(hw);
4864 if (phy->speed_downgraded)
4865 netdev_warn(netdev,
4866 "Link Speed was downgraded by SmartSpeed\n");
4867
4868
4869
4870
4871 if ((hw->phy.type == e1000_phy_igp_3 ||
4872 hw->phy.type == e1000_phy_bm) &&
			    hw->mac.autoneg &&
4874 (adapter->link_speed == SPEED_10 ||
4875 adapter->link_speed == SPEED_100) &&
4876 (adapter->link_duplex == HALF_DUPLEX)) {
4877 u16 autoneg_exp;
4878
4879 e1e_rphy(hw, MII_EXPANSION, &autoneg_exp);
4880
4881 if (!(autoneg_exp & EXPANSION_NWAY))
4882 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
4883 }
4884
4885
4886 adapter->tx_timeout_factor = 1;
4887 switch (adapter->link_speed) {
4888 case SPEED_10:
4889 txb2b = false;
4890 adapter->tx_timeout_factor = 16;
4891 break;
4892 case SPEED_100:
4893 txb2b = false;
4894 adapter->tx_timeout_factor = 10;
4895 break;
4896 }
4897
			/* workaround: re-program the TARC speed-mode bit
			 * after a link-up event
			 */
4901 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4902 !txb2b) {
4903 u32 tarc0;
4904 tarc0 = er32(TARC(0));
4905 tarc0 &= ~SPEED_MODE_BIT;
4906 ew32(TARC(0), tarc0);
4907 }
4908
			/* disable TSO at 10/100 speeds to avoid known
			 * hardware issues at those link rates
			 */
4912 if (!(adapter->flags & FLAG_TSO_FORCE)) {
4913 switch (adapter->link_speed) {
4914 case SPEED_10:
4915 case SPEED_100:
4916 e_info("10/100 speed: disabling TSO\n");
4917 netdev->features &= ~NETIF_F_TSO;
4918 netdev->features &= ~NETIF_F_TSO6;
4919 break;
4920 case SPEED_1000:
4921 netdev->features |= NETIF_F_TSO;
4922 netdev->features |= NETIF_F_TSO6;
4923 break;
4924 default:
4925
4926 break;
4927 }
4928 }
4929
			/* enable transmits in the hardware; this needs to be
			 * done after TARC(0) is set up above
			 */
4933 tctl = er32(TCTL);
4934 tctl |= E1000_TCTL_EN;
4935 ew32(TCTL, tctl);
4936
			/* Perform any PHY post-link-up configuration before
			 * reporting link up.
			 */
4940 if (phy->ops.cfg_on_link_up)
4941 phy->ops.cfg_on_link_up(hw);
4942
4943 netif_carrier_on(netdev);
4944
4945 if (!test_bit(__E1000_DOWN, &adapter->state))
4946 mod_timer(&adapter->phy_info_timer,
4947 round_jiffies(jiffies + 2 * HZ));
4948 }
4949 } else {
4950 if (netif_carrier_ok(netdev)) {
4951 adapter->link_speed = 0;
4952 adapter->link_duplex = 0;
4953
4954 pr_info("%s NIC Link is Down\n", adapter->netdev->name);
4955 netif_carrier_off(netdev);
4956 if (!test_bit(__E1000_DOWN, &adapter->state))
4957 mod_timer(&adapter->phy_info_timer,
4958 round_jiffies(jiffies + 2 * HZ));
4959
			/* The link is lost, so the controller stops DMA.
			 * If there is queued Tx work that cannot complete,
			 * or the part needs its Rx buffers flushed on link
			 * down, reset the controller to flush the Tx/Rx
			 * packet buffers (outside interrupt context);
			 * otherwise let runtime PM schedule a suspend.
			 */
4967 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
4968 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
4969 adapter->flags |= FLAG_RESTART_NOW;
4970 else
4971 pm_schedule_suspend(netdev->dev.parent,
4972 LINK_TIMEOUT);
4973 }
4974 }
4975
4976link_up:
4977 spin_lock(&adapter->stats64_lock);
4978 e1000e_update_stats(adapter);
4979
4980 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4981 adapter->tpt_old = adapter->stats.tpt;
4982 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
4983 adapter->colc_old = adapter->stats.colc;
4984
4985 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
4986 adapter->gorc_old = adapter->stats.gorc;
4987 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
4988 adapter->gotc_old = adapter->stats.gotc;
4989 spin_unlock(&adapter->stats64_lock);
4990
4991 if (adapter->flags & FLAG_RESTART_NOW) {
4992 schedule_work(&adapter->reset_task);
4993
4994 return;
4995 }
4996
4997 e1000e_update_adaptive(&adapter->hw);
4998
	/* Simple mode for Interrupt Throttle Rate (ITR) */
5000 if (adapter->itr_setting == 4) {
		/* Symmetric Tx/Rx gets a reduced ITR=2000;
		 * fully asymmetric Tx or Rx gets ITR=8000;
		 * everything in between is scaled from 2000 to 8000.
		 */
5005 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
5006 u32 dif = (adapter->gotc > adapter->gorc ?
5007 adapter->gotc - adapter->gorc :
5008 adapter->gorc - adapter->gotc) / 10000;
5009 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
5010
5011 e1000e_write_itr(adapter, itr);
5012 }
5013
	/* Cause a software interrupt to ensure the Rx ring gets serviced */
5015 if (adapter->msix_entries)
5016 ew32(ICS, adapter->rx_ring->ims_val);
5017 else
5018 ew32(ICS, E1000_ICS_RXDMT0);
5019
5020
5021 e1000e_flush_descriptors(adapter);
5022
5023
5024 adapter->detect_tx_hung = true;
5025
	/* With 82571 controllers, re-write RAR[0] when a locally
	 * administered (LAA) MAC address is in use, since a reset from the
	 * other port may have overwritten it
	 */
5029 if (e1000e_get_laa_state_82571(hw))
5030 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
5031
5032 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
5033 e1000e_check_82574_phy_workaround(adapter);
5034
	/* Clear a valid timestamp stuck in RXSTMPL/H due to a Rx error */
5036 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
5037 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) &&
5038 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) {
5039 er32(RXSTMPH);
5040 adapter->rx_hwtstamp_cleared++;
5041 } else {
5042 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP;
5043 }
5044 }
5045
5046
5047 if (!test_bit(__E1000_DOWN, &adapter->state))
5048 mod_timer(&adapter->watchdog_timer,
5049 round_jiffies(jiffies + 2 * HZ));
5050}
5051
5052#define E1000_TX_FLAGS_CSUM 0x00000001
5053#define E1000_TX_FLAGS_VLAN 0x00000002
5054#define E1000_TX_FLAGS_TSO 0x00000004
5055#define E1000_TX_FLAGS_IPV4 0x00000008
5056#define E1000_TX_FLAGS_NO_FCS 0x00000010
5057#define E1000_TX_FLAGS_HWTSTAMP 0x00000020
5058#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
5059#define E1000_TX_FLAGS_VLAN_SHIFT 16
5060
5061static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
5062{
5063 struct e1000_context_desc *context_desc;
5064 struct e1000_buffer *buffer_info;
5065 unsigned int i;
5066 u32 cmd_length = 0;
5067 u16 ipcse = 0, mss;
5068 u8 ipcss, ipcso, tucss, tucso, hdr_len;
5069
5070 if (!skb_is_gso(skb))
5071 return 0;
5072
5073 if (skb_header_cloned(skb)) {
5074 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5075
5076 if (err)
5077 return err;
5078 }
5079
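	/* header length: everything up to and including the TCP header */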
5080 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5081 mss = skb_shinfo(skb)->gso_size;
5082 if (skb->protocol == htons(ETH_P_IP)) {
5083 struct iphdr *iph = ip_hdr(skb);
5084 iph->tot_len = 0;
5085 iph->check = 0;
5086 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
5087 0, IPPROTO_TCP, 0);
5088 cmd_length = E1000_TXD_CMD_IP;
5089 ipcse = skb_transport_offset(skb) - 1;
5090 } else if (skb_is_gso_v6(skb)) {
5091 ipv6_hdr(skb)->payload_len = 0;
5092 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5093 &ipv6_hdr(skb)->daddr,
5094 0, IPPROTO_TCP, 0);
5095 ipcse = 0;
5096 }
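	/* byte offsets the context descriptor needs in order to locate and
	 * patch the IP and TCP checksum fields
	 */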
5097 ipcss = skb_network_offset(skb);
5098 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
5099 tucss = skb_transport_offset(skb);
5100 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
5101
5102 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
5103 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
5104
5105 i = tx_ring->next_to_use;
5106 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5107 buffer_info = &tx_ring->buffer_info[i];
5108
5109 context_desc->lower_setup.ip_fields.ipcss = ipcss;
5110 context_desc->lower_setup.ip_fields.ipcso = ipcso;
5111 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
5112 context_desc->upper_setup.tcp_fields.tucss = tucss;
5113 context_desc->upper_setup.tcp_fields.tucso = tucso;
5114 context_desc->upper_setup.tcp_fields.tucse = 0;
5115 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
5116 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
5117 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
5118
5119 buffer_info->time_stamp = jiffies;
5120 buffer_info->next_to_watch = i;
5121
5122 i++;
5123 if (i == tx_ring->count)
5124 i = 0;
5125 tx_ring->next_to_use = i;
5126
5127 return 1;
5128}
5129
5130static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
5131{
5132 struct e1000_adapter *adapter = tx_ring->adapter;
5133 struct e1000_context_desc *context_desc;
5134 struct e1000_buffer *buffer_info;
5135 unsigned int i;
5136 u8 css;
5137 u32 cmd_len = E1000_TXD_CMD_DEXT;
5138 __be16 protocol;
5139
5140 if (skb->ip_summed != CHECKSUM_PARTIAL)
5141 return 0;
5142
5143 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
5144 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
5145 else
5146 protocol = skb->protocol;
5147
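	/* only TCP gets the TCP command bit; other protocols still use the
	 * generic checksum offload set up below
	 */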
5148 switch (protocol) {
5149 case cpu_to_be16(ETH_P_IP):
5150 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
5151 cmd_len |= E1000_TXD_CMD_TCP;
5152 break;
5153 case cpu_to_be16(ETH_P_IPV6):
5154
5155 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
5156 cmd_len |= E1000_TXD_CMD_TCP;
5157 break;
5158 default:
5159 if (unlikely(net_ratelimit()))
5160 e_warn("checksum_partial proto=%x!\n",
5161 be16_to_cpu(protocol));
5162 break;
5163 }
5164
5165 css = skb_checksum_start_offset(skb);
5166
5167 i = tx_ring->next_to_use;
5168 buffer_info = &tx_ring->buffer_info[i];
5169 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5170
5171 context_desc->lower_setup.ip_config = 0;
5172 context_desc->upper_setup.tcp_fields.tucss = css;
5173 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
5174 context_desc->upper_setup.tcp_fields.tucse = 0;
5175 context_desc->tcp_seg_setup.data = 0;
5176 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
5177
5178 buffer_info->time_stamp = jiffies;
5179 buffer_info->next_to_watch = i;
5180
5181 i++;
5182 if (i == tx_ring->count)
5183 i = 0;
5184 tx_ring->next_to_use = i;
5185
5186 return 1;
5187}
5188
5189static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
5190 unsigned int first, unsigned int max_per_txd,
5191 unsigned int nr_frags)
5192{
5193 struct e1000_adapter *adapter = tx_ring->adapter;
5194 struct pci_dev *pdev = adapter->pdev;
5195 struct e1000_buffer *buffer_info;
5196 unsigned int len = skb_headlen(skb);
5197 unsigned int offset = 0, size, count = 0, i;
5198 unsigned int f, bytecount, segs;
5199
5200 i = tx_ring->next_to_use;
5201
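	/* map the linear part of the skb, at most max_per_txd bytes per
	 * descriptor
	 */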
5202 while (len) {
5203 buffer_info = &tx_ring->buffer_info[i];
5204 size = min(len, max_per_txd);
5205
5206 buffer_info->length = size;
5207 buffer_info->time_stamp = jiffies;
5208 buffer_info->next_to_watch = i;
5209 buffer_info->dma = dma_map_single(&pdev->dev,
5210 skb->data + offset,
5211 size, DMA_TO_DEVICE);
5212 buffer_info->mapped_as_page = false;
5213 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5214 goto dma_error;
5215
5216 len -= size;
5217 offset += size;
5218 count++;
5219
5220 if (len) {
5221 i++;
5222 if (i == tx_ring->count)
5223 i = 0;
5224 }
5225 }
5226
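	/* then map each paged fragment, honoring the same per-descriptor limit */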
5227 for (f = 0; f < nr_frags; f++) {
5228 const struct skb_frag_struct *frag;
5229
5230 frag = &skb_shinfo(skb)->frags[f];
5231 len = skb_frag_size(frag);
5232 offset = 0;
5233
5234 while (len) {
5235 i++;
5236 if (i == tx_ring->count)
5237 i = 0;
5238
5239 buffer_info = &tx_ring->buffer_info[i];
5240 size = min(len, max_per_txd);
5241
5242 buffer_info->length = size;
5243 buffer_info->time_stamp = jiffies;
5244 buffer_info->next_to_watch = i;
5245 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
5246 offset, size,
5247 DMA_TO_DEVICE);
5248 buffer_info->mapped_as_page = true;
5249 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5250 goto dma_error;
5251
5252 len -= size;
5253 offset += size;
5254 count++;
5255 }
5256 }
5257
5258 segs = skb_shinfo(skb)->gso_segs ? : 1;
5259
5260 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
5261
5262 tx_ring->buffer_info[i].skb = skb;
5263 tx_ring->buffer_info[i].segs = segs;
5264 tx_ring->buffer_info[i].bytecount = bytecount;
5265 tx_ring->buffer_info[first].next_to_watch = i;
5266
5267 return count;
5268
5269dma_error:
5270 dev_err(&pdev->dev, "Tx DMA map failed\n");
5271 buffer_info->dma = 0;
5272 if (count)
5273 count--;
5274
5275 while (count--) {
5276 if (i == 0)
5277 i += tx_ring->count;
5278 i--;
5279 buffer_info = &tx_ring->buffer_info[i];
5280 e1000_put_txbuf(tx_ring, buffer_info);
5281 }
5282
5283 return 0;
5284}
5285
5286static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5287{
5288 struct e1000_adapter *adapter = tx_ring->adapter;
5289 struct e1000_tx_desc *tx_desc = NULL;
5290 struct e1000_buffer *buffer_info;
5291 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
5292 unsigned int i;
5293
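	/* translate tx_flags into extended-descriptor command and option bits */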
5294 if (tx_flags & E1000_TX_FLAGS_TSO) {
5295 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
5296 E1000_TXD_CMD_TSE;
5297 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5298
5299 if (tx_flags & E1000_TX_FLAGS_IPV4)
5300 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
5301 }
5302
5303 if (tx_flags & E1000_TX_FLAGS_CSUM) {
5304 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5305 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5306 }
5307
5308 if (tx_flags & E1000_TX_FLAGS_VLAN) {
5309 txd_lower |= E1000_TXD_CMD_VLE;
5310 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
5311 }
5312
5313 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5314 txd_lower &= ~(E1000_TXD_CMD_IFCS);
5315
5316 if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) {
5317 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5318 txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
5319 }
5320
5321 i = tx_ring->next_to_use;
5322
5323 do {
5324 buffer_info = &tx_ring->buffer_info[i];
5325 tx_desc = E1000_TX_DESC(*tx_ring, i);
5326 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
5327 tx_desc->lower.data = cpu_to_le32(txd_lower |
5328 buffer_info->length);
5329 tx_desc->upper.data = cpu_to_le32(txd_upper);
5330
5331 i++;
5332 if (i == tx_ring->count)
5333 i = 0;
5334 } while (--count > 0);
5335
5336 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
5337
5338
5339 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5340 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
5341
	/* Force memory writes to complete before letting the hardware know
	 * there are new descriptors to fetch.  (Only applicable for
	 * weak-ordered memory model archs, such as IA-64.)
	 */
5347 wmb();
5348
5349 tx_ring->next_to_use = i;
5350
5351 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5352 e1000e_update_tdt_wa(tx_ring, i);
5353 else
5354 writel(i, tx_ring->tail);
5355
	/* Needed when more than one CPU can write our tail register at a
	 * time; it orders the MMIO write before the following unlock
	 */
5359 mmiowb();
5360}
5361
5362#define MINIMUM_DHCP_PACKET_SIZE 282
5363static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5364 struct sk_buff *skb)
5365{
5366 struct e1000_hw *hw = &adapter->hw;
5367 u16 length, offset;
5368
5369 if (vlan_tx_tag_present(skb) &&
5370 !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
5371 (adapter->hw.mng_cookie.status &
5372 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
5373 return 0;
5374
5375 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
5376 return 0;
5377
5378 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
5379 return 0;
5380
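	/* parse just far enough to verify a UDP datagram addressed to the
	 * DHCP/BOOTP server port (67) before handing it to manageability
	 */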
5381 {
5382 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
5383 struct udphdr *udp;
5384
5385 if (ip->protocol != IPPROTO_UDP)
5386 return 0;
5387
5388 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
5389 if (ntohs(udp->dest) != 67)
5390 return 0;
5391
5392 offset = (u8 *)udp + 8 - skb->data;
5393 length = skb->len - offset;
5394 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
5395 }
5396
5397 return 0;
5398}
5399
5400static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5401{
5402 struct e1000_adapter *adapter = tx_ring->adapter;
5403
5404 netif_stop_queue(adapter->netdev);
5405
	/* Make the queue-stop visible to other CPUs before re-checking
	 * for free descriptors below
	 */
5409 smp_mb();
5410
	/* Check again in case another CPU has just made room available */
5414 if (e1000_desc_unused(tx_ring) < size)
5415 return -EBUSY;
5416
5417
5418 netif_start_queue(adapter->netdev);
5419 ++adapter->restart_queue;
5420 return 0;
5421}
5422
5423static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5424{
5425 BUG_ON(size > tx_ring->count);
5426
5427 if (e1000_desc_unused(tx_ring) >= size)
5428 return 0;
5429 return __e1000_maybe_stop_tx(tx_ring, size);
5430}
5431
5432static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5433 struct net_device *netdev)
5434{
5435 struct e1000_adapter *adapter = netdev_priv(netdev);
5436 struct e1000_ring *tx_ring = adapter->tx_ring;
5437 unsigned int first;
5438 unsigned int tx_flags = 0;
5439 unsigned int len = skb_headlen(skb);
5440 unsigned int nr_frags;
5441 unsigned int mss;
5442 int count = 0;
5443 int tso;
5444 unsigned int f;
5445
5446 if (test_bit(__E1000_DOWN, &adapter->state)) {
5447 dev_kfree_skb_any(skb);
5448 return NETDEV_TX_OK;
5449 }
5450
5451 if (skb->len <= 0) {
5452 dev_kfree_skb_any(skb);
5453 return NETDEV_TX_OK;
5454 }
5455
	/* The minimum packet size with TCTL.PSP set is 17 bytes, so pad
	 * the skb to meet that minimum
	 */
5459 if (unlikely(skb->len < 17)) {
5460 if (skb_pad(skb, 17 - skb->len))
5461 return NETDEV_TX_OK;
5462 skb->len = 17;
5463 skb_set_tail_pointer(skb, 17);
5464 }
5465
5466 mss = skb_shinfo(skb)->gso_size;
5467 if (mss) {
5468 u8 hdr_len;
5469
5470
5471
5472
5473
5474 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5475
5476
5477
5478 if (skb->data_len && (hdr_len == len)) {
5479 unsigned int pull_size;
5480
5481 pull_size = min_t(unsigned int, 4, skb->data_len);
5482 if (!__pskb_pull_tail(skb, pull_size)) {
5483 e_err("__pskb_pull_tail failed.\n");
5484 dev_kfree_skb_any(skb);
5485 return NETDEV_TX_OK;
5486 }
5487 len = skb_headlen(skb);
5488 }
5489 }
5490
5491
5492 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5493 count++;
5494 count++;
5495
5496 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
5497
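	/* plus one descriptor per tx_fifo_limit-sized chunk of every fragment */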
5498 nr_frags = skb_shinfo(skb)->nr_frags;
5499 for (f = 0; f < nr_frags; f++)
5500 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5501 adapter->tx_fifo_limit);
5502
5503 if (adapter->hw.mac.tx_pkt_filtering)
5504 e1000_transfer_dhcp_info(adapter, skb);
5505
	/* need: count + 2 descriptor gap to keep the tail from touching
	 * the head, otherwise try again next time
	 */
5509 if (e1000_maybe_stop_tx(tx_ring, count + 2))
5510 return NETDEV_TX_BUSY;
5511
5512 if (vlan_tx_tag_present(skb)) {
5513 tx_flags |= E1000_TX_FLAGS_VLAN;
5514 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
5515 }
5516
5517 first = tx_ring->next_to_use;
5518
5519 tso = e1000_tso(tx_ring, skb);
5520 if (tso < 0) {
5521 dev_kfree_skb_any(skb);
5522 return NETDEV_TX_OK;
5523 }
5524
5525 if (tso)
5526 tx_flags |= E1000_TX_FLAGS_TSO;
5527 else if (e1000_tx_csum(tx_ring, skb))
5528 tx_flags |= E1000_TX_FLAGS_CSUM;
5529
	/* The hardware supports TSO for IPv6 as well as IPv4, so the IPv4
	 * descriptor option is set only when the packet really is IPv4
	 * rather than being assumed whenever TSO is in use
	 */
5534 if (skb->protocol == htons(ETH_P_IP))
5535 tx_flags |= E1000_TX_FLAGS_IPV4;
5536
5537 if (unlikely(skb->no_fcs))
5538 tx_flags |= E1000_TX_FLAGS_NO_FCS;
5539
	/* a count of zero means the DMA mapping failed */
5541 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5542 nr_frags);
5543 if (count) {
5544 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
5545 !adapter->tx_hwtstamp_skb)) {
5546 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5547 tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
5548 adapter->tx_hwtstamp_skb = skb_get(skb);
5549 schedule_work(&adapter->tx_hwtstamp_work);
5550 } else {
5551 skb_tx_timestamp(skb);
5552 }
5553
5554 netdev_sent_queue(netdev, skb->len);
5555 e1000_tx_queue(tx_ring, tx_flags, count);
5556
5557 e1000_maybe_stop_tx(tx_ring,
5558 (MAX_SKB_FRAGS *
5559 DIV_ROUND_UP(PAGE_SIZE,
5560 adapter->tx_fifo_limit) + 2));
5561 } else {
5562 dev_kfree_skb_any(skb);
5563 tx_ring->buffer_info[first].time_stamp = 0;
5564 tx_ring->next_to_use = first;
5565 }
5566
5567 return NETDEV_TX_OK;
5568}
5569
/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
5574static void e1000_tx_timeout(struct net_device *netdev)
5575{
5576 struct e1000_adapter *adapter = netdev_priv(netdev);
5577
5578
5579 adapter->tx_timeout_count++;
5580 schedule_work(&adapter->reset_task);
5581}
5582
5583static void e1000_reset_task(struct work_struct *work)
5584{
5585 struct e1000_adapter *adapter;
5586 adapter = container_of(work, struct e1000_adapter, reset_task);
5587
5588
5589 if (test_bit(__E1000_DOWN, &adapter->state))
5590 return;
5591
5592 if (!(adapter->flags & FLAG_RESTART_NOW)) {
5593 e1000e_dump(adapter);
5594 e_err("Reset adapter unexpectedly\n");
5595 }
5596 e1000e_reinit_locked(adapter);
5597}
5598
/**
 * e1000e_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 *
 * Returns the address of the device statistics structure.
 **/
5606struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5607 struct rtnl_link_stats64 *stats)
5608{
5609 struct e1000_adapter *adapter = netdev_priv(netdev);
5610
5611 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5612 spin_lock(&adapter->stats64_lock);
5613 e1000e_update_stats(adapter);
5614
5615 stats->rx_bytes = adapter->stats.gorc;
5616 stats->rx_packets = adapter->stats.gprc;
5617 stats->tx_bytes = adapter->stats.gotc;
5618 stats->tx_packets = adapter->stats.gptc;
5619 stats->multicast = adapter->stats.mprc;
5620 stats->collisions = adapter->stats.colc;
5621
	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect, so build our own
	 * length-error count from RUC and ROC
	 */
5627 stats->rx_errors = adapter->stats.rxerrc +
5628 adapter->stats.crcerrs + adapter->stats.algnerrc +
5629 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
5630 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
5631 stats->rx_crc_errors = adapter->stats.crcerrs;
5632 stats->rx_frame_errors = adapter->stats.algnerrc;
5633 stats->rx_missed_errors = adapter->stats.mpc;
5634
5635
5636 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
5637 stats->tx_aborted_errors = adapter->stats.ecol;
5638 stats->tx_window_errors = adapter->stats.latecol;
5639 stats->tx_carrier_errors = adapter->stats.tncrs;
5640
5641
5642
5643 spin_unlock(&adapter->stats64_lock);
5644 return stats;
5645}
5646
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
5654static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5655{
5656 struct e1000_adapter *adapter = netdev_priv(netdev);
5657 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5658
5659
5660 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
5661 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5662 e_err("Jumbo Frames not supported.\n");
5663 return -EINVAL;
5664 }
5665
5666
5667 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
5668 (max_frame > adapter->max_hw_frame_size)) {
5669 e_err("Unsupported MTU setting\n");
5670 return -EINVAL;
5671 }
5672
5673
5674 if ((adapter->hw.mac.type >= e1000_pch2lan) &&
5675 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5676 (new_mtu > ETH_DATA_LEN)) {
5677 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
5678 return -EINVAL;
5679 }
5680
5681 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5682 usleep_range(1000, 2000);
5683
5684 adapter->max_frame_size = max_frame;
5685 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5686 netdev->mtu = new_mtu;
5687 if (netif_running(netdev))
5688 e1000e_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically
	 * NET_IP_ALIGN reserves 2 more, which pushes a 2048-byte buffer
	 * into the next larger slab size; jumbo receives instead use the
	 * *_jumbo_rx* routines with fragmented skbs
	 */
5698 if (max_frame <= 2048)
5699 adapter->rx_buffer_len = 2048;
5700 else
5701 adapter->rx_buffer_len = 4096;
5702
5703
5704 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
5705 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
5706 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
5707 + ETH_FCS_LEN;
5708
5709 if (netif_running(netdev))
5710 e1000e_up(adapter);
5711 else
5712 e1000e_reset(adapter);
5713
5714 clear_bit(__E1000_RESETTING, &adapter->state);
5715
5716 return 0;
5717}
5718
5719static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
5720 int cmd)
5721{
5722 struct e1000_adapter *adapter = netdev_priv(netdev);
5723 struct mii_ioctl_data *data = if_mii(ifr);
5724
5725 if (adapter->hw.phy.media_type != e1000_media_type_copper)
5726 return -EOPNOTSUPP;
5727
5728 switch (cmd) {
5729 case SIOCGMIIPHY:
5730 data->phy_id = adapter->hw.phy.addr;
5731 break;
5732 case SIOCGMIIREG:
5733 e1000_phy_read_status(adapter);
5734
5735 switch (data->reg_num & 0x1F) {
5736 case MII_BMCR:
5737 data->val_out = adapter->phy_regs.bmcr;
5738 break;
5739 case MII_BMSR:
5740 data->val_out = adapter->phy_regs.bmsr;
5741 break;
5742 case MII_PHYSID1:
5743 data->val_out = (adapter->hw.phy.id >> 16);
5744 break;
5745 case MII_PHYSID2:
5746 data->val_out = (adapter->hw.phy.id & 0xFFFF);
5747 break;
5748 case MII_ADVERTISE:
5749 data->val_out = adapter->phy_regs.advertise;
5750 break;
5751 case MII_LPA:
5752 data->val_out = adapter->phy_regs.lpa;
5753 break;
5754 case MII_EXPANSION:
5755 data->val_out = adapter->phy_regs.expansion;
5756 break;
5757 case MII_CTRL1000:
5758 data->val_out = adapter->phy_regs.ctrl1000;
5759 break;
5760 case MII_STAT1000:
5761 data->val_out = adapter->phy_regs.stat1000;
5762 break;
5763 case MII_ESTATUS:
5764 data->val_out = adapter->phy_regs.estatus;
5765 break;
5766 default:
5767 return -EIO;
5768 }
5769 break;
5770 case SIOCSMIIREG:
5771 default:
5772 return -EOPNOTSUPP;
5773 }
5774 return 0;
5775}
5776
/**
 * e1000e_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request
 *
 * Outgoing time stamping can be enabled and disabled.  Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it.  At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware filters.
 * Not all combinations are supported, in particular event type has to be
 * specified.  Matching the kind of event packet is not supported, with
 * the exception of "all V2 events regardless of level 2 or 4".
 **/
5793static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
5794{
5795 struct e1000_adapter *adapter = netdev_priv(netdev);
5796 struct hwtstamp_config config;
5797 int ret_val;
5798
5799 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5800 return -EFAULT;
5801
5802 adapter->hwtstamp_config = config;
5803
5804 ret_val = e1000e_config_hwtstamp(adapter);
5805 if (ret_val)
5806 return ret_val;
5807
5808 config = adapter->hwtstamp_config;
5809
5810 switch (config.rx_filter) {
5811 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
5812 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
5813 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5814 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5815 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5816 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* With V2 type filters which specify a Sync or Delay Request,
		 * Path Delay Request/Response messages are also time stamped
		 * by hardware, so notify the caller that the requested
		 * packets plus some others are time stamped.
		 */
5822 config.rx_filter = HWTSTAMP_FILTER_SOME;
5823 break;
5824 default:
5825 break;
5826 }
5827
5828 return copy_to_user(ifr->ifr_data, &config,
5829 sizeof(config)) ? -EFAULT : 0;
5830}
5831
5832static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5833{
5834 switch (cmd) {
5835 case SIOCGMIIPHY:
5836 case SIOCGMIIREG:
5837 case SIOCSMIIREG:
5838 return e1000_mii_ioctl(netdev, ifr, cmd);
5839 case SIOCSHWTSTAMP:
5840 return e1000e_hwtstamp_ioctl(netdev, ifr);
5841 default:
5842 return -EOPNOTSUPP;
5843 }
5844}
5845
5846static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5847{
5848 struct e1000_hw *hw = &adapter->hw;
5849 u32 i, mac_reg;
5850 u16 phy_reg, wuc_enable;
5851 int retval;
5852
5853
5854 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
5855
5856 retval = hw->phy.ops.acquire(hw);
5857 if (retval) {
5858 e_err("Could not acquire PHY\n");
5859 return retval;
5860 }
5861
5862
5863 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5864 if (retval)
5865 goto release;
5866
5867
5868 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5869 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5870 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
5871 (u16)(mac_reg & 0xFFFF));
5872 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
5873 (u16)((mac_reg >> 16) & 0xFFFF));
5874 }
5875
5876
5877 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
5878 mac_reg = er32(RCTL);
5879 if (mac_reg & E1000_RCTL_UPE)
5880 phy_reg |= BM_RCTL_UPE;
5881 if (mac_reg & E1000_RCTL_MPE)
5882 phy_reg |= BM_RCTL_MPE;
5883 phy_reg &= ~(BM_RCTL_MO_MASK);
5884 if (mac_reg & E1000_RCTL_MO_3)
5885 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5886 << BM_RCTL_MO_SHIFT);
5887 if (mac_reg & E1000_RCTL_BAM)
5888 phy_reg |= BM_RCTL_BAM;
5889 if (mac_reg & E1000_RCTL_PMCF)
5890 phy_reg |= BM_RCTL_PMCF;
5891 mac_reg = er32(CTRL);
5892 if (mac_reg & E1000_CTRL_RFCE)
5893 phy_reg |= BM_RCTL_RFCE;
5894 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
5895
5896
5897 ew32(WUFC, wufc);
5898 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5899
5900
5901 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
5902 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5903
5904
5905 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5906 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5907 if (retval)
5908 e_err("Could not set PHY Host Wakeup bit\n");
5909release:
5910 hw->phy.ops.release(hw);
5911
5912 return retval;
5913}
5914
5915static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5916{
5917 struct net_device *netdev = pci_get_drvdata(pdev);
5918 struct e1000_adapter *adapter = netdev_priv(netdev);
5919 struct e1000_hw *hw = &adapter->hw;
5920 u32 ctrl, ctrl_ext, rctl, status;
5921
5922 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
5923 int retval = 0;
5924
5925 netif_device_detach(netdev);
5926
5927 if (netif_running(netdev)) {
5928 int count = E1000_CHECK_RESET_COUNT;
5929
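		/* wait for any in-progress reset to finish before taking the interface down */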
5930 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
5931 usleep_range(10000, 20000);
5932
5933 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5934 e1000e_down(adapter);
5935 e1000_free_irq(adapter);
5936 }
5937 e1000e_reset_interrupt_capability(adapter);
5938
5939 status = er32(STATUS);
5940 if (status & E1000_STATUS_LU)
5941 wufc &= ~E1000_WUFC_LNKC;
5942
5943 if (wufc) {
5944 e1000_setup_rctl(adapter);
5945 e1000e_set_rx_mode(netdev);
5946
5947
5948 if (wufc & E1000_WUFC_MC) {
5949 rctl = er32(RCTL);
5950 rctl |= E1000_RCTL_MPE;
5951 ew32(RCTL, rctl);
5952 }
5953
5954 ctrl = er32(CTRL);
5955 ctrl |= E1000_CTRL_ADVD3WUC;
5956 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
5957 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
5958 ew32(CTRL, ctrl);
5959
5960 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
5961 adapter->hw.phy.media_type ==
5962 e1000_media_type_internal_serdes) {
5963
5964 ctrl_ext = er32(CTRL_EXT);
5965 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
5966 ew32(CTRL_EXT, ctrl_ext);
5967 }
5968
5969 if (adapter->flags & FLAG_IS_ICH)
5970 e1000_suspend_workarounds_ich8lan(&adapter->hw);
5971
5972
5973 e1000e_disable_pcie_master(&adapter->hw);
5974
5975 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5976
5977 retval = e1000_init_phy_wakeup(adapter, wufc);
5978 if (retval)
5979 return retval;
5980 } else {
5981
5982 ew32(WUFC, wufc);
5983 ew32(WUC, E1000_WUC_PME_EN);
5984 }
5985 } else {
5986 ew32(WUC, 0);
5987 ew32(WUFC, 0);
5988 }
5989
5990 if (adapter->hw.phy.type == e1000_phy_igp_3)
5991 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5992
	/* Release control of the hardware to the firmware.  If the firmware
	 * is AMT enabled, this will already have happened in close.
	 */
5996 e1000e_release_hw_control(adapter);
5997
	/* The PCIe switch on some quad-port adapters may report a
	 * correctable error when the MAC transitions from D0 to D3, so
	 * mask correctable-error reporting on the switch downstream port
	 * around the transition.
	 */
6003 if (adapter->flags & FLAG_IS_QUAD_PORT) {
6004 struct pci_dev *us_dev = pdev->bus->self;
6005 u16 devctl;
6006
6007 pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
6008 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
6009 (devctl & ~PCI_EXP_DEVCTL_CERE));
6010
6011 pci_save_state(pdev);
6012 pci_prepare_to_sleep(pdev);
6013
6014 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
6015 }
6016
6017 return 0;
6018}
6019
6020#ifdef CONFIG_PCIEASPM
6021static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6022{
6023 pci_disable_link_state_locked(pdev, state);
6024}
6025#else
6026static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6027{
6028 u16 aspm_ctl = 0;
6029
6030 if (state & PCIE_LINK_STATE_L0S)
6031 aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S;
6032 if (state & PCIE_LINK_STATE_L1)
6033 aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1;
6034
	/* Both device and parent should have the same ASPM setting;
	 * disable ASPM on the endpoint first, then on the upstream port
	 */
6038 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl);
6039
6040 if (pdev->bus->self)
6041 pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
6042 aspm_ctl);
6043}
6044#endif
6045static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6046{
6047 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
6048 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
6049 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
6050
6051 __e1000e_disable_aspm(pdev, state);
6052}
6053
6054#ifdef CONFIG_PM
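/* the Tx ring's buffer_info array only exists while the interface is up,
 * so use it to decide whether runtime PM transitions are worthwhile
 */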
6055static bool e1000e_pm_ready(struct e1000_adapter *adapter)
6056{
6057 return !!adapter->tx_ring->buffer_info;
6058}
6059
6060static int __e1000_resume(struct pci_dev *pdev)
6061{
6062 struct net_device *netdev = pci_get_drvdata(pdev);
6063 struct e1000_adapter *adapter = netdev_priv(netdev);
6064 struct e1000_hw *hw = &adapter->hw;
6065 u16 aspm_disable_flag = 0;
6066 u32 err;
6067
6068 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6069 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6070 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
6071 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6072 if (aspm_disable_flag)
6073 e1000e_disable_aspm(pdev, aspm_disable_flag);
6074
6075 pci_set_master(pdev);
6076
6077 e1000e_set_interrupt_capability(adapter);
6078 if (netif_running(netdev)) {
6079 err = e1000_request_irq(adapter);
6080 if (err)
6081 return err;
6082 }
6083
6084 if (hw->mac.type >= e1000_pch2lan)
6085 e1000_resume_workarounds_pchlan(&adapter->hw);
6086
6087 e1000e_power_up_phy(adapter);
6088
6089
6090 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
6091 u16 phy_data;
6092
6093 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
6094 if (phy_data) {
6095 e_info("PHY Wakeup cause - %s\n",
6096 phy_data & E1000_WUS_EX ? "Unicast Packet" :
6097 phy_data & E1000_WUS_MC ? "Multicast Packet" :
6098 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
6099 phy_data & E1000_WUS_MAG ? "Magic Packet" :
6100 phy_data & E1000_WUS_LNKC ?
6101 "Link Status Change" : "other");
6102 }
6103 e1e_wphy(&adapter->hw, BM_WUS, ~0);
6104 } else {
6105 u32 wus = er32(WUS);
6106 if (wus) {
6107 e_info("MAC Wakeup cause - %s\n",
6108 wus & E1000_WUS_EX ? "Unicast Packet" :
6109 wus & E1000_WUS_MC ? "Multicast Packet" :
6110 wus & E1000_WUS_BC ? "Broadcast Packet" :
6111 wus & E1000_WUS_MAG ? "Magic Packet" :
6112 wus & E1000_WUS_LNKC ? "Link Status Change" :
6113 "other");
6114 }
6115 ew32(WUS, ~0);
6116 }
6117
6118 e1000e_reset(adapter);
6119
6120 e1000_init_manageability_pt(adapter);
6121
6122 if (netif_running(netdev))
6123 e1000e_up(adapter);
6124
6125 netif_device_attach(netdev);
6126
	/* If the controller has AMT, do not set DRV_LOAD until the
	 * interface is up.  For all other cases, let the firmware know
	 * that the hardware is now under the control of the driver.
	 */
6131 if (!(adapter->flags & FLAG_HAS_AMT))
6132 e1000e_get_hw_control(adapter);
6133
6134 return 0;
6135}
6136
6137#ifdef CONFIG_PM_SLEEP
6138static int e1000_suspend(struct device *dev)
6139{
6140 struct pci_dev *pdev = to_pci_dev(dev);
6141
6142 return __e1000_shutdown(pdev, false);
6143}
6144
6145static int e1000_resume(struct device *dev)
6146{
6147 struct pci_dev *pdev = to_pci_dev(dev);
6148 struct net_device *netdev = pci_get_drvdata(pdev);
6149 struct e1000_adapter *adapter = netdev_priv(netdev);
6150
6151 if (e1000e_pm_ready(adapter))
6152 adapter->idle_check = true;
6153
6154 return __e1000_resume(pdev);
6155}
6156#endif
6157
6158#ifdef CONFIG_PM_RUNTIME
6159static int e1000_runtime_suspend(struct device *dev)
6160{
6161 struct pci_dev *pdev = to_pci_dev(dev);
6162 struct net_device *netdev = pci_get_drvdata(pdev);
6163 struct e1000_adapter *adapter = netdev_priv(netdev);
6164
6165 if (!e1000e_pm_ready(adapter))
6166 return 0;
6167
6168 return __e1000_shutdown(pdev, true);
6169}
6170
6171static int e1000_idle(struct device *dev)
6172{
6173 struct pci_dev *pdev = to_pci_dev(dev);
6174 struct net_device *netdev = pci_get_drvdata(pdev);
6175 struct e1000_adapter *adapter = netdev_priv(netdev);
6176
6177 if (!e1000e_pm_ready(adapter))
6178 return 0;
6179
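	/* when the link is down, arm a delayed runtime suspend; always
	 * return busy so the PM core polls this check again later
	 */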
6180 if (adapter->idle_check) {
6181 adapter->idle_check = false;
6182 if (!e1000e_has_link(adapter))
6183 pm_schedule_suspend(dev, MSEC_PER_SEC);
6184 }
6185
6186 return -EBUSY;
6187}
6188
6189static int e1000_runtime_resume(struct device *dev)
6190{
6191 struct pci_dev *pdev = to_pci_dev(dev);
6192 struct net_device *netdev = pci_get_drvdata(pdev);
6193 struct e1000_adapter *adapter = netdev_priv(netdev);
6194
6195 if (!e1000e_pm_ready(adapter))
6196 return 0;
6197
6198 adapter->idle_check = !dev->power.runtime_auto;
6199 return __e1000_resume(pdev);
6200}
6201#endif
6202#endif
6203
6204static void e1000_shutdown(struct pci_dev *pdev)
6205{
6206 __e1000_shutdown(pdev, false);
6207}
6208
6209#ifdef CONFIG_NET_POLL_CONTROLLER
6210
6211static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
6212{
6213 struct net_device *netdev = data;
6214 struct e1000_adapter *adapter = netdev_priv(netdev);
6215
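	/* service each vector in turn with its IRQ masked: Rx, Tx, then
	 * the "other" causes vector
	 */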
6216 if (adapter->msix_entries) {
6217 int vector, msix_irq;
6218
6219 vector = 0;
6220 msix_irq = adapter->msix_entries[vector].vector;
6221 disable_irq(msix_irq);
6222 e1000_intr_msix_rx(msix_irq, netdev);
6223 enable_irq(msix_irq);
6224
6225 vector++;
6226 msix_irq = adapter->msix_entries[vector].vector;
6227 disable_irq(msix_irq);
6228 e1000_intr_msix_tx(msix_irq, netdev);
6229 enable_irq(msix_irq);
6230
6231 vector++;
6232 msix_irq = adapter->msix_entries[vector].vector;
6233 disable_irq(msix_irq);
6234 e1000_msix_other(msix_irq, netdev);
6235 enable_irq(msix_irq);
6236 }
6237
6238 return IRQ_HANDLED;
6239}
6240
/**
 * e1000_netpoll - polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * Used by things like netconsole to send skbs without having to
 * re-enable interrupts.  It's not called while the normal interrupt
 * routine is executing.
 */
6249static void e1000_netpoll(struct net_device *netdev)
6250{
6251 struct e1000_adapter *adapter = netdev_priv(netdev);
6252
6253 switch (adapter->int_mode) {
6254 case E1000E_INT_MODE_MSIX:
6255 e1000_intr_msix(adapter->pdev->irq, netdev);
6256 break;
6257 case E1000E_INT_MODE_MSI:
6258 disable_irq(adapter->pdev->irq);
6259 e1000_intr_msi(adapter->pdev->irq, netdev);
6260 enable_irq(adapter->pdev->irq);
6261 break;
6262 default:
6263 disable_irq(adapter->pdev->irq);
6264 e1000_intr(adapter->pdev->irq, netdev);
6265 enable_irq(adapter->pdev->irq);
6266 break;
6267 }
6268}
6269#endif
6270
/**
 * e1000_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
6279static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
6280 pci_channel_state_t state)
6281{
6282 struct net_device *netdev = pci_get_drvdata(pdev);
6283 struct e1000_adapter *adapter = netdev_priv(netdev);
6284
6285 netif_device_detach(netdev);
6286
6287 if (state == pci_channel_io_perm_failure)
6288 return PCI_ERS_RESULT_DISCONNECT;
6289
6290 if (netif_running(netdev))
6291 e1000e_down(adapter);
6292 pci_disable_device(pdev);
6293
6294
6295 return PCI_ERS_RESULT_NEED_RESET;
6296}
6297
/**
 * e1000_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot; resembles
 * the first half of the __e1000_resume routine.
 */
6305static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
6306{
6307 struct net_device *netdev = pci_get_drvdata(pdev);
6308 struct e1000_adapter *adapter = netdev_priv(netdev);
6309 struct e1000_hw *hw = &adapter->hw;
6310 u16 aspm_disable_flag = 0;
6311 int err;
6312 pci_ers_result_t result;
6313
6314 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6315 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6316 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
6317 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6318 if (aspm_disable_flag)
6319 e1000e_disable_aspm(pdev, aspm_disable_flag);
6320
6321 err = pci_enable_device_mem(pdev);
6322 if (err) {
6323 dev_err(&pdev->dev,
6324 "Cannot re-enable PCI device after reset.\n");
6325 result = PCI_ERS_RESULT_DISCONNECT;
6326 } else {
6327 pdev->state_saved = true;
6328 pci_restore_state(pdev);
6329 pci_set_master(pdev);
6330
6331 pci_enable_wake(pdev, PCI_D3hot, 0);
6332 pci_enable_wake(pdev, PCI_D3cold, 0);
6333
6334 e1000e_reset(adapter);
6335 ew32(WUS, ~0);
6336 result = PCI_ERS_RESULT_RECOVERED;
6337 }
6338
6339 pci_cleanup_aer_uncorrect_error_status(pdev);
6340
6341 return result;
6342}
6343
/**
 * e1000_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us
 * that it is OK to resume normal operation; resembles the second
 * half of the __e1000_resume routine.
 */
6352static void e1000_io_resume(struct pci_dev *pdev)
6353{
6354 struct net_device *netdev = pci_get_drvdata(pdev);
6355 struct e1000_adapter *adapter = netdev_priv(netdev);
6356
6357 e1000_init_manageability_pt(adapter);
6358
6359 if (netif_running(netdev)) {
6360 if (e1000e_up(adapter)) {
6361 dev_err(&pdev->dev,
6362 "can't bring device back up after reset\n");
6363 return;
6364 }
6365 }
6366
6367 netif_device_attach(netdev);
6368
	/* If the controller has AMT, do not set DRV_LOAD until the
	 * interface is up.  For all other cases, let the firmware know
	 * that the hardware is now under the control of the driver.
	 */
6373 if (!(adapter->flags & FLAG_HAS_AMT))
6374 e1000e_get_hw_control(adapter);
6375}
6376
6377static void e1000_print_device_info(struct e1000_adapter *adapter)
6378{
6379 struct e1000_hw *hw = &adapter->hw;
6380 struct net_device *netdev = adapter->netdev;
6381 u32 ret_val;
6382 u8 pba_str[E1000_PBANUM_LENGTH];
6383
6384
6385 e_info("(PCI Express:2.5GT/s:%s) %pM\n",
6386
6387 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
6388 "Width x1"),
6389
6390 netdev->dev_addr);
6391 e_info("Intel(R) PRO/%s Network Connection\n",
6392 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
6393 ret_val = e1000_read_pba_string_generic(hw, pba_str,
6394 E1000_PBANUM_LENGTH);
6395 if (ret_val)
6396 strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
6397 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
6398 hw->mac.type, hw->phy.type, pba_str);
6399}
6400
6401static void e1000_eeprom_checks(struct e1000_adapter *adapter)
6402{
6403 struct e1000_hw *hw = &adapter->hw;
6404 int ret_val;
6405 u16 buf = 0;
6406
6407 if (hw->mac.type != e1000_82573)
6408 return;
6409
6410 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
6411 le16_to_cpus(&buf);
6412 if (!ret_val && (!(buf & (1 << 0)))) {
6413
6414 dev_warn(&adapter->pdev->dev,
6415 "Warning: detected DSPD enabled in EEPROM\n");
6416 }
6417}
6418
6419static int e1000_set_features(struct net_device *netdev,
6420 netdev_features_t features)
6421{
6422 struct e1000_adapter *adapter = netdev_priv(netdev);
6423 netdev_features_t changed = features ^ netdev->features;
6424
6425 if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
6426 adapter->flags |= FLAG_TSO_FORCE;
6427
6428 if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
6429 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
6430 NETIF_F_RXALL)))
6431 return 0;
6432
6433 if (changed & NETIF_F_RXFCS) {
6434 if (features & NETIF_F_RXFCS) {
6435 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6436 } else {
			/* restore CRC stripping to the adapter default,
			 * which may still have stripping disabled
			 */
6440 if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
6441 adapter->flags2 |= FLAG2_CRC_STRIPPING;
6442 else
6443 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6444 }
6445 }
6446
6447 netdev->features = features;
6448
6449 if (netif_running(netdev))
6450 e1000e_reinit_locked(adapter);
6451 else
6452 e1000e_reset(adapter);
6453
6454 return 0;
6455}
6456
6457static const struct net_device_ops e1000e_netdev_ops = {
6458 .ndo_open = e1000_open,
6459 .ndo_stop = e1000_close,
6460 .ndo_start_xmit = e1000_xmit_frame,
6461 .ndo_get_stats64 = e1000e_get_stats64,
6462 .ndo_set_rx_mode = e1000e_set_rx_mode,
6463 .ndo_set_mac_address = e1000_set_mac,
6464 .ndo_change_mtu = e1000_change_mtu,
6465 .ndo_do_ioctl = e1000_ioctl,
6466 .ndo_tx_timeout = e1000_tx_timeout,
6467 .ndo_validate_addr = eth_validate_addr,
6468
6469 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
6470 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
6471#ifdef CONFIG_NET_POLL_CONTROLLER
6472 .ndo_poll_controller = e1000_netpoll,
6473#endif
6474 .ndo_set_features = e1000_set_features,
6475};
6476
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
6488static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6489{
6490 struct net_device *netdev;
6491 struct e1000_adapter *adapter;
6492 struct e1000_hw *hw;
6493 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
6494 resource_size_t mmio_start, mmio_len;
6495 resource_size_t flash_start, flash_len;
6496 static int cards_found;
6497 u16 aspm_disable_flag = 0;
6498 int bars, i, err, pci_using_dac;
6499 u16 eeprom_data = 0;
6500 u16 eeprom_apme_mask = E1000_EEPROM_APME;
6501
6502 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
6503 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6504 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
6505 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6506 if (aspm_disable_flag)
6507 e1000e_disable_aspm(pdev, aspm_disable_flag);
6508
6509 err = pci_enable_device_mem(pdev);
6510 if (err)
6511 return err;
6512
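	/* prefer 64-bit DMA and coherent masks; fall back to 32-bit when
	 * the platform cannot provide them
	 */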
6513 pci_using_dac = 0;
6514 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6515 if (!err) {
6516 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
6517 if (!err)
6518 pci_using_dac = 1;
6519 } else {
6520 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6521 if (err) {
6522 err = dma_set_coherent_mask(&pdev->dev,
6523 DMA_BIT_MASK(32));
6524 if (err) {
6525 dev_err(&pdev->dev,
6526 "No usable DMA configuration, aborting\n");
6527 goto err_dma;
6528 }
6529 }
6530 }
6531
6532 bars = pci_select_bars(pdev, IORESOURCE_MEM);
6533 err = pci_request_selected_regions_exclusive(pdev, bars,
6534 e1000e_driver_name);
6535 if (err)
6536 goto err_pci_reg;
6537
6538
6539 pci_enable_pcie_error_reporting(pdev);
6540
6541 pci_set_master(pdev);
6542
6543 err = pci_save_state(pdev);
6544 if (err)
6545 goto err_alloc_etherdev;
6546
6547 err = -ENOMEM;
6548 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
6549 if (!netdev)
6550 goto err_alloc_etherdev;
6551
6552 SET_NETDEV_DEV(netdev, &pdev->dev);
6553
6554 netdev->irq = pdev->irq;
6555
6556 pci_set_drvdata(pdev, netdev);
6557 adapter = netdev_priv(netdev);
6558 hw = &adapter->hw;
6559 adapter->netdev = netdev;
6560 adapter->pdev = pdev;
6561 adapter->ei = ei;
6562 adapter->pba = ei->pba;
6563 adapter->flags = ei->flags;
6564 adapter->flags2 = ei->flags2;
6565 adapter->hw.adapter = adapter;
6566 adapter->hw.mac.type = ei->mac;
6567 adapter->max_hw_frame_size = ei->max_hw_frame_size;
6568 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6569
6570 mmio_start = pci_resource_start(pdev, 0);
6571 mmio_len = pci_resource_len(pdev, 0);
6572
6573 err = -EIO;
6574 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
6575 if (!adapter->hw.hw_addr)
6576 goto err_ioremap;
6577
6578 if ((adapter->flags & FLAG_HAS_FLASH) &&
6579 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
6580 flash_start = pci_resource_start(pdev, 1);
6581 flash_len = pci_resource_len(pdev, 1);
6582 adapter->hw.flash_address = ioremap(flash_start, flash_len);
6583 if (!adapter->hw.flash_address)
6584 goto err_flashmap;
6585 }
6586
6587
6588 if (adapter->flags2 & FLAG2_HAS_EEE)
6589 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
6590
6591
6592 netdev->netdev_ops = &e1000e_netdev_ops;
6593 e1000e_set_ethtool_ops(netdev);
6594 netdev->watchdog_timeo = 5 * HZ;
6595 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
6596 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
6597
6598 netdev->mem_start = mmio_start;
6599 netdev->mem_end = mmio_start + mmio_len;
6600
6601 adapter->bd_number = cards_found++;
6602
6603 e1000e_check_options(adapter);
6604
6605
6606 err = e1000_sw_init(adapter);
6607 if (err)
6608 goto err_sw_init;
6609
6610 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6611 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
6612 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6613
6614 err = ei->get_variants(adapter);
6615 if (err)
6616 goto err_hw_init;
6617
6618 if ((adapter->flags & FLAG_IS_ICH) &&
6619 (adapter->flags & FLAG_READ_ONLY_NVM))
6620 e1000e_write_protect_nvm_ich8lan(&adapter->hw);
6621
6622 hw->mac.ops.get_bus_info(&adapter->hw);
6623
6624 adapter->hw.phy.autoneg_wait_to_complete = 0;
6625
6626
6627 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
6628 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6629 adapter->hw.phy.disable_polarity_correction = 0;
6630 adapter->hw.phy.ms_type = e1000_ms_hw_default;
6631 }
6632
6633 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
6634 dev_info(&pdev->dev,
6635 "PHY reset is blocked due to SOL/IDER session.\n");
6636
6637
6638 netdev->features = (NETIF_F_SG |
6639 NETIF_F_HW_VLAN_CTAG_RX |
6640 NETIF_F_HW_VLAN_CTAG_TX |
6641 NETIF_F_TSO |
6642 NETIF_F_TSO6 |
6643 NETIF_F_RXHASH |
6644 NETIF_F_RXCSUM |
6645 NETIF_F_HW_CSUM);
6646
6647
6648 netdev->hw_features = netdev->features;
6649 netdev->hw_features |= NETIF_F_RXFCS;
6650 netdev->priv_flags |= IFF_SUPP_NOFCS;
6651 netdev->hw_features |= NETIF_F_RXALL;
6652
6653 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
6654 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6655
6656 netdev->vlan_features |= (NETIF_F_SG |
6657 NETIF_F_TSO |
6658 NETIF_F_TSO6 |
6659 NETIF_F_HW_CSUM);
6660
6661 netdev->priv_flags |= IFF_UNICAST_FLT;
6662
6663 if (pci_using_dac) {
6664 netdev->features |= NETIF_F_HIGHDMA;
6665 netdev->vlan_features |= NETIF_F_HIGHDMA;
6666 }
6667
6668 if (e1000e_enable_mng_pass_thru(&adapter->hw))
6669 adapter->flags |= FLAG_MNG_PT_ENABLED;
6670
	/* before reading the NVM, reset the controller to put the device
	 * in a known good starting state
	 */
6674 adapter->hw.mac.ops.reset_hw(&adapter->hw);
6675
	/* systems with ASPM and others may see the checksum fail on the
	 * first attempt, so give it a few tries
	 */
6679 for (i = 0;; i++) {
6680 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
6681 break;
6682 if (i == 2) {
6683 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
6684 err = -EIO;
6685 goto err_eeprom;
6686 }
6687 }
6688
6689 e1000_eeprom_checks(adapter);
6690
6691
6692 if (e1000e_read_mac_addr(&adapter->hw))
6693 dev_err(&pdev->dev,
6694 "NVM Read Error while reading MAC address\n");
6695
6696 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
6697
6698 if (!is_valid_ether_addr(netdev->dev_addr)) {
6699 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
6700 netdev->dev_addr);
6701 err = -EIO;
6702 goto err_eeprom;
6703 }
6704
6705 init_timer(&adapter->watchdog_timer);
6706 adapter->watchdog_timer.function = e1000_watchdog;
6707 adapter->watchdog_timer.data = (unsigned long)adapter;
6708
6709 init_timer(&adapter->phy_info_timer);
6710 adapter->phy_info_timer.function = e1000_update_phy_info;
6711 adapter->phy_info_timer.data = (unsigned long)adapter;
6712
6713 INIT_WORK(&adapter->reset_task, e1000_reset_task);
6714 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
6715 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
6716 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
6717 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
6718
6719
6720 adapter->hw.mac.autoneg = 1;
6721 adapter->fc_autoneg = true;
6722 adapter->hw.fc.requested_mode = e1000_fc_default;
6723 adapter->hw.fc.current_mode = e1000_fc_default;
6724 adapter->hw.phy.autoneg_advertised = 0x2f;
6725
6726
6727 adapter->rx_ring->count = E1000_DEFAULT_RXD;
6728 adapter->tx_ring->count = E1000_DEFAULT_TXD;
6729
	/* Initial Wake-on-LAN setting - if APM wake is enabled in the
	 * EEPROM, enable the ACPI Magic Packet filter
	 */
6733 if (adapter->flags & FLAG_APME_IN_WUC) {
6734
6735 eeprom_data = er32(WUC);
6736 eeprom_apme_mask = E1000_WUC_APME;
6737 if ((hw->mac.type > e1000_ich10lan) &&
6738 (eeprom_data & E1000_WUC_PHY_WAKE))
6739 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
6740 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
6741 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
6742 (adapter->hw.bus.func == 1))
6743 e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
6744 1, &eeprom_data);
6745 else
6746 e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
6747 1, &eeprom_data);
6748 }
6749
6750
6751 if (eeprom_data & eeprom_apme_mask)
6752 adapter->eeprom_wol |= E1000_WUFC_MAG;
6753
	/* now that we have the EEPROM settings, apply the special cases
	 * where the EEPROM may be wrong or the board simply won't support
	 * wake on LAN on a particular port
	 */
6758 if (!(adapter->flags & FLAG_HAS_WOL))
6759 adapter->eeprom_wol = 0;
6760
6761
6762 adapter->wol = adapter->eeprom_wol;
6763
6764
6765 if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
6766 (hw->mac.ops.check_mng_mode(hw)))
6767 device_wakeup_enable(&pdev->dev);
6768
6769
6770 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
6771
6772
6773 e1000e_reset(adapter);
6774
	/* If the controller has AMT, do not set DRV_LOAD until the
	 * interface is up.  For all other cases, let the firmware know
	 * that the hardware is now under the control of the driver.
	 */
6779 if (!(adapter->flags & FLAG_HAS_AMT))
6780 e1000e_get_hw_control(adapter);
6781
6782 strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
6783 err = register_netdev(netdev);
6784 if (err)
6785 goto err_register;
6786
6787
6788 netif_carrier_off(netdev);
6789
6790
6791 e1000e_ptp_init(adapter);
6792
6793 e1000_print_device_info(adapter);
6794
6795 if (pci_dev_run_wake(pdev))
6796 pm_runtime_put_noidle(&pdev->dev);
6797
6798 return 0;
6799
6800err_register:
6801 if (!(adapter->flags & FLAG_HAS_AMT))
6802 e1000e_release_hw_control(adapter);
6803err_eeprom:
6804 if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
6805 e1000_phy_hw_reset(&adapter->hw);
6806err_hw_init:
6807 kfree(adapter->tx_ring);
6808 kfree(adapter->rx_ring);
6809err_sw_init:
6810 if (adapter->hw.flash_address)
6811 iounmap(adapter->hw.flash_address);
6812 e1000e_reset_interrupt_capability(adapter);
6813err_flashmap:
6814 iounmap(adapter->hw.hw_addr);
6815err_ioremap:
6816 free_netdev(netdev);
6817err_alloc_etherdev:
6818 pci_release_selected_regions(pdev,
6819 pci_select_bars(pdev, IORESOURCE_MEM));
6820err_pci_reg:
6821err_dma:
6822 pci_disable_device(pdev);
6823 return err;
6824}
6825
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
6835static void e1000_remove(struct pci_dev *pdev)
6836{
6837 struct net_device *netdev = pci_get_drvdata(pdev);
6838 struct e1000_adapter *adapter = netdev_priv(netdev);
6839 bool down = test_bit(__E1000_DOWN, &adapter->state);
6840
6841 e1000e_ptp_remove(adapter);
6842
	/* The timers may be rescheduled, so explicitly disable them
	 * from being rescheduled.
	 */
6846 if (!down)
6847 set_bit(__E1000_DOWN, &adapter->state);
6848 del_timer_sync(&adapter->watchdog_timer);
6849 del_timer_sync(&adapter->phy_info_timer);
6850
6851 cancel_work_sync(&adapter->reset_task);
6852 cancel_work_sync(&adapter->watchdog_task);
6853 cancel_work_sync(&adapter->downshift_task);
6854 cancel_work_sync(&adapter->update_phy_task);
6855 cancel_work_sync(&adapter->print_hang_task);
6856
6857 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
6858 cancel_work_sync(&adapter->tx_hwtstamp_work);
6859 if (adapter->tx_hwtstamp_skb) {
6860 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
6861 adapter->tx_hwtstamp_skb = NULL;
6862 }
6863 }
6864
6865 if (!(netdev->flags & IFF_UP))
6866 e1000_power_down_phy(adapter);
6867
6868
6869 if (!down)
6870 clear_bit(__E1000_DOWN, &adapter->state);
6871 unregister_netdev(netdev);
6872
6873 if (pci_dev_run_wake(pdev))
6874 pm_runtime_get_noresume(&pdev->dev);
6875
	/* Release control of the hardware to the firmware.  If the
	 * firmware is AMT enabled, this will already have happened in
	 * close and is redundant here.
	 */
6879 e1000e_release_hw_control(adapter);
6880
6881 e1000e_reset_interrupt_capability(adapter);
6882 kfree(adapter->tx_ring);
6883 kfree(adapter->rx_ring);
6884
6885 iounmap(adapter->hw.hw_addr);
6886 if (adapter->hw.flash_address)
6887 iounmap(adapter->hw.flash_address);
6888 pci_release_selected_regions(pdev,
6889 pci_select_bars(pdev, IORESOURCE_MEM));
6890
6891 free_netdev(netdev);
6892
6893
6894 pci_disable_pcie_error_reporting(pdev);
6895
6896 pci_disable_device(pdev);
6897}
6898
6899
6900static const struct pci_error_handlers e1000_err_handler = {
6901 .error_detected = e1000_io_error_detected,
6902 .slot_reset = e1000_io_slot_reset,
6903 .resume = e1000_io_resume,
6904};
6905
6906static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6907 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
6908 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
6909 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
6910 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
6911 board_82571 },
6912 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
6913 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
6914 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
6915 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
6916 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
6917
6918 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
6919 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
6920 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
6921 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
6922
6923 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
6924 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
6925 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
6926
6927 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
6928 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
6929 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
6930
6931 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
6932 board_80003es2lan },
6933 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
6934 board_80003es2lan },
6935 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
6936 board_80003es2lan },
6937 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
6938 board_80003es2lan },
6939
6940 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
6941 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
6942 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
6943 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
6944 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
6945 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
6946 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
6947 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
6948
6949 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
6950 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
6951 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
6952 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
6953 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
6954 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
6955 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
6956 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
6957 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
6958
6959 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
6960 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
6961 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
6962
6963 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
6964 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
6965 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
6966
6967 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
6968 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
6969 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
6970 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
6971
6972 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
6973 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
6974
6975 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
6976 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
6977 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
6978 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
6979
6980 { 0, 0, 0, 0, 0, 0, 0 }
6981};
6982MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
6983
6984#ifdef CONFIG_PM
6985static const struct dev_pm_ops e1000_pm_ops = {
6986 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
6987 SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
6988 e1000_idle)
6989};
6990#endif
6991
6992
6993static struct pci_driver e1000_driver = {
6994 .name = e1000e_driver_name,
6995 .id_table = e1000_pci_tbl,
6996 .probe = e1000_probe,
6997 .remove = e1000_remove,
6998#ifdef CONFIG_PM
6999 .driver = {
7000 .pm = &e1000_pm_ops,
7001 },
7002#endif
7003 .shutdown = e1000_shutdown,
7004 .err_handler = &e1000_err_handler
7005};
7006
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
7013static int __init e1000_init_module(void)
7014{
7015 int ret;
7016 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
7017 e1000e_driver_version);
7018 pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n");
7019 ret = pci_register_driver(&e1000_driver);
7020
7021 return ret;
7022}
7023module_init(e1000_init_module);
7024
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
7031static void __exit e1000_exit_module(void)
7032{
7033 pci_unregister_driver(&e1000_driver);
7034}
7035module_exit(e1000_exit_module);
7036
7037MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
7038MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
7039MODULE_LICENSE("GPL");
7040MODULE_VERSION(DRV_VERSION);
7041
7042
7043