/* Intel(R) 10 Gigabit PCI Express Linux Network Driver (ixgbe)
 *
 * Copyright (c) 1999 - 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "3.15.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
				"Copyright (c) 1999-2013 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
	[board_X540] = &ixgbe_X540_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

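/* A sketch of the helper's contract, inferred from the code below: read a
 * word at offset @reg within the PCIe capability structure of this device's
 * parent (the downstream switch/root port).  Returns 0 on success, -1 if
 * there is no parent or it exposes no PCIe capability.
 */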
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					  u32 reg, u16 *value)
{
	int pos = 0;
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP);
	if (!pos)
		return -1;

	pci_read_config_word(parent_dev, pos + reg, value);
	return 0;
}

static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 link_status = 0;
	int err;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space of
	 * the parent, as this device is behind a switch.  Offset 18 is the
	 * PCIe Link Status register (PCI_EXP_LNKSTA).
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	return 0;
}

/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from
 * the device.  Used to ensure that various locations all have the correct
 * device ID checks.
 */
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		return true;
	default:
		return false;
	}
}

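/* Descriptive summary of the function below: derive the usable PCIe
 * bandwidth from the negotiated link speed and width (reading the parent
 * device where required) and warn when the slot cannot feed the adapter
 * the @expected_gts it needs for full line rate.
 */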
static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
				     int expected_gts)
{
	int max_gts = 0;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	struct pci_dev *pdev;

	/* determine whether to use the parent device */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		pdev = adapter->pdev->bus->parent->self;
	else
		pdev = adapter->pdev;

	if (pcie_get_minimum_link(pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 2 * width;
		break;
	case PCIE_SPEED_5_0GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 4 * width;
		break;
	case PCIE_SPEED_8_0GT:
		/* 128b/130b encoding reduces throughput by less than 2% */
		max_gts = 8 * width;
		break;
	default:
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	e_dev_info("PCI Express bandwidth of %dGT/s available\n",
		   max_gts);
	e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
		   (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
		    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
		    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
		    "Unknown"),
		   width,
		   (speed == PCIE_SPEED_2_5GT ? "20%" :
		    speed == PCIE_SPEED_5_0GT ? "20%" :
		    speed == PCIE_SPEED_8_0GT ? "N/a" :
		    "Unknown"));

	if (max_gts < expected_gts) {
		e_dev_warn("This is not sufficient for optimal performance of this card.\n");
		e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
			   expected_gts);
		e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
	}
}

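/* Schedule the service task unless the adapter is going down or the task
 * is already pending; __IXGBE_SERVICE_SCHED acts as the dedup flag.
 */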
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_clear_bit();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};

/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info(" %s     %s              %s        %s\n",
		"Queue [NTU] [NTC] [bi(ntc)->dma  ]",
		"leng", "ntw", "timestamp");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(tx_buffer, dma),
			dma_unmap_len(tx_buffer, len),
			tx_buffer->next_to_watch,
			(u64)tx_buffer->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * The 82598 and 82599/X540 advanced transmit descriptors are 16
	 * bytes: a 64-bit buffer address followed by a 64-bit field holding
	 * the payload/data length, command, status and option bits.  In
	 * write-back form the hardware overwrites the descriptor with
	 * status (DD) information.  See the relevant datasheet for the
	 * exact bit layout of each format.
	 */
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s    %s              %s        %s          %s\n",
			"T [desc]     [address 63:0  ] ",
			"[PlPOIdStDDt Ln] [bi->dma       ] ",
			"leng", "ntw", "timestamp", "bi->skb");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC(tx_ring, i);
			tx_buffer = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (dma_unmap_len(tx_buffer, len) > 0) {
				pr_info("T [0x%03X]    %016llX %016llX %016llX %08X %p %016llX %p",
					i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)dma_unmap_addr(tx_buffer, dma),
					dma_unmap_len(tx_buffer, len),
					tx_buffer->next_to_watch,
					(u64)tx_buffer->time_stamp,
					tx_buffer->skb);
				if (i == tx_ring->next_to_use &&
				    i == tx_ring->next_to_clean)
					pr_cont(" NTC/U\n");
				else if (i == tx_ring->next_to_use)
					pr_cont(" NTU\n");
				else if (i == tx_ring->next_to_clean)
					pr_cont(" NTC\n");
				else
					pr_cont("\n");

				if (netif_msg_pktdata(adapter) &&
				    tx_buffer->skb)
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS, 16, 1,
						tx_buffer->skb->data,
						dma_unmap_len(tx_buffer, len),
						true);
			}
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Receive Descriptor Formats
	 *
	 * The advanced receive descriptor is 16 bytes: in read form it
	 * carries the packet and header buffer addresses; in write-back
	 * form the hardware replaces those with the RSS hash or fragment
	 * checksum, packet type, VLAN tag, error/status (DD, EOP) bits and
	 * the packet length.  See the relevant datasheet for the exact bit
	 * layout of each format.
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s%s",
			"R  [desc]      [ PktBuf     A0] ",
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb       ] ",
			"<-- Adv Rx Read format\n");
		pr_info("%s%s%s",
			"RWB[desc]      [PcsmIpSHl PtRs] ",
			"[vl er S cks ln] ---------------- [bi->skb       ] ",
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter) &&
				    rx_buffer_info->dma) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   page_address(rx_buffer_info->page) +
						    rx_buffer_info->page_offset,
					   ixgbe_rx_bufsz(rx_ring), true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

		}
	}

exit:
	return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

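/* Rearm (software-trigger) interrupts for the queues selected in @qmask
 * by writing EICS; 82599/X540 split the 64-bit queue mask across two
 * EICS_EX registers.
 */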
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

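/* Release the skb (if any) attached to a Tx buffer and undo its DMA
 * mapping.  Header data is mapped with dma_map_single() and paged frags
 * with dma_map_page(), so the matching unmap call is chosen accordingly.
 */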
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
				      struct ixgbe_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

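/* Account XOFF frames received under link-level flow control and, if any
 * arrived, disarm the Tx hang check: a paused queue is not a hung queue.
 */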
static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	int i;
	u32 data;

	if ((hw->fc.current_mode != ixgbe_fc_full) &&
	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
		return;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		break;
	default:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	}
	hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
	if (!data)
		return;

	for (i = 0; i < adapter->num_tx_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->tx_ring[i]->state);
}

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 xoff[8] = {0};
	u8 tc;
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
		ixgbe_update_xoff_rx_lfc(adapter);
		return;
	}

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		u32 pxoffrxc;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += pxoffrxc;
		/* Get the TC for given UP */
		tc = netdev_get_prio_tc_map(adapter->netdev, i);
		xoff[tc] += pxoffrxc;
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

		tc = tx_ring->dcb_tc;
		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->stats.packets;
}

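/* Number of descriptors the hardware has yet to process, derived from the
 * ring's head and tail registers (accounting for wrap-around).
 */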
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

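/**
 * ixgbe_check_tx_hang - check whether a Tx queue appears hung
 * @tx_ring: tx ring to check
 *
 * Returns true only when no completions have occurred since the previous
 * check while work is still pending, two checks in a row.
 */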
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/*
	 * Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}

/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{

	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
		e_warn(drv, "initiating reset due to tx timeout\n");
		ixgbe_service_event_schedule(adapter);
	}
}

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct ixgbe_hw *hw = &adapter->hw;
		e_err(drv, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, i,
			tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		       adapter->tx_timeout_count + 1, tx_ring->queue_index);

		/* schedule the reset */
		ixgbe_tx_timeout_reset(adapter);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index)
		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
	u16 reg_offset;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
		txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		/* for unknown hardware do not write register */
		return;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
		  IXGBE_DCA_TXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}

static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
	u8 reg_idx = rx_ring->reg_idx;

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		break;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	ixgbe_for_each_ring(ring, q_vector->tx)
		ixgbe_update_tx_dca(adapter, ring, cpu);

	ixgbe_for_each_ring(ring, q_vector->rx)
		ixgbe_update_rx_dca(adapter, ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
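
/* Copy the hardware-computed RSS hash from the descriptor into the skb
 * when the netdev has RXHASH enabled.
 */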
static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
				 union ixgbe_adv_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}

#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
				    union ixgbe_adv_rx_desc *rx_desc)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}

#endif /* IXGBE_FCOE */
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
			return;

		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

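/* Publish @val as the ring's next_to_use and notify hardware via the tail
 * register; the write barrier below orders descriptor writes first.
 */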
static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
				    struct ixgbe_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma = bi->dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(dma))
		return true;

	/* alloc new page for storage */
	if (likely(!page)) {
		page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
					 bi->skb, ixgbe_rx_pg_order(rx_ring));
		if (unlikely(!page)) {
			rx_ring->rx_stats.alloc_rx_page_failed++;
			return false;
		}
		bi->page = page;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);

	/*
	 * if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
		bi->page = NULL;

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page_offset = 0;

	return true;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
			break;

		/*
		 * Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i)
		ixgbe_release_rx_desc(rx_ring, i);
}

/**
 * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @max_len: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int ixgbe_get_headlen(unsigned char *data,
				      unsigned int max_len)
{
	union {
		unsigned char *network;
		/* l2 headers */
		struct ethhdr *eth;
		struct vlan_hdr *vlan;
		/* l3 headers */
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	__be16 protocol;
	u8 nexthdr = 0;
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_len < ETH_HLEN)
		return max_len;

	/* initialize network frame pointer */
	hdr.network = data;

	/* set first protocol and move network header forward */
	protocol = hdr.eth->h_proto;
	hdr.network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (protocol == __constant_htons(ETH_P_8021Q)) {
		if ((hdr.network - data) > (max_len - VLAN_HLEN))
			return max_len;

		protocol = hdr.vlan->h_vlan_encapsulated_proto;
		hdr.network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (protocol == __constant_htons(ETH_P_IP)) {
		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
			return max_len;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return hdr.network - data;

		/* record next protocol if header is present */
		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
			nexthdr = hdr.ipv4->protocol;
	} else if (protocol == __constant_htons(ETH_P_IPV6)) {
		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
			return max_len;

		/* record next protocol */
		nexthdr = hdr.ipv6->nexthdr;
		hlen = sizeof(struct ipv6hdr);
#ifdef IXGBE_FCOE
	} else if (protocol == __constant_htons(ETH_P_FCOE)) {
		if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
			return max_len;
		hlen = FCOE_HEADER_LEN;
#endif
	} else {
		return hdr.network - data;
	}

	/* relocate pointer to start of L4 header */
	hdr.network += hlen;

	/* finally sort out TCP/UDP */
	if (nexthdr == IPPROTO_TCP) {
		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
			return max_len;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return hdr.network - data;

		hdr.network += hlen;
	} else if (nexthdr == IPPROTO_UDP) {
		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
			return max_len;

		hdr.network += sizeof(struct udphdr);
	}

	/*
	 * If everything has gone correctly hdr.network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((hdr.network - data) < max_len)
		return hdr.network - data;
	else
		return max_len;
}

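/* Derive a gso_size for an RSC-coalesced frame so the stack sees a sane
 * MSS: the payload length divided by the number of coalesced frames.
 */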
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
				   struct sk_buff *skb)
{
	u16 hdr_len = skb_headlen(skb);

	/* set gso_size to avoid messing up TCP MSS */
	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
						 IXGBE_CB(skb)->append_cnt);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}

static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
				   struct sk_buff *skb)
{
	/* if append_cnt is 0 then frame is not RSC */
	if (!IXGBE_CB(skb)->append_cnt)
		return;

	rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
	rx_ring->rx_stats.rsc_flush++;

	ixgbe_set_rsc_gso_size(rx_ring, skb);

	/* gso_size is computed using append_cnt so always clear it last */
	IXGBE_CB(skb)->append_cnt = 0;
}

/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;

	ixgbe_update_rsc_stats(rx_ring, skb);

	ixgbe_rx_hash(rx_ring, rx_desc, skb);

	ixgbe_rx_checksum(rx_ring, rx_desc, skb);

	ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, dev);
}

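/* Hand a completed skb to the stack: directly when this vector is busy
 * polling, via GRO on the normal NAPI path, or via netif_rx() when
 * running under netpoll.
 */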
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
			 struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;

	if (ixgbe_qv_ll_polling(q_vector))
		netif_receive_skb(skb);
	else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
			     union ixgbe_adv_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBE_RX_DESC(rx_ring, ntc));

	/* update RSC append count if present */
	if (ring_is_rsc_enabled(rx_ring)) {
		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
				     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);

		if (unlikely(rsc_enabled)) {
			u32 rsc_cnt = le32_to_cpu(rsc_enabled);

			rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;

			/* update ntc based on RSC value */
			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
			ntc &= IXGBE_RXDADV_NEXTP_MASK;
			ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
		}
	}

	/* if we are the last buffer then there is nothing else to do */
	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buffer_info[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
			    struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/*
	 * it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lomem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/*
	 * we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */
static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
				struct sk_buff *skb)
{
	/* if the page was released unmap it, else just sync our portion */
	if (unlikely(IXGBE_CB(skb)->page_released)) {
		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
		IXGBE_CB(skb)->page_released = false;
	} else {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];

		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      frag->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);
	}
	IXGBE_CB(skb)->dma = 0;
}

/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
				  union ixgbe_adv_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	struct net_device *netdev = rx_ring->netdev;

	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbe_test_staterr(rx_desc,
					IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
	    !(netdev->features & NETIF_F_RXALL))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		ixgbe_pull_tail(rx_ring, skb);

#ifdef IXGBE_FCOE
	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
		return false;

#endif
	/* if skb_pad returns an error the skb was freed */
	if (unlikely(skb->len < 60)) {
		int pad_len = 60 - skb->len;

		if (skb_pad(skb, pad_len))
			return true;
		__skb_put(skb, pad_len);
	}

	return false;
}

/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
				struct ixgbe_rx_buffer *old_buff)
{
	struct ixgbe_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset,
					 ixgbe_rx_bufsz(rx_ring),
					 DMA_FROM_DEVICE);
}

/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
			      struct ixgbe_rx_buffer *rx_buffer,
			      union ixgbe_adv_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
	unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
				   ixgbe_rx_bufsz(rx_ring);
#endif

	if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buffer->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(page_to_nid(page) == numa_node_id()))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(page_to_nid(page) != numa_node_id()))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= truesize;

	/*
	 * since we are the only owner of the page and we need to
	 * increment it, just set the value to 2 in order to avoid
	 * an unnecessary locked operation
	 */
	atomic_set(&page->_count, 2);
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > last_offset)
		return false;

	/* bump ref count on page before it is given to the stack */
	get_page(page);
#endif

	return true;
}

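/* Fetch the buffer for the current descriptor: allocate an skb for a new
 * packet, sync or unmap the backing page as needed, and decide whether
 * the half-page buffer can be recycled back onto the ring.
 */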
static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
					     union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_rx_buffer *rx_buffer;
	struct sk_buff *skb;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	skb = rx_buffer->skb;

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IXGBE_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/*
		 * we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);

		/*
		 * Delay unmapping of the first packet. It carries the
		 * header information, HW may still access the header
		 * after the writeback.  Only unmap it when EOP is
		 * reached
		 */
		if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
			goto dma_sync;

		IXGBE_CB(skb)->dma = rx_buffer->dma;
	} else {
		if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
			ixgbe_dma_sync_frag(rx_ring, skb);

dma_sync:
		/* we are reusing so sync this buffer for CPU use */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_buffer->dma,
					      rx_buffer->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);
	}

	/* pull page into skb */
	if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
	} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
		/* the page has been released from the ring */
		IXGBE_CB(skb)->page_released = true;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       ixgbe_rx_pg_size(rx_ring),
			       DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->skb = NULL;
	rx_buffer->dma = 0;
	rx_buffer->page = NULL;

	return skb;
}

/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing: packet headers are copied into the skb's linear data while
 * the payload remains attached as page fragments that can be recycled
 * back to the ring.
 *
 * Returns amount of work completed.
 **/
static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
			      struct ixgbe_ring *rx_ring,
			      const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int ddp_bytes;
	unsigned int mss = 0;
#endif /* IXGBE_FCOE */
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);

	do {
		union ixgbe_adv_rx_desc *rx_desc;
		struct sk_buff *skb;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
			break;

		/*
		 * This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		/* retrieve a buffer from the ring */
		skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* place incomplete frames back on ring for completion */
		if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* verify the packet layout is correct */
		if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
			continue;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, timestamp, VLAN, and protocol */
		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);

#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			/* include DDPed FCoE data */
			if (ddp_bytes > 0) {
				if (!mss) {
					mss = rx_ring->netdev->mtu -
						sizeof(struct fcoe_hdr) -
						sizeof(struct fc_frame_header) -
						sizeof(struct fcoe_crc_eof);
					if (mss > 512)
						mss &= ~511;
				}
				total_rx_bytes += ddp_bytes;
				total_rx_packets += DIV_ROUND_UP(ddp_bytes,
								 mss);
			}
			if (!ddp_bytes) {
				dev_kfree_skb_any(skb);
				continue;
			}
		}

#endif /* IXGBE_FCOE */
		skb_mark_napi_id(skb, &q_vector->napi);
		ixgbe_rx_skb(q_vector, skb);

		/* update budget accounting */
		total_rx_packets++;
	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_rx_packets;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbe_low_latency_recv(struct napi_struct *napi)
{
	struct ixgbe_q_vector *q_vector =
			container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int found = 0;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbe_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbe_for_each_ring(ring, q_vector->rx) {
		found = ixgbe_clean_rx_irq(q_vector, ring, 4);
#ifdef LL_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbe_qv_unlock_poll(q_vector);

	return found;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int v_idx;
	u32 mask;

	/* Populate MSIX to EITR Select */
	if (adapter->num_vfs > 32) {
		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
	}

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct ixgbe_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbe_for_each_ring(ring, q_vector->rx)
			ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbe_for_each_ring(ring, q_vector->tx)
			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		ixgbe_write_eitr(q_vector);
	}

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
			       v_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER |
		  IXGBE_EIMS_MAILBOX |
		  IXGBE_EIMS_LSC);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see ixgbe_param.c)
 **/
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
			     struct ixgbe_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (100000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	if (timepassed_us == 0)
		return;

	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and TX/Rx queue
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

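/* Combine the Rx and Tx ITR votes for this vector into a new EITR value,
 * smoothing the change so the interrupt rate ramps instead of jumping.
 */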
static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbe_update_itr(q_vector, &q_vector->tx);
	ixgbe_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IXGBE_8K_ITR;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbe_write_eitr(q_vector);
	}
}

/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/
static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = adapter->interrupt_event;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
	    !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_T3_LOM:
		/*
		 * Since the warning interrupt is for both ports
		 * we don't have to check if:
		 *  - This interrupt wasn't for our port.
		 *  - We may have missed the interrupt so always have to
		 *    check if we got an LSC
		 */
		if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
		    !(eicr & IXGBE_EICR_LSC))
			return;

		if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
			u32 speed;
			bool link_up = false;

			hw->mac.ops.check_link(hw, &speed, &link_up, false);

			if (link_up)
				return;
		}

		/* Check if this is not due to overtemp */
		if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
			return;

		break;
	default:
		if (!(eicr & IXGBE_EICR_GPI_SDP0))
			return;
		break;
	}
	e_crit(drv,
	       "Network adapter has been stopped because it has overheated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");

	adapter->interrupt_event = 0;
}

static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		e_crit(probe, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}

static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
		return;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		/*
		 * Need to check link state so complete overtemp check
		 * on service task
		 */
		if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
		    (!test_bit(__IXGBE_DOWN, &adapter->state))) {
			adapter->interrupt_event = eicr;
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
			ixgbe_service_event_schedule(adapter);
			return;
		}
		return;
	case ixgbe_mac_X540:
		if (!(eicr & IXGBE_EICR_TS))
			return;
		break;
	default:
		return;
	}

	e_crit(drv,
	       "Network adapter has been stopped because it has overheated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");
}

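/* Handle SFP module insertion (GPI SDP2) and multispeed-fiber link
 * configuration (GPI SDP1) events by deferring the work to the service
 * task.
 */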
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
			ixgbe_service_event_schedule(adapter);
		}
	}

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
			ixgbe_service_event_schedule(adapter);
		}
	}
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
		ixgbe_service_event_schedule(adapter);
	}
}
2457
2458static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2459 u64 qmask)
2460{
2461 u32 mask;
2462 struct ixgbe_hw *hw = &adapter->hw;
2463
2464 switch (hw->mac.type) {
2465 case ixgbe_mac_82598EB:
2466 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2467 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2468 break;
2469 case ixgbe_mac_82599EB:
2470 case ixgbe_mac_X540:
2471 mask = (qmask & 0xFFFFFFFF);
2472 if (mask)
2473 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2474 mask = (qmask >> 32);
2475 if (mask)
2476 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2477 break;
2478 default:
2479 break;
2480 }
2481
2482}
2483
2484static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2485 u64 qmask)
2486{
2487 u32 mask;
2488 struct ixgbe_hw *hw = &adapter->hw;
2489
2490 switch (hw->mac.type) {
2491 case ixgbe_mac_82598EB:
2492 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2493 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2494 break;
2495 case ixgbe_mac_82599EB:
2496 case ixgbe_mac_X540:
2497 mask = (qmask & 0xFFFFFFFF);
2498 if (mask)
2499 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2500 mask = (qmask >> 32);
2501 if (mask)
2502 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2503 break;
2504 default:
2505 break;
2506 }
2507 /* skip the flush */
2508}
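
/* A minimal sketch (not driver code) of the qmask split performed by the
 * two helpers above: 82599/X540 carry 64 queue-interrupt bits in a pair
 * of 32-bit EIMS_EX/EIMC_EX registers, so the low and high halves of the
 * 64-bit mask are written separately.  Names here are illustrative.
 */
#if 0	/* illustrative only, not built */
static void example_qmask_split(void)
{
	u64 qmask = (1ULL << 5) | (1ULL << 40);	/* queues 5 and 40 */
	u32 lo = qmask & 0xFFFFFFFF;		/* bit 5 -> EIMS_EX(0) */
	u32 hi = qmask >> 32;			/* bit 8 -> EIMS_EX(1) */
}
#endif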
2509
2510/**
2511 * ixgbe_irq_enable - Enable default interrupt generation settings
2512 * @adapter: board private structure
2513 **/
2514static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2515 bool flush)
2516{
2517 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2518
2519 /* don't reenable LSC while waiting for link */
2520 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2521 mask &= ~IXGBE_EIMS_LSC;
2522
2523 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2524 switch (adapter->hw.mac.type) {
2525 case ixgbe_mac_82599EB:
2526 mask |= IXGBE_EIMS_GPI_SDP0;
2527 break;
2528 case ixgbe_mac_X540:
2529 mask |= IXGBE_EIMS_TS;
2530 break;
2531 default:
2532 break;
2533 }
2534 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2535 mask |= IXGBE_EIMS_GPI_SDP1;
2536 switch (adapter->hw.mac.type) {
2537 case ixgbe_mac_82599EB:
2538 mask |= IXGBE_EIMS_GPI_SDP1;
2539 mask |= IXGBE_EIMS_GPI_SDP2;	/* fall through */
2540 case ixgbe_mac_X540:
2541 mask |= IXGBE_EIMS_ECC;
2542 mask |= IXGBE_EIMS_MAILBOX;
2543 break;
2544 default:
2545 break;
2546 }
2547
2548 if (adapter->hw.mac.type == ixgbe_mac_X540)
2549 mask |= IXGBE_EIMS_TIMESYNC;
2550
2551 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2552 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
2553 mask |= IXGBE_EIMS_FLOW_DIR;
2554
2555 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2556 if (queues)
2557 ixgbe_irq_enable_queues(adapter, ~0);
2558 if (flush)
2559 IXGBE_WRITE_FLUSH(&adapter->hw);
2560}
2561
2562static irqreturn_t ixgbe_msix_other(int irq, void *data)
2563{
2564 struct ixgbe_adapter *adapter = data;
2565 struct ixgbe_hw *hw = &adapter->hw;
2566 u32 eicr;
2567
2568
2569 /* Workaround for silicon errata: use clear-by-write instead of
2570  * clear-by-read.  Reading EICS returns the interrupt causes
2571  * without clearing them; the causes handled here are cleared
2572  * explicitly by the EICR write below.
2573  */
2574 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2575
2576
2577 /* The lower 16 bits of EICR are the queue interrupts; mask them
2578  * off here so queue bits that happen to be set are not cleared
2579  * by the EICR write below.  Clearing a queue bit here would race
2580  * with ixgbe_msix_clean_rings() and could cost a missed interrupt
2581  * and a noticeable performance hit.
2582  */
2583 eicr &= 0xFFFF0000;
2584
2585 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2586
2587 if (eicr & IXGBE_EICR_LSC)
2588 ixgbe_check_lsc(adapter);
2589
2590 if (eicr & IXGBE_EICR_MAILBOX)
2591 ixgbe_msg_task(adapter);
2592
2593 switch (hw->mac.type) {
2594 case ixgbe_mac_82599EB:
2595 case ixgbe_mac_X540:
2596 if (eicr & IXGBE_EICR_ECC)
2597 e_info(link, "Received unrecoverable ECC error, please "
2598 "reboot\n");
2599
2600 if (eicr & IXGBE_EICR_FLOW_DIR) {
2601 int reinit_count = 0;
2602 int i;
2603 for (i = 0; i < adapter->num_tx_queues; i++) {
2604 struct ixgbe_ring *ring = adapter->tx_ring[i];
2605 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
2606 &ring->state))
2607 reinit_count++;
2608 }
2609 if (reinit_count) {
2610 /* no more flow director interrupts until after init */
2611 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2612 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
2613 ixgbe_service_event_schedule(adapter);
2614 }
2615 }
2616 ixgbe_check_sfp_event(adapter, eicr);
2617 ixgbe_check_overtemp_event(adapter, eicr);
2618 break;
2619 default:
2620 break;
2621 }
2622
2623 ixgbe_check_fan_failure(adapter, eicr);
2624
2625 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2626 ixgbe_ptp_check_pps_event(adapter, eicr);
2627
2628 /* re-enable the original interrupt state, no lsc, no queues */
2629 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2630 ixgbe_irq_enable(adapter, false, false);
2631
2632 return IRQ_HANDLED;
2633}
2634
2635static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
2636{
2637 struct ixgbe_q_vector *q_vector = data;
2638
2639
2640 /* EIAM disabled interrupts (on this vector) for us */
2641 if (q_vector->rx.ring || q_vector->tx.ring)
2642 napi_schedule(&q_vector->napi);
2643
2644 return IRQ_HANDLED;
2645}
2646
2647
2648/**
2649 * ixgbe_poll - NAPI polling callback
2650 * @napi: structure for representing this polling device
2651 * @budget: how many packets the driver may clean; used for both
2652 *          legacy and MSI-X NAPI mode
2653 **/
2654int ixgbe_poll(struct napi_struct *napi, int budget)
2655{
2656 struct ixgbe_q_vector *q_vector =
2657 container_of(napi, struct ixgbe_q_vector, napi);
2658 struct ixgbe_adapter *adapter = q_vector->adapter;
2659 struct ixgbe_ring *ring;
2660 int per_ring_budget;
2661 bool clean_complete = true;
2662
2663#ifdef CONFIG_IXGBE_DCA
2664 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2665 ixgbe_update_dca(q_vector);
2666#endif
2667
2668 ixgbe_for_each_ring(ring, q_vector->tx)
2669 clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
2670
2671 if (!ixgbe_qv_lock_napi(q_vector))
2672 return budget;
2673
2674 /* attempt to distribute budget to each queue fairly, but don't
2675  * allow the budget to go below 1 because we'll exit polling */
2676 if (q_vector->rx.count > 1)
2677 per_ring_budget = max(budget/q_vector->rx.count, 1);
2678 else
2679 per_ring_budget = budget;
2680
2681 ixgbe_for_each_ring(ring, q_vector->rx)
2682 clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
2683 per_ring_budget) < per_ring_budget);
2684
2685 ixgbe_qv_unlock_napi(q_vector);
2686
2687 if (!clean_complete)
2688 return budget;
2689
2690 /* all work done, exit the polling mode */
2691 napi_complete(napi);
2692 if (adapter->rx_itr_setting & 1)
2693 ixgbe_set_itr(q_vector);
2694 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2695 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
2696
2697 return 0;
2698}
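
/* A worked example (not driver code) of the Rx budget split done in
 * ixgbe_poll() above: with the default NAPI budget of 64 and three Rx
 * rings sharing one vector, each ring may clean at most
 * max(64 / 3, 1) = 21 descriptors per poll, so one busy ring cannot
 * starve the others on the same vector.
 */
#if 0	/* illustrative only, not built */
static int example_per_ring_budget(void)
{
	int budget = 64, rx_count = 3;

	return max(budget / rx_count, 1);	/* 21 */
}
#endif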
2699
2700/**
2701 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2702 * @adapter: board private structure
2703 *
2704 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
2705 * interrupts from the kernel.
2706 **/
2707static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2708{
2709 struct net_device *netdev = adapter->netdev;
2710 int vector, err;
2711 int ri = 0, ti = 0;
2712
2713 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2714 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2715 struct msix_entry *entry = &adapter->msix_entries[vector];
2716
2717 if (q_vector->tx.ring && q_vector->rx.ring) {
2718 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2719 "%s-%s-%d", netdev->name, "TxRx", ri++);
2720 ti++;
2721 } else if (q_vector->rx.ring) {
2722 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2723 "%s-%s-%d", netdev->name, "rx", ri++);
2724 } else if (q_vector->tx.ring) {
2725 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2726 "%s-%s-%d", netdev->name, "tx", ti++);
2727 } else {
2728 /* skip this unused q_vector */
2729 continue;
2730 }
2731 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
2732 q_vector->name, q_vector);
2733 if (err) {
2734 e_err(probe, "request_irq failed for MSIX interrupt. "
2735 "Error: %d\n", err);
2736 goto free_queue_irqs;
2737 }
2738
2739 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2740 /* assign the mask for this irq */
2741 irq_set_affinity_hint(entry->vector,
2742 &q_vector->affinity_mask);
2743 }
2744 }
2745
2746 err = request_irq(adapter->msix_entries[vector].vector,
2747 ixgbe_msix_other, 0, netdev->name, adapter);
2748 if (err) {
2749 e_err(probe, "request_irq for msix_other failed: %d\n", err);
2750 goto free_queue_irqs;
2751 }
2752
2753 return 0;
2754
2755free_queue_irqs:
2756 while (vector) {
2757 vector--;
2758 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
2759 NULL);
2760 free_irq(adapter->msix_entries[vector].vector,
2761 adapter->q_vector[vector]);
2762 }
2763 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2764 pci_disable_msix(adapter->pdev);
2765 kfree(adapter->msix_entries);
2766 adapter->msix_entries = NULL;
2767 return err;
2768}
2769
2770/**
2771 * ixgbe_intr - legacy mode Interrupt Handler
2772 * @irq: interrupt number
2773 * @data: pointer to a network interface device structure
2774 **/
2775static irqreturn_t ixgbe_intr(int irq, void *data)
2776{
2777 struct ixgbe_adapter *adapter = data;
2778 struct ixgbe_hw *hw = &adapter->hw;
2779 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2780 u32 eicr;
2781
2782 /* Workaround for silicon errata #26 on 82598: mask the interrupt
2783  * before the read of EICR; otherwise an interrupt arriving between
2784  * the auto-clearing EICR read and the handler could be lost.
2785  */
2786 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2787
2788 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
2789  * therefore no explicit interrupt disable is necessary */
2790 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2791 if (!eicr) {
2792 /* shared interrupt alert!
2793  * make sure interrupts are enabled because the read will
2794  * have disabled interrupts due to EIAM.
2795  * Finish the 82598 errata workaround by unmasking the
2796  * interrupt that was masked before the EICR read, then
2797  * report that this interrupt was not ours.
2798  */
2799 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2800 ixgbe_irq_enable(adapter, true, true);
2801 return IRQ_NONE;
2802 }
2803
2804 if (eicr & IXGBE_EICR_LSC)
2805 ixgbe_check_lsc(adapter);
2806
2807 switch (hw->mac.type) {
2808 case ixgbe_mac_82599EB:
2809 ixgbe_check_sfp_event(adapter, eicr);
2810 /* Fall through */
2811 case ixgbe_mac_X540:
2812 if (eicr & IXGBE_EICR_ECC)
2813 e_info(link, "Received unrecoverable ECC error, please "
2814 "reboot\n");
2815 ixgbe_check_overtemp_event(adapter, eicr);
2816 break;
2817 default:
2818 break;
2819 }
2820
2821 ixgbe_check_fan_failure(adapter, eicr);
2822 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2823 ixgbe_ptp_check_pps_event(adapter, eicr);
2824
2825 /* would disable interrupts here but EIAM disabled it */
2826 napi_schedule(&q_vector->napi);
2827
2828 /* re-enable link (maybe) and non-queue interrupts, no flush.
2829  * ixgbe_poll will re-enable the queue interrupts when it has
2830  * finished the Rx/Tx cleanup for this vector.
2831  */
2832 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2833 ixgbe_irq_enable(adapter, false, false);
2834
2835 return IRQ_HANDLED;
2836}
2837
2838/**
2839 * ixgbe_request_irq - initialize interrupts
2840 * @adapter: board private structure
2841 *
2842 * Attempts to configure interrupts using the best available
2843 * capabilities of the hardware and kernel.
2844 **/
2845static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2846{
2847 struct net_device *netdev = adapter->netdev;
2848 int err;
2849
2850 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2851 err = ixgbe_request_msix_irqs(adapter);
2852 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
2853 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
2854 netdev->name, adapter);
2855 else
2856 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
2857 netdev->name, adapter);
2858
2859 if (err)
2860 e_err(probe, "request_irq failed, Error %d\n", err);
2861
2862 return err;
2863}
2864
2865static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2866{
2867 int vector;
2868
2869 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2870 free_irq(adapter->pdev->irq, adapter);
2871 return;
2872 }
2873
2874 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2875 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2876 struct msix_entry *entry = &adapter->msix_entries[vector];
2877
2878 /* free only the irqs that were actually requested */
2879 if (!q_vector->rx.ring && !q_vector->tx.ring)
2880 continue;
2881
2882 /* clear the affinity_mask in the IRQ descriptor */
2883 irq_set_affinity_hint(entry->vector, NULL);
2884
2885 free_irq(entry->vector, q_vector);
2886 }
2887
2888 free_irq(adapter->msix_entries[vector++].vector, adapter);
2889}
2890
2891/**
2892 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
2893 * @adapter: board private structure
2894 **/
2895static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2896{
2897 switch (adapter->hw.mac.type) {
2898 case ixgbe_mac_82598EB:
2899 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2900 break;
2901 case ixgbe_mac_82599EB:
2902 case ixgbe_mac_X540:
2903 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2904 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
2905 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2906 break;
2907 default:
2908 break;
2909 }
2910 IXGBE_WRITE_FLUSH(&adapter->hw);
2911 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2912 int vector;
2913
2914 for (vector = 0; vector < adapter->num_q_vectors; vector++)
2915 synchronize_irq(adapter->msix_entries[vector].vector);
2916
2917 synchronize_irq(adapter->msix_entries[vector++].vector);
2918 } else {
2919 synchronize_irq(adapter->pdev->irq);
2920 }
2921}
2922
2923/**
2924 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
2925 * @adapter: board private structure
2926 **/
2927static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2928{
2929 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2930
2931 ixgbe_write_eitr(q_vector);
2932
2933 ixgbe_set_ivar(adapter, 0, 0, 0);
2934 ixgbe_set_ivar(adapter, 1, 0, 0);
2935
2936 e_info(hw, "Legacy interrupt IVAR setup done\n");
2937}
2938
2939/**
2940 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2941 * @adapter: board private structure
2942 * @ring: structure containing ring specific data
2943 *
2944 * Configure the Tx descriptor ring after a reset.
2945 **/
2946void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2947 struct ixgbe_ring *ring)
2948{
2949 struct ixgbe_hw *hw = &adapter->hw;
2950 u64 tdba = ring->dma;
2951 int wait_loop = 10;
2952 u32 txdctl = IXGBE_TXDCTL_ENABLE;
2953 u8 reg_idx = ring->reg_idx;
2954
2955 /* disable queue to avoid issues while updating state */
2956 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
2957 IXGBE_WRITE_FLUSH(hw);
2958
2959 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
2960 (tdba & DMA_BIT_MASK(32)));
2961 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2962 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2963 ring->count * sizeof(union ixgbe_adv_tx_desc));
2964 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2965 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
2966 ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
2967
2968
2969 /* set WTHRESH to encourage burst writeback, it should not be set
2970  * higher than 1 when:
2971  * - ITR is 0 as it could cause false TX hangs
2972  * - ITR is set to > 100k int/sec and BQL is enabled
2973  *
2974  * In order to avoid issues WTHRESH + PTHRESH should always be
2975  * equal to or less than the number of on chip descriptors,
2976  * which is currently 40.
2977  */
2978#if IS_ENABLED(CONFIG_BQL)
2979 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
2980#else
2981 if (!ring->q_vector || (ring->q_vector->itr < 8))
2982#endif
2983 txdctl |= (1 << 16);	/* WTHRESH = 1 */
2984 else
2985 txdctl |= (8 << 16);	/* WTHRESH = 8 */
2986
2987
2988 /* Setting PTHRESH to 32 both improves performance
2989  * and avoids a TX hang with DFP enabled
2990  */
2991 txdctl |= (1 << 8) |	/* HTHRESH = 1 */
2992 	   32;		/* PTHRESH = 32 */
2993
2994 /* reinitialize flowdirector state */
2995 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2996 ring->atr_sample_rate = adapter->atr_sample_rate;
2997 ring->atr_count = 0;
2998 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
2999 } else {
3000 ring->atr_sample_rate = 0;
3001 }
3002
3003 /* initialize XPS */
3004 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3005 struct ixgbe_q_vector *q_vector = ring->q_vector;
3006
3007 if (q_vector)
3008 netif_set_xps_queue(adapter->netdev,
3009 &q_vector->affinity_mask,
3010 ring->queue_index);
3011 }
3012
3013 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3014
3015 /* enable queue */
3016 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3017
3018 /* TXDCTL.EN will return 0 on 82598 if link is down */
3019 if (hw->mac.type == ixgbe_mac_82598EB &&
3020 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3021 return;
3022
3023 /* poll to verify queue is enabled */
3024 do {
3025 usleep_range(1000, 2000);
3026 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3027 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3028 if (!wait_loop)
3029 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
3030}
3031
3032static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3033{
3034 struct ixgbe_hw *hw = &adapter->hw;
3035 u32 rttdcs, mtqc;
3036 u8 tcs = netdev_get_num_tc(adapter->netdev);
3037
3038 if (hw->mac.type == ixgbe_mac_82598EB)
3039 return;
3040
3041 /* disable the arbiter while setting MTQC */
3042 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3043 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3044 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3045
3046 /* set transmit pool layout */
3047 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3048 mtqc = IXGBE_MTQC_VT_ENA;
3049 if (tcs > 4)
3050 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3051 else if (tcs > 1)
3052 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3053 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3054 mtqc |= IXGBE_MTQC_32VF;
3055 else
3056 mtqc |= IXGBE_MTQC_64VF;
3057 } else {
3058 if (tcs > 4)
3059 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3060 else if (tcs > 1)
3061 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3062 else
3063 mtqc = IXGBE_MTQC_64Q_1PB;
3064 }
3065
3066 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3067
3068 /* Enable Security TX Buffer IFG for multiple pb */
3069 if (tcs) {
3070 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3071 sectx |= IXGBE_SECTX_DCB;
3072 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3073 }
3074
3075 /* re-enable the arbiter */
3076 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3077 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3078}
3079
3080/**
3081 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
3082 * @adapter: board private structure
3083 *
3084 * Configure the Tx unit of the MAC after a reset.
3085 **/
3086static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3087{
3088 struct ixgbe_hw *hw = &adapter->hw;
3089 u32 dmatxctl;
3090 u32 i;
3091
3092 ixgbe_setup_mtqc(adapter);
3093
3094 if (hw->mac.type != ixgbe_mac_82598EB) {
3095 /* DMATXCTL.EN must be before Tx queues are enabled */
3096 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3097 dmatxctl |= IXGBE_DMATXCTL_TE;
3098 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3099 }
3100
3101 /* Setup the HW Tx Head and Tail descriptor pointers */
3102 for (i = 0; i < adapter->num_tx_queues; i++)
3103 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3104}
3105
3106static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3107 struct ixgbe_ring *ring)
3108{
3109 struct ixgbe_hw *hw = &adapter->hw;
3110 u8 reg_idx = ring->reg_idx;
3111 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3112
3113 srrctl |= IXGBE_SRRCTL_DROP_EN;
3114
3115 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3116}
3117
3118static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3119 struct ixgbe_ring *ring)
3120{
3121 struct ixgbe_hw *hw = &adapter->hw;
3122 u8 reg_idx = ring->reg_idx;
3123 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3124
3125 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3126
3127 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3128}
3129
3130#ifdef CONFIG_IXGBE_DCB
3131void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3132#else
3133static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3134#endif
3135{
3136 int i;
3137 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3138
3139 if (adapter->ixgbe_ieee_pfc)
3140 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3141
3142
3143 /* We should set the drop enable bit if:
3144  *  SR-IOV is enabled
3145  *   or
3146  *  Number of Rx queues > 1 and flow control is disabled
3147  *
3148  *  This allows us to avoid head of line blocking for security
3149  *  and performance reasons.
3150  */
3151 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3152 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3153 for (i = 0; i < adapter->num_rx_queues; i++)
3154 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3155 } else {
3156 for (i = 0; i < adapter->num_rx_queues; i++)
3157 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3158 }
3159}
3160
3161#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3162
3163static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3164 struct ixgbe_ring *rx_ring)
3165{
3166 struct ixgbe_hw *hw = &adapter->hw;
3167 u32 srrctl;
3168 u8 reg_idx = rx_ring->reg_idx;
3169
3170 if (hw->mac.type == ixgbe_mac_82598EB) {
3171 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3172
3173 /*
3174  * if VMDq is not active we must program one srrctl register
3175  * per RSS queue since we have enabled RDRXCTL.MVMEN
3176  */
3177 reg_idx &= mask;
3178 }
3179
3180 /* configure header buffer length, needed for RSC */
3181 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3182
3183 /* configure the packet buffer length */
3184 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3185
3186 /* configure descriptor type */
3187 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3188
3189 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3190}
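
/* A sketch (not driver code) of the SRRCTL packing done above, assuming
 * the usual field layout: the header buffer size is programmed in
 * 64-byte units (IXGBE_RX_HDR_SIZE of 256 becomes 256 << 2 == 4 << 8)
 * and the packet buffer size in 1 KB units (a 2 KB buffer becomes
 * 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT == 2).
 */
#if 0	/* illustrative only, not built */
static u32 example_srrctl(void)
{
	return (256 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) |  /* hdr: 4 x 64B */
	       (2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |     /* pkt: 2 x 1KB */
	       IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
#endif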
3191
3192static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3193{
3194 struct ixgbe_hw *hw = &adapter->hw;
3195 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
3196 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
3197 0x6A3E67EA, 0x14364D17, 0x3BED200D};
3198 u32 mrqc = 0, reta = 0;
3199 u32 rxcsum;
3200 int i, j;
3201 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3202
3203 /*
3204  * Program table for at least 2 queues w/ SR-IOV so that VFs can
3205  * make full use of any rings they may have.  We will use the
3206  * PSRTYPE register to control how many rings we use within the PF.
3207  */
3208 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
3209 rss_i = 2;
3210
3211 /* Fill out hash function seeds */
3212 for (i = 0; i < 10; i++)
3213 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
3214
3215 /* Fill out redirection table */
3216 for (i = 0, j = 0; i < 128; i++, j++) {
3217 if (j == rss_i)
3218 j = 0;
3219 /* reta = 4-byte sliding window of
3220  * 0x00..(indices-1)(indices-1)00..etc. */
3221 reta = (reta << 8) | (j * 0x11);
3222 if ((i & 3) == 3)
3223 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3224 }
3225
3226 /* Disable indicating checksum in descriptor, enables RSS hash */
3227 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3228 rxcsum |= IXGBE_RXCSUM_PCSD;
3229 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3230
3231 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3232 if (adapter->ring_feature[RING_F_RSS].mask)
3233 mrqc = IXGBE_MRQC_RSSEN;
3234 } else {
3235 u8 tcs = netdev_get_num_tc(adapter->netdev);
3236
3237 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3238 if (tcs > 4)
3239 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3240 else if (tcs > 1)
3241 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3242 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3243 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3244 else
3245 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3246 } else {
3247 if (tcs > 4)
3248 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3249 else if (tcs > 1)
3250 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3251 else
3252 mrqc = IXGBE_MRQC_RSSEN;
3253 }
3254 }
3255
3256 /* Perform hash on these packet types */
3257 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3258 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3259 IXGBE_MRQC_RSS_FIELD_IPV6 |
3260 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3261
3262 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3263 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3264 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3265 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3266
3267 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3268}
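
/* A sketch (not driver code) of the RETA packing in ixgbe_setup_mrqc()
 * above: the 128-entry redirection table is written four one-byte
 * entries at a time, and j * 0x11 replicates the queue index into both
 * nibbles of each byte.  With rss_i = 4 every register is written as
 * 0x00112233.
 */
#if 0	/* illustrative only, not built */
static void example_reta_fill(void)
{
	u32 reta = 0;
	int i, j;

	for (i = 0, j = 0; i < 8; i++, j++) {
		if (j == 4)
			j = 0;
		reta = (reta << 8) | (j * 0x11);
		if ((i & 3) == 3)
			pr_info("RETA(%d) = 0x%08x\n", i >> 2, reta);
	}
}
#endif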
3269
3270/**
3271 * ixgbe_configure_rscctl - enable RSC for the indicated ring
3272 * @adapter: address of board private structure
3273 * @ring: structure containing ring specific data
3274 **/
3275static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
3276 struct ixgbe_ring *ring)
3277{
3278 struct ixgbe_hw *hw = &adapter->hw;
3279 u32 rscctrl;
3280 u8 reg_idx = ring->reg_idx;
3281
3282 if (!ring_is_rsc_enabled(ring))
3283 return;
3284
3285 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
3286 rscctrl |= IXGBE_RSCCTL_RSCEN;
3287
3288 /* limit the number of descriptors per RSC aggregation: with
3289  * MAXDESC_16 each aggregated receive uses at most 16 descriptors,
3290  * keeping max desc * buf_size within a single aggregation
3291  */
3292 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3293 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
3294}
3295
3296#define IXGBE_MAX_RX_DESC_POLL 10
3297static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3298 struct ixgbe_ring *ring)
3299{
3300 struct ixgbe_hw *hw = &adapter->hw;
3301 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3302 u32 rxdctl;
3303 u8 reg_idx = ring->reg_idx;
3304
3305 /* RXDCTL.EN will return 0 on 82598 if link is down */
3306 if (hw->mac.type == ixgbe_mac_82598EB &&
3307 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3308 return;
3309
3310 do {
3311 usleep_range(1000, 2000);
3312 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3313 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3314
3315 if (!wait_loop) {
3316 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3317 "the polling period\n", reg_idx);
3318 }
3319}
3320
3321void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3322 struct ixgbe_ring *ring)
3323{
3324 struct ixgbe_hw *hw = &adapter->hw;
3325 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3326 u32 rxdctl;
3327 u8 reg_idx = ring->reg_idx;
3328
3329 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3330 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3331
3332 /* write value back with RXDCTL.ENABLE bit cleared */
3333 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3334
3335 if (hw->mac.type == ixgbe_mac_82598EB &&
3336 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3337 return;
3338
3339 /* the hardware may take up to 100us to really disable the Rx queue */
3340 do {
3341 udelay(10);
3342 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3343 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3344
3345 if (!wait_loop) {
3346 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3347 "the polling period\n", reg_idx);
3348 }
3349}
3350
3351void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3352 struct ixgbe_ring *ring)
3353{
3354 struct ixgbe_hw *hw = &adapter->hw;
3355 u64 rdba = ring->dma;
3356 u32 rxdctl;
3357 u8 reg_idx = ring->reg_idx;
3358
3359 /* disable queue to avoid issues while updating state */
3360 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3361 ixgbe_disable_rx_queue(adapter, ring);
3362
3363 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3364 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3365 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3366 ring->count * sizeof(union ixgbe_adv_rx_desc));
3367 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3368 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
3369 ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
3370
3371 ixgbe_configure_srrctl(adapter, ring);
3372 ixgbe_configure_rscctl(adapter, ring);
3373
3374 if (hw->mac.type == ixgbe_mac_82598EB) {
3375 /*
3376  * enable cache line friendly hardware writes:
3377  * PTHRESH=32 descriptors (half the internal cache),
3378  * this also removes ugly rx_no_buffer_count increment
3379  * HTHRESH=4 descriptors (to minimize latency on fetch)
3380  * WTHRESH=8 burst writeback up to two cache lines
3381  */
3382 rxdctl &= ~0x3FFFFF;
3383 rxdctl |= 0x080420;
3384 }
3385
3386 /* enable receive descriptor ring */
3387 rxdctl |= IXGBE_RXDCTL_ENABLE;
3388 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3389
3390 ixgbe_rx_desc_queue_enable(adapter, ring);
3391 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
3392}
3393
3394static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3395{
3396 struct ixgbe_hw *hw = &adapter->hw;
3397 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
3398 int p;
3399
3400 /* PSRTYPE must be initialized in non 82598 adapters */
3401 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3402 IXGBE_PSRTYPE_UDPHDR |
3403 IXGBE_PSRTYPE_IPV4HDR |
3404 IXGBE_PSRTYPE_L2HDR |
3405 IXGBE_PSRTYPE_IPV6HDR;
3406
3407 if (hw->mac.type == ixgbe_mac_82598EB)
3408 return;
3409
3410 if (rss_i > 3)
3411 psrtype |= 2 << 29;
3412 else if (rss_i > 1)
3413 psrtype |= 1 << 29;
3414
3415 for (p = 0; p < adapter->num_rx_pools; p++)
3416 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
3417 psrtype);
3418}
3419
3420static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3421{
3422 struct ixgbe_hw *hw = &adapter->hw;
3423 u32 reg_offset, vf_shift;
3424 u32 gcr_ext, vmdctl;
3425 int i;
3426
3427 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3428 return;
3429
3430 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3431 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
3432 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
3433 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
3434 vmdctl |= IXGBE_VT_CTL_REPLEN;
3435 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
3436
3437 vf_shift = VMDQ_P(0) % 32;
3438 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
3439
3440 /* Enable only the PF's pool for Tx/Rx */
3441 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
3442 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
3443 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
3444 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
3445 if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
3446 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3447
3448 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
3449 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
3450
3451 /*
3452  * Set up VF register offsets for selected VT Mode,
3453  * i.e. 32 or 64 VFs for SR-IOV
3454  */
3455 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3456 case IXGBE_82599_VMDQ_8Q_MASK:
3457 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
3458 break;
3459 case IXGBE_82599_VMDQ_4Q_MASK:
3460 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
3461 break;
3462 default:
3463 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
3464 break;
3465 }
3466
3467 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3468
3469
3470 /* Enable MAC Anti-Spoofing */
3471 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
3472 adapter->num_vfs);
3473
3474 for (i = 0; i < adapter->num_vfs; i++) {
3475 if (!adapter->vfinfo[i].spoofchk_enabled)
3476 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
3477 }
3478}
3479
3480static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3481{
3482 struct ixgbe_hw *hw = &adapter->hw;
3483 struct net_device *netdev = adapter->netdev;
3484 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3485 struct ixgbe_ring *rx_ring;
3486 int i;
3487 u32 mhadd, hlreg0;
3488
3489#ifdef IXGBE_FCOE
3490 /* adjust max frame to be able to do baby jumbo for FCoE */
3491 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3492 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3493 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3494
3495#endif
3496
3497 /* adjust max frame to be at least the size of a standard frame */
3498 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3499 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
3500
3501 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3502 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3503 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3504 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3505
3506 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3507 }
3508
3509 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3510 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
3511 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3512 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3513
3514
3515 /* flag RSC enable state on each Rx ring; the RSCCTL registers
3516  * themselves are programmed per ring in ixgbe_configure_rscctl()
3517  */
3518 for (i = 0; i < adapter->num_rx_queues; i++) {
3519 rx_ring = adapter->rx_ring[i];
3520 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3521 set_ring_rsc_enabled(rx_ring);
3522 else
3523 clear_ring_rsc_enabled(rx_ring);
3524 }
3525}
3526
3527static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3528{
3529 struct ixgbe_hw *hw = &adapter->hw;
3530 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3531
3532 switch (hw->mac.type) {
3533 case ixgbe_mac_82598EB:
3534 /*
3535  * For VMDq support of different descriptor types or
3536  * buffer sizes through the use of multiple SRRCTL
3537  * registers, RDRXCTL.MVMEN must be set to 1
3538  *
3539  * also, the manual doesn't mention it clearly but DCA hints
3540  * will only use queue 0's tags unless this bit is set.  Side
3541  * effects of setting this bit are only that SRRCTL must be
3542  * fully programmed [0..15]
3543  */
3544 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3545 break;
3546 case ixgbe_mac_82599EB:
3547 case ixgbe_mac_X540:
3548 /* Disable RSC for ACK packets */
3549 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3550 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3551 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3552 /* hardware requires some bits to be set by default */
3553 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3554 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3555 break;
3556 default:
3557 /* We should do nothing since we don't know this hardware */
3558 return;
3559 }
3560
3561 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3562}
3563
3564/**
3565 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
3566 * @adapter: board private structure
3567 *
3568 * Configure the Rx unit of the MAC after a reset.
3569 **/
3570static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3571{
3572 struct ixgbe_hw *hw = &adapter->hw;
3573 int i;
3574 u32 rxctrl, rfctl;
3575
3576 /* disable receives while setting up the descriptors */
3577 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3578 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3579
3580 ixgbe_setup_psrtype(adapter);
3581 ixgbe_setup_rdrxctl(adapter);
3582
3583 /* RSC Setup */
3584 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
3585 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
3586 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
3587 rfctl |= IXGBE_RFCTL_RSC_DIS;
3588 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
3589
3590 /* Program registers for the distribution of queues */
3591 ixgbe_setup_mrqc(adapter);
3592
3593 /* set_rx_buffer_len must be called before ring initialization */
3594 ixgbe_set_rx_buffer_len(adapter);
3595
3596 /*
3597  * Setup the HW Rx Head and Tail Descriptor Pointers and
3598  * the Base and Length of the Rx Descriptor Ring
3599  */
3600 for (i = 0; i < adapter->num_rx_queues; i++)
3601 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
3602
3603 /* disable drop enable for 82598 parts */
3604 if (hw->mac.type == ixgbe_mac_82598EB)
3605 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3606
3607 /* enable all receives */
3608 rxctrl |= IXGBE_RXCTRL_RXEN;
3609 hw->mac.ops.enable_rx_dma(hw, rxctrl);
3610}
3611
3612static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
3613 __be16 proto, u16 vid)
3614{
3615 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3616 struct ixgbe_hw *hw = &adapter->hw;
3617
3618 /* add VID to filter table */
3619 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
3620 set_bit(vid, adapter->active_vlans);
3621
3622 return 0;
3623}
3624
3625static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
3626 __be16 proto, u16 vid)
3627{
3628 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3629 struct ixgbe_hw *hw = &adapter->hw;
3630
3631 /* remove VID from filter table */
3632 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
3633 clear_bit(vid, adapter->active_vlans);
3634
3635 return 0;
3636}
3637
3638/**
3639 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
3640 * @adapter: driver data
3641 **/
3642static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3643{
3644 struct ixgbe_hw *hw = &adapter->hw;
3645 u32 vlnctrl;
3646
3647 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3648 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3649 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3650}
3651
3652/**
3653 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3654 * @adapter: driver data
3655 **/
3656static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3657{
3658 struct ixgbe_hw *hw = &adapter->hw;
3659 u32 vlnctrl;
3660
3661 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3662 vlnctrl |= IXGBE_VLNCTRL_VFE;
3663 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3664 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3665}
3666
3667/**
3668 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3669 * @adapter: driver data
3670 **/
3671static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3672{
3673 struct ixgbe_hw *hw = &adapter->hw;
3674 u32 vlnctrl;
3675 int i, j;
3676
3677 switch (hw->mac.type) {
3678 case ixgbe_mac_82598EB:
3679 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3680 vlnctrl &= ~IXGBE_VLNCTRL_VME;
3681 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3682 break;
3683 case ixgbe_mac_82599EB:
3684 case ixgbe_mac_X540:
3685 for (i = 0; i < adapter->num_rx_queues; i++) {
3686 j = adapter->rx_ring[i]->reg_idx;
3687 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3688 vlnctrl &= ~IXGBE_RXDCTL_VME;
3689 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3690 }
3691 break;
3692 default:
3693 break;
3694 }
3695}
3696
3697/**
3698 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
3699 * @adapter: driver data
3700 **/
3701static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3702{
3703 struct ixgbe_hw *hw = &adapter->hw;
3704 u32 vlnctrl;
3705 int i, j;
3706
3707 switch (hw->mac.type) {
3708 case ixgbe_mac_82598EB:
3709 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3710 vlnctrl |= IXGBE_VLNCTRL_VME;
3711 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3712 break;
3713 case ixgbe_mac_82599EB:
3714 case ixgbe_mac_X540:
3715 for (i = 0; i < adapter->num_rx_queues; i++) {
3716 j = adapter->rx_ring[i]->reg_idx;
3717 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3718 vlnctrl |= IXGBE_RXDCTL_VME;
3719 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3720 }
3721 break;
3722 default:
3723 break;
3724 }
3725}
3726
3727static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3728{
3729 u16 vid;
3730
3731 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
3732
3733 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
3734 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
3735}
3736
3737/**
3738 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
3739 * @netdev: network interface device structure
3740 *
3741 * Writes unicast address list to the RAR table.
3742 * Returns: -ENOMEM on failure/insufficient address space
3743 *          0 on no addresses written
3744 *          X on writing X addresses to the RAR table
3745 **/
3746static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3747{
3748 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3749 struct ixgbe_hw *hw = &adapter->hw;
3750 unsigned int rar_entries = hw->mac.num_rar_entries - 1;
3751 int count = 0;
3752
3753 /* In SR-IOV mode significantly less RAR entries are available */
3754 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3755 rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
3756
3757 /* return ENOMEM indicating insufficient memory for addresses */
3758 if (netdev_uc_count(netdev) > rar_entries)
3759 return -ENOMEM;
3760
3761 if (!netdev_uc_empty(netdev)) {
3762 struct netdev_hw_addr *ha;
3763 /* return error if we do not support writing to RAR table */
3764 if (!hw->mac.ops.set_rar)
3765 return -ENOMEM;
3766
3767 netdev_for_each_uc_addr(ha, netdev) {
3768 if (!rar_entries)
3769 break;
3770 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3771 VMDQ_P(0), IXGBE_RAH_AV);
3772 count++;
3773 }
3774 }
3775 /* clear any remaining RAR entries that were not used above */
3776 for (; rar_entries > 0 ; rar_entries--)
3777 hw->mac.ops.clear_rar(hw, rar_entries);
3778
3779 return count;
3780}
3781
3782/**
3783 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
3784 * @netdev: network interface device structure
3785 *
3786 * The set_rx_method entry point is called whenever the unicast/multicast
3787 * address list or the network interface flags are updated.  This routine
3788 * is responsible for configuring the hardware for proper unicast,
3789 * multicast and promiscuous mode.
3790 **/
3791void ixgbe_set_rx_mode(struct net_device *netdev)
3792{
3793 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3794 struct ixgbe_hw *hw = &adapter->hw;
3795 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3796 int count;
3797
3798
3799 /* Check for Promiscuous and All Multicast modes */
3800 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3801
3802 /* set all bits that we expect to always be set */
3803 fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
3804 fctrl |= IXGBE_FCTRL_BAM;
3805 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3806 fctrl |= IXGBE_FCTRL_PMCF;
3807
3808 /* clear the bits we are changing the status of */
3809 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3810
3811 if (netdev->flags & IFF_PROMISC) {
3812 hw->addr_ctrl.user_set_promisc = true;
3813 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3814 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
3815
3816 /* Only disable hardware filter vlans in promiscuous mode
3817  * if SR-IOV and VMDQ are disabled - otherwise ensure that
3818  * hardware VLAN filters remain enabled. */
3819 if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
3820 IXGBE_FLAG_SRIOV_ENABLED)))
3821 ixgbe_vlan_filter_disable(adapter);
3822 else
3823 ixgbe_vlan_filter_enable(adapter);
3824 } else {
3825 if (netdev->flags & IFF_ALLMULTI) {
3826 fctrl |= IXGBE_FCTRL_MPE;
3827 vmolr |= IXGBE_VMOLR_MPE;
3828 } else {
3829 /*
3830  * Write addresses to the MTA, if the attempt fails
3831  * then we should just turn on promiscuous mode so
3832  * that we can at least receive multicast traffic
3833  */
3834 hw->mac.ops.update_mc_addr_list(hw, netdev);
3835 vmolr |= IXGBE_VMOLR_ROMPE;
3836 }
3837 ixgbe_vlan_filter_enable(adapter);
3838 hw->addr_ctrl.user_set_promisc = false;
3839 }
3840
3841 /*
3842  * Write addresses to available RAR registers, if there is not
3843  * sufficient space to store all the addresses then enable
3844  * unicast promiscuous mode
3845  */
3846 count = ixgbe_write_uc_addr_list(netdev);
3847 if (count < 0) {
3848 fctrl |= IXGBE_FCTRL_UPE;
3849 vmolr |= IXGBE_VMOLR_ROPE;
3850 }
3851
3852 if (adapter->num_vfs)
3853 ixgbe_restore_vf_multicasts(adapter);
3854
3855 if (hw->mac.type != ixgbe_mac_82598EB) {
3856 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
3857 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3858 IXGBE_VMOLR_ROPE);
3859 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
3860 }
3861
3862 /* This is useful for sniffing bad packets. */
3863 if (adapter->netdev->features & NETIF_F_RXALL) {
3864
3865 /* UPE and MPE will be handled by normal PROMISC logic */
3866 fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
3867 	  IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
3868 	  IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */
3869
3870 fctrl &= ~(IXGBE_FCTRL_DPF);
3871
3872 }
3873
3874 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3875
3876 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3877 ixgbe_vlan_strip_enable(adapter);
3878 else
3879 ixgbe_vlan_strip_disable(adapter);
3880}
3881
3882static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3883{
3884 int q_idx;
3885
3886 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
3887 ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
3888 napi_enable(&adapter->q_vector[q_idx]->napi);
3889 }
3890}
3891
3892static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3893{
3894 int q_idx;
3895
3896 local_bh_disable(); /* for ixgbe_qv_lock_napi() */
3897 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
3898 napi_disable(&adapter->q_vector[q_idx]->napi);
3899 while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
3900 pr_info("QV %d locked\n", q_idx);
3901 mdelay(1);
3902 }
3903 }
3904 local_bh_enable();
3905}
3906
3907#ifdef CONFIG_IXGBE_DCB
3908
3909/**
3910 * ixgbe_configure_dcb - Configure DCB hardware
3911 * @adapter: ixgbe adapter struct
3912 *
3913 * Called by the driver on open to configure the DCB hardware, and
3914 * by the gennetlink interface when reconfiguring the DCB state.
3915 */
3916static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3917{
3918 struct ixgbe_hw *hw = &adapter->hw;
3919 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3920
3921 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3922 if (hw->mac.type == ixgbe_mac_82598EB)
3923 netif_set_gso_max_size(adapter->netdev, 65536);
3924 return;
3925 }
3926
3927 if (hw->mac.type == ixgbe_mac_82598EB)
3928 netif_set_gso_max_size(adapter->netdev, 32768);
3929
3930#ifdef IXGBE_FCOE
3931 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3932 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3933#endif
3934
3935 /* reconfigure the hardware */
3936 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
3937 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3938 DCB_TX_CONFIG);
3939 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3940 DCB_RX_CONFIG);
3941 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
3942 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
3943 ixgbe_dcb_hw_ets(&adapter->hw,
3944 adapter->ixgbe_ieee_ets,
3945 max_frame);
3946 ixgbe_dcb_hw_pfc_config(&adapter->hw,
3947 adapter->ixgbe_ieee_pfc->pfc_en,
3948 adapter->ixgbe_ieee_ets->prio_tc);
3949 }
3950
3951 /* Enable RSS Hash per TC */
3952 if (hw->mac.type != ixgbe_mac_82598EB) {
3953 u32 msb = 0;
3954 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
3955
3956 while (rss_i) {
3957 msb++;
3958 rss_i >>= 1;
3959 }
3960
3961 /* write msb to all 8 TCs */
3962 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
3963 }
3964}
3965#endif
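
/* A worked example (not driver code) of the RQTC value computed at the
 * end of ixgbe_configure_dcb() above: with 16 RSS queues per TC, rss_i
 * starts at 15, the loop counts msb = 4 bits, and msb * 0x11111111 ==
 * 0x44444444 programs a 4-bit RSS queue-index width for each of the
 * eight traffic classes.
 */
#if 0	/* illustrative only, not built */
static u32 example_rqtc(void)
{
	u32 msb = 0;
	u16 rss_i = 16 - 1;

	while (rss_i) {
		msb++;
		rss_i >>= 1;
	}
	return msb * 0x11111111;	/* 0x44444444 */
}
#endif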
3966
3967/* Additional bittime to account for IXGBE framing */
3968#define IXGBE_ETH_FRAMING 20
3969
3970/**
3971 * ixgbe_hpbthresh - calculate high water mark for flow control
3972 *
3973 * @adapter: board private structure to calculate for
3974 * @pb: packet buffer to calculate
3975 */
3976static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
3977{
3978 struct ixgbe_hw *hw = &adapter->hw;
3979 struct net_device *dev = adapter->netdev;
3980 int link, tc, kb, marker;
3981 u32 dv_id, rx_pba;
3982
3983 /* Calculate max LAN frame size */
3984 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
3985
3986#ifdef IXGBE_FCOE
3987 /* FCoE traffic class uses FCOE jumbo frames */
3988 if ((dev->features & NETIF_F_FCOE_MTU) &&
3989 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
3990 (pb == ixgbe_fcoe_get_tc(adapter)))
3991 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3992
3993#endif
3994
3995 switch (hw->mac.type) {
3996 case ixgbe_mac_X540:
3997 dv_id = IXGBE_DV_X540(link, tc);
3998 break;
3999 default:
4000 dv_id = IXGBE_DV(link, tc);
4001 break;
4002 }
4003
4004 /* Loopback switch introduces additional latency */
4005 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4006 dv_id += IXGBE_B2BT(tc);
4007
4008 /* Delay value is calculated in bit times convert to KB */
4009 kb = IXGBE_BT2KB(dv_id);
4010 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
4011
4012 marker = rx_pba - kb;
4013
4014 /* It is possible that the packet buffer is not large enough
4015  * to provide required headroom. In this case throw an error
4016  * to user and do the best we can.
4017  */
4018 if (marker < 0) {
4019 e_warn(drv, "Packet Buffer(%i) cannot provide enough "
4020 "headroom to support flow control. "
4021 "Decrease MTU or number of traffic classes\n", pb);
4022 marker = tc + 1;
4023 }
4024
4025 return marker;
4026}
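
/* A sketch (not driver code) of the arithmetic above, with assumed
 * numbers since IXGBE_DV() and IXGBE_BT2KB() expand in ixgbe.h: for a
 * 1500-byte MTU the worst-case frame is 1500 + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + IXGBE_ETH_FRAMING (20) = 1538 bytes.  The delay
 * value in bit times is converted to KB and subtracted from the packet
 * buffer size (RXPBSIZE, in KB) to place the XOFF high-water mark.
 */
#if 0	/* illustrative only, not built */
static int example_high_water(u32 rx_pba_kb)
{
	int frame = 1500 + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
	u32 kb = IXGBE_BT2KB(IXGBE_DV(frame, frame));	/* headroom in KB */

	return rx_pba_kb - kb;				/* XOFF threshold */
}
#endif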
4027
4028
4029/**
4030 * ixgbe_lpbthresh - calculate low water mark for flow control
4031 *
4032 * @adapter: board private structure to calculate for
4033 */
4034static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
4035{
4036 struct ixgbe_hw *hw = &adapter->hw;
4037 struct net_device *dev = adapter->netdev;
4038 int tc;
4039 u32 dv_id;
4040
4041 /* Calculate max LAN frame size */
4042 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4043
4044 /* Calculate delay value for device */
4045 switch (hw->mac.type) {
4046 case ixgbe_mac_X540:
4047 dv_id = IXGBE_LOW_DV_X540(tc);
4048 break;
4049 default:
4050 dv_id = IXGBE_LOW_DV(tc);
4051 break;
4052 }
4053
4054 /* Delay value is calculated in bit times convert to KB */
4055 return IXGBE_BT2KB(dv_id);
4056}
4057
4058/*
4059 * ixgbe_pbthresh_setup - calculate and setup high low water marks
4060 */
4061static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
4062{
4063 struct ixgbe_hw *hw = &adapter->hw;
4064 int num_tc = netdev_get_num_tc(adapter->netdev);
4065 int i;
4066
4067 if (!num_tc)
4068 num_tc = 1;
4069
4070 hw->fc.low_water = ixgbe_lpbthresh(adapter);
4071
4072 for (i = 0; i < num_tc; i++) {
4073 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
4074
4075 /* Low water marks must not be larger than high water marks */
4076 if (hw->fc.low_water > hw->fc.high_water[i])
4077 hw->fc.low_water = 0;
4078 }
4079}
4080
4081static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
4082{
4083 struct ixgbe_hw *hw = &adapter->hw;
4084 int hdrm;
4085 u8 tc = netdev_get_num_tc(adapter->netdev);
4086
4087 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4088 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
4089 hdrm = 32 << adapter->fdir_pballoc;
4090 else
4091 hdrm = 0;
4092
4093 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
4094 ixgbe_pbthresh_setup(adapter);
4095}
4096
4097static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
4098{
4099 struct ixgbe_hw *hw = &adapter->hw;
4100 struct hlist_node *node2;
4101 struct ixgbe_fdir_filter *filter;
4102
4103 spin_lock(&adapter->fdir_perfect_lock);
4104
4105 if (!hlist_empty(&adapter->fdir_filter_list))
4106 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
4107
4108 hlist_for_each_entry_safe(filter, node2,
4109 &adapter->fdir_filter_list, fdir_node) {
4110 ixgbe_fdir_write_perfect_filter_82599(hw,
4111 &filter->filter,
4112 filter->sw_idx,
4113 (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
4114 IXGBE_FDIR_DROP_QUEUE :
4115 adapter->rx_ring[filter->action]->reg_idx);
4116 }
4117
4118 spin_unlock(&adapter->fdir_perfect_lock);
4119}
4120
4121static void ixgbe_configure(struct ixgbe_adapter *adapter)
4122{
4123 struct ixgbe_hw *hw = &adapter->hw;
4124
4125 ixgbe_configure_pb(adapter);
4126#ifdef CONFIG_IXGBE_DCB
4127 ixgbe_configure_dcb(adapter);
4128#endif
4129 /*
4130  * We must restore virtualization before VLANs or else
4131  * the VLVF registers will not be populated
4132  */
4133 ixgbe_configure_virtualization(adapter);
4134
4135 ixgbe_set_rx_mode(adapter->netdev);
4136 ixgbe_restore_vlan(adapter);
4137
4138 switch (hw->mac.type) {
4139 case ixgbe_mac_82599EB:
4140 case ixgbe_mac_X540:
4141 hw->mac.ops.disable_rx_buff(hw);
4142 break;
4143 default:
4144 break;
4145 }
4146
4147 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
4148 ixgbe_init_fdir_signature_82599(&adapter->hw,
4149 adapter->fdir_pballoc);
4150 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
4151 ixgbe_init_fdir_perfect_82599(&adapter->hw,
4152 adapter->fdir_pballoc);
4153 ixgbe_fdir_filter_restore(adapter);
4154 }
4155
4156 switch (hw->mac.type) {
4157 case ixgbe_mac_82599EB:
4158 case ixgbe_mac_X540:
4159 hw->mac.ops.enable_rx_buff(hw);
4160 break;
4161 default:
4162 break;
4163 }
4164
4165#ifdef IXGBE_FCOE
4166 /* configure FCoE L2 filters, redirection table, and Rx control */
4167 ixgbe_configure_fcoe(adapter);
4168
4169#endif
4170 ixgbe_configure_tx(adapter);
4171 ixgbe_configure_rx(adapter);
4172}
4173
4174static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
4175{
4176 switch (hw->phy.type) {
4177 case ixgbe_phy_sfp_avago:
4178 case ixgbe_phy_sfp_ftl:
4179 case ixgbe_phy_sfp_intel:
4180 case ixgbe_phy_sfp_unknown:
4181 case ixgbe_phy_sfp_passive_tyco:
4182 case ixgbe_phy_sfp_passive_unknown:
4183 case ixgbe_phy_sfp_active_unknown:
4184 case ixgbe_phy_sfp_ftl_active:
4185 case ixgbe_phy_qsfp_passive_unknown:
4186 case ixgbe_phy_qsfp_active_unknown:
4187 case ixgbe_phy_qsfp_intel:
4188 case ixgbe_phy_qsfp_unknown:
4189 return true;
4190 case ixgbe_phy_nl:
4191 if (hw->mac.type == ixgbe_mac_82598EB)
4192 return true;
4193 default:
4194 return false;
4195 }
4196}
4197
4198/**
4199 * ixgbe_sfp_link_config - set up SFP+ link
4200 * @adapter: pointer to private adapter struct
4201 **/
4202static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
4203{
4204 /*
4205  * We are assuming the worst case scenario here, and that
4206  * is that an SFP was inserted/removed after the reset
4207  * but before SFP detection was enabled.  As such the best
4208  * solution is to just start searching as soon as we start
4209  */
4210 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
4211 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
4212
4213 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
4214}
4215
4216/**
4217 * ixgbe_non_sfp_link_config - set up non-SFP+ link
4218 * @hw: pointer to private hardware struct
4219 *
4220 * Returns 0 on success, negative on failure
4221 **/
4222static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
4223{
4224 u32 speed;
4225 bool autoneg, link_up = false;
4226 u32 ret = IXGBE_ERR_LINK_SETUP;
4227
4228 if (hw->mac.ops.check_link)
4229 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
4230
4231 if (ret)
4232 goto link_cfg_out;
4233
4234 speed = hw->phy.autoneg_advertised;
4235 if ((!speed) && (hw->mac.ops.get_link_capabilities))
4236 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
4237 &autoneg);
4238 if (ret)
4239 goto link_cfg_out;
4240
4241 if (hw->mac.ops.setup_link)
4242 ret = hw->mac.ops.setup_link(hw, speed, link_up);
4243link_cfg_out:
4244 return ret;
4245}
4246
4247static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
4248{
4249 struct ixgbe_hw *hw = &adapter->hw;
4250 u32 gpie = 0;
4251
4252 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4253 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
4254 IXGBE_GPIE_OCD;
4255 gpie |= IXGBE_GPIE_EIAME;
4256 /*
4257  * use EIAM to auto-mask when MSI-X interrupt is asserted
4258  * this saves a register write for every interrupt
4259  */
4260 switch (hw->mac.type) {
4261 case ixgbe_mac_82598EB:
4262 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4263 break;
4264 case ixgbe_mac_82599EB:
4265 case ixgbe_mac_X540:
4266 default:
4267 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4268 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4269 break;
4270 }
4271 } else {
4272 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
4273  * specifically only auto mask tx and rx interrupts */
4274 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4275 }
4276
4277
4278 /* XXX: to interrupt immediately for EICS writes, enable this */
4279 /* gpie |= IXGBE_GPIE_EIMEN; */
4280 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
4281 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
4282
4283 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4284 case IXGBE_82599_VMDQ_8Q_MASK:
4285 gpie |= IXGBE_GPIE_VTMODE_16;
4286 break;
4287 case IXGBE_82599_VMDQ_4Q_MASK:
4288 gpie |= IXGBE_GPIE_VTMODE_32;
4289 break;
4290 default:
4291 gpie |= IXGBE_GPIE_VTMODE_64;
4292 break;
4293 }
4294 }
4295
4296 /* Enable Thermal over heat sensor interrupt */
4297 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
4298 switch (adapter->hw.mac.type) {
4299 case ixgbe_mac_82599EB:
4300 gpie |= IXGBE_SDP0_GPIEN;
4301 break;
4302 case ixgbe_mac_X540:
4303 gpie |= IXGBE_EIMS_TS;
4304 break;
4305 default:
4306 break;
4307 }
4308 }
4309
4310 /* Enable fan failure interrupt */
4311 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
4312 gpie |= IXGBE_SDP1_GPIEN;
4313
4314 if (hw->mac.type == ixgbe_mac_82599EB) {
4315 gpie |= IXGBE_SDP1_GPIEN;
4316 gpie |= IXGBE_SDP2_GPIEN;
4317 }
4318
4319 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4320}
4321
4322static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
4323{
4324 struct ixgbe_hw *hw = &adapter->hw;
4325 int err;
4326 u32 ctrl_ext;
4327
4328 ixgbe_get_hw_control(adapter);
4329 ixgbe_setup_gpie(adapter);
4330
4331 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4332 ixgbe_configure_msix(adapter);
4333 else
4334 ixgbe_configure_msi_and_legacy(adapter);
4335
4336 /* enable the optics for 82599 SFP+ fiber */
4337 if (hw->mac.ops.enable_tx_laser)
4338 hw->mac.ops.enable_tx_laser(hw);
4339
4340 clear_bit(__IXGBE_DOWN, &adapter->state);
4341 ixgbe_napi_enable_all(adapter);
4342
4343 if (ixgbe_is_sfp(hw)) {
4344 ixgbe_sfp_link_config(adapter);
4345 } else {
4346 err = ixgbe_non_sfp_link_config(hw);
4347 if (err)
4348 e_err(probe, "link_config FAILED %d\n", err);
4349 }
4350
4351 /* clear any pending interrupts, may auto mask */
4352 IXGBE_READ_REG(hw, IXGBE_EICR);
4353 ixgbe_irq_enable(adapter, true, true);
4354
4355 /*
4356  * If this adapter has a fan, check to see if we had a failure
4357  * before we enabled the interrupt.
4358  */
4359 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
4360 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4361 if (esdp & IXGBE_ESDP_SDP1)
4362 e_crit(drv, "Fan has stopped, replace the adapter\n");
4363 }
4364
4365 /* enable transmits */
4366 netif_tx_start_all_queues(adapter->netdev);
4367
4368 /* bring the link up in the watchdog, this could race with our first
4369  * link up interrupt but shouldn't be a problem */
4370 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4371 adapter->link_check_timeout = jiffies;
4372 mod_timer(&adapter->service_timer, jiffies);
4373
4374 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
4375 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4376 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4377 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4378}
4379
4380void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
4381{
4382 WARN_ON(in_interrupt());
4383
4384 adapter->netdev->trans_start = jiffies;
4385
4386 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
4387 usleep_range(1000, 2000);
4388 ixgbe_down(adapter);
4389 /*
4390  * If SR-IOV enabled then wait a bit before bringing the adapter
4391  * back up to give the VFs time to respond to the reset.  The
4392  * two second wait is based upon the watchdog timer cycle in
4393  * the VF driver.
4394  */
4395 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4396 msleep(2000);
4397 ixgbe_up(adapter);
4398 clear_bit(__IXGBE_RESETTING, &adapter->state);
4399}
4400
4401void ixgbe_up(struct ixgbe_adapter *adapter)
4402{
4403 /* hardware has been reset, we need to reload some things */
4404 ixgbe_configure(adapter);
4405
4406 ixgbe_up_complete(adapter);
4407}
4408
4409void ixgbe_reset(struct ixgbe_adapter *adapter)
4410{
4411 struct ixgbe_hw *hw = &adapter->hw;
4412 int err;
4413
4414 /* lock SFP init bit to prevent race conditions with the watchdog */
4415 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
4416 usleep_range(1000, 2000);
4417
4418 /* clear all SFP and link config related flags while holding SFP_INIT */
4419 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
4420 IXGBE_FLAG2_SFP_NEEDS_RESET);
4421 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4422
4423 err = hw->mac.ops.init_hw(hw);
4424 switch (err) {
4425 case 0:
4426 case IXGBE_ERR_SFP_NOT_PRESENT:
4427 case IXGBE_ERR_SFP_NOT_SUPPORTED:
4428 break;
4429 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
4430 e_dev_err("master disable timed out\n");
4431 break;
4432 case IXGBE_ERR_EEPROM_VERSION:
4433 /* We are running on a pre-production device, log a warning */
4434 e_dev_warn("This device is a pre-production adapter/LOM. "
4435 "Please be aware there may be issues associated with "
4436 "your hardware. If you are experiencing problems "
4437 "please contact your Intel or hardware "
4438 "representative who provided you with this "
4439 "hardware.\n");
4440 break;
4441 default:
4442 e_dev_err("Hardware Error: %d\n", err);
4443 }
4444
4445 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
4446
4447 /* reprogram the RAR[0] in case user changed it. */
4448 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
4449
4450 /* update SAN MAC vmdq pool selection */
4451 if (hw->mac.san_mac_rar_index)
4452 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
4453
4454 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
4455 ixgbe_ptp_reset(adapter);
4456}
4457
4458/**
4459 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
4460 * @rx_ring: ring to free buffers from
4461 **/
4462static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4463{
4464 struct device *dev = rx_ring->dev;
4465 unsigned long size;
4466 u16 i;
4467
4468 /* ring already cleared, nothing to do */
4469 if (!rx_ring->rx_buffer_info)
4470 return;
4471
4472 /* Free all the Rx ring sk_buffs */
4473 for (i = 0; i < rx_ring->count; i++) {
4474 struct ixgbe_rx_buffer *rx_buffer;
4475
4476 rx_buffer = &rx_ring->rx_buffer_info[i];
4477 if (rx_buffer->skb) {
4478 struct sk_buff *skb = rx_buffer->skb;
4479 if (IXGBE_CB(skb)->page_released) {
4480 dma_unmap_page(dev,
4481 IXGBE_CB(skb)->dma,
4482 ixgbe_rx_bufsz(rx_ring),
4483 DMA_FROM_DEVICE);
4484 IXGBE_CB(skb)->page_released = false;
4485 }
4486 dev_kfree_skb(skb);
4487 }
4488 rx_buffer->skb = NULL;
4489 if (rx_buffer->dma)
4490 dma_unmap_page(dev, rx_buffer->dma,
4491 ixgbe_rx_pg_size(rx_ring),
4492 DMA_FROM_DEVICE);
4493 rx_buffer->dma = 0;
4494 if (rx_buffer->page)
4495 __free_pages(rx_buffer->page,
4496 ixgbe_rx_pg_order(rx_ring));
4497 rx_buffer->page = NULL;
4498 }
4499
4500 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4501 memset(rx_ring->rx_buffer_info, 0, size);
4502
4503 /* Zero out the descriptor ring */
4504 memset(rx_ring->desc, 0, rx_ring->size);
4505
4506 rx_ring->next_to_alloc = 0;
4507 rx_ring->next_to_clean = 0;
4508 rx_ring->next_to_use = 0;
4509}
4510
4511/**
4512 * ixgbe_clean_tx_ring - Free Tx Buffers
4513 * @tx_ring: ring to be cleaned
4514 **/
4515static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
4516{
4517 struct ixgbe_tx_buffer *tx_buffer_info;
4518 unsigned long size;
4519 u16 i;
4520
4521 /* ring already cleared, nothing to do */
4522 if (!tx_ring->tx_buffer_info)
4523 return;
4524
4525 /* Free all the Tx ring sk_buffs */
4526 for (i = 0; i < tx_ring->count; i++) {
4527 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4528 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4529 }
4530
4531 netdev_tx_reset_queue(txring_txq(tx_ring));
4532
4533 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4534 memset(tx_ring->tx_buffer_info, 0, size);
4535
4536 /* Zero out the descriptor ring */
4537 memset(tx_ring->desc, 0, tx_ring->size);
4538
4539 tx_ring->next_to_use = 0;
4540 tx_ring->next_to_clean = 0;
4541}
4542
4543/**
4544 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
4545 * @adapter: board private structure
4546 **/
4547static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
4548{
4549 int i;
4550
4551 for (i = 0; i < adapter->num_rx_queues; i++)
4552 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
4553}
4554
4555/**
4556 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
4557 * @adapter: board private structure
4558 **/
4559static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
4560{
4561 int i;
4562
4563 for (i = 0; i < adapter->num_tx_queues; i++)
4564 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
4565}
4566
4567static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
4568{
4569 struct hlist_node *node2;
4570 struct ixgbe_fdir_filter *filter;
4571
4572 spin_lock(&adapter->fdir_perfect_lock);
4573
4574 hlist_for_each_entry_safe(filter, node2,
4575 &adapter->fdir_filter_list, fdir_node) {
4576 hlist_del(&filter->fdir_node);
4577 kfree(filter);
4578 }
4579 adapter->fdir_filter_count = 0;
4580
4581 spin_unlock(&adapter->fdir_perfect_lock);
4582}
4583
4584void ixgbe_down(struct ixgbe_adapter *adapter)
4585{
4586 struct net_device *netdev = adapter->netdev;
4587 struct ixgbe_hw *hw = &adapter->hw;
4588 u32 rxctrl;
4589 int i;
4590
4591 /* signal that we are down to the interrupt handler */
4592 set_bit(__IXGBE_DOWN, &adapter->state);
4593
4594 /* disable receives */
4595 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4596 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4597
4598 /* disable all enabled rx queues */
4599 for (i = 0; i < adapter->num_rx_queues; i++)
4600 /* this call also flushes the previous write */
4601 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
4602
4603 usleep_range(10000, 20000);
4604
4605 netif_tx_stop_all_queues(netdev);
4606
4607 /* call carrier off first to avoid false dev_watchdog timeouts */
4608 netif_carrier_off(netdev);
4609 netif_tx_disable(netdev);
4610
4611 ixgbe_irq_disable(adapter);
4612
4613 ixgbe_napi_disable_all(adapter);
4614
4615 adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
4616 IXGBE_FLAG2_RESET_REQUESTED);
4617 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4618
4619 del_timer_sync(&adapter->service_timer);
4620
4621 if (adapter->num_vfs) {
4622 /* Clear EITR Select mapping */
4623 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
4624
4625 /* Mark all the VFs as inactive */
4626 for (i = 0 ; i < adapter->num_vfs; i++)
4627 adapter->vfinfo[i].clear_to_send = false;
4628
4629 /* ping all the active vfs to let them know we are going down */
4630 ixgbe_ping_all_vfs(adapter);
4631
4632 /* Disable all VFTE/VFRE TX/RX */
4633 ixgbe_disable_tx_rx(adapter);
4634 }
4635
4636 /* disable transmits in the hardware now that interrupts are off */
4637 for (i = 0; i < adapter->num_tx_queues; i++) {
4638 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
4639 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
4640 }
4641
4642 /* Disable the Tx DMA engine on 82599 and X540 */
4643 switch (hw->mac.type) {
4644 case ixgbe_mac_82599EB:
4645 case ixgbe_mac_X540:
4646 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
4647 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
4648 ~IXGBE_DMATXCTL_TE));
4649 break;
4650 default:
4651 break;
4652 }
4653
4654 if (!pci_channel_offline(adapter->pdev))
4655 ixgbe_reset(adapter);
4656
4657 /* power down the optics for 82599 SFP+ fiber */
4658 if (hw->mac.ops.disable_tx_laser)
4659 hw->mac.ops.disable_tx_laser(hw);
4660
4661 ixgbe_clean_all_tx_rings(adapter);
4662 ixgbe_clean_all_rx_rings(adapter);
4663
4664#ifdef CONFIG_IXGBE_DCA
4665 /* since we reset the hardware DCA settings were cleared */
4666 ixgbe_setup_dca(adapter);
4667#endif
4668}
4669
4670/**
4671 * ixgbe_tx_timeout - Respond to a Tx Hang
4672 * @netdev: network interface device structure
4673 **/
4674static void ixgbe_tx_timeout(struct net_device *netdev)
4675{
4676 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4677
4678 /* Do the reset outside of interrupt context */
4679 ixgbe_tx_timeout_reset(adapter);
4680}
4681
4682/**
4683 * ixgbe_sw_init - Initialize general software structures
4684 * @adapter: board private structure to initialize
4685 *
4686 * ixgbe_sw_init initializes the Adapter private data structure.
4687 * Fields are initialized based on PCI device information and
4688 * OS network device settings (MTU size).
4689 **/
4690static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
4691{
4692 struct ixgbe_hw *hw = &adapter->hw;
4693 struct pci_dev *pdev = adapter->pdev;
4694 unsigned int rss, fdir;
4695 u32 fwsm;
4696#ifdef CONFIG_IXGBE_DCB
4697 int j;
4698 struct tc_configuration *tc;
4699#endif
4700
4701
4702 /* PCI config space info */
4703 hw->vendor_id = pdev->vendor;
4704 hw->device_id = pdev->device;
4705 hw->revision_id = pdev->revision;
4706 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4707 hw->subsystem_device_id = pdev->subsystem_device;
4708
4709 /* Set common capability flags and settings */
4710 rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
4711 adapter->ring_feature[RING_F_RSS].limit = rss;
4712 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4713 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
4714 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
4715 adapter->atr_sample_rate = 20;
4716 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
4717 adapter->ring_feature[RING_F_FDIR].limit = fdir;
4718 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
4719#ifdef CONFIG_IXGBE_DCA
4720 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
4721#endif
4722#ifdef IXGBE_FCOE
4723 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
4724 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
4725#ifdef CONFIG_IXGBE_DCB
4726 /* Default traffic class to use for FCoE */
4727 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
4728#endif
4729#endif
4730
4731 /* Set MAC specific capability flags and exceptions */
4732 switch (hw->mac.type) {
4733 case ixgbe_mac_82598EB:
4734 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
4735 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
4736
4737 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4738 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
4739
4740 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
4741 adapter->ring_feature[RING_F_FDIR].limit = 0;
4742 adapter->atr_sample_rate = 0;
4743 adapter->fdir_pballoc = 0;
4744#ifdef IXGBE_FCOE
4745 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
4746 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
4747#ifdef CONFIG_IXGBE_DCB
4748 adapter->fcoe.up = 0;
4749#endif
4750#endif
4751 break;
4752 case ixgbe_mac_82599EB:
4753 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
4754 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4755 break;
4756 case ixgbe_mac_X540:
4757 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
4758 if (fwsm & IXGBE_FWSM_TS_ENABLED)
4759 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4760 break;
4761 default:
4762 break;
4763 }
4764
4765#ifdef IXGBE_FCOE
4766 /* FCoE support exists, always init the FCoE lock */
4767 spin_lock_init(&adapter->fcoe.lock);
4768
4769#endif
4770 /* n-tuple support exists, always init our spinlock */
4771 spin_lock_init(&adapter->fdir_perfect_lock);
4772
4773#ifdef CONFIG_IXGBE_DCB
4774 switch (hw->mac.type) {
4775 case ixgbe_mac_X540:
4776 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
4777 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
4778 break;
4779 default:
4780 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
4781 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
4782 break;
4783 }
4784
4785 /* Configure DCB traffic classes */
4786 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
4787 tc = &adapter->dcb_cfg.tc_config[j];
4788 tc->path[DCB_TX_CONFIG].bwg_id = 0;
4789 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
4790 tc->path[DCB_RX_CONFIG].bwg_id = 0;
4791 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
4792 tc->dcb_pfc = pfc_disabled;
4793 }
4794
4795 /* Initialize default user to priority mapping, UPx->TC0 */
4796 tc = &adapter->dcb_cfg.tc_config[0];
4797 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
4798 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
4799
4800 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
4801 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
4802 adapter->dcb_cfg.pfc_mode_enable = false;
4803 adapter->dcb_set_bitmap = 0x00;
4804 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
4805 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
4806 sizeof(adapter->temp_dcb_cfg));
4807
4808#endif
4809
4810 /* default flow control settings */
4811 hw->fc.requested_mode = ixgbe_fc_full;
4812 hw->fc.current_mode = ixgbe_fc_full;
4813 ixgbe_pbthresh_setup(adapter);
4814 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4815 hw->fc.send_xon = true;
4816 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
4817
4818#ifdef CONFIG_PCI_IOV
4819 /* assign number of SR-IOV VFs */
4820 if (hw->mac.type != ixgbe_mac_82598EB)
4821 adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
4822
4823#endif

	/* enable itr by default in dynamic mode */
4825 adapter->rx_itr_setting = 1;
4826 adapter->tx_itr_setting = 1;

	/* set default ring sizes */
4829 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
4830 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* set default work limits */
4833 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;

	/* initialize eeprom parameters */
4836 if (ixgbe_init_eeprom_params_generic(hw)) {
4837 e_dev_err("EEPROM initialization failed\n");
4838 return -EIO;
4839 }
4840
4841 set_bit(__IXGBE_DOWN, &adapter->state);
4842
4843 return 0;
4844}

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
4852int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
4853{
4854 struct device *dev = tx_ring->dev;
4855 int orig_node = dev_to_node(dev);
4856 int numa_node = -1;
4857 int size;
4858
4859 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4860
4861 if (tx_ring->q_vector)
4862 numa_node = tx_ring->q_vector->numa_node;
4863
4864 tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
4865 if (!tx_ring->tx_buffer_info)
4866 tx_ring->tx_buffer_info = vzalloc(size);
4867 if (!tx_ring->tx_buffer_info)
4868 goto err;

	/* round up to nearest 4K */
4871 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4872 tx_ring->size = ALIGN(tx_ring->size, 4096);
4873
4874 set_dev_node(dev, numa_node);
4875 tx_ring->desc = dma_alloc_coherent(dev,
4876 tx_ring->size,
4877 &tx_ring->dma,
4878 GFP_KERNEL);
4879 set_dev_node(dev, orig_node);
4880 if (!tx_ring->desc)
4881 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4882 &tx_ring->dma, GFP_KERNEL);
4883 if (!tx_ring->desc)
4884 goto err;
4885
4886 tx_ring->next_to_use = 0;
4887 tx_ring->next_to_clean = 0;
4888 return 0;
4889
4890err:
4891 vfree(tx_ring->tx_buffer_info);
4892 tx_ring->tx_buffer_info = NULL;
4893 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4894 return -ENOMEM;
4895}

/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
4907static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4908{
4909 int i, err = 0;
4910
4911 for (i = 0; i < adapter->num_tx_queues; i++) {
4912 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
4913 if (!err)
4914 continue;
4915
4916 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
4917 goto err_setup_tx;
4918 }
4919
4920 return 0;
4921err_setup_tx:
	/* rewind the index freeing the rings as we go */
4923 while (i--)
4924 ixgbe_free_tx_resources(adapter->tx_ring[i]);
4925 return err;
4926}

/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
4934int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
4935{
4936 struct device *dev = rx_ring->dev;
4937 int orig_node = dev_to_node(dev);
4938 int numa_node = -1;
4939 int size;
4940
4941 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4942
4943 if (rx_ring->q_vector)
4944 numa_node = rx_ring->q_vector->numa_node;
4945
4946 rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
4947 if (!rx_ring->rx_buffer_info)
4948 rx_ring->rx_buffer_info = vzalloc(size);
4949 if (!rx_ring->rx_buffer_info)
4950 goto err;

	/* Round up to nearest 4K */
4953 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
4954 rx_ring->size = ALIGN(rx_ring->size, 4096);
4955
4956 set_dev_node(dev, numa_node);
4957 rx_ring->desc = dma_alloc_coherent(dev,
4958 rx_ring->size,
4959 &rx_ring->dma,
4960 GFP_KERNEL);
4961 set_dev_node(dev, orig_node);
4962 if (!rx_ring->desc)
4963 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4964 &rx_ring->dma, GFP_KERNEL);
4965 if (!rx_ring->desc)
4966 goto err;
4967
4968 rx_ring->next_to_clean = 0;
4969 rx_ring->next_to_use = 0;
4970
4971 return 0;
4972err:
4973 vfree(rx_ring->rx_buffer_info);
4974 rx_ring->rx_buffer_info = NULL;
4975 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4976 return -ENOMEM;
4977}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
4989static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
4990{
4991 int i, err = 0;
4992
4993 for (i = 0; i < adapter->num_rx_queues; i++) {
4994 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
4995 if (!err)
4996 continue;
4997
4998 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
4999 goto err_setup_rx;
5000 }
5001
5002#ifdef IXGBE_FCOE
5003 err = ixgbe_setup_fcoe_ddp_resources(adapter);
5004 if (!err)
5005#endif
5006 return 0;
5007err_setup_rx:
	/* rewind the index freeing the rings as we go */
5009 while (i--)
5010 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5011 return err;
5012}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
5020void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
5021{
5022 ixgbe_clean_tx_ring(tx_ring);
5023
5024 vfree(tx_ring->tx_buffer_info);
5025 tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
5028 if (!tx_ring->desc)
5029 return;
5030
5031 dma_free_coherent(tx_ring->dev, tx_ring->size,
5032 tx_ring->desc, tx_ring->dma);
5033
5034 tx_ring->desc = NULL;
5035}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
5043static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5044{
5045 int i;
5046
5047 for (i = 0; i < adapter->num_tx_queues; i++)
5048 if (adapter->tx_ring[i]->desc)
5049 ixgbe_free_tx_resources(adapter->tx_ring[i]);
5050}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
5058void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
5059{
5060 ixgbe_clean_rx_ring(rx_ring);
5061
5062 vfree(rx_ring->rx_buffer_info);
5063 rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
5066 if (!rx_ring->desc)
5067 return;
5068
5069 dma_free_coherent(rx_ring->dev, rx_ring->size,
5070 rx_ring->desc, rx_ring->dma);
5071
5072 rx_ring->desc = NULL;
5073}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
5081static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5082{
5083 int i;
5084
5085#ifdef IXGBE_FCOE
5086 ixgbe_free_fcoe_ddp_resources(adapter);
5087
5088#endif
5089 for (i = 0; i < adapter->num_rx_queues; i++)
5090 if (adapter->rx_ring[i]->desc)
5091 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5092}

/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
5101static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5102{
5103 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5104 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/*
	 * MTU < 68 is an error and causes problems on some kernels.
	 * max_frame adds the Ethernet header and FCS: an MTU of 9000,
	 * for example, yields a 9018 byte frame, inside the jumbo limit.
	 */
5107 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5108 return -EINVAL;

	/*
	 * For 82599EB we cannot allow legacy VFs to enable their receive
	 * paths when MTU greater than 1500 is configured.  So display a
	 * warning that legacy VFs will be disabled.
	 */
5115 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
5116 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
5117 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
5118 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
5119
5120 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);

	/* must set new MTU before calling down or up */
5123 netdev->mtu = new_mtu;
5124
5125 if (netif_running(netdev))
5126 ixgbe_reinit_locked(adapter);
5127
5128 return 0;
5129}

/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
5143static int ixgbe_open(struct net_device *netdev)
5144{
5145 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5146 int err;

	/* disallow open during test */
5149 if (test_bit(__IXGBE_TESTING, &adapter->state))
5150 return -EBUSY;
5151
5152 netif_carrier_off(netdev);

	/* allocate transmit descriptors */
5155 err = ixgbe_setup_all_tx_resources(adapter);
5156 if (err)
5157 goto err_setup_tx;

	/* allocate receive descriptors */
5160 err = ixgbe_setup_all_rx_resources(adapter);
5161 if (err)
5162 goto err_setup_rx;
5163
5164 ixgbe_configure(adapter);
5165
5166 err = ixgbe_request_irq(adapter);
5167 if (err)
5168 goto err_req_irq;

	/* Notify the stack of the actual queue counts. */
5171 err = netif_set_real_num_tx_queues(netdev,
5172 adapter->num_rx_pools > 1 ? 1 :
5173 adapter->num_tx_queues);
5174 if (err)
5175 goto err_set_queues;
5176
5177
5178 err = netif_set_real_num_rx_queues(netdev,
5179 adapter->num_rx_pools > 1 ? 1 :
5180 adapter->num_rx_queues);
5181 if (err)
5182 goto err_set_queues;
5183
5184 ixgbe_ptp_init(adapter);
5185
5186 ixgbe_up_complete(adapter);
5187
5188 return 0;
5189
5190err_set_queues:
5191 ixgbe_free_irq(adapter);
5192err_req_irq:
5193 ixgbe_free_all_rx_resources(adapter);
5194err_setup_rx:
5195 ixgbe_free_all_tx_resources(adapter);
5196err_setup_tx:
5197 ixgbe_reset(adapter);
5198
5199 return err;
5200}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
5213static int ixgbe_close(struct net_device *netdev)
5214{
5215 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5216
5217 ixgbe_ptp_stop(adapter);
5218
5219 ixgbe_down(adapter);
5220 ixgbe_free_irq(adapter);
5221
5222 ixgbe_fdir_filter_exit(adapter);
5223
5224 ixgbe_free_all_tx_resources(adapter);
5225 ixgbe_free_all_rx_resources(adapter);
5226
5227 ixgbe_release_hw_control(adapter);
5228
5229 return 0;
5230}
5231
5232#ifdef CONFIG_PM
5233static int ixgbe_resume(struct pci_dev *pdev)
5234{
5235 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5236 struct net_device *netdev = adapter->netdev;
	int err;
5238
5239 pci_set_power_state(pdev, PCI_D0);
5240 pci_restore_state(pdev);

	/*
	 * pci_restore_state() clears dev->state_saved, so call
	 * pci_save_state() again to restore it.
	 */
5245 pci_save_state(pdev);
5246
5247 err = pci_enable_device_mem(pdev);
5248 if (err) {
5249 e_dev_err("Cannot enable PCI device from suspend\n");
5250 return err;
5251 }
5252 pci_set_master(pdev);
5253
5254 pci_wake_from_d3(pdev, false);
5255
5256 ixgbe_reset(adapter);
5257
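	/* clear the Wake Up Status register; its bits are write-1-to-clear */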
5258 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5259
5260 rtnl_lock();
5261 err = ixgbe_init_interrupt_scheme(adapter);
5262 if (!err && netif_running(netdev))
5263 err = ixgbe_open(netdev);
5264
5265 rtnl_unlock();
5266
5267 if (err)
5268 return err;
5269
5270 netif_device_attach(netdev);
5271
5272 return 0;
5273}
5274#endif
5275
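/*
 * Common power-down path for suspend and shutdown: quiesce the
 * interface, program the Wake-on-LAN filters, and report through
 * @enable_wake whether the caller should leave the device armed for
 * wake when it chooses the final PCI power state.
 */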
5276static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5277{
5278 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5279 struct net_device *netdev = adapter->netdev;
5280 struct ixgbe_hw *hw = &adapter->hw;
5281 u32 ctrl, fctrl;
5282 u32 wufc = adapter->wol;
5283#ifdef CONFIG_PM
5284 int retval = 0;
5285#endif
5286
5287 netif_device_detach(netdev);
5288
5289 rtnl_lock();
5290 if (netif_running(netdev)) {
5291 ixgbe_down(adapter);
5292 ixgbe_free_irq(adapter);
5293 ixgbe_free_all_tx_resources(adapter);
5294 ixgbe_free_all_rx_resources(adapter);
5295 }
5296 rtnl_unlock();
5297
5298 ixgbe_clear_interrupt_scheme(adapter);
5299
5300#ifdef CONFIG_PM
5301 retval = pci_save_state(pdev);
5302 if (retval)
5303 return retval;
5304
5305#endif
5306 if (hw->mac.ops.stop_link_on_d3)
5307 hw->mac.ops.stop_link_on_d3(hw);
5308
5309 if (wufc) {
5310 ixgbe_set_rx_mode(netdev);

		/* enable the optics for 82599 SFP+ fiber as we can WoL */
5313 if (hw->mac.ops.enable_tx_laser)
5314 hw->mac.ops.enable_tx_laser(hw);

		/* turn on all-multi mode if wake on multicast is enabled */
5317 if (wufc & IXGBE_WUFC_MC) {
5318 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5319 fctrl |= IXGBE_FCTRL_MPE;
5320 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5321 }
5322
5323 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
5324 ctrl |= IXGBE_CTRL_GIO_DIS;
5325 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
5326
5327 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
5328 } else {
5329 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
5330 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5331 }
5332
5333 switch (hw->mac.type) {
5334 case ixgbe_mac_82598EB:
5335 pci_wake_from_d3(pdev, false);
5336 break;
5337 case ixgbe_mac_82599EB:
5338 case ixgbe_mac_X540:
5339 pci_wake_from_d3(pdev, !!wufc);
5340 break;
5341 default:
5342 break;
5343 }
5344
5345 *enable_wake = !!wufc;
5346
5347 ixgbe_release_hw_control(adapter);
5348
5349 pci_disable_device(pdev);
5350
5351 return 0;
5352}
5353
5354#ifdef CONFIG_PM
5355static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
5356{
5357 int retval;
5358 bool wake;
5359
5360 retval = __ixgbe_shutdown(pdev, &wake);
5361 if (retval)
5362 return retval;
5363
5364 if (wake) {
5365 pci_prepare_to_sleep(pdev);
5366 } else {
5367 pci_wake_from_d3(pdev, false);
5368 pci_set_power_state(pdev, PCI_D3hot);
5369 }
5370
5371 return 0;
5372}
5373#endif
5374
5375static void ixgbe_shutdown(struct pci_dev *pdev)
5376{
5377 bool wake;
5378
5379 __ixgbe_shutdown(pdev, &wake);
5380
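	/* only enter D3 and arm wake when actually powering off;
	 * a reboot leaves the device in D0
	 */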
5381 if (system_state == SYSTEM_POWER_OFF) {
5382 pci_wake_from_d3(pdev, wake);
5383 pci_set_power_state(pdev, PCI_D3hot);
5384 }
5385}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
5391void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5392{
5393 struct net_device *netdev = adapter->netdev;
5394 struct ixgbe_hw *hw = &adapter->hw;
5395 struct ixgbe_hw_stats *hwstats = &adapter->stats;
5396 u64 total_mpc = 0;
5397 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
5398 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
5399 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
5400 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
5401
5402 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5403 test_bit(__IXGBE_RESETTING, &adapter->state))
5404 return;
5405
5406 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
5407 u64 rsc_count = 0;
5408 u64 rsc_flush = 0;
5409 for (i = 0; i < adapter->num_rx_queues; i++) {
5410 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
5411 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
5412 }
5413 adapter->rsc_total_count = rsc_count;
5414 adapter->rsc_total_flush = rsc_flush;
5415 }
5416
5417 for (i = 0; i < adapter->num_rx_queues; i++) {
5418 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
5419 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
5420 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
5421 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
5422 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
5423 bytes += rx_ring->stats.bytes;
5424 packets += rx_ring->stats.packets;
5425 }
5426 adapter->non_eop_descs = non_eop_descs;
5427 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
5428 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
5429 adapter->hw_csum_rx_error = hw_csum_rx_error;
5430 netdev->stats.rx_bytes = bytes;
5431 netdev->stats.rx_packets = packets;
5432
5433 bytes = 0;
5434 packets = 0;
5435
5436 for (i = 0; i < adapter->num_tx_queues; i++) {
5437 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5438 restart_queue += tx_ring->tx_stats.restart_queue;
5439 tx_busy += tx_ring->tx_stats.tx_busy;
5440 bytes += tx_ring->stats.bytes;
5441 packets += tx_ring->stats.packets;
5442 }
5443 adapter->restart_queue = restart_queue;
5444 adapter->tx_busy = tx_busy;
5445 netdev->stats.tx_bytes = bytes;
5446 netdev->stats.tx_packets = packets;
5447
5448 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);

	/* 8 register reads */
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
5453 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
5454 missed_rx += mpc;
5455 hwstats->mpc[i] += mpc;
5456 total_mpc += hwstats->mpc[i];
5457 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5458 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
5459 switch (hw->mac.type) {
5460 case ixgbe_mac_82598EB:
5461 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
5462 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5463 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
5464 hwstats->pxonrxc[i] +=
5465 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
5466 break;
5467 case ixgbe_mac_82599EB:
5468 case ixgbe_mac_X540:
5469 hwstats->pxonrxc[i] +=
5470 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5471 break;
5472 default:
5473 break;
5474 }
5475 }

	/* 16 register reads */
5478 for (i = 0; i < 16; i++) {
5479 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
5480 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5481 if ((hw->mac.type == ixgbe_mac_82599EB) ||
5482 (hw->mac.type == ixgbe_mac_X540)) {
			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
5487 }
5488 }
5489
	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	hwstats->gprc -= missed_rx;
5493
5494 ixgbe_update_xoff_received(adapter);

	/* 82598 hardware only has a 32 bit counter in the high register */
5497 switch (hw->mac.type) {
5498 case ixgbe_mac_82598EB:
5499 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5500 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5501 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5502 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5503 break;
5504 case ixgbe_mac_X540:
		/* OS2BMC stats are X540 only */
5506 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
5507 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
5508 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
5509 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
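		/* fall through: X540 shares the remaining 82599 counters */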
5510 case ixgbe_mac_82599EB:
5511 for (i = 0; i < 16; i++)
5512 adapter->hw_rx_no_dma_resources +=
5513 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
5520 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5521 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5522 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
5523#ifdef IXGBE_FCOE
5524 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
5525 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
5526 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
5527 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
5528 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5529 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);

		/* Add up per cpu counters for total ddp alloc fail */
5531 if (adapter->fcoe.ddp_pool) {
5532 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
5533 struct ixgbe_fcoe_ddp_pool *ddp_pool;
5534 unsigned int cpu;
5535 u64 noddp = 0, noddp_ext_buff = 0;
5536 for_each_possible_cpu(cpu) {
5537 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
5538 noddp += ddp_pool->noddp;
5539 noddp_ext_buff += ddp_pool->noddp_ext_buff;
5540 }
5541 hwstats->fcoe_noddp = noddp;
5542 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
5543 }
5544#endif
5545 break;
5546 default:
5547 break;
5548 }
5549 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
5550 hwstats->bprc += bprc;
5551 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
5552 if (hw->mac.type == ixgbe_mac_82598EB)
5553 hwstats->mprc -= bprc;
5554 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
5555 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
5556 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
5557 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
5558 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
5559 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
5560 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
5561 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
5562 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
5563 hwstats->lxontxc += lxon;
5564 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
5565 hwstats->lxofftxc += lxoff;
5566 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
5567 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);

	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
5571 xon_off_tot = lxon + lxoff;
5572 hwstats->gptc -= xon_off_tot;
5573 hwstats->mptc -= xon_off_tot;
5574 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
5575 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5576 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
5577 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
5578 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
5579 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
5580 hwstats->ptc64 -= xon_off_tot;
5581 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
5582 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
5583 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
5584 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
5585 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
5586 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
5589 netdev->stats.multicast = hwstats->mprc;

	/* Rx Errors */
5592 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
5593 netdev->stats.rx_dropped = 0;
5594 netdev->stats.rx_length_errors = hwstats->rlec;
5595 netdev->stats.rx_crc_errors = hwstats->crcerrs;
5596 netdev->stats.rx_missed_errors = total_mpc;
5597}

/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/
5603static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
5604{
5605 struct ixgbe_hw *hw = &adapter->hw;
5606 int i;
5607
5608 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
5609 return;
5610
5611 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;

	/* if interface is down do nothing */
5614 if (test_bit(__IXGBE_DOWN, &adapter->state))
5615 return;

	/* do nothing if we are not using signature filters */
5618 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
5619 return;
5620
5621 adapter->fdir_overflow++;
5622
5623 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5624 for (i = 0; i < adapter->num_tx_queues; i++)
5625 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
5626 &(adapter->tx_ring[i]->state));

		/* re-enable flow director interrupts */
5628 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
5629 } else {
5630 e_err(probe, "failed to finish FDIR re-initialization, "
5631 "ignored adding FDIR ATR filters\n");
5632 }
5633}

/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 */
5644static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
5645{
5646 struct ixgbe_hw *hw = &adapter->hw;
5647 u64 eics = 0;
5648 int i;

	/* If we're down or resetting, just bail */
5651 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5652 test_bit(__IXGBE_RESETTING, &adapter->state))
5653 return;

	/* Force detection of hung controller */
5656 if (netif_carrier_ok(adapter->netdev)) {
5657 for (i = 0; i < adapter->num_tx_queues; i++)
5658 set_check_for_tx_hang(adapter->tx_ring[i]);
5659 }
5660
5661 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
5667 IXGBE_WRITE_REG(hw, IXGBE_EICS,
5668 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
5669 } else {
		/* get one bit for every active tx/rx interrupt vector */
5671 for (i = 0; i < adapter->num_q_vectors; i++) {
5672 struct ixgbe_q_vector *qv = adapter->q_vector[i];
5673 if (qv->rx.ring || qv->tx.ring)
5674 eics |= ((u64)1 << i);
5675 }
5676 }

	/* Cause software interrupt to ensure rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);
}
5682
/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
5688static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5689{
5690 struct ixgbe_hw *hw = &adapter->hw;
5691 u32 link_speed = adapter->link_speed;
5692 bool link_up = adapter->link_up;
5693 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
5694
5695 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
5696 return;
5697
5698 if (hw->mac.ops.check_link) {
5699 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
5700 } else {
		/* always assume link is up, if no check link function */
5702 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5703 link_up = true;
5704 }
5705
5706 if (adapter->ixgbe_ieee_pfc)
5707 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
5708
5709 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
5710 hw->mac.ops.fc_enable(hw);
5711 ixgbe_set_rx_drop_en(adapter);
5712 }
5713
5714 if (link_up ||
5715 time_after(jiffies, (adapter->link_check_timeout +
5716 IXGBE_TRY_LINK_TIMEOUT))) {
5717 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5718 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
5719 IXGBE_WRITE_FLUSH(hw);
5720 }
5721
5722 adapter->link_up = link_up;
5723 adapter->link_speed = link_speed;
5724}
5725
5726static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
5727{
5728#ifdef CONFIG_IXGBE_DCB
5729 struct net_device *netdev = adapter->netdev;
5730 struct dcb_app app = {
5731 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
5732 .protocol = 0,
5733 };
5734 u8 up = 0;
5735
5736 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
5737 up = dcb_ieee_getapp_mask(netdev, &app);
5738
5739 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
5740#endif
5741}
5742
5743
/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter: pointer to the device adapter structure
 **/
5748static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5749{
5750 struct net_device *netdev = adapter->netdev;
5751 struct ixgbe_hw *hw = &adapter->hw;
5752 u32 link_speed = adapter->link_speed;
5753 bool flow_rx, flow_tx;

	/* only continue if link was previously down */
5756 if (netif_carrier_ok(netdev))
5757 return;
5758
5759 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
5760
5761 switch (hw->mac.type) {
5762 case ixgbe_mac_82598EB: {
5763 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5764 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
5765 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5766 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5767 }
5768 break;
5769 case ixgbe_mac_X540:
5770 case ixgbe_mac_82599EB: {
5771 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
5772 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
5773 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5774 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
5775 }
5776 break;
5777 default:
5778 flow_tx = false;
5779 flow_rx = false;
5780 break;
5781 }
5782
5783 adapter->last_rx_ptp_check = jiffies;
5784
5785 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5786 ixgbe_ptp_start_cyclecounter(adapter);
5787
5788 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
5789 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
5790 "10 Gbps" :
5791 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
5792 "1 Gbps" :
5793 (link_speed == IXGBE_LINK_SPEED_100_FULL ?
5794 "100 Mbps" :
5795 "unknown speed"))),
5796 ((flow_rx && flow_tx) ? "RX/TX" :
5797 (flow_rx ? "RX" :
5798 (flow_tx ? "TX" : "None"))));
5799
5800 netif_carrier_on(netdev);
5801 ixgbe_check_vf_rate_limit(adapter);

	/* update the default user priority for VFs */
5804 ixgbe_update_default_up(adapter);

	/* ping all the active vfs to let them know link has changed */
5807 ixgbe_ping_all_vfs(adapter);
5808}
5809
/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter: pointer to the adapter structure
 **/
5815static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
5816{
5817 struct net_device *netdev = adapter->netdev;
5818 struct ixgbe_hw *hw = &adapter->hw;
5819
5820 adapter->link_up = false;
5821 adapter->link_speed = 0;

	/* only continue if link was up previously */
5824 if (!netif_carrier_ok(netdev))
5825 return;

	/* poll for SFP+ cable when link is down */
5828 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
5829 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5830
5831 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5832 ixgbe_ptp_start_cyclecounter(adapter);
5833
5834 e_info(drv, "NIC Link is Down\n");
5835 netif_carrier_off(netdev);

	/* ping all the active vfs to let them know link has changed */
5838 ixgbe_ping_all_vfs(adapter);
5839}
5840
/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/
5845static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
5846{
5847 int i;
5848 int some_tx_pending = 0;
5849
5850 if (!netif_carrier_ok(adapter->netdev)) {
5851 for (i = 0; i < adapter->num_tx_queues; i++) {
5852 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5853 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
5854 some_tx_pending = 1;
5855 break;
5856 }
5857 }
5858
5859 if (some_tx_pending) {
			/*
			 * We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
5865 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
5866 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
5867 }
5868 }
5869}
5870
5871static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
5872{
5873 u32 ssvpc;

	/* Do not perform spoof check for 82598 or if not in IOV mode */
5876 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
5877 adapter->num_vfs == 0)
5878 return;
5879
5880 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);

	/*
	 * ssvpc register is cleared on read, if zero then no
	 * spoofed packets in the last interval.
	 */
5886 if (!ssvpc)
5887 return;
5888
5889 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
5890}
5891
/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/
5896static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
5897{
	/* if interface is down do nothing */
5899 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5900 test_bit(__IXGBE_RESETTING, &adapter->state))
5901 return;
5902
5903 ixgbe_watchdog_update_link(adapter);
5904
5905 if (adapter->link_up)
5906 ixgbe_watchdog_link_is_up(adapter);
5907 else
5908 ixgbe_watchdog_link_is_down(adapter);
5909
5910 ixgbe_spoof_check(adapter);
5911 ixgbe_update_stats(adapter);
5912
5913 ixgbe_watchdog_flush_tx(adapter);
5914}
5915
/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/
5920static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
5921{
5922 struct ixgbe_hw *hw = &adapter->hw;
5923 s32 err;

	/* not searching for SFP so there is nothing to do here */
5926 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
5927 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
5928 return;

	/* someone else is in init, wait until next service event */
5931 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5932 return;
5933
5934 err = hw->phy.ops.identify_sfp(hw);
5935 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
5936 goto sfp_out;
5937
5938 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* If no cable is present, then we need to reset
		 * the next time we find a good cable. */
5941 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5942 }

	/* exit on error */
5945 if (err)
5946 goto sfp_out;

	/* exit if reset not needed */
5949 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
5950 goto sfp_out;
5951
5952 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;

	/*
	 * A module may be identified correctly, but the EEPROM may not have
	 * support for that module.  setup_sfp() will fail in that case, so
	 * we should not allow that module to load.
	 */
5959 if (hw->mac.type == ixgbe_mac_82598EB)
5960 err = hw->phy.ops.reset(hw);
5961 else
5962 err = hw->mac.ops.setup_sfp(hw);
5963
5964 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
5965 goto sfp_out;
5966
5967 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
5968 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
5969
5970sfp_out:
5971 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5972
5973 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
5974 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
5975 e_dev_err("failed to initialize because an unsupported "
5976 "SFP+ module type was detected.\n");
5977 e_dev_err("Reload the driver after installing a "
5978 "supported module.\n");
5979 unregister_netdev(adapter->netdev);
5980 }
5981}
5982
/**
 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
 * @adapter: the ixgbe adapter structure
 **/
5987static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
5988{
5989 struct ixgbe_hw *hw = &adapter->hw;
5990 u32 speed;
5991 bool autoneg = false;
5992
5993 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
5994 return;

	/* someone else is in init, wait until next service event */
5997 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5998 return;
5999
6000 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
6001
6002 speed = hw->phy.autoneg_advertised;
6003 if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
6004 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);

		/* setup the highest link when no autoneg */
6007 if (!autoneg) {
6008 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
6009 speed = IXGBE_LINK_SPEED_10GB_FULL;
6010 }
6011 }
6012
6013 if (hw->mac.ops.setup_link)
6014 hw->mac.ops.setup_link(hw, speed, true);
6015
6016 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
6017 adapter->link_check_timeout = jiffies;
6018 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6019}
6020
6021#ifdef CONFIG_PCI_IOV
6022static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
6023{
6024 int vf;
6025 struct ixgbe_hw *hw = &adapter->hw;
6026 struct net_device *netdev = adapter->netdev;
6027 u32 gpc;
6028 u32 ciaa, ciad;
6029
	gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
	if (gpc) /* If incrementing then no need for the check below */
		return;

	/*
	 * Check to see if a bad DMA write target from an errant or
	 * malicious VF has caused a PCIe error.  If so then we can
	 * issue a VFLR to the offending VF(s) and then resume without
	 * requesting a full slot reset.
	 */
6040 for (vf = 0; vf < adapter->num_vfs; vf++) {
6041 ciaa = (vf << 16) | 0x80000000;
		/* 32 bit read so align, we really want status at offset 6 */
6043 ciaa |= PCI_COMMAND;
6044 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
6045 ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599);
6046 ciaa &= 0x7FFFFFFF;
		/* disable debug mode asap after reading data */
6048 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
		/* Get the upper 16 bits which will be the PCI status reg */
6050 ciad >>= 16;
6051 if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
6052 netdev_err(netdev, "VF %d Hung DMA\n", vf);
			/* Issue VFLR */
6054 ciaa = (vf << 16) | 0x80000000;
6055 ciaa |= 0xA8;
6056 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
6057 ciad = 0x00008000;
6058 IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad);
6059 ciaa &= 0x7FFFFFFF;
6060 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
6061 }
6062 }
6063}
6064
6065#endif
6066
/**
 * ixgbe_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
6070static void ixgbe_service_timer(unsigned long data)
6071{
6072 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
6073 unsigned long next_event_offset;
6074 bool ready = true;

	/* poll faster when waiting for link */
6077 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
6078 next_event_offset = HZ / 10;
6079 else
6080 next_event_offset = HZ * 2;
6081
6082#ifdef CONFIG_PCI_IOV
	/*
	 * don't bother with SR-IOV VF DMA hang check if there are
	 * no VFs or the link is down
	 */
6087 if (!adapter->num_vfs ||
6088 (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
6089 goto normal_timer_service;

	/* If we have VFs allocated then we must check for DMA hangs */
6092 ixgbe_check_for_bad_vf(adapter);
6093 next_event_offset = HZ / 50;
6094 adapter->timer_event_accumulator++;
6095
6096 if (adapter->timer_event_accumulator >= 100)
6097 adapter->timer_event_accumulator = 0;
6098 else
6099 ready = false;
6100
6101normal_timer_service:
6102#endif

	/* Reset the timer */
6104 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
6105
6106 if (ready)
6107 ixgbe_service_event_schedule(adapter);
6108}
6109
6110static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
6111{
6112 if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
6113 return;
6114
6115 adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;

	/* If we're already down or resetting, just bail */
6118 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6119 test_bit(__IXGBE_RESETTING, &adapter->state))
6120 return;
6121
6122 ixgbe_dump(adapter);
6123 netdev_err(adapter->netdev, "Reset adapter\n");
6124 adapter->tx_timeout_count++;
6125
6126 ixgbe_reinit_locked(adapter);
6127}
6128
/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
6133static void ixgbe_service_task(struct work_struct *work)
6134{
6135 struct ixgbe_adapter *adapter = container_of(work,
6136 struct ixgbe_adapter,
6137 service_task);
6138 ixgbe_reset_subtask(adapter);
6139 ixgbe_sfp_detection_subtask(adapter);
6140 ixgbe_sfp_link_config_subtask(adapter);
6141 ixgbe_check_overtemp_subtask(adapter);
6142 ixgbe_watchdog_subtask(adapter);
6143 ixgbe_fdir_reinit_subtask(adapter);
6144 ixgbe_check_hang_subtask(adapter);
6145
6146 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
6147 ixgbe_ptp_overflow_check(adapter);
6148 ixgbe_ptp_rx_hang(adapter);
6149 }
6150
6151 ixgbe_service_event_complete(adapter);
6152}
6153
6154static int ixgbe_tso(struct ixgbe_ring *tx_ring,
6155 struct ixgbe_tx_buffer *first,
6156 u8 *hdr_len)
6157{
6158 struct sk_buff *skb = first->skb;
6159 u32 vlan_macip_lens, type_tucmd;
6160 u32 mss_l4len_idx, l4len;
6161
6162 if (skb->ip_summed != CHECKSUM_PARTIAL)
6163 return 0;
6164
6165 if (!skb_is_gso(skb))
6166 return 0;
6167
6168 if (skb_header_cloned(skb)) {
6169 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
6170 if (err)
6171 return err;
6172 }

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
6175 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
6176
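	/*
	 * For TSO the stack hands us CHECKSUM_PARTIAL: zero the IP total
	 * length and seed the TCP checksum with the pseudo-header so the
	 * hardware can finish each segment's checksums on the fly.
	 */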
6177 if (first->protocol == __constant_htons(ETH_P_IP)) {
6178 struct iphdr *iph = ip_hdr(skb);
6179 iph->tot_len = 0;
6180 iph->check = 0;
6181 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6182 iph->daddr, 0,
6183 IPPROTO_TCP,
6184 0);
6185 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
6186 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
6187 IXGBE_TX_FLAGS_CSUM |
6188 IXGBE_TX_FLAGS_IPV4;
6189 } else if (skb_is_gso_v6(skb)) {
6190 ipv6_hdr(skb)->payload_len = 0;
6191 tcp_hdr(skb)->check =
6192 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
6193 &ipv6_hdr(skb)->daddr,
6194 0, IPPROTO_TCP, 0);
6195 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
6196 IXGBE_TX_FLAGS_CSUM;
6197 }

	/* compute header lengths */
6200 l4len = tcp_hdrlen(skb);
6201 *hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
6204 first->gso_segs = skb_shinfo(skb)->gso_segs;
6205 first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 0 as index for TSO */
6208 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
6209 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
6212 vlan_macip_lens = skb_network_header_len(skb);
6213 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
6214 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
6215
6216 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
6217 mss_l4len_idx);
6218
6219 return 1;
6220}
6221
6222static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
6223 struct ixgbe_tx_buffer *first)
6224{
6225 struct sk_buff *skb = first->skb;
6226 u32 vlan_macip_lens = 0;
6227 u32 mss_l4len_idx = 0;
6228 u32 type_tucmd = 0;
6229
6230 if (skb->ip_summed != CHECKSUM_PARTIAL) {
6231 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
6232 !(first->tx_flags & IXGBE_TX_FLAGS_CC))
6233 return;
6234 } else {
6235 u8 l4_hdr = 0;
6236 switch (first->protocol) {
6237 case __constant_htons(ETH_P_IP):
6238 vlan_macip_lens |= skb_network_header_len(skb);
6239 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
6240 l4_hdr = ip_hdr(skb)->protocol;
6241 break;
6242 case __constant_htons(ETH_P_IPV6):
6243 vlan_macip_lens |= skb_network_header_len(skb);
6244 l4_hdr = ipv6_hdr(skb)->nexthdr;
6245 break;
6246 default:
6247 if (unlikely(net_ratelimit())) {
6248 dev_warn(tx_ring->dev,
6249 "partial checksum but proto=%x!\n",
6250 first->protocol);
6251 }
6252 break;
6253 }
6254
6255 switch (l4_hdr) {
6256 case IPPROTO_TCP:
6257 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
6258 mss_l4len_idx = tcp_hdrlen(skb) <<
6259 IXGBE_ADVTXD_L4LEN_SHIFT;
6260 break;
6261 case IPPROTO_SCTP:
6262 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
6263 mss_l4len_idx = sizeof(struct sctphdr) <<
6264 IXGBE_ADVTXD_L4LEN_SHIFT;
6265 break;
6266 case IPPROTO_UDP:
6267 mss_l4len_idx = sizeof(struct udphdr) <<
6268 IXGBE_ADVTXD_L4LEN_SHIFT;
6269 break;
6270 default:
6271 if (unlikely(net_ratelimit())) {
6272 dev_warn(tx_ring->dev,
6273 "partial checksum but l4 proto=%x!\n",
6274 l4_hdr);
6275 }
6276 break;
6277 }

		/* update TX checksum flag */
6280 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
6281 }

	/* vlan_macip_lens: MACLEN, VLAN tag */
6284 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
6285 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
6286
6287 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
6288 type_tucmd, mss_l4len_idx);
6289}
6290
6291#define IXGBE_SET_FLAG(_input, _flag, _result) \
6292 ((_flag <= _result) ? \
6293 ((u32)(_input & _flag) * (_result / _flag)) : \
6294 ((u32)(_input & _flag) / (_flag / _result)))
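
/*
 * IXGBE_SET_FLAG() moves a flag bit in _input to the matching bit in
 * _result without branching, e.g. IXGBE_SET_FLAG(tx_flags,
 * IXGBE_TX_FLAGS_HW_VLAN, IXGBE_ADVTXD_DCMD_VLE) scales the VLAN flag
 * up to the VLE bit position; with constant masks the multiply or
 * divide folds down to a single shift at compile time.
 */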
6295
6296static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)

	/* set type for advanced descriptor with frame checksum insertion */
6299 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
6300 IXGBE_ADVTXD_DCMD_DEXT |
6301 IXGBE_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
6304 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
6305 IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
6308 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
6309 IXGBE_ADVTXD_DCMD_TSE);

	/* set timestamp bit if present */
6312 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
6313 IXGBE_ADVTXD_MAC_TSTAMP);

	/* insert frame checksum */
6316 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
6317
6318 return cmd_type;
6319}
6320
6321static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
6322 u32 tx_flags, unsigned int paylen)
6323{
6324 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* enable L4 checksum for TSO and TX checksum offload */
6327 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
6328 IXGBE_TX_FLAGS_CSUM,
6329 IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
6332 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
6333 IXGBE_TX_FLAGS_IPV4,
6334 IXGBE_ADVTXD_POPTS_IXSM);

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
6340 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
6341 IXGBE_TX_FLAGS_CC,
6342 IXGBE_ADVTXD_CC);
6343
6344 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6345}
6346
6347#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
6348 IXGBE_TXD_CMD_RS)
6349
6350static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
6351 struct ixgbe_tx_buffer *first,
6352 const u8 hdr_len)
6353{
6354 struct sk_buff *skb = first->skb;
6355 struct ixgbe_tx_buffer *tx_buffer;
6356 union ixgbe_adv_tx_desc *tx_desc;
6357 struct skb_frag_struct *frag;
6358 dma_addr_t dma;
6359 unsigned int data_len, size;
6360 u32 tx_flags = first->tx_flags;
6361 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
6362 u16 i = tx_ring->next_to_use;
6363
6364 tx_desc = IXGBE_TX_DESC(tx_ring, i);
6365
6366 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
6367
6368 size = skb_headlen(skb);
6369 data_len = skb->data_len;
6370
6371#ifdef IXGBE_FCOE
6372 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6373 if (data_len < sizeof(struct fcoe_crc_eof)) {
6374 size -= sizeof(struct fcoe_crc_eof) - data_len;
6375 data_len = 0;
6376 } else {
6377 data_len -= sizeof(struct fcoe_crc_eof);
6378 }
6379 }
6380
6381#endif
6382 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
6383
6384 tx_buffer = first;
6385
6386 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
6387 if (dma_mapping_error(tx_ring->dev, dma))
6388 goto dma_error;

		/* record length, and DMA address */
6391 dma_unmap_len_set(tx_buffer, len, size);
6392 dma_unmap_addr_set(tx_buffer, dma, dma);
6393
6394 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6395
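		/*
		 * a fragment may span more than one descriptor worth of
		 * data (IXGBE_MAX_DATA_PER_TXD), so emit additional
		 * descriptors for the excess; XOR acts as OR here because
		 * the length bits of cmd_type are still clear
		 */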
6396 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
6397 tx_desc->read.cmd_type_len =
6398 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
6399
6400 i++;
6401 tx_desc++;
6402 if (i == tx_ring->count) {
6403 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6404 i = 0;
6405 }
6406 tx_desc->read.olinfo_status = 0;
6407
6408 dma += IXGBE_MAX_DATA_PER_TXD;
6409 size -= IXGBE_MAX_DATA_PER_TXD;
6410
6411 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6412 }
6413
6414 if (likely(!data_len))
6415 break;
6416
6417 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
6418
6419 i++;
6420 tx_desc++;
6421 if (i == tx_ring->count) {
6422 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6423 i = 0;
6424 }
6425 tx_desc->read.olinfo_status = 0;
6426
6427#ifdef IXGBE_FCOE
6428 size = min_t(unsigned int, data_len, skb_frag_size(frag));
6429#else
6430 size = skb_frag_size(frag);
6431#endif
6432 data_len -= size;
6433
6434 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
6435 DMA_TO_DEVICE);
6436
6437 tx_buffer = &tx_ring->tx_buffer_info[i];
6438 }

	/* write last descriptor with RS and EOP bits */
6441 cmd_type |= size | IXGBE_TXD_CMD;
6442 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6443
6444 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
6447 first->time_stamp = jiffies;

	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
6457 wmb();

	/* set next_to_watch value indicating a packet is present */
6460 first->next_to_watch = tx_desc;
6461
6462 i++;
6463 if (i == tx_ring->count)
6464 i = 0;
6465
6466 tx_ring->next_to_use = i;

	/* notify HW of packet */
6469 writel(i, tx_ring->tail);
6470
6471 return;
6472dma_error:
6473 dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
6476 for (;;) {
6477 tx_buffer = &tx_ring->tx_buffer_info[i];
6478 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
6479 if (tx_buffer == first)
6480 break;
6481 if (i == 0)
6482 i = tx_ring->count;
6483 i--;
6484 }
6485
6486 tx_ring->next_to_use = i;
6487}
6488
6489static void ixgbe_atr(struct ixgbe_ring *ring,
6490 struct ixgbe_tx_buffer *first)
6491{
6492 struct ixgbe_q_vector *q_vector = ring->q_vector;
6493 union ixgbe_atr_hash_dword input = { .dword = 0 };
6494 union ixgbe_atr_hash_dword common = { .dword = 0 };
6495 union {
6496 unsigned char *network;
6497 struct iphdr *ipv4;
6498 struct ipv6hdr *ipv6;
6499 } hdr;
6500 struct tcphdr *th;
6501 __be16 vlan_id;

	/* if ring doesn't have an interrupt vector, cannot perform ATR */
6504 if (!q_vector)
6505 return;

	/* do nothing if sampling is disabled */
6508 if (!ring->atr_sample_rate)
6509 return;
6510
6511 ring->atr_count++;

	/* snag network header to get L4 type and address */
6514 hdr.network = skb_network_header(first->skb);

	/* Currently only IPv4/IPv6 with TCP is supported */
6517 if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
6518 hdr.ipv6->nexthdr != IPPROTO_TCP) &&
6519 (first->protocol != __constant_htons(ETH_P_IP) ||
6520 hdr.ipv4->protocol != IPPROTO_TCP))
6521 return;
6522
6523 th = tcp_hdr(first->skb);

	/* skip this packet since it is invalid or the socket is closing */
6526 if (!th || th->fin)
6527 return;

	/* sample on all syn packets or once every atr sample count */
6530 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
6531 return;

	/* reset sample count */
6534 ring->atr_count = 0;
6535
6536 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);

	/*
	 * src and dst are inverted, think how the receiver sees them
	 *
	 * The input is broken into two sections: a non-compressed section
	 * carrying the vlan_id, and a "common" section built below by
	 * XORing the ports and addresses together.
	 */
6545 input.formatted.vlan_id = vlan_id;

	/*
	 * since src port and flex bytes occupy the same word XOR them together
	 * and write the value to source port portion of compressed dword
	 */
6551 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
6552 common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
6553 else
6554 common.port.src ^= th->dest ^ first->protocol;
6555 common.port.dst ^= th->source;
6556
6557 if (first->protocol == __constant_htons(ETH_P_IP)) {
6558 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
6559 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
6560 } else {
6561 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
6562 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
6563 hdr.ipv6->saddr.s6_addr32[1] ^
6564 hdr.ipv6->saddr.s6_addr32[2] ^
6565 hdr.ipv6->saddr.s6_addr32[3] ^
6566 hdr.ipv6->daddr.s6_addr32[0] ^
6567 hdr.ipv6->daddr.s6_addr32[1] ^
6568 hdr.ipv6->daddr.s6_addr32[2] ^
6569 hdr.ipv6->daddr.s6_addr32[3];
6570 }

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
6573 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
6574 input, common, ring->queue_index);
6575}
6576
6577static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6578{
6579 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* memory barrier between stopping the queue and re-checking
	 * for free descriptors below
	 */
6583 smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
6587 if (likely(ixgbe_desc_unused(tx_ring) < size))
6588 return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
6591 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
6592 ++tx_ring->tx_stats.restart_queue;
6593 return 0;
6594}
6595
6596static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6597{
6598 if (likely(ixgbe_desc_unused(tx_ring) >= size))
6599 return 0;
6600 return __ixgbe_maybe_stop_tx(tx_ring, size);
6601}
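
/*
 * Callers reserve descriptors up front: ixgbe_xmit_frame_ring() checks
 * for count + 3 free descriptors before touching the ring, then
 * re-checks with DESC_NEEDED once the frame is queued, so the subqueue
 * stops before the ring can overflow.
 */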
6602
6603#ifdef IXGBE_FCOE
6604static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6605{
6606 struct ixgbe_adapter *adapter;
6607 struct ixgbe_ring_feature *f;
6608 int txq;

	/*
	 * only execute the code below if protocol is FCoE
	 * or FIP and we have FCoE enabled on the adapter
	 */
6614 switch (vlan_get_protocol(skb)) {
6615 case __constant_htons(ETH_P_FCOE):
6616 case __constant_htons(ETH_P_FIP):
6617 adapter = netdev_priv(dev);
6618
6619 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
6620 break;
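		/* fall through when FCoE is not enabled on the adapter */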
6621 default:
6622 return __netdev_pick_tx(dev, skb);
6623 }
6624
6625 f = &adapter->ring_feature[RING_F_FCOE];
6626
6627 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
6628 smp_processor_id();
6629
6630 while (txq >= f->indices)
6631 txq -= f->indices;
6632
6633 return txq + f->offset;
6634}
6635
6636#endif
6637netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6638 struct ixgbe_adapter *adapter,
6639 struct ixgbe_ring *tx_ring)
6640{
6641 struct ixgbe_tx_buffer *first;
6642 int tso;
6643 u32 tx_flags = 0;
6644 unsigned short f;
6645 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6646 __be16 protocol = skb->protocol;
6647 u8 hdr_len = 0;

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
6655
6656 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6657 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6658
6659 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
6660 tx_ring->tx_stats.tx_busy++;
6661 return NETDEV_TX_BUSY;
6662 }

	/* record the location of the first descriptor for this packet */
6665 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6666 first->skb = skb;
6667 first->bytecount = skb->len;
6668 first->gso_segs = 1;

	/* if we have a HW VLAN tag being added default to the HW one */
6671 if (vlan_tx_tag_present(skb)) {
6672 tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
6673 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN check the next protocol and store the tag */
6675 } else if (protocol == __constant_htons(ETH_P_8021Q)) {
6676 struct vlan_hdr *vhdr, _vhdr;
6677 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
6678 if (!vhdr)
6679 goto out_drop;
6680
6681 protocol = vhdr->h_vlan_encapsulated_proto;
6682 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
6683 IXGBE_TX_FLAGS_VLAN_SHIFT;
6684 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
6685 }
6686
6687 skb_tx_timestamp(skb);
6688
6689 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6690 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6691 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;

		/* schedule check for Tx timestamp */
6694 adapter->ptp_tx_skb = skb_get(skb);
6695 adapter->ptp_tx_start = jiffies;
6696 schedule_work(&adapter->ptp_tx_work);
6697 }
6698
6699#ifdef CONFIG_PCI_IOV
	/*
	 * Use the l2switch_enable flag - would be false if the DMA
	 * Tx switch had been disabled.
	 */
6704 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6705 tx_flags |= IXGBE_TX_FLAGS_CC;
6706
6707#endif

	/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
6709 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
6710 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
6711 (skb->priority != TC_PRIO_CONTROL))) {
6712 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
6713 tx_flags |= (skb->priority & 0x7) <<
6714 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
6715 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
6716 struct vlan_ethhdr *vhdr;
6717 if (skb_header_cloned(skb) &&
6718 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6719 goto out_drop;
6720 vhdr = (struct vlan_ethhdr *)skb->data;
6721 vhdr->h_vlan_TCI = htons(tx_flags >>
6722 IXGBE_TX_FLAGS_VLAN_SHIFT);
6723 } else {
6724 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
6725 }
6726 }

	/* record initial flags and protocol */
6729 first->tx_flags = tx_flags;
6730 first->protocol = protocol;
6731

	/* setup tx offload for FCoE */
6734 if ((protocol == __constant_htons(ETH_P_FCOE)) &&
6735 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
6736 tso = ixgbe_fso(tx_ring, first, &hdr_len);
6737 if (tso < 0)
6738 goto out_drop;
6739
6740 goto xmit_fcoe;
6741 }
6742
6743#endif
6744 tso = ixgbe_tso(tx_ring, first, &hdr_len);
6745 if (tso < 0)
6746 goto out_drop;
6747 else if (!tso)
6748 ixgbe_tx_csum(tx_ring, first);

	/* add the ATR filter if ATR is on */
6751 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
6752 ixgbe_atr(tx_ring, first);
6753
6754#ifdef IXGBE_FCOE
6755xmit_fcoe:
6756#endif
6757 ixgbe_tx_map(tx_ring, first, hdr_len);
6758
6759 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
6760
6761 return NETDEV_TX_OK;
6762
6763out_drop:
6764 dev_kfree_skb_any(first->skb);
6765 first->skb = NULL;
6766
6767 return NETDEV_TX_OK;
6768}
6769
6770static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6771 struct net_device *netdev)
6772{
6773 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6774 struct ixgbe_ring *tx_ring;

	/*
	 * The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
6780 if (unlikely(skb->len < 17)) {
6781 if (skb_pad(skb, 17 - skb->len))
6782 return NETDEV_TX_OK;
6783 skb->len = 17;
6784 skb_set_tail_pointer(skb, 17);
6785 }
6786
6787 tx_ring = adapter->tx_ring[skb->queue_mapping];
6788 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
6789}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
6798static int ixgbe_set_mac(struct net_device *netdev, void *p)
6799{
6800 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6801 struct ixgbe_hw *hw = &adapter->hw;
6802 struct sockaddr *addr = p;
6803
6804 if (!is_valid_ether_addr(addr->sa_data))
6805 return -EADDRNOTAVAIL;
6806
6807 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
6808 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
6809
6810 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
6811
6812 return 0;
6813}
6814
6815static int
6816ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
6817{
6818 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6819 struct ixgbe_hw *hw = &adapter->hw;
6820 u16 value;
6821 int rc;
6822
6823 if (prtad != hw->phy.mdio.prtad)
6824 return -EINVAL;
6825 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
6826 if (!rc)
6827 rc = value;
6828 return rc;
6829}
6830
6831static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
6832 u16 addr, u16 value)
6833{
6834 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6835 struct ixgbe_hw *hw = &adapter->hw;
6836
6837 if (prtad != hw->phy.mdio.prtad)
6838 return -EINVAL;
6839 return hw->phy.ops.write_reg(hw, addr, devad, value);
6840}
6841
6842static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
6843{
6844 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6845
6846 switch (cmd) {
6847 case SIOCSHWTSTAMP:
6848 return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
6849 default:
6850 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
6851 }
6852}

/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
6861static int ixgbe_add_sanmac_netdev(struct net_device *dev)
6862{
6863 int err = 0;
6864 struct ixgbe_adapter *adapter = netdev_priv(dev);
6865 struct ixgbe_hw *hw = &adapter->hw;
6866
6867 if (is_valid_ether_addr(hw->mac.san_addr)) {
6868 rtnl_lock();
6869 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
6870 rtnl_unlock();

		/* update SAN MAC vmdq pool selection */
6873 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
6874 }
6875 return err;
6876}

/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
6885static int ixgbe_del_sanmac_netdev(struct net_device *dev)
6886{
6887 int err = 0;
6888 struct ixgbe_adapter *adapter = netdev_priv(dev);
6889 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6890
6891 if (is_valid_ether_addr(mac->san_addr)) {
6892 rtnl_lock();
6893 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
6894 rtnl_unlock();
6895 }
6896 return err;
6897}
6898
6899#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
6905static void ixgbe_netpoll(struct net_device *netdev)
6906{
6907 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6908 int i;

	/* if interface is down do nothing */
6911 if (test_bit(__IXGBE_DOWN, &adapter->state))
6912 return;
6913
6914 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
6915 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
6916 for (i = 0; i < adapter->num_q_vectors; i++)
6917 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
6918 } else {
6919 ixgbe_intr(adapter->pdev->irq, netdev);
6920 }
6921 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
6922}
6923
6924#endif
6925static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6926 struct rtnl_link_stats64 *stats)
6927{
6928 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6929 int i;
6930
6931 rcu_read_lock();
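	/* each ring keeps 64-bit byte/packet counts behind a seqcount;
	 * retry until no writer raced us so the snapshot is consistent
	 * even on 32-bit hosts
	 */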
6932 for (i = 0; i < adapter->num_rx_queues; i++) {
6933 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
6934 u64 bytes, packets;
6935 unsigned int start;
6936
6937 if (ring) {
6938 do {
6939 start = u64_stats_fetch_begin_bh(&ring->syncp);
6940 packets = ring->stats.packets;
6941 bytes = ring->stats.bytes;
6942 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6943 stats->rx_packets += packets;
6944 stats->rx_bytes += bytes;
6945 }
6946 }
6947
6948 for (i = 0; i < adapter->num_tx_queues; i++) {
6949 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
6950 u64 bytes, packets;
6951 unsigned int start;
6952
6953 if (ring) {
6954 do {
6955 start = u64_stats_fetch_begin_bh(&ring->syncp);
6956 packets = ring->stats.packets;
6957 bytes = ring->stats.bytes;
6958 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6959 stats->tx_packets += packets;
6960 stats->tx_bytes += bytes;
6961 }
6962 }
6963 rcu_read_unlock();
6964
6965 stats->multicast = netdev->stats.multicast;
6966 stats->rx_errors = netdev->stats.rx_errors;
6967 stats->rx_length_errors = netdev->stats.rx_length_errors;
6968 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
6969 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
6970 return stats;
6971}
6972
6973#ifdef CONFIG_IXGBE_DCB

/**
 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
 * @adapter: Pointer to adapter struct
 * @tc: number of traffic classes currently enabled
 *
 * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
 * each 802.1Q priority maps to a packet buffer that exists.
 */
6982static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
6983{
6984 struct ixgbe_hw *hw = &adapter->hw;
6985 u32 reg, rsave;
6986 int i;
6987
	/* 82598 have a static priority to TC mapping that can not
	 * be changed so no validation is needed.
	 */
6991 if (hw->mac.type == ixgbe_mac_82598EB)
6992 return;
6993
6994 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
6995 rsave = reg;
6996
6997 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
6998 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);

		/* If up2tc is out of bounds default to zero */
7001 if (up2tc > tc)
7002 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
7003 }
7004
7005 if (reg != rsave)
7006 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
7007
7008 return;
7009}
7010
/**
 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
 * @adapter: Pointer to adapter struct
 *
 * Populate the netdev user priority to tc map
 */
7017static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
7018{
7019 struct net_device *dev = adapter->netdev;
7020 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
7021 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
7022 u8 prio;
7023
7024 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
7025 u8 tc = 0;
7026
7027 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
7028 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
7029 else if (ets)
7030 tc = ets->prio_tc[prio];
7031
7032 netdev_set_prio_tc_map(dev, prio, tc);
7033 }
7034}
7035
7036#endif

/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 */
7043int ixgbe_setup_tc(struct net_device *dev, u8 tc)
7044{
7045 struct ixgbe_adapter *adapter = netdev_priv(dev);
7046 struct ixgbe_hw *hw = &adapter->hw;

	/* Hardware supports up to 8 traffic classes */
7049 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
7050 (hw->mac.type == ixgbe_mac_82598EB &&
7051 tc < MAX_TRAFFIC_CLASS))
7052 return -EINVAL;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
7058 if (netif_running(dev))
7059 ixgbe_close(dev);
7060 ixgbe_clear_interrupt_scheme(adapter);
7061
7062#ifdef CONFIG_IXGBE_DCB
7063 if (tc) {
7064 netdev_set_num_tc(dev, tc);
7065 ixgbe_set_prio_tc_map(adapter);
7066
7067 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
7068
7069 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
7070 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
7071 adapter->hw.fc.requested_mode = ixgbe_fc_none;
7072 }
7073 } else {
7074 netdev_reset_tc(dev);
7075
7076 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
7077 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
7078
7079 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
7080
7081 adapter->temp_dcb_cfg.pfc_mode_enable = false;
7082 adapter->dcb_cfg.pfc_mode_enable = false;
7083 }
7084
7085 ixgbe_validate_rtr(adapter, tc);
7086
7087#endif
7088 ixgbe_init_interrupt_scheme(adapter);
7089
7090 if (netif_running(dev))
7091 return ixgbe_open(dev);
7092
7093 return 0;
7094}
7095
7096#ifdef CONFIG_PCI_IOV
7097void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
7098{
7099 struct net_device *netdev = adapter->netdev;
7100
7101 rtnl_lock();
7102 ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
7103 rtnl_unlock();
7104}
7105
7106#endif
7107void ixgbe_do_reset(struct net_device *netdev)
7108{
7109 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7110
7111 if (netif_running(netdev))
7112 ixgbe_reinit_locked(adapter);
7113 else
7114 ixgbe_reset(adapter);
7115}
7116
7117static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
7118 netdev_features_t features)
7119{
7120 struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
7123 if (!(features & NETIF_F_RXCSUM))
7124 features &= ~NETIF_F_LRO;

	/* Turn off LRO if not RSC capable */
7127 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
7128 features &= ~NETIF_F_LRO;
7129
7130 return features;
7131}
7132
7133static int ixgbe_set_features(struct net_device *netdev,
7134 netdev_features_t features)
7135{
7136 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7137 netdev_features_t changed = netdev->features ^ features;
7138 bool need_reset = false;

	/* Make sure RSC matches LRO, reset if change */
7141 if (!(features & NETIF_F_LRO)) {
7142 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
7143 need_reset = true;
7144 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
7145 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
7146 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
7147 if (adapter->rx_itr_setting == 1 ||
7148 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
7149 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
7150 need_reset = true;
7151 } else if ((changed ^ features) & NETIF_F_LRO) {
7152 e_info(probe, "rx-usecs set too low, "
7153 "disabling RSC\n");
7154 }
7155 }
7156
7157
7158
7159
7160
7161 switch (features & NETIF_F_NTUPLE) {
7162 case NETIF_F_NTUPLE:
7163
7164 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
7165 need_reset = true;
7166
7167 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
7168 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
7169 break;
7170 default:
7171
7172 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
7173 need_reset = true;
7174
7175 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
7176
7177
7178 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7179 break;
7180
7181
7182 if (netdev_get_num_tc(netdev) > 1)
7183 break;
7184
7185
7186 if (adapter->ring_feature[RING_F_RSS].limit <= 1)
7187 break;
7188
7189
7190 if (!adapter->atr_sample_rate)
7191 break;
7192
7193 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
7194 break;
7195 }
7196
7197 if (features & NETIF_F_HW_VLAN_CTAG_RX)
7198 ixgbe_vlan_strip_enable(adapter);
7199 else
7200 ixgbe_vlan_strip_disable(adapter);
7201
7202 if (changed & NETIF_F_RXALL)
7203 need_reset = true;
7204
7205 netdev->features = features;
7206 if (need_reset)
7207 ixgbe_do_reset(netdev);
7208
7209 return 0;
7210}
7211
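/* Add a static FDB entry on behalf of user space (e.g. via
 * "bridge fdb add <mac> dev <pf>"); the hardware cannot age addresses,
 * so only permanent (NUD_PERMANENT) entries are accepted while SR-IOV
 * is enabled.
 */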
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			     struct net_device *dev,
			     const unsigned char *addr,
			     u16 flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int err;

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);

	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n",
			ixgbe_driver_name);
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
		u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;

		if (netdev_uc_count(dev) < rar_uc_entries)
			err = dev_uc_add_excl(dev, addr);
		else
			err = -ENOMEM;
	} else if (is_multicast_ether_addr(addr)) {
		err = dev_mc_add_excl(dev, addr);
	} else {
		err = -EINVAL;
	}

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}

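/* Select the loopback behaviour of the on-chip switch: VEB loops VF-to-VF
 * traffic back locally, VEPA sends it to the adjacent bridge port. This is
 * normally driven from iproute2, e.g. "bridge link set dev <pf> hwmode vepa".
 */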
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
				    struct nlmsghdr *nlh)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;
		u32 reg = 0;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode == BRIDGE_MODE_VEPA) {
			reg = 0;
			adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB;
		} else if (mode == BRIDGE_MODE_VEB) {
			reg = IXGBE_PFDTXGSWC_VT_LBEN;
			adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
		} else {
			return -EINVAL;
		}

		IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);

		e_info(drv, "enabling bridge mode: %s\n",
		       mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	}

	return 0;
}

static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				    struct net_device *dev,
				    u32 filter_mask)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	u16 mode;

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return 0;

	if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
		mode = BRIDGE_MODE_VEB;
	else
		mode = BRIDGE_MODE_VEPA;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
}

static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
#ifdef IXGBE_FCOE
	.ndo_select_queue	= ixgbe_select_queue,
#endif
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk	= ixgbe_ndo_set_vf_spoofchk,
	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
	.ndo_get_stats64	= ixgbe_get_stats64,
#ifdef CONFIG_IXGBE_DCB
	.ndo_setup_tc		= ixgbe_setup_tc,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbe_low_latency_recv,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
	.ndo_fcoe_get_wwn	= ixgbe_fcoe_get_wwn,
	.ndo_fcoe_get_hbainfo	= ixgbe_fcoe_get_hbainfo,
#endif
	.ndo_set_features	= ixgbe_set_features,
	.ndo_fix_features	= ixgbe_fix_features,
	.ndo_fdb_add		= ixgbe_ndo_fdb_add,
	.ndo_bridge_setlink	= ixgbe_ndo_bridge_setlink,
	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
};

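/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * This function enumerates the physical functions co-located on a single slot,
 * in order to determine how many ports a device has. This is most useful in
 * determining the required GT/s of PCIe bandwidth necessary for optimal
 * performance.
 **/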
static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct list_head *entry;
	int physfns = 0;

	/* Some cards can not use the generic count PCIe functions method,
	 * because they are behind a parent switch, so we hardcode these to
	 * 4 ports.
	 */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		physfns = 4;
		break;
	default:
		list_for_each(entry, &adapter->pdev->bus_list) {
			struct pci_dev *pdev =
				list_entry(entry, struct pci_dev, bus_list);

			/* don't count virtual functions */
			if (!pdev->is_virtfn)
				physfns++;
		}
	}

	return physfns;
}

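/**
 * ixgbe_wol_supported - Check whether device supports WoL
 * @adapter: the adapter private structure
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * This function is used by probe and ethtool to determine
 * which devices have WoL support
 *
 **/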
int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			u16 subdevice_id)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
	int is_wol_supported = 0;

	switch (device_id) {
	case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices can support WoL */
		switch (subdevice_id) {
		case IXGBE_SUBDEV_ID_82599_560FLR:
			/* only support first port */
			if (hw->bus.func != 0)
				break;
			/* fall through */
		case IXGBE_SUBDEV_ID_82599_SP_560FLR:
		case IXGBE_SUBDEV_ID_82599_SFP:
		case IXGBE_SUBDEV_ID_82599_RNDC:
		case IXGBE_SUBDEV_ID_82599_ECNA_DP:
		case IXGBE_SUBDEV_ID_82599_LOM_SFP:
			is_wol_supported = 1;
			break;
		}
		break;
	case IXGBE_DEV_ID_82599EN_SFP:
		/* Only this subdevice supports WoL */
		switch (subdevice_id) {
		case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
			is_wol_supported = 1;
			break;
		}
		break;
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WoL */
		if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
			is_wol_supported = 1;
		break;
	case IXGBE_DEV_ID_82599_KX4:
		is_wol_supported = 1;
		break;
	case IXGBE_DEV_ID_X540T:
	case IXGBE_DEV_ID_X540T1:
		/* check eeprom to see if WoL is enabled for this port */
		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
		     (hw->bus.func == 0))) {
			is_wol_supported = 1;
		}
		break;
	}

	return is_wol_supported;
}

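/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/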
static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac, expected_gts;
	unsigned int indices = MAX_TX_QUEUES;
	u8 part_str[IXGBE_PBANUM_LENGTH];
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 eec;

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB
		/* 8 TC w/ 4 queues per TC */
		indices = 4 * MAX_TRAFFIC_CLASS;
#else
		indices = IXGBE_MAX_RSS_INDICES;
#endif
	}

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* Cache if MNG FW is up so we don't have to read the REG later */
	if (hw->mac.ops.mng_fw_enabled)
		hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw);

	/* Make it possible for the adapter to be woken up via WOL */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		break;
	default:
		break;
	}

	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(probe, "Fan has stopped, replace the adapter\n");
	}

	if (allow_unsupported_sfp)
		hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* reset_hw fills in the perm_addr as well */
	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported module.\n");
		goto err_sw_init;
	} else if (err) {
		e_dev_err("HW Init failed: %d\n", err);
		goto err_sw_init;
	}

#ifdef CONFIG_PCI_IOV
	/* SR-IOV is not supported on the 82598 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		goto skip_sriov;

	ixgbe_init_mbx_params_pf(hw);
	memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
	ixgbe_enable_sriov(adapter);
	pci_sriov_set_totalvfs(pdev, 63);
skip_sriov:

#endif
	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM;

	netdev->hw_features = netdev->features;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		netdev->features |= NETIF_F_SCTP_CSUM;
		netdev->hw_features |= NETIF_F_SCTP_CSUM |
				       NETIF_F_NTUPLE;
		break;
	default:
		break;
	}

	netdev->hw_features |= NETIF_F_RXALL;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		unsigned int fcoe_l;

		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}

		fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
		adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;

		netdev->features |= NETIF_F_FSO |
				    NETIF_F_FCOE_CRC;

		netdev->vlan_features |= NETIF_F_FSO |
					 NETIF_F_FCOE_CRC |
					 NETIF_F_FCOE_MTU;
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
		netdev->hw_features |= NETIF_F_LRO;
	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		e_dev_err("The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_sw_init;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		e_dev_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->service_task, ixgbe_service_task);
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* WOL not supported for all devices */
	adapter->wol = 0;
	hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
	hw->wol_enabled = !!ixgbe_wol_supported(adapter, pdev->device,
						pdev->subsystem_device);
	if (hw->wol_enabled)
		adapter->wol = IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);
	if (ixgbe_pcie_from_parent(hw))
		ixgbe_get_parent_bus_info(adapter);

	/* print bus type/speed/width info */
	e_dev_info("(PCI Express:%s:%s) %pM\n",
		   (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
		    hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
		    "Unknown"),
		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
		    "Unknown"),
		   netdev->dev_addr);

	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
	if (err)
		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   part_str);
	else
		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, part_str);

	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure no warning is displayed if it can't be fixed.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
		break;
	default:
		expected_gts = ixgbe_enumerate_functions(adapter) * 10;
		break;
	}
	ixgbe_check_minimum_link(adapter, expected_gts);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);
	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* power down the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser)
		hw->mac.ops.disable_tx_laser(hw);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* firmware requires driver version to be 0xFFFFFFFF
	 * since os does not support feature
	 */
	if (hw->mac.ops.set_fw_drv_ver)
		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF);

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("%s\n", ixgbe_default_device_descr);
	cards_found++;

#ifdef CONFIG_IXGBE_HWMON
	if (ixgbe_sysfs_init(adapter))
		e_err(probe, "failed to allocate sysfs resources\n");
#endif /* CONFIG_IXGBE_HWMON */

	ixgbe_dbg_adapter_init(adapter);

	/* Need link setup for MNG FW, else wait for IXGBE_UP */
	if (hw->mng_fw_enabled && hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw,
			IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
			true);

	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbe_disable_sriov(adapter);
	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

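/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/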
static void ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	ixgbe_dbg_adapter_exit(adapter);

	set_bit(__IXGBE_DOWN, &adapter->state);
	cancel_work_sync(&adapter->service_task);

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef CONFIG_IXGBE_HWMON
	ixgbe_sysfs_exit(adapter);
#endif /* CONFIG_IXGBE_HWMON */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

#ifdef CONFIG_PCI_IOV
	/*
	 * Only disable SR-IOV on unload if the user specified the now
	 * deprecated max_vfs module parameter.
	 */
	if (max_vfs)
		ixgbe_disable_sriov(adapter);
#endif
	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

#ifdef CONFIG_DCB
	kfree(adapter->ixgbe_ieee_pfc);
	kfree(adapter->ixgbe_ieee_ets);

#endif
	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	e_dev_info("complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

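/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */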
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	struct pci_dev *bdev, *vfdev;
	u32 dw0, dw1, dw2, dw3;
	int vf, pos;
	u16 req_id, pf_func;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		goto skip_bad_vf_detection;

	bdev = pdev->bus->self;
	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
		bdev = bdev->bus->self;

	if (!bdev)
		goto skip_bad_vf_detection;

	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		goto skip_bad_vf_detection;

	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);

	req_id = dw1 >> 16;
	/* if bit 7 of the requestor ID is set then it's a VF */
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;

	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		unsigned int device_id;

		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: %8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			device_id = IXGBE_82599_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X540:
			device_id = IXGBE_X540_VF_DEVICE_ID;
			break;
		default:
			device_id = 0;
			break;
		}

		/* Find the pci device of the offending VF */
		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					       device_id, vfdev);
		}
		/*
		 * There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
		if (vfdev) {
			e_dev_err("Issuing VFLR to VF %d\n", vf);
			pci_write_config_dword(vfdev, 0xA8, 0x00008000);
			/* Free device reference count */
			pci_dev_put(vfdev);
		}

		pci_cleanup_aer_uncorrect_error_status(pdev);
	}

	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
	adapter->vferr_refcount++;

	return PCI_ERS_RESULT_RECOVERED;

skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

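/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */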
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err)
		e_dev_err("pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			  err);

	return result;
}

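/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */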
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	if (adapter->vferr_refcount) {
		e_info(drv, "Resuming after VF err\n");
		adapter->vferr_refcount--;
		return;
	}

#endif
	if (netif_running(netdev))
		ixgbe_up(adapter);

	netif_device_attach(netdev);
}

static const struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = ixgbe_remove,
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.sriov_configure = ixgbe_pci_sriov_configure,
	.err_handler = &ixgbe_err_handler
};

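/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/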
static int __init ixgbe_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

	ixgbe_dbg_init();

	ret = pci_register_driver(&ixgbe_driver);
	if (ret) {
		ixgbe_dbg_exit();
		return ret;
	}

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	return 0;
}

module_init(ixgbe_init_module);

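/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/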
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);

	ixgbe_dbg_exit();

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);