/*******************************************************************************
 *
 * Intel 10 Gigabit PCI Express Linux driver
 * Copyright(c) 1999 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Contact Information:
 * Linux NICS <linux.nics@intel.com>
 *
 ******************************************************************************/
29#include <linux/types.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33#include <linux/vmalloc.h>
34#include <linux/string.h>
35#include <linux/in.h>
36#include <linux/interrupt.h>
37#include <linux/ip.h>
38#include <linux/tcp.h>
39#include <linux/sctp.h>
40#include <linux/pkt_sched.h>
41#include <linux/ipv6.h>
42#include <linux/slab.h>
43#include <net/checksum.h>
44#include <net/ip6_checksum.h>
45#include <linux/etherdevice.h>
46#include <linux/ethtool.h>
47#include <linux/if.h>
48#include <linux/if_vlan.h>
49#include <linux/if_macvlan.h>
50#include <linux/if_bridge.h>
51#include <linux/prefetch.h>
52#include <scsi/fc/fc_fcoe.h>
53#include <net/vxlan.h>
54
55#ifdef CONFIG_OF
56#include <linux/of_net.h>
57#endif
58
59#ifdef CONFIG_SPARC
60#include <asm/idprom.h>
61#include <asm/prom.h>
62#endif
63
64#include "ixgbe.h"
65#include "ixgbe_common.h"
66#include "ixgbe_dcb_82599.h"
67#include "ixgbe_sriov.h"
68
69char ixgbe_driver_name[] = "ixgbe";
70static const char ixgbe_driver_string[] =
71 "Intel(R) 10 Gigabit PCI Express Network Driver";
72#ifdef IXGBE_FCOE
73char ixgbe_default_device_descr[] =
74 "Intel(R) 10 Gigabit Network Connection";
75#else
76static char ixgbe_default_device_descr[] =
77 "Intel(R) 10 Gigabit Network Connection";
78#endif
79#define DRV_VERSION "4.2.1-k"
80const char ixgbe_driver_version[] = DRV_VERSION;
81static const char ixgbe_copyright[] =
82 "Copyright (c) 1999-2015 Intel Corporation.";
83
static const char ixgbe_overheat_msg[] =
		"Network adapter has been stopped because it has overheated. "
		"Restart the computer. If the problem persists, "
		"power off the system and replace the adapter";
85
86static const struct ixgbe_info *ixgbe_info_tbl[] = {
87 [board_82598] = &ixgbe_82598_info,
88 [board_82599] = &ixgbe_82599_info,
89 [board_X540] = &ixgbe_X540_info,
90 [board_X550] = &ixgbe_X550_info,
91 [board_X550EM_x] = &ixgbe_X550EM_x_info,
92};
93
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
102static const struct pci_device_id ixgbe_pci_tbl[] = {
103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
104 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
105 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
106 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
107 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
108 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
115 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
117 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
118 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
119 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
120 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
121 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
122 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
123 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
124 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
125 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
126 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
127 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
128 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
129 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
130 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
131 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
132 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
133 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
134 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
135 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
136 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
137 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
138
139 {0, }
140};
141MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
142
143#ifdef CONFIG_IXGBE_DCA
144static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
145 void *p);
146static struct notifier_block dca_notifier = {
147 .notifier_call = ixgbe_notify_dca,
148 .next = NULL,
149 .priority = 0
150};
151#endif
152
153#ifdef CONFIG_PCI_IOV
154static unsigned int max_vfs;
155module_param(max_vfs, uint, 0);
156MODULE_PARM_DESC(max_vfs,
157 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
158#endif
159
160static unsigned int allow_unsupported_sfp;
161module_param(allow_unsupported_sfp, uint, 0);
162MODULE_PARM_DESC(allow_unsupported_sfp,
163 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
164
165#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
166static int debug = -1;
167module_param(debug, int, 0);
168MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
169
170MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
171MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
172MODULE_LICENSE("GPL");
173MODULE_VERSION(DRV_VERSION);
174
175static struct workqueue_struct *ixgbe_wq;
176
177static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
178
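/* read a word from the PCIe capability of the parent (upstream) device;
 * returns -1 if there is no PCIe parent or the parent has been removed
 */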
179static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
180 u32 reg, u16 *value)
181{
182 struct pci_dev *parent_dev;
183 struct pci_bus *parent_bus;
184
185 parent_bus = adapter->pdev->bus->parent;
186 if (!parent_bus)
187 return -1;
188
189 parent_dev = parent_bus->self;
190 if (!parent_dev)
191 return -1;
192
193 if (!pci_is_pcie(parent_dev))
194 return -1;
195
196 pcie_capability_read_word(parent_dev, reg, value);
197 if (*value == IXGBE_FAILED_READ_CFG_WORD &&
198 ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
199 return -1;
200 return 0;
201}
202
203static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
204{
205 struct ixgbe_hw *hw = &adapter->hw;
206 u16 link_status = 0;
207 int err;
208
209 hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space of
	 * the parent, as this device is behind a switch
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, PCI_EXP_LNKSTA,
					     &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;
219
220 hw->bus.width = ixgbe_convert_bus_width(link_status);
221 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
222
223 return 0;
224}
225

/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCIe info
 * (speed, width, etc) should be obtained from the parent bus rather than
 * from the device itself, as is the case for the two QSFP-based 82599 devices.
 */
235static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
236{
237 switch (hw->device_id) {
238 case IXGBE_DEV_ID_82599_SFP_SF_QP:
239 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
240 return true;
241 default:
242 return false;
243 }
244}
245
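/**
 * ixgbe_check_minimum_link - warn if the PCIe slot cannot supply the
 * bandwidth the adapter needs
 * @adapter: pointer to the driver private struct
 * @expected_gts: minimum GT/s the adapter requires
 */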
246static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
247 int expected_gts)
248{
249 struct ixgbe_hw *hw = &adapter->hw;
250 int max_gts = 0;
251 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
252 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
253 struct pci_dev *pdev;
254

	/* Some devices are not connected over PCIe and thus do not
	 * negotiate speed. These devices do not have valid bus info,
	 * and the check below should be skipped for them.
	 */
	if (hw->bus.type == ixgbe_bus_type_internal)
		return;

	/* determine whether to use the parent device */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		pdev = adapter->pdev->bus->parent->self;
	else
		pdev = adapter->pdev;
267
268 if (pcie_get_minimum_link(pdev, &speed, &width) ||
269 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
270 e_dev_warn("Unable to determine PCI Express bandwidth.\n");
271 return;
272 }
273
	switch (speed) {
	case PCIE_SPEED_2_5GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 2 * width;
		break;
	case PCIE_SPEED_5_0GT:
		/* 8b/10b encoding */
		max_gts = 4 * width;
		break;
	case PCIE_SPEED_8_0GT:
		/* 128b/130b encoding reduces throughput by less than 2% */
		max_gts = 8 * width;
		break;
	default:
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}
291
292 e_dev_info("PCI Express bandwidth of %dGT/s available\n",
293 max_gts);
294 e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
295 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
296 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
297 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
298 "Unknown"),
299 width,
300 (speed == PCIE_SPEED_2_5GT ? "20%" :
301 speed == PCIE_SPEED_5_0GT ? "20%" :
302 speed == PCIE_SPEED_8_0GT ? "<2%" :
303 "Unknown"));
304
305 if (max_gts < expected_gts) {
306 e_dev_warn("This is not sufficient for optimal performance of this card.\n");
307 e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
308 expected_gts);
309 e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
310 }
311}
312
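/* queue the service task unless the adapter is down, being removed, or the
 * task is already scheduled
 */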
313static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
314{
315 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
316 !test_bit(__IXGBE_REMOVING, &adapter->state) &&
317 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
318 queue_work(ixgbe_wq, &adapter->service_task);
319}
320
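/* handle surprise removal: clear hw_addr so all further MMIO access is
 * short-circuited, and kick the service task to tear things down
 */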
321static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
322{
323 struct ixgbe_adapter *adapter = hw->back;
324
325 if (!hw->hw_addr)
326 return;
327 hw->hw_addr = NULL;
328 e_dev_err("Adapter removed\n");
329 if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
330 ixgbe_service_event_schedule(adapter);
331}
332
333static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
334{
335 u32 value;
336
	/* The following check not only optimizes a little bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
343 if (reg == IXGBE_STATUS) {
344 ixgbe_remove_adapter(hw);
345 return;
346 }
347 value = ixgbe_read_reg(hw, IXGBE_STATUS);
348 if (value == IXGBE_FAILED_READ_REG)
349 ixgbe_remove_adapter(hw);
350}
351

/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers. It checks for device
 * removal by confirming any read that returns all ones by checking the
 * status register value for all ones. This function avoids reading from
 * the hardware if a removal was previously detected, in which case it
 * returns IXGBE_FAILED_READ_REG (all ones).
 */
365u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
366{
367 u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
368 u32 value;
369
370 if (ixgbe_removed(reg_addr))
371 return IXGBE_FAILED_READ_REG;
372 value = readl(reg_addr + reg);
373 if (unlikely(value == IXGBE_FAILED_READ_REG))
374 ixgbe_check_remove(hw, reg);
375 return value;
376}
377
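/* returns true (and marks the adapter removed) if a config space read of the
 * vendor ID comes back as all ones
 */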
378static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
379{
380 u16 value;
381
382 pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
383 if (value == IXGBE_FAILED_READ_CFG_WORD) {
384 ixgbe_remove_adapter(hw);
385 return true;
386 }
387 return false;
388}
389
390u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
391{
392 struct ixgbe_adapter *adapter = hw->back;
393 u16 value;
394
395 if (ixgbe_removed(hw->hw_addr))
396 return IXGBE_FAILED_READ_CFG_WORD;
397 pci_read_config_word(adapter->pdev, reg, &value);
398 if (value == IXGBE_FAILED_READ_CFG_WORD &&
399 ixgbe_check_cfg_remove(hw, adapter->pdev))
400 return IXGBE_FAILED_READ_CFG_WORD;
401 return value;
402}
403
404#ifdef CONFIG_PCI_IOV
405static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
406{
407 struct ixgbe_adapter *adapter = hw->back;
408 u32 value;
409
410 if (ixgbe_removed(hw->hw_addr))
411 return IXGBE_FAILED_READ_CFG_DWORD;
412 pci_read_config_dword(adapter->pdev, reg, &value);
413 if (value == IXGBE_FAILED_READ_CFG_DWORD &&
414 ixgbe_check_cfg_remove(hw, adapter->pdev))
415 return IXGBE_FAILED_READ_CFG_DWORD;
416 return value;
417}
418#endif
419
420void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
421{
422 struct ixgbe_adapter *adapter = hw->back;
423
424 if (ixgbe_removed(hw->hw_addr))
425 return;
426 pci_write_config_word(adapter->pdev, reg, value);
427}
428
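/* mark the service task as no longer scheduled so it can be queued again */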
429static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
430{
431 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
432
433
434 smp_mb__before_atomic();
435 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
436}
437
438struct ixgbe_reg_info {
439 u32 ofs;
440 char *name;
441};
442
static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{ .name = NULL }
};
474
/*
 * ixgbe_regdump - register printout routine
 */
479static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
480{
481 int i = 0, j = 0;
482 char rname[16];
483 u32 regs[64];
484
485 switch (reginfo->ofs) {
486 case IXGBE_SRRCTL(0):
487 for (i = 0; i < 64; i++)
488 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
489 break;
490 case IXGBE_DCA_RXCTRL(0):
491 for (i = 0; i < 64; i++)
492 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
493 break;
494 case IXGBE_RDLEN(0):
495 for (i = 0; i < 64; i++)
496 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
497 break;
498 case IXGBE_RDH(0):
499 for (i = 0; i < 64; i++)
500 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
501 break;
502 case IXGBE_RDT(0):
503 for (i = 0; i < 64; i++)
504 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
505 break;
506 case IXGBE_RXDCTL(0):
507 for (i = 0; i < 64; i++)
508 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
509 break;
510 case IXGBE_RDBAL(0):
511 for (i = 0; i < 64; i++)
512 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
513 break;
514 case IXGBE_RDBAH(0):
515 for (i = 0; i < 64; i++)
516 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
517 break;
518 case IXGBE_TDBAL(0):
519 for (i = 0; i < 64; i++)
520 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
521 break;
522 case IXGBE_TDBAH(0):
523 for (i = 0; i < 64; i++)
524 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
525 break;
526 case IXGBE_TDLEN(0):
527 for (i = 0; i < 64; i++)
528 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
529 break;
530 case IXGBE_TDH(0):
531 for (i = 0; i < 64; i++)
532 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
533 break;
534 case IXGBE_TDT(0):
535 for (i = 0; i < 64; i++)
536 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
537 break;
538 case IXGBE_TXDCTL(0):
539 for (i = 0; i < 64; i++)
540 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
541 break;
542 default:
543 pr_info("%-15s %08x\n", reginfo->name,
544 IXGBE_READ_REG(hw, reginfo->ofs));
545 return;
546 }
547
548 for (i = 0; i < 8; i++) {
549 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
550 pr_err("%-15s", rname);
551 for (j = 0; j < 8; j++)
552 pr_cont(" %08x", regs[i*8+j]);
553 pr_cont("\n");
554 }
555
556}
557
/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
561static void ixgbe_dump(struct ixgbe_adapter *adapter)
562{
563 struct net_device *netdev = adapter->netdev;
564 struct ixgbe_hw *hw = &adapter->hw;
565 struct ixgbe_reg_info *reginfo;
566 int n = 0;
567 struct ixgbe_ring *tx_ring;
568 struct ixgbe_tx_buffer *tx_buffer;
569 union ixgbe_adv_tx_desc *tx_desc;
570 struct my_u0 { u64 a; u64 b; } *u0;
571 struct ixgbe_ring *rx_ring;
572 union ixgbe_adv_rx_desc *rx_desc;
573 struct ixgbe_rx_buffer *rx_buffer_info;
574 u32 staterr;
575 int i = 0;
576
577 if (!netif_msg_hw(adapter))
578 return;
579
580
581 if (netdev) {
582 dev_info(&adapter->pdev->dev, "Net device Info\n");
583 pr_info("Device Name state "
584 "trans_start last_rx\n");
585 pr_info("%-15s %016lX %016lX %016lX\n",
586 netdev->name,
587 netdev->state,
588 netdev->trans_start,
589 netdev->last_rx);
590 }
591
592
593 dev_info(&adapter->pdev->dev, "Register Dump\n");
594 pr_info(" Register Name Value\n");
595 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
596 reginfo->name; reginfo++) {
597 ixgbe_regdump(hw, reginfo);
598 }
599
600
601 if (!netdev || !netif_running(netdev))
602 return;
603
604 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
605 pr_info(" %s %s %s %s\n",
606 "Queue [NTU] [NTC] [bi(ntc)->dma ]",
607 "leng", "ntw", "timestamp");
608 for (n = 0; n < adapter->num_tx_queues; n++) {
609 tx_ring = adapter->tx_ring[n];
610 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
611 pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
612 n, tx_ring->next_to_use, tx_ring->next_to_clean,
613 (u64)dma_unmap_addr(tx_buffer, dma),
614 dma_unmap_len(tx_buffer, len),
615 tx_buffer->next_to_watch,
616 (u64)tx_buffer->time_stamp);
617 }
618
619
620 if (!netif_msg_tx_done(adapter))
621 goto rx_ring_summary;
622
623 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
	/* Transmit Descriptor Formats
	 *
	 * The dump below prints each Advanced Transmit Descriptor as two
	 * little-endian 64-bit words (buffer address and command/status
	 * fields), followed by the driver's bookkeeping for that buffer:
	 * DMA address, mapped length, next_to_watch, timestamp and skb.
	 */
660 for (n = 0; n < adapter->num_tx_queues; n++) {
661 tx_ring = adapter->tx_ring[n];
662 pr_info("------------------------------------\n");
663 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
664 pr_info("------------------------------------\n");
665 pr_info("%s%s %s %s %s %s\n",
666 "T [desc] [address 63:0 ] ",
667 "[PlPOIdStDDt Ln] [bi->dma ] ",
668 "leng", "ntw", "timestamp", "bi->skb");
669
670 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
671 tx_desc = IXGBE_TX_DESC(tx_ring, i);
672 tx_buffer = &tx_ring->tx_buffer_info[i];
673 u0 = (struct my_u0 *)tx_desc;
674 if (dma_unmap_len(tx_buffer, len) > 0) {
675 pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
676 i,
677 le64_to_cpu(u0->a),
678 le64_to_cpu(u0->b),
679 (u64)dma_unmap_addr(tx_buffer, dma),
680 dma_unmap_len(tx_buffer, len),
681 tx_buffer->next_to_watch,
682 (u64)tx_buffer->time_stamp,
683 tx_buffer->skb);
684 if (i == tx_ring->next_to_use &&
685 i == tx_ring->next_to_clean)
686 pr_cont(" NTC/U\n");
687 else if (i == tx_ring->next_to_use)
688 pr_cont(" NTU\n");
689 else if (i == tx_ring->next_to_clean)
690 pr_cont(" NTC\n");
691 else
692 pr_cont("\n");
693
694 if (netif_msg_pktdata(adapter) &&
695 tx_buffer->skb)
696 print_hex_dump(KERN_INFO, "",
697 DUMP_PREFIX_ADDRESS, 16, 1,
698 tx_buffer->skb->data,
699 dma_unmap_len(tx_buffer, len),
700 true);
701 }
702 }
703 }
704
705
706rx_ring_summary:
707 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
708 pr_info("Queue [NTU] [NTC]\n");
709 for (n = 0; n < adapter->num_rx_queues; n++) {
710 rx_ring = adapter->rx_ring[n];
711 pr_info("%5d %5X %5X\n",
712 n, rx_ring->next_to_use, rx_ring->next_to_clean);
713 }
714
715
716 if (!netif_msg_rx_status(adapter))
717 return;
718
719 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
720
	/* Receive Descriptor Formats
	 *
	 * "R  " lines show descriptors still in Advanced Receive Descriptor
	 * read format (packet buffer and header buffer addresses written by
	 * the driver); "RWB" lines show descriptors the hardware has written
	 * back (RSS hash / checksum, status, error and length fields), as
	 * indicated by the DD bit in status_error.
	 */
766 for (n = 0; n < adapter->num_rx_queues; n++) {
767 rx_ring = adapter->rx_ring[n];
768 pr_info("------------------------------------\n");
769 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
770 pr_info("------------------------------------\n");
771 pr_info("%s%s%s",
772 "R [desc] [ PktBuf A0] ",
773 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
774 "<-- Adv Rx Read format\n");
775 pr_info("%s%s%s",
776 "RWB[desc] [PcsmIpSHl PtRs] ",
777 "[vl er S cks ln] ---------------- [bi->skb ] ",
778 "<-- Adv Rx Write-Back format\n");
779
780 for (i = 0; i < rx_ring->count; i++) {
781 rx_buffer_info = &rx_ring->rx_buffer_info[i];
782 rx_desc = IXGBE_RX_DESC(rx_ring, i);
783 u0 = (struct my_u0 *)rx_desc;
784 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
785 if (staterr & IXGBE_RXD_STAT_DD) {
786
787 pr_info("RWB[0x%03X] %016llX "
788 "%016llX ---------------- %p", i,
789 le64_to_cpu(u0->a),
790 le64_to_cpu(u0->b),
791 rx_buffer_info->skb);
792 } else {
793 pr_info("R [0x%03X] %016llX "
794 "%016llX %016llX %p", i,
795 le64_to_cpu(u0->a),
796 le64_to_cpu(u0->b),
797 (u64)rx_buffer_info->dma,
798 rx_buffer_info->skb);
799
800 if (netif_msg_pktdata(adapter) &&
801 rx_buffer_info->dma) {
802 print_hex_dump(KERN_INFO, "",
803 DUMP_PREFIX_ADDRESS, 16, 1,
804 page_address(rx_buffer_info->page) +
805 rx_buffer_info->page_offset,
806 ixgbe_rx_bufsz(rx_ring), true);
807 }
808 }
809
810 if (i == rx_ring->next_to_use)
811 pr_cont(" NTU\n");
812 else if (i == rx_ring->next_to_clean)
813 pr_cont(" NTC\n");
814 else
815 pr_cont("\n");
816
817 }
818 }
819}
820
821static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
822{
823 u32 ctrl_ext;
824
825
826 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
827 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
828 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
829}
830
831static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
832{
833 u32 ctrl_ext;
834
835
836 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
837 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
838 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
839}
840
/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
849static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
850 u8 queue, u8 msix_vector)
851{
852 u32 ivar, index;
853 struct ixgbe_hw *hw = &adapter->hw;
854 switch (hw->mac.type) {
855 case ixgbe_mac_82598EB:
856 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
857 if (direction == -1)
858 direction = 0;
859 index = (((direction * 64) + queue) >> 2) & 0x1F;
860 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
861 ivar &= ~(0xFF << (8 * (queue & 0x3)));
862 ivar |= (msix_vector << (8 * (queue & 0x3)));
863 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
864 break;
865 case ixgbe_mac_82599EB:
866 case ixgbe_mac_X540:
867 case ixgbe_mac_X550:
868 case ixgbe_mac_X550EM_x:
869 if (direction == -1) {
870
871 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
872 index = ((queue & 1) * 8);
873 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
874 ivar &= ~(0xFF << index);
875 ivar |= (msix_vector << index);
876 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
877 break;
878 } else {
879
880 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
881 index = ((16 * (queue & 1)) + (8 * direction));
882 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
883 ivar &= ~(0xFF << index);
884 ivar |= (msix_vector << index);
885 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
886 break;
887 }
888 default:
889 break;
890 }
891}
892
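/* write EICS so the queues in @qmask fire their interrupts again */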
893static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
894 u64 qmask)
895{
896 u32 mask;
897
898 switch (adapter->hw.mac.type) {
899 case ixgbe_mac_82598EB:
900 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
901 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
902 break;
903 case ixgbe_mac_82599EB:
904 case ixgbe_mac_X540:
905 case ixgbe_mac_X550:
906 case ixgbe_mac_X550EM_x:
907 mask = (qmask & 0xFFFFFFFF);
908 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
909 mask = (qmask >> 32);
910 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
911 break;
912 default:
913 break;
914 }
915}
916
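/* free the skb (if any) and DMA unmap a tx_buffer, then reset its fields */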
917void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
918 struct ixgbe_tx_buffer *tx_buffer)
919{
920 if (tx_buffer->skb) {
921 dev_kfree_skb_any(tx_buffer->skb);
922 if (dma_unmap_len(tx_buffer, len))
923 dma_unmap_single(ring->dev,
924 dma_unmap_addr(tx_buffer, dma),
925 dma_unmap_len(tx_buffer, len),
926 DMA_TO_DEVICE);
927 } else if (dma_unmap_len(tx_buffer, len)) {
928 dma_unmap_page(ring->dev,
929 dma_unmap_addr(tx_buffer, dma),
930 dma_unmap_len(tx_buffer, len),
931 DMA_TO_DEVICE);
932 }
933 tx_buffer->next_to_watch = NULL;
934 tx_buffer->skb = NULL;
935 dma_unmap_len_set(tx_buffer, len, 0);
936
937}
938
939static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
940{
941 struct ixgbe_hw *hw = &adapter->hw;
942 struct ixgbe_hw_stats *hwstats = &adapter->stats;
943 int i;
944 u32 data;
945
946 if ((hw->fc.current_mode != ixgbe_fc_full) &&
947 (hw->fc.current_mode != ixgbe_fc_rx_pause))
948 return;
949
950 switch (hw->mac.type) {
951 case ixgbe_mac_82598EB:
952 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
953 break;
954 default:
955 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
956 }
957 hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
	if (!data)
		return;
962
963 for (i = 0; i < adapter->num_tx_queues; i++)
964 clear_bit(__IXGBE_HANG_CHECK_ARMED,
965 &adapter->tx_ring[i]->state);
966}
967
968static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
969{
970 struct ixgbe_hw *hw = &adapter->hw;
971 struct ixgbe_hw_stats *hwstats = &adapter->stats;
972 u32 xoff[8] = {0};
973 u8 tc;
974 int i;
975 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
976
977 if (adapter->ixgbe_ieee_pfc)
978 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
979
980 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
981 ixgbe_update_xoff_rx_lfc(adapter);
982 return;
983 }

	/* update stats for each tc, only valid with PFC enabled */
986 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
987 u32 pxoffrxc;
988
989 switch (hw->mac.type) {
990 case ixgbe_mac_82598EB:
991 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
992 break;
993 default:
994 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
995 }
996 hwstats->pxoffrxc[i] += pxoffrxc;
997
998 tc = netdev_get_prio_tc_map(adapter->netdev, i);
999 xoff[tc] += pxoffrxc;
1000 }

	/* disarm tx queues that have received xoff frames */
1003 for (i = 0; i < adapter->num_tx_queues; i++) {
1004 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
1005
1006 tc = tx_ring->dcb_tc;
1007 if (xoff[tc])
1008 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1009 }
1010}
1011
1012static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
1013{
1014 return ring->stats.packets;
1015}
1016
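/* number of descriptors queued to hardware (TDT) that it has not yet
 * processed (TDH)
 */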
1017static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
1018{
1019 struct ixgbe_adapter *adapter;
1020 struct ixgbe_hw *hw;
1021 u32 head, tail;
1022
1023 if (ring->l2_accel_priv)
1024 adapter = ring->l2_accel_priv->real_adapter;
1025 else
1026 adapter = netdev_priv(ring->netdev);
1027
1028 hw = &adapter->hw;
1029 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
1030 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
1031
1032 if (head != tail)
1033 return (head < tail) ?
1034 tail - head : (tail + ring->count - head);
1035
1036 return 0;
1037}
1038
1039static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1040{
1041 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1042 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1043 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1044
1045 clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if (tx_done_old == tx_done && tx_pending)
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
					&tx_ring->state);
1063
1064 tx_ring->tx_stats.tx_done_old = tx_done;
1065
1066 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1067
1068 return false;
1069}
1070

/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{

	/* Do the reset outside of interrupt context */
1079 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1080 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
1081 e_warn(drv, "initiating reset due to tx timeout\n");
1082 ixgbe_service_event_schedule(adapter);
1083 }
1084}
1085
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
1091static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1092 struct ixgbe_ring *tx_ring)
1093{
1094 struct ixgbe_adapter *adapter = q_vector->adapter;
1095 struct ixgbe_tx_buffer *tx_buffer;
1096 union ixgbe_adv_tx_desc *tx_desc;
1097 unsigned int total_bytes = 0, total_packets = 0;
1098 unsigned int budget = q_vector->tx.work_limit;
1099 unsigned int i = tx_ring->next_to_clean;
1100
1101 if (test_bit(__IXGBE_DOWN, &adapter->state))
1102 return true;
1103
1104 tx_buffer = &tx_ring->tx_buffer_info[i];
1105 tx_desc = IXGBE_TX_DESC(tx_ring, i);
1106 i -= tx_ring->count;
1107
1108 do {
1109 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
1110

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1120 break;
1121
1122
1123 tx_buffer->next_to_watch = NULL;
1124
1125
1126 total_bytes += tx_buffer->bytecount;
1127 total_packets += tx_buffer->gso_segs;
1128
1129
1130 dev_consume_skb_any(tx_buffer->skb);
1131
1132
1133 dma_unmap_single(tx_ring->dev,
1134 dma_unmap_addr(tx_buffer, dma),
1135 dma_unmap_len(tx_buffer, len),
1136 DMA_TO_DEVICE);
1137
1138
1139 tx_buffer->skb = NULL;
1140 dma_unmap_len_set(tx_buffer, len, 0);
1141
1142
1143 while (tx_desc != eop_desc) {
1144 tx_buffer++;
1145 tx_desc++;
1146 i++;
1147 if (unlikely(!i)) {
1148 i -= tx_ring->count;
1149 tx_buffer = tx_ring->tx_buffer_info;
1150 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1151 }
1152
1153
1154 if (dma_unmap_len(tx_buffer, len)) {
1155 dma_unmap_page(tx_ring->dev,
1156 dma_unmap_addr(tx_buffer, dma),
1157 dma_unmap_len(tx_buffer, len),
1158 DMA_TO_DEVICE);
1159 dma_unmap_len_set(tx_buffer, len, 0);
1160 }
1161 }
1162
1163
1164 tx_buffer++;
1165 tx_desc++;
1166 i++;
1167 if (unlikely(!i)) {
1168 i -= tx_ring->count;
1169 tx_buffer = tx_ring->tx_buffer_info;
1170 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1171 }
1172
1173
1174 prefetch(tx_desc);
1175
1176
1177 budget--;
1178 } while (likely(budget));
1179
1180 i += tx_ring->count;
1181 tx_ring->next_to_clean = i;
1182 u64_stats_update_begin(&tx_ring->syncp);
1183 tx_ring->stats.bytes += total_bytes;
1184 tx_ring->stats.packets += total_packets;
1185 u64_stats_update_end(&tx_ring->syncp);
1186 q_vector->tx.total_bytes += total_bytes;
1187 q_vector->tx.total_packets += total_packets;
1188
1189 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1190
1191 struct ixgbe_hw *hw = &adapter->hw;
1192 e_err(drv, "Detected Tx Unit Hang\n"
1193 " Tx Queue <%d>\n"
1194 " TDH, TDT <%x>, <%x>\n"
1195 " next_to_use <%x>\n"
1196 " next_to_clean <%x>\n"
1197 "tx_buffer_info[next_to_clean]\n"
1198 " time_stamp <%lx>\n"
1199 " jiffies <%lx>\n",
1200 tx_ring->queue_index,
1201 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1202 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1203 tx_ring->next_to_use, i,
1204 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1205
1206 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
1207
1208 e_info(probe,
1209 "tx hang %d detected on queue %d, resetting adapter\n",
1210 adapter->tx_timeout_count + 1, tx_ring->queue_index);
1211
1212
1213 ixgbe_tx_timeout_reset(adapter);
1214
1215
1216 return true;
1217 }
1218
1219 netdev_tx_completed_queue(txring_txq(tx_ring),
1220 total_packets, total_bytes);
1221
1222#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
1223 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1224 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
1229 if (__netif_subqueue_stopped(tx_ring->netdev,
1230 tx_ring->queue_index)
1231 && !test_bit(__IXGBE_DOWN, &adapter->state)) {
1232 netif_wake_subqueue(tx_ring->netdev,
1233 tx_ring->queue_index);
1234 ++tx_ring->tx_stats.restart_queue;
1235 }
1236 }
1237
1238 return !!budget;
1239}
1240
1241#ifdef CONFIG_IXGBE_DCA
1242static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
1243 struct ixgbe_ring *tx_ring,
1244 int cpu)
1245{
1246 struct ixgbe_hw *hw = &adapter->hw;
1247 u32 txctrl = 0;
1248 u16 reg_offset;
1249
1250 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1251 txctrl = dca3_get_tag(tx_ring->dev, cpu);
1252
1253 switch (hw->mac.type) {
1254 case ixgbe_mac_82598EB:
1255 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1256 break;
1257 case ixgbe_mac_82599EB:
1258 case ixgbe_mac_X540:
1259 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
1260 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
1261 break;
1262 default:
1263
1264 return;
1265 }
1266
	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
1272 txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1273 IXGBE_DCA_TXCTRL_DATA_RRO_EN |
1274 IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1275
1276 IXGBE_WRITE_REG(hw, reg_offset, txctrl);
1277}
1278
1279static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
1280 struct ixgbe_ring *rx_ring,
1281 int cpu)
1282{
1283 struct ixgbe_hw *hw = &adapter->hw;
1284 u32 rxctrl = 0;
1285 u8 reg_idx = rx_ring->reg_idx;
1286
1287 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1288 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
1289
1290 switch (hw->mac.type) {
1291 case ixgbe_mac_82599EB:
1292 case ixgbe_mac_X540:
1293 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
1294 break;
1295 default:
1296 break;
1297 }
1298
	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
1304 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1305 IXGBE_DCA_RXCTRL_DATA_DCA_EN |
1306 IXGBE_DCA_RXCTRL_DESC_DCA_EN;
1307
1308 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
1309}
1310
1311static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1312{
1313 struct ixgbe_adapter *adapter = q_vector->adapter;
1314 struct ixgbe_ring *ring;
1315 int cpu = get_cpu();
1316
1317 if (q_vector->cpu == cpu)
1318 goto out_no_update;
1319
1320 ixgbe_for_each_ring(ring, q_vector->tx)
1321 ixgbe_update_tx_dca(adapter, ring, cpu);
1322
1323 ixgbe_for_each_ring(ring, q_vector->rx)
1324 ixgbe_update_rx_dca(adapter, ring, cpu);
1325
1326 q_vector->cpu = cpu;
1327out_no_update:
1328 put_cpu();
1329}
1330
1331static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1332{
1333 int i;
1334
1335
1336 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1337 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1338 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1339 else
1340 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1341 IXGBE_DCA_CTRL_DCA_DISABLE);
1342
1343 for (i = 0; i < adapter->num_q_vectors; i++) {
1344 adapter->q_vector[i]->cpu = -1;
1345 ixgbe_update_dca(adapter->q_vector[i]);
1346 }
1347}
1348
1349static int __ixgbe_notify_dca(struct device *dev, void *data)
1350{
1351 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1352 unsigned long event = *(unsigned long *)data;
1353
1354 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1355 return 0;
1356
	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
					IXGBE_DCA_CTRL_DCA_MODE_CB2);
			break;
		}
		/* fall through - DCA add failed, treat it like a removal */
	case DCA_PROVIDER_REMOVE:
1370 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1371 dca_remove_requester(dev);
1372 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1373 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1374 IXGBE_DCA_CTRL_DCA_DISABLE);
1375 }
1376 break;
1377 }
1378
1379 return 0;
1380}
1381
1382#endif
1383
1384#define IXGBE_RSS_L4_TYPES_MASK \
1385 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
1386 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
1387 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
1388 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
1389
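/* copy the RSS hash from the Rx descriptor into the skb, tagging it as an
 * L4 or L3 hash based on the reported RSS type
 */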
1390static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1391 union ixgbe_adv_rx_desc *rx_desc,
1392 struct sk_buff *skb)
1393{
1394 u16 rss_type;
1395
1396 if (!(ring->netdev->features & NETIF_F_RXHASH))
1397 return;
1398
1399 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
1400 IXGBE_RXDADV_RSSTYPE_MASK;
1401
1402 if (!rss_type)
1403 return;
1404
1405 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1406 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
1407 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1408}
1409
1410#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
1418static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1419 union ixgbe_adv_rx_desc *rx_desc)
1420{
1421 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1422
1423 return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1424 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1425 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1426 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1427}
1428
1429#endif
1430
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
1436static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1437 union ixgbe_adv_rx_desc *rx_desc,
1438 struct sk_buff *skb)
1439{
1440 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1441 __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
1442 bool encap_pkt = false;
1443
1444 skb_checksum_none_assert(skb);
1445
1446
1447 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1448 return;
1449
1450 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) &&
1451 (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
1452 encap_pkt = true;
1453 skb->encapsulation = 1;
1454 }
1455
1456
1457 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1458 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1459 ring->rx_stats.csum_err++;
1460 return;
1461 }
1462
1463 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1464 return;
1465
1466 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
1471 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1472 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1473 return;
1474
1475 ring->rx_stats.csum_err++;
1476 return;
1477 }
1478
1479
1480 skb->ip_summed = CHECKSUM_UNNECESSARY;
1481 if (encap_pkt) {
1482 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1483 return;
1484
1485 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1486 skb->ip_summed = CHECKSUM_NONE;
1487 return;
1488 }
1489
1490 skb->csum_level = 1;
1491 }
1492}
1493
1494static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1495 struct ixgbe_rx_buffer *bi)
1496{
1497 struct page *page = bi->page;
1498 dma_addr_t dma;
1499
	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);

	/*
	 * if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbe_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}
1525
1526 bi->dma = dma;
1527 bi->page = page;
1528 bi->page_offset = 0;
1529
1530 return true;
1531}
1532
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
1538void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1539{
1540 union ixgbe_adv_rx_desc *rx_desc;
1541 struct ixgbe_rx_buffer *bi;
1542 u16 i = rx_ring->next_to_use;
1543
1544
1545 if (!cleaned_count)
1546 return;
1547
1548 rx_desc = IXGBE_RX_DESC(rx_ring, i);
1549 bi = &rx_ring->rx_buffer_info[i];
1550 i -= rx_ring->count;
1551
1552 do {
1553 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1554 break;
1555
		/*
		 * Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1561
1562 rx_desc++;
1563 bi++;
1564 i++;
1565 if (unlikely(!i)) {
1566 rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1567 bi = rx_ring->rx_buffer_info;
1568 i -= rx_ring->count;
1569 }
1570
1571
1572 rx_desc->wb.upper.status_error = 0;
1573
1574 cleaned_count--;
1575 } while (cleaned_count);
1576
1577 i += rx_ring->count;
1578
1579 if (rx_ring->next_to_use != i) {
1580 rx_ring->next_to_use = i;
1581
1582
1583 rx_ring->next_to_alloc = i;
1584
1585
1586
1587
1588
1589
1590 wmb();
1591 writel(i, rx_ring->tail);
1592 }
1593}
1594
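/* set gso_size for an RSC-coalesced frame so the stack can segment it again
 * if it has to be forwarded
 */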
1595static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1596 struct sk_buff *skb)
1597{
1598 u16 hdr_len = skb_headlen(skb);
1599
1600
1601 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1602 IXGBE_CB(skb)->append_cnt);
1603 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1604}
1605
1606static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1607 struct sk_buff *skb)
1608{
1609
1610 if (!IXGBE_CB(skb)->append_cnt)
1611 return;
1612
1613 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1614 rx_ring->rx_stats.rsc_flush++;
1615
1616 ixgbe_set_rsc_gso_size(rx_ring, skb);
1617
1618
1619 IXGBE_CB(skb)->append_cnt = 0;
1620}
1621
/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
1632static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1633 union ixgbe_adv_rx_desc *rx_desc,
1634 struct sk_buff *skb)
1635{
1636 struct net_device *dev = rx_ring->netdev;
1637 u32 flags = rx_ring->q_vector->adapter->flags;
1638
1639 ixgbe_update_rsc_stats(rx_ring, skb);
1640
1641 ixgbe_rx_hash(rx_ring, rx_desc, skb);
1642
1643 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1644
1645 if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
1646 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
1647
1648 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1649 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1650 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1651 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1652 }
1653
1654 skb_record_rx_queue(skb, rx_ring->queue_index);
1655
1656 skb->protocol = eth_type_trans(skb, dev);
1657}
1658
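/* hand a completed skb to the stack: directly when busy polling, otherwise
 * through GRO
 */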
1659static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1660 struct sk_buff *skb)
1661{
1662 skb_mark_napi_id(skb, &q_vector->napi);
1663 if (ixgbe_qv_busy_polling(q_vector))
1664 netif_receive_skb(skb);
1665 else
1666 napi_gro_receive(&q_vector->napi, skb);
1667}
1668
/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
1680static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1681 union ixgbe_adv_rx_desc *rx_desc,
1682 struct sk_buff *skb)
1683{
1684 u32 ntc = rx_ring->next_to_clean + 1;
1685
1686
1687 ntc = (ntc < rx_ring->count) ? ntc : 0;
1688 rx_ring->next_to_clean = ntc;
1689
1690 prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1691
1692
1693 if (ring_is_rsc_enabled(rx_ring)) {
1694 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1695 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1696
1697 if (unlikely(rsc_enabled)) {
1698 u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1699
1700 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1701 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1702
1703
1704 ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1705 ntc &= IXGBE_RXDADV_NEXTP_MASK;
1706 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1707 }
1708 }
1709
1710
1711 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1712 return false;
1713
1714
1715 rx_ring->rx_buffer_info[ntc].skb = skb;
1716 rx_ring->rx_stats.non_eop_descs++;
1717
1718 return true;
1719}
1720
/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
1733static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1734 struct sk_buff *skb)
1735{
1736 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1737 unsigned char *va;
1738 unsigned int pull_len;
1739
1740
1741
1742
1743
1744
1745 va = skb_frag_address(frag);
1746
1747
1748
1749
1750
1751 pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
1752
1753
1754 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1755
1756
1757 skb_frag_size_sub(frag, pull_len);
1758 frag->page_offset += pull_len;
1759 skb->data_len -= pull_len;
1760 skb->tail += pull_len;
1761}
1762
/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */
1773static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1774 struct sk_buff *skb)
1775{
1776
1777 if (unlikely(IXGBE_CB(skb)->page_released)) {
1778 dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
1779 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1780 IXGBE_CB(skb)->page_released = false;
1781 } else {
1782 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1783
1784 dma_sync_single_range_for_cpu(rx_ring->dev,
1785 IXGBE_CB(skb)->dma,
1786 frag->page_offset,
1787 ixgbe_rx_bufsz(rx_ring),
1788 DMA_FROM_DEVICE);
1789 }
1790 IXGBE_CB(skb)->dma = 0;
1791}
1792
/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
1811static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1812 union ixgbe_adv_rx_desc *rx_desc,
1813 struct sk_buff *skb)
1814{
1815 struct net_device *netdev = rx_ring->netdev;
1816
1817
1818 if (unlikely(ixgbe_test_staterr(rx_desc,
1819 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1820 !(netdev->features & NETIF_F_RXALL))) {
1821 dev_kfree_skb_any(skb);
1822 return true;
1823 }
1824
1825
1826 if (skb_is_nonlinear(skb))
1827 ixgbe_pull_tail(rx_ring, skb);
1828
1829#ifdef IXGBE_FCOE
1830
1831 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1832 return false;
1833
1834#endif
1835
1836 if (eth_skb_pad(skb))
1837 return true;
1838
1839 return false;
1840}
1841
/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
1849static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1850 struct ixgbe_rx_buffer *old_buff)
1851{
1852 struct ixgbe_rx_buffer *new_buff;
1853 u16 nta = rx_ring->next_to_alloc;
1854
1855 new_buff = &rx_ring->rx_buffer_info[nta];
1856
1857
1858 nta++;
1859 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1860
1861
1862 *new_buff = *old_buff;
1863
1864
1865 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
1866 new_buff->page_offset,
1867 ixgbe_rx_bufsz(rx_ring),
1868 DMA_FROM_DEVICE);
1869}
1870
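/* a page cannot be recycled if it comes from a remote NUMA node or from the
 * pfmemalloc emergency reserves
 */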
1871static inline bool ixgbe_page_is_reserved(struct page *page)
1872{
1873 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1874}
1875
/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
1891static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
1892 struct ixgbe_rx_buffer *rx_buffer,
1893 union ixgbe_adv_rx_desc *rx_desc,
1894 struct sk_buff *skb)
1895{
1896 struct page *page = rx_buffer->page;
1897 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
1898#if (PAGE_SIZE < 8192)
1899 unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
1900#else
1901 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
1902 unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
1903 ixgbe_rx_bufsz(rx_ring);
1904#endif
1905
1906 if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
1907 unsigned char *va = page_address(page) + rx_buffer->page_offset;
1908
1909 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1910
1911
1912 if (likely(!ixgbe_page_is_reserved(page)))
1913 return true;
1914
1915
1916 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1917 return false;
1918 }
1919
1920 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1921 rx_buffer->page_offset, size, truesize);
1922
1923
1924 if (unlikely(ixgbe_page_is_reserved(page)))
1925 return false;
1926
1927#if (PAGE_SIZE < 8192)
1928
1929 if (unlikely(page_count(page) != 1))
1930 return false;
1931
1932
1933 rx_buffer->page_offset ^= truesize;
1934#else
1935
1936 rx_buffer->page_offset += truesize;
1937
1938 if (rx_buffer->page_offset > last_offset)
1939 return false;
1940#endif
1941
1942
1943
1944
1945 atomic_inc(&page->_count);
1946
1947 return true;
1948}
1949
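/* pull the Rx buffer for next_to_clean, allocate an skb for it if needed,
 * attach the page data, and either recycle or unmap the page
 */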
1950static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
1951 union ixgbe_adv_rx_desc *rx_desc)
1952{
1953 struct ixgbe_rx_buffer *rx_buffer;
1954 struct sk_buff *skb;
1955 struct page *page;
1956
1957 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1958 page = rx_buffer->page;
1959 prefetchw(page);
1960
1961 skb = rx_buffer->skb;
1962
1963 if (likely(!skb)) {
1964 void *page_addr = page_address(page) +
1965 rx_buffer->page_offset;
1966
1967
1968 prefetch(page_addr);
1969#if L1_CACHE_BYTES < 128
1970 prefetch(page_addr + L1_CACHE_BYTES);
1971#endif
1972
1973
1974 skb = napi_alloc_skb(&rx_ring->q_vector->napi,
1975 IXGBE_RX_HDR_SIZE);
1976 if (unlikely(!skb)) {
1977 rx_ring->rx_stats.alloc_rx_buff_failed++;
1978 return NULL;
1979 }
1980
1981
1982
1983
1984
1985
1986 prefetchw(skb->data);
1987
1988
1989
1990
1991
1992
1993
1994 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1995 goto dma_sync;
1996
1997 IXGBE_CB(skb)->dma = rx_buffer->dma;
1998 } else {
1999 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2000 ixgbe_dma_sync_frag(rx_ring, skb);
2001
2002dma_sync:
2003
2004 dma_sync_single_range_for_cpu(rx_ring->dev,
2005 rx_buffer->dma,
2006 rx_buffer->page_offset,
2007 ixgbe_rx_bufsz(rx_ring),
2008 DMA_FROM_DEVICE);
2009
2010 rx_buffer->skb = NULL;
2011 }
2012
2013
2014 if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
2015
2016 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2017 } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
2018
2019 IXGBE_CB(skb)->page_released = true;
2020 } else {
2021
2022 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
2023 ixgbe_rx_pg_size(rx_ring),
2024 DMA_FROM_DEVICE);
2025 }
2026
2027
2028 rx_buffer->page = NULL;
2029
2030 return skb;
2031}
2032
/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
2046static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2047 struct ixgbe_ring *rx_ring,
2048 const int budget)
2049{
2050 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2051#ifdef IXGBE_FCOE
2052 struct ixgbe_adapter *adapter = q_vector->adapter;
2053 int ddp_bytes;
2054 unsigned int mss = 0;
2055#endif
2056 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2057
2058 while (likely(total_rx_packets < budget)) {
2059 union ixgbe_adv_rx_desc *rx_desc;
2060 struct sk_buff *skb;
2061
2062
2063 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2064 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2065 cleaned_count = 0;
2066 }
2067
2068 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2069
2070 if (!rx_desc->wb.upper.status_error)
2071 break;
2072
2073
2074
2075
2076
2077 dma_rmb();
2078
2079
2080 skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
2081
2082
2083 if (!skb)
2084 break;
2085
2086 cleaned_count++;
2087
2088
2089 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2090 continue;
2091
2092
2093 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2094 continue;
2095
2096
2097 total_rx_bytes += skb->len;
2098
2099
2100 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2101
2102#ifdef IXGBE_FCOE
2103
2104 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2105 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2106
2107 if (ddp_bytes > 0) {
2108 if (!mss) {
2109 mss = rx_ring->netdev->mtu -
2110 sizeof(struct fcoe_hdr) -
2111 sizeof(struct fc_frame_header) -
2112 sizeof(struct fcoe_crc_eof);
2113 if (mss > 512)
2114 mss &= ~511;
2115 }
2116 total_rx_bytes += ddp_bytes;
2117 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2118 mss);
2119 }
2120 if (!ddp_bytes) {
2121 dev_kfree_skb_any(skb);
2122 continue;
2123 }
2124 }
2125
2126#endif
2127 ixgbe_rx_skb(q_vector, skb);
2128
2129
2130 total_rx_packets++;
2131 }
2132
2133 u64_stats_update_begin(&rx_ring->syncp);
2134 rx_ring->stats.packets += total_rx_packets;
2135 rx_ring->stats.bytes += total_rx_bytes;
2136 u64_stats_update_end(&rx_ring->syncp);
2137 q_vector->rx.total_packets += total_rx_packets;
2138 q_vector->rx.total_bytes += total_rx_bytes;
2139
2140 return total_rx_packets;
2141}
2142
2143#ifdef CONFIG_NET_RX_BUSY_POLL
2144
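/* must be called with local_bh_disable()d */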
2145static int ixgbe_low_latency_recv(struct napi_struct *napi)
2146{
2147 struct ixgbe_q_vector *q_vector =
2148 container_of(napi, struct ixgbe_q_vector, napi);
2149 struct ixgbe_adapter *adapter = q_vector->adapter;
2150 struct ixgbe_ring *ring;
2151 int found = 0;
2152
2153 if (test_bit(__IXGBE_DOWN, &adapter->state))
2154 return LL_FLUSH_FAILED;
2155
2156 if (!ixgbe_qv_lock_poll(q_vector))
2157 return LL_FLUSH_BUSY;
2158
2159 ixgbe_for_each_ring(ring, q_vector->rx) {
2160 found = ixgbe_clean_rx_irq(q_vector, ring, 4);
2161#ifdef BP_EXTENDED_STATS
2162 if (found)
2163 ring->stats.cleaned += found;
2164 else
2165 ring->stats.misses++;
2166#endif
2167 if (found)
2168 break;
2169 }
2170
2171 ixgbe_qv_unlock_poll(q_vector);
2172
2173 return found;
2174}
2175#endif
2176
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
2184static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2185{
2186 struct ixgbe_q_vector *q_vector;
2187 int v_idx;
2188 u32 mask;
2189
2190
2191 if (adapter->num_vfs > 32) {
2192 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
2193 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2194 }
2195
2196
2197
2198
2199
2200 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2201 struct ixgbe_ring *ring;
2202 q_vector = adapter->q_vector[v_idx];
2203
2204 ixgbe_for_each_ring(ring, q_vector->rx)
2205 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2206
2207 ixgbe_for_each_ring(ring, q_vector->tx)
2208 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2209
2210 ixgbe_write_eitr(q_vector);
2211 }
2212
2213 switch (adapter->hw.mac.type) {
2214 case ixgbe_mac_82598EB:
2215 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2216 v_idx);
2217 break;
2218 case ixgbe_mac_82599EB:
2219 case ixgbe_mac_X540:
2220 case ixgbe_mac_X550:
2221 case ixgbe_mac_X550EM_x:
2222 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2223 break;
2224 default:
2225 break;
2226 }
2227 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2228
2229
2230 mask = IXGBE_EIMS_ENABLE_MASK;
2231 mask &= ~(IXGBE_EIMS_OTHER |
2232 IXGBE_EIMS_MAILBOX |
2233 IXGBE_EIMS_LSC);
2234
2235 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2236}
2237
2238enum latency_range {
2239 lowest_latency = 0,
2240 low_latency = 1,
2241 bulk_latency = 2,
2242 latency_invalid = 255
2243};
2244
/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packet and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were computed based on theoretical maximum wire speed and the
 * thresholds were set based on testing data as well as attempting to
 * minimize response time while increasing bulk throughput.
 **/
2260static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2261 struct ixgbe_ring_container *ring_container)
2262{
2263 int bytes = ring_container->total_bytes;
2264 int packets = ring_container->total_packets;
2265 u32 timepassed_us;
2266 u64 bytes_perint;
2267 u8 itr_setting = ring_container->itr;
2268
2269 if (packets == 0)
2270 return;
2271
	/* simple throttlerate management
	 *   0-10MB/s   lowest (100000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (12000 ints/s)
	 */
	/* what was last interrupt timeslice? */
2278 timepassed_us = q_vector->itr >> 2;
2279 if (timepassed_us == 0)
2280 return;
2281
2282 bytes_perint = bytes / timepassed_us;
2283
2284 switch (itr_setting) {
2285 case lowest_latency:
2286 if (bytes_perint > 10)
2287 itr_setting = low_latency;
2288 break;
2289 case low_latency:
2290 if (bytes_perint > 20)
2291 itr_setting = bulk_latency;
2292 else if (bytes_perint <= 10)
2293 itr_setting = lowest_latency;
2294 break;
2295 case bulk_latency:
2296 if (bytes_perint <= 20)
2297 itr_setting = low_latency;
2298 break;
2299 }
2300
2301
2302 ring_container->total_bytes = 0;
2303 ring_container->total_packets = 0;
2304
2305
2306 ring_container->itr = itr_setting;
2307}
2308
/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
2317void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2318{
2319 struct ixgbe_adapter *adapter = q_vector->adapter;
2320 struct ixgbe_hw *hw = &adapter->hw;
2321 int v_idx = q_vector->v_idx;
2322 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2323
2324 switch (adapter->hw.mac.type) {
2325 case ixgbe_mac_82598EB:
2326
2327 itr_reg |= (itr_reg << 16);
2328 break;
2329 case ixgbe_mac_82599EB:
2330 case ixgbe_mac_X540:
2331 case ixgbe_mac_X550:
2332 case ixgbe_mac_X550EM_x:
2333
2334
2335
2336
2337 itr_reg |= IXGBE_EITR_CNT_WDIS;
2338 break;
2339 default:
2340 break;
2341 }
2342 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2343}
2344
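/* pick a new EITR value from the per-ring ITR settings and, if it changed,
 * smooth it and write it to hardware
 */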
2345static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2346{
2347 u32 new_itr = q_vector->itr;
2348 u8 current_itr;
2349
2350 ixgbe_update_itr(q_vector, &q_vector->tx);
2351 ixgbe_update_itr(q_vector, &q_vector->rx);
2352
2353 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
2354
2355 switch (current_itr) {
2356
2357 case lowest_latency:
2358 new_itr = IXGBE_100K_ITR;
2359 break;
2360 case low_latency:
2361 new_itr = IXGBE_20K_ITR;
2362 break;
2363 case bulk_latency:
2364 new_itr = IXGBE_12K_ITR;
2365 break;
2366 default:
2367 break;
2368 }
2369
2370 if (new_itr != q_vector->itr) {
2371
2372 new_itr = (10 * new_itr * q_vector->itr) /
2373 ((9 * new_itr) + q_vector->itr);
2374
2375
2376 q_vector->itr = new_itr;
2377
2378 ixgbe_write_eitr(q_vector);
2379 }
2380}
2381
/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/
2386static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2387{
2388 struct ixgbe_hw *hw = &adapter->hw;
2389 u32 eicr = adapter->interrupt_event;
2390
2391 if (test_bit(__IXGBE_DOWN, &adapter->state))
2392 return;
2393
2394 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2395 !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2396 return;
2397
2398 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2399
2400 switch (hw->device_id) {
2401 case IXGBE_DEV_ID_82599_T3_LOM:
		/*
		 * Since the warning interrupt is for both ports
		 * we don't have to check if:
		 *  - This interrupt wasn't for our port.
		 *  - We may have missed the interrupt so always have to
		 *    check if we got a LSC
		 */
2409 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2410 !(eicr & IXGBE_EICR_LSC))
2411 return;
2412
2413 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2414 u32 speed;
2415 bool link_up = false;
2416
2417 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2418
2419 if (link_up)
2420 return;
2421 }
2422
2423
2424 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2425 return;
2426
2427 break;
2428 default:
2429 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2430 return;
2431 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2432 return;
2433 break;
2434 }
2435 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2436
2437 adapter->interrupt_event = 0;
2438}
2439
2440static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2441{
2442 struct ixgbe_hw *hw = &adapter->hw;
2443
2444 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2445 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2446 e_crit(probe, "Fan has stopped, replace the adapter\n");
2447
2448 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2449 }
2450}
2451
2452static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2453{
2454 struct ixgbe_hw *hw = &adapter->hw;
2455
2456 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2457 return;
2458
2459 switch (adapter->hw.mac.type) {
2460 case ixgbe_mac_82599EB:
		/*
		 * Need to check link state so complete overtemp check
		 * on service task
		 */
2465 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2466 (eicr & IXGBE_EICR_LSC)) &&
2467 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2468 adapter->interrupt_event = eicr;
2469 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2470 ixgbe_service_event_schedule(adapter);
2471 return;
2472 }
2473 return;
2474 case ixgbe_mac_X540:
2475 if (!(eicr & IXGBE_EICR_TS))
2476 return;
2477 break;
2478 default:
2479 return;
2480 }
2481
2482 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2483}
2484
2485static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2486{
2487 switch (hw->mac.type) {
2488 case ixgbe_mac_82598EB:
2489 if (hw->phy.type == ixgbe_phy_nl)
2490 return true;
2491 return false;
2492 case ixgbe_mac_82599EB:
2493 case ixgbe_mac_X550EM_x:
2494 switch (hw->mac.ops.get_media_type(hw)) {
2495 case ixgbe_media_type_fiber:
2496 case ixgbe_media_type_fiber_qsfp:
2497 return true;
2498 default:
2499 return false;
2500 }
2501 default:
2502 return false;
2503 }
2504}
2505
2506static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2507{
2508 struct ixgbe_hw *hw = &adapter->hw;
2509 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2510
2511 if (!ixgbe_is_sfp(hw))
2512 return;
2513
	/* Later MAC's use different SDP */
2515 if (hw->mac.type >= ixgbe_mac_X540)
2516 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2517
2518 if (eicr & eicr_mask) {
2519
2520 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2521 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2522 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2523 adapter->sfp_poll_time = 0;
2524 ixgbe_service_event_schedule(adapter);
2525 }
2526 }
2527
2528 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2529 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2530
2531 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2532 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2533 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2534 ixgbe_service_event_schedule(adapter);
2535 }
2536 }
2537}
2538
2539static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2540{
2541 struct ixgbe_hw *hw = &adapter->hw;
2542
2543 adapter->lsc_int++;
2544 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2545 adapter->link_check_timeout = jiffies;
2546 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2547 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2548 IXGBE_WRITE_FLUSH(hw);
2549 ixgbe_service_event_schedule(adapter);
2550 }
2551}
2552
2553static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2554 u64 qmask)
2555{
2556 u32 mask;
2557 struct ixgbe_hw *hw = &adapter->hw;
2558
2559 switch (hw->mac.type) {
2560 case ixgbe_mac_82598EB:
2561 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2562 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2563 break;
2564 case ixgbe_mac_82599EB:
2565 case ixgbe_mac_X540:
2566 case ixgbe_mac_X550:
2567 case ixgbe_mac_X550EM_x:
2568 mask = (qmask & 0xFFFFFFFF);
2569 if (mask)
2570 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2571 mask = (qmask >> 32);
2572 if (mask)
2573 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2574 break;
2575 default:
2576 break;
2577 }
2578
2579}
2580
2581static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2582 u64 qmask)
2583{
2584 u32 mask;
2585 struct ixgbe_hw *hw = &adapter->hw;
2586
2587 switch (hw->mac.type) {
2588 case ixgbe_mac_82598EB:
2589 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2590 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2591 break;
2592 case ixgbe_mac_82599EB:
2593 case ixgbe_mac_X540:
2594 case ixgbe_mac_X550:
2595 case ixgbe_mac_X550EM_x:
2596 mask = (qmask & 0xFFFFFFFF);
2597 if (mask)
2598 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2599 mask = (qmask >> 32);
2600 if (mask)
2601 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2602 break;
2603 default:
2604 break;
2605 }
2606
2607}
2608
/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
2613static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2614 bool flush)
2615{
2616 struct ixgbe_hw *hw = &adapter->hw;
2617 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2618
2619
2620 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2621 mask &= ~IXGBE_EIMS_LSC;
2622
2623 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2624 switch (adapter->hw.mac.type) {
2625 case ixgbe_mac_82599EB:
2626 mask |= IXGBE_EIMS_GPI_SDP0(hw);
2627 break;
2628 case ixgbe_mac_X540:
2629 case ixgbe_mac_X550:
2630 case ixgbe_mac_X550EM_x:
2631 mask |= IXGBE_EIMS_TS;
2632 break;
2633 default:
2634 break;
2635 }
2636 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2637 mask |= IXGBE_EIMS_GPI_SDP1(hw);
2638 switch (adapter->hw.mac.type) {
2639 case ixgbe_mac_82599EB:
2640 mask |= IXGBE_EIMS_GPI_SDP1(hw);
2641 mask |= IXGBE_EIMS_GPI_SDP2(hw);
		/* fall through */
2643 case ixgbe_mac_X540:
2644 case ixgbe_mac_X550:
2645 case ixgbe_mac_X550EM_x:
2646 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2647 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
2648 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
2649 mask |= IXGBE_EICR_GPI_SDP0_X540;
2650 mask |= IXGBE_EIMS_ECC;
2651 mask |= IXGBE_EIMS_MAILBOX;
2652 break;
2653 default:
2654 break;
2655 }
2656
2657 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2658 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
2659 mask |= IXGBE_EIMS_FLOW_DIR;
2660
2661 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2662 if (queues)
2663 ixgbe_irq_enable_queues(adapter, ~0);
2664 if (flush)
2665 IXGBE_WRITE_FLUSH(&adapter->hw);
2666}
2667
2668static irqreturn_t ixgbe_msix_other(int irq, void *data)
2669{
2670 struct ixgbe_adapter *adapter = data;
2671 struct ixgbe_hw *hw = &adapter->hw;
2672 u32 eicr;
2673
	/*
	 * Workaround for Silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which later be done
	 * with the write to EICR.
	 */
2680 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2681
	/* The lower 16bits of the EICR register are for the queue interrupts
	 * which should be masked here in order to not accidentally clear them if
	 * the bits are high when ixgbe_msix_other is called. There is a race
	 * condition here, namely in that it is possible for the bit to be
	 * set just before the write to EICR occurs.
	 */
2689 eicr &= 0xFFFF0000;
2690
2691 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2692
2693 if (eicr & IXGBE_EICR_LSC)
2694 ixgbe_check_lsc(adapter);
2695
2696 if (eicr & IXGBE_EICR_MAILBOX)
2697 ixgbe_msg_task(adapter);
2698
2699 switch (hw->mac.type) {
2700 case ixgbe_mac_82599EB:
2701 case ixgbe_mac_X540:
2702 case ixgbe_mac_X550:
2703 case ixgbe_mac_X550EM_x:
2704 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
2705 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2706 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
2707 ixgbe_service_event_schedule(adapter);
2708 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2709 IXGBE_EICR_GPI_SDP0_X540);
2710 }
2711 if (eicr & IXGBE_EICR_ECC) {
2712 e_info(link, "Received ECC Err, initiating reset\n");
2713 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
2714 ixgbe_service_event_schedule(adapter);
2715 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2716 }
2717
2718 if (eicr & IXGBE_EICR_FLOW_DIR) {
2719 int reinit_count = 0;
2720 int i;
2721 for (i = 0; i < adapter->num_tx_queues; i++) {
2722 struct ixgbe_ring *ring = adapter->tx_ring[i];
2723 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
2724 &ring->state))
2725 reinit_count++;
2726 }
2727 if (reinit_count) {
2728
2729 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2730 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
2731 ixgbe_service_event_schedule(adapter);
2732 }
2733 }
2734 ixgbe_check_sfp_event(adapter, eicr);
2735 ixgbe_check_overtemp_event(adapter, eicr);
2736 break;
2737 default:
2738 break;
2739 }
2740
2741 ixgbe_check_fan_failure(adapter, eicr);
2742
2743 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2744 ixgbe_ptp_check_pps_event(adapter);
2745
2746
2747 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2748 ixgbe_irq_enable(adapter, false, false);
2749
2750 return IRQ_HANDLED;
2751}
2752
2753static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
2754{
2755 struct ixgbe_q_vector *q_vector = data;
2756
2757
2758
2759 if (q_vector->rx.ring || q_vector->tx.ring)
2760 napi_schedule_irqoff(&q_vector->napi);
2761
2762 return IRQ_HANDLED;
2763}
2764
/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used for legacy and MSI, NAPI mode
 **/
2772int ixgbe_poll(struct napi_struct *napi, int budget)
2773{
2774 struct ixgbe_q_vector *q_vector =
2775 container_of(napi, struct ixgbe_q_vector, napi);
2776 struct ixgbe_adapter *adapter = q_vector->adapter;
2777 struct ixgbe_ring *ring;
2778 int per_ring_budget, work_done = 0;
2779 bool clean_complete = true;
2780
2781#ifdef CONFIG_IXGBE_DCA
2782 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2783 ixgbe_update_dca(q_vector);
2784#endif
2785
2786 ixgbe_for_each_ring(ring, q_vector->tx)
2787 clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
2788
	/* Exit if we are called by netpoll or busy polling is active */
2790 if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
2791 return budget;
2792
	/* attempt to distribute budget to each queue fairly, but don't
	 * allow the budget to go below 1 because we'll exit polling */
2795 if (q_vector->rx.count > 1)
2796 per_ring_budget = max(budget/q_vector->rx.count, 1);
2797 else
2798 per_ring_budget = budget;
2799
2800 ixgbe_for_each_ring(ring, q_vector->rx) {
2801 int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
2802 per_ring_budget);
2803
2804 work_done += cleaned;
2805 clean_complete &= (cleaned < per_ring_budget);
2806 }
2807
2808 ixgbe_qv_unlock_napi(q_vector);
2809
2810 if (!clean_complete)
2811 return budget;
2812
2813
2814 napi_complete_done(napi, work_done);
2815 if (adapter->rx_itr_setting & 1)
2816 ixgbe_set_itr(q_vector);
2817 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2818 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
2819
2820 return 0;
2821}
2822
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
2830static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2831{
2832 struct net_device *netdev = adapter->netdev;
2833 int vector, err;
2834 int ri = 0, ti = 0;
2835
2836 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2837 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2838 struct msix_entry *entry = &adapter->msix_entries[vector];
2839
2840 if (q_vector->tx.ring && q_vector->rx.ring) {
2841 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2842 "%s-%s-%d", netdev->name, "TxRx", ri++);
2843 ti++;
2844 } else if (q_vector->rx.ring) {
2845 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2846 "%s-%s-%d", netdev->name, "rx", ri++);
2847 } else if (q_vector->tx.ring) {
2848 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2849 "%s-%s-%d", netdev->name, "tx", ti++);
2850 } else {
2851
2852 continue;
2853 }
2854 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
2855 q_vector->name, q_vector);
2856 if (err) {
2857 e_err(probe, "request_irq failed for MSIX interrupt "
2858 "Error: %d\n", err);
2859 goto free_queue_irqs;
2860 }
2861
2862 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2863
2864 irq_set_affinity_hint(entry->vector,
2865 &q_vector->affinity_mask);
2866 }
2867 }
2868
2869 err = request_irq(adapter->msix_entries[vector].vector,
2870 ixgbe_msix_other, 0, netdev->name, adapter);
2871 if (err) {
2872 e_err(probe, "request_irq for msix_other failed: %d\n", err);
2873 goto free_queue_irqs;
2874 }
2875
2876 return 0;
2877
2878free_queue_irqs:
2879 while (vector) {
2880 vector--;
2881 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
2882 NULL);
2883 free_irq(adapter->msix_entries[vector].vector,
2884 adapter->q_vector[vector]);
2885 }
2886 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2887 pci_disable_msix(adapter->pdev);
2888 kfree(adapter->msix_entries);
2889 adapter->msix_entries = NULL;
2890 return err;
2891}
2892
/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
2898static irqreturn_t ixgbe_intr(int irq, void *data)
2899{
2900 struct ixgbe_adapter *adapter = data;
2901 struct ixgbe_hw *hw = &adapter->hw;
2902 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2903 u32 eicr;
2904
	/*
	 * Workaround for silicon errata #26 on 82598.  Mask the interrupt
	 * before the read of EICR.
	 */
2909 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2910
2911
2912
2913 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2914 if (!eicr) {
		/*
		 * shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM
		 * finish the workaround of silicon errata on 82598.  Unmask
		 * the interrupt that we masked before the EICR read.
		 */
2922 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2923 ixgbe_irq_enable(adapter, true, true);
2924 return IRQ_NONE;
2925 }
2926
2927 if (eicr & IXGBE_EICR_LSC)
2928 ixgbe_check_lsc(adapter);
2929
2930 switch (hw->mac.type) {
2931 case ixgbe_mac_82599EB:
2932 ixgbe_check_sfp_event(adapter, eicr);
		/* Fall through */
2934 case ixgbe_mac_X540:
2935 case ixgbe_mac_X550:
2936 case ixgbe_mac_X550EM_x:
2937 if (eicr & IXGBE_EICR_ECC) {
2938 e_info(link, "Received ECC Err, initiating reset\n");
2939 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
2940 ixgbe_service_event_schedule(adapter);
2941 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2942 }
2943 ixgbe_check_overtemp_event(adapter, eicr);
2944 break;
2945 default:
2946 break;
2947 }
2948
2949 ixgbe_check_fan_failure(adapter, eicr);
2950 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2951 ixgbe_ptp_check_pps_event(adapter);
2952
2953
2954 napi_schedule_irqoff(&q_vector->napi);
2955
	/*
	 * re-enable link(maybe) and non-queue interrupts, no flush.
	 * ixgbe_poll will re-enable the queue interrupts
	 */
2960 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2961 ixgbe_irq_enable(adapter, false, false);
2962
2963 return IRQ_HANDLED;
2964}
2965
/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
2973static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2974{
2975 struct net_device *netdev = adapter->netdev;
2976 int err;
2977
2978 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2979 err = ixgbe_request_msix_irqs(adapter);
2980 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
2981 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
2982 netdev->name, adapter);
2983 else
2984 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
2985 netdev->name, adapter);
2986
2987 if (err)
2988 e_err(probe, "request_irq failed, Error %d\n", err);
2989
2990 return err;
2991}
2992
2993static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2994{
2995 int vector;
2996
2997 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2998 free_irq(adapter->pdev->irq, adapter);
2999 return;
3000 }
3001
3002 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3003 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3004 struct msix_entry *entry = &adapter->msix_entries[vector];
3005
3006
3007 if (!q_vector->rx.ring && !q_vector->tx.ring)
3008 continue;
3009
3010
3011 irq_set_affinity_hint(entry->vector, NULL);
3012
3013 free_irq(entry->vector, q_vector);
3014 }
3015
3016 free_irq(adapter->msix_entries[vector++].vector, adapter);
3017}
3018
/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
3023static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3024{
3025 switch (adapter->hw.mac.type) {
3026 case ixgbe_mac_82598EB:
3027 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3028 break;
3029 case ixgbe_mac_82599EB:
3030 case ixgbe_mac_X540:
3031 case ixgbe_mac_X550:
3032 case ixgbe_mac_X550EM_x:
3033 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3034 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3035 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3036 break;
3037 default:
3038 break;
3039 }
3040 IXGBE_WRITE_FLUSH(&adapter->hw);
3041 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3042 int vector;
3043
3044 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3045 synchronize_irq(adapter->msix_entries[vector].vector);
3046
3047 synchronize_irq(adapter->msix_entries[vector++].vector);
3048 } else {
3049 synchronize_irq(adapter->pdev->irq);
3050 }
3051}
3052
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
3057static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3058{
3059 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3060
3061 ixgbe_write_eitr(q_vector);
3062
3063 ixgbe_set_ivar(adapter, 0, 0, 0);
3064 ixgbe_set_ivar(adapter, 1, 0, 0);
3065
3066 e_info(hw, "Legacy interrupt IVAR setup done\n");
3067}
3068
/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
3076void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3077 struct ixgbe_ring *ring)
3078{
3079 struct ixgbe_hw *hw = &adapter->hw;
3080 u64 tdba = ring->dma;
3081 int wait_loop = 10;
3082 u32 txdctl = IXGBE_TXDCTL_ENABLE;
3083 u8 reg_idx = ring->reg_idx;
3084
3085
3086 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3087 IXGBE_WRITE_FLUSH(hw);
3088
3089 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3090 (tdba & DMA_BIT_MASK(32)));
3091 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3092 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3093 ring->count * sizeof(union ixgbe_adv_tx_desc));
3094 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3095 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3096 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3097
	/*
	 * set WTHRESH to encourage burst writeback, it should not be set
	 * higher than 1 when:
	 * - ITR is 0 as it could cause false TX hangs
	 * - ITR is set to > 100k int/sec and BQL is enabled
	 *
	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
3108 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3109 txdctl |= (1 << 16);
3110 else
3111 txdctl |= (8 << 16);
3112
	/*
	 * Setting PTHRESH to 32 both improves performance
	 * and avoids a TX hang with DFP enabled
	 */
3117 txdctl |= (1 << 8) |
3118 32;
3119
3120
3121 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3122 ring->atr_sample_rate = adapter->atr_sample_rate;
3123 ring->atr_count = 0;
3124 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3125 } else {
3126 ring->atr_sample_rate = 0;
3127 }
3128
3129
3130 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3131 struct ixgbe_q_vector *q_vector = ring->q_vector;
3132
3133 if (q_vector)
3134 netif_set_xps_queue(ring->netdev,
3135 &q_vector->affinity_mask,
3136 ring->queue_index);
3137 }
3138
3139 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3140
3141
3142 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3143
3144
3145 if (hw->mac.type == ixgbe_mac_82598EB &&
3146 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3147 return;
3148
3149
3150 do {
3151 usleep_range(1000, 2000);
3152 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3153 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3154 if (!wait_loop)
3155 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
3156}
3157
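/* ixgbe_setup_mtqc - program MTQC with the Tx queue layout (DCB traffic
 * classes and/or SR-IOV pools) while the Tx arbiter is disabled
 */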
3158static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3159{
3160 struct ixgbe_hw *hw = &adapter->hw;
3161 u32 rttdcs, mtqc;
3162 u8 tcs = netdev_get_num_tc(adapter->netdev);
3163
3164 if (hw->mac.type == ixgbe_mac_82598EB)
3165 return;
3166
3167
3168 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3169 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3170 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3171
3172
3173 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3174 mtqc = IXGBE_MTQC_VT_ENA;
3175 if (tcs > 4)
3176 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3177 else if (tcs > 1)
3178 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3179 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3180 mtqc |= IXGBE_MTQC_32VF;
3181 else
3182 mtqc |= IXGBE_MTQC_64VF;
3183 } else {
3184 if (tcs > 4)
3185 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3186 else if (tcs > 1)
3187 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3188 else
3189 mtqc = IXGBE_MTQC_64Q_1PB;
3190 }
3191
3192 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3193
3194
3195 if (tcs) {
3196 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3197 sectx |= IXGBE_SECTX_DCB;
3198 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3199 }
3200
3201
3202 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3203 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3204}
3205
/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
3212static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3213{
3214 struct ixgbe_hw *hw = &adapter->hw;
3215 u32 dmatxctl;
3216 u32 i;
3217
3218 ixgbe_setup_mtqc(adapter);
3219
3220 if (hw->mac.type != ixgbe_mac_82598EB) {
3221
3222 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3223 dmatxctl |= IXGBE_DMATXCTL_TE;
3224 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3225 }
3226
3227
3228 for (i = 0; i < adapter->num_tx_queues; i++)
3229 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3230}
3231
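/* the next two helpers set/clear SRRCTL.DROP_EN so a ring drops frames when
 * no Rx descriptors are available instead of back-pressuring the packet buffer
 */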
3232static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3233 struct ixgbe_ring *ring)
3234{
3235 struct ixgbe_hw *hw = &adapter->hw;
3236 u8 reg_idx = ring->reg_idx;
3237 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3238
3239 srrctl |= IXGBE_SRRCTL_DROP_EN;
3240
3241 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3242}
3243
3244static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3245 struct ixgbe_ring *ring)
3246{
3247 struct ixgbe_hw *hw = &adapter->hw;
3248 u8 reg_idx = ring->reg_idx;
3249 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3250
3251 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3252
3253 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3254}
3255
3256#ifdef CONFIG_IXGBE_DCB
3257void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3258#else
3259static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3260#endif
3261{
3262 int i;
3263 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3264
3265 if (adapter->ixgbe_ieee_pfc)
3266 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3278 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3279 for (i = 0; i < adapter->num_rx_queues; i++)
3280 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3281 } else {
3282 for (i = 0; i < adapter->num_rx_queues; i++)
3283 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3284 }
3285}
3286
3287#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3288
3289static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3290 struct ixgbe_ring *rx_ring)
3291{
3292 struct ixgbe_hw *hw = &adapter->hw;
3293 u32 srrctl;
3294 u8 reg_idx = rx_ring->reg_idx;
3295
3296 if (hw->mac.type == ixgbe_mac_82598EB) {
3297 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3298
3299
3300
3301
3302
3303 reg_idx &= mask;
3304 }
3305
3306
3307 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3308
3309
3310 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3311
3312
3313 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3314
3315 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3316}
3317
/**
 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
 * @adapter: device handle
 *
 *  - 82598/82599/X540:     128
 *  - X550(non-SRIOV mode): 512
 *  - X550(SRIOV mode):     64
 **/
3326u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3327{
3328 if (adapter->hw.mac.type < ixgbe_mac_X550)
3329 return 128;
3330 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3331 return 64;
3332 else
3333 return 512;
3334}
3335
/**
 * ixgbe_store_reta - Write the RETA table to HW
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
 */
3342void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3343{
3344 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3345 struct ixgbe_hw *hw = &adapter->hw;
3346 u32 reta = 0;
3347 u32 indices_multi;
3348 u8 *indir_tbl = adapter->rss_indir_tbl;
3349
	/* Fill out the redirection table as follows:
	 *  - 82598:      8 bit wide entries containing pair of 4 bit RSS
	 *    indices.
	 *  - 82599/X540: 8 bit wide entries containing 4 bit RSS index
	 *  - X550:       8 bit wide entries containing 6 bit RSS index
	 */
3356 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3357 indices_multi = 0x11;
3358 else
3359 indices_multi = 0x1;
3360
3361
3362 for (i = 0; i < reta_entries; i++) {
3363 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3364 if ((i & 3) == 3) {
3365 if (i < 128)
3366 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3367 else
3368 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3369 reta);
3370 reta = 0;
3371 }
3372 }
3373}
3374
/**
 * ixgbe_store_vfreta - Write the RETA table to HW in SR-IOV mode
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
 */
3381static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3382{
3383 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3384 struct ixgbe_hw *hw = &adapter->hw;
3385 u32 vfreta = 0;
3386 unsigned int pf_pool = adapter->num_vfs;
3387
3388
3389 for (i = 0; i < reta_entries; i++) {
3390 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3391 if ((i & 3) == 3) {
3392 IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
3393 vfreta);
3394 vfreta = 0;
3395 }
3396 }
3397}
3398
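/* ixgbe_setup_reta - program the RSS key and fill the redirection table with
 * a round-robin mapping of the PF Rx queues
 */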
3399static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3400{
3401 struct ixgbe_hw *hw = &adapter->hw;
3402 u32 i, j;
3403 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3404 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3405
3406
3407
3408
3409
3410 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
3411 rss_i = 2;
3412
3413
3414 for (i = 0; i < 10; i++)
3415 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3416
3417
3418 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3419
3420 for (i = 0, j = 0; i < reta_entries; i++, j++) {
3421 if (j == rss_i)
3422 j = 0;
3423
3424 adapter->rss_indir_tbl[i] = j;
3425 }
3426
3427 ixgbe_store_reta(adapter);
3428}
3429
3430static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3431{
3432 struct ixgbe_hw *hw = &adapter->hw;
3433 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3434 unsigned int pf_pool = adapter->num_vfs;
3435 int i, j;
3436
3437
3438 for (i = 0; i < 10; i++)
3439 IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
3440 adapter->rss_key[i]);
3441
3442
3443 for (i = 0, j = 0; i < 64; i++, j++) {
3444 if (j == rss_i)
3445 j = 0;
3446
3447 adapter->rss_indir_tbl[i] = j;
3448 }
3449
3450 ixgbe_store_vfreta(adapter);
3451}
3452
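/* ixgbe_setup_mrqc - select the multiple receive queue mode (RSS, DCB and/or
 * VMDq) and the packet header fields used for RSS hashing
 */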
3453static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3454{
3455 struct ixgbe_hw *hw = &adapter->hw;
3456 u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3457 u32 rxcsum;
3458
3459
3460 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3461 rxcsum |= IXGBE_RXCSUM_PCSD;
3462 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3463
3464 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3465 if (adapter->ring_feature[RING_F_RSS].mask)
3466 mrqc = IXGBE_MRQC_RSSEN;
3467 } else {
3468 u8 tcs = netdev_get_num_tc(adapter->netdev);
3469
3470 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3471 if (tcs > 4)
3472 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3473 else if (tcs > 1)
3474 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3475 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3476 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3477 else
3478 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3479 } else {
3480 if (tcs > 4)
3481 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3482 else if (tcs > 1)
3483 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3484 else
3485 mrqc = IXGBE_MRQC_RSSEN;
3486 }
3487 }
3488
3489
3490 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3491 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3492 IXGBE_MRQC_RSS_FIELD_IPV6 |
3493 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3494
3495 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3496 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3497 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3498 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3499
3500 netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
3501 if ((hw->mac.type >= ixgbe_mac_X550) &&
3502 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3503 unsigned int pf_pool = adapter->num_vfs;
3504
3505
3506 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
3507 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3508
3509
3510 ixgbe_setup_vfreta(adapter);
3511 vfmrqc = IXGBE_MRQC_RSSEN;
3512 vfmrqc |= rss_field;
3513 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
3514 } else {
3515 ixgbe_setup_reta(adapter);
3516 mrqc |= rss_field;
3517 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3518 }
3519}
3520
/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 **/
3526static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
3527 struct ixgbe_ring *ring)
3528{
3529 struct ixgbe_hw *hw = &adapter->hw;
3530 u32 rscctrl;
3531 u8 reg_idx = ring->reg_idx;
3532
3533 if (!ring_is_rsc_enabled(ring))
3534 return;
3535
3536 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
3537 rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	 * we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65536
	 */
3543 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3544 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
3545}
3546
3547#define IXGBE_MAX_RX_DESC_POLL 10
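/* poll until RXDCTL.ENABLE is read back as set for the given ring */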
3548static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3549 struct ixgbe_ring *ring)
3550{
3551 struct ixgbe_hw *hw = &adapter->hw;
3552 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3553 u32 rxdctl;
3554 u8 reg_idx = ring->reg_idx;
3555
3556 if (ixgbe_removed(hw->hw_addr))
3557 return;
3558
3559 if (hw->mac.type == ixgbe_mac_82598EB &&
3560 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3561 return;
3562
3563 do {
3564 usleep_range(1000, 2000);
3565 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3566 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3567
3568 if (!wait_loop) {
3569 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3570 "the polling period\n", reg_idx);
3571 }
3572}
3573
3574void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3575 struct ixgbe_ring *ring)
3576{
3577 struct ixgbe_hw *hw = &adapter->hw;
3578 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3579 u32 rxdctl;
3580 u8 reg_idx = ring->reg_idx;
3581
3582 if (ixgbe_removed(hw->hw_addr))
3583 return;
3584 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3585 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3586
3587
3588 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3589
3590 if (hw->mac.type == ixgbe_mac_82598EB &&
3591 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3592 return;
3593
3594
3595 do {
3596 udelay(10);
3597 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3598 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3599
3600 if (!wait_loop) {
3601 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3602 "the polling period\n", reg_idx);
3603 }
3604}
3605
3606void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3607 struct ixgbe_ring *ring)
3608{
3609 struct ixgbe_hw *hw = &adapter->hw;
3610 u64 rdba = ring->dma;
3611 u32 rxdctl;
3612 u8 reg_idx = ring->reg_idx;
3613
3614
3615 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3616 ixgbe_disable_rx_queue(adapter, ring);
3617
3618 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3619 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3620 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3621 ring->count * sizeof(union ixgbe_adv_rx_desc));
3622
3623 IXGBE_WRITE_FLUSH(hw);
3624
3625 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3626 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
3627 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
3628
3629 ixgbe_configure_srrctl(adapter, ring);
3630 ixgbe_configure_rscctl(adapter, ring);
3631
3632 if (hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * enable cache line friendly hardware writes:
		 * PTHRESH=32 descriptors (half the internal cache),
		 * this also removes ugly rx_no_buffer_count increment
		 * HTHRESH=4 descriptors (to minimize latency on fetch)
		 * WTHRESH=8 descriptors (to encourage burst writeback)
		 */
3640 rxdctl &= ~0x3FFFFF;
3641 rxdctl |= 0x080420;
3642 }
3643
3644
3645 rxdctl |= IXGBE_RXDCTL_ENABLE;
3646 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3647
3648 ixgbe_rx_desc_queue_enable(adapter, ring);
3649 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
3650}
3651
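/* ixgbe_setup_psrtype - program PSRTYPE for every active pool, selecting the
 * L2/IPv4/IPv6/TCP/UDP headers the hardware considers for packet split/RSS
 */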
3652static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3653{
3654 struct ixgbe_hw *hw = &adapter->hw;
3655 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
3656 u16 pool;
3657
3658
3659 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3660 IXGBE_PSRTYPE_UDPHDR |
3661 IXGBE_PSRTYPE_IPV4HDR |
3662 IXGBE_PSRTYPE_L2HDR |
3663 IXGBE_PSRTYPE_IPV6HDR;
3664
3665 if (hw->mac.type == ixgbe_mac_82598EB)
3666 return;
3667
3668 if (rss_i > 3)
3669 psrtype |= 2 << 29;
3670 else if (rss_i > 1)
3671 psrtype |= 1 << 29;
3672
3673 for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
3674 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
3675}
3676
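/* ixgbe_configure_virtualization - enable VMDq/SR-IOV: select the PF pool,
 * enable VF Rx/Tx, size the VT mode and apply per-VF anti-spoofing settings
 */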
3677static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3678{
3679 struct ixgbe_hw *hw = &adapter->hw;
3680 u32 reg_offset, vf_shift;
3681 u32 gcr_ext, vmdctl;
3682 int i;
3683
3684 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3685 return;
3686
3687 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3688 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
3689 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
3690 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
3691 vmdctl |= IXGBE_VT_CTL_REPLEN;
3692 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
3693
3694 vf_shift = VMDQ_P(0) % 32;
3695 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
3696
3697
3698 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
3699 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
3700 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
3701 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
3702 if (adapter->bridge_mode == BRIDGE_MODE_VEB)
3703 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3704
3705
3706 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
3707
3708
3709 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
3710
3711
3712
3713
3714
3715 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3716 case IXGBE_82599_VMDQ_8Q_MASK:
3717 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
3718 break;
3719 case IXGBE_82599_VMDQ_4Q_MASK:
3720 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
3721 break;
3722 default:
3723 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
3724 break;
3725 }
3726
3727 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3728
3729
3730
3731 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
3732 adapter->num_vfs);
3733
3734
3735
3736
3737 if (hw->mac.ops.set_ethertype_anti_spoofing) {
3738 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
3739 (IXGBE_ETQF_FILTER_EN |
3740 IXGBE_ETQF_TX_ANTISPOOF |
3741 IXGBE_ETH_P_LLDP));
3742
3743 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
3744 (IXGBE_ETQF_FILTER_EN |
3745 IXGBE_ETQF_TX_ANTISPOOF |
3746 ETH_P_PAUSE));
3747 }
3748
3749
3750 for (i = 0; i < adapter->num_vfs; i++) {
3751 if (!adapter->vfinfo[i].spoofchk_enabled)
3752 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
3753
3754
3755 if (hw->mac.ops.set_ethertype_anti_spoofing)
3756 hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
3757
3758
3759 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
3760 adapter->vfinfo[i].rss_query_enabled);
3761 }
3762}
3763
3764static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3765{
3766 struct ixgbe_hw *hw = &adapter->hw;
3767 struct net_device *netdev = adapter->netdev;
3768 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3769 struct ixgbe_ring *rx_ring;
3770 int i;
3771 u32 mhadd, hlreg0;
3772
3773#ifdef IXGBE_FCOE
3774
3775 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3776 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3777 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3778
3779#endif
3780
3781
3782 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3783 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
3784
3785 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3786 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3787 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3788 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3789
3790 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3791 }
3792
3793 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3794
3795 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3796 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3797
3798
3799
3800
3801
3802 for (i = 0; i < adapter->num_rx_queues; i++) {
3803 rx_ring = adapter->rx_ring[i];
3804 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3805 set_ring_rsc_enabled(rx_ring);
3806 else
3807 clear_ring_rsc_enabled(rx_ring);
3808 }
3809}
3810
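/* ixgbe_setup_rdrxctl - apply MAC specific RDRXCTL settings such as CRC
 * stripping and RSC coalescing fix-ups
 */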
3811static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3812{
3813 struct ixgbe_hw *hw = &adapter->hw;
3814 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3815
3816 switch (hw->mac.type) {
3817 case ixgbe_mac_82598EB:
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3829 break;
3830 case ixgbe_mac_X550:
3831 case ixgbe_mac_X550EM_x:
3832 if (adapter->num_vfs)
3833 rdrxctl |= IXGBE_RDRXCTL_PSP;
		/* fall through */
3835 case ixgbe_mac_82599EB:
3836 case ixgbe_mac_X540:
3837
3838 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3839 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3840 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3841
3842 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3843 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3844 break;
3845 default:
3846
3847 return;
3848 }
3849
3850 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3851}
3852
/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
3859static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3860{
3861 struct ixgbe_hw *hw = &adapter->hw;
3862 int i;
3863 u32 rxctrl, rfctl;
3864
3865
3866 hw->mac.ops.disable_rx(hw);
3867
3868 ixgbe_setup_psrtype(adapter);
3869 ixgbe_setup_rdrxctl(adapter);
3870
3871
3872 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
3873 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
3874 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
3875 rfctl |= IXGBE_RFCTL_RSC_DIS;
3876 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
3877
3878
3879 ixgbe_setup_mrqc(adapter);
3880
3881
3882 ixgbe_set_rx_buffer_len(adapter);
3883
3884
3885
3886
3887
3888 for (i = 0; i < adapter->num_rx_queues; i++)
3889 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
3890
3891 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3892
3893 if (hw->mac.type == ixgbe_mac_82598EB)
3894 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3895
3896
3897 rxctrl |= IXGBE_RXCTRL_RXEN;
3898 hw->mac.ops.enable_rx_dma(hw, rxctrl);
3899}
3900
3901static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
3902 __be16 proto, u16 vid)
3903{
3904 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3905 struct ixgbe_hw *hw = &adapter->hw;
3906
3907
3908 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true);
3909 set_bit(vid, adapter->active_vlans);
3910
3911 return 0;
3912}
3913
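/* search the VLVF array from the top down and return the index of the entry
 * holding @vlan; returns 0 for VLAN 0 or when no entry matches
 */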
3914static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
3915{
3916 u32 vlvf;
3917 int idx;
3918
3919
3920 if (vlan == 0)
3921 return 0;
3922
3923
3924 for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
3925 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
3926 if ((vlvf & VLAN_VID_MASK) == vlan)
3927 break;
3928 }
3929
3930 return idx;
3931}
3932
3933void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
3934{
3935 struct ixgbe_hw *hw = &adapter->hw;
3936 u32 bits, word;
3937 int idx;
3938
3939 idx = ixgbe_find_vlvf_entry(hw, vid);
3940 if (!idx)
3941 return;
3942
3943
3944
3945
3946 word = idx * 2 + (VMDQ_P(0) / 32);
3947 bits = ~(1 << (VMDQ_P(0)) % 32);
3948 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
3949
3950
3951 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
3952 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
3953 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
3954 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
3955 }
3956}
3957
3958static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
3959 __be16 proto, u16 vid)
3960{
3961 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3962 struct ixgbe_hw *hw = &adapter->hw;
3963
3964
3965 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
3966 ixgbe_update_pf_promisc_vlvf(adapter, vid);
3967 else
3968 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
3969
3970 clear_bit(vid, adapter->active_vlans);
3971
3972 return 0;
3973}
3974
/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */
3979static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3980{
3981 struct ixgbe_hw *hw = &adapter->hw;
3982 u32 vlnctrl;
3983 int i, j;
3984
3985 switch (hw->mac.type) {
3986 case ixgbe_mac_82598EB:
3987 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3988 vlnctrl &= ~IXGBE_VLNCTRL_VME;
3989 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3990 break;
3991 case ixgbe_mac_82599EB:
3992 case ixgbe_mac_X540:
3993 case ixgbe_mac_X550:
3994 case ixgbe_mac_X550EM_x:
3995 for (i = 0; i < adapter->num_rx_queues; i++) {
3996 struct ixgbe_ring *ring = adapter->rx_ring[i];
3997
3998 if (ring->l2_accel_priv)
3999 continue;
4000 j = ring->reg_idx;
4001 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4002 vlnctrl &= ~IXGBE_RXDCTL_VME;
4003 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4004 }
4005 break;
4006 default:
4007 break;
4008 }
4009}
4010
/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */
4015static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4016{
4017 struct ixgbe_hw *hw = &adapter->hw;
4018 u32 vlnctrl;
4019 int i, j;
4020
4021 switch (hw->mac.type) {
4022 case ixgbe_mac_82598EB:
4023 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4024 vlnctrl |= IXGBE_VLNCTRL_VME;
4025 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4026 break;
4027 case ixgbe_mac_82599EB:
4028 case ixgbe_mac_X540:
4029 case ixgbe_mac_X550:
4030 case ixgbe_mac_X550EM_x:
4031 for (i = 0; i < adapter->num_rx_queues; i++) {
4032 struct ixgbe_ring *ring = adapter->rx_ring[i];
4033
4034 if (ring->l2_accel_priv)
4035 continue;
4036 j = ring->reg_idx;
4037 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4038 vlnctrl |= IXGBE_RXDCTL_VME;
4039 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4040 }
4041 break;
4042 default:
4043 break;
4044 }
4045}
4046
4047static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4048{
4049 struct ixgbe_hw *hw = &adapter->hw;
4050 u32 vlnctrl, i;
4051
4052 switch (hw->mac.type) {
4053 case ixgbe_mac_82599EB:
4054 case ixgbe_mac_X540:
4055 case ixgbe_mac_X550:
4056 case ixgbe_mac_X550EM_x:
4057 default:
4058 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
4059 break;
		/* fall through */
4061 case ixgbe_mac_82598EB:
4062
4063 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4064 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
4065 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4066 return;
4067 }
4068
4069
4070 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
4071 return;
4072
4073
4074 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4075
4076
4077 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4078 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4079 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4080
4081 vlvfb |= 1 << (VMDQ_P(0) % 32);
4082 IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4083 }
4084
4085
4086 for (i = hw->mac.vft_size; i--;)
4087 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4088}
4089
4090#define VFTA_BLOCK_SIZE 8
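/* ixgbe_scrub_vfta - drop the PF from VLVF/VLVFB entries in this VFTA block
 * and rewrite the VFTA registers from the active_vlans bitmap
 */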
4091static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4092{
4093 struct ixgbe_hw *hw = &adapter->hw;
4094 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4095 u32 vid_start = vfta_offset * 32;
4096 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4097 u32 i, vid, word, bits;
4098
4099 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4100 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
4101
4102
4103 vid = vlvf & VLAN_VID_MASK;
4104
4105
4106 if (vid < vid_start || vid >= vid_end)
4107 continue;
4108
4109 if (vlvf) {
4110
4111 vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
4112
4113
4114 if (test_bit(vid, adapter->active_vlans))
4115 continue;
4116 }
4117
4118
4119 word = i * 2 + VMDQ_P(0) / 32;
4120 bits = ~(1 << (VMDQ_P(0) % 32));
4121 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4122 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4123 }
4124
4125
4126 for (i = VFTA_BLOCK_SIZE; i--;) {
4127 vid = (vfta_offset + i) * 32;
4128 word = vid / BITS_PER_LONG;
4129 bits = vid % BITS_PER_LONG;
4130
4131 vfta[i] |= adapter->active_vlans[word] >> bits;
4132
4133 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4134 }
4135}
4136
4137static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4138{
4139 struct ixgbe_hw *hw = &adapter->hw;
4140 u32 vlnctrl, i;
4141
4142 switch (hw->mac.type) {
4143 case ixgbe_mac_82599EB:
4144 case ixgbe_mac_X540:
4145 case ixgbe_mac_X550:
4146 case ixgbe_mac_X550EM_x:
4147 default:
4148 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
4149 break;
		/* fall through */
4151 case ixgbe_mac_82598EB:
4152 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4153 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
4154 vlnctrl |= IXGBE_VLNCTRL_VFE;
4155 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4156 return;
4157 }
4158
4159
4160 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4161 return;
4162
4163
4164 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4165
4166 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
4167 ixgbe_scrub_vfta(adapter, i);
4168}
4169
4170static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4171{
4172 u16 vid;
4173
4174 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4175
4176 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4177 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4178}
4179
/**
 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *                0 on no addresses written
 *                X on writing X addresses to MTA
 **/
4189static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4190{
4191 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4192 struct ixgbe_hw *hw = &adapter->hw;
4193
4194 if (!netif_running(netdev))
4195 return 0;
4196
4197 if (hw->mac.ops.update_mc_addr_list)
4198 hw->mac.ops.update_mc_addr_list(hw, netdev);
4199 else
4200 return -ENOMEM;
4201
4202#ifdef CONFIG_PCI_IOV
4203 ixgbe_restore_vf_multicasts(adapter);
4204#endif
4205
4206 return netdev_mc_count(netdev);
4207}
4208
4209#ifdef CONFIG_PCI_IOV
4210void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4211{
4212 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4213 struct ixgbe_hw *hw = &adapter->hw;
4214 int i;
4215
4216 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4217 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4218
4219 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4220 hw->mac.ops.set_rar(hw, i,
4221 mac_table->addr,
4222 mac_table->pool,
4223 IXGBE_RAH_AV);
4224 else
4225 hw->mac.ops.clear_rar(hw, i);
4226 }
4227}
4228
4229#endif
4230static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4231{
4232 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4233 struct ixgbe_hw *hw = &adapter->hw;
4234 int i;
4235
4236 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4237 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
4238 continue;
4239
4240 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4241
4242 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4243 hw->mac.ops.set_rar(hw, i,
4244 mac_table->addr,
4245 mac_table->pool,
4246 IXGBE_RAH_AV);
4247 else
4248 hw->mac.ops.clear_rar(hw, i);
4249 }
4250}
4251
4252static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4253{
4254 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4255 struct ixgbe_hw *hw = &adapter->hw;
4256 int i;
4257
4258 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4259 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4260 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4261 }
4262
4263 ixgbe_sync_mac_table(adapter);
4264}
4265
4266static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
4267{
4268 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4269 struct ixgbe_hw *hw = &adapter->hw;
4270 int i, count = 0;
4271
4272 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4273
4274 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
4275 continue;
4276
4277
4278 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
4279 if (mac_table->pool != pool)
4280 continue;
4281 }
4282
4283 count++;
4284 }
4285
4286 return count;
4287}
4288
4289
4290static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
4291{
4292 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4293 struct ixgbe_hw *hw = &adapter->hw;
4294
4295 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
4296 mac_table->pool = VMDQ_P(0);
4297
4298 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
4299
4300 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
4301 IXGBE_RAH_AV);
4302}
4303
4304int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4305 const u8 *addr, u16 pool)
4306{
4307 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4308 struct ixgbe_hw *hw = &adapter->hw;
4309 int i;
4310
4311 if (is_zero_ether_addr(addr))
4312 return -EINVAL;
4313
4314 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4315 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4316 continue;
4317
4318 ether_addr_copy(mac_table->addr, addr);
4319 mac_table->pool = pool;
4320
4321 mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
4322 IXGBE_MAC_STATE_IN_USE;
4323
4324 ixgbe_sync_mac_table(adapter);
4325
4326 return i;
4327 }
4328
4329 return -ENOMEM;
4330}
4331
4332int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
4333 const u8 *addr, u16 pool)
4334{
4335 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4336 struct ixgbe_hw *hw = &adapter->hw;
4337 int i;
4338
4339 if (is_zero_ether_addr(addr))
4340 return -EINVAL;
4341
4342
4343 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4344
4345 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
4346 continue;
4347
4348 if (mac_table->pool != pool)
4349 continue;
4350
4351 if (!ether_addr_equal(addr, mac_table->addr))
4352 continue;
4353
4354 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4355 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4356
4357 ixgbe_sync_mac_table(adapter);
4358
4359 return 0;
4360 }
4361
4362 return -ENOMEM;
4363}
4364
/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 * @vfn: pool to associate with unicast addresses
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                0 on no addresses written
 *                X on writing X addresses to the RAR table
 **/
4373static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
4374{
4375 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4376 int count = 0;
4377
4378
4379 if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
4380 return -ENOMEM;
4381
4382 if (!netdev_uc_empty(netdev)) {
4383 struct netdev_hw_addr *ha;
4384 netdev_for_each_uc_addr(ha, netdev) {
4385 ixgbe_del_mac_filter(adapter, ha->addr, vfn);
4386 ixgbe_add_mac_filter(adapter, ha->addr, vfn);
4387 count++;
4388 }
4389 }
4390 return count;
4391}
4392
4393static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
4394{
4395 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4396 int ret;
4397
4398 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
4399
4400 return min_t(int, ret, 0);
4401}
4402
4403static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4404{
4405 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4406
4407 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
4408
4409 return 0;
4410}
4411
/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
4421void ixgbe_set_rx_mode(struct net_device *netdev)
4422{
4423 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4424 struct ixgbe_hw *hw = &adapter->hw;
4425 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4426 int count;
4427
4428
4429 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4430
4431
4432 fctrl &= ~IXGBE_FCTRL_SBP;
4433 fctrl |= IXGBE_FCTRL_BAM;
4434 fctrl |= IXGBE_FCTRL_DPF;
4435 fctrl |= IXGBE_FCTRL_PMCF;
4436
4437
4438 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4439 if (netdev->flags & IFF_PROMISC) {
4440 hw->addr_ctrl.user_set_promisc = true;
4441 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4442 vmolr |= IXGBE_VMOLR_MPE;
4443 ixgbe_vlan_promisc_enable(adapter);
4444 } else {
4445 if (netdev->flags & IFF_ALLMULTI) {
4446 fctrl |= IXGBE_FCTRL_MPE;
4447 vmolr |= IXGBE_VMOLR_MPE;
4448 }
4449 hw->addr_ctrl.user_set_promisc = false;
4450 ixgbe_vlan_promisc_disable(adapter);
4451 }
4452
4453
4454
4455
4456
4457
4458 if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4459 fctrl |= IXGBE_FCTRL_UPE;
4460 vmolr |= IXGBE_VMOLR_ROPE;
4461 }
4462
4463
4464
4465
4466
4467 count = ixgbe_write_mc_addr_list(netdev);
4468 if (count < 0) {
4469 fctrl |= IXGBE_FCTRL_MPE;
4470 vmolr |= IXGBE_VMOLR_MPE;
4471 } else if (count) {
4472 vmolr |= IXGBE_VMOLR_ROMPE;
4473 }
4474
4475 if (hw->mac.type != ixgbe_mac_82598EB) {
4476 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4477 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
4478 IXGBE_VMOLR_ROPE);
4479 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
4480 }
4481
4482
4483 if (adapter->netdev->features & NETIF_F_RXALL) {
4484
4485
4486 fctrl |= (IXGBE_FCTRL_SBP |
4487 IXGBE_FCTRL_BAM |
4488 IXGBE_FCTRL_PMCF);
4489
4490 fctrl &= ~(IXGBE_FCTRL_DPF);
4491
4492 }
4493
4494 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4495
4496 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
4497 ixgbe_vlan_strip_enable(adapter);
4498 else
4499 ixgbe_vlan_strip_disable(adapter);
4500}
4501
4502static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4503{
4504 int q_idx;
4505
4506 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
4507 ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
4508 napi_enable(&adapter->q_vector[q_idx]->napi);
4509 }
4510}
4511
4512static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
4513{
4514 int q_idx;
4515
4516 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
4517 napi_disable(&adapter->q_vector[q_idx]->napi);
4518 while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
4519 pr_info("QV %d locked\n", q_idx);
4520 usleep_range(1000, 20000);
4521 }
4522 }
4523}
4524
4525static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
4526{
4527 switch (adapter->hw.mac.type) {
4528 case ixgbe_mac_X550:
4529 case ixgbe_mac_X550EM_x:
4530 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
4531#ifdef CONFIG_IXGBE_VXLAN
4532 adapter->vxlan_port = 0;
4533#endif
4534 break;
4535 default:
4536 break;
4537 }
4538}
4539
4540#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * Called when the driver needs to (re)program the DCB credits and
 * priority-to-TC mappings, for example after a reset or when the MTU or
 * traffic class configuration changes.
 **/
4549static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
4550{
4551 struct ixgbe_hw *hw = &adapter->hw;
4552 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4553
4554 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
4555 if (hw->mac.type == ixgbe_mac_82598EB)
4556 netif_set_gso_max_size(adapter->netdev, 65536);
4557 return;
4558 }
4559
4560 if (hw->mac.type == ixgbe_mac_82598EB)
4561 netif_set_gso_max_size(adapter->netdev, 32768);
4562
4563#ifdef IXGBE_FCOE
4564 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
4565 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
4566#endif
4567
4568
4569 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
4570 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
4571 DCB_TX_CONFIG);
4572 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
4573 DCB_RX_CONFIG);
4574 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
4575 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
4576 ixgbe_dcb_hw_ets(&adapter->hw,
4577 adapter->ixgbe_ieee_ets,
4578 max_frame);
4579 ixgbe_dcb_hw_pfc_config(&adapter->hw,
4580 adapter->ixgbe_ieee_pfc->pfc_en,
4581 adapter->ixgbe_ieee_ets->prio_tc);
4582 }
4583
4584
4585 if (hw->mac.type != ixgbe_mac_82598EB) {
4586 u32 msb = 0;
4587 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
4588
4589 while (rss_i) {
4590 msb++;
4591 rss_i >>= 1;
4592 }
4593
4594
4595 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
4596 }
4597}
4598#endif
4599
4600
4601#define IXGBE_ETH_FRAMING 20
4602
/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
4609static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
4610{
4611 struct ixgbe_hw *hw = &adapter->hw;
4612 struct net_device *dev = adapter->netdev;
4613 int link, tc, kb, marker;
4614 u32 dv_id, rx_pba;
4615
4616
4617 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
4618
4619#ifdef IXGBE_FCOE
4620
4621 if ((dev->features & NETIF_F_FCOE_MTU) &&
4622 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4623 (pb == ixgbe_fcoe_get_tc(adapter)))
4624 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4625#endif
4626
4627
4628 switch (hw->mac.type) {
4629 case ixgbe_mac_X540:
4630 case ixgbe_mac_X550:
4631 case ixgbe_mac_X550EM_x:
4632 dv_id = IXGBE_DV_X540(link, tc);
4633 break;
4634 default:
4635 dv_id = IXGBE_DV(link, tc);
4636 break;
4637 }
4638
4639
4640 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4641 dv_id += IXGBE_B2BT(tc);
4642
4643
4644 kb = IXGBE_BT2KB(dv_id);
4645 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
4646
4647 marker = rx_pba - kb;
4648
4649
4650
4651
4652
4653 if (marker < 0) {
		e_warn(drv, "Packet Buffer(%i) can not provide enough "
			    "headroom to support flow control. "
			    "Decrease MTU or number of traffic classes\n", pb);
4657 marker = tc + 1;
4658 }
4659
4660 return marker;
4661}
4662
/**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
4669static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
4670{
4671 struct ixgbe_hw *hw = &adapter->hw;
4672 struct net_device *dev = adapter->netdev;
4673 int tc;
4674 u32 dv_id;
4675
4676
4677 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4678
4679#ifdef IXGBE_FCOE
4680
4681 if ((dev->features & NETIF_F_FCOE_MTU) &&
4682 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4683 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
4684 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4685#endif
4686
4687
4688 switch (hw->mac.type) {
4689 case ixgbe_mac_X540:
4690 case ixgbe_mac_X550:
4691 case ixgbe_mac_X550EM_x:
4692 dv_id = IXGBE_LOW_DV_X540(tc);
4693 break;
4694 default:
4695 dv_id = IXGBE_LOW_DV(tc);
4696 break;
4697 }
4698
4699
4700 return IXGBE_BT2KB(dv_id);
4701}
4702
/*
 * ixgbe_pbthresh_setup - calculate and setup high low water marks
 */
4706static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
4707{
4708 struct ixgbe_hw *hw = &adapter->hw;
4709 int num_tc = netdev_get_num_tc(adapter->netdev);
4710 int i;
4711
4712 if (!num_tc)
4713 num_tc = 1;
4714
4715 for (i = 0; i < num_tc; i++) {
4716 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
4717 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
4718
4719
4720 if (hw->fc.low_water[i] > hw->fc.high_water[i])
4721 hw->fc.low_water[i] = 0;
4722 }
4723
4724 for (; i < MAX_TRAFFIC_CLASS; i++)
4725 hw->fc.high_water[i] = 0;
4726}
4727
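/* ixgbe_configure_pb - size the Rx packet buffers (reserving headroom for
 * Flow Director filters) and recompute the flow control watermarks
 */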
4728static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
4729{
4730 struct ixgbe_hw *hw = &adapter->hw;
4731 int hdrm;
4732 u8 tc = netdev_get_num_tc(adapter->netdev);
4733
4734 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4735 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
4736 hdrm = 32 << adapter->fdir_pballoc;
4737 else
4738 hdrm = 0;
4739
4740 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
4741 ixgbe_pbthresh_setup(adapter);
4742}
4743
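/* re-program all software-tracked Flow Director perfect filters into the
 * hardware after a reset
 */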
4744static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
4745{
4746 struct ixgbe_hw *hw = &adapter->hw;
4747 struct hlist_node *node2;
4748 struct ixgbe_fdir_filter *filter;
4749
4750 spin_lock(&adapter->fdir_perfect_lock);
4751
4752 if (!hlist_empty(&adapter->fdir_filter_list))
4753 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
4754
4755 hlist_for_each_entry_safe(filter, node2,
4756 &adapter->fdir_filter_list, fdir_node) {
4757 ixgbe_fdir_write_perfect_filter_82599(hw,
4758 &filter->filter,
4759 filter->sw_idx,
4760 (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
4761 IXGBE_FDIR_DROP_QUEUE :
4762 adapter->rx_ring[filter->action]->reg_idx);
4763 }
4764
4765 spin_unlock(&adapter->fdir_perfect_lock);
4766}
4767
4768static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
4769 struct ixgbe_adapter *adapter)
4770{
4771 struct ixgbe_hw *hw = &adapter->hw;
4772 u32 vmolr;
4773
4774
4775 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
4776 vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
4777
4778
4779 vmolr &= ~IXGBE_VMOLR_MPE;
4780
4781 if (dev->flags & IFF_ALLMULTI) {
4782 vmolr |= IXGBE_VMOLR_MPE;
4783 } else {
4784 vmolr |= IXGBE_VMOLR_ROMPE;
4785 hw->mac.ops.update_mc_addr_list(hw, dev);
4786 }
4787 ixgbe_write_uc_addr_list(adapter->netdev, pool);
4788 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4789}
4790
4791static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
4792{
4793 struct ixgbe_adapter *adapter = vadapter->real_adapter;
4794 int rss_i = adapter->num_rx_queues_per_pool;
4795 struct ixgbe_hw *hw = &adapter->hw;
4796 u16 pool = vadapter->pool;
4797 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4798 IXGBE_PSRTYPE_UDPHDR |
4799 IXGBE_PSRTYPE_IPV4HDR |
4800 IXGBE_PSRTYPE_L2HDR |
4801 IXGBE_PSRTYPE_IPV6HDR;
4802
4803 if (hw->mac.type == ixgbe_mac_82598EB)
4804 return;
4805
4806 if (rss_i > 3)
4807 psrtype |= 2 << 29;
4808 else if (rss_i > 1)
4809 psrtype |= 1 << 29;
4810
4811 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4812}
4813
4814
4815
4816
4817
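/**
 * ixgbe_clean_rx_ring - free all buffers posted to an Rx ring
 * @rx_ring: ring whose Rx buffers are to be freed
 */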
4818static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4819{
4820 struct device *dev = rx_ring->dev;
4821 unsigned long size;
4822 u16 i;
4823
4824
4825 if (!rx_ring->rx_buffer_info)
4826 return;
4827
4828
4829 for (i = 0; i < rx_ring->count; i++) {
4830 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
4831
4832 if (rx_buffer->skb) {
4833 struct sk_buff *skb = rx_buffer->skb;
4834 if (IXGBE_CB(skb)->page_released)
4835 dma_unmap_page(dev,
4836 IXGBE_CB(skb)->dma,
4837 ixgbe_rx_bufsz(rx_ring),
4838 DMA_FROM_DEVICE);
4839 dev_kfree_skb(skb);
4840 rx_buffer->skb = NULL;
4841 }
4842
4843 if (!rx_buffer->page)
4844 continue;
4845
4846 dma_unmap_page(dev, rx_buffer->dma,
4847 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
4848 __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
4849
4850 rx_buffer->page = NULL;
4851 }
4852
4853 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4854 memset(rx_ring->rx_buffer_info, 0, size);
4855
4856
4857 memset(rx_ring->desc, 0, rx_ring->size);
4858
4859 rx_ring->next_to_alloc = 0;
4860 rx_ring->next_to_clean = 0;
4861 rx_ring->next_to_use = 0;
4862}
4863
4864static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
4865 struct ixgbe_ring *rx_ring)
4866{
4867 struct ixgbe_adapter *adapter = vadapter->real_adapter;
4868 int index = rx_ring->queue_index + vadapter->rx_base_queue;
4869
4870
4871 ixgbe_disable_rx_queue(adapter, rx_ring);
4872 usleep_range(10000, 20000);
4873 ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
4874 ixgbe_clean_rx_ring(rx_ring);
4875 rx_ring->l2_accel_priv = NULL;
4876}
4877
4878static int ixgbe_fwd_ring_down(struct net_device *vdev,
4879 struct ixgbe_fwd_adapter *accel)
4880{
4881 struct ixgbe_adapter *adapter = accel->real_adapter;
4882 unsigned int rxbase = accel->rx_base_queue;
4883 unsigned int txbase = accel->tx_base_queue;
4884 int i;
4885
4886 netif_tx_stop_all_queues(vdev);
4887
4888 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4889 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
4890 adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
4891 }
4892
4893 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4894 adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
4895 adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
4896 }
4897
4898
4899 return 0;
4900}
4901
4902static int ixgbe_fwd_ring_up(struct net_device *vdev,
4903 struct ixgbe_fwd_adapter *accel)
4904{
4905 struct ixgbe_adapter *adapter = accel->real_adapter;
4906 unsigned int rxbase, txbase, queues;
4907 int i, baseq, err = 0;
4908
4909 if (!test_bit(accel->pool, &adapter->fwd_bitmask))
4910 return 0;
4911
4912 baseq = accel->pool * adapter->num_rx_queues_per_pool;
4913 netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
4914 accel->pool, adapter->num_rx_pools,
4915 baseq, baseq + adapter->num_rx_queues_per_pool,
4916 adapter->fwd_bitmask);
4917
4918 accel->netdev = vdev;
4919 accel->rx_base_queue = rxbase = baseq;
4920 accel->tx_base_queue = txbase = baseq;
4921
4922 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
4923 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
4924
4925 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4926 adapter->rx_ring[rxbase + i]->netdev = vdev;
4927 adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
4928 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
4929 }
4930
4931 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4932 adapter->tx_ring[txbase + i]->netdev = vdev;
4933 adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
4934 }
4935
4936 queues = min_t(unsigned int,
4937 adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
4938 err = netif_set_real_num_tx_queues(vdev, queues);
4939 if (err)
4940 goto fwd_queue_err;
4941
4942 err = netif_set_real_num_rx_queues(vdev, queues);
4943 if (err)
4944 goto fwd_queue_err;
4945
4946 if (is_valid_ether_addr(vdev->dev_addr))
4947 ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
4948
4949 ixgbe_fwd_psrtype(accel);
4950 ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
4951 return err;
4952fwd_queue_err:
4953 ixgbe_fwd_ring_down(vdev, accel);
4954 return err;
4955}
4956
4957static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
4958{
4959 struct net_device *upper;
4960 struct list_head *iter;
4961 int err;
4962
4963 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
4964 if (netif_is_macvlan(upper)) {
4965 struct macvlan_dev *dfwd = netdev_priv(upper);
4966 struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
4967
4968 if (dfwd->fwd_priv) {
4969 err = ixgbe_fwd_ring_up(upper, vadapter);
4970 if (err)
4971 continue;
4972 }
4973 }
4974 }
4975}
4976
4977static void ixgbe_configure(struct ixgbe_adapter *adapter)
4978{
4979 struct ixgbe_hw *hw = &adapter->hw;
4980
4981 ixgbe_configure_pb(adapter);
4982#ifdef CONFIG_IXGBE_DCB
4983 ixgbe_configure_dcb(adapter);
4984#endif
4985
4986
4987
4988
4989 ixgbe_configure_virtualization(adapter);
4990
4991 ixgbe_set_rx_mode(adapter->netdev);
4992 ixgbe_restore_vlan(adapter);
4993
4994 switch (hw->mac.type) {
4995 case ixgbe_mac_82599EB:
4996 case ixgbe_mac_X540:
4997 hw->mac.ops.disable_rx_buff(hw);
4998 break;
4999 default:
5000 break;
5001 }
5002
5003 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5004 ixgbe_init_fdir_signature_82599(&adapter->hw,
5005 adapter->fdir_pballoc);
5006 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5007 ixgbe_init_fdir_perfect_82599(&adapter->hw,
5008 adapter->fdir_pballoc);
5009 ixgbe_fdir_filter_restore(adapter);
5010 }
5011
5012 switch (hw->mac.type) {
5013 case ixgbe_mac_82599EB:
5014 case ixgbe_mac_X540:
5015 hw->mac.ops.enable_rx_buff(hw);
5016 break;
5017 default:
5018 break;
5019 }
5020
5021#ifdef CONFIG_IXGBE_DCA
5022
5023 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
5024 ixgbe_setup_dca(adapter);
5025#endif
5026
5027#ifdef IXGBE_FCOE
5028
5029 ixgbe_configure_fcoe(adapter);
5030
5031#endif
5032 ixgbe_configure_tx(adapter);
5033 ixgbe_configure_rx(adapter);
5034 ixgbe_configure_dfwd(adapter);
5035}
5036
5037
5038
5039
5040
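/**
 * ixgbe_sfp_link_config - set up link for SFP+ devices
 * @adapter: pointer to private adapter struct
 */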
5041static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
5042{
5043
5044
5045
5046
5047
5048
5049 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5050 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5051
5052 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5053 adapter->sfp_poll_time = 0;
5054}
5055
5056
5057
5058
5059
5060
5061
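/**
 * ixgbe_non_sfp_link_config - set up link for non-SFP devices
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, otherwise an error code
 */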
5062static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
5063{
5064 u32 speed;
5065 bool autoneg, link_up = false;
5066 int ret = IXGBE_ERR_LINK_SETUP;
5067
5068 if (hw->mac.ops.check_link)
5069 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
5070
5071 if (ret)
5072 return ret;
5073
5074 speed = hw->phy.autoneg_advertised;
5075 if ((!speed) && (hw->mac.ops.get_link_capabilities))
5076 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5077 &autoneg);
5078 if (ret)
5079 return ret;
5080
5081 if (hw->mac.ops.setup_link)
5082 ret = hw->mac.ops.setup_link(hw, speed, link_up);
5083
5084 return ret;
5085}
5086
5087static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5088{
5089 struct ixgbe_hw *hw = &adapter->hw;
5090 u32 gpie = 0;
5091
5092 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5093 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5094 IXGBE_GPIE_OCD;
5095 gpie |= IXGBE_GPIE_EIAME;
5096
5097
5098
5099
5100 switch (hw->mac.type) {
5101 case ixgbe_mac_82598EB:
5102 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5103 break;
5104 case ixgbe_mac_82599EB:
5105 case ixgbe_mac_X540:
5106 case ixgbe_mac_X550:
5107 case ixgbe_mac_X550EM_x:
5108 default:
5109 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5110 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5111 break;
5112 }
5113 } else {
5114
5115
5116 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5117 }
5118
5119
5120
5121
5122 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
5123 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5124
5125 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
5126 case IXGBE_82599_VMDQ_8Q_MASK:
5127 gpie |= IXGBE_GPIE_VTMODE_16;
5128 break;
5129 case IXGBE_82599_VMDQ_4Q_MASK:
5130 gpie |= IXGBE_GPIE_VTMODE_32;
5131 break;
5132 default:
5133 gpie |= IXGBE_GPIE_VTMODE_64;
5134 break;
5135 }
5136 }
5137
5138
5139 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
5140 switch (adapter->hw.mac.type) {
5141 case ixgbe_mac_82599EB:
5142 gpie |= IXGBE_SDP0_GPIEN_8259X;
5143 break;
5144 default:
5145 break;
5146 }
5147 }
5148
5149
5150 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
5151 gpie |= IXGBE_SDP1_GPIEN(hw);
5152
5153 switch (hw->mac.type) {
5154 case ixgbe_mac_82599EB:
5155 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5156 break;
5157 case ixgbe_mac_X550EM_x:
5158 gpie |= IXGBE_SDP0_GPIEN_X540;
5159 break;
5160 default:
5161 break;
5162 }
5163
5164 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5165}
5166
5167static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
5168{
5169 struct ixgbe_hw *hw = &adapter->hw;
5170 int err;
5171 u32 ctrl_ext;
5172
5173 ixgbe_get_hw_control(adapter);
5174 ixgbe_setup_gpie(adapter);
5175
5176 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
5177 ixgbe_configure_msix(adapter);
5178 else
5179 ixgbe_configure_msi_and_legacy(adapter);
5180
5181
5182 if (hw->mac.ops.enable_tx_laser)
5183 hw->mac.ops.enable_tx_laser(hw);
5184
5185 if (hw->phy.ops.set_phy_power)
5186 hw->phy.ops.set_phy_power(hw, true);
5187
5188 smp_mb__before_atomic();
5189 clear_bit(__IXGBE_DOWN, &adapter->state);
5190 ixgbe_napi_enable_all(adapter);
5191
5192 if (ixgbe_is_sfp(hw)) {
5193 ixgbe_sfp_link_config(adapter);
5194 } else {
5195 err = ixgbe_non_sfp_link_config(hw);
5196 if (err)
5197 e_err(probe, "link_config FAILED %d\n", err);
5198 }
5199
5200
5201 IXGBE_READ_REG(hw, IXGBE_EICR);
5202 ixgbe_irq_enable(adapter, true, true);
5203
5204
5205
5206
5207
5208 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5209 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5210 if (esdp & IXGBE_ESDP_SDP1)
5211 e_crit(drv, "Fan has stopped, replace the adapter\n");
5212 }
5213
5214
5215
5216 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5217 adapter->link_check_timeout = jiffies;
5218 mod_timer(&adapter->service_timer, jiffies);
5219
5220
5221 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5222 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5223 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5224}
5225
5226void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5227{
5228 WARN_ON(in_interrupt());
5229
5230 adapter->netdev->trans_start = jiffies;
5231
5232 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5233 usleep_range(1000, 2000);
5234 ixgbe_down(adapter);
5235
5236
5237
5238
5239
5240
5241 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5242 msleep(2000);
5243 ixgbe_up(adapter);
5244 clear_bit(__IXGBE_RESETTING, &adapter->state);
5245}
5246
5247void ixgbe_up(struct ixgbe_adapter *adapter)
5248{
5249
5250 ixgbe_configure(adapter);
5251
5252 ixgbe_up_complete(adapter);
5253}
5254
5255void ixgbe_reset(struct ixgbe_adapter *adapter)
5256{
5257 struct ixgbe_hw *hw = &adapter->hw;
5258 struct net_device *netdev = adapter->netdev;
5259 int err;
5260
5261 if (ixgbe_removed(hw->hw_addr))
5262 return;
5263
5264 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5265 usleep_range(1000, 2000);
5266
5267
5268 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5269 IXGBE_FLAG2_SFP_NEEDS_RESET);
5270 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5271
5272 err = hw->mac.ops.init_hw(hw);
5273 switch (err) {
5274 case 0:
5275 case IXGBE_ERR_SFP_NOT_PRESENT:
5276 case IXGBE_ERR_SFP_NOT_SUPPORTED:
5277 break;
5278 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5279 e_dev_err("master disable timed out\n");
5280 break;
5281 case IXGBE_ERR_EEPROM_VERSION:
5282
5283 e_dev_warn("This device is a pre-production adapter/LOM. "
5284 "Please be aware there may be issues associated with "
5285 "your hardware. If you are experiencing problems "
5286 "please contact your Intel or hardware "
5287 "representative who provided you with this "
5288 "hardware.\n");
5289 break;
5290 default:
5291 e_dev_err("Hardware Error: %d\n", err);
5292 }
5293
5294 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5295
5296
5297 ixgbe_flush_sw_mac_table(adapter);
5298 __dev_uc_unsync(netdev, NULL);
5299
5300
5301 ixgbe_mac_set_default_filter(adapter);
5302
5303
5304 if (hw->mac.san_mac_rar_index)
5305 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5306
5307 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5308 ixgbe_ptp_reset(adapter);
5309
5310 if (hw->phy.ops.set_phy_power) {
5311 if (!netif_running(adapter->netdev) && !adapter->wol)
5312 hw->phy.ops.set_phy_power(hw, false);
5313 else
5314 hw->phy.ops.set_phy_power(hw, true);
5315 }
5316}
5317
5318
5319
5320
5321
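/**
 * ixgbe_clean_tx_ring - free all buffers queued on a Tx ring
 * @tx_ring: ring whose Tx buffers are to be freed
 */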
5322static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5323{
5324 struct ixgbe_tx_buffer *tx_buffer_info;
5325 unsigned long size;
5326 u16 i;
5327
5328
5329 if (!tx_ring->tx_buffer_info)
5330 return;
5331
5332
5333 for (i = 0; i < tx_ring->count; i++) {
5334 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5335 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
5336 }
5337
5338 netdev_tx_reset_queue(txring_txq(tx_ring));
5339
5340 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
5341 memset(tx_ring->tx_buffer_info, 0, size);
5342
5343
5344 memset(tx_ring->desc, 0, tx_ring->size);
5345
5346 tx_ring->next_to_use = 0;
5347 tx_ring->next_to_clean = 0;
5348}
5349
5350
5351
5352
5353
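/**
 * ixgbe_clean_all_rx_rings - free Rx buffers on all queues
 * @adapter: board private structure
 */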
5354static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
5355{
5356 int i;
5357
5358 for (i = 0; i < adapter->num_rx_queues; i++)
5359 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
5360}
5361
5362
5363
5364
5365
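/**
 * ixgbe_clean_all_tx_rings - free Tx buffers on all queues
 * @adapter: board private structure
 */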
5366static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
5367{
5368 int i;
5369
5370 for (i = 0; i < adapter->num_tx_queues; i++)
5371 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
5372}
5373
5374static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
5375{
5376 struct hlist_node *node2;
5377 struct ixgbe_fdir_filter *filter;
5378
5379 spin_lock(&adapter->fdir_perfect_lock);
5380
5381 hlist_for_each_entry_safe(filter, node2,
5382 &adapter->fdir_filter_list, fdir_node) {
5383 hlist_del(&filter->fdir_node);
5384 kfree(filter);
5385 }
5386 adapter->fdir_filter_count = 0;
5387
5388 spin_unlock(&adapter->fdir_perfect_lock);
5389}
5390
5391void ixgbe_down(struct ixgbe_adapter *adapter)
5392{
5393 struct net_device *netdev = adapter->netdev;
5394 struct ixgbe_hw *hw = &adapter->hw;
5395 struct net_device *upper;
5396 struct list_head *iter;
5397 int i;
5398
5399
5400 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
5401 return;
5402
5403
5404 hw->mac.ops.disable_rx(hw);
5405
5406
5407 for (i = 0; i < adapter->num_rx_queues; i++)
5408
5409 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
5410
5411 usleep_range(10000, 20000);
5412
5413 netif_tx_stop_all_queues(netdev);
5414
5415
5416 netif_carrier_off(netdev);
5417 netif_tx_disable(netdev);
5418
5419
5420 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
5421 if (netif_is_macvlan(upper)) {
5422 struct macvlan_dev *vlan = netdev_priv(upper);
5423
5424 if (vlan->fwd_priv) {
5425 netif_tx_stop_all_queues(upper);
5426 netif_carrier_off(upper);
5427 netif_tx_disable(upper);
5428 }
5429 }
5430 }
5431
5432 ixgbe_irq_disable(adapter);
5433
5434 ixgbe_napi_disable_all(adapter);
5435
5436 adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
5437 IXGBE_FLAG2_RESET_REQUESTED);
5438 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5439
5440 del_timer_sync(&adapter->service_timer);
5441
5442 if (adapter->num_vfs) {
5443
5444 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
5445
5446
5447 for (i = 0 ; i < adapter->num_vfs; i++)
5448 adapter->vfinfo[i].clear_to_send = false;
5449
5450
5451 ixgbe_ping_all_vfs(adapter);
5452
5453
5454 ixgbe_disable_tx_rx(adapter);
5455 }
5456
5457
5458 for (i = 0; i < adapter->num_tx_queues; i++) {
5459 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
5460 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5461 }
5462
5463
5464 switch (hw->mac.type) {
5465 case ixgbe_mac_82599EB:
5466 case ixgbe_mac_X540:
5467 case ixgbe_mac_X550:
5468 case ixgbe_mac_X550EM_x:
5469 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5470 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5471 ~IXGBE_DMATXCTL_TE));
5472 break;
5473 default:
5474 break;
5475 }
5476
5477 if (!pci_channel_offline(adapter->pdev))
5478 ixgbe_reset(adapter);
5479
5480
5481 if (hw->mac.ops.disable_tx_laser)
5482 hw->mac.ops.disable_tx_laser(hw);
5483
5484 ixgbe_clean_all_tx_rings(adapter);
5485 ixgbe_clean_all_rx_rings(adapter);
5486}
5487
5488
5489
5490
5491
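/**
 * ixgbe_tx_timeout - respond to a Tx hang reported by the stack
 * @netdev: network interface device structure
 */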
5492static void ixgbe_tx_timeout(struct net_device *netdev)
5493{
5494 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5495
5496
5497 ixgbe_tx_timeout_reset(adapter);
5498}
5499
5500
5501
5502
5503
5504
5505
5506
5507
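/**
 * ixgbe_sw_init - initialize general software structures
 * @adapter: board private structure to initialize
 *
 * Fields are initialized based on PCI device information and
 * per-MAC-type defaults.
 */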
5508static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
5509{
5510 struct ixgbe_hw *hw = &adapter->hw;
5511 struct pci_dev *pdev = adapter->pdev;
5512 unsigned int rss, fdir;
5513 u32 fwsm;
5514#ifdef CONFIG_IXGBE_DCB
5515 int j;
5516 struct tc_configuration *tc;
5517#endif
5518
5519
5520
5521 hw->vendor_id = pdev->vendor;
5522 hw->device_id = pdev->device;
5523 hw->revision_id = pdev->revision;
5524 hw->subsystem_vendor_id = pdev->subsystem_vendor;
5525 hw->subsystem_device_id = pdev->subsystem_device;
5526
5527
5528 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
5529 adapter->ring_feature[RING_F_RSS].limit = rss;
5530 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
5531 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
5532 adapter->atr_sample_rate = 20;
5533 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
5534 adapter->ring_feature[RING_F_FDIR].limit = fdir;
5535 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
5536#ifdef CONFIG_IXGBE_DCA
5537 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
5538#endif
5539#ifdef IXGBE_FCOE
5540 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
5541 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5542#ifdef CONFIG_IXGBE_DCB
5543
5544 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
5545#endif
5546#endif
5547
5548 adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
5549 hw->mac.num_rar_entries,
5550 GFP_ATOMIC);
5551 if (!adapter->mac_table)
5552 return -ENOMEM;
5553
5554
5555 switch (hw->mac.type) {
5556 case ixgbe_mac_82598EB:
5557 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
5558
5559 if (hw->device_id == IXGBE_DEV_ID_82598AT)
5560 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
5561
5562 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
5563 adapter->ring_feature[RING_F_FDIR].limit = 0;
5564 adapter->atr_sample_rate = 0;
5565 adapter->fdir_pballoc = 0;
5566#ifdef IXGBE_FCOE
5567 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
5568 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5569#ifdef CONFIG_IXGBE_DCB
5570 adapter->fcoe.up = 0;
5571#endif
5572#endif
5573 break;
5574 case ixgbe_mac_82599EB:
5575 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
5576 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5577 break;
5578 case ixgbe_mac_X540:
5579 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
5580 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5581 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5582 break;
5583 case ixgbe_mac_X550EM_x:
5584 case ixgbe_mac_X550:
5585#ifdef CONFIG_IXGBE_DCA
5586 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
5587#endif
5588#ifdef CONFIG_IXGBE_VXLAN
5589 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
5590#endif
5591 break;
5592 default:
5593 break;
5594 }
5595
5596#ifdef IXGBE_FCOE
5597
5598 spin_lock_init(&adapter->fcoe.lock);
5599
5600#endif
5601
5602 spin_lock_init(&adapter->fdir_perfect_lock);
5603
5604#ifdef CONFIG_IXGBE_DCB
5605 switch (hw->mac.type) {
5606 case ixgbe_mac_X540:
5607 case ixgbe_mac_X550:
5608 case ixgbe_mac_X550EM_x:
5609 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
5610 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
5611 break;
5612 default:
5613 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
5614 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
5615 break;
5616 }
5617
5618
5619 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
5620 tc = &adapter->dcb_cfg.tc_config[j];
5621 tc->path[DCB_TX_CONFIG].bwg_id = 0;
5622 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
5623 tc->path[DCB_RX_CONFIG].bwg_id = 0;
5624 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
5625 tc->dcb_pfc = pfc_disabled;
5626 }
5627
5628
5629 tc = &adapter->dcb_cfg.tc_config[0];
5630 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
5631 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
5632
5633 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
5634 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
5635 adapter->dcb_cfg.pfc_mode_enable = false;
5636 adapter->dcb_set_bitmap = 0x00;
5637 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
5638 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
5639 sizeof(adapter->temp_dcb_cfg));
5640
5641#endif
5642
5643
5644 hw->fc.requested_mode = ixgbe_fc_full;
5645 hw->fc.current_mode = ixgbe_fc_full;
5646 ixgbe_pbthresh_setup(adapter);
5647 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
5648 hw->fc.send_xon = true;
5649 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
5650
5651#ifdef CONFIG_PCI_IOV
5652 if (max_vfs > 0)
5653 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
5654
5655
5656 if (hw->mac.type != ixgbe_mac_82598EB) {
5657 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
5658 adapter->num_vfs = 0;
5659 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
5660 } else {
5661 adapter->num_vfs = max_vfs;
5662 }
5663 }
5664#endif
5665
5666
5667 adapter->rx_itr_setting = 1;
5668 adapter->tx_itr_setting = 1;
5669
5670
5671 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
5672 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
5673
5674
5675 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
5676
5677
5678 if (ixgbe_init_eeprom_params_generic(hw)) {
5679 e_dev_err("EEPROM initialization failed\n");
5680 return -EIO;
5681 }
5682
5683
5684 set_bit(0, &adapter->fwd_bitmask);
5685 set_bit(__IXGBE_DOWN, &adapter->state);
5686
5687 return 0;
5688}
5689
5690
5691
5692
5693
5694
5695
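/**
 * ixgbe_setup_tx_resources - allocate Tx resources (descriptors)
 * @tx_ring: Tx descriptor ring (for a specific queue) to set up
 *
 * Return 0 on success, negative on failure
 */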
5696int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
5697{
5698 struct device *dev = tx_ring->dev;
5699 int orig_node = dev_to_node(dev);
5700 int ring_node = -1;
5701 int size;
5702
5703 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
5704
5705 if (tx_ring->q_vector)
5706 ring_node = tx_ring->q_vector->numa_node;
5707
5708 tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
5709 if (!tx_ring->tx_buffer_info)
5710 tx_ring->tx_buffer_info = vzalloc(size);
5711 if (!tx_ring->tx_buffer_info)
5712 goto err;
5713
5714 u64_stats_init(&tx_ring->syncp);
5715
5716
5717 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
5718 tx_ring->size = ALIGN(tx_ring->size, 4096);
5719
5720 set_dev_node(dev, ring_node);
5721 tx_ring->desc = dma_alloc_coherent(dev,
5722 tx_ring->size,
5723 &tx_ring->dma,
5724 GFP_KERNEL);
5725 set_dev_node(dev, orig_node);
5726 if (!tx_ring->desc)
5727 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
5728 &tx_ring->dma, GFP_KERNEL);
5729 if (!tx_ring->desc)
5730 goto err;
5731
5732 tx_ring->next_to_use = 0;
5733 tx_ring->next_to_clean = 0;
5734 return 0;
5735
5736err:
5737 vfree(tx_ring->tx_buffer_info);
5738 tx_ring->tx_buffer_info = NULL;
5739 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
5740 return -ENOMEM;
5741}
5742
5743
5744
5745
5746
5747
5748
5749
5750
5751
5752
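/**
 * ixgbe_setup_all_tx_resources - allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * On failure, any rings that were already set up are freed again.
 *
 * Return 0 on success, negative on failure
 */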
5753static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5754{
5755 int i, err = 0;
5756
5757 for (i = 0; i < adapter->num_tx_queues; i++) {
5758 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
5759 if (!err)
5760 continue;
5761
5762 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
5763 goto err_setup_tx;
5764 }
5765
5766 return 0;
5767err_setup_tx:
5768
5769 while (i--)
5770 ixgbe_free_tx_resources(adapter->tx_ring[i]);
5771 return err;
5772}
5773
5774
5775
5776
5777
5778
5779
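/**
 * ixgbe_setup_rx_resources - allocate Rx resources (descriptors)
 * @rx_ring: Rx descriptor ring (for a specific queue) to set up
 *
 * Return 0 on success, negative on failure
 */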
5780int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
5781{
5782 struct device *dev = rx_ring->dev;
5783 int orig_node = dev_to_node(dev);
5784 int ring_node = -1;
5785 int size;
5786
5787 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5788
5789 if (rx_ring->q_vector)
5790 ring_node = rx_ring->q_vector->numa_node;
5791
5792 rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
5793 if (!rx_ring->rx_buffer_info)
5794 rx_ring->rx_buffer_info = vzalloc(size);
5795 if (!rx_ring->rx_buffer_info)
5796 goto err;
5797
5798 u64_stats_init(&rx_ring->syncp);
5799
5800
5801 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5802 rx_ring->size = ALIGN(rx_ring->size, 4096);
5803
5804 set_dev_node(dev, ring_node);
5805 rx_ring->desc = dma_alloc_coherent(dev,
5806 rx_ring->size,
5807 &rx_ring->dma,
5808 GFP_KERNEL);
5809 set_dev_node(dev, orig_node);
5810 if (!rx_ring->desc)
5811 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
5812 &rx_ring->dma, GFP_KERNEL);
5813 if (!rx_ring->desc)
5814 goto err;
5815
5816 rx_ring->next_to_clean = 0;
5817 rx_ring->next_to_use = 0;
5818
5819 return 0;
5820err:
5821 vfree(rx_ring->rx_buffer_info);
5822 rx_ring->rx_buffer_info = NULL;
5823 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
5824 return -ENOMEM;
5825}
5826
5827
5828
5829
5830
5831
5832
5833
5834
5835
5836
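/**
 * ixgbe_setup_all_rx_resources - allocate Rx resources for all queues
 * @adapter: board private structure
 *
 * Also sets up the FCoE DDP resources when the driver is built with
 * FCoE support.  On failure, any rings that were already set up are
 * freed again.
 *
 * Return 0 on success, negative on failure
 */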
5837static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5838{
5839 int i, err = 0;
5840
5841 for (i = 0; i < adapter->num_rx_queues; i++) {
5842 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
5843 if (!err)
5844 continue;
5845
5846 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
5847 goto err_setup_rx;
5848 }
5849
5850#ifdef IXGBE_FCOE
5851 err = ixgbe_setup_fcoe_ddp_resources(adapter);
5852 if (!err)
5853#endif
5854 return 0;
5855err_setup_rx:
5856
5857 while (i--)
5858 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5859 return err;
5860}
5861
5862
5863
5864
5865
5866
5867
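/**
 * ixgbe_free_tx_resources - free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */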
5868void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
5869{
5870 ixgbe_clean_tx_ring(tx_ring);
5871
5872 vfree(tx_ring->tx_buffer_info);
5873 tx_ring->tx_buffer_info = NULL;
5874
5875
5876 if (!tx_ring->desc)
5877 return;
5878
5879 dma_free_coherent(tx_ring->dev, tx_ring->size,
5880 tx_ring->desc, tx_ring->dma);
5881
5882 tx_ring->desc = NULL;
5883}
5884
5885
5886
5887
5888
5889
5890
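/**
 * ixgbe_free_all_tx_resources - free Tx resources for all queues
 * @adapter: board private structure
 */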
5891static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5892{
5893 int i;
5894
5895 for (i = 0; i < adapter->num_tx_queues; i++)
5896 if (adapter->tx_ring[i]->desc)
5897 ixgbe_free_tx_resources(adapter->tx_ring[i]);
5898}
5899
5900
5901
5902
5903
5904
5905
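/**
 * ixgbe_free_rx_resources - free Rx resources per queue
 * @rx_ring: Rx descriptor ring for a specific queue
 *
 * Free all receive software resources
 */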
5906void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
5907{
5908 ixgbe_clean_rx_ring(rx_ring);
5909
5910 vfree(rx_ring->rx_buffer_info);
5911 rx_ring->rx_buffer_info = NULL;
5912
5913
5914 if (!rx_ring->desc)
5915 return;
5916
5917 dma_free_coherent(rx_ring->dev, rx_ring->size,
5918 rx_ring->desc, rx_ring->dma);
5919
5920 rx_ring->desc = NULL;
5921}
5922
5923
5924
5925
5926
5927
5928
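/**
 * ixgbe_free_all_rx_resources - free Rx resources for all queues
 * @adapter: board private structure
 */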
5929static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5930{
5931 int i;
5932
5933#ifdef IXGBE_FCOE
5934 ixgbe_free_fcoe_ddp_resources(adapter);
5935
5936#endif
5937 for (i = 0; i < adapter->num_rx_queues; i++)
5938 if (adapter->rx_ring[i]->desc)
5939 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5940}
5941
5942
5943
5944
5945
5946
5947
5948
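/**
 * ixgbe_change_mtu - change the maximum transfer unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */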
5949static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5950{
5951 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5952 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5953
5954
5955 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5956 return -EINVAL;
5957
5958
5959
5960
5961
5962
5963 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
5964 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
5965 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
5966 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
5967
5968 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5969
5970
5971 netdev->mtu = new_mtu;
5972
5973 if (netif_running(netdev))
5974 ixgbe_reinit_locked(adapter);
5975
5976 return 0;
5977}
5978
5979
5980
5981
5982
5983
5984
5985
5986
5987
5988
5989
5990
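/**
 * ixgbe_open - called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  Transmit and receive resources are
 * allocated, the interrupt handler is registered and the interface
 * is brought up.
 *
 * Returns 0 on success, negative value on failure
 */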
5991static int ixgbe_open(struct net_device *netdev)
5992{
5993 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5994 struct ixgbe_hw *hw = &adapter->hw;
5995 int err, queues;
5996
5997
5998 if (test_bit(__IXGBE_TESTING, &adapter->state))
5999 return -EBUSY;
6000
6001 netif_carrier_off(netdev);
6002
6003
6004 err = ixgbe_setup_all_tx_resources(adapter);
6005 if (err)
6006 goto err_setup_tx;
6007
6008
6009 err = ixgbe_setup_all_rx_resources(adapter);
6010 if (err)
6011 goto err_setup_rx;
6012
6013 ixgbe_configure(adapter);
6014
6015 err = ixgbe_request_irq(adapter);
6016 if (err)
6017 goto err_req_irq;
6018
6019
6020 if (adapter->num_rx_pools > 1)
6021 queues = adapter->num_rx_queues_per_pool;
6022 else
6023 queues = adapter->num_tx_queues;
6024
6025 err = netif_set_real_num_tx_queues(netdev, queues);
6026 if (err)
6027 goto err_set_queues;
6028
6029 if (adapter->num_rx_pools > 1 &&
6030 adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
6031 queues = IXGBE_MAX_L2A_QUEUES;
6032 else
6033 queues = adapter->num_rx_queues;
6034 err = netif_set_real_num_rx_queues(netdev, queues);
6035 if (err)
6036 goto err_set_queues;
6037
6038 ixgbe_ptp_init(adapter);
6039
6040 ixgbe_up_complete(adapter);
6041
6042 ixgbe_clear_vxlan_port(adapter);
6043#ifdef CONFIG_IXGBE_VXLAN
6044 vxlan_get_rx_port(netdev);
6045#endif
6046
6047 return 0;
6048
6049err_set_queues:
6050 ixgbe_free_irq(adapter);
6051err_req_irq:
6052 ixgbe_free_all_rx_resources(adapter);
6053 if (hw->phy.ops.set_phy_power && !adapter->wol)
6054 hw->phy.ops.set_phy_power(&adapter->hw, false);
6055err_setup_rx:
6056 ixgbe_free_all_tx_resources(adapter);
6057err_setup_tx:
6058 ixgbe_reset(adapter);
6059
6060 return err;
6061}
6062
6063static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6064{
6065 ixgbe_ptp_suspend(adapter);
6066
6067 if (adapter->hw.phy.ops.enter_lplu) {
6068 adapter->hw.phy.reset_disable = true;
6069 ixgbe_down(adapter);
6070 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6071 adapter->hw.phy.reset_disable = false;
6072 } else {
6073 ixgbe_down(adapter);
6074 }
6075
6076 ixgbe_free_irq(adapter);
6077
6078 ixgbe_free_all_tx_resources(adapter);
6079 ixgbe_free_all_rx_resources(adapter);
6080}
6081
6082
6083
6084
6085
6086
6087
6088
6089
6090
6091
6092
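/**
 * ixgbe_close - disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The interface is brought down and all transmit and
 * receive resources are freed.
 *
 * Returns 0, this is not allowed to fail
 */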
6093static int ixgbe_close(struct net_device *netdev)
6094{
6095 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6096
6097 ixgbe_ptp_stop(adapter);
6098
6099 ixgbe_close_suspend(adapter);
6100
6101 ixgbe_fdir_filter_exit(adapter);
6102
6103 ixgbe_release_hw_control(adapter);
6104
6105 return 0;
6106}
6107
6108#ifdef CONFIG_PM
6109static int ixgbe_resume(struct pci_dev *pdev)
6110{
6111 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6112 struct net_device *netdev = adapter->netdev;
6113 u32 err;
6114
6115 adapter->hw.hw_addr = adapter->io_addr;
6116 pci_set_power_state(pdev, PCI_D0);
6117 pci_restore_state(pdev);
6118
6119
6120
6121
6122 pci_save_state(pdev);
6123
6124 err = pci_enable_device_mem(pdev);
6125 if (err) {
6126 e_dev_err("Cannot enable PCI device from suspend\n");
6127 return err;
6128 }
6129 smp_mb__before_atomic();
6130 clear_bit(__IXGBE_DISABLED, &adapter->state);
6131 pci_set_master(pdev);
6132
6133 pci_wake_from_d3(pdev, false);
6134
6135 ixgbe_reset(adapter);
6136
6137 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6138
6139 rtnl_lock();
6140 err = ixgbe_init_interrupt_scheme(adapter);
6141 if (!err && netif_running(netdev))
6142 err = ixgbe_open(netdev);
6143
6144 rtnl_unlock();
6145
6146 if (err)
6147 return err;
6148
6149 netif_device_attach(netdev);
6150
6151 return 0;
6152}
6153#endif
6154
6155static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6156{
6157 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6158 struct net_device *netdev = adapter->netdev;
6159 struct ixgbe_hw *hw = &adapter->hw;
6160 u32 ctrl, fctrl;
6161 u32 wufc = adapter->wol;
6162#ifdef CONFIG_PM
6163 int retval = 0;
6164#endif
6165
6166 netif_device_detach(netdev);
6167
6168 rtnl_lock();
6169 if (netif_running(netdev))
6170 ixgbe_close_suspend(adapter);
6171 rtnl_unlock();
6172
6173 ixgbe_clear_interrupt_scheme(adapter);
6174
6175#ifdef CONFIG_PM
6176 retval = pci_save_state(pdev);
6177 if (retval)
6178 return retval;
6179
6180#endif
6181 if (hw->mac.ops.stop_link_on_d3)
6182 hw->mac.ops.stop_link_on_d3(hw);
6183
6184 if (wufc) {
6185 ixgbe_set_rx_mode(netdev);
6186
6187
6188 if (hw->mac.ops.enable_tx_laser)
6189 hw->mac.ops.enable_tx_laser(hw);
6190
6191
6192 if (wufc & IXGBE_WUFC_MC) {
6193 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6194 fctrl |= IXGBE_FCTRL_MPE;
6195 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6196 }
6197
6198 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
6199 ctrl |= IXGBE_CTRL_GIO_DIS;
6200 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
6201
6202 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
6203 } else {
6204 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
6205 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
6206 }
6207
6208 switch (hw->mac.type) {
6209 case ixgbe_mac_82598EB:
6210 pci_wake_from_d3(pdev, false);
6211 break;
6212 case ixgbe_mac_82599EB:
6213 case ixgbe_mac_X540:
6214 case ixgbe_mac_X550:
6215 case ixgbe_mac_X550EM_x:
6216 pci_wake_from_d3(pdev, !!wufc);
6217 break;
6218 default:
6219 break;
6220 }
6221
6222 *enable_wake = !!wufc;
6223 if (hw->phy.ops.set_phy_power && !*enable_wake)
6224 hw->phy.ops.set_phy_power(hw, false);
6225
6226 ixgbe_release_hw_control(adapter);
6227
6228 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6229 pci_disable_device(pdev);
6230
6231 return 0;
6232}
6233
6234#ifdef CONFIG_PM
6235static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
6236{
6237 int retval;
6238 bool wake;
6239
6240 retval = __ixgbe_shutdown(pdev, &wake);
6241 if (retval)
6242 return retval;
6243
6244 if (wake) {
6245 pci_prepare_to_sleep(pdev);
6246 } else {
6247 pci_wake_from_d3(pdev, false);
6248 pci_set_power_state(pdev, PCI_D3hot);
6249 }
6250
6251 return 0;
6252}
6253#endif
6254
6255static void ixgbe_shutdown(struct pci_dev *pdev)
6256{
6257 bool wake;
6258
6259 __ixgbe_shutdown(pdev, &wake);
6260
6261 if (system_state == SYSTEM_POWER_OFF) {
6262 pci_wake_from_d3(pdev, wake);
6263 pci_set_power_state(pdev, PCI_D3hot);
6264 }
6265}
6266
6267
6268
6269
6270
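/**
 * ixgbe_update_stats - update the board statistics counters
 * @adapter: board private structure
 */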
6271void ixgbe_update_stats(struct ixgbe_adapter *adapter)
6272{
6273 struct net_device *netdev = adapter->netdev;
6274 struct ixgbe_hw *hw = &adapter->hw;
6275 struct ixgbe_hw_stats *hwstats = &adapter->stats;
6276 u64 total_mpc = 0;
6277 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
6278 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
6279 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
6280 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
6281
6282 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6283 test_bit(__IXGBE_RESETTING, &adapter->state))
6284 return;
6285
6286 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
6287 u64 rsc_count = 0;
6288 u64 rsc_flush = 0;
6289 for (i = 0; i < adapter->num_rx_queues; i++) {
6290 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
6291 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
6292 }
6293 adapter->rsc_total_count = rsc_count;
6294 adapter->rsc_total_flush = rsc_flush;
6295 }
6296
6297 for (i = 0; i < adapter->num_rx_queues; i++) {
6298 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
6299 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
6300 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
6301 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
6302 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
6303 bytes += rx_ring->stats.bytes;
6304 packets += rx_ring->stats.packets;
6305 }
6306 adapter->non_eop_descs = non_eop_descs;
6307 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
6308 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
6309 adapter->hw_csum_rx_error = hw_csum_rx_error;
6310 netdev->stats.rx_bytes = bytes;
6311 netdev->stats.rx_packets = packets;
6312
6313 bytes = 0;
6314 packets = 0;
6315
6316 for (i = 0; i < adapter->num_tx_queues; i++) {
6317 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6318 restart_queue += tx_ring->tx_stats.restart_queue;
6319 tx_busy += tx_ring->tx_stats.tx_busy;
6320 bytes += tx_ring->stats.bytes;
6321 packets += tx_ring->stats.packets;
6322 }
6323 adapter->restart_queue = restart_queue;
6324 adapter->tx_busy = tx_busy;
6325 netdev->stats.tx_bytes = bytes;
6326 netdev->stats.tx_packets = packets;
6327
6328 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
6329
6330
6331 for (i = 0; i < 8; i++) {
6332
6333 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
6334 missed_rx += mpc;
6335 hwstats->mpc[i] += mpc;
6336 total_mpc += hwstats->mpc[i];
6337 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
6338 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
6339 switch (hw->mac.type) {
6340 case ixgbe_mac_82598EB:
6341 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
6342 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
6343 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
6344 hwstats->pxonrxc[i] +=
6345 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
6346 break;
6347 case ixgbe_mac_82599EB:
6348 case ixgbe_mac_X540:
6349 case ixgbe_mac_X550:
6350 case ixgbe_mac_X550EM_x:
6351 hwstats->pxonrxc[i] +=
6352 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
6353 break;
6354 default:
6355 break;
6356 }
6357 }
6358
6359
6360 for (i = 0; i < 16; i++) {
6361 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
6362 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
6363 if ((hw->mac.type == ixgbe_mac_82599EB) ||
6364 (hw->mac.type == ixgbe_mac_X540) ||
6365 (hw->mac.type == ixgbe_mac_X550) ||
6366 (hw->mac.type == ixgbe_mac_X550EM_x)) {
6367 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
6368 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* read to clear */
6369 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
6370 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* read to clear */
6371 }
6372 }
6373
6374 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
6375
6376 hwstats->gprc -= missed_rx;
6377
6378 ixgbe_update_xoff_received(adapter);
6379
6380
6381 switch (hw->mac.type) {
6382 case ixgbe_mac_82598EB:
6383 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
6384 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
6385 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
6386 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
6387 break;
6388 case ixgbe_mac_X540:
6389 case ixgbe_mac_X550:
6390 case ixgbe_mac_X550EM_x:
6391
6392 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
6393 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
6394 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
6395 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
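		/* fall through - the remaining counters also apply here */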
6396 case ixgbe_mac_82599EB:
6397 for (i = 0; i < 16; i++)
6398 adapter->hw_rx_no_dma_resources +=
6399 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
6400 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
6401 IXGBE_READ_REG(hw, IXGBE_GORCH); /* read to clear */
6402 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
6403 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* read to clear */
6404 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
6405 IXGBE_READ_REG(hw, IXGBE_TORH); /* read to clear */
6406 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
6407 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
6408 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
6409#ifdef IXGBE_FCOE
6410 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
6411 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
6412 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
6413 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
6414 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
6415 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
6416
6417 if (adapter->fcoe.ddp_pool) {
6418 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
6419 struct ixgbe_fcoe_ddp_pool *ddp_pool;
6420 unsigned int cpu;
6421 u64 noddp = 0, noddp_ext_buff = 0;
6422 for_each_possible_cpu(cpu) {
6423 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
6424 noddp += ddp_pool->noddp;
6425 noddp_ext_buff += ddp_pool->noddp_ext_buff;
6426 }
6427 hwstats->fcoe_noddp = noddp;
6428 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
6429 }
6430#endif
6431 break;
6432 default:
6433 break;
6434 }
6435 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
6436 hwstats->bprc += bprc;
6437 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
6438 if (hw->mac.type == ixgbe_mac_82598EB)
6439 hwstats->mprc -= bprc;
6440 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
6441 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
6442 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
6443 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
6444 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
6445 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
6446 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
6447 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
6448 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
6449 hwstats->lxontxc += lxon;
6450 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
6451 hwstats->lxofftxc += lxoff;
6452 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
6453 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
6454
6455
6456
6457 xon_off_tot = lxon + lxoff;
6458 hwstats->gptc -= xon_off_tot;
6459 hwstats->mptc -= xon_off_tot;
6460 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
6461 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
6462 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
6463 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
6464 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
6465 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
6466 hwstats->ptc64 -= xon_off_tot;
6467 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
6468 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
6469 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
6470 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
6471 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
6472 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
6473
6474
6475 netdev->stats.multicast = hwstats->mprc;
6476
6477
6478 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
6479 netdev->stats.rx_dropped = 0;
6480 netdev->stats.rx_length_errors = hwstats->rlec;
6481 netdev->stats.rx_crc_errors = hwstats->crcerrs;
6482 netdev->stats.rx_missed_errors = total_mpc;
6483}
6484
6485
6486
6487
6488
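/**
 * ixgbe_fdir_reinit_subtask - worker thread to re-initialize the FDIR table
 * @adapter: pointer to the device adapter structure
 */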
6489static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
6490{
6491 struct ixgbe_hw *hw = &adapter->hw;
6492 int i;
6493
6494 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
6495 return;
6496
6497 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
6498
6499
6500 if (test_bit(__IXGBE_DOWN, &adapter->state))
6501 return;
6502
6503
6504 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
6505 return;
6506
6507 adapter->fdir_overflow++;
6508
6509 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
6510 for (i = 0; i < adapter->num_tx_queues; i++)
6511 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
6512 &(adapter->tx_ring[i]->state));
6513
6514 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
6515 } else {
6516 e_err(probe, "failed to finish FDIR re-initialization, "
6517 "ignoring FDIR ATR filter additions\n");
6518 }
6519}
6520
6521
6522
6523
6524
6525
6526
6527
6528
6529
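/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * Strobes the interrupt lines to make sure interrupts are still being
 * generated and arms the Tx hang check on every active Tx ring.
 */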
6530static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
6531{
6532 struct ixgbe_hw *hw = &adapter->hw;
6533 u64 eics = 0;
6534 int i;
6535
6536
6537 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6538 test_bit(__IXGBE_REMOVING, &adapter->state) ||
6539 test_bit(__IXGBE_RESETTING, &adapter->state))
6540 return;
6541
6542
6543 if (netif_carrier_ok(adapter->netdev)) {
6544 for (i = 0; i < adapter->num_tx_queues; i++)
6545 set_check_for_tx_hang(adapter->tx_ring[i]);
6546 }
6547
6548 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
6549
6550
6551
6552
6553
6554 IXGBE_WRITE_REG(hw, IXGBE_EICS,
6555 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
6556 } else {
6557
6558 for (i = 0; i < adapter->num_q_vectors; i++) {
6559 struct ixgbe_q_vector *qv = adapter->q_vector[i];
6560 if (qv->rx.ring || qv->tx.ring)
6561 eics |= ((u64)1 << i);
6562 }
6563 }
6564
6565
6566 ixgbe_irq_rearm_queues(adapter, eics);
6567}
6568
6569
6570
6571
6572
6573
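/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 */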
6574static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
6575{
6576 struct ixgbe_hw *hw = &adapter->hw;
6577 u32 link_speed = adapter->link_speed;
6578 bool link_up = adapter->link_up;
6579 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
6580
6581 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
6582 return;
6583
6584 if (hw->mac.ops.check_link) {
6585 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
6586 } else {
6587
6588 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
6589 link_up = true;
6590 }
6591
6592 if (adapter->ixgbe_ieee_pfc)
6593 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
6594
6595 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
6596 hw->mac.ops.fc_enable(hw);
6597 ixgbe_set_rx_drop_en(adapter);
6598 }
6599
6600 if (link_up ||
6601 time_after(jiffies, (adapter->link_check_timeout +
6602 IXGBE_TRY_LINK_TIMEOUT))) {
6603 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6604 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
6605 IXGBE_WRITE_FLUSH(hw);
6606 }
6607
6608 adapter->link_up = link_up;
6609 adapter->link_speed = link_speed;
6610}
6611
6612static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
6613{
6614#ifdef CONFIG_IXGBE_DCB
6615 struct net_device *netdev = adapter->netdev;
6616 struct dcb_app app = {
6617 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
6618 .protocol = 0,
6619 };
6620 u8 up = 0;
6621
6622 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
6623 up = dcb_ieee_getapp_mask(netdev, &app);
6624
6625 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
6626#endif
6627}
6628
6629
6630
6631
6632
6633
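/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and print link up
 * @adapter: pointer to the device adapter structure
 */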
6634static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
6635{
6636 struct net_device *netdev = adapter->netdev;
6637 struct ixgbe_hw *hw = &adapter->hw;
6638 struct net_device *upper;
6639 struct list_head *iter;
6640 u32 link_speed = adapter->link_speed;
6641 const char *speed_str;
6642 bool flow_rx, flow_tx;
6643
6644
6645 if (netif_carrier_ok(netdev))
6646 return;
6647
6648 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
6649
6650 switch (hw->mac.type) {
6651 case ixgbe_mac_82598EB: {
6652 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6653 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
6654 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
6655 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
6656 }
6657 break;
6658 case ixgbe_mac_X540:
6659 case ixgbe_mac_X550:
6660 case ixgbe_mac_X550EM_x:
6661 case ixgbe_mac_82599EB: {
6662 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6663 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6664 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6665 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
6666 }
6667 break;
6668 default:
6669 flow_tx = false;
6670 flow_rx = false;
6671 break;
6672 }
6673
6674 adapter->last_rx_ptp_check = jiffies;
6675
6676 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6677 ixgbe_ptp_start_cyclecounter(adapter);
6678
6679 switch (link_speed) {
6680 case IXGBE_LINK_SPEED_10GB_FULL:
6681 speed_str = "10 Gbps";
6682 break;
6683 case IXGBE_LINK_SPEED_2_5GB_FULL:
6684 speed_str = "2.5 Gbps";
6685 break;
6686 case IXGBE_LINK_SPEED_1GB_FULL:
6687 speed_str = "1 Gbps";
6688 break;
6689 case IXGBE_LINK_SPEED_100_FULL:
6690 speed_str = "100 Mbps";
6691 break;
6692 default:
6693 speed_str = "unknown speed";
6694 break;
6695 }
6696 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
6697 ((flow_rx && flow_tx) ? "RX/TX" :
6698 (flow_rx ? "RX" :
6699 (flow_tx ? "TX" : "None"))));
6700
6701 netif_carrier_on(netdev);
6702 ixgbe_check_vf_rate_limit(adapter);
6703
6704
6705 netif_tx_wake_all_queues(adapter->netdev);
6706
6707
6708 rtnl_lock();
6709 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
6710 if (netif_is_macvlan(upper)) {
6711 struct macvlan_dev *vlan = netdev_priv(upper);
6712
6713 if (vlan->fwd_priv)
6714 netif_tx_wake_all_queues(upper);
6715 }
6716 }
6717 rtnl_unlock();
6718
6719
6720 ixgbe_update_default_up(adapter);
6721
6722
6723 ixgbe_ping_all_vfs(adapter);
6724}
6725
6726
6727
6728
6729
6730
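/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and print link down
 * @adapter: pointer to the device adapter structure
 */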
6731static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
6732{
6733 struct net_device *netdev = adapter->netdev;
6734 struct ixgbe_hw *hw = &adapter->hw;
6735
6736 adapter->link_up = false;
6737 adapter->link_speed = 0;
6738
6739
6740 if (!netif_carrier_ok(netdev))
6741 return;
6742
6743
6744 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
6745 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
6746
6747 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6748 ixgbe_ptp_start_cyclecounter(adapter);
6749
6750 e_info(drv, "NIC Link is Down\n");
6751 netif_carrier_off(netdev);
6752
6753
6754 ixgbe_ping_all_vfs(adapter);
6755}
6756
6757static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
6758{
6759 int i;
6760
6761 for (i = 0; i < adapter->num_tx_queues; i++) {
6762 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6763
6764 if (tx_ring->next_to_use != tx_ring->next_to_clean)
6765 return true;
6766 }
6767
6768 return false;
6769}
6770
6771static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
6772{
6773 struct ixgbe_hw *hw = &adapter->hw;
6774 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
6775 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
6776
6777 int i, j;
6778
6779 if (!adapter->num_vfs)
6780 return false;
6781
6782
6783 if (hw->mac.type >= ixgbe_mac_X550)
6784 return false;
6785
6786 for (i = 0; i < adapter->num_vfs; i++) {
6787 for (j = 0; j < q_per_pool; j++) {
6788 u32 h, t;
6789
6790 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
6791 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
6792
6793 if (h != t)
6794 return true;
6795 }
6796 }
6797
6798 return false;
6799}
6800
6801
6802
6803
6804
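/**
 * ixgbe_watchdog_flush_tx - request a reset when Tx work is pending at link down
 * @adapter: pointer to the device adapter structure
 */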
6805static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
6806{
6807 if (!netif_carrier_ok(adapter->netdev)) {
6808 if (ixgbe_ring_tx_pending(adapter) ||
6809 ixgbe_vf_tx_pending(adapter)) {
6810
6811
6812
6813
6814
6815 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
6816 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
6817 }
6818 }
6819}
6820
6821#ifdef CONFIG_PCI_IOV
6822static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
6823 struct pci_dev *vfdev)
6824{
6825 if (!pci_wait_for_pending_transaction(vfdev))
6826 e_dev_warn("Issuing VFLR with pending transactions\n");
6827
6828 e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
6829 pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
6830
6831 msleep(100);
6832}
6833
6834static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
6835{
6836 struct ixgbe_hw *hw = &adapter->hw;
6837 struct pci_dev *pdev = adapter->pdev;
6838 unsigned int vf;
6839 u32 gpc;
6840
6841 if (!(netif_carrier_ok(adapter->netdev)))
6842 return;
6843
6844 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
6845 if (gpc)
6846 return;
6847
6848
6849
6850
6851
6852
6853 if (!pdev)
6854 return;
6855
6856
6857 for (vf = 0; vf < adapter->num_vfs; ++vf) {
6858 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
6859 u16 status_reg;
6860
6861 if (!vfdev)
6862 continue;
6863 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
6864 if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
6865 status_reg & PCI_STATUS_REC_MASTER_ABORT)
6866 ixgbe_issue_vf_flr(adapter, vfdev);
6867 }
6868}
6869
6870static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
6871{
6872 u32 ssvpc;
6873
6874
6875 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
6876 adapter->num_vfs == 0)
6877 return;
6878
6879 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
6880
6881
6882
6883
6884
6885 if (!ssvpc)
6886 return;
6887
6888 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
6889}
6890#else
6891static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
6892{
6893}
6894
6895static void
6896ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
6897{
6898}
6899#endif
6900
6901
6902
6903
6904
6905
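/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 */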
6906static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
6907{
6908
6909 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6910 test_bit(__IXGBE_REMOVING, &adapter->state) ||
6911 test_bit(__IXGBE_RESETTING, &adapter->state))
6912 return;
6913
6914 ixgbe_watchdog_update_link(adapter);
6915
6916 if (adapter->link_up)
6917 ixgbe_watchdog_link_is_up(adapter);
6918 else
6919 ixgbe_watchdog_link_is_down(adapter);
6920
6921 ixgbe_check_for_bad_vf(adapter);
6922 ixgbe_spoof_check(adapter);
6923 ixgbe_update_stats(adapter);
6924
6925 ixgbe_watchdog_flush_tx(adapter);
6926}
6927
6928
6929
6930
6931
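/**
 * ixgbe_sfp_detection_subtask - poll for an SFP+ module and configure it
 * @adapter: pointer to the device adapter structure
 */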
6932static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
6933{
6934 struct ixgbe_hw *hw = &adapter->hw;
6935 s32 err;
6936
6937
6938 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
6939 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6940 return;
6941
6942 if (adapter->sfp_poll_time &&
6943 time_after(adapter->sfp_poll_time, jiffies))
6944 return;
6945
6946
6947 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6948 return;
6949
6950 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
6951
6952 err = hw->phy.ops.identify_sfp(hw);
6953 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6954 goto sfp_out;
6955
6956 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
6957
6958
6959 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
6960 }
6961
6962
6963 if (err)
6964 goto sfp_out;
6965
6966
6967 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6968 goto sfp_out;
6969
6970 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
6971
6972
6973
6974
6975
6976
6977 if (hw->mac.type == ixgbe_mac_82598EB)
6978 err = hw->phy.ops.reset(hw);
6979 else
6980 err = hw->mac.ops.setup_sfp(hw);
6981
6982 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6983 goto sfp_out;
6984
6985 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
6986 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
6987
6988sfp_out:
6989 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6990
6991 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
6992 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
6993 e_dev_err("failed to initialize because an unsupported "
6994 "SFP+ module type was detected.\n");
6995 e_dev_err("Reload the driver after installing a "
6996 "supported module.\n");
6997 unregister_netdev(adapter->netdev);
6998 }
6999}
7000
7001
7002
7003
7004
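/**
 * ixgbe_sfp_link_config_subtask - set up link for an SFP+ module if needed
 * @adapter: pointer to the device adapter structure
 */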
7005static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
7006{
7007 struct ixgbe_hw *hw = &adapter->hw;
7008 u32 speed;
7009 bool autoneg = false;
7010
7011 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
7012 return;
7013
7014
7015 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7016 return;
7017
7018 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
7019
7020 speed = hw->phy.autoneg_advertised;
7021 if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
7022 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
7023
7024
7025 if (!autoneg) {
7026 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
7027 speed = IXGBE_LINK_SPEED_10GB_FULL;
7028 }
7029 }
7030
7031 if (hw->mac.ops.setup_link)
7032 hw->mac.ops.setup_link(hw, speed, true);
7033
7034 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
7035 adapter->link_check_timeout = jiffies;
7036 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7037}
7038
7039 /**
7040  * ixgbe_service_timer - timer call-back that schedules the service task
7041  * @data: pointer to adapter cast into an unsigned long
7042  **/
7043static void ixgbe_service_timer(unsigned long data)
7044{
7045 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
7046 unsigned long next_event_offset;
7047
7048
7049 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
7050 next_event_offset = HZ / 10;
7051 else
7052 next_event_offset = HZ * 2;
7053
7054
7055 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7056
7057 ixgbe_service_event_schedule(adapter);
7058}
7059
7060static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
7061{
7062 struct ixgbe_hw *hw = &adapter->hw;
7063 u32 status;
7064
7065 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
7066 return;
7067
7068 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
7069
7070 if (!hw->phy.ops.handle_lasi)
7071 return;
7072
7073 status = hw->phy.ops.handle_lasi(&adapter->hw);
7074 if (status != IXGBE_ERR_OVERTEMP)
7075 return;
7076
7077 e_crit(drv, "%s\n", ixgbe_overheat_msg);
7078}
7079
7080static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
7081{
7082 if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
7083 return;
7084
7085 adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
7086
7087
7088 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7089 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7090 test_bit(__IXGBE_RESETTING, &adapter->state))
7091 return;
7092
7093 ixgbe_dump(adapter);
7094 netdev_err(adapter->netdev, "Reset adapter\n");
7095 adapter->tx_timeout_count++;
7096
7097 rtnl_lock();
7098 ixgbe_reinit_locked(adapter);
7099 rtnl_unlock();
7100}
7101
7102 /**
7103  * ixgbe_service_task - manages and runs subtasks
7104  * @work: pointer to work_struct containing our data
7105  **/
7106static void ixgbe_service_task(struct work_struct *work)
7107{
7108 struct ixgbe_adapter *adapter = container_of(work,
7109 struct ixgbe_adapter,
7110 service_task);
7111 if (ixgbe_removed(adapter->hw.hw_addr)) {
7112 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
7113 rtnl_lock();
7114 ixgbe_down(adapter);
7115 rtnl_unlock();
7116 }
7117 ixgbe_service_event_complete(adapter);
7118 return;
7119 }
7120#ifdef CONFIG_IXGBE_VXLAN
7121 if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
7122 adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
7123 vxlan_get_rx_port(adapter->netdev);
7124 }
7125#endif
7126 ixgbe_reset_subtask(adapter);
7127 ixgbe_phy_interrupt_subtask(adapter);
7128 ixgbe_sfp_detection_subtask(adapter);
7129 ixgbe_sfp_link_config_subtask(adapter);
7130 ixgbe_check_overtemp_subtask(adapter);
7131 ixgbe_watchdog_subtask(adapter);
7132 ixgbe_fdir_reinit_subtask(adapter);
7133 ixgbe_check_hang_subtask(adapter);
7134
7135 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
7136 ixgbe_ptp_overflow_check(adapter);
7137 ixgbe_ptp_rx_hang(adapter);
7138 }
7139
7140 ixgbe_service_event_complete(adapter);
7141}
7142
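/*
 * ixgbe_tso - build the context descriptor for a TSO frame
 *
 * Returns 1 when a TSO context descriptor was written, 0 when the frame
 * does not need TSO, or a negative errno if the header could not be made
 * writable.
 */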
7143static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7144 struct ixgbe_tx_buffer *first,
7145 u8 *hdr_len)
7146{
7147 struct sk_buff *skb = first->skb;
7148 u32 vlan_macip_lens, type_tucmd;
7149 u32 mss_l4len_idx, l4len;
7150 int err;
7151
7152 if (skb->ip_summed != CHECKSUM_PARTIAL)
7153 return 0;
7154
7155 if (!skb_is_gso(skb))
7156 return 0;
7157
7158 err = skb_cow_head(skb, 0);
7159 if (err < 0)
7160 return err;
7161
7162
7163 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
7164
7165 if (first->protocol == htons(ETH_P_IP)) {
7166 struct iphdr *iph = ip_hdr(skb);
7167 iph->tot_len = 0;
7168 iph->check = 0;
7169 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7170 iph->daddr, 0,
7171 IPPROTO_TCP,
7172 0);
7173 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7174 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7175 IXGBE_TX_FLAGS_CSUM |
7176 IXGBE_TX_FLAGS_IPV4;
7177 } else if (skb_is_gso_v6(skb)) {
7178 ipv6_hdr(skb)->payload_len = 0;
7179 tcp_hdr(skb)->check =
7180 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
7181 &ipv6_hdr(skb)->daddr,
7182 0, IPPROTO_TCP, 0);
7183 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7184 IXGBE_TX_FLAGS_CSUM;
7185 }
7186
7187
7188 l4len = tcp_hdrlen(skb);
7189 *hdr_len = skb_transport_offset(skb) + l4len;
7190
7191
7192 first->gso_segs = skb_shinfo(skb)->gso_segs;
7193 first->bytecount += (first->gso_segs - 1) * *hdr_len;
7194
7195
7196 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
7197 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
7198
7199
7200 vlan_macip_lens = skb_network_header_len(skb);
7201 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
7202 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7203
7204 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
7205 mss_l4len_idx);
7206
7207 return 1;
7208}
7209
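/*
 * ixgbe_tx_csum - build a context descriptor for Tx checksum offload
 *
 * Parses the (possibly encapsulated) IP and L4 headers so the hardware
 * knows where to insert the checksum; falls back to skb_checksum_help()
 * for L4 protocols the hardware cannot offload.
 */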
7210static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
7211 struct ixgbe_tx_buffer *first)
7212{
7213 struct sk_buff *skb = first->skb;
7214 u32 vlan_macip_lens = 0;
7215 u32 mss_l4len_idx = 0;
7216 u32 type_tucmd = 0;
7217
7218 if (skb->ip_summed != CHECKSUM_PARTIAL) {
7219 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
7220 !(first->tx_flags & IXGBE_TX_FLAGS_CC))
7221 return;
7222 vlan_macip_lens = skb_network_offset(skb) <<
7223 IXGBE_ADVTXD_MACLEN_SHIFT;
7224 } else {
7225 u8 l4_hdr = 0;
7226 union {
7227 struct iphdr *ipv4;
7228 struct ipv6hdr *ipv6;
7229 u8 *raw;
7230 } network_hdr;
7231 union {
7232 struct tcphdr *tcphdr;
7233 u8 *raw;
7234 } transport_hdr;
7235 __be16 frag_off;
7236
7237 if (skb->encapsulation) {
7238 network_hdr.raw = skb_inner_network_header(skb);
7239 transport_hdr.raw = skb_inner_transport_header(skb);
7240 vlan_macip_lens = skb_inner_network_offset(skb) <<
7241 IXGBE_ADVTXD_MACLEN_SHIFT;
7242 } else {
7243 network_hdr.raw = skb_network_header(skb);
7244 transport_hdr.raw = skb_transport_header(skb);
7245 vlan_macip_lens = skb_network_offset(skb) <<
7246 IXGBE_ADVTXD_MACLEN_SHIFT;
7247 }
7248
7249
7250 switch (network_hdr.ipv4->version) {
7251 case IPVERSION:
7252 vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
7253 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7254 l4_hdr = network_hdr.ipv4->protocol;
7255 break;
7256 case 6:
7257 vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
7258 l4_hdr = network_hdr.ipv6->nexthdr;
7259 if (likely((transport_hdr.raw - network_hdr.raw) ==
7260 sizeof(struct ipv6hdr)))
7261 break;
7262 ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
7263 sizeof(struct ipv6hdr),
7264 &l4_hdr, &frag_off);
7265 if (unlikely(frag_off))
7266 l4_hdr = NEXTHDR_FRAGMENT;
7267 break;
7268 default:
7269 break;
7270 }
7271
7272 switch (l4_hdr) {
7273 case IPPROTO_TCP:
7274 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
7275 mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
7276 IXGBE_ADVTXD_L4LEN_SHIFT;
7277 break;
7278 case IPPROTO_SCTP:
7279 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
7280 mss_l4len_idx = sizeof(struct sctphdr) <<
7281 IXGBE_ADVTXD_L4LEN_SHIFT;
7282 break;
7283 case IPPROTO_UDP:
7284 mss_l4len_idx = sizeof(struct udphdr) <<
7285 IXGBE_ADVTXD_L4LEN_SHIFT;
7286 break;
7287 default:
7288 if (unlikely(net_ratelimit())) {
7289 dev_warn(tx_ring->dev,
7290 "partial checksum, version=%d, l4 proto=%x\n",
7291 network_hdr.ipv4->version, l4_hdr);
7292 }
7293 skb_checksum_help(skb);
7294 goto no_csum;
7295 }
7296
7297
7298 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
7299 }
7300
7301no_csum:
7302
7303 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7304
7305 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
7306 type_tucmd, mss_l4len_idx);
7307}
7308
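/* IXGBE_SET_FLAG translates a flag bit in _input into the corresponding
 * _result bit.  Because both masks are compile-time constants, the
 * multiply/divide by their ratio reduces to a simple shift.
 */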
7309#define IXGBE_SET_FLAG(_input, _flag, _result) \
7310 ((_flag <= _result) ? \
7311 ((u32)(_input & _flag) * (_result / _flag)) : \
7312 ((u32)(_input & _flag) / (_flag / _result)))
7313
7314static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
7315{
7316
7317 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
7318 IXGBE_ADVTXD_DCMD_DEXT |
7319 IXGBE_ADVTXD_DCMD_IFCS;
7320
7321
7322 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
7323 IXGBE_ADVTXD_DCMD_VLE);
7324
7325
7326 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
7327 IXGBE_ADVTXD_DCMD_TSE);
7328
7329
7330 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
7331 IXGBE_ADVTXD_MAC_TSTAMP);
7332
7333
7334 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
7335
7336 return cmd_type;
7337}
7338
7339static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
7340 u32 tx_flags, unsigned int paylen)
7341{
7342 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
7343
7344
7345 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7346 IXGBE_TX_FLAGS_CSUM,
7347 IXGBE_ADVTXD_POPTS_TXSM);
7348
7349
7350 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7351 IXGBE_TX_FLAGS_IPV4,
7352 IXGBE_ADVTXD_POPTS_IXSM);
7353
7354 /* a Check Context must be requested whenever the internal Tx
7355  * switch is enabled, which is always the case when virtual
7356  * functions are in use
7357  */
7358 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7359 IXGBE_TX_FLAGS_CC,
7360 IXGBE_ADVTXD_CC);
7361
7362 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
7363}
7364
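/*
 * __ixgbe_maybe_stop_tx - stop the Tx queue when descriptors run low
 *
 * Returns -EBUSY if the ring really is out of room, or 0 if space was
 * freed concurrently and the queue could be restarted immediately.
 */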
7365static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7366{
7367 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
7368
7369 /* memory barrier: make the stopped queue state visible to other
7370  * CPUs before re-reading the free descriptor count below, so a
7371  * concurrent ring clean is not missed
7372  */
7373 smp_mb();
7374
7375 /* check again in case another CPU freed descriptors while the
7376  * queue was being stopped
7377  */
7378 if (likely(ixgbe_desc_unused(tx_ring) < size))
7379 return -EBUSY;
7380
7381
7382 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
7383 ++tx_ring->tx_stats.restart_queue;
7384 return 0;
7385}
7386
7387static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7388{
7389 if (likely(ixgbe_desc_unused(tx_ring) >= size))
7390 return 0;
7391
7392 return __ixgbe_maybe_stop_tx(tx_ring, size);
7393}
7394
7395#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
7396 IXGBE_TXD_CMD_RS)
7397
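/*
 * ixgbe_tx_map - map the skb head and fragments for DMA, fill the
 * descriptor ring, and notify hardware by writing the ring tail.
 * On a DMA mapping error all mappings for the frame are unwound.
 */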
7398static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
7399 struct ixgbe_tx_buffer *first,
7400 const u8 hdr_len)
7401{
7402 struct sk_buff *skb = first->skb;
7403 struct ixgbe_tx_buffer *tx_buffer;
7404 union ixgbe_adv_tx_desc *tx_desc;
7405 struct skb_frag_struct *frag;
7406 dma_addr_t dma;
7407 unsigned int data_len, size;
7408 u32 tx_flags = first->tx_flags;
7409 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
7410 u16 i = tx_ring->next_to_use;
7411
7412 tx_desc = IXGBE_TX_DESC(tx_ring, i);
7413
7414 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
7415
7416 size = skb_headlen(skb);
7417 data_len = skb->data_len;
7418
7419#ifdef IXGBE_FCOE
7420 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
7421 if (data_len < sizeof(struct fcoe_crc_eof)) {
7422 size -= sizeof(struct fcoe_crc_eof) - data_len;
7423 data_len = 0;
7424 } else {
7425 data_len -= sizeof(struct fcoe_crc_eof);
7426 }
7427 }
7428
7429#endif
7430 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
7431
7432 tx_buffer = first;
7433
7434 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
7435 if (dma_mapping_error(tx_ring->dev, dma))
7436 goto dma_error;
7437
7438
7439 dma_unmap_len_set(tx_buffer, len, size);
7440 dma_unmap_addr_set(tx_buffer, dma, dma);
7441
7442 tx_desc->read.buffer_addr = cpu_to_le64(dma);
7443
7444 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
7445 tx_desc->read.cmd_type_len =
7446 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
7447
7448 i++;
7449 tx_desc++;
7450 if (i == tx_ring->count) {
7451 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7452 i = 0;
7453 }
7454 tx_desc->read.olinfo_status = 0;
7455
7456 dma += IXGBE_MAX_DATA_PER_TXD;
7457 size -= IXGBE_MAX_DATA_PER_TXD;
7458
7459 tx_desc->read.buffer_addr = cpu_to_le64(dma);
7460 }
7461
7462 if (likely(!data_len))
7463 break;
7464
7465 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
7466
7467 i++;
7468 tx_desc++;
7469 if (i == tx_ring->count) {
7470 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7471 i = 0;
7472 }
7473 tx_desc->read.olinfo_status = 0;
7474
7475#ifdef IXGBE_FCOE
7476 size = min_t(unsigned int, data_len, skb_frag_size(frag));
7477#else
7478 size = skb_frag_size(frag);
7479#endif
7480 data_len -= size;
7481
7482 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
7483 DMA_TO_DEVICE);
7484
7485 tx_buffer = &tx_ring->tx_buffer_info[i];
7486 }
7487
7488
7489 cmd_type |= size | IXGBE_TXD_CMD;
7490 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
7491
7492 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
7493
7494
7495 first->time_stamp = jiffies;
7496
7497 /*
7498  * Force memory writes to complete before letting hardware know
7499  * there are new descriptors to fetch (only needed on weakly
7500  * ordered memory architectures such as IA-64).
7501  *
7502  * The barrier also guarantees the descriptor status bits are
7503  * written before next_to_watch is set below.
7504  */
7505 wmb();
7506
7507
7508 first->next_to_watch = tx_desc;
7509
7510 i++;
7511 if (i == tx_ring->count)
7512 i = 0;
7513
7514 tx_ring->next_to_use = i;
7515
7516 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
7517
7518 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
7519 writel(i, tx_ring->tail);
7520
7521 /* needed when more than one CPU can write the tail register;
7522  * it orders the MMIO write on IA64/Altix systems
7523  */
7524 mmiowb();
7525 }
7526
7527 return;
7528dma_error:
7529 dev_err(tx_ring->dev, "TX DMA map failed\n");
7530
7531
7532 for (;;) {
7533 tx_buffer = &tx_ring->tx_buffer_info[i];
7534 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
7535 if (tx_buffer == first)
7536 break;
7537 if (i == 0)
7538 i = tx_ring->count;
7539 i--;
7540 }
7541
7542 tx_ring->next_to_use = i;
7543}
7544
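/*
 * ixgbe_atr - sample this transmit flow for the Flow Director
 *
 * Application Targeted Routing: add a signature filter for the flow so
 * that receive traffic is steered back to the queue (and CPU) that
 * transmitted it.  Only TCP flows are sampled, normally once every
 * atr_sample_rate packets per ring, plus every SYN.
 */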
7545static void ixgbe_atr(struct ixgbe_ring *ring,
7546 struct ixgbe_tx_buffer *first)
7547{
7548 struct ixgbe_q_vector *q_vector = ring->q_vector;
7549 union ixgbe_atr_hash_dword input = { .dword = 0 };
7550 union ixgbe_atr_hash_dword common = { .dword = 0 };
7551 union {
7552 unsigned char *network;
7553 struct iphdr *ipv4;
7554 struct ipv6hdr *ipv6;
7555 } hdr;
7556 struct tcphdr *th;
7557 struct sk_buff *skb;
7558#ifdef CONFIG_IXGBE_VXLAN
7559 u8 encap = false;
7560#endif
7561 __be16 vlan_id;
7562
7563
7564 if (!q_vector)
7565 return;
7566
7567
7568 if (!ring->atr_sample_rate)
7569 return;
7570
7571 ring->atr_count++;
7572
7573
7574 skb = first->skb;
7575 hdr.network = skb_network_header(skb);
7576 if (!skb->encapsulation) {
7577 th = tcp_hdr(skb);
7578 } else {
7579#ifdef CONFIG_IXGBE_VXLAN
7580 struct ixgbe_adapter *adapter = q_vector->adapter;
7581
7582 if (!adapter->vxlan_port)
7583 return;
7584 if (first->protocol != htons(ETH_P_IP) ||
7585 hdr.ipv4->version != IPVERSION ||
7586 hdr.ipv4->protocol != IPPROTO_UDP) {
7587 return;
7588 }
7589 if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
7590 return;
7591 encap = true;
7592 hdr.network = skb_inner_network_header(skb);
7593 th = inner_tcp_hdr(skb);
7594#else
7595 return;
7596#endif
7597 }
7598
7599
7600 switch (hdr.ipv4->version) {
7601 case IPVERSION:
7602 if (hdr.ipv4->protocol != IPPROTO_TCP)
7603 return;
7604 break;
7605 case 6:
7606 if (likely((unsigned char *)th - hdr.network ==
7607 sizeof(struct ipv6hdr))) {
7608 if (hdr.ipv6->nexthdr != IPPROTO_TCP)
7609 return;
7610 } else {
7611 __be16 frag_off;
7612 u8 l4_hdr;
7613
7614 ipv6_skip_exthdr(skb, hdr.network - skb->data +
7615 sizeof(struct ipv6hdr),
7616 &l4_hdr, &frag_off);
7617 if (unlikely(frag_off))
7618 return;
7619 if (l4_hdr != IPPROTO_TCP)
7620 return;
7621 }
7622 break;
7623 default:
7624 return;
7625 }
7626
7627
7628 if (!th || th->fin)
7629 return;
7630
7631
7632 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
7633 return;
7634
7635
7636 ring->atr_count = 0;
7637
7638 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
7639
7640 /*
7641  * Source and destination are swapped in the filter input
7642  * relative to this transmitted frame, so the filter matches the
7643  * flow as it will be received.  The uncompressed part carries
7644  * the VLAN id and flow type; ports and addresses are folded
7645  * into the compressed 'common' dwords below.
7646  */
7647 input.formatted.vlan_id = vlan_id;
7648
7649 /*
7650  * the source port and flex bytes share a word, so XOR them
7651  * together into the source port portion of the compressed dword
7652  */
7653 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
7654 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
7655 else
7656 common.port.src ^= th->dest ^ first->protocol;
7657 common.port.dst ^= th->source;
7658
7659 switch (hdr.ipv4->version) {
7660 case IPVERSION:
7661 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
7662 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
7663 break;
7664 case 6:
7665 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
7666 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
7667 hdr.ipv6->saddr.s6_addr32[1] ^
7668 hdr.ipv6->saddr.s6_addr32[2] ^
7669 hdr.ipv6->saddr.s6_addr32[3] ^
7670 hdr.ipv6->daddr.s6_addr32[0] ^
7671 hdr.ipv6->daddr.s6_addr32[1] ^
7672 hdr.ipv6->daddr.s6_addr32[2] ^
7673 hdr.ipv6->daddr.s6_addr32[3];
7674 break;
7675 default:
7676 break;
7677 }
7678
7679#ifdef CONFIG_IXGBE_VXLAN
7680 if (encap)
7681 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
7682#endif
7683
7684
7685 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
7686 input, common, ring->queue_index);
7687}
7688
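/*
 * ixgbe_select_queue - Tx queue selection; forwarded (macvlan offload)
 * frames use their accelerator's reserved queue range, FCoE/FIP frames
 * use the FCoE ring set, everything else falls back to the core.
 */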
7689static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
7690 void *accel_priv, select_queue_fallback_t fallback)
7691{
7692 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
7693#ifdef IXGBE_FCOE
7694 struct ixgbe_adapter *adapter;
7695 struct ixgbe_ring_feature *f;
7696 int txq;
7697#endif
7698
7699 if (fwd_adapter)
7700 return skb->queue_mapping + fwd_adapter->tx_base_queue;
7701
7702#ifdef IXGBE_FCOE
7703
7704 /*
7705  * only run the FCoE queue selection below when the protocol is
7706  * FCoE or FIP and FCoE is enabled on the adapter
7707  */
7708 switch (vlan_get_protocol(skb)) {
7709 case htons(ETH_P_FCOE):
7710 case htons(ETH_P_FIP):
7711 adapter = netdev_priv(dev);
7712
7713 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
7714 break;
7715 default:
7716 return fallback(dev, skb);
7717 }
7718
7719 f = &adapter->ring_feature[RING_F_FCOE];
7720
7721 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
7722 smp_processor_id();
7723
7724 while (txq >= f->indices)
7725 txq -= f->indices;
7726
7727 return txq + f->offset;
7728#else
7729 return fallback(dev, skb);
7730#endif
7731}
7732
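/*
 * ixgbe_xmit_frame_ring - main transmit path for one ring: reserve
 * descriptors, record VLAN/PTP/DCB flags, apply TSO or checksum offload
 * (and FCoE sequence offload when built in), then hand the frame to
 * ixgbe_tx_map().
 */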
7733netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
7734 struct ixgbe_adapter *adapter,
7735 struct ixgbe_ring *tx_ring)
7736{
7737 struct ixgbe_tx_buffer *first;
7738 int tso;
7739 u32 tx_flags = 0;
7740 unsigned short f;
7741 u16 count = TXD_USE_COUNT(skb_headlen(skb));
7742 __be16 protocol = skb->protocol;
7743 u8 hdr_len = 0;
7744
7745 /*
7746  * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
7747  *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
7748  *       + 2 desc gap to keep tail from touching head,
7749  *       + 1 desc for context descriptor,
7750  * otherwise try again next time
7751  */
7752 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
7753 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
7754
7755 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
7756 tx_ring->tx_stats.tx_busy++;
7757 return NETDEV_TX_BUSY;
7758 }
7759
7760
7761 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
7762 first->skb = skb;
7763 first->bytecount = skb->len;
7764 first->gso_segs = 1;
7765
7766
7767 if (skb_vlan_tag_present(skb)) {
7768 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
7769 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
7770
7771 } else if (protocol == htons(ETH_P_8021Q)) {
7772 struct vlan_hdr *vhdr, _vhdr;
7773 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
7774 if (!vhdr)
7775 goto out_drop;
7776
7777 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
7778 IXGBE_TX_FLAGS_VLAN_SHIFT;
7779 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
7780 }
7781 protocol = vlan_get_protocol(skb);
7782
7783 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
7784 adapter->ptp_clock &&
7785 !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
7786 &adapter->state)) {
7787 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7788 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
7789
7790
7791 adapter->ptp_tx_skb = skb_get(skb);
7792 adapter->ptp_tx_start = jiffies;
7793 schedule_work(&adapter->ptp_tx_work);
7794 }
7795
7796 skb_tx_timestamp(skb);
7797
7798#ifdef CONFIG_PCI_IOV
7799 /*
7800  * when SR-IOV is enabled the internal Tx switch is active, so
7801  * request a Check Context on every transmitted frame
7802  */
7803 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7804 tx_flags |= IXGBE_TX_FLAGS_CC;
7805
7806#endif
7807
7808 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
7809 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
7810 (skb->priority != TC_PRIO_CONTROL))) {
7811 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
7812 tx_flags |= (skb->priority & 0x7) <<
7813 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
7814 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
7815 struct vlan_ethhdr *vhdr;
7816
7817 if (skb_cow_head(skb, 0))
7818 goto out_drop;
7819 vhdr = (struct vlan_ethhdr *)skb->data;
7820 vhdr->h_vlan_TCI = htons(tx_flags >>
7821 IXGBE_TX_FLAGS_VLAN_SHIFT);
7822 } else {
7823 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
7824 }
7825 }
7826
7827
7828 first->tx_flags = tx_flags;
7829 first->protocol = protocol;
7830
7831#ifdef IXGBE_FCOE
7832
7833 if ((protocol == htons(ETH_P_FCOE)) &&
7834 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
7835 tso = ixgbe_fso(tx_ring, first, &hdr_len);
7836 if (tso < 0)
7837 goto out_drop;
7838
7839 goto xmit_fcoe;
7840 }
7841
7842#endif
7843 tso = ixgbe_tso(tx_ring, first, &hdr_len);
7844 if (tso < 0)
7845 goto out_drop;
7846 else if (!tso)
7847 ixgbe_tx_csum(tx_ring, first);
7848
7849
7850 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
7851 ixgbe_atr(tx_ring, first);
7852
7853#ifdef IXGBE_FCOE
7854xmit_fcoe:
7855#endif
7856 ixgbe_tx_map(tx_ring, first, hdr_len);
7857
7858 return NETDEV_TX_OK;
7859
7860out_drop:
7861 dev_kfree_skb_any(first->skb);
7862 first->skb = NULL;
7863
7864 return NETDEV_TX_OK;
7865}
7866
7867static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
7868 struct net_device *netdev,
7869 struct ixgbe_ring *ring)
7870{
7871 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7872 struct ixgbe_ring *tx_ring;
7873
7874 /*
7875  * The minimum packet size for the olinfo paylen field is 17,
7876  * so pad the skb to meet that minimum.
7877  */
7878 if (skb_put_padto(skb, 17))
7879 return NETDEV_TX_OK;
7880
7881 tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
7882
7883 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
7884}
7885
7886static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
7887 struct net_device *netdev)
7888{
7889 return __ixgbe_xmit_frame(skb, netdev, NULL);
7890}
7891
7892 /**
7893  * ixgbe_set_mac - Change the Ethernet Address of the NIC
7894  * @netdev: network interface device structure
7895  * @p: pointer to an address structure
7896  *
7897  * Returns 0 on success, negative on failure
7898  **/
7899static int ixgbe_set_mac(struct net_device *netdev, void *p)
7900{
7901 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7902 struct ixgbe_hw *hw = &adapter->hw;
7903 struct sockaddr *addr = p;
7904
7905 if (!is_valid_ether_addr(addr->sa_data))
7906 return -EADDRNOTAVAIL;
7907
7908 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
7909 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
7910
7911 ixgbe_mac_set_default_filter(adapter);
7912
7913 return 0;
7914}
7915
7916static int
7917ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
7918{
7919 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7920 struct ixgbe_hw *hw = &adapter->hw;
7921 u16 value;
7922 int rc;
7923
7924 if (prtad != hw->phy.mdio.prtad)
7925 return -EINVAL;
7926 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
7927 if (!rc)
7928 rc = value;
7929 return rc;
7930}
7931
7932static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
7933 u16 addr, u16 value)
7934{
7935 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7936 struct ixgbe_hw *hw = &adapter->hw;
7937
7938 if (prtad != hw->phy.mdio.prtad)
7939 return -EINVAL;
7940 return hw->phy.ops.write_reg(hw, addr, devad, value);
7941}
7942
7943static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
7944{
7945 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7946
7947 switch (cmd) {
7948 case SIOCSHWTSTAMP:
7949 return ixgbe_ptp_set_ts_config(adapter, req);
7950 case SIOCGHWTSTAMP:
7951 return ixgbe_ptp_get_ts_config(adapter, req);
7952 default:
7953 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
7954 }
7955}
7956
7957 /**
7958  * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
7959  * netdev->dev_addrs
7960  * @dev: network interface device structure
7961  *
7962  * Returns non-zero on failure
7963  **/
7964static int ixgbe_add_sanmac_netdev(struct net_device *dev)
7965{
7966 int err = 0;
7967 struct ixgbe_adapter *adapter = netdev_priv(dev);
7968 struct ixgbe_hw *hw = &adapter->hw;
7969
7970 if (is_valid_ether_addr(hw->mac.san_addr)) {
7971 rtnl_lock();
7972 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
7973 rtnl_unlock();
7974
7975
7976 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
7977 }
7978 return err;
7979}
7980
7981 /**
7982  * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
7983  * netdev->dev_addrs
7984  * @dev: network interface device structure
7985  *
7986  * Returns non-zero on failure
7987  **/
7988static int ixgbe_del_sanmac_netdev(struct net_device *dev)
7989{
7990 int err = 0;
7991 struct ixgbe_adapter *adapter = netdev_priv(dev);
7992 struct ixgbe_mac_info *mac = &adapter->hw.mac;
7993
7994 if (is_valid_ether_addr(mac->san_addr)) {
7995 rtnl_lock();
7996 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
7997 rtnl_unlock();
7998 }
7999 return err;
8000}
8001
8002#ifdef CONFIG_NET_POLL_CONTROLLER
8003
8004 /* Polling 'interrupt' - used by things like netconsole to send
8005  * skbs without having to re-enable interrupts.  It is not called
8006  * while the normal interrupt routine is executing.
8007  */
8008static void ixgbe_netpoll(struct net_device *netdev)
8009{
8010 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8011 int i;
8012
8013
8014 if (test_bit(__IXGBE_DOWN, &adapter->state))
8015 return;
8016
8017
8018 for (i = 0; i < adapter->num_q_vectors; i++)
8019 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
8020}
8021
8022#endif
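/*
 * ixgbe_get_stats64 - aggregate the 64-bit per-ring packet/byte counters
 * under RCU, then copy the remaining error counters from netdev->stats.
 */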
8023static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
8024 struct rtnl_link_stats64 *stats)
8025{
8026 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8027 int i;
8028
8029 rcu_read_lock();
8030 for (i = 0; i < adapter->num_rx_queues; i++) {
8031 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
8032 u64 bytes, packets;
8033 unsigned int start;
8034
8035 if (ring) {
8036 do {
8037 start = u64_stats_fetch_begin_irq(&ring->syncp);
8038 packets = ring->stats.packets;
8039 bytes = ring->stats.bytes;
8040 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8041 stats->rx_packets += packets;
8042 stats->rx_bytes += bytes;
8043 }
8044 }
8045
8046 for (i = 0; i < adapter->num_tx_queues; i++) {
8047 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
8048 u64 bytes, packets;
8049 unsigned int start;
8050
8051 if (ring) {
8052 do {
8053 start = u64_stats_fetch_begin_irq(&ring->syncp);
8054 packets = ring->stats.packets;
8055 bytes = ring->stats.bytes;
8056 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8057 stats->tx_packets += packets;
8058 stats->tx_bytes += bytes;
8059 }
8060 }
8061 rcu_read_unlock();
8062
8063 stats->multicast = netdev->stats.multicast;
8064 stats->rx_errors = netdev->stats.rx_errors;
8065 stats->rx_length_errors = netdev->stats.rx_length_errors;
8066 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
8067 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
8068 return stats;
8069}
8070
8071#ifdef CONFIG_IXGBE_DCB
8072 /**
8073  * ixgbe_validate_rtr - verify the 802.1Qp to Rx packet buffer mapping.
8074  * @adapter: Pointer to adapter struct
8075  * @tc: number of traffic classes currently enabled
8076  *
8077  * Ensure every user priority in RTRUP2TC still maps to a traffic class
8078  * (and therefore an Rx packet buffer) that exists after the change.
8079  **/
8080static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
8081{
8082 struct ixgbe_hw *hw = &adapter->hw;
8083 u32 reg, rsave;
8084 int i;
8085
8086 /* 82598 has a fixed priority to TC mapping that cannot be
8087  * reprogrammed, so there is nothing to validate here
8088  */
8089 if (hw->mac.type == ixgbe_mac_82598EB)
8090 return;
8091
8092 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
8093 rsave = reg;
8094
8095 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8096 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
8097
8098
8099 if (up2tc > tc)
8100 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
8101 }
8102
8103 if (reg != rsave)
8104 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
8105
8106 return;
8107}
8108
8109
8110 /**
8111  * ixgbe_set_prio_tc_map - Configure netdev prio tc map
8112  * @adapter: Pointer to adapter struct
8113  * Populate the netdev user priority to traffic class mapping.
8114  **/
8115static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
8116{
8117 struct net_device *dev = adapter->netdev;
8118 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
8119 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
8120 u8 prio;
8121
8122 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
8123 u8 tc = 0;
8124
8125 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
8126 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
8127 else if (ets)
8128 tc = ets->prio_tc[prio];
8129
8130 netdev_set_prio_tc_map(dev, prio, tc);
8131 }
8132}
8133
8134#endif
8135
8136 /**
8137  * ixgbe_setup_tc - configure net_device for multiple traffic classes
8138  * @dev: net device to configure
8139  * @tc: number of traffic classes to enable
8140  **/
8141int ixgbe_setup_tc(struct net_device *dev, u8 tc)
8142{
8143 struct ixgbe_adapter *adapter = netdev_priv(dev);
8144 struct ixgbe_hw *hw = &adapter->hw;
8145 bool pools;
8146
8147
8148 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
8149 return -EINVAL;
8150
8151 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
8152 return -EINVAL;
8153
8154 pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
8155 if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
8156 return -EBUSY;
8157
8158 /* Hardware has to reinitialize queues and interrupts to match
8159  * the packet buffer alignment; it is not flexible enough to do
8160  * this dynamically, so close (or reset) the interface first.
8161  */
8162 if (netif_running(dev))
8163 ixgbe_close(dev);
8164 else
8165 ixgbe_reset(adapter);
8166
8167 ixgbe_clear_interrupt_scheme(adapter);
8168
8169#ifdef CONFIG_IXGBE_DCB
8170 if (tc) {
8171 netdev_set_num_tc(dev, tc);
8172 ixgbe_set_prio_tc_map(adapter);
8173
8174 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
8175
8176 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
8177 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
8178 adapter->hw.fc.requested_mode = ixgbe_fc_none;
8179 }
8180 } else {
8181 netdev_reset_tc(dev);
8182
8183 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
8184 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
8185
8186 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
8187
8188 adapter->temp_dcb_cfg.pfc_mode_enable = false;
8189 adapter->dcb_cfg.pfc_mode_enable = false;
8190 }
8191
8192 ixgbe_validate_rtr(adapter, tc);
8193
8194#endif
8195 ixgbe_init_interrupt_scheme(adapter);
8196
8197 if (netif_running(dev))
8198 return ixgbe_open(dev);
8199
8200 return 0;
8201}
8202
8203#ifdef CONFIG_PCI_IOV
8204void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
8205{
8206 struct net_device *netdev = adapter->netdev;
8207
8208 rtnl_lock();
8209 ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
8210 rtnl_unlock();
8211}
8212
8213#endif
8214void ixgbe_do_reset(struct net_device *netdev)
8215{
8216 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8217
8218 if (netif_running(netdev))
8219 ixgbe_reinit_locked(adapter);
8220 else
8221 ixgbe_reset(adapter);
8222}
8223
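/* ixgbe_fix_features - keep the advertised feature set consistent:
 * LRO (RSC) cannot be offered without Rx checksumming or on parts
 * that are not RSC capable.
 */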
8224static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
8225 netdev_features_t features)
8226{
8227 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8228
8229
8230 if (!(features & NETIF_F_RXCSUM))
8231 features &= ~NETIF_F_LRO;
8232
8233
8234 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
8235 features &= ~NETIF_F_LRO;
8236
8237 return features;
8238}
8239
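/* ixgbe_set_features - apply feature changes; toggling RSC/LRO, the
 * Flow Director n-tuple mode or RXALL triggers a device reset, while
 * VLAN stripping and the VXLAN Rx checksum offload are applied directly.
 */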
8240static int ixgbe_set_features(struct net_device *netdev,
8241 netdev_features_t features)
8242{
8243 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8244 netdev_features_t changed = netdev->features ^ features;
8245 bool need_reset = false;
8246
8247
8248 if (!(features & NETIF_F_LRO)) {
8249 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
8250 need_reset = true;
8251 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
8252 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
8253 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
8254 if (adapter->rx_itr_setting == 1 ||
8255 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
8256 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
8257 need_reset = true;
8258 } else if ((changed ^ features) & NETIF_F_LRO) {
8259 e_info(probe, "rx-usecs set too low, "
8260 "disabling RSC\n");
8261 }
8262 }
8263
8264 /*
8265  * Check whether Flow Director n-tuple support was enabled or
8266  * disabled.  If the state changed, a reset is needed.
8267  */
8268 switch (features & NETIF_F_NTUPLE) {
8269 case NETIF_F_NTUPLE:
8270
8271 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
8272 need_reset = true;
8273
8274 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
8275 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
8276 break;
8277 default:
8278
8279 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
8280 need_reset = true;
8281
8282 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
8283
8284
8285 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8286 break;
8287
8288
8289 if (netdev_get_num_tc(netdev) > 1)
8290 break;
8291
8292
8293 if (adapter->ring_feature[RING_F_RSS].limit <= 1)
8294 break;
8295
8296
8297 if (!adapter->atr_sample_rate)
8298 break;
8299
8300 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
8301 break;
8302 }
8303
8304 if (features & NETIF_F_HW_VLAN_CTAG_RX)
8305 ixgbe_vlan_strip_enable(adapter);
8306 else
8307 ixgbe_vlan_strip_disable(adapter);
8308
8309 if (changed & NETIF_F_RXALL)
8310 need_reset = true;
8311
8312 netdev->features = features;
8313
8314#ifdef CONFIG_IXGBE_VXLAN
8315 if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
8316 if (features & NETIF_F_RXCSUM)
8317 adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
8318 else
8319 ixgbe_clear_vxlan_port(adapter);
8320 }
8321#endif
8322
8323 if (need_reset)
8324 ixgbe_do_reset(netdev);
8325
8326 return 0;
8327}
8328
8329#ifdef CONFIG_IXGBE_VXLAN
8330 /**
8331  * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
8332  * @dev: The port's netdev
8333  * @sa_family: Socket Family that VXLAN is notifying us about
8334  * @port: New UDP port number that VXLAN started listening to
8335  **/
8336static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8337 __be16 port)
8338{
8339 struct ixgbe_adapter *adapter = netdev_priv(dev);
8340 struct ixgbe_hw *hw = &adapter->hw;
8341 u16 new_port = ntohs(port);
8342
8343 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8344 return;
8345
8346 if (sa_family == AF_INET6)
8347 return;
8348
8349 if (adapter->vxlan_port == new_port)
8350 return;
8351
8352 if (adapter->vxlan_port) {
8353 netdev_info(dev,
8354 "Hit Max num of VXLAN ports, not adding port %d\n",
8355 new_port);
8356 return;
8357 }
8358
8359 adapter->vxlan_port = new_port;
8360 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
8361}
8362
8363 /**
8364  * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
8365  * @dev: The port's netdev
8366  * @sa_family: Socket Family that VXLAN is notifying us about
8367  * @port: UDP port number that VXLAN stopped listening to
8368  **/
8369static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8370 __be16 port)
8371{
8372 struct ixgbe_adapter *adapter = netdev_priv(dev);
8373 u16 new_port = ntohs(port);
8374
8375 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8376 return;
8377
8378 if (sa_family == AF_INET6)
8379 return;
8380
8381 if (adapter->vxlan_port != new_port) {
8382 netdev_info(dev, "Port %d was not found, not deleting\n",
8383 new_port);
8384 return;
8385 }
8386
8387 ixgbe_clear_vxlan_port(adapter);
8388 adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
8389}
8390#endif
8391
8392static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8393 struct net_device *dev,
8394 const unsigned char *addr, u16 vid,
8395 u16 flags)
8396{
8397
8398 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
8399 struct ixgbe_adapter *adapter = netdev_priv(dev);
8400 u16 pool = VMDQ_P(0);
8401
8402 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
8403 return -ENOMEM;
8404 }
8405
8406 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
8407}
8408
8409 /**
8410  * ixgbe_configure_bridge_mode - set various bridge modes
8411  * @adapter: the private structure
8412  * @mode: requested bridge mode (VEB or VEPA)
8413  *
8414  * Configure the loopback and replication settings the mode requires.
8415  **/
8416static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
8417 __u16 mode)
8418{
8419 struct ixgbe_hw *hw = &adapter->hw;
8420 unsigned int p, num_pools;
8421 u32 vmdctl;
8422
8423 switch (mode) {
8424 case BRIDGE_MODE_VEPA:
8425
8426 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
8427
8428
8429
8430
8431
8432 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
8433 vmdctl |= IXGBE_VT_CTL_REPLEN;
8434 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
8435
8436
8437
8438
8439 num_pools = adapter->num_vfs + adapter->num_rx_pools;
8440 for (p = 0; p < num_pools; p++) {
8441 if (hw->mac.ops.set_source_address_pruning)
8442 hw->mac.ops.set_source_address_pruning(hw,
8443 true,
8444 p);
8445 }
8446 break;
8447 case BRIDGE_MODE_VEB:
8448
8449 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
8450 IXGBE_PFDTXGSWC_VT_LBEN);
8451
8452
8453
8454
8455 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
8456 if (!adapter->num_vfs)
8457 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
8458 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
8459
8460
8461
8462
8463 num_pools = adapter->num_vfs + adapter->num_rx_pools;
8464 for (p = 0; p < num_pools; p++) {
8465 if (hw->mac.ops.set_source_address_pruning)
8466 hw->mac.ops.set_source_address_pruning(hw,
8467 false,
8468 p);
8469 }
8470 break;
8471 default:
8472 return -EINVAL;
8473 }
8474
8475 adapter->bridge_mode = mode;
8476
8477 e_info(drv, "enabling bridge mode: %s\n",
8478 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
8479
8480 return 0;
8481}
8482
8483static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
8484 struct nlmsghdr *nlh, u16 flags)
8485{
8486 struct ixgbe_adapter *adapter = netdev_priv(dev);
8487 struct nlattr *attr, *br_spec;
8488 int rem;
8489
8490 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
8491 return -EOPNOTSUPP;
8492
8493 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8494 if (!br_spec)
8495 return -EINVAL;
8496
8497 nla_for_each_nested(attr, br_spec, rem) {
8498 int status;
8499 __u16 mode;
8500
8501 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8502 continue;
8503
8504 if (nla_len(attr) < sizeof(mode))
8505 return -EINVAL;
8506
8507 mode = nla_get_u16(attr);
8508 status = ixgbe_configure_bridge_mode(adapter, mode);
8509 if (status)
8510 return status;
8511
8512 break;
8513 }
8514
8515 return 0;
8516}
8517
8518static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8519 struct net_device *dev,
8520 u32 filter_mask, int nlflags)
8521{
8522 struct ixgbe_adapter *adapter = netdev_priv(dev);
8523
8524 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
8525 return 0;
8526
8527 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
8528 adapter->bridge_mode, 0, 0, nlflags,
8529 filter_mask, NULL);
8530}
8531
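/*
 * ixgbe_fwd_add - ndo_dfwd_add_station callback: reserve a VMDq pool and
 * queue range for an offloaded macvlan (L2 forwarding acceleration)
 * device, reconfigure the queueing scheme and bring the rings up.
 */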
8532static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
8533{
8534 struct ixgbe_fwd_adapter *fwd_adapter = NULL;
8535 struct ixgbe_adapter *adapter = netdev_priv(pdev);
8536 int used_pools = adapter->num_vfs + adapter->num_rx_pools;
8537 unsigned int limit;
8538 int pool, err;
8539
8540 /* The hardware has a limited number of pools; each VF and the PF
8541  * need one, so make sure we do not try to use more pools than are
8542  * available.
8543  */
8544 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
8545 return ERR_PTR(-EINVAL);
8546
8547#ifdef CONFIG_RPS
8548 if (vdev->num_rx_queues != vdev->num_tx_queues) {
8549 netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
8550 vdev->name);
8551 return ERR_PTR(-EINVAL);
8552 }
8553#endif
8554
8555 if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
8556 vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
8557 netdev_info(pdev,
8558 "%s: Supports RX/TX Queue counts 1,2, and 4\n",
8559 pdev->name);
8560 return ERR_PTR(-EINVAL);
8561 }
8562
8563 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8564 adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
8565 (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
8566 return ERR_PTR(-EBUSY);
8567
8568 fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
8569 if (!fwd_adapter)
8570 return ERR_PTR(-ENOMEM);
8571
8572 pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
8573 adapter->num_rx_pools++;
8574 set_bit(pool, &adapter->fwd_bitmask);
8575 limit = find_last_bit(&adapter->fwd_bitmask, 32);
8576
8577
8578 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
8579 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
8580 adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
8581
8582
8583 err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
8584 if (err)
8585 goto fwd_add_err;
8586 fwd_adapter->pool = pool;
8587 fwd_adapter->real_adapter = adapter;
8588 err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
8589 if (err)
8590 goto fwd_add_err;
8591 netif_tx_start_all_queues(vdev);
8592 return fwd_adapter;
8593fwd_add_err:
8594
8595 netdev_info(pdev,
8596 "%s: dfwd hardware acceleration failed\n", vdev->name);
8597 clear_bit(pool, &adapter->fwd_bitmask);
8598 adapter->num_rx_pools--;
8599 kfree(fwd_adapter);
8600 return ERR_PTR(err);
8601}
8602
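/*
 * ixgbe_fwd_del - ndo_dfwd_del_station callback: tear down the rings and
 * release the VMDq pool that was reserved by ixgbe_fwd_add().
 */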
8603static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
8604{
8605 struct ixgbe_fwd_adapter *fwd_adapter = priv;
8606 struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
8607 unsigned int limit;
8608
8609 clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
8610 adapter->num_rx_pools--;
8611
8612 limit = find_last_bit(&adapter->fwd_bitmask, 32);
8613 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
8614 ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
8615 ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
8616 netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
8617 fwd_adapter->pool, adapter->num_rx_pools,
8618 fwd_adapter->rx_base_queue,
8619 fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
8620 adapter->fwd_bitmask);
8621 kfree(fwd_adapter);
8622}
8623
8624#define IXGBE_MAX_TUNNEL_HDR_LEN 80
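/* ixgbe_features_check - for encapsulated frames the hardware can only
 * insert checksums when the inner headers start within
 * IXGBE_MAX_TUNNEL_HDR_LEN bytes of the outer transport header, so drop
 * the checksum offload features otherwise.
 */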
8625static netdev_features_t
8626ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
8627 netdev_features_t features)
8628{
8629 if (!skb->encapsulation)
8630 return features;
8631
8632 if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
8633 IXGBE_MAX_TUNNEL_HDR_LEN))
8634 return features & ~NETIF_F_CSUM_MASK;
8635
8636 return features;
8637}
8638
8639static const struct net_device_ops ixgbe_netdev_ops = {
8640 .ndo_open = ixgbe_open,
8641 .ndo_stop = ixgbe_close,
8642 .ndo_start_xmit = ixgbe_xmit_frame,
8643 .ndo_select_queue = ixgbe_select_queue,
8644 .ndo_set_rx_mode = ixgbe_set_rx_mode,
8645 .ndo_validate_addr = eth_validate_addr,
8646 .ndo_set_mac_address = ixgbe_set_mac,
8647 .ndo_change_mtu = ixgbe_change_mtu,
8648 .ndo_tx_timeout = ixgbe_tx_timeout,
8649 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
8650 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
8651 .ndo_do_ioctl = ixgbe_ioctl,
8652 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
8653 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
8654 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
8655 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
8656 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
8657 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
8658 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
8659 .ndo_get_stats64 = ixgbe_get_stats64,
8660#ifdef CONFIG_IXGBE_DCB
8661 .ndo_setup_tc = ixgbe_setup_tc,
8662#endif
8663#ifdef CONFIG_NET_POLL_CONTROLLER
8664 .ndo_poll_controller = ixgbe_netpoll,
8665#endif
8666#ifdef CONFIG_NET_RX_BUSY_POLL
8667 .ndo_busy_poll = ixgbe_low_latency_recv,
8668#endif
8669#ifdef IXGBE_FCOE
8670 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
8671 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
8672 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
8673 .ndo_fcoe_enable = ixgbe_fcoe_enable,
8674 .ndo_fcoe_disable = ixgbe_fcoe_disable,
8675 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
8676 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
8677#endif
8678 .ndo_set_features = ixgbe_set_features,
8679 .ndo_fix_features = ixgbe_fix_features,
8680 .ndo_fdb_add = ixgbe_ndo_fdb_add,
8681 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
8682 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
8683 .ndo_dfwd_add_station = ixgbe_fwd_add,
8684 .ndo_dfwd_del_station = ixgbe_fwd_del,
8685#ifdef CONFIG_IXGBE_VXLAN
8686 .ndo_add_vxlan_port = ixgbe_add_vxlan_port,
8687 .ndo_del_vxlan_port = ixgbe_del_vxlan_port,
8688#endif
8689 .ndo_features_check = ixgbe_features_check,
8690};
8691
8692 /**
8693  * ixgbe_enumerate_functions - Get the number of ports this device has
8694  * @adapter: adapter structure
8695  *
8696  * This function enumerates the physical functions co-located on a
8697  * single slot, in order to determine how many ports a device has.
8698  * This is most useful in determining the required GT/s of PCIe
8699  * bandwidth necessary for optimal performance.
8700  **/
8701static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
8702{
8703 struct pci_dev *entry, *pdev = adapter->pdev;
8704 int physfns = 0;
8705
8706 /* Some cards cannot use the generic count-PCIe-functions method
8707  * because they sit behind a parent switch, so hardcode those to
8708  * the correct number of functions.
8709  */
8710 if (ixgbe_pcie_from_parent(&adapter->hw))
8711 physfns = 4;
8712
8713 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
8714
8715 if (entry->is_virtfn)
8716 continue;
8717
8718 /* When the devices on the bus do not all match our device ID we
8719  * cannot reliably determine the correct number of functions.  This
8720  * can occur if a function has been direct-attached to a virtual
8721  * machine using VT-d, for example.  In that case, return -1 to
8722  * indicate the count is unknown.
8723  */
8724 if ((entry->vendor != pdev->vendor) ||
8725 (entry->device != pdev->device))
8726 return -1;
8727
8728 physfns++;
8729 }
8730
8731 return physfns;
8732}
8733
8734 /**
8735  * ixgbe_wol_supported - Check whether the device supports WoL
8736  * @adapter: the adapter private structure
8737  * @device_id: the device ID
8738  * @subdevice_id: the subsystem device ID
8739  *
8740  * This function is used by probe and ethtool to determine
8741  * which devices have WoL support
8742  *
8743  **/
8744int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
8745 u16 subdevice_id)
8746{
8747 struct ixgbe_hw *hw = &adapter->hw;
8748 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
8749 int is_wol_supported = 0;
8750
8751 switch (device_id) {
8752 case IXGBE_DEV_ID_82599_SFP:
8753
8754 switch (subdevice_id) {
8755 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
8756 case IXGBE_SUBDEV_ID_82599_560FLR:
8757
8758 if (hw->bus.func != 0)
8759 break;
8760 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
8761 case IXGBE_SUBDEV_ID_82599_SFP:
8762 case IXGBE_SUBDEV_ID_82599_RNDC:
8763 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
8764 case IXGBE_SUBDEV_ID_82599_LOM_SFP:
8765 is_wol_supported = 1;
8766 break;
8767 }
8768 break;
8769 case IXGBE_DEV_ID_82599EN_SFP:
8770
8771 switch (subdevice_id) {
8772 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
8773 is_wol_supported = 1;
8774 break;
8775 }
8776 break;
8777 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
8778
8779 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
8780 is_wol_supported = 1;
8781 break;
8782 case IXGBE_DEV_ID_82599_KX4:
8783 is_wol_supported = 1;
8784 break;
8785 case IXGBE_DEV_ID_X540T:
8786 case IXGBE_DEV_ID_X540T1:
8787 case IXGBE_DEV_ID_X550T:
8788 case IXGBE_DEV_ID_X550EM_X_KX4:
8789 case IXGBE_DEV_ID_X550EM_X_KR:
8790 case IXGBE_DEV_ID_X550EM_X_10G_T:
8791
8792 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
8793 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
8794 (hw->bus.func == 0))) {
8795 is_wol_supported = 1;
8796 }
8797 break;
8798 }
8799
8800 return is_wol_supported;
8801}
8802
8803
8804 /* ixgbe_get_platform_mac_addr - look up the MAC address supplied by
8805  * platform firmware (device tree or SPARC IDPROM) if one is present
8806  */
8807static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
8808{
8809#ifdef CONFIG_OF
8810 struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
8811 struct ixgbe_hw *hw = &adapter->hw;
8812 const unsigned char *addr;
8813
8814 addr = of_get_mac_address(dp);
8815 if (addr) {
8816 ether_addr_copy(hw->mac.perm_addr, addr);
8817 return;
8818 }
8819#endif
8820
8821#ifdef CONFIG_SPARC
8822 ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
8823#endif
8824}
8825
8826 /**
8827  * ixgbe_probe - Device Initialization Routine
8828  * @pdev: PCI device information struct
8829  * @ent: entry in ixgbe_pci_tbl
8830  *
8831  * Returns 0 on success, negative on failure
8832  *
8833  * ixgbe_probe initializes an adapter identified by a pci_dev structure.
8834  * The OS initialization, configuring of the adapter private structure,
8835  * and a hardware reset occur.
8836  **/
8837static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8838{
8839 struct net_device *netdev;
8840 struct ixgbe_adapter *adapter = NULL;
8841 struct ixgbe_hw *hw;
8842 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
8843 int i, err, pci_using_dac, expected_gts;
8844 unsigned int indices = MAX_TX_QUEUES;
8845 u8 part_str[IXGBE_PBANUM_LENGTH];
8846 bool disable_dev = false;
8847#ifdef IXGBE_FCOE
8848 u16 device_caps;
8849#endif
8850 u32 eec;
8851
8852 /* Catch broken hardware that puts the wrong VF device ID in
8853  * the PCIe SR-IOV capability.
8854  */
8855 if (pdev->is_virtfn) {
8856 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
8857 pci_name(pdev), pdev->vendor, pdev->device);
8858 return -EINVAL;
8859 }
8860
8861 err = pci_enable_device_mem(pdev);
8862 if (err)
8863 return err;
8864
8865 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
8866 pci_using_dac = 1;
8867 } else {
8868 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8869 if (err) {
8870 dev_err(&pdev->dev,
8871 "No usable DMA configuration, aborting\n");
8872 goto err_dma;
8873 }
8874 pci_using_dac = 0;
8875 }
8876
8877 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
8878 IORESOURCE_MEM), ixgbe_driver_name);
8879 if (err) {
8880 dev_err(&pdev->dev,
8881 "pci_request_selected_regions failed 0x%x\n", err);
8882 goto err_pci_reg;
8883 }
8884
8885 pci_enable_pcie_error_reporting(pdev);
8886
8887 pci_set_master(pdev);
8888 pci_save_state(pdev);
8889
8890 if (ii->mac == ixgbe_mac_82598EB) {
8891#ifdef CONFIG_IXGBE_DCB
8892
8893 indices = 4 * MAX_TRAFFIC_CLASS;
8894#else
8895 indices = IXGBE_MAX_RSS_INDICES;
8896#endif
8897 }
8898
8899 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
8900 if (!netdev) {
8901 err = -ENOMEM;
8902 goto err_alloc_etherdev;
8903 }
8904
8905 SET_NETDEV_DEV(netdev, &pdev->dev);
8906
8907 adapter = netdev_priv(netdev);
8908
8909 adapter->netdev = netdev;
8910 adapter->pdev = pdev;
8911 hw = &adapter->hw;
8912 hw->back = adapter;
8913 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
8914
8915 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
8916 pci_resource_len(pdev, 0));
8917 adapter->io_addr = hw->hw_addr;
8918 if (!hw->hw_addr) {
8919 err = -EIO;
8920 goto err_ioremap;
8921 }
8922
8923 netdev->netdev_ops = &ixgbe_netdev_ops;
8924 ixgbe_set_ethtool_ops(netdev);
8925 netdev->watchdog_timeo = 5 * HZ;
8926 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
8927
8928
8929 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
8930 hw->mac.type = ii->mac;
8931 hw->mvals = ii->mvals;
8932
8933
8934 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
8935 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
8936 if (ixgbe_removed(hw->hw_addr)) {
8937 err = -EIO;
8938 goto err_ioremap;
8939 }
8940
8941 if (!(eec & (1 << 8)))
8942 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
8943
8944
8945 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
8946 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
8947
8948 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
8949 hw->phy.mdio.mmds = 0;
8950 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8951 hw->phy.mdio.dev = netdev;
8952 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
8953 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
8954
8955 ii->get_invariants(hw);
8956
8957
8958 err = ixgbe_sw_init(adapter);
8959 if (err)
8960 goto err_sw_init;
8961
8962
8963 switch (adapter->hw.mac.type) {
8964 case ixgbe_mac_82599EB:
8965 case ixgbe_mac_X540:
8966 case ixgbe_mac_X550:
8967 case ixgbe_mac_X550EM_x:
8968 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
8969 break;
8970 default:
8971 break;
8972 }
8973
8974 /*
8975  * If this device has a fan, check whether it has failed and log
8976  * the failure.
8977  */
8978 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
8979 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
8980 if (esdp & IXGBE_ESDP_SDP1)
8981 e_crit(probe, "Fan has stopped, replace the adapter\n");
8982 }
8983
8984 if (allow_unsupported_sfp)
8985 hw->allow_unsupported_sfp = allow_unsupported_sfp;
8986
8987
8988 hw->phy.reset_if_overtemp = true;
8989 err = hw->mac.ops.reset_hw(hw);
8990 hw->phy.reset_if_overtemp = false;
8991 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
8992 err = 0;
8993 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
8994 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
8995 e_dev_err("Reload the driver after installing a supported module.\n");
8996 goto err_sw_init;
8997 } else if (err) {
8998 e_dev_err("HW Init failed: %d\n", err);
8999 goto err_sw_init;
9000 }
9001
9002#ifdef CONFIG_PCI_IOV
9003
9004 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
9005 goto skip_sriov;
9006
9007 ixgbe_init_mbx_params_pf(hw);
9008 memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
9009 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
9010 ixgbe_enable_sriov(adapter);
9011skip_sriov:
9012
9013#endif
9014 netdev->features = NETIF_F_SG |
9015 NETIF_F_IP_CSUM |
9016 NETIF_F_IPV6_CSUM |
9017 NETIF_F_HW_VLAN_CTAG_TX |
9018 NETIF_F_HW_VLAN_CTAG_RX |
9019 NETIF_F_TSO |
9020 NETIF_F_TSO6 |
9021 NETIF_F_RXHASH |
9022 NETIF_F_RXCSUM;
9023
9024 netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
9025
9026 switch (adapter->hw.mac.type) {
9027 case ixgbe_mac_82599EB:
9028 case ixgbe_mac_X540:
9029 case ixgbe_mac_X550:
9030 case ixgbe_mac_X550EM_x:
9031 netdev->features |= NETIF_F_SCTP_CRC;
9032 netdev->hw_features |= NETIF_F_SCTP_CRC |
9033 NETIF_F_NTUPLE;
9034 break;
9035 default:
9036 break;
9037 }
9038
9039 netdev->hw_features |= NETIF_F_RXALL;
9040 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
9041
9042 netdev->vlan_features |= NETIF_F_TSO;
9043 netdev->vlan_features |= NETIF_F_TSO6;
9044 netdev->vlan_features |= NETIF_F_IP_CSUM;
9045 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
9046 netdev->vlan_features |= NETIF_F_SG;
9047
9048 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
9049
9050 netdev->priv_flags |= IFF_UNICAST_FLT;
9051 netdev->priv_flags |= IFF_SUPP_NOFCS;
9052
9053#ifdef CONFIG_IXGBE_VXLAN
9054 switch (adapter->hw.mac.type) {
9055 case ixgbe_mac_X550:
9056 case ixgbe_mac_X550EM_x:
9057 netdev->hw_enc_features |= NETIF_F_RXCSUM;
9058 break;
9059 default:
9060 break;
9061 }
9062#endif
9063
9064#ifdef CONFIG_IXGBE_DCB
9065 netdev->dcbnl_ops = &dcbnl_ops;
9066#endif
9067
9068#ifdef IXGBE_FCOE
9069 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
9070 unsigned int fcoe_l;
9071
9072 if (hw->mac.ops.get_device_caps) {
9073 hw->mac.ops.get_device_caps(hw, &device_caps);
9074 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
9075 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
9076 }
9077
9078
9079 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
9080 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
9081
9082 netdev->features |= NETIF_F_FSO |
9083 NETIF_F_FCOE_CRC;
9084
9085 netdev->vlan_features |= NETIF_F_FSO |
9086 NETIF_F_FCOE_CRC |
9087 NETIF_F_FCOE_MTU;
9088 }
9089#endif
9090 if (pci_using_dac) {
9091 netdev->features |= NETIF_F_HIGHDMA;
9092 netdev->vlan_features |= NETIF_F_HIGHDMA;
9093 }
9094
9095 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
9096 netdev->hw_features |= NETIF_F_LRO;
9097 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9098 netdev->features |= NETIF_F_LRO;
9099
9100
9101 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
9102 e_dev_err("The EEPROM Checksum Is Not Valid\n");
9103 err = -EIO;
9104 goto err_sw_init;
9105 }
9106
9107 ixgbe_get_platform_mac_addr(adapter);
9108
9109 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
9110
9111 if (!is_valid_ether_addr(netdev->dev_addr)) {
9112 e_dev_err("invalid MAC address\n");
9113 err = -EIO;
9114 goto err_sw_init;
9115 }
9116
9117 ixgbe_mac_set_default_filter(adapter);
9118
9119 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
9120 (unsigned long) adapter);
9121
9122 if (ixgbe_removed(hw->hw_addr)) {
9123 err = -EIO;
9124 goto err_sw_init;
9125 }
9126 INIT_WORK(&adapter->service_task, ixgbe_service_task);
9127 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
9128 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
9129
9130 err = ixgbe_init_interrupt_scheme(adapter);
9131 if (err)
9132 goto err_sw_init;
9133
9134
9135 adapter->wol = 0;
9136 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
9137 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
9138 pdev->subsystem_device);
9139 if (hw->wol_enabled)
9140 adapter->wol = IXGBE_WUFC_MAG;
9141
9142 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
9143
9144
9145 hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
9146 hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
9147
9148
9149 if (ixgbe_pcie_from_parent(hw))
9150 ixgbe_get_parent_bus_info(adapter);
9151 else
9152 hw->mac.ops.get_bus_info(hw);
9153
9154 /*
9155  * Calculate the expected PCIe bandwidth required for optimal
9156  * performance.  Some older parts can never have enough bandwidth
9157  * because they are older-generation PCIe devices.
9158  */
9159 switch (hw->mac.type) {
9160 case ixgbe_mac_82598EB:
9161 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
9162 break;
9163 default:
9164 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
9165 break;
9166 }
9167
9168
9169 if (expected_gts > 0)
9170 ixgbe_check_minimum_link(adapter, expected_gts);
9171
9172 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
9173 if (err)
9174 strlcpy(part_str, "Unknown", sizeof(part_str));
9175 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
9176 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
9177 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
9178 part_str);
9179 else
9180 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
9181 hw->mac.type, hw->phy.type, part_str);
9182
9183 e_dev_info("%pM\n", netdev->dev_addr);
9184
9185
9186 err = hw->mac.ops.start_hw(hw);
9187 if (err == IXGBE_ERR_EEPROM_VERSION) {
9188
9189 e_dev_warn("This device is a pre-production adapter/LOM. "
9190 "Please be aware there may be issues associated "
9191 "with your hardware. If you are experiencing "
9192 "problems please contact your Intel or hardware "
9193 "representative who provided you with this "
9194 "hardware.\n");
9195 }
9196 strcpy(netdev->name, "eth%d");
9197 err = register_netdev(netdev);
9198 if (err)
9199 goto err_register;
9200
9201 pci_set_drvdata(pdev, adapter);
9202
9203
9204 if (hw->mac.ops.disable_tx_laser)
9205 hw->mac.ops.disable_tx_laser(hw);
9206
9207
9208 netif_carrier_off(netdev);
9209
9210#ifdef CONFIG_IXGBE_DCA
9211 if (dca_add_requester(&pdev->dev) == 0) {
9212 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
9213 ixgbe_setup_dca(adapter);
9214 }
9215#endif
9216 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
9217 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
9218 for (i = 0; i < adapter->num_vfs; i++)
9219 ixgbe_vf_configuration(pdev, (i | 0x10000000));
9220 }
9221
9222 /* report an all-0xFF driver version to firmware; the OS-managed
9223  * driver version feature is not used here
9224  */
9225 if (hw->mac.ops.set_fw_drv_ver)
9226 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
9227 0xFF);
9228
9229
9230 ixgbe_add_sanmac_netdev(netdev);
9231
9232 e_dev_info("%s\n", ixgbe_default_device_descr);
9233
9234#ifdef CONFIG_IXGBE_HWMON
9235 if (ixgbe_sysfs_init(adapter))
9236 e_err(probe, "failed to allocate sysfs resources\n");
9237#endif
9238
9239 ixgbe_dbg_adapter_init(adapter);
9240
9241
9242 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
9243 hw->mac.ops.setup_link(hw,
9244 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
9245 true);
9246
9247 return 0;
9248
9249err_register:
9250 ixgbe_release_hw_control(adapter);
9251 ixgbe_clear_interrupt_scheme(adapter);
9252err_sw_init:
9253 ixgbe_disable_sriov(adapter);
9254 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
9255 iounmap(adapter->io_addr);
9256 kfree(adapter->mac_table);
9257err_ioremap:
9258 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
9259 free_netdev(netdev);
9260err_alloc_etherdev:
9261 pci_release_selected_regions(pdev,
9262 pci_select_bars(pdev, IORESOURCE_MEM));
9263err_pci_reg:
9264err_dma:
9265 if (!adapter || disable_dev)
9266 pci_disable_device(pdev);
9267 return err;
9268}
9269
9270 /**
9271  * ixgbe_remove - Device Removal Routine
9272  * @pdev: PCI device information struct
9273  *
9274  * ixgbe_remove is called by the PCI subsystem to alert the driver
9275  * that it should release a PCI device.  This could be caused by a
9276  * Hot-Plug event, or because the driver is going to be removed from
9277  * memory.
9278  **/
9279static void ixgbe_remove(struct pci_dev *pdev)
9280{
9281 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9282 struct net_device *netdev;
9283 bool disable_dev;
9284
9285
9286 if (!adapter)
9287 return;
9288
9289 netdev = adapter->netdev;
9290 ixgbe_dbg_adapter_exit(adapter);
9291
9292 set_bit(__IXGBE_REMOVING, &adapter->state);
9293 cancel_work_sync(&adapter->service_task);
9294
9295
9296#ifdef CONFIG_IXGBE_DCA
9297 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
9298 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
9299 dca_remove_requester(&pdev->dev);
9300 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
9301 IXGBE_DCA_CTRL_DCA_DISABLE);
9302 }
9303
9304#endif
9305#ifdef CONFIG_IXGBE_HWMON
9306 ixgbe_sysfs_exit(adapter);
9307#endif
9308
	/* remove the SAN MAC address that probe added to the netdev */
9310 ixgbe_del_sanmac_netdev(netdev);
9311
9312#ifdef CONFIG_PCI_IOV
9313 ixgbe_disable_sriov(adapter);
9314#endif
9315 if (netdev->reg_state == NETREG_REGISTERED)
9316 unregister_netdev(netdev);
9317
9318 ixgbe_clear_interrupt_scheme(adapter);
9319
9320 ixgbe_release_hw_control(adapter);
9321
9322#ifdef CONFIG_DCB
9323 kfree(adapter->ixgbe_ieee_pfc);
9324 kfree(adapter->ixgbe_ieee_ets);
9325
9326#endif
9327 iounmap(adapter->io_addr);
9328 pci_release_selected_regions(pdev, pci_select_bars(pdev,
9329 IORESOURCE_MEM));
9330
9331 e_dev_info("complete\n");
9332
9333 kfree(adapter->mac_table);
9334 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
9335 free_netdev(netdev);
9336
9337 pci_disable_pcie_error_reporting(pdev);
9338
9339 if (disable_dev)
9340 pci_disable_device(pdev);
9341}
9342
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
9351static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
9352 pci_channel_state_t state)
9353{
9354 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9355 struct net_device *netdev = adapter->netdev;
9356
9357#ifdef CONFIG_PCI_IOV
9358 struct ixgbe_hw *hw = &adapter->hw;
9359 struct pci_dev *bdev, *vfdev;
9360 u32 dw0, dw1, dw2, dw3;
9361 int vf, pos;
9362 u16 req_id, pf_func;
9363
9364 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
9365 adapter->num_vfs == 0)
9366 goto skip_bad_vf_detection;
9367
9368 bdev = pdev->bus->self;
9369 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
9370 bdev = bdev->bus->self;
9371
9372 if (!bdev)
9373 goto skip_bad_vf_detection;
9374
9375 pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
9376 if (!pos)
9377 goto skip_bad_vf_detection;
9378
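	/* pull the AER header log (the TLP of the failed transaction) so the
	 * offending requester ID can be decoded below
	 */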
9379 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
9380 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
9381 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
9382 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
9383 if (ixgbe_removed(hw->hw_addr))
9384 goto skip_bad_vf_detection;
9385
9386 req_id = dw1 >> 16;
9387
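	/* bit 7 of the requester ID is only set when a VF caused the error */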
9388 if (!(req_id & 0x0080))
9389 goto skip_bad_vf_detection;
9390
9391 pf_func = req_id & 0x01;
9392 if ((pf_func & 1) == (pdev->devfn & 1)) {
9393 unsigned int device_id;
9394
9395 vf = (req_id & 0x7F) >> 1;
9396 e_dev_err("VF %d has caused a PCIe error\n", vf);
9397 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
9398 "%8.8x\tdw3: %8.8x\n",
9399 dw0, dw1, dw2, dw3);
9400 switch (adapter->hw.mac.type) {
9401 case ixgbe_mac_82599EB:
9402 device_id = IXGBE_82599_VF_DEVICE_ID;
9403 break;
9404 case ixgbe_mac_X540:
9405 device_id = IXGBE_X540_VF_DEVICE_ID;
9406 break;
9407 case ixgbe_mac_X550:
9408 device_id = IXGBE_DEV_ID_X550_VF;
9409 break;
9410 case ixgbe_mac_X550EM_x:
9411 device_id = IXGBE_DEV_ID_X550EM_X_VF;
9412 break;
9413 default:
9414 device_id = 0;
9415 break;
9416 }
9417
		/* Find the pci device of the offending VF */
9419 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
9420 while (vfdev) {
9421 if (vfdev->devfn == (req_id & 0xFF))
9422 break;
9423 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
9424 device_id, vfdev);
9425 }
9426
		/* There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
9431 if (vfdev) {
9432 ixgbe_issue_vf_flr(adapter, vfdev);
			/* Free device reference count */
9434 pci_dev_put(vfdev);
9435 }
9436
9437 pci_cleanup_aer_uncorrect_error_status(pdev);
9438 }
9439
	/* Even though the error may have occurred on the other port
	 * we still need to increment the VF error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
9446 adapter->vferr_refcount++;
9447
9448 return PCI_ERS_RESULT_RECOVERED;
9449
9450skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
9452 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
9453 return PCI_ERS_RESULT_DISCONNECT;
9454
9455 rtnl_lock();
9456 netif_device_detach(netdev);
9457
9458 if (state == pci_channel_io_perm_failure) {
9459 rtnl_unlock();
9460 return PCI_ERS_RESULT_DISCONNECT;
9461 }
9462
9463 if (netif_running(netdev))
9464 ixgbe_down(adapter);
9465
9466 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
9467 pci_disable_device(pdev);
9468 rtnl_unlock();
9469
	/* Request a slot reset. */
9471 return PCI_ERS_RESULT_NEED_RESET;
9472}
9473
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
9480static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
9481{
9482 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9483 pci_ers_result_t result;
9484 int err;
9485
9486 if (pci_enable_device_mem(pdev)) {
9487 e_err(probe, "Cannot re-enable PCI device after reset.\n");
9488 result = PCI_ERS_RESULT_DISCONNECT;
9489 } else {
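		/* device re-enabled: the barrier orders the enable above
		 * before the disabled flag is cleared, and restoring hw_addr
		 * re-arms MMIO register access
		 */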
9490 smp_mb__before_atomic();
9491 clear_bit(__IXGBE_DISABLED, &adapter->state);
9492 adapter->hw.hw_addr = adapter->io_addr;
9493 pci_set_master(pdev);
9494 pci_restore_state(pdev);
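		/* re-save config space so a later suspend/shutdown still has
		 * valid saved state after the restore above
		 */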
9495 pci_save_state(pdev);
9496
9497 pci_wake_from_d3(pdev, false);
9498
9499 ixgbe_reset(adapter);
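		/* clear any pending wake-up status left from before the reset */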
9500 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
9501 result = PCI_ERS_RESULT_RECOVERED;
9502 }
9503
9504 err = pci_cleanup_aer_uncorrect_error_status(pdev);
9505 if (err) {
9506 e_dev_err("pci_cleanup_aer_uncorrect_error_status "
9507 "failed 0x%0x\n", err);
9508
9509 }
9510
9511 return result;
9512}
9513
/**
 * ixgbe_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
9521static void ixgbe_io_resume(struct pci_dev *pdev)
9522{
9523 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9524 struct net_device *netdev = adapter->netdev;
9525
9526#ifdef CONFIG_PCI_IOV
9527 if (adapter->vferr_refcount) {
9528 e_info(drv, "Resuming after VF err\n");
9529 adapter->vferr_refcount--;
9530 return;
9531 }
9532
9533#endif
9534 if (netif_running(netdev))
9535 ixgbe_up(adapter);
9536
9537 netif_device_attach(netdev);
9538}
9539
9540static const struct pci_error_handlers ixgbe_err_handler = {
9541 .error_detected = ixgbe_io_error_detected,
9542 .slot_reset = ixgbe_io_slot_reset,
9543 .resume = ixgbe_io_resume,
9544};
9545
9546static struct pci_driver ixgbe_driver = {
9547 .name = ixgbe_driver_name,
9548 .id_table = ixgbe_pci_tbl,
9549 .probe = ixgbe_probe,
9550 .remove = ixgbe_remove,
9551#ifdef CONFIG_PM
9552 .suspend = ixgbe_suspend,
9553 .resume = ixgbe_resume,
9554#endif
9555 .shutdown = ixgbe_shutdown,
9556 .sriov_configure = ixgbe_pci_sriov_configure,
9557 .err_handler = &ixgbe_err_handler
9558};
9559
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
9566static int __init ixgbe_init_module(void)
9567{
9568 int ret;
9569 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
9570 pr_info("%s\n", ixgbe_copyright);
9571
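	/* single-threaded workqueue shared by all adapters for the service task */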
9572 ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
9573 if (!ixgbe_wq) {
9574 pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
9575 return -ENOMEM;
9576 }
9577
9578 ixgbe_dbg_init();
9579
9580 ret = pci_register_driver(&ixgbe_driver);
9581 if (ret) {
9582 ixgbe_dbg_exit();
9583 return ret;
9584 }
9585
9586#ifdef CONFIG_IXGBE_DCA
9587 dca_register_notify(&dca_notifier);
9588#endif
9589
9590 return 0;
9591}
9592
9593module_init(ixgbe_init_module);
9594
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
9601static void __exit ixgbe_exit_module(void)
9602{
9603#ifdef CONFIG_IXGBE_DCA
9604 dca_unregister_notify(&dca_notifier);
9605#endif
9606 pci_unregister_driver(&ixgbe_driver);
9607
9608 ixgbe_dbg_exit();
9609 if (ixgbe_wq) {
9610 destroy_workqueue(ixgbe_wq);
9611 ixgbe_wq = NULL;
9612 }
9613}
9614
9615#ifdef CONFIG_IXGBE_DCA
9616static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
9617 void *p)
9618{
9619 int ret_val;
9620
9621 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
9622 __ixgbe_notify_dca);
9623
9624 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
9625}
9626
#endif /* CONFIG_IXGBE_DCA */
9628
9629module_exit(ixgbe_exit_module);
9630
9631
9632