/* Intel(R) 10 Gigabit PCI Express Linux Network Driver
 * Copyright(c) 1999 - 2016 Intel Corporation.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
	"Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
	"Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "5.1.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
	"Copyright (c) 1999-2016 Intel Corporation.";
static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598]		= &ixgbe_82598_info,
	[board_82599]		= &ixgbe_82599_info,
	[board_X540]		= &ixgbe_X540_info,
	[board_X550]		= &ixgbe_X550_info,
	[board_X550EM_x]	= &ixgbe_X550EM_x_info,
	[board_x550em_x_fw]	= &ixgbe_x550em_x_fw_info,
	[board_x550em_a]	= &ixgbe_x550em_a_info,
	[board_x550em_a_fw]	= &ixgbe_x550em_a_fw_info,
};
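
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */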
static const struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next = NULL,
	.priority = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *ixgbe_wq;

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);

static const struct net_device_ops ixgbe_netdev_ops;

static bool netif_is_ixgbe(struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
}

static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					  u32 reg, u16 *value)
{
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	if (!pci_is_pcie(parent_dev))
		return -1;

	pcie_capability_read_word(parent_dev, reg, value);
	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
		return -1;
	return 0;
}

static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 link_status = 0;
	int err;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from the PCI config
	 * space of the parent, as this device is behind a switch.
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, PCI_EXP_LNKSTA,
					     &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	return 0;
}
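
/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from
 * the device. Used to ensure that various locations all have the correct
 * device ID checks.
 */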
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		return true;
	default:
		return false;
	}
}
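
/**
 * ixgbe_check_minimum_link - Determine minimum link settings
 * @adapter: board private structure
 * @expected_gts: expected GT/s of the negotiated PCIe link
 *
 * Warns if the PCIe slot the adapter sits in cannot supply the bandwidth
 * the device was designed for.
 */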
static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
				     int expected_gts)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_gts = 0;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	struct pci_dev *pdev;

	/* Some devices are not connected over PCIe and thus do not negotiate
	 * speed. These devices do not have valid bus info, and thus any report
	 * we generate may not be correct.
	 */
	if (hw->bus.type == ixgbe_bus_type_internal)
		return;

	/* determine whether to use the parent device */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		pdev = adapter->pdev->bus->parent->self;
	else
		pdev = adapter->pdev;

	if (pcie_get_minimum_link(pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 2 * width;
		break;
	case PCIE_SPEED_5_0GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 4 * width;
		break;
	case PCIE_SPEED_8_0GT:
		/* 128b/130b encoding reduces throughput by less than 2% */
		max_gts = 8 * width;
		break;
	default:
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	e_dev_info("PCI Express bandwidth of %dGT/s available\n",
		   max_gts);
	e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
		   (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
		    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
		    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
		    "Unknown"),
		   width,
		   (speed == PCIE_SPEED_2_5GT ? "20%" :
		    speed == PCIE_SPEED_5_0GT ? "20%" :
		    speed == PCIE_SPEED_8_0GT ? "<2%" :
		    "Unknown"));

	if (max_gts < expected_gts) {
		e_dev_warn("This is not sufficient for optimal performance of this card.\n");
		e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
			   expected_gts);
		e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
	}
}

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_bit(__IXGBE_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbe_wq, &adapter->service_task);
}

static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	e_dev_err("Adapter removed\n");
	if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		ixgbe_service_event_schedule(adapter);
}

static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr;
	u32 value;
	int i;

	reg_addr = READ_ONCE(hw->hw_addr);
	if (ixgbe_removed(reg_addr))
		return IXGBE_FAILED_READ_REG;

	/* A register read of all ones can indicate the adapter has been
	 * removed, so perform several status register reads to determine
	 * whether the adapter is really gone.
	 */
	for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
		value = readl(reg_addr + IXGBE_STATUS);
		if (value != IXGBE_FAILED_READ_REG)
			break;
		mdelay(3);
	}

	if (value == IXGBE_FAILED_READ_REG)
		ixgbe_remove_adapter(hw);
	else
		value = readl(reg_addr + reg);
	return value;
}
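
/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers. It checks for device
 * removal by confirming any read that returns all ones by checking the
 * status register value for all ones. This function avoids reading from
 * the hardware if a removal was previously detected, in which case it
 * returns IXGBE_FAILED_READ_REG (all ones).
 */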
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
	u32 value;

	if (ixgbe_removed(reg_addr))
		return IXGBE_FAILED_READ_REG;
	if (unlikely(hw->phy.nw_mng_if_sel &
		     IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
		struct ixgbe_adapter *adapter;
		int i;

		/* wait for any pending SGMII register writes to complete */
		for (i = 0; i < 200; ++i) {
			value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
			if (likely(!value))
				goto writes_completed;
			if (value == IXGBE_FAILED_READ_REG) {
				ixgbe_remove_adapter(hw);
				return IXGBE_FAILED_READ_REG;
			}
			udelay(5);
		}

		adapter = hw->back;
		e_warn(hw, "register writes incomplete %08x\n", value);
	}

writes_completed:
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		value = ixgbe_check_remove(hw, reg);
	return value;
}

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
	u16 value;

	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD) {
		ixgbe_remove_adapter(hw);
		return true;
	}
	return false;
}

u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u16 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_WORD;
	pci_read_config_word(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_WORD;
	return value;
}

#ifdef CONFIG_PCI_IOV
static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u32 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_DWORD;
	pci_read_config_dword(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_DWORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_DWORD;
	return value;
}
#endif /* CONFIG_PCI_IOV */

void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (ixgbe_removed(hw->hw_addr))
		return;
	pci_write_config_word(adapter->pdev, reg, value);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{ .name = NULL }
};
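
/*
 * ixgbe_regdump - register printout routine
 */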
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n",
			reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	i = 0;
	while (i < 64) {
		int j;
		char buf[9 * 8 + 1];
		char *p = buf;

		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
		for (j = 0; j < 8; j++)
			p += sprintf(p, " %08x", regs[i++]);
		pr_err("%-15s%s\n", rname, buf);
	}
}

static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
{
	struct ixgbe_tx_buffer *tx_buffer;

	tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
	pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
		n, ring->next_to_use, ring->next_to_clean,
		(u64)dma_unmap_addr(tx_buffer, dma),
		dma_unmap_len(tx_buffer, len),
		tx_buffer->next_to_watch,
		(u64)tx_buffer->time_stamp);
}
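
/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */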
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *ring;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start\n");
		pr_info("%-15s %016lX %016lX\n",
			netdev->name,
			netdev->state,
			dev_trans_start(netdev));
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info(" %s     %s              %s        %s\n",
		"Queue [NTU] [NTC] [bi(ntc)->dma  ]",
		"leng", "ntw", "timestamp");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		ring = adapter->tx_ring[n];
		ixgbe_print_buffer(ring, n);
	}

	for (n = 0; n < adapter->num_xdp_queues; n++) {
		ring = adapter->xdp_ring[n];
		ixgbe_print_buffer(ring, n);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
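
	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor (read format)
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN  | POPTS | IDX | STA | DCMD | DTYP | RSV |  DTALEN   |
	 *   +--------------------------------------------------------------+
	 *
	 * On write-back the hardware reuses the same 16 bytes; only the STA
	 * field (descriptor done) is meaningful to software.  See the
	 * 82598/82599 datasheets for the exact bit layouts.
	 */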
	for (n = 0; n < adapter->num_tx_queues; n++) {
		ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s    %s %s %s %s\n",
			"T [desc]     [address 63:0  ] ",
			"[PlPOIdStDDt Ln] [bi->dma       ] ",
			"leng", "ntw", "timestamp", "bi->skb");

		for (i = 0; ring->desc && (i < ring->count); i++) {
			tx_desc = IXGBE_TX_DESC(ring, i);
			tx_buffer = &ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (dma_unmap_len(tx_buffer, len) > 0) {
				const char *ring_desc;

				if (i == ring->next_to_use &&
				    i == ring->next_to_clean)
					ring_desc = " NTC/U";
				else if (i == ring->next_to_use)
					ring_desc = " NTU";
				else if (i == ring->next_to_clean)
					ring_desc = " NTC";
				else
					ring_desc = "";
				pr_info("T [0x%03X]    %016llX %016llX %016llX %08X %p %016llX %p%s\n",
					i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)dma_unmap_addr(tx_buffer, dma),
					dma_unmap_len(tx_buffer, len),
					tx_buffer->next_to_watch,
					(u64)tx_buffer->time_stamp,
					tx_buffer->skb,
					ring_desc);

				if (netif_msg_pktdata(adapter) &&
				    tx_buffer->skb)
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16, 1,
						       tx_buffer->skb->data,
						       dma_unmap_len(tx_buffer, len),
						       true);
			}
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
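
	/* Receive Descriptor Formats
	 *
	 * Advanced Receive Descriptor (read format)
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 * The write-back format carries the RSS hash/packet type info,
	 * extended status/error bits, packet length and VLAN tag.  See the
	 * 82598/82599 datasheets for the exact bit layouts; the dump below
	 * prints both raw quadwords of each descriptor.
	 */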
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s%s\n",
			"R  [desc]      [ PktBuf     A0] ",
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb       ] ",
			"<-- Adv Rx Read format");
		pr_info("%s%s%s\n",
			"RWB[desc]      [PcsmIpSHl PtRs] ",
			"[vl er S cks ln] ---------------- [bi->skb       ] ",
			"<-- Adv Rx Write-Back format");

		for (i = 0; i < rx_ring->count; i++) {
			const char *ring_desc;

			if (i == rx_ring->next_to_use)
				ring_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				ring_desc = " NTC";
			else
				ring_desc = "";

			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			if (rx_desc->wb.upper.length) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX %016llX ---------------- %p%s\n",
					i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb,
					ring_desc);
			} else {
				pr_info("R  [0x%03X]     %016llX %016llX %016llX %p%s\n",
					i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb,
					ring_desc);

				if (netif_msg_pktdata(adapter) &&
				    rx_buffer_info->dma) {
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16, 1,
						       page_address(rx_buffer_info->page) +
							       rx_buffer_info->page_offset,
						       ixgbe_rx_bufsz(rx_ring), true);
				}
			}
		}
	}
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
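
/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */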
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	int i;
	u32 data;

	if ((hw->fc.current_mode != ixgbe_fc_full) &&
	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
		return;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		break;
	default:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	}
	hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
	if (!data)
		return;

	for (i = 0; i < adapter->num_tx_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->tx_ring[i]->state);

	for (i = 0; i < adapter->num_xdp_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->xdp_ring[i]->state);
}

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 xoff[8] = {0};
	u8 tc;
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
		ixgbe_update_xoff_rx_lfc(adapter);
		return;
	}

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		u32 pxoffrxc;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += pxoffrxc;
		/* Get the TC for given UP */
		tc = netdev_get_prio_tc_map(adapter->netdev, i);
		xoff[tc] += pxoffrxc;
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

		tc = tx_ring->dcb_tc;
		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];

		tc = xdp_ring->dcb_tc;
		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
	}
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->stats.packets;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	unsigned int head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	return ((head <= tail) ? tail : tail + ring->count) - head;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if (tx_done_old == tx_done && tx_pending)
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
					&tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;
	/* reset the countdown */
	clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);

	return false;
}
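
/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 */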
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
		e_warn(drv, "initiating reset due to tx timeout\n");
		ixgbe_service_event_schedule(adapter);
	}
}
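
/**
 * ixgbe_tx_maxrate - callback to set the send maximum rate for a queue
 * @netdev: network interface device structure
 * @queue_index: Tx queue to set
 * @maxrate: desired maximum transmit bitrate in Mbps
 */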
static int ixgbe_tx_maxrate(struct net_device *netdev,
			    int queue_index, u32 maxrate)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 bcnrc_val = ixgbe_link_mbps(adapter);

	if (!maxrate)
		return 0;

	/* Calculate the rate factor values to set */
	bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
	bcnrc_val /= maxrate;

	/* clear everything but the rate factor */
	bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
		     IXGBE_RTTBCNRC_RF_DEC_MASK;

	/* enable the rate scheduler */
	bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;

	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);

	return 0;
}
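
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 */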
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring, int napi_budget)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;
		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
			total_ipsec++;

		/* free the skb */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buffer->data);
		else
			napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;
	adapter->tx_ipsec += total_ipsec;

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* Detected a Tx unit hang; dump state to aid debugging */
		struct ixgbe_hw *hw = &adapter->hw;
		e_err(drv, "Detected Tx Unit Hang %s\n"
			   "  Tx Queue             <%d>\n"
			   "  TDH, TDT             <%x>, <%x>\n"
			   "  next_to_use          <%x>\n"
			   "  next_to_clean        <%x>\n"
			   "tx_buffer_info[next_to_clean]\n"
			   "  time_stamp           <%lx>\n"
			   "  jiffies              <%lx>\n",
			ring_is_xdp(tx_ring) ? "(XDP)" : "",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, i,
			tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		if (!ring_is_xdp(tx_ring))
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		       adapter->tx_timeout_count + 1, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbe_tx_timeout_reset(adapter);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	if (ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index)
		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl = 0;
	u16 reg_offset;

	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		txctrl = dca3_get_tag(tx_ring->dev, cpu);

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
		txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		/* for unknown hardware do not write register */
		return;
	}

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
		  IXGBE_DCA_TXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}

static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl = 0;
	u8 reg_idx = rx_ring->reg_idx;

	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		rxctrl = dca3_get_tag(rx_ring->dev, cpu);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		break;
	}

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_RXCTRL_DATA_DCA_EN |
		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	ixgbe_for_each_ring(ring, q_vector->tx)
		ixgbe_update_tx_dca(adapter, ring, cpu);

	ixgbe_for_each_ring(ring, q_vector->rx)
		ixgbe_update_rx_dca(adapter, ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	/* always use CB2 mode, difference is masked in the CB driver */
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_MODE_CB2);
	else
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_DISABLE);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
					IXGBE_DCA_CTRL_DCA_MODE_CB2);
			break;
		}
		/* fall through - DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
					IXGBE_DCA_CTRL_DCA_DISABLE);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */

#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
				 union ixgbe_adv_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

#ifdef IXGBE_FCOE
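/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */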
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
				    union ixgbe_adv_rx_desc *rx_desc)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}

#endif /* IXGBE_FCOE */
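
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 */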
static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	bool encap_pkt = false;

	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check for VXLAN or Geneve encapsulated packets */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
		encap_pkt = true;
		skb->encapsulation = 1;
	}

	/* if IP and error */
	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		/* 82599 errata: UDP frames with a 0 checksum can be
		 * marked as checksum errors.
		 */
		if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
			return;

		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (encap_pkt) {
		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
			return;

		if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}
		/* If we checked the outer header let the stack know */
		skb->csum_level = 1;
	}
}

static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
}

static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
				    struct ixgbe_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 ixgbe_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IXGBE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbe_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ixgbe_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;
	rx_ring->rx_stats.alloc_rx_page++;

	return true;
}
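
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 */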
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	bufsz = ixgbe_rx_bufsz(rx_ring);

	do {
		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}

static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
				   struct sk_buff *skb)
{
	u16 hdr_len = skb_headlen(skb);

	/* set gso_size to avoid messing up TCP MSS */
	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
						 IXGBE_CB(skb)->append_cnt);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}

static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
				   struct sk_buff *skb)
{
	/* if append_cnt is 0 then frame is not RSC */
	if (!IXGBE_CB(skb)->append_cnt)
		return;

	rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
	rx_ring->rx_stats.rsc_flush++;

	ixgbe_set_rsc_gso_size(rx_ring, skb);

	/* gso_size is computed using append_cnt so always clear it last */
	IXGBE_CB(skb)->append_cnt = 0;
}
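
/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 */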
static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	u32 flags = rx_ring->q_vector->adapter->flags;

	ixgbe_update_rsc_stats(rx_ring, skb);

	ixgbe_rx_hash(rx_ring, rx_desc, skb);

	ixgbe_rx_checksum(rx_ring, rx_desc, skb);

	if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
		ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
		ixgbe_ipsec_rx(rx_ring, rx_desc, skb);

	skb->protocol = eth_type_trans(skb, dev);

	/* record Rx queue, or update macvlan statistics */
	if (netif_is_ixgbe(dev))
		skb_record_rx_queue(skb, rx_ring->queue_index);
	else
		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
				 (skb->pkt_type == PACKET_BROADCAST) ||
				 (skb->pkt_type == PACKET_MULTICAST));
}

static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
			 struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}
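
/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */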
static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
			     union ixgbe_adv_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBE_RX_DESC(rx_ring, ntc));

	/* update RSC append count if present */
	if (ring_is_rsc_enabled(rx_ring)) {
		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
				     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);

		if (unlikely(rsc_enabled)) {
			u32 rsc_cnt = le32_to_cpu(rsc_enabled);

			rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;

			/* update ntc based on RSC value */
			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
			ntc &= IXGBE_RXDADV_NEXTP_MASK;
			ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
		}
	}

	/* if we are the last buffer then there is nothing else to do */
	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buffer_info[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}
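
/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */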
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
			    struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lomem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}
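
/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */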
static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
				struct sk_buff *skb)
{
	/* if the page was released unmap it, else just sync our portion */
	if (unlikely(IXGBE_CB(skb)->page_released)) {
		dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
				     ixgbe_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IXGBE_RX_DMA_ATTR);
	} else if (ring_uses_build_skb(rx_ring)) {
		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;

		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      offset,
					      skb_headlen(skb),
					      DMA_FROM_DEVICE);
	} else {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];

		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      frag->page_offset,
					      skb_frag_size(frag),
					      DMA_FROM_DEVICE);
	}
}
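
/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check if the skb is valid; in the XDP case it will be an error pointer,
 * and we return true to abort processing and advance to the next
 * descriptor.
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition, if the skb is not at least 60 bytes we need to pad it so
 * that it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */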
static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
				  union ixgbe_adv_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	struct net_device *netdev = rx_ring->netdev;

	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	/* Verify netdev is present, and that packet does not have any
	 * errors that would be unacceptable to the netdev.
	 */
	if (!netdev ||
	    (unlikely(ixgbe_test_staterr(rx_desc,
					 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
		      !(netdev->features & NETIF_F_RXALL)))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* place header in linear portion of buffer */
	if (!skb_headlen(skb))
		ixgbe_pull_tail(rx_ring, skb);

#ifdef IXGBE_FCOE
	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
		return false;

#endif
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
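
/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */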
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
				struct ixgbe_rx_buffer *old_buff)
{
	struct ixgbe_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buff->dma		= old_buff->dma;
	new_buff->page		= old_buff->page;
	new_buff->page_offset	= old_buff->page_offset;
	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
}

static inline bool ixgbe_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote pages */
	if (unlikely(ixgbe_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
		return false;
#else
	/* The last offset is a bit aggressive in that we assume the
	 * worst case of FCoE being enabled and using a 3K buffer.
	 * However this should have minimal impact as the 1K extra is
	 * still less than one buffer in size.
	 */
#define IXGBE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
	if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
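
/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of data in rx_buffer
 *
 * This function will add the data contained in rx_buffer->page to the skb
 * by attaching the page as a frag, then advance the buffer's page offset
 * (flipping halves on small-page systems) so the other half can be reused.
 */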
static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
			      struct ixgbe_rx_buffer *rx_buffer,
			      struct sk_buff *skb,
			      unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
						   union ixgbe_adv_rx_desc *rx_desc,
						   struct sk_buff **skb,
						   const unsigned int size)
{
	struct ixgbe_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);
	*skb = rx_buffer->skb;

	/* Delay unmapping of the first packet. It carries the header
	 * information, and HW may still access the header after the
	 * writeback.  Only unmap it when EOP is reached.
	 */
	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
		if (!*skb)
			goto skip_sync;
	} else {
		if (*skb)
			ixgbe_dma_sync_frag(rx_ring, *skb);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);
skip_sync:
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
				struct ixgbe_rx_buffer *rx_buffer,
				struct sk_buff *skb)
{
	if (ixgbe_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
			/* the page has been released from the ring */
			IXGBE_CB(skb)->page_released = true;
		} else {
			/* we are not reusing the buffer so unmap it */
			dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
					     ixgbe_rx_pg_size(rx_ring),
					     DMA_FROM_DEVICE,
					     IXGBE_RX_DMA_ATTR);
		}
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
	rx_buffer->skb = NULL;
}

static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
					   struct ixgbe_rx_buffer *rx_buffer,
					   struct xdp_buff *xdp,
					   union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
	prefetch(xdp->data + L1_CACHE_BYTES);
#endif
	/* Note, we get here by enabling legacy-rx via:
	 *
	 *    ethtool --set-priv-flags <dev> legacy-rx on
	 *
	 * In this mode, we currently get 0 extra XDP headroom as
	 * opposed to having legacy-rx off, where we process XDP
	 * packets going to stack via ixgbe_build_skb().
	 *
	 * For ixgbe_construct_skb() mode it means that the
	 * xdp->data_meta will always point to xdp->data, since
	 * the helper cannot expand the head.
	 */

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;

	if (size > IXGBE_RX_HDR_SIZE) {
		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
			IXGBE_CB(skb)->dma = rx_buffer->dma;

		skb_add_rx_frag(skb, 0, rx_buffer->page,
				xdp->data - page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		memcpy(__skb_put(skb, size),
		       xdp->data, ALIGN(size, sizeof(long)));
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
				       struct ixgbe_rx_buffer *rx_buffer,
				       struct xdp_buff *xdp,
				       union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly at xdp->data, otherwise we
	 * likely have a consumer accessing the first few bytes of meta
	 * data, and then the actual data.
	 */
	prefetch(xdp->data_meta);
#if L1_CACHE_BYTES < 128
	prefetch(xdp->data_meta + L1_CACHE_BYTES);
#endif

	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* record DMA address if this is the start of a chain of buffers */
	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
		IXGBE_CB(skb)->dma = rx_buffer->dma;

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

#define IXGBE_XDP_PASS		0
#define IXGBE_XDP_CONSUMED	1
#define IXGBE_XDP_TX		2

static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
			       struct xdp_buff *xdp);

static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *rx_ring,
				     struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	if (!xdp_prog)
		goto xdp_out;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		result = ixgbe_xmit_xdp_ring(adapter, xdp);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
		if (!err)
			result = IXGBE_XDP_TX;
		else
			result = IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fallthrough */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fallthrough -- handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	}
xdp_out:
	rcu_read_unlock();
	return ERR_PTR(-result);
}

static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
				 struct ixgbe_rx_buffer *rx_buffer,
				 unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;

	rx_buffer->page_offset ^= truesize;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);

	rx_buffer->page_offset += truesize;
#endif
}
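
/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  Rather than mapping and unmapping skbs directly, received
 * data is copied or page-flipped out of reusable DMA-mapped pages, and any
 * XDP program attached to the ring is run before an skb is built.
 *
 * Returns amount of work completed.
 */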
2340static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2341 struct ixgbe_ring *rx_ring,
2342 const int budget)
2343{
2344 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2345 struct ixgbe_adapter *adapter = q_vector->adapter;
2346#ifdef IXGBE_FCOE
2347 int ddp_bytes;
2348 unsigned int mss = 0;
2349#endif
2350 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2351 bool xdp_xmit = false;
2352 struct xdp_buff xdp;
2353
2354 xdp.rxq = &rx_ring->xdp_rxq;
2355
2356 while (likely(total_rx_packets < budget)) {
2357 union ixgbe_adv_rx_desc *rx_desc;
2358 struct ixgbe_rx_buffer *rx_buffer;
2359 struct sk_buff *skb;
2360 unsigned int size;
2361
2362
2363 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2364 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2365 cleaned_count = 0;
2366 }
2367
2368 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2369 size = le16_to_cpu(rx_desc->wb.upper.length);
2370 if (!size)
2371 break;
2372
2373
2374
2375
2376
2377 dma_rmb();
2378
2379 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
2380
2381
2382 if (!skb) {
2383 xdp.data = page_address(rx_buffer->page) +
2384 rx_buffer->page_offset;
2385 xdp.data_meta = xdp.data;
2386 xdp.data_hard_start = xdp.data -
2387 ixgbe_rx_offset(rx_ring);
2388 xdp.data_end = xdp.data + size;
2389
2390 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2391 }
2392
2393 if (IS_ERR(skb)) {
2394 if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
2395 xdp_xmit = true;
2396 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2397 } else {
2398 rx_buffer->pagecnt_bias++;
2399 }
2400 total_rx_packets++;
2401 total_rx_bytes += size;
2402 } else if (skb) {
2403 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
2404 } else if (ring_uses_build_skb(rx_ring)) {
2405 skb = ixgbe_build_skb(rx_ring, rx_buffer,
2406 &xdp, rx_desc);
2407 } else {
2408 skb = ixgbe_construct_skb(rx_ring, rx_buffer,
2409 &xdp, rx_desc);
2410 }
2411
2412 /* exit if we failed to retrieve a buffer */
2413 if (!skb) {
2414 rx_ring->rx_stats.alloc_rx_buff_failed++;
2415 rx_buffer->pagecnt_bias++;
2416 break;
2417 }
2418
2419 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
2420 cleaned_count++;
2421
2422 /* place incomplete frames back on ring for completion */
2423 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2424 continue;
2425
2426 /* verify the packet layout is correct */
2427 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2428 continue;
2429
2430 /* probably a little skewed due to removing CRC */
2431 total_rx_bytes += skb->len;
2432
2433 /* populate checksum, timestamp, VLAN, and protocol */
2434 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2435
2436#ifdef IXGBE_FCOE
2437 /* if ddp, not passing to ULD unless for FCP_RSP or error */
2438 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2439 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2440
2441 if (ddp_bytes > 0) {
2442 if (!mss) {
2443 mss = rx_ring->netdev->mtu -
2444 sizeof(struct fcoe_hdr) -
2445 sizeof(struct fc_frame_header) -
2446 sizeof(struct fcoe_crc_eof);
2447 if (mss > 512)
2448 mss &= ~511;
2449 }
2450 total_rx_bytes += ddp_bytes;
2451 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2452 mss);
2453 }
2454 if (!ddp_bytes) {
2455 dev_kfree_skb_any(skb);
2456 continue;
2457 }
2458 }
2459
2460#endif
2461 ixgbe_rx_skb(q_vector, skb);
2462
2463 /* update budget accounting */
2464 total_rx_packets++;
2465 }
2466
2467 if (xdp_xmit) {
2468 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
2469
2470 /* Force memory writes to complete before letting h/w
2471 * know there are new descriptors to fetch.
2472 */
2473 wmb();
2474 writel(ring->next_to_use, ring->tail);
2475
2476 xdp_do_flush_map();
2477 }
2478
2479 u64_stats_update_begin(&rx_ring->syncp);
2480 rx_ring->stats.packets += total_rx_packets;
2481 rx_ring->stats.bytes += total_rx_bytes;
2482 u64_stats_update_end(&rx_ring->syncp);
2483 q_vector->rx.total_packets += total_rx_packets;
2484 q_vector->rx.total_bytes += total_rx_bytes;
2485
2486 return total_rx_packets;
2487}
2488
2489/**
2490 * ixgbe_configure_msix - Configure MSI-X hardware
2491 * @adapter: board private structure
2492 *
2493 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
2494 * interrupts.
2495 **/
2496static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2497{
2498 struct ixgbe_q_vector *q_vector;
2499 int v_idx;
2500 u32 mask;
2501
2502 /* Populate MSIX to EITR Select */
2503 if (adapter->num_vfs > 32) {
2504 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
2505 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2506 }
2507
2508 /*
2509 * Populate the IVAR table and set the ITR values to the
2510 * corresponding register.
2511 */
2512 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2513 struct ixgbe_ring *ring;
2514 q_vector = adapter->q_vector[v_idx];
2515
2516 ixgbe_for_each_ring(ring, q_vector->rx)
2517 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2518
2519 ixgbe_for_each_ring(ring, q_vector->tx)
2520 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2521
2522 ixgbe_write_eitr(q_vector);
2523 }
2524
2525 switch (adapter->hw.mac.type) {
2526 case ixgbe_mac_82598EB:
2527 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2528 v_idx);
2529 break;
2530 case ixgbe_mac_82599EB:
2531 case ixgbe_mac_X540:
2532 case ixgbe_mac_X550:
2533 case ixgbe_mac_X550EM_x:
2534 case ixgbe_mac_x550em_a:
2535 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2536 break;
2537 default:
2538 break;
2539 }
2540 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2541
2542 /* set up to autoclear timer, and the vectors */
2543 mask = IXGBE_EIMS_ENABLE_MASK;
2544 mask &= ~(IXGBE_EIMS_OTHER |
2545 IXGBE_EIMS_MAILBOX |
2546 IXGBE_EIMS_LSC);
2547
2548 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2549}
2550
2551/**
2552 * ixgbe_update_itr - update the dynamic ITR value based on statistics
2553 * @q_vector: structure containing interrupt and ring information
2554 * @ring_container: structure containing ring performance data
2555 *
2556 * Stores a new ITR value based on packets and byte counts during the
2557 * last interrupt.  The advantage of per interrupt computation is
2558 * faster updates and more accurate ITR for the current traffic
2559 * pattern.  Constants in this function were computed based on
2560 * theoretical maximum wire speed and thresholds were set based on
2561 * testing data as well as attempting to minimize response time while
2562 * increasing bulk throughput.
2563 **/
2564static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2565 struct ixgbe_ring_container *ring_container)
2566{
2567 unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
2568 IXGBE_ITR_ADAPTIVE_LATENCY;
2569 unsigned int avg_wire_size, packets, bytes;
2570 unsigned long next_update = jiffies;
2571
2572 /* If we don't have any rings just leave ourselves set for maximum
2573 * possible latency so we take ourselves out of the equation.
2574 */
2575 if (!ring_container->ring)
2576 return;
2577
2578 /* If we didn't update within up to 1 - 2 jiffies we can assume
2579 * that either packets are coming in so slow there hasn't been
2580 * any work, or that there is so much work that NAPI is dealing
2581 * with interrupt moderation and we don't need to do anything.
2582 */
2583 if (time_after(next_update, ring_container->next_update))
2584 goto clear_counts;
2585
2586 packets = ring_container->total_packets;
2587
2588 /* We have no packets to actually measure against. This means
2589 * either one of the other queues on this vector is active or
2590 * we are a Tx queue doing TSO with too high of an interrupt rate.
2591 *
2592 * When this occurs just tick up our delay by the minimum value
2593 * and hope that this extra delay will prevent us from being called
2594 * before the pending data is actually delivered.
2595 */
2596 if (!packets) {
2597 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2598 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2599 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2600 itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
2601 goto clear_counts;
2602 }
2603
2604 bytes = ring_container->total_bytes;
2605
2606 /* If packets are less than 4 or bytes are less than 9000 assume
2607 * insufficient data to use bulk rate limiting approach. We are
2608 * likely latency driven.
2609 */
2610 if (packets < 4 && bytes < 9000) {
2611 itr = IXGBE_ITR_ADAPTIVE_LATENCY;
2612 goto adjust_by_size;
2613 }
2614
2615 /* Between 4 and 48 packets we can assume that our current interrupt
2616 * delay is only slightly too low. As such we should increase it by
2617 * a small fixed amount.
2618 */
2619 if (packets < 48) {
2620 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2621 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2622 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2623 goto clear_counts;
2624 }
2625
2626 /* Between 48 and 96 is our "goldilocks" zone where we are working
2627 * out "just right". Just report that our current ITR is good for us.
2628 */
2629 if (packets < 96) {
2630 itr = q_vector->itr >> 2;
2631 goto clear_counts;
2632 }
2633
2634 /* If packet count is 96 or greater we are likely looking at a
2635 * slight overrun of the delay we want. Try halving our delay to see
2636 * if that will cut the number of packets in half per interrupt.
2637 */
2638 if (packets < 256) {
2639 itr = q_vector->itr >> 3;
2640 if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
2641 itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
2642 goto clear_counts;
2643 }
2644
2645 /* The paths below assume we are dealing with a bulk ITR since the
2646 * number of packets is greater than 256. We are just going to have
2647 * to compute a value and try to apply it.
2648 */
2651 itr = IXGBE_ITR_ADAPTIVE_BULK;
2652
2653adjust_by_size:
2654 /* If packet counts are 256 or greater we can assume we have a gross
2655 * overestimation of what the rate should be. Instead of trying to
2656 * fine tune it just use the formula below to try and dial in an
2657 * exact value given the current packet size of the frame.
2658 */
2659 avg_wire_size = bytes / packets;
2660
2661 /* The following is a crude approximation of:
2662 *  wmem_default / (size + overhead) = desired_pkts_per_int
2663 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
2664 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
2665 *
2666 * Assuming wmem_default is 212992 and overhead is 640 bytes per
2667 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
2668 * formula down to
2669 *
2670 *  (170 * (size + 24)) / (size + 640) = ITR
2671 *
2672 * We first do some math on the packet size and then finally bitshift
2673 * by 8 after rounding up. We also have to account for PCIe link speed
2674 * difference as ITR scales based on this.
2675 */
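/* Worked example (a sketch; assumes IXGBE_ITR_ADAPTIVE_MIN_INC is 2 as
 * defined in ixgbe.h): a 1500 byte average frame lands in the last
 * linear range below, giving 5 * 1500 + 22420 = 29920.  At 10GbE this
 * becomes DIV_ROUND_UP(29920, 2 * 256) * 2 = 118 ITR units, before any
 * latency-mode halving is applied.
 */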
2676 if (avg_wire_size <= 60) {
2677 /* Start at 50K ints/sec */
2678 avg_wire_size = 5120;
2679 } else if (avg_wire_size <= 316) {
2680 /* 50K ints/sec to 16K ints/sec */
2681 avg_wire_size *= 40;
2682 avg_wire_size += 2720;
2683 } else if (avg_wire_size <= 1084) {
2684 /* 16K ints/sec to 9.2K ints/sec */
2685 avg_wire_size *= 15;
2686 avg_wire_size += 11452;
2687 } else if (avg_wire_size <= 1980) {
2688 /* 9.2K ints/sec to 8K ints/sec */
2689 avg_wire_size *= 5;
2690 avg_wire_size += 22420;
2691 } else {
2692 /* plateau at a limit of 8K ints/sec */
2693 avg_wire_size = 32256;
2694 }
2695
2696 /* If we are in low latency mode halve our delay which doubles the
2697 * rate to somewhere between 100K to 16K ints/sec
2698 */
2699 if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
2700 avg_wire_size >>= 1;
2701
2702 /* Resultant value is 256 times larger than what we want to
2703 * accumulate in the EITR register, so divide it back down while
2704 * rounding up.  At the same time account for link speed: slower
2705 * links transmit fewer packets per microsecond, so they get a
2706 * proportionally larger increment per byte (a divisor of 64
2707 * instead of 256 in the math below).
2708 */
2709 switch (q_vector->adapter->link_speed) {
2710 case IXGBE_LINK_SPEED_10GB_FULL:
2711 case IXGBE_LINK_SPEED_100_FULL:
2712 default:
2713 itr += DIV_ROUND_UP(avg_wire_size,
2714 IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
2715 IXGBE_ITR_ADAPTIVE_MIN_INC;
2716 break;
2717 case IXGBE_LINK_SPEED_2_5GB_FULL:
2718 case IXGBE_LINK_SPEED_1GB_FULL:
2719 case IXGBE_LINK_SPEED_10_FULL:
2720 itr += DIV_ROUND_UP(avg_wire_size,
2721 IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
2722 IXGBE_ITR_ADAPTIVE_MIN_INC;
2723 break;
2724 }
2725
2726clear_counts:
2727 /* write back value */
2728 ring_container->itr = itr;
2729
2730 /* next update should occur within next jiffy */
2731 ring_container->next_update = next_update + 1;
2732
2733 ring_container->total_bytes = 0;
2734 ring_container->total_packets = 0;
2735}
2736
2737/**
2738 * ixgbe_write_eitr - write EITR register in hardware specific way
2739 * @q_vector: structure containing interrupt and ring information
2740 *
2741 * This function is made to be called by ethtool and by the driver
2742 * when it needs to update EITR registers at runtime.  Hardware
2743 * specific quirks/differences are taken care of here.
2744 */
2745void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2746{
2747 struct ixgbe_adapter *adapter = q_vector->adapter;
2748 struct ixgbe_hw *hw = &adapter->hw;
2749 int v_idx = q_vector->v_idx;
2750 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2751
2752 switch (adapter->hw.mac.type) {
2753 case ixgbe_mac_82598EB:
2754 /* must write high and low 16 bits to reset counter */
2755 itr_reg |= (itr_reg << 16);
2756 break;
2757 case ixgbe_mac_82599EB:
2758 case ixgbe_mac_X540:
2759 case ixgbe_mac_X550:
2760 case ixgbe_mac_X550EM_x:
2761 case ixgbe_mac_x550em_a:
2762 /*
2763 * set the WDIS bit to not clear the timer bits and cause an
2764 * immediate assertion of the interrupt
2765 */
2766 itr_reg |= IXGBE_EITR_CNT_WDIS;
2767 break;
2768 default:
2769 break;
2770 }
2771 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2772}
2773
2774static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2775{
2776 u32 new_itr;
2777
2778 ixgbe_update_itr(q_vector, &q_vector->tx);
2779 ixgbe_update_itr(q_vector, &q_vector->rx);
2780
2781 /* use the smallest value of new ITR delay calculations */
2782 new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
2783
2784 /* strip the latency flag and shift into EITR register format */
2785 new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
2786 new_itr <<= 2;
2787
2788 if (new_itr != q_vector->itr) {
2789 /* save the algorithm value here */
2790 q_vector->itr = new_itr;
2791
2792 ixgbe_write_eitr(q_vector);
2793 }
2794}
2795
2796/**
2797 * ixgbe_check_overtemp_subtask - check for over temperature
2798 * @adapter: pointer to adapter
2799 **/
2800static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2801{
2802 struct ixgbe_hw *hw = &adapter->hw;
2803 u32 eicr = adapter->interrupt_event;
2804 s32 rc;
2805
2806 if (test_bit(__IXGBE_DOWN, &adapter->state))
2807 return;
2808
2809 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2810 return;
2811
2812 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2813
2814 switch (hw->device_id) {
2815 case IXGBE_DEV_ID_82599_T3_LOM:
2816 /*
2817 * Since the warning interrupt is for both ports
2818 * we don't have to check if:
2819 *  - This interrupt wasn't for our port.
2820 *  - We may have missed the interrupt so always have to
2821 *    check if we got a LSC
2822 */
2823 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2824 !(eicr & IXGBE_EICR_LSC))
2825 return;
2826
2827 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2828 u32 speed;
2829 bool link_up = false;
2830
2831 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2832
2833 if (link_up)
2834 return;
2835 }
2836
2837 /* Check if this is not due to overtemp */
2838 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2839 return;
2840
2841 break;
2842 case IXGBE_DEV_ID_X550EM_A_1G_T:
2843 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2844 rc = hw->phy.ops.check_overtemp(hw);
2845 if (rc != IXGBE_ERR_OVERTEMP)
2846 return;
2847 break;
2848 default:
2849 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2850 return;
2851 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2852 return;
2853 break;
2854 }
2855 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2856
2857 adapter->interrupt_event = 0;
2858}
2859
2860static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2861{
2862 struct ixgbe_hw *hw = &adapter->hw;
2863
2864 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2865 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2866 e_crit(probe, "Fan has stopped, replace the adapter\n");
2867 /* write to clear the interrupt */
2868 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2869 }
2870}
2871
2872static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2873{
2874 struct ixgbe_hw *hw = &adapter->hw;
2875
2876 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2877 return;
2878
2879 switch (adapter->hw.mac.type) {
2880 case ixgbe_mac_82599EB:
2881 /*
2882 * Need to check link state so complete overtemp check
2883 * on service task
2884 */
2885 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2886 (eicr & IXGBE_EICR_LSC)) &&
2887 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2888 adapter->interrupt_event = eicr;
2889 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2890 ixgbe_service_event_schedule(adapter);
2891 return;
2892 }
2893 return;
2894 case ixgbe_mac_x550em_a:
2895 if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
2896 adapter->interrupt_event = eicr;
2897 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2898 ixgbe_service_event_schedule(adapter);
2899 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
2900 IXGBE_EICR_GPI_SDP0_X550EM_a);
2901 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
2902 IXGBE_EICR_GPI_SDP0_X550EM_a);
2903 }
2904 return;
2905 case ixgbe_mac_X550:
2906 case ixgbe_mac_X540:
2907 if (!(eicr & IXGBE_EICR_TS))
2908 return;
2909 break;
2910 default:
2911 return;
2912 }
2913
2914 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2915}
2916
2917static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2918{
2919 switch (hw->mac.type) {
2920 case ixgbe_mac_82598EB:
2921 if (hw->phy.type == ixgbe_phy_nl)
2922 return true;
2923 return false;
2924 case ixgbe_mac_82599EB:
2925 case ixgbe_mac_X550EM_x:
2926 case ixgbe_mac_x550em_a:
2927 switch (hw->mac.ops.get_media_type(hw)) {
2928 case ixgbe_media_type_fiber:
2929 case ixgbe_media_type_fiber_qsfp:
2930 return true;
2931 default:
2932 return false;
2933 }
2934 default:
2935 return false;
2936 }
2937}
2938
2939static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2940{
2941 struct ixgbe_hw *hw = &adapter->hw;
2942 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2943
2944 if (!ixgbe_is_sfp(hw))
2945 return;
2946
2947 /* Later MAC's use different SDP */
2948 if (hw->mac.type >= ixgbe_mac_X540)
2949 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2950
2951 if (eicr & eicr_mask) {
2952 /* Clear the interrupt */
2953 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2954 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2955 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2956 adapter->sfp_poll_time = 0;
2957 ixgbe_service_event_schedule(adapter);
2958 }
2959 }
2960
2961 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2962 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2963 /* Clear the interrupt */
2964 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2965 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2966 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2967 ixgbe_service_event_schedule(adapter);
2968 }
2969 }
2970}
2971
2972static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2973{
2974 struct ixgbe_hw *hw = &adapter->hw;
2975
2976 adapter->lsc_int++;
2977 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2978 adapter->link_check_timeout = jiffies;
2979 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2980 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2981 IXGBE_WRITE_FLUSH(hw);
2982 ixgbe_service_event_schedule(adapter);
2983 }
2984}
2985
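/* qmask in the helpers below is a 64-bit bitmap of queue vectors.  On
 * 82598 a single EIMS/EIMC write covers everything; the newer MACs
 * split the map across the EIMS_EX/EIMC_EX register pairs, 32 queues
 * per register.
 */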
2986static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2987 u64 qmask)
2988{
2989 u32 mask;
2990 struct ixgbe_hw *hw = &adapter->hw;
2991
2992 switch (hw->mac.type) {
2993 case ixgbe_mac_82598EB:
2994 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2995 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2996 break;
2997 case ixgbe_mac_82599EB:
2998 case ixgbe_mac_X540:
2999 case ixgbe_mac_X550:
3000 case ixgbe_mac_X550EM_x:
3001 case ixgbe_mac_x550em_a:
3002 mask = (qmask & 0xFFFFFFFF);
3003 if (mask)
3004 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
3005 mask = (qmask >> 32);
3006 if (mask)
3007 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
3008 break;
3009 default:
3010 break;
3011 }
3012
3013}
3014
3015static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
3016 u64 qmask)
3017{
3018 u32 mask;
3019 struct ixgbe_hw *hw = &adapter->hw;
3020
3021 switch (hw->mac.type) {
3022 case ixgbe_mac_82598EB:
3023 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
3024 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3025 break;
3026 case ixgbe_mac_82599EB:
3027 case ixgbe_mac_X540:
3028 case ixgbe_mac_X550:
3029 case ixgbe_mac_X550EM_x:
3030 case ixgbe_mac_x550em_a:
3031 mask = (qmask & 0xFFFFFFFF);
3032 if (mask)
3033 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3034 mask = (qmask >> 32);
3035 if (mask)
3036 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3037 break;
3038 default:
3039 break;
3040 }
3041
3042}
3043
3044/**
3045 * ixgbe_irq_enable - Enable default interrupt generation settings
3046 * @adapter: board private structure
3047 * @queues: enable irqs for queues
3048 * @flush: flush register write
3049 **/
3050static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
3051 bool flush)
3052{
3053 struct ixgbe_hw *hw = &adapter->hw;
3054 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3055
3056 /* don't reenable LSC while waiting for link */
3057 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
3058 mask &= ~IXGBE_EIMS_LSC;
3059
3060 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
3061 switch (adapter->hw.mac.type) {
3062 case ixgbe_mac_82599EB:
3063 mask |= IXGBE_EIMS_GPI_SDP0(hw);
3064 break;
3065 case ixgbe_mac_X540:
3066 case ixgbe_mac_X550:
3067 case ixgbe_mac_X550EM_x:
3068 case ixgbe_mac_x550em_a:
3069 mask |= IXGBE_EIMS_TS;
3070 break;
3071 default:
3072 break;
3073 }
3074 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3075 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3076 switch (adapter->hw.mac.type) {
3077 case ixgbe_mac_82599EB:
3078 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3079 mask |= IXGBE_EIMS_GPI_SDP2(hw);
3080 /* fall through */
3081 case ixgbe_mac_X540:
3082 case ixgbe_mac_X550:
3083 case ixgbe_mac_X550EM_x:
3084 case ixgbe_mac_x550em_a:
3085 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3086 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3087 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
3088 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
3089 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
3090 mask |= IXGBE_EICR_GPI_SDP0_X540;
3091 mask |= IXGBE_EIMS_ECC;
3092 mask |= IXGBE_EIMS_MAILBOX;
3093 break;
3094 default:
3095 break;
3096 }
3097
3098 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
3099 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
3100 mask |= IXGBE_EIMS_FLOW_DIR;
3101
3102 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
3103 if (queues)
3104 ixgbe_irq_enable_queues(adapter, ~0);
3105 if (flush)
3106 IXGBE_WRITE_FLUSH(&adapter->hw);
3107}
3108
3109static irqreturn_t ixgbe_msix_other(int irq, void *data)
3110{
3111 struct ixgbe_adapter *adapter = data;
3112 struct ixgbe_hw *hw = &adapter->hw;
3113 u32 eicr;
3114
3115 /*
3116 * Workaround for Silicon errata.  Use clear-by-write instead
3117 * of clear-by-read.  Reading with EICS will return the
3118 * interrupt causes without clearing, which will later be done
3119 * with the write to EICR.
3120 */
3121 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3122
3123 /* The lower 16bits of the EICR register are for the queue interrupts
3124 * which should be masked here in order to not accidentally clear them if
3125 * the bits are high when ixgbe_msix_other is called. There is a race
3126 * condition otherwise which results in possible performance loss
3127 * especially if the ixgbe_msix_other interrupt is triggered
3128 * frequently.
3129 */
3130 eicr &= 0xFFFF0000;
3131
3132 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3133
3134 if (eicr & IXGBE_EICR_LSC)
3135 ixgbe_check_lsc(adapter);
3136
3137 if (eicr & IXGBE_EICR_MAILBOX)
3138 ixgbe_msg_task(adapter);
3139
3140 switch (hw->mac.type) {
3141 case ixgbe_mac_82599EB:
3142 case ixgbe_mac_X540:
3143 case ixgbe_mac_X550:
3144 case ixgbe_mac_X550EM_x:
3145 case ixgbe_mac_x550em_a:
3146 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
3147 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3148 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
3149 ixgbe_service_event_schedule(adapter);
3150 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3151 IXGBE_EICR_GPI_SDP0_X540);
3152 }
3153 if (eicr & IXGBE_EICR_ECC) {
3154 e_info(link, "Received ECC Err, initiating reset\n");
3155 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3156 ixgbe_service_event_schedule(adapter);
3157 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3158 }
3159 /* Handle Flow Director Full threshold interrupt */
3160 if (eicr & IXGBE_EICR_FLOW_DIR) {
3161 int reinit_count = 0;
3162 int i;
3163 for (i = 0; i < adapter->num_tx_queues; i++) {
3164 struct ixgbe_ring *ring = adapter->tx_ring[i];
3165 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
3166 &ring->state))
3167 reinit_count++;
3168 }
3169 if (reinit_count) {
3170 /* no more flow director interrupts until after init */
3171 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3172 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
3173 ixgbe_service_event_schedule(adapter);
3174 }
3175 }
3176 ixgbe_check_sfp_event(adapter, eicr);
3177 ixgbe_check_overtemp_event(adapter, eicr);
3178 break;
3179 default:
3180 break;
3181 }
3182
3183 ixgbe_check_fan_failure(adapter, eicr);
3184
3185 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3186 ixgbe_ptp_check_pps_event(adapter);
3187
3188 /* re-enable the original interrupt state, no lsc, no queues */
3189 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3190 ixgbe_irq_enable(adapter, false, false);
3191
3192 return IRQ_HANDLED;
3193}
3194
3195static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
3196{
3197 struct ixgbe_q_vector *q_vector = data;
3198
3199 /* EIAM disabled interrupts (on this vector) for us */
3200
3201 if (q_vector->rx.ring || q_vector->tx.ring)
3202 napi_schedule_irqoff(&q_vector->napi);
3203
3204 return IRQ_HANDLED;
3205}
3206
3207/**
3208 * ixgbe_poll - NAPI Rx polling callback
3209 * @napi: structure for representing this polling device
3210 * @budget: how many packets driver is allowed to clean
3211 *
3212 * This function is used for legacy and MSI, NAPI mode
3213 **/
3214int ixgbe_poll(struct napi_struct *napi, int budget)
3215{
3216 struct ixgbe_q_vector *q_vector =
3217 container_of(napi, struct ixgbe_q_vector, napi);
3218 struct ixgbe_adapter *adapter = q_vector->adapter;
3219 struct ixgbe_ring *ring;
3220 int per_ring_budget, work_done = 0;
3221 bool clean_complete = true;
3222
3223#ifdef CONFIG_IXGBE_DCA
3224 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3225 ixgbe_update_dca(q_vector);
3226#endif
3227
3228 ixgbe_for_each_ring(ring, q_vector->tx) {
3229 if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
3230 clean_complete = false;
3231 }
3232
3233 /* Exit if we are called by netpoll */
3234 if (budget <= 0)
3235 return budget;
3236
3237 /* attempt to distribute budget to each queue fairly, but don't
3238 * allow the budget to go below 1 because we'll exit polling */
3239 if (q_vector->rx.count > 1)
3240 per_ring_budget = max(budget/q_vector->rx.count, 1);
3241 else
3242 per_ring_budget = budget;
3243
3244 ixgbe_for_each_ring(ring, q_vector->rx) {
3245 int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
3246 per_ring_budget);
3247
3248 work_done += cleaned;
3249 if (cleaned >= per_ring_budget)
3250 clean_complete = false;
3251 }
3252
3253 /* If all work not completed, return budget and keep polling */
3254 if (!clean_complete)
3255 return budget;
3256
3257 /* all work done, exit the polling mode */
3258 napi_complete_done(napi, work_done);
3259 if (adapter->rx_itr_setting & 1)
3260 ixgbe_set_itr(q_vector);
3261 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3262 ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
3263 /* return strictly less than budget to tell NAPI we are done */
3264 return min(work_done, budget - 1);
3265}
3266
3267/**
3268 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
3269 * @adapter: board private structure
3270 *
3271 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
3272 * interrupts from the kernel.
3273 **/
3274static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
3275{
3276 struct net_device *netdev = adapter->netdev;
3277 unsigned int ri = 0, ti = 0;
3278 int vector, err;
3279
3280 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3281 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3282 struct msix_entry *entry = &adapter->msix_entries[vector];
3283
3284 if (q_vector->tx.ring && q_vector->rx.ring) {
3285 snprintf(q_vector->name, sizeof(q_vector->name),
3286 "%s-TxRx-%u", netdev->name, ri++);
3287 ti++;
3288 } else if (q_vector->rx.ring) {
3289 snprintf(q_vector->name, sizeof(q_vector->name),
3290 "%s-rx-%u", netdev->name, ri++);
3291 } else if (q_vector->tx.ring) {
3292 snprintf(q_vector->name, sizeof(q_vector->name),
3293 "%s-tx-%u", netdev->name, ti++);
3294 } else {
3295 /* skip this unused q_vector */
3296 continue;
3297 }
3298 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
3299 q_vector->name, q_vector);
3300 if (err) {
3301 e_err(probe, "request_irq failed for MSIX interrupt "
3302 "Error: %d\n", err);
3303 goto free_queue_irqs;
3304 }
3305
3306 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3307 /* assign the mask for this irq */
3308 irq_set_affinity_hint(entry->vector,
3309 &q_vector->affinity_mask);
3310 }
3311 }
3312
3313 err = request_irq(adapter->msix_entries[vector].vector,
3314 ixgbe_msix_other, 0, netdev->name, adapter);
3315 if (err) {
3316 e_err(probe, "request_irq for msix_other failed: %d\n", err);
3317 goto free_queue_irqs;
3318 }
3319
3320 return 0;
3321
3322free_queue_irqs:
3323 while (vector) {
3324 vector--;
3325 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
3326 NULL);
3327 free_irq(adapter->msix_entries[vector].vector,
3328 adapter->q_vector[vector]);
3329 }
3330 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3331 pci_disable_msix(adapter->pdev);
3332 kfree(adapter->msix_entries);
3333 adapter->msix_entries = NULL;
3334 return err;
3335}
3336
3337/**
3338 * ixgbe_intr - legacy mode Interrupt Handler
3339 * @irq: interrupt number
3340 * @data: pointer to a network interface device structure
3341 **/
3342static irqreturn_t ixgbe_intr(int irq, void *data)
3343{
3344 struct ixgbe_adapter *adapter = data;
3345 struct ixgbe_hw *hw = &adapter->hw;
3346 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3347 u32 eicr;
3348
3349 /* Workaround for silicon errata #26 on 82598.  Mask the interrupt
3350 * before the read of EICR.
3351 */
3353 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3354
3355 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
3356 * therefore no explicit interrupt disable is necessary */
3357 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3358 if (!eicr) {
3359 /*
3360 * shared interrupt alert!
3361 * make sure interrupts are enabled because the read will
3362 * have disabled interrupts due to EIAM
3363 * finish the workaround of silicon errata on 82598.  Unmask
3364 * the interrupt that we masked before the EICR read.
3365 */
3366 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3367 ixgbe_irq_enable(adapter, true, true);
3368 return IRQ_NONE;
3369 }
3370
3371 if (eicr & IXGBE_EICR_LSC)
3372 ixgbe_check_lsc(adapter);
3373
3374 switch (hw->mac.type) {
3375 case ixgbe_mac_82599EB:
3376 ixgbe_check_sfp_event(adapter, eicr);
3377 /* Fall through */
3378 case ixgbe_mac_X540:
3379 case ixgbe_mac_X550:
3380 case ixgbe_mac_X550EM_x:
3381 case ixgbe_mac_x550em_a:
3382 if (eicr & IXGBE_EICR_ECC) {
3383 e_info(link, "Received ECC Err, initiating reset\n");
3384 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3385 ixgbe_service_event_schedule(adapter);
3386 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3387 }
3388 ixgbe_check_overtemp_event(adapter, eicr);
3389 break;
3390 default:
3391 break;
3392 }
3393
3394 ixgbe_check_fan_failure(adapter, eicr);
3395 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3396 ixgbe_ptp_check_pps_event(adapter);
3397
3398 /* would disable interrupts here but EIAM disabled it */
3399 napi_schedule_irqoff(&q_vector->napi);
3400
3401 /*
3402 * re-enable link(maybe) and non-queue interrupts, no flush.
3403 * ixgbe_poll will re-enable the queue interrupts
3404 */
3405 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3406 ixgbe_irq_enable(adapter, false, false);
3407
3408 return IRQ_HANDLED;
3409}
3410
3411/**
3412 * ixgbe_request_irq - initialize interrupts
3413 * @adapter: board private structure
3414 *
3415 * Attempts to configure interrupts using the best available
3416 * capabilities of the hardware and kernel.
3417 **/
3418static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
3419{
3420 struct net_device *netdev = adapter->netdev;
3421 int err;
3422
3423 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3424 err = ixgbe_request_msix_irqs(adapter);
3425 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
3426 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
3427 netdev->name, adapter);
3428 else
3429 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
3430 netdev->name, adapter);
3431
3432 if (err)
3433 e_err(probe, "request_irq failed, Error %d\n", err);
3434
3435 return err;
3436}
3437
3438static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
3439{
3440 int vector;
3441
3442 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3443 free_irq(adapter->pdev->irq, adapter);
3444 return;
3445 }
3446
3447 if (!adapter->msix_entries)
3448 return;
3449
3450 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3451 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3452 struct msix_entry *entry = &adapter->msix_entries[vector];
3453
3454 /* free only the irqs that were actually requested */
3455 if (!q_vector->rx.ring && !q_vector->tx.ring)
3456 continue;
3457
3458 /* clear the affinity_mask in the IRQ descriptor */
3459 irq_set_affinity_hint(entry->vector, NULL);
3460
3461 free_irq(entry->vector, q_vector);
3462 }
3463
3464 free_irq(adapter->msix_entries[vector].vector, adapter);
3465}
3466
3467/**
3468 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
3469 * @adapter: board private structure
3470 **/
3471static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3472{
3473 switch (adapter->hw.mac.type) {
3474 case ixgbe_mac_82598EB:
3475 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3476 break;
3477 case ixgbe_mac_82599EB:
3478 case ixgbe_mac_X540:
3479 case ixgbe_mac_X550:
3480 case ixgbe_mac_X550EM_x:
3481 case ixgbe_mac_x550em_a:
3482 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3483 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3484 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3485 break;
3486 default:
3487 break;
3488 }
3489 IXGBE_WRITE_FLUSH(&adapter->hw);
3490 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3491 int vector;
3492
3493 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3494 synchronize_irq(adapter->msix_entries[vector].vector);
3495
3496 synchronize_irq(adapter->msix_entries[vector++].vector);
3497 } else {
3498 synchronize_irq(adapter->pdev->irq);
3499 }
3500}
3501
3502/**
3503 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
3504 * @adapter: board private structure
3505 *
3506 **/
3507static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3508{
3509 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3510
3511 ixgbe_write_eitr(q_vector);
3512
3513 ixgbe_set_ivar(adapter, 0, 0, 0);
3514 ixgbe_set_ivar(adapter, 1, 0, 0);
3515
3516 e_info(hw, "Legacy interrupt IVAR setup done\n");
3517}
3518
3519/**
3520 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
3521 * @adapter: board private structure
3522 * @ring: structure containing ring specific data
3523 *
3524 * Configure the Tx descriptor ring after a reset.
3525 **/
3526void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3527 struct ixgbe_ring *ring)
3528{
3529 struct ixgbe_hw *hw = &adapter->hw;
3530 u64 tdba = ring->dma;
3531 int wait_loop = 10;
3532 u32 txdctl = IXGBE_TXDCTL_ENABLE;
3533 u8 reg_idx = ring->reg_idx;
3534
3535 /* disable queue to avoid issues while updating state */
3536 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3537 IXGBE_WRITE_FLUSH(hw);
3538
3539 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3540 (tdba & DMA_BIT_MASK(32)));
3541 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3542 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3543 ring->count * sizeof(union ixgbe_adv_tx_desc));
3544 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3545 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3546 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3547
3548 /*
3549 * set WTHRESH to encourage burst writeback, it should not be set
3550 * higher than 1 when:
3551 * - ITR is 0 as it could cause false TX hangs
3552 * - ITR is set to > 100k int/sec and BQL is enabled
3553 *
3554 * In order to avoid issues WTHRESH + PTHRESH should always be equal
3555 * to or less than the number of on chip descriptors, which is
3556 * currently 40.
3557 */
3558 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3559 txdctl |= 1u << 16;	/* WTHRESH = 1 */
3560 else
3561 txdctl |= 8u << 16;	/* WTHRESH = 8 */
3562
3563 /*
3564 * Setting PTHRESH to 32 both improves performance
3565 * and avoids a TX hang with DFP enabled
3566 */
3567 txdctl |= (1u << 8) |	/* HTHRESH = 1 */
3568 32;	/* PTHRESH = 32 */
3569
3570 /* reinitialize flowdirector state */
3571 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3572 ring->atr_sample_rate = adapter->atr_sample_rate;
3573 ring->atr_count = 0;
3574 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3575 } else {
3576 ring->atr_sample_rate = 0;
3577 }
3578
3579 /* initialize XPS */
3580 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3581 struct ixgbe_q_vector *q_vector = ring->q_vector;
3582
3583 if (q_vector)
3584 netif_set_xps_queue(ring->netdev,
3585 &q_vector->affinity_mask,
3586 ring->queue_index);
3587 }
3588
3589 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3590
3591 /* reinitialize tx_buffer_info */
3592 memset(ring->tx_buffer_info, 0,
3593 sizeof(struct ixgbe_tx_buffer) * ring->count);
3594
3595 /* enable queue */
3596 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3597
3598 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3599 if (hw->mac.type == ixgbe_mac_82598EB &&
3600 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3601 return;
3602
3603 /* poll to verify queue is enabled */
3604 do {
3605 usleep_range(1000, 2000);
3606 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3607 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3608 if (!wait_loop)
3609 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
3610}
3611
3612static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3613{
3614 struct ixgbe_hw *hw = &adapter->hw;
3615 u32 rttdcs, mtqc;
3616 u8 tcs = adapter->hw_tcs;
3617
3618 if (hw->mac.type == ixgbe_mac_82598EB)
3619 return;
3620
3621 /* disable the arbiter while setting MTQC */
3622 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3623 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3624 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3625
3626 /* set transmit pool layout */
3627 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3628 mtqc = IXGBE_MTQC_VT_ENA;
3629 if (tcs > 4)
3630 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3631 else if (tcs > 1)
3632 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3633 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3634 IXGBE_82599_VMDQ_4Q_MASK)
3635 mtqc |= IXGBE_MTQC_32VF;
3636 else
3637 mtqc |= IXGBE_MTQC_64VF;
3638 } else {
3639 if (tcs > 4)
3640 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3641 else if (tcs > 1)
3642 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3643 else
3644 mtqc = IXGBE_MTQC_64Q_1PB;
3645 }
3646
3647 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3648
3649 /* Enable Security TX Buffer IFG for DCB traffic */
3650 if (tcs) {
3651 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3652 sectx |= IXGBE_SECTX_DCB;
3653 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3654 }
3655
3656 /* re-enable the arbiter */
3657 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3658 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3659}
3660
3661/**
3662 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
3663 * @adapter: board private structure
3664 *
3665 * Configure the Tx unit of the MAC after a reset.
3666 **/
3667static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3668{
3669 struct ixgbe_hw *hw = &adapter->hw;
3670 u32 dmatxctl;
3671 u32 i;
3672
3673 ixgbe_setup_mtqc(adapter);
3674
3675 if (hw->mac.type != ixgbe_mac_82598EB) {
3676 /* DMATXCTL.EN must be before Tx queues are enabled */
3677 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3678 dmatxctl |= IXGBE_DMATXCTL_TE;
3679 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3680 }
3681
3682 /* Setup the HW Tx Head and Tail descriptor pointers */
3683 for (i = 0; i < adapter->num_tx_queues; i++)
3684 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3685 for (i = 0; i < adapter->num_xdp_queues; i++)
3686 ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
3687}
3688
3689static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3690 struct ixgbe_ring *ring)
3691{
3692 struct ixgbe_hw *hw = &adapter->hw;
3693 u8 reg_idx = ring->reg_idx;
3694 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3695
3696 srrctl |= IXGBE_SRRCTL_DROP_EN;
3697
3698 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3699}
3700
3701static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3702 struct ixgbe_ring *ring)
3703{
3704 struct ixgbe_hw *hw = &adapter->hw;
3705 u8 reg_idx = ring->reg_idx;
3706 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3707
3708 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3709
3710 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3711}
3712
3713#ifdef CONFIG_IXGBE_DCB
3714void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3715#else
3716static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3717#endif
3718{
3719 int i;
3720 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3721
3722 if (adapter->ixgbe_ieee_pfc)
3723 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3724
3725 /*
3726 * We should set the drop enable bit if:
3727 *  SR-IOV is enabled
3728 *   or
3729 *  Number of Rx queues > 1 and flow control is disabled
3730 *
3731 *  This allows us to avoid head of line blocking for security
3732 *  and performance reasons.
3733 */
3734 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3735 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3736 for (i = 0; i < adapter->num_rx_queues; i++)
3737 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3738 } else {
3739 for (i = 0; i < adapter->num_rx_queues; i++)
3740 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3741 }
3742}
3743
3744#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
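/* SRRCTL packs several fields into one register.  The shift above
 * positions the header buffer size in its field, and the packet buffer
 * sizes written below are expressed in 1KB units via the right shift by
 * IXGBE_SRRCTL_BSIZEPKT_SHIFT (e.g. the 3K buffer becomes the value 3).
 */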
3745
3746static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3747 struct ixgbe_ring *rx_ring)
3748{
3749 struct ixgbe_hw *hw = &adapter->hw;
3750 u32 srrctl;
3751 u8 reg_idx = rx_ring->reg_idx;
3752
3753 if (hw->mac.type == ixgbe_mac_82598EB) {
3754 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3755
3756 /*
3757 * if VMDq is not active we must program one srrctl register
3758 * per RSS queue since we have enabled RDRXCTL.MVMEN
3759 */
3760 reg_idx &= mask;
3761 }
3762
3763 /* configure header buffer length, needed for RSC */
3764 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3765
3766 /* configure the packet buffer length */
3767 if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
3768 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3769 else
3770 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3771
3772 /* configure descriptor type */
3773 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3774
3775 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3776}
3777
3778/**
3779 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
3780 * @adapter: device handle
3781 *
3782 *  - 82598/82599/X540:     128
3783 *  - X550(non-SRIOV mode): 512
3784 *  - X550(SRIOV mode):     64
3785 */
3786u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3787{
3788 if (adapter->hw.mac.type < ixgbe_mac_X550)
3789 return 128;
3790 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3791 return 64;
3792 else
3793 return 512;
3794}
3795
3796/**
3797 * ixgbe_store_key - Write the RSS key to HW
3798 * @adapter: device handle
3799 *
3800 * Write the RSS key stored in adapter.rss_key to HW.
3801 */
3802void ixgbe_store_key(struct ixgbe_adapter *adapter)
3803{
3804 struct ixgbe_hw *hw = &adapter->hw;
3805 int i;
3806
3807 for (i = 0; i < 10; i++)
3808 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3809}
3810
3811/**
3812 * ixgbe_init_rss_key - Initialize adapter RSS key
3813 * @adapter: device handle
3814 *
3815 * Allocates and initializes the RSS key if it is not allocated.
3816 **/
3817static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
3818{
3819 u32 *rss_key;
3820
3821 if (!adapter->rss_key) {
3822 rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
3823 if (unlikely(!rss_key))
3824 return -ENOMEM;
3825
3826 netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
3827 adapter->rss_key = rss_key;
3828 }
3829
3830 return 0;
3831}
3832
3833/**
3834 * ixgbe_store_reta - Write the RETA table to HW
3835 * @adapter: device handle
3836 *
3837 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
3838 */
3839void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3840{
3841 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3842 struct ixgbe_hw *hw = &adapter->hw;
3843 u32 reta = 0;
3844 u32 indices_multi;
3845 u8 *indir_tbl = adapter->rss_indir_tbl;
3846
3847 /* Fill out the redirection table as follows:
3848 *  - 82598:      8 bit wide entries containing pair of 4 bit RSS
3849 *    indices.
3850 *  - 82599/X540: 8 bit wide entries containing 4 bit RSS index
3851 *  - X550:       8 bit wide entries containing 6 bit RSS index
3852 */
3853 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3854 indices_multi = 0x11;
3855 else
3856 indices_multi = 0x1;
3857
3858 /* Write redirection table to HW */
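/* Each 32-bit RETA register holds four 8-bit entries, so entry i lands
 * in byte (i & 3) of register (i >> 2); the 0x11 multiplier on 82598
 * duplicates the 4-bit index into both nibbles of its byte.  Tables
 * longer than 128 entries spill into the ERETA registers.
 */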
3859 for (i = 0; i < reta_entries; i++) {
3860 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3861 if ((i & 3) == 3) {
3862 if (i < 128)
3863 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3864 else
3865 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3866 reta);
3867 reta = 0;
3868 }
3869 }
3870}
3871
3872/**
3873 * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
3874 * @adapter: device handle
3875 *
3876 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
3877 */
3878static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3879{
3880 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3881 struct ixgbe_hw *hw = &adapter->hw;
3882 u32 vfreta = 0;
3883
3884 /* Write redirection table to HW */
3885 for (i = 0; i < reta_entries; i++) {
3886 u16 pool = adapter->num_rx_pools;
3887
3888 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3889 if ((i & 3) != 3)
3890 continue;
3891
3892 while (pool--)
3893 IXGBE_WRITE_REG(hw,
3894 IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
3895 vfreta);
3896 vfreta = 0;
3897 }
3898}
3899
3900static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3901{
3902 u32 i, j;
3903 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3904 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3905
3906 /* Program table for at least 4 queues w/ SR-IOV so that VFs can
3907 * make full use of any rings they may have.  We will use the
3908 * PSRTYPE register to control how many rings we use within the PF.
3909 */
3910 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
3911 rss_i = 4;
3912
3913 /* Fill out hash function seeds */
3914 ixgbe_store_key(adapter);
3915
3916 /* Fill out redirection table */
3917 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3918
3919 for (i = 0, j = 0; i < reta_entries; i++, j++) {
3920 if (j == rss_i)
3921 j = 0;
3922
3923 adapter->rss_indir_tbl[i] = j;
3924 }
3925
3926 ixgbe_store_reta(adapter);
3927}
3928
3929static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3930{
3931 struct ixgbe_hw *hw = &adapter->hw;
3932 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3933 int i, j;
3934
3935 /* Fill out hash function seeds */
3936 for (i = 0; i < 10; i++) {
3937 u16 pool = adapter->num_rx_pools;
3938
3939 while (pool--)
3940 IXGBE_WRITE_REG(hw,
3941 IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
3942 *(adapter->rss_key + i));
3943 }
3944
3945 /* Fill out the redirection table */
3946 for (i = 0, j = 0; i < 64; i++, j++) {
3947 if (j == rss_i)
3948 j = 0;
3949
3950 adapter->rss_indir_tbl[i] = j;
3951 }
3952
3953 ixgbe_store_vfreta(adapter);
3954}
3955
3956static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3957{
3958 struct ixgbe_hw *hw = &adapter->hw;
3959 u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3960 u32 rxcsum;
3961
3962 /* Disable indicating checksum in descriptor, enables RSS hash */
3963 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3964 rxcsum |= IXGBE_RXCSUM_PCSD;
3965 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3966
3967 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3968 if (adapter->ring_feature[RING_F_RSS].mask)
3969 mrqc = IXGBE_MRQC_RSSEN;
3970 } else {
3971 u8 tcs = adapter->hw_tcs;
3972
3973 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3974 if (tcs > 4)
3975 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3976 else if (tcs > 1)
3977 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3978 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3979 IXGBE_82599_VMDQ_4Q_MASK)
3980 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3981 else
3982 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3983
3984 /* Enable L3/L4 for Tx Switched packets */
3985 mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3986 } else {
3987 if (tcs > 4)
3988 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3989 else if (tcs > 1)
3990 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3991 else
3992 mrqc = IXGBE_MRQC_RSSEN;
3993 }
3994 }
3995
3996 /* Perform hash on these packet types */
3997 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3998 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3999 IXGBE_MRQC_RSS_FIELD_IPV6 |
4000 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
4001
4002 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
4003 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
4004 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
4005 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
4006
4007 if ((hw->mac.type >= ixgbe_mac_X550) &&
4008 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
4009 u16 pool = adapter->num_rx_pools;
4010
4011 /* Enable VF RSS mode */
4012 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
4013 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4014
4015 /* Setup RSS through the VF registers */
4016 ixgbe_setup_vfreta(adapter);
4017 vfmrqc = IXGBE_MRQC_RSSEN;
4018 vfmrqc |= rss_field;
4019
4020 while (pool--)
4021 IXGBE_WRITE_REG(hw,
4022 IXGBE_PFVFMRQC(VMDQ_P(pool)),
4023 vfmrqc);
4024 } else {
4025 ixgbe_setup_reta(adapter);
4026 mrqc |= rss_field;
4027 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4028 }
4029}
4030
4031/**
4032 * ixgbe_configure_rscctl - enable RSC for the indicated ring
4033 * @adapter: address of board private structure
4034 * @ring: structure containing ring specific data
4035 **/
4036static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
4037 struct ixgbe_ring *ring)
4038{
4039 struct ixgbe_hw *hw = &adapter->hw;
4040 u32 rscctrl;
4041 u8 reg_idx = ring->reg_idx;
4042
4043 if (!ring_is_rsc_enabled(ring))
4044 return;
4045
4046 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
4047 rscctrl |= IXGBE_RSCCTL_RSCEN;
4048
4049 /* we must limit the number of descriptors so that the
4050 * total size of max desc * buf_len is not greater
4051 * than 65536
4052 */
4053 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
4054 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
4055}
4056
4057#define IXGBE_MAX_RX_DESC_POLL 10
4058static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
4059 struct ixgbe_ring *ring)
4060{
4061 struct ixgbe_hw *hw = &adapter->hw;
4062 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
4063 u32 rxdctl;
4064 u8 reg_idx = ring->reg_idx;
4065
4066 if (ixgbe_removed(hw->hw_addr))
4067 return;
4068
4069 if (hw->mac.type == ixgbe_mac_82598EB &&
4070 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
4071 return;
4072
4073 do {
4074 usleep_range(1000, 2000);
4075 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4076 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4077
4078 if (!wait_loop) {
4079 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
4080 "the polling period\n", reg_idx);
4081 }
4082}
4083
4084void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
4085 struct ixgbe_ring *ring)
4086{
4087 struct ixgbe_hw *hw = &adapter->hw;
4088 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
4089 u32 rxdctl;
4090 u8 reg_idx = ring->reg_idx;
4091
4092 if (ixgbe_removed(hw->hw_addr))
4093 return;
4094 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4095 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4096
4097 /* write value back with RXDCTL.ENABLE bit cleared */
4098 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4099
4100 if (hw->mac.type == ixgbe_mac_82598EB &&
4101 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
4102 return;
4103
4104 /* the hardware may take up to 100us to really disable the rx queue */
4105 do {
4106 udelay(10);
4107 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4108 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
4109
4110 if (!wait_loop) {
4111 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
4112 "the polling period\n", reg_idx);
4113 }
4114}
4115
4116void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
4117 struct ixgbe_ring *ring)
4118{
4119 struct ixgbe_hw *hw = &adapter->hw;
4120 union ixgbe_adv_rx_desc *rx_desc;
4121 u64 rdba = ring->dma;
4122 u32 rxdctl;
4123 u8 reg_idx = ring->reg_idx;
4124
4125 /* disable queue to avoid use of these values while updating state */
4126 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4127 ixgbe_disable_rx_queue(adapter, ring);
4128
4129 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
4130 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
4131 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
4132 ring->count * sizeof(union ixgbe_adv_rx_desc));
4133 /* Force flushing of IXGBE_RDLEN to prevent MDD */
4134 IXGBE_WRITE_FLUSH(hw);
4135
4136 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
4137 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
4138 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
4139
4140 ixgbe_configure_srrctl(adapter, ring);
4141 ixgbe_configure_rscctl(adapter, ring);
4142
4143 if (hw->mac.type == ixgbe_mac_82598EB) {
4144 /*
4145 * enable cache line friendly hardware writes:
4146 * PTHRESH=32 descriptors (half the internal cache),
4147 * this also removes ugly rx_no_buffer_count increment
4148 * HTHRESH=4 descriptors (to minimize latency on fetch)
4149 * WTHRESH=8 descriptors (to encourage burst writeback)
4150 */
4151 rxdctl &= ~0x3FFFFF;
4152 rxdctl |= 0x080420;
4153#if (PAGE_SIZE < 8192)
4154 /* RXDCTL.RLPML does not work on 82599 */
4155 } else if (hw->mac.type != ixgbe_mac_82599EB) {
4156 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4157 IXGBE_RXDCTL_RLPML_EN);
4158
4159 /* Limit the maximum frame size so we don't overrun the skb.
4160 * This can happen in SRIOV mode if the MTU of the VF is
4161 * higher than the MTU of the PF.
4162 */
4163 if (ring_uses_build_skb(ring) &&
4164 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
4165 rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
4166 IXGBE_RXDCTL_RLPML_EN;
4167#endif
4168 }
4169
4170 /* initialize rx_buffer_info */
4171 memset(ring->rx_buffer_info, 0,
4172 sizeof(struct ixgbe_rx_buffer) * ring->count);
4173
4174 /* initialize Rx descriptor 0 */
4175 rx_desc = IXGBE_RX_DESC(ring, 0);
4176 rx_desc->wb.upper.length = 0;
4177
4178 /* enable receive descriptor ring */
4179 rxdctl |= IXGBE_RXDCTL_ENABLE;
4180 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4181
4182 ixgbe_rx_desc_queue_enable(adapter, ring);
4183 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4184}
4185
4186static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
4187{
4188 struct ixgbe_hw *hw = &adapter->hw;
4189 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
4190 u16 pool = adapter->num_rx_pools;
4191
4192 /* PSRTYPE must be initialized in non 82598 adapters */
4193 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4194 IXGBE_PSRTYPE_UDPHDR |
4195 IXGBE_PSRTYPE_IPV4HDR |
4196 IXGBE_PSRTYPE_L2HDR |
4197 IXGBE_PSRTYPE_IPV6HDR;
4198
4199 if (hw->mac.type == ixgbe_mac_82598EB)
4200 return;
4201
4202 if (rss_i > 3)
4203 psrtype |= 2u << 29;
4204 else if (rss_i > 1)
4205 psrtype |= 1u << 29;
4206
4207 while (pool--)
4208 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4209}
4210
4211static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
4212{
4213 struct ixgbe_hw *hw = &adapter->hw;
4214 u32 reg_offset, vf_shift;
4215 u32 gcr_ext, vmdctl;
4216 int i;
4217
4218 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
4219 return;
4220
4221 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4222 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
4223 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
4224 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
4225 vmdctl |= IXGBE_VT_CTL_REPLEN;
4226 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
4227
4228 vf_shift = VMDQ_P(0) % 32;
4229 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
4230
4231 /* Enable only the PF's pool for Tx/Rx */
4232 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
4233 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
4234 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
4235 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
4236 if (adapter->bridge_mode == BRIDGE_MODE_VEB)
4237 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4238
4239 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
4240 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
4241
4242 /* clear VLAN promisc flag so VFTA will be updated if necessary */
4243 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4244
4245 /*
4246 * Set up VF register offsets for selected VT Mode,
4247 * i.e. 32 or 64 VFs for SR-IOV
4248 */
4249 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4250 case IXGBE_82599_VMDQ_8Q_MASK:
4251 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
4252 break;
4253 case IXGBE_82599_VMDQ_4Q_MASK:
4254 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
4255 break;
4256 default:
4257 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
4258 break;
4259 }
4260
4261 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4262
4263 for (i = 0; i < adapter->num_vfs; i++) {
4264 /* configure spoof checking */
4265 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
4266 adapter->vfinfo[i].spoofchk_enabled);
4267
4268 /* Enable/Disable RSS query feature */
4269 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
4270 adapter->vfinfo[i].rss_query_enabled);
4271 }
4272}
4273
4274static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
4275{
4276 struct ixgbe_hw *hw = &adapter->hw;
4277 struct net_device *netdev = adapter->netdev;
4278 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4279 struct ixgbe_ring *rx_ring;
4280 int i;
4281 u32 mhadd, hlreg0;
4282
4283#ifdef IXGBE_FCOE
4284 /* adjust max frame to be able to do baby jumbo for FCoE */
4285 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
4286 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
4287 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4288
4289#endif
4290
4291 /* adjust max frame to be at least the size of a standard frame */
4292 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4293 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
4294
4295 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4296 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
4297 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4298 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
4299
4300 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4301 }
4302
4303 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4304 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
4305 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4306 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4307
4308 /*
4309 * Setup the HW Rx Head and Tail Descriptor Pointers and
4310 * the Base and Length of the Rx Descriptor Ring
4311 */
4312 for (i = 0; i < adapter->num_rx_queues; i++) {
4313 rx_ring = adapter->rx_ring[i];
4314
4315 clear_ring_rsc_enabled(rx_ring);
4316 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4317 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4318
4319 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4320 set_ring_rsc_enabled(rx_ring);
4321
4322 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
4323 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4324
4326 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
4327 continue;
4328
4329 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4330
4331#if (PAGE_SIZE < 8192)
4332 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4333 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4334
4335 if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
4336 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
4337 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4338#endif
4339 }
4340}
4341
4342static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
4343{
4344 struct ixgbe_hw *hw = &adapter->hw;
4345 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4346
4347 switch (hw->mac.type) {
4348 case ixgbe_mac_82598EB:
4349 /*
4350 * For VMDq support of different descriptor types or
4351 * buffer sizes through the use of multiple SRRCTL
4352 * registers, RDRXCTL.MVMEN must be set to 1
4353 */
4359 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
4360 break;
4361 case ixgbe_mac_X550:
4362 case ixgbe_mac_X550EM_x:
4363 case ixgbe_mac_x550em_a:
4364 if (adapter->num_vfs)
4365 rdrxctl |= IXGBE_RDRXCTL_PSP;
4366 /* fall through */
4367 case ixgbe_mac_82599EB:
4368 case ixgbe_mac_X540:
4369 /* Disable RSC for ACK packets */
4370 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
4371 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
4372 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4373 /* hardware requires some bits to be set by default */
4374 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
4375 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4376 break;
4377 default:
4378 /* We should do nothing since we don't know this hardware */
4379 return;
4380 }
4381
4382 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4383}
4384
4385/**
4386 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
4387 * @adapter: board private structure
4388 *
4389 * Configure the Rx unit of the MAC after a reset.
4390 **/
4391static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
4392{
4393 struct ixgbe_hw *hw = &adapter->hw;
4394 int i;
4395 u32 rxctrl, rfctl;
4396
4397 /* disable receives while setting up the descriptors */
4398 hw->mac.ops.disable_rx(hw);
4399
4400 ixgbe_setup_psrtype(adapter);
4401 ixgbe_setup_rdrxctl(adapter);
4402
4403 /* RSC Setup */
4404 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4405 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
4406 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
4407 rfctl |= IXGBE_RFCTL_RSC_DIS;
4408
4409 /* disable NFS filtering */
4410 rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
4411 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4412
4413 /* Program registers for the distribution of queues */
4414 ixgbe_setup_mrqc(adapter);
4415
4416 /* set_rx_buffer_len must be called before ring initialization */
4417 ixgbe_set_rx_buffer_len(adapter);
4418
4419 /*
4420 * Setup the HW Rx Head and Tail Descriptor Pointers and
4421 * the Base and Length of the Rx Descriptor Ring
4422 */
4423 for (i = 0; i < adapter->num_rx_queues; i++)
4424 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
4425
4426 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4427 /* disable drop enable for 82598 parts */
4428 if (hw->mac.type == ixgbe_mac_82598EB)
4429 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4430
4431 /* enable all receives */
4432 rxctrl |= IXGBE_RXCTRL_RXEN;
4433 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4434}
4435
4436static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
4437 __be16 proto, u16 vid)
4438{
4439 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4440 struct ixgbe_hw *hw = &adapter->hw;
4441
4442 /* add VID to filter table */
4443 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4444 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
4445
4446 set_bit(vid, adapter->active_vlans);
4447
4448 return 0;
4449}
4450
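/* Locate the VLVF entry that holds the given VLAN ID.  Entry 0 is never
 * scanned (the loop below stops before reaching it), so a return value
 * of 0 doubles as "not found"; VLAN 0 is short-cut for the same reason.
 */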
4451static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
4452{
4453 u32 vlvf;
4454 int idx;
4455
4456 /* short cut the special case */
4457 if (vlan == 0)
4458 return 0;
4459
4460 /* Search for the vlan id in the VLVF entries */
4461 for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
4462 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
4463 if ((vlvf & VLAN_VID_MASK) == vlan)
4464 break;
4465 }
4466
4467 return idx;
4468}
4469
4470void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
4471{
4472 struct ixgbe_hw *hw = &adapter->hw;
4473 u32 bits, word;
4474 int idx;
4475
4476 idx = ixgbe_find_vlvf_entry(hw, vid);
4477 if (!idx)
4478 return;
4479
4480 /* See if any other pools are set for this VLAN filter
4481 * entry other than the PF.
4482 */
4483 word = idx * 2 + (VMDQ_P(0) / 32);
4484 bits = ~BIT(VMDQ_P(0) % 32);
4485 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4486
4487 /* Disable the filter so this falls into the default pool */
4488 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
4489 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4490 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
4491 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
4492 }
4493}
4494
4495static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
4496 __be16 proto, u16 vid)
4497{
4498 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4499 struct ixgbe_hw *hw = &adapter->hw;
4500
4501 /* remove VID from filter table */
4502 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4503 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
4504
4505 clear_bit(vid, adapter->active_vlans);
4506
4507 return 0;
4508}
4509
4510/**
4511 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
4512 * @adapter: driver data
4513 */
4514static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
4515{
4516 struct ixgbe_hw *hw = &adapter->hw;
4517 u32 vlnctrl;
4518 int i, j;
4519
4520 switch (hw->mac.type) {
4521 case ixgbe_mac_82598EB:
4522 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4523 vlnctrl &= ~IXGBE_VLNCTRL_VME;
4524 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4525 break;
4526 case ixgbe_mac_82599EB:
4527 case ixgbe_mac_X540:
4528 case ixgbe_mac_X550:
4529 case ixgbe_mac_X550EM_x:
4530 case ixgbe_mac_x550em_a:
4531 for (i = 0; i < adapter->num_rx_queues; i++) {
4532 struct ixgbe_ring *ring = adapter->rx_ring[i];
4533
4534 if (!netif_is_ixgbe(ring->netdev))
4535 continue;
4536
4537 j = ring->reg_idx;
4538 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4539 vlnctrl &= ~IXGBE_RXDCTL_VME;
4540 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4541 }
4542 break;
4543 default:
4544 break;
4545 }
4546}
4547
4548/**
4549 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
4550 * @adapter: driver data
4551 */
4552static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4553{
4554 struct ixgbe_hw *hw = &adapter->hw;
4555 u32 vlnctrl;
4556 int i, j;
4557
4558 switch (hw->mac.type) {
4559 case ixgbe_mac_82598EB:
4560 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4561 vlnctrl |= IXGBE_VLNCTRL_VME;
4562 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4563 break;
4564 case ixgbe_mac_82599EB:
4565 case ixgbe_mac_X540:
4566 case ixgbe_mac_X550:
4567 case ixgbe_mac_X550EM_x:
4568 case ixgbe_mac_x550em_a:
4569 for (i = 0; i < adapter->num_rx_queues; i++) {
4570 struct ixgbe_ring *ring = adapter->rx_ring[i];
4571
4572 if (!netif_is_ixgbe(ring->netdev))
4573 continue;
4574
4575 j = ring->reg_idx;
4576 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4577 vlnctrl |= IXGBE_RXDCTL_VME;
4578 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4579 }
4580 break;
4581 default:
4582 break;
4583 }
4584}
4585
4586static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4587{
4588 struct ixgbe_hw *hw = &adapter->hw;
4589 u32 vlnctrl, i;
4590
4591 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4592
4593 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
4594 /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
4595 vlnctrl |= IXGBE_VLNCTRL_VFE;
4596 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4597 } else {
4598 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4599 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4600 return;
4601 }
4602
4603 /* Nothing to do for 82598 */
4604 if (hw->mac.type == ixgbe_mac_82598EB)
4605 return;
4606
4607 /* We are already in VLAN promisc, nothing to do */
4608 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
4609 return;
4610
4611 /* Set flag so we don't redo unnecessary work */
4612 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4613
4614 /* Add PF to all active pools */
4615 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4616 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4617 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4618
4619 vlvfb |= BIT(VMDQ_P(0) % 32);
4620 IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4621 }
4622
4623 /* Set all bits in the VLAN filter table array */
4624 for (i = hw->mac.vft_size; i--;)
4625 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4626}
4627
4628#define VFTA_BLOCK_SIZE 8
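/* ixgbe_scrub_vfta walks the VLAN filter table in blocks of
 * VFTA_BLOCK_SIZE registers (8 x 32 = 256 VLAN IDs per call), dropping
 * the PF from stale VLVF pool entries and rebuilding the VFTA bits from
 * the software active_vlans bitmap.
 */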
4629static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4630{
4631 struct ixgbe_hw *hw = &adapter->hw;
4632 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4633 u32 vid_start = vfta_offset * 32;
4634 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4635 u32 i, vid, word, bits;
4636
4637 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4638 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
4639
4640 /* pull VLAN ID from VLVF */
4641 vid = vlvf & VLAN_VID_MASK;
4642
4643 /* only concern ourselves with a certain range of VLANs */
4644 if (vid < vid_start || vid >= vid_end)
4645 continue;
4646
4647 if (vlvf) {
4648 /* record VLAN ID in VFTA */
4649 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4650
4651 /* if PF is part of this then continue */
4652 if (test_bit(vid, adapter->active_vlans))
4653 continue;
4654 }
4655
4656 /* remove PF from the pool */
4657 word = i * 2 + VMDQ_P(0) / 32;
4658 bits = ~BIT(VMDQ_P(0) % 32);
4659 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4660 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4661 }
4662
4663 /* extract values from active_vlans and write back to VFTA */
4664 for (i = VFTA_BLOCK_SIZE; i--;) {
4665 vid = (vfta_offset + i) * 32;
4666 word = vid / BITS_PER_LONG;
4667 bits = vid % BITS_PER_LONG;
4668
4669 vfta[i] |= adapter->active_vlans[word] >> bits;
4670
4671 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4672 }
4673}
4674
4675static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4676{
4677 struct ixgbe_hw *hw = &adapter->hw;
4678 u32 vlnctrl, i;
4679
4680 /* Set VLAN filtering to enabled */
4681 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4682 vlnctrl |= IXGBE_VLNCTRL_VFE;
4683 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4684
4685 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
4686 hw->mac.type == ixgbe_mac_82598EB)
4687 return;
4688
4689 /* We are not in VLAN promisc, nothing to do */
4690 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4691 return;
4692
4693 /* Set flag so we don't redo unnecessary work */
4694 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4695
4696 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
4697 ixgbe_scrub_vfta(adapter, i);
4698}
4699
4700static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4701{
4702 u16 vid = 1;
4703
4704 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4705
4706 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
4707 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4708}
4709
4710/**
4711 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
4712 * @netdev: network interface device structure
4713 *
4714 * Writes multicast address list to the MTA hash table.
4715 * Returns: -ENOMEM on failure
4716 *                0 on no addresses written
4717 *                X on writing X addresses to MTA
4718 **/
4719static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4720{
4721 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4722 struct ixgbe_hw *hw = &adapter->hw;
4723
4724 if (!netif_running(netdev))
4725 return 0;
4726
4727 if (hw->mac.ops.update_mc_addr_list)
4728 hw->mac.ops.update_mc_addr_list(hw, netdev);
4729 else
4730 return -ENOMEM;
4731
4732#ifdef CONFIG_PCI_IOV
4733 ixgbe_restore_vf_multicasts(adapter);
4734#endif
4735
4736 return netdev_mc_count(netdev);
4737}
4738
4739#ifdef CONFIG_PCI_IOV
4740void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4741{
4742 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4743 struct ixgbe_hw *hw = &adapter->hw;
4744 int i;
4745
4746 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4747 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4748
4749 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4750 hw->mac.ops.set_rar(hw, i,
4751 mac_table->addr,
4752 mac_table->pool,
4753 IXGBE_RAH_AV);
4754 else
4755 hw->mac.ops.clear_rar(hw, i);
4756 }
4757}
4758
4759#endif
4760static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4761{
4762 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4763 struct ixgbe_hw *hw = &adapter->hw;
4764 int i;
4765
4766 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4767 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
4768 continue;
4769
4770 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4771
4772 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4773 hw->mac.ops.set_rar(hw, i,
4774 mac_table->addr,
4775 mac_table->pool,
4776 IXGBE_RAH_AV);
4777 else
4778 hw->mac.ops.clear_rar(hw, i);
4779 }
4780}
4781
4782static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4783{
4784 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4785 struct ixgbe_hw *hw = &adapter->hw;
4786 int i;
4787
4788 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4789 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4790 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4791 }
4792
4793 ixgbe_sync_mac_table(adapter);
4794}
4795
4796static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
4797{
4798 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4799 struct ixgbe_hw *hw = &adapter->hw;
4800 int i, count = 0;
4801
4802 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4803 /* do not count the default RAR as available */
4804 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
4805 continue;
4806
4807 /* only count unused entries and addresses that belong to this pool */
4808 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
4809 if (mac_table->pool != pool)
4810 continue;
4811 }
4812
4813 count++;
4814 }
4815
4816 return count;
4817}
4818
4819
4820static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
4821{
4822 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4823 struct ixgbe_hw *hw = &adapter->hw;
4824
4825 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
4826 mac_table->pool = VMDQ_P(0);
4827
4828 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
4829
4830 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
4831 IXGBE_RAH_AV);
4832}
4833
4834int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4835 const u8 *addr, u16 pool)
4836{
4837 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4838 struct ixgbe_hw *hw = &adapter->hw;
4839 int i;
4840
4841 if (is_zero_ether_addr(addr))
4842 return -EINVAL;
4843
4844 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4845 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4846 continue;
4847
4848 ether_addr_copy(mac_table->addr, addr);
4849 mac_table->pool = pool;
4850
4851 mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
4852 IXGBE_MAC_STATE_IN_USE;
4853
4854 ixgbe_sync_mac_table(adapter);
4855
4856 return i;
4857 }
4858
4859 return -ENOMEM;
4860}
4861
4862int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
4863 const u8 *addr, u16 pool)
4864{
4865 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4866 struct ixgbe_hw *hw = &adapter->hw;
4867 int i;
4868
4869 if (is_zero_ether_addr(addr))
4870 return -EINVAL;
4871
4872 /* search table for addr, if found clear IN_USE flag and sync */
4873 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4874 /* we can only delete an entry if it is in use */
4875 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
4876 continue;
4877
4878 if (mac_table->pool != pool)
4879 continue;
4880
4881 if (!ether_addr_equal(addr, mac_table->addr))
4882 continue;
4883
4884 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4885 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4886
4887 ixgbe_sync_mac_table(adapter);
4888
4889 return 0;
4890 }
4891
4892 return -ENOMEM;
4893}
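/* Usage sketch (illustrative, not taken from the original source): callers
 * treat a non-negative return from ixgbe_add_mac_filter() as the consumed
 * RAR index and a negative value as an errno:
 *
 *	int idx = ixgbe_add_mac_filter(adapter, mac, VMDQ_P(0));
 *	if (idx < 0)
 *		return idx;	/- -EINVAL (bad address) or -ENOMEM (table full)
 *
 * ixgbe_del_mac_filter() is the symmetric teardown; both paths push the
 * change to hardware through ixgbe_sync_mac_table().  "mac" is a
 * placeholder for a caller-supplied address.
 */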
4894
4895 /**
4896 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
4897 * @netdev: network interface device structure
4898 * @vfn: pool to associate with unicast addresses
4899 *
4900 * Writes unicast address list to the RAR table.
4901 * Returns: -ENOMEM on failure/insufficient address space
4902 * 0 on no addresses written
4903 * X on writing X addresses to the RAR table
4904 **/
4905static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
4906{
4907 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4908 int count = 0;
4909
4910 /* return ENOMEM indicating insufficient memory for addresses */
4911 if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
4912 return -ENOMEM;
4913
4914 if (!netdev_uc_empty(netdev)) {
4915 struct netdev_hw_addr *ha;
4916 netdev_for_each_uc_addr(ha, netdev) {
4917 ixgbe_del_mac_filter(adapter, ha->addr, vfn);
4918 ixgbe_add_mac_filter(adapter, ha->addr, vfn);
4919 count++;
4920 }
4921 }
4922 return count;
4923}
4924
4925static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
4926{
4927 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4928 int ret;
4929
4930 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
4931
4932 return min_t(int, ret, 0);
4933}
4934
4935static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4936{
4937 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4938
4939 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
4940
4941 return 0;
4942}
4943
4944 /**
4945 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
4946 * @netdev: network interface device structure
4947 *
4948 * The set_rx_method entry point is called whenever the unicast/multicast
4949 * address list or the network interface flags are updated.  This routine is
4950 * responsible for configuring the hardware for proper unicast, multicast and
4951 * promiscuous mode.
4952 **/
4953void ixgbe_set_rx_mode(struct net_device *netdev)
4954{
4955 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4956 struct ixgbe_hw *hw = &adapter->hw;
4957 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4958 netdev_features_t features = netdev->features;
4959 int count;
4960
4961 /* Check for Promiscuous and All Multicast modes */
4962 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4963
4964 /* set all bits that we expect to always be set */
4965 fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
4966 fctrl |= IXGBE_FCTRL_BAM;
4967 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
4968 fctrl |= IXGBE_FCTRL_PMCF; /* Pass MAC control frames */
4969
4970 /* clear the bits we are changing the status of */
4971 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4972 if (netdev->flags & IFF_PROMISC) {
4973 hw->addr_ctrl.user_set_promisc = true;
4974 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4975 vmolr |= IXGBE_VMOLR_MPE;
4976 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4977 } else {
4978 if (netdev->flags & IFF_ALLMULTI) {
4979 fctrl |= IXGBE_FCTRL_MPE;
4980 vmolr |= IXGBE_VMOLR_MPE;
4981 }
4982 hw->addr_ctrl.user_set_promisc = false;
4983 }
4984
4985 /*
4986 * Write addresses to available RAR registers, if there is not
4987 * sufficient space to store all the addresses then enable
4988 * unicast promiscuous mode
4989 */
4990 if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4991 fctrl |= IXGBE_FCTRL_UPE;
4992 vmolr |= IXGBE_VMOLR_ROPE;
4993 }
4994
4995 /* Write addresses to the MTA, if the attempt fails
4996 * then we should just turn on promiscuous mode so
4997 * that we can at least receive multicast traffic
4998 */
4999 count = ixgbe_write_mc_addr_list(netdev);
5000 if (count < 0) {
5001 fctrl |= IXGBE_FCTRL_MPE;
5002 vmolr |= IXGBE_VMOLR_MPE;
5003 } else if (count) {
5004 vmolr |= IXGBE_VMOLR_ROMPE;
5005 }
5006
5007 if (hw->mac.type != ixgbe_mac_82598EB) {
5008 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
5009 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
5010 IXGBE_VMOLR_ROPE);
5011 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
5012 }
5013
5014 /* This is useful for sniffing bad packets. */
5015 if (features & NETIF_F_RXALL) {
5016 /* UPE and MPE are already handled by the normal PROMISC
5017 * logic above */
5018 fctrl |= (IXGBE_FCTRL_SBP |
5019 IXGBE_FCTRL_BAM |
5020 IXGBE_FCTRL_PMCF);
5021
5022 fctrl &= ~(IXGBE_FCTRL_DPF);
5023 /* NOTE: VLAN filtering is disabled by setting PROMISC */
5024 }
5025
5026 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5027
5028 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5029 ixgbe_vlan_strip_enable(adapter);
5030 else
5031 ixgbe_vlan_strip_disable(adapter);
5032
5033 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
5034 ixgbe_vlan_promisc_disable(adapter);
5035 else
5036 ixgbe_vlan_promisc_enable(adapter);
5037}
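/* Example triggers (illustrative): the stack calls this routine whenever
 * interface flags or address lists change, e.g. from userspace:
 *
 *	ip link set dev eth0 promisc on       # IFF_PROMISC -> FCTRL UPE|MPE
 *	ip link set dev eth0 allmulticast on  # IFF_ALLMULTI -> FCTRL MPE
 *
 * "eth0" is a placeholder interface name.
 */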
5038
5039static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
5040{
5041 int q_idx;
5042
5043 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
5044 napi_enable(&adapter->q_vector[q_idx]->napi);
5045}
5046
5047static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
5048{
5049 int q_idx;
5050
5051 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
5052 napi_disable(&adapter->q_vector[q_idx]->napi);
5053}
5054
5055static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
5056{
5057 struct ixgbe_hw *hw = &adapter->hw;
5058 u32 vxlanctrl;
5059
5060 if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
5061 IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
5062 return;
5063
5064 vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
5065 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
5066
5067 if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
5068 adapter->vxlan_port = 0;
5069
5070 if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
5071 adapter->geneve_port = 0;
5072}
5073
5074#ifdef CONFIG_IXGBE_DCB
5075 /**
5076 * ixgbe_configure_dcb - Configure DCB hardware
5077 * @adapter: ixgbe adapter struct
5078 *
5079 * This is called by the driver on open to configure the DCB hardware.
5080 * This is also called by the gennetlink interface when reconfiguring
5081 * the DCB state.
5082 */
5083static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
5084{
5085 struct ixgbe_hw *hw = &adapter->hw;
5086 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
5087
5088 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
5089 if (hw->mac.type == ixgbe_mac_82598EB)
5090 netif_set_gso_max_size(adapter->netdev, 65536);
5091 return;
5092 }
5093
5094 if (hw->mac.type == ixgbe_mac_82598EB)
5095 netif_set_gso_max_size(adapter->netdev, 32768);
5096
5097#ifdef IXGBE_FCOE
5098 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
5099 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
5100#endif
5101
5102
5103 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
5104 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5105 DCB_TX_CONFIG);
5106 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5107 DCB_RX_CONFIG);
5108 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
5109 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
5110 ixgbe_dcb_hw_ets(&adapter->hw,
5111 adapter->ixgbe_ieee_ets,
5112 max_frame);
5113 ixgbe_dcb_hw_pfc_config(&adapter->hw,
5114 adapter->ixgbe_ieee_pfc->pfc_en,
5115 adapter->ixgbe_ieee_ets->prio_tc);
5116 }
5117
5118
5119 if (hw->mac.type != ixgbe_mac_82598EB) {
5120 u32 msb = 0;
5121 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
5122
5123 while (rss_i) {
5124 msb++;
5125 rss_i >>= 1;
5126 }
5127
5128
5129 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
5130 }
5131}
5132#endif
5133
5134 /* Additional bittime to account for IXGBE framing */
5135#define IXGBE_ETH_FRAMING 20
5136
5137 /**
5138 * ixgbe_hpbthresh - calculate high water mark for flow control
5139 *
5140 * @adapter: board private structure to calculate for
5141 * @pb: packet buffer to calculate
5142 */
5143static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
5144{
5145 struct ixgbe_hw *hw = &adapter->hw;
5146 struct net_device *dev = adapter->netdev;
5147 int link, tc, kb, marker;
5148 u32 dv_id, rx_pba;
5149
5150 /* Calculate max LAN frame size */
5151 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
5152
5153#ifdef IXGBE_FCOE
5154 /* FCoE traffic class uses FCOE jumbo frames */
5155 if ((dev->features & NETIF_F_FCOE_MTU) &&
5156 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5157 (pb == ixgbe_fcoe_get_tc(adapter)))
5158 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5159#endif
5160
5161 /* Calculate delay value for device */
5162 switch (hw->mac.type) {
5163 case ixgbe_mac_X540:
5164 case ixgbe_mac_X550:
5165 case ixgbe_mac_X550EM_x:
5166 case ixgbe_mac_x550em_a:
5167 dv_id = IXGBE_DV_X540(link, tc);
5168 break;
5169 default:
5170 dv_id = IXGBE_DV(link, tc);
5171 break;
5172 }
5173
5174 /* Loopback switch introduces additional latency */
5175 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5176 dv_id += IXGBE_B2BT(tc);
5177
5178 /* Delay value is calculated in bit times, convert to KB */
5179 kb = IXGBE_BT2KB(dv_id);
5180 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
5181
5182 marker = rx_pba - kb;
5183
5184 /* It is possible that the packet buffer is not large enough
5185 * to provide required headroom. In this case throw an error
5186 * to user and do the best we can.
5187 */
5188 if (marker < 0) {
5189 e_warn(drv, "Packet Buffer(%i) cannot provide enough "
5190 "headroom to support flow control. "
5191 "Decrease MTU or number of traffic classes\n", pb);
5192 marker = tc + 1;
5193 }
5194
5195 return marker;
5196}
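/* Rough worked example (illustrative, not the exact macro expansion): with
 * a 1500-byte MTU the frame size used above is 1500 + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + IXGBE_ETH_FRAMING (20) = 1538 bytes.  IXGBE_DV() turns
 * that into a delay value in bit times, IXGBE_BT2KB() rounds it up to KB of
 * required headroom, and the high water mark is the packet buffer size in
 * KB minus that headroom.  If the headroom does not fit, the code falls
 * back to a single frame (tc + 1) and emits the warning above.
 */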
5197
5198 /**
5199 * ixgbe_lpbthresh - calculate low water mark for flow control
5200 *
5201 * @adapter: board private structure to calculate for
5202 * @pb: packet buffer to calculate
5203 */
5204static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
5205{
5206 struct ixgbe_hw *hw = &adapter->hw;
5207 struct net_device *dev = adapter->netdev;
5208 int tc;
5209 u32 dv_id;
5210
5211 /* Calculate max LAN frame size */
5212 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
5213
5214#ifdef IXGBE_FCOE
5215 /* FCoE traffic class uses FCOE jumbo frames */
5216 if ((dev->features & NETIF_F_FCOE_MTU) &&
5217 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5218 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
5219 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5220#endif
5221
5222 /* Calculate delay value for device */
5223 switch (hw->mac.type) {
5224 case ixgbe_mac_X540:
5225 case ixgbe_mac_X550:
5226 case ixgbe_mac_X550EM_x:
5227 case ixgbe_mac_x550em_a:
5228 dv_id = IXGBE_LOW_DV_X540(tc);
5229 break;
5230 default:
5231 dv_id = IXGBE_LOW_DV(tc);
5232 break;
5233 }
5234
5235 /* Delay value is calculated in bit times, convert to KB */
5236 return IXGBE_BT2KB(dv_id);
5237}
5238
5239 /*
5240 * ixgbe_pbthresh_setup - calculate and setup high/low water marks
5241 */
5242static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
5243{
5244 struct ixgbe_hw *hw = &adapter->hw;
5245 int num_tc = adapter->hw_tcs;
5246 int i;
5247
5248 if (!num_tc)
5249 num_tc = 1;
5250
5251 for (i = 0; i < num_tc; i++) {
5252 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
5253 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
5254
5255 /* Low water marks must not be larger than high water marks */
5256 if (hw->fc.low_water[i] > hw->fc.high_water[i])
5257 hw->fc.low_water[i] = 0;
5258 }
5259
5260 for (; i < MAX_TRAFFIC_CLASS; i++)
5261 hw->fc.high_water[i] = 0;
5262}
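/* Design note (as read from the code above): a low water mark above the
 * high water mark would invert the XON/XOFF thresholds, so the loop zeroes
 * low_water for such a traffic class instead of guessing, and any traffic
 * classes beyond num_tc get a zero high water mark so they never assert
 * flow control.
 */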
5263
5264static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
5265{
5266 struct ixgbe_hw *hw = &adapter->hw;
5267 int hdrm;
5268 u8 tc = adapter->hw_tcs;
5269
5270 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5271 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5272 hdrm = 32 << adapter->fdir_pballoc;
5273 else
5274 hdrm = 0;
5275
5276 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
5277 ixgbe_pbthresh_setup(adapter);
5278}
5279
5280static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
5281{
5282 struct ixgbe_hw *hw = &adapter->hw;
5283 struct hlist_node *node2;
5284 struct ixgbe_fdir_filter *filter;
5285
5286 spin_lock(&adapter->fdir_perfect_lock);
5287
5288 if (!hlist_empty(&adapter->fdir_filter_list))
5289 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
5290
5291 hlist_for_each_entry_safe(filter, node2,
5292 &adapter->fdir_filter_list, fdir_node) {
5293 ixgbe_fdir_write_perfect_filter_82599(hw,
5294 &filter->filter,
5295 filter->sw_idx,
5296 (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
5297 IXGBE_FDIR_DROP_QUEUE :
5298 adapter->rx_ring[filter->action]->reg_idx);
5299 }
5300
5301 spin_unlock(&adapter->fdir_perfect_lock);
5302}
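/* Example (illustrative): the filters replayed here are the ones userspace
 * installs via ethtool's n-tuple interface, e.g.:
 *
 *	ethtool -U eth0 flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 action 2
 *	ethtool -U eth0 flow-type udp4 src-port 53 action -1
 *
 * "eth0" and the match values are placeholders; "action -1" requests a
 * drop and corresponds to IXGBE_FDIR_DROP_QUEUE above.
 */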
5303
5304static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
5305 struct ixgbe_adapter *adapter)
5306{
5307 struct ixgbe_hw *hw = &adapter->hw;
5308 u32 vmolr;
5309
5310 /* No unicast promiscuous support for VMDQ devices. */
5311 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
5312 vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
5313
5314 /* clear the affected bit */
5315 vmolr &= ~IXGBE_VMOLR_MPE;
5316
5317 if (dev->flags & IFF_ALLMULTI) {
5318 vmolr |= IXGBE_VMOLR_MPE;
5319 } else {
5320 vmolr |= IXGBE_VMOLR_ROMPE;
5321 hw->mac.ops.update_mc_addr_list(hw, dev);
5322 }
5323 ixgbe_write_uc_addr_list(adapter->netdev, pool);
5324 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
5325}
5326
5327 /**
5328 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
5329 * @rx_ring: ring to free buffers from
5330 **/
5331static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
5332{
5333 u16 i = rx_ring->next_to_clean;
5334 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
5335
5336 /* Free all the Rx ring sk_buffs */
5337 while (i != rx_ring->next_to_alloc) {
5338 if (rx_buffer->skb) {
5339 struct sk_buff *skb = rx_buffer->skb;
5340 if (IXGBE_CB(skb)->page_released)
5341 dma_unmap_page_attrs(rx_ring->dev,
5342 IXGBE_CB(skb)->dma,
5343 ixgbe_rx_pg_size(rx_ring),
5344 DMA_FROM_DEVICE,
5345 IXGBE_RX_DMA_ATTR);
5346 dev_kfree_skb(skb);
5347 }
5348
5349 /* Invalidate cache lines that may have been written to by
5350 * device so that we avoid corrupting memory.
5351 */
5352 dma_sync_single_range_for_cpu(rx_ring->dev,
5353 rx_buffer->dma,
5354 rx_buffer->page_offset,
5355 ixgbe_rx_bufsz(rx_ring),
5356 DMA_FROM_DEVICE);
5357
5358 /* free resources associated with mapping */
5359 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
5360 ixgbe_rx_pg_size(rx_ring),
5361 DMA_FROM_DEVICE,
5362 IXGBE_RX_DMA_ATTR);
5363 __page_frag_cache_drain(rx_buffer->page,
5364 rx_buffer->pagecnt_bias);
5365
5366 i++;
5367 rx_buffer++;
5368 if (i == rx_ring->count) {
5369 i = 0;
5370 rx_buffer = rx_ring->rx_buffer_info;
5371 }
5372 }
5373
5374 rx_ring->next_to_alloc = 0;
5375 rx_ring->next_to_clean = 0;
5376 rx_ring->next_to_use = 0;
5377}
5378
5379static int ixgbe_fwd_ring_up(struct net_device *vdev,
5380 struct ixgbe_fwd_adapter *accel)
5381{
5382 struct ixgbe_adapter *adapter = accel->real_adapter;
5383 int i, baseq, err;
5384
5385 if (!test_bit(accel->pool, adapter->fwd_bitmask))
5386 return 0;
5387
5388 baseq = accel->pool * adapter->num_rx_queues_per_pool;
5389 netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
5390 accel->pool, adapter->num_rx_pools,
5391 baseq, baseq + adapter->num_rx_queues_per_pool);
5392
5393 accel->netdev = vdev;
5394 accel->rx_base_queue = baseq;
5395 accel->tx_base_queue = baseq;
5396
5397 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5398 adapter->rx_ring[baseq + i]->netdev = vdev;
5399
5400 /* guarantee all rings are updated before we update the
5401 * MAC address filter.
5402 */
5403 wmb();
5404
5405 /* ixgbe_add_mac_filter will return an index if it succeeds, so we
5406 * need to only treat it as an error value if it is negative.
5407 */
5408 err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
5409 VMDQ_P(accel->pool));
5410 if (err >= 0) {
5411 ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
5412 return 0;
5413 }
5414
5415 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5416 adapter->rx_ring[baseq + i]->netdev = NULL;
5417
5418 return err;
5419}
5420
5421static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
5422{
5423 if (netif_is_macvlan(upper)) {
5424 struct macvlan_dev *dfwd = netdev_priv(upper);
5425 struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
5426
5427 if (dfwd->fwd_priv)
5428 ixgbe_fwd_ring_up(upper, vadapter);
5429 }
5430
5431 return 0;
5432}
5433
5434static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
5435{
5436 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5437 ixgbe_upper_dev_walk, NULL);
5438}
5439
5440static void ixgbe_configure(struct ixgbe_adapter *adapter)
5441{
5442 struct ixgbe_hw *hw = &adapter->hw;
5443
5444 ixgbe_configure_pb(adapter);
5445#ifdef CONFIG_IXGBE_DCB
5446 ixgbe_configure_dcb(adapter);
5447#endif
5448 /*
5449 * We must restore virtualization before VLANs or else
5450 * the VLVF registers will not be populated
5451 */
5452 ixgbe_configure_virtualization(adapter);
5453
5454 ixgbe_set_rx_mode(adapter->netdev);
5455 ixgbe_restore_vlan(adapter);
5456 ixgbe_ipsec_restore(adapter);
5457
5458 switch (hw->mac.type) {
5459 case ixgbe_mac_82599EB:
5460 case ixgbe_mac_X540:
5461 hw->mac.ops.disable_rx_buff(hw);
5462 break;
5463 default:
5464 break;
5465 }
5466
5467 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5468 ixgbe_init_fdir_signature_82599(&adapter->hw,
5469 adapter->fdir_pballoc);
5470 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5471 ixgbe_init_fdir_perfect_82599(&adapter->hw,
5472 adapter->fdir_pballoc);
5473 ixgbe_fdir_filter_restore(adapter);
5474 }
5475
5476 switch (hw->mac.type) {
5477 case ixgbe_mac_82599EB:
5478 case ixgbe_mac_X540:
5479 hw->mac.ops.enable_rx_buff(hw);
5480 break;
5481 default:
5482 break;
5483 }
5484
5485#ifdef CONFIG_IXGBE_DCA
5486
5487 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
5488 ixgbe_setup_dca(adapter);
5489#endif
5490
5491#ifdef IXGBE_FCOE
5492
5493 ixgbe_configure_fcoe(adapter);
5494
5495#endif
5496 ixgbe_configure_tx(adapter);
5497 ixgbe_configure_rx(adapter);
5498 ixgbe_configure_dfwd(adapter);
5499}
5500
5501 /**
5502 * ixgbe_sfp_link_config - set up SFP+ link
5503 * @adapter: pointer to private adapter struct
5504 **/
5505static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
5506{
5507 /*
5508 * We are assuming the worst case scenario here, and that
5509 * is that an SFP was inserted/removed after the reset
5510 * but before SFP detection was enabled.  As such the best
5511 * solution is to just start searching as soon as we start
5512 */
5513 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5514 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5515
5516 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5517 adapter->sfp_poll_time = 0;
5518}
5519
5520 /**
5521 * ixgbe_non_sfp_link_config - set up non-SFP+ link
5522 * @hw: pointer to private hardware struct
5523 *
5524 * Returns 0 on success, negative on failure
5525 **/
5526static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
5527{
5528 u32 speed;
5529 bool autoneg, link_up = false;
5530 int ret = IXGBE_ERR_LINK_SETUP;
5531
5532 if (hw->mac.ops.check_link)
5533 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
5534
5535 if (ret)
5536 return ret;
5537
5538 speed = hw->phy.autoneg_advertised;
5539 if ((!speed) && (hw->mac.ops.get_link_capabilities))
5540 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5541 &autoneg);
5542 if (ret)
5543 return ret;
5544
5545 if (hw->mac.ops.setup_link)
5546 ret = hw->mac.ops.setup_link(hw, speed, link_up);
5547
5548 return ret;
5549}
5550
5551static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5552{
5553 struct ixgbe_hw *hw = &adapter->hw;
5554 u32 gpie = 0;
5555
5556 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5557 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5558 IXGBE_GPIE_OCD;
5559 gpie |= IXGBE_GPIE_EIAME;
5560 /*
5561 * use EIAM to auto-mask when MSI-X interrupt is asserted
5562 * this saves a register write for every interrupt
5563 */
5564 switch (hw->mac.type) {
5565 case ixgbe_mac_82598EB:
5566 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5567 break;
5568 case ixgbe_mac_82599EB:
5569 case ixgbe_mac_X540:
5570 case ixgbe_mac_X550:
5571 case ixgbe_mac_X550EM_x:
5572 case ixgbe_mac_x550em_a:
5573 default:
5574 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5575 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5576 break;
5577 }
5578 } else {
5579 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
5580 * specifically only auto mask tx and rx interrupts */
5581 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5582 }
5583
5584
5585
5586
5587 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
5588 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5589
5590 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
5591 case IXGBE_82599_VMDQ_8Q_MASK:
5592 gpie |= IXGBE_GPIE_VTMODE_16;
5593 break;
5594 case IXGBE_82599_VMDQ_4Q_MASK:
5595 gpie |= IXGBE_GPIE_VTMODE_32;
5596 break;
5597 default:
5598 gpie |= IXGBE_GPIE_VTMODE_64;
5599 break;
5600 }
5601 }
5602
5603
5604 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
5605 switch (adapter->hw.mac.type) {
5606 case ixgbe_mac_82599EB:
5607 gpie |= IXGBE_SDP0_GPIEN_8259X;
5608 break;
5609 default:
5610 break;
5611 }
5612 }
5613
5614
5615 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
5616 gpie |= IXGBE_SDP1_GPIEN(hw);
5617
5618 switch (hw->mac.type) {
5619 case ixgbe_mac_82599EB:
5620 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5621 break;
5622 case ixgbe_mac_X550EM_x:
5623 case ixgbe_mac_x550em_a:
5624 gpie |= IXGBE_SDP0_GPIEN_X540;
5625 break;
5626 default:
5627 break;
5628 }
5629
5630 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5631}
5632
5633static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
5634{
5635 struct ixgbe_hw *hw = &adapter->hw;
5636 int err;
5637 u32 ctrl_ext;
5638
5639 ixgbe_get_hw_control(adapter);
5640 ixgbe_setup_gpie(adapter);
5641
5642 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
5643 ixgbe_configure_msix(adapter);
5644 else
5645 ixgbe_configure_msi_and_legacy(adapter);
5646
5647
5648 if (hw->mac.ops.enable_tx_laser)
5649 hw->mac.ops.enable_tx_laser(hw);
5650
5651 if (hw->phy.ops.set_phy_power)
5652 hw->phy.ops.set_phy_power(hw, true);
5653
5654 smp_mb__before_atomic();
5655 clear_bit(__IXGBE_DOWN, &adapter->state);
5656 ixgbe_napi_enable_all(adapter);
5657
5658 if (ixgbe_is_sfp(hw)) {
5659 ixgbe_sfp_link_config(adapter);
5660 } else {
5661 err = ixgbe_non_sfp_link_config(hw);
5662 if (err)
5663 e_err(probe, "link_config FAILED %d\n", err);
5664 }
5665
5666
5667 IXGBE_READ_REG(hw, IXGBE_EICR);
5668 ixgbe_irq_enable(adapter, true, true);
5669
5670 /*
5671 * If this adapter has a fan, check to see if we had a failure
5672 * before we enabled the interrupt.
5673 */
5674 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5675 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5676 if (esdp & IXGBE_ESDP_SDP1)
5677 e_crit(drv, "Fan has stopped, replace the adapter\n");
5678 }
5679
5680 /* bring the link up in the watchdog, this could race with our first
5681 * link up interrupt but shouldn't be a problem */
5682 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5683 adapter->link_check_timeout = jiffies;
5684 mod_timer(&adapter->service_timer, jiffies);
5685
5686
5687 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5688 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5689 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5690}
5691
5692void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5693{
5694 WARN_ON(in_interrupt());
5695
5696 netif_trans_update(adapter->netdev);
5697
5698 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5699 usleep_range(1000, 2000);
5700 if (adapter->hw.phy.type == ixgbe_phy_fw)
5701 ixgbe_watchdog_link_is_down(adapter);
5702 ixgbe_down(adapter);
5703
5704 /* If SR-IOV enabled then wait a bit before bringing the adapter
5705 * back up to give the VFs time to respond to the reset.  The
5706 * two second wait is based upon the watchdog timer cycle in
5707 * the VF driver.
5708 */
5709 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5710 msleep(2000);
5711 ixgbe_up(adapter);
5712 clear_bit(__IXGBE_RESETTING, &adapter->state);
5713}
5714
5715void ixgbe_up(struct ixgbe_adapter *adapter)
5716{
5717
5718 ixgbe_configure(adapter);
5719
5720 ixgbe_up_complete(adapter);
5721}
5722
5723void ixgbe_reset(struct ixgbe_adapter *adapter)
5724{
5725 struct ixgbe_hw *hw = &adapter->hw;
5726 struct net_device *netdev = adapter->netdev;
5727 int err;
5728
5729 if (ixgbe_removed(hw->hw_addr))
5730 return;
5731
5732 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5733 usleep_range(1000, 2000);
5734
5735
5736 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5737 IXGBE_FLAG2_SFP_NEEDS_RESET);
5738 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5739
5740 err = hw->mac.ops.init_hw(hw);
5741 switch (err) {
5742 case 0:
5743 case IXGBE_ERR_SFP_NOT_PRESENT:
5744 case IXGBE_ERR_SFP_NOT_SUPPORTED:
5745 break;
5746 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5747 e_dev_err("master disable timed out\n");
5748 break;
5749 case IXGBE_ERR_EEPROM_VERSION:
5750
5751 e_dev_warn("This device is a pre-production adapter/LOM. "
5752 "Please be aware there may be issues associated with "
5753 "your hardware. If you are experiencing problems "
5754 "please contact your Intel or hardware "
5755 "representative who provided you with this "
5756 "hardware.\n");
5757 break;
5758 default:
5759 e_dev_err("Hardware Error: %d\n", err);
5760 }
5761
5762 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5763
5764
5765 ixgbe_flush_sw_mac_table(adapter);
5766 __dev_uc_unsync(netdev, NULL);
5767
5768
5769 ixgbe_mac_set_default_filter(adapter);
5770
5771
5772 if (hw->mac.san_mac_rar_index)
5773 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5774
5775 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5776 ixgbe_ptp_reset(adapter);
5777
5778 if (hw->phy.ops.set_phy_power) {
5779 if (!netif_running(adapter->netdev) && !adapter->wol)
5780 hw->phy.ops.set_phy_power(hw, false);
5781 else
5782 hw->phy.ops.set_phy_power(hw, true);
5783 }
5784}
5785
5786 /**
5787 * ixgbe_clean_tx_ring - Free Tx Buffers
5788 * @tx_ring: ring to be cleaned
5789 **/
5790static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5791{
5792 u16 i = tx_ring->next_to_clean;
5793 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
5794
5795 while (i != tx_ring->next_to_use) {
5796 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
5797
5798
5799 if (ring_is_xdp(tx_ring))
5800 page_frag_free(tx_buffer->data);
5801 else
5802 dev_kfree_skb_any(tx_buffer->skb);
5803
5804
5805 dma_unmap_single(tx_ring->dev,
5806 dma_unmap_addr(tx_buffer, dma),
5807 dma_unmap_len(tx_buffer, len),
5808 DMA_TO_DEVICE);
5809
5810
5811 eop_desc = tx_buffer->next_to_watch;
5812 tx_desc = IXGBE_TX_DESC(tx_ring, i);
5813
5814
5815 while (tx_desc != eop_desc) {
5816 tx_buffer++;
5817 tx_desc++;
5818 i++;
5819 if (unlikely(i == tx_ring->count)) {
5820 i = 0;
5821 tx_buffer = tx_ring->tx_buffer_info;
5822 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
5823 }
5824
5825
5826 if (dma_unmap_len(tx_buffer, len))
5827 dma_unmap_page(tx_ring->dev,
5828 dma_unmap_addr(tx_buffer, dma),
5829 dma_unmap_len(tx_buffer, len),
5830 DMA_TO_DEVICE);
5831 }
5832
5833
5834 tx_buffer++;
5835 i++;
5836 if (unlikely(i == tx_ring->count)) {
5837 i = 0;
5838 tx_buffer = tx_ring->tx_buffer_info;
5839 }
5840 }
5841
5842
5843 if (!ring_is_xdp(tx_ring))
5844 netdev_tx_reset_queue(txring_txq(tx_ring));
5845
5846
5847 tx_ring->next_to_use = 0;
5848 tx_ring->next_to_clean = 0;
5849}
5850
5851 /**
5852 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
5853 * @adapter: board private structure
5854 **/
5855static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
5856{
5857 int i;
5858
5859 for (i = 0; i < adapter->num_rx_queues; i++)
5860 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
5861}
5862
5863 /**
5864 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
5865 * @adapter: board private structure
5866 **/
5867static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
5868{
5869 int i;
5870
5871 for (i = 0; i < adapter->num_tx_queues; i++)
5872 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
5873 for (i = 0; i < adapter->num_xdp_queues; i++)
5874 ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
5875}
5876
5877static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
5878{
5879 struct hlist_node *node2;
5880 struct ixgbe_fdir_filter *filter;
5881
5882 spin_lock(&adapter->fdir_perfect_lock);
5883
5884 hlist_for_each_entry_safe(filter, node2,
5885 &adapter->fdir_filter_list, fdir_node) {
5886 hlist_del(&filter->fdir_node);
5887 kfree(filter);
5888 }
5889 adapter->fdir_filter_count = 0;
5890
5891 spin_unlock(&adapter->fdir_perfect_lock);
5892}
5893
5894void ixgbe_down(struct ixgbe_adapter *adapter)
5895{
5896 struct net_device *netdev = adapter->netdev;
5897 struct ixgbe_hw *hw = &adapter->hw;
5898 int i;
5899
5900
5901 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
5902 return;
5903
5904
5905 hw->mac.ops.disable_rx(hw);
5906
5907
5908 for (i = 0; i < adapter->num_rx_queues; i++)
5909
5910 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
5911
5912 usleep_range(10000, 20000);
5913
5914
5915 if (adapter->xdp_ring[0])
5916 synchronize_sched();
5917 netif_tx_stop_all_queues(netdev);
5918
5919
5920 netif_carrier_off(netdev);
5921 netif_tx_disable(netdev);
5922
5923 ixgbe_irq_disable(adapter);
5924
5925 ixgbe_napi_disable_all(adapter);
5926
5927 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
5928 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
5929 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5930
5931 del_timer_sync(&adapter->service_timer);
5932
5933 if (adapter->num_vfs) {
5934
5935 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
5936
5937
5938 for (i = 0 ; i < adapter->num_vfs; i++)
5939 adapter->vfinfo[i].clear_to_send = false;
5940
5941
5942 ixgbe_ping_all_vfs(adapter);
5943
5944
5945 ixgbe_disable_tx_rx(adapter);
5946 }
5947
5948
5949 for (i = 0; i < adapter->num_tx_queues; i++) {
5950 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
5951 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5952 }
5953 for (i = 0; i < adapter->num_xdp_queues; i++) {
5954 u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
5955
5956 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5957 }
5958
5959
5960 switch (hw->mac.type) {
5961 case ixgbe_mac_82599EB:
5962 case ixgbe_mac_X540:
5963 case ixgbe_mac_X550:
5964 case ixgbe_mac_X550EM_x:
5965 case ixgbe_mac_x550em_a:
5966 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5967 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5968 ~IXGBE_DMATXCTL_TE));
5969 break;
5970 default:
5971 break;
5972 }
5973
5974 if (!pci_channel_offline(adapter->pdev))
5975 ixgbe_reset(adapter);
5976
5977
5978 if (hw->mac.ops.disable_tx_laser)
5979 hw->mac.ops.disable_tx_laser(hw);
5980
5981 ixgbe_clean_all_tx_rings(adapter);
5982 ixgbe_clean_all_rx_rings(adapter);
5983}
5984
5985 /**
5986 * ixgbe_set_eee_capable - helper function to determine EEE support on X550
5987 * @adapter: board private structure
5988 */
5989static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
5990{
5991 struct ixgbe_hw *hw = &adapter->hw;
5992
5993 switch (hw->device_id) {
5994 case IXGBE_DEV_ID_X550EM_A_1G_T:
5995 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
5996 if (!hw->phy.eee_speeds_supported)
5997 break;
5998 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
5999 if (!hw->phy.eee_speeds_advertised)
6000 break;
6001 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
6002 break;
6003 default:
6004 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
6005 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
6006 break;
6007 }
6008}
6009
6010 /**
6011 * ixgbe_tx_timeout - Respond to a Tx Hang
6012 * @netdev: network interface device structure
6013 **/
6014static void ixgbe_tx_timeout(struct net_device *netdev)
6015{
6016 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6017
6018
6019 ixgbe_tx_timeout_reset(adapter);
6020}
6021
6022#ifdef CONFIG_IXGBE_DCB
6023static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
6024{
6025 struct ixgbe_hw *hw = &adapter->hw;
6026 struct tc_configuration *tc;
6027 int j;
6028
6029 switch (hw->mac.type) {
6030 case ixgbe_mac_82598EB:
6031 case ixgbe_mac_82599EB:
6032 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
6033 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
6034 break;
6035 case ixgbe_mac_X540:
6036 case ixgbe_mac_X550:
6037 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
6038 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
6039 break;
6040 case ixgbe_mac_X550EM_x:
6041 case ixgbe_mac_x550em_a:
6042 default:
6043 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
6044 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
6045 break;
6046 }
6047
6048
6049 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
6050 tc = &adapter->dcb_cfg.tc_config[j];
6051 tc->path[DCB_TX_CONFIG].bwg_id = 0;
6052 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
6053 tc->path[DCB_RX_CONFIG].bwg_id = 0;
6054 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
6055 tc->dcb_pfc = pfc_disabled;
6056 }
6057
6058
6059 tc = &adapter->dcb_cfg.tc_config[0];
6060 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
6061 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
6062
6063 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
6064 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
6065 adapter->dcb_cfg.pfc_mode_enable = false;
6066 adapter->dcb_set_bitmap = 0x00;
6067 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
6068 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
6069 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
6070 sizeof(adapter->temp_dcb_cfg));
6071}
6072#endif
6073
6074 /**
6075 * ixgbe_sw_init - Initialize general software structures
6076 * @adapter: board private structure to initialize
6077 * @ii: pointer to ixgbe_info for device
6078 *
6079 * ixgbe_sw_init initializes the Adapter private data structure.
6080 * Fields are initialized based on PCI device information and
6081 * OS network device settings (MTU size).
6082 **/
6083static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
6084 const struct ixgbe_info *ii)
6085{
6086 struct ixgbe_hw *hw = &adapter->hw;
6087 struct pci_dev *pdev = adapter->pdev;
6088 unsigned int rss, fdir;
6089 u32 fwsm;
6090 int i;
6091
6092
6093
6094 hw->vendor_id = pdev->vendor;
6095 hw->device_id = pdev->device;
6096 hw->revision_id = pdev->revision;
6097 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6098 hw->subsystem_device_id = pdev->subsystem_device;
6099
6100
6101 ii->get_invariants(hw);
6102
6103
6104 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
6105 adapter->ring_feature[RING_F_RSS].limit = rss;
6106 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
6107 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
6108 adapter->atr_sample_rate = 20;
6109 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
6110 adapter->ring_feature[RING_F_FDIR].limit = fdir;
6111 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
6112 adapter->ring_feature[RING_F_VMDQ].limit = 1;
6113#ifdef CONFIG_IXGBE_DCA
6114 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
6115#endif
6116#ifdef CONFIG_IXGBE_DCB
6117 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
6118 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6119#endif
6120#ifdef IXGBE_FCOE
6121 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
6122 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6123#ifdef CONFIG_IXGBE_DCB
6124
6125 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
6126#endif
6127#endif
6128
6129
6130 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
6131 GFP_KERNEL);
6132 if (!adapter->jump_tables[0])
6133 return -ENOMEM;
6134 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
6135
6136 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
6137 adapter->jump_tables[i] = NULL;
6138
6139 adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
6140 hw->mac.num_rar_entries,
6141 GFP_ATOMIC);
6142 if (!adapter->mac_table)
6143 return -ENOMEM;
6144
6145 if (ixgbe_init_rss_key(adapter))
6146 return -ENOMEM;
6147
6148
6149 switch (hw->mac.type) {
6150 case ixgbe_mac_82598EB:
6151 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
6152
6153 if (hw->device_id == IXGBE_DEV_ID_82598AT)
6154 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
6155
6156 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
6157 adapter->ring_feature[RING_F_FDIR].limit = 0;
6158 adapter->atr_sample_rate = 0;
6159 adapter->fdir_pballoc = 0;
6160#ifdef IXGBE_FCOE
6161 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6162 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6163#ifdef CONFIG_IXGBE_DCB
6164 adapter->fcoe.up = 0;
6165#endif
6166#endif
6167 break;
6168 case ixgbe_mac_82599EB:
6169 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
6170 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6171 break;
6172 case ixgbe_mac_X540:
6173 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
6174 if (fwsm & IXGBE_FWSM_TS_ENABLED)
6175 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6176 break;
6177 case ixgbe_mac_x550em_a:
6178 adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
6179 switch (hw->device_id) {
6180 case IXGBE_DEV_ID_X550EM_A_1G_T:
6181 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6182 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6183 break;
6184 default:
6185 break;
6186 }
6187 /* fall through */
6188 case ixgbe_mac_X550EM_x:
6189#ifdef CONFIG_IXGBE_DCB
6190 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
6191#endif
6192#ifdef IXGBE_FCOE
6193 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6194#ifdef CONFIG_IXGBE_DCB
6195 adapter->fcoe.up = 0;
6196#endif
6197#endif
6198 /* fall through */
6199 case ixgbe_mac_X550:
6200 if (hw->mac.type == ixgbe_mac_X550)
6201 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6202#ifdef CONFIG_IXGBE_DCA
6203 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
6204#endif
6205 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
6206 break;
6207 default:
6208 break;
6209 }
6210
6211#ifdef IXGBE_FCOE
6212
6213 spin_lock_init(&adapter->fcoe.lock);
6214
6215#endif
6216
6217 spin_lock_init(&adapter->fdir_perfect_lock);
6218
6219#ifdef CONFIG_IXGBE_DCB
6220 ixgbe_init_dcb(adapter);
6221#endif
6222
6223
6224 hw->fc.requested_mode = ixgbe_fc_full;
6225 hw->fc.current_mode = ixgbe_fc_full;
6226 ixgbe_pbthresh_setup(adapter);
6227 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
6228 hw->fc.send_xon = true;
6229 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
6230
6231#ifdef CONFIG_PCI_IOV
6232 if (max_vfs > 0)
6233 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
6234
6235
6236 if (hw->mac.type != ixgbe_mac_82598EB) {
6237 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
6238 max_vfs = 0;
6239 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
6240 }
6241 }
6242#endif
6243
6244
6245 adapter->rx_itr_setting = 1;
6246 adapter->tx_itr_setting = 1;
6247
6248
6249 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
6250 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
6251
6252
6253 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
6254
6255
6256 if (ixgbe_init_eeprom_params_generic(hw)) {
6257 e_dev_err("EEPROM initialization failed\n");
6258 return -EIO;
6259 }
6260
6261
6262 set_bit(0, adapter->fwd_bitmask);
6263 set_bit(__IXGBE_DOWN, &adapter->state);
6264
6265 return 0;
6266}
6267
6268 /**
6269 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
6270 * @tx_ring: tx descriptor ring (for a specific queue) to setup
6271 *
6272 * Return 0 on success, negative on failure
6273 **/
6274int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
6275{
6276 struct device *dev = tx_ring->dev;
6277 int orig_node = dev_to_node(dev);
6278 int ring_node = -1;
6279 int size;
6280
6281 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
6282
6283 if (tx_ring->q_vector)
6284 ring_node = tx_ring->q_vector->numa_node;
6285
6286 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
6287 if (!tx_ring->tx_buffer_info)
6288 tx_ring->tx_buffer_info = vmalloc(size);
6289 if (!tx_ring->tx_buffer_info)
6290 goto err;
6291
6292
6293 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
6294 tx_ring->size = ALIGN(tx_ring->size, 4096);
6295
6296 set_dev_node(dev, ring_node);
6297 tx_ring->desc = dma_alloc_coherent(dev,
6298 tx_ring->size,
6299 &tx_ring->dma,
6300 GFP_KERNEL);
6301 set_dev_node(dev, orig_node);
6302 if (!tx_ring->desc)
6303 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6304 &tx_ring->dma, GFP_KERNEL);
6305 if (!tx_ring->desc)
6306 goto err;
6307
6308 tx_ring->next_to_use = 0;
6309 tx_ring->next_to_clean = 0;
6310 return 0;
6311
6312err:
6313 vfree(tx_ring->tx_buffer_info);
6314 tx_ring->tx_buffer_info = NULL;
6315 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
6316 return -ENOMEM;
6317}
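/* Design note (as read from the code above): both allocations are tried
 * twice on purpose.  The first attempt is pinned to the queue vector's NUMA
 * node (vmalloc_node()/set_dev_node()); only if that node is out of memory
 * is the request retried with no node preference, trading memory locality
 * for reliability.
 */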
6318
6319 /**
6320 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
6321 * @adapter: board private structure
6322 *
6323 * If this function returns with an error, then it's possible one or
6324 * more of the rings is populated (while the rest are not).  It is the
6325 * caller's duty to clean those orphaned rings.
6326 *
6327 * Return 0 on success, negative on failure
6328 **/
6329static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
6330{
6331 int i, j = 0, err = 0;
6332
6333 for (i = 0; i < adapter->num_tx_queues; i++) {
6334 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6335 if (!err)
6336 continue;
6337
6338 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
6339 goto err_setup_tx;
6340 }
6341 for (j = 0; j < adapter->num_xdp_queues; j++) {
6342 err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
6343 if (!err)
6344 continue;
6345
6346 e_err(probe, "Allocation for Tx Queue %u failed\n", j);
6347 goto err_setup_tx;
6348 }
6349
6350 return 0;
6351err_setup_tx:
6352
6353 while (j--)
6354 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
6355 while (i--)
6356 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6357 return err;
6358}
6359
6360 /**
6361 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
6362 * @adapter: pointer to ixgbe_adapter
6363 * @rx_ring: rx descriptor ring (for a specific queue) to setup
6364 *
6365 * Returns 0 on success, negative on failure
6366 **/
6367int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
6368 struct ixgbe_ring *rx_ring)
6369{
6370 struct device *dev = rx_ring->dev;
6371 int orig_node = dev_to_node(dev);
6372 int ring_node = -1;
6373 int size;
6374
6375 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
6376
6377 if (rx_ring->q_vector)
6378 ring_node = rx_ring->q_vector->numa_node;
6379
6380 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
6381 if (!rx_ring->rx_buffer_info)
6382 rx_ring->rx_buffer_info = vmalloc(size);
6383 if (!rx_ring->rx_buffer_info)
6384 goto err;
6385
6386
6387 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
6388 rx_ring->size = ALIGN(rx_ring->size, 4096);
6389
6390 set_dev_node(dev, ring_node);
6391 rx_ring->desc = dma_alloc_coherent(dev,
6392 rx_ring->size,
6393 &rx_ring->dma,
6394 GFP_KERNEL);
6395 set_dev_node(dev, orig_node);
6396 if (!rx_ring->desc)
6397 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
6398 &rx_ring->dma, GFP_KERNEL);
6399 if (!rx_ring->desc)
6400 goto err;
6401
6402 rx_ring->next_to_clean = 0;
6403 rx_ring->next_to_use = 0;
6404
6405
6406 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
6407 rx_ring->queue_index) < 0)
6408 goto err;
6409
6410 rx_ring->xdp_prog = adapter->xdp_prog;
6411
6412 return 0;
6413err:
6414 vfree(rx_ring->rx_buffer_info);
6415 rx_ring->rx_buffer_info = NULL;
6416 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
6417 return -ENOMEM;
6418}
6419
6420 /**
6421 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
6422 * @adapter: board private structure
6423 *
6424 * If this function returns with an error, then it's possible one or
6425 * more of the rings is populated (while the rest are not).  It is the
6426 * caller's duty to clean those orphaned rings.
6427 *
6428 * Return 0 on success, negative on failure
6429 **/
6430static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
6431{
6432 int i, err = 0;
6433
6434 for (i = 0; i < adapter->num_rx_queues; i++) {
6435 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
6436 if (!err)
6437 continue;
6438
6439 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
6440 goto err_setup_rx;
6441 }
6442
6443#ifdef IXGBE_FCOE
6444 err = ixgbe_setup_fcoe_ddp_resources(adapter);
6445 if (!err)
6446#endif
6447 return 0;
6448err_setup_rx:
6449
6450 while (i--)
6451 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6452 return err;
6453}
6454
6455 /**
6456 * ixgbe_free_tx_resources - Free Tx Resources per Queue
6457 * @tx_ring: Tx descriptor ring for a specific queue
6458 *
6459 * Free all transmit software resources
6460 **/
6461void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
6462{
6463 ixgbe_clean_tx_ring(tx_ring);
6464
6465 vfree(tx_ring->tx_buffer_info);
6466 tx_ring->tx_buffer_info = NULL;
6467
6468
6469 if (!tx_ring->desc)
6470 return;
6471
6472 dma_free_coherent(tx_ring->dev, tx_ring->size,
6473 tx_ring->desc, tx_ring->dma);
6474
6475 tx_ring->desc = NULL;
6476}
6477
6478 /**
6479 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
6480 * @adapter: board private structure
6481 *
6482 * Free all transmit software resources
6483 **/
6484static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
6485{
6486 int i;
6487
6488 for (i = 0; i < adapter->num_tx_queues; i++)
6489 if (adapter->tx_ring[i]->desc)
6490 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6491 for (i = 0; i < adapter->num_xdp_queues; i++)
6492 if (adapter->xdp_ring[i]->desc)
6493 ixgbe_free_tx_resources(adapter->xdp_ring[i]);
6494}
6495
6496 /**
6497 * ixgbe_free_rx_resources - Free Rx Resources
6498 * @rx_ring: ring to clean the resources from
6499 *
6500 * Free all receive software resources
6501 **/
6502void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
6503{
6504 ixgbe_clean_rx_ring(rx_ring);
6505
6506 rx_ring->xdp_prog = NULL;
6507 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
6508 vfree(rx_ring->rx_buffer_info);
6509 rx_ring->rx_buffer_info = NULL;
6510
6511
6512 if (!rx_ring->desc)
6513 return;
6514
6515 dma_free_coherent(rx_ring->dev, rx_ring->size,
6516 rx_ring->desc, rx_ring->dma);
6517
6518 rx_ring->desc = NULL;
6519}
6520
6521 /**
6522 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
6523 * @adapter: board private structure
6524 *
6525 * Free all receive software resources
6526 **/
6527static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
6528{
6529 int i;
6530
6531#ifdef IXGBE_FCOE
6532 ixgbe_free_fcoe_ddp_resources(adapter);
6533
6534#endif
6535 for (i = 0; i < adapter->num_rx_queues; i++)
6536 if (adapter->rx_ring[i]->desc)
6537 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6538}
6539
6540 /**
6541 * ixgbe_change_mtu - Change the Maximum Transfer Unit
6542 * @netdev: network interface device structure
6543 * @new_mtu: new value for maximum frame size
6544 *
6545 * Returns 0 on success, negative on failure
6546 **/
6547static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6548{
6549 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6550
6551 /*
6552 * For 82599EB we cannot allow legacy VFs to enable their receive
6553 * paths when MTU greater than 1500 is configured.  So display a
6554 * warning that legacy VFs will be disabled.
6555 */
6556 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
6557 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
6558 (new_mtu > ETH_DATA_LEN))
6559 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
6560
6561 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
6562
6563 /* must set new MTU before calling down or up */
6564 netdev->mtu = new_mtu;
6565
6566 if (netif_running(netdev))
6567 ixgbe_reinit_locked(adapter);
6568
6569 return 0;
6570}
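/* Example (illustrative): this is the driver's .ndo_change_mtu hook,
 * reached via e.g. "ip link set dev eth0 mtu 9000" ("eth0" is a
 * placeholder).  When the interface is up, the reinit cycle above tears
 * down and rebuilds the rings sized for the new maximum frame.
 */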
6571
6572 /**
6573 * ixgbe_open - Called when a network interface is made active
6574 * @netdev: network interface device structure
6575 *
6576 * Returns 0 on success, negative value on failure
6577 *
6578 * The open entry point is called when a network interface is made
6579 * active by the system (IFF_UP).  At this point all resources needed
6580 * for transmit and receive operations are allocated, the interrupt
6581 * handler is registered with the OS, the watchdog timer is started,
6582 * and the stack is notified that the interface is ready.
6583 **/
6584int ixgbe_open(struct net_device *netdev)
6585{
6586 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6587 struct ixgbe_hw *hw = &adapter->hw;
6588 int err, queues;
6589
6590 /* disallow open during test */
6591 if (test_bit(__IXGBE_TESTING, &adapter->state))
6592 return -EBUSY;
6593
6594 netif_carrier_off(netdev);
6595
6596 /* allocate transmit descriptors */
6597 err = ixgbe_setup_all_tx_resources(adapter);
6598 if (err)
6599 goto err_setup_tx;
6600
6601 /* allocate receive descriptors */
6602 err = ixgbe_setup_all_rx_resources(adapter);
6603 if (err)
6604 goto err_setup_rx;
6605
6606 ixgbe_configure(adapter);
6607
6608 err = ixgbe_request_irq(adapter);
6609 if (err)
6610 goto err_req_irq;
6611
6612 /* Notify the stack of the actual queue counts. */
6613 queues = adapter->num_tx_queues;
6614 err = netif_set_real_num_tx_queues(netdev, queues);
6615 if (err)
6616 goto err_set_queues;
6617
6618 queues = adapter->num_rx_queues;
6619 err = netif_set_real_num_rx_queues(netdev, queues);
6620 if (err)
6621 goto err_set_queues;
6622
6623 ixgbe_ptp_init(adapter);
6624
6625 ixgbe_up_complete(adapter);
6626
6627 ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
6628 udp_tunnel_get_rx_info(netdev);
6629
6630 return 0;
6631
6632err_set_queues:
6633 ixgbe_free_irq(adapter);
6634err_req_irq:
6635 ixgbe_free_all_rx_resources(adapter);
6636 if (hw->phy.ops.set_phy_power && !adapter->wol)
6637 hw->phy.ops.set_phy_power(&adapter->hw, false);
6638err_setup_rx:
6639 ixgbe_free_all_tx_resources(adapter);
6640err_setup_tx:
6641 ixgbe_reset(adapter);
6642
6643 return err;
6644}
6645
6646static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6647{
6648 ixgbe_ptp_suspend(adapter);
6649
6650 if (adapter->hw.phy.ops.enter_lplu) {
6651 adapter->hw.phy.reset_disable = true;
6652 ixgbe_down(adapter);
6653 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6654 adapter->hw.phy.reset_disable = false;
6655 } else {
6656 ixgbe_down(adapter);
6657 }
6658
6659 ixgbe_free_irq(adapter);
6660
6661 ixgbe_free_all_tx_resources(adapter);
6662 ixgbe_free_all_rx_resources(adapter);
6663}
6664
6665 /**
6666 * ixgbe_close - Disables a network interface
6667 * @netdev: network interface device structure
6668 *
6669 * Returns 0, this is not allowed to fail
6670 *
6671 * The close entry point is called when an interface is de-activated
6672 * by the OS.  The hardware is still under the drivers control, but
6673 * needs to be disabled.  A global MAC reset is issued to stop the
6674 * hardware, and all transmit and receive resources are freed.
6675 **/
6676int ixgbe_close(struct net_device *netdev)
6677{
6678 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6679
6680 ixgbe_ptp_stop(adapter);
6681
6682 if (netif_device_present(netdev))
6683 ixgbe_close_suspend(adapter);
6684
6685 ixgbe_fdir_filter_exit(adapter);
6686
6687 ixgbe_release_hw_control(adapter);
6688
6689 return 0;
6690}
6691
6692#ifdef CONFIG_PM
6693static int ixgbe_resume(struct pci_dev *pdev)
6694{
6695 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6696 struct net_device *netdev = adapter->netdev;
6697 u32 err;
6698
6699 adapter->hw.hw_addr = adapter->io_addr;
6700 pci_set_power_state(pdev, PCI_D0);
6701 pci_restore_state(pdev);
6702
6703 /* pci_restore_state clears dev->state_saved so call
6704 * pci_save_state to restore it.
6705 */
6706 pci_save_state(pdev);
6707
6708 err = pci_enable_device_mem(pdev);
6709 if (err) {
6710 e_dev_err("Cannot enable PCI device from suspend\n");
6711 return err;
6712 }
6713 smp_mb__before_atomic();
6714 clear_bit(__IXGBE_DISABLED, &adapter->state);
6715 pci_set_master(pdev);
6716
6717 pci_wake_from_d3(pdev, false);
6718
6719 ixgbe_reset(adapter);
6720
6721 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6722
6723 rtnl_lock();
6724 err = ixgbe_init_interrupt_scheme(adapter);
6725 if (!err && netif_running(netdev))
6726 err = ixgbe_open(netdev);
6727
6728
6729 if (!err)
6730 netif_device_attach(netdev);
6731 rtnl_unlock();
6732
6733 return err;
6734}
6735#endif
6736
6737static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6738{
6739 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6740 struct net_device *netdev = adapter->netdev;
6741 struct ixgbe_hw *hw = &adapter->hw;
6742 u32 ctrl;
6743 u32 wufc = adapter->wol;
6744#ifdef CONFIG_PM
6745 int retval = 0;
6746#endif
6747
6748 rtnl_lock();
6749 netif_device_detach(netdev);
6750
6751 if (netif_running(netdev))
6752 ixgbe_close_suspend(adapter);
6753
6754 ixgbe_clear_interrupt_scheme(adapter);
6755 rtnl_unlock();
6756
6757#ifdef CONFIG_PM
6758 retval = pci_save_state(pdev);
6759 if (retval)
6760 return retval;
6761
6762#endif
6763 if (hw->mac.ops.stop_link_on_d3)
6764 hw->mac.ops.stop_link_on_d3(hw);
6765
6766 if (wufc) {
6767 u32 fctrl;
6768
6769 ixgbe_set_rx_mode(netdev);
6770
6771
6772 if (hw->mac.ops.enable_tx_laser)
6773 hw->mac.ops.enable_tx_laser(hw);
6774
6775
6776 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6777 fctrl |= IXGBE_FCTRL_MPE;
6778 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6779
6780 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
6781 ctrl |= IXGBE_CTRL_GIO_DIS;
6782 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
6783
6784 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
6785 } else {
6786 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
6787 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
6788 }
6789
6790 switch (hw->mac.type) {
6791 case ixgbe_mac_82598EB:
6792 pci_wake_from_d3(pdev, false);
6793 break;
6794 case ixgbe_mac_82599EB:
6795 case ixgbe_mac_X540:
6796 case ixgbe_mac_X550:
6797 case ixgbe_mac_X550EM_x:
6798 case ixgbe_mac_x550em_a:
6799 pci_wake_from_d3(pdev, !!wufc);
6800 break;
6801 default:
6802 break;
6803 }
6804
6805 *enable_wake = !!wufc;
6806 if (hw->phy.ops.set_phy_power && !*enable_wake)
6807 hw->phy.ops.set_phy_power(hw, false);
6808
6809 ixgbe_release_hw_control(adapter);
6810
6811 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6812 pci_disable_device(pdev);
6813
6814 return 0;
6815}
6816
6817#ifdef CONFIG_PM
6818static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
6819{
6820 int retval;
6821 bool wake;
6822
6823 retval = __ixgbe_shutdown(pdev, &wake);
6824 if (retval)
6825 return retval;
6826
6827 if (wake) {
6828 pci_prepare_to_sleep(pdev);
6829 } else {
6830 pci_wake_from_d3(pdev, false);
6831 pci_set_power_state(pdev, PCI_D3hot);
6832 }
6833
6834 return 0;
6835}
6836#endif
6837
6838static void ixgbe_shutdown(struct pci_dev *pdev)
6839{
6840 bool wake;
6841
6842 __ixgbe_shutdown(pdev, &wake);
6843
6844 if (system_state == SYSTEM_POWER_OFF) {
6845 pci_wake_from_d3(pdev, wake);
6846 pci_set_power_state(pdev, PCI_D3hot);
6847 }
6848}
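/* Example (illustrative): whether __ixgbe_shutdown() arms wake-up depends
 * on adapter->wol, which userspace typically configures with
 * "ethtool -s eth0 wol g" (wake on magic packet; "eth0" is a placeholder).
 */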
6849
6850 /**
6851 * ixgbe_update_stats - Update the board statistics counters.
6852 * @adapter: board private structure
6853 **/
6854void ixgbe_update_stats(struct ixgbe_adapter *adapter)
6855{
6856 struct net_device *netdev = adapter->netdev;
6857 struct ixgbe_hw *hw = &adapter->hw;
6858 struct ixgbe_hw_stats *hwstats = &adapter->stats;
6859 u64 total_mpc = 0;
6860 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
6861 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
6862 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
6863 u64 alloc_rx_page = 0;
6864 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
6865
6866 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6867 test_bit(__IXGBE_RESETTING, &adapter->state))
6868 return;
6869
6870 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
6871 u64 rsc_count = 0;
6872 u64 rsc_flush = 0;
6873 for (i = 0; i < adapter->num_rx_queues; i++) {
6874 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
6875 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
6876 }
6877 adapter->rsc_total_count = rsc_count;
6878 adapter->rsc_total_flush = rsc_flush;
6879 }
6880
6881 for (i = 0; i < adapter->num_rx_queues; i++) {
6882 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
6883 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
6884 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
6885 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
6886 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
6887 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
6888 bytes += rx_ring->stats.bytes;
6889 packets += rx_ring->stats.packets;
6890 }
6891 adapter->non_eop_descs = non_eop_descs;
6892 adapter->alloc_rx_page = alloc_rx_page;
6893 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
6894 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
6895 adapter->hw_csum_rx_error = hw_csum_rx_error;
6896 netdev->stats.rx_bytes = bytes;
6897 netdev->stats.rx_packets = packets;
6898
6899 bytes = 0;
6900 packets = 0;
6901
6902 for (i = 0; i < adapter->num_tx_queues; i++) {
6903 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6904 restart_queue += tx_ring->tx_stats.restart_queue;
6905 tx_busy += tx_ring->tx_stats.tx_busy;
6906 bytes += tx_ring->stats.bytes;
6907 packets += tx_ring->stats.packets;
6908 }
6909 for (i = 0; i < adapter->num_xdp_queues; i++) {
6910 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
6911
6912 restart_queue += xdp_ring->tx_stats.restart_queue;
6913 tx_busy += xdp_ring->tx_stats.tx_busy;
6914 bytes += xdp_ring->stats.bytes;
6915 packets += xdp_ring->stats.packets;
6916 }
6917 adapter->restart_queue = restart_queue;
6918 adapter->tx_busy = tx_busy;
6919 netdev->stats.tx_bytes = bytes;
6920 netdev->stats.tx_packets = packets;
6921
6922 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
6923
6924
6925 for (i = 0; i < 8; i++) {
6926
6927 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
6928 missed_rx += mpc;
6929 hwstats->mpc[i] += mpc;
6930 total_mpc += hwstats->mpc[i];
6931 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
6932 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
6933 switch (hw->mac.type) {
6934 case ixgbe_mac_82598EB:
6935 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
6936 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
6937 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
6938 hwstats->pxonrxc[i] +=
6939 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
6940 break;
6941 case ixgbe_mac_82599EB:
6942 case ixgbe_mac_X540:
6943 case ixgbe_mac_X550:
6944 case ixgbe_mac_X550EM_x:
6945 case ixgbe_mac_x550em_a:
6946 hwstats->pxonrxc[i] +=
6947 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
6948 break;
6949 default:
6950 break;
6951 }
6952 }
6953
6954
6955 for (i = 0; i < 16; i++) {
6956 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
6957 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
6958 if ((hw->mac.type == ixgbe_mac_82599EB) ||
6959 (hw->mac.type == ixgbe_mac_X540) ||
6960 (hw->mac.type == ixgbe_mac_X550) ||
6961 (hw->mac.type == ixgbe_mac_X550EM_x) ||
6962 (hw->mac.type == ixgbe_mac_x550em_a)) {
6963 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
6964 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
6965 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
6966 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
6967 }
6968 }
6969
6970 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
6971
6972 hwstats->gprc -= missed_rx;
6973
6974 ixgbe_update_xoff_received(adapter);
6975
6976
6977 switch (hw->mac.type) {
6978 case ixgbe_mac_82598EB:
6979 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
6980 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
6981 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
6982 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
6983 break;
6984 case ixgbe_mac_X540:
6985 case ixgbe_mac_X550:
6986 case ixgbe_mac_X550EM_x:
6987 case ixgbe_mac_x550em_a:
6988
6989 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
6990 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
6991 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
6992 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
6993 /* fall through */
6994 case ixgbe_mac_82599EB:
6995 for (i = 0; i < 16; i++)
6996 adapter->hw_rx_no_dma_resources +=
6997 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
6998 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
6999 IXGBE_READ_REG(hw, IXGBE_GORCH);
7000 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
7001 IXGBE_READ_REG(hw, IXGBE_GOTCH);
7002 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
7003 IXGBE_READ_REG(hw, IXGBE_TORH);
7004 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
7005 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
7006 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
7007#ifdef IXGBE_FCOE
7008 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
7009 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
7010 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
7011 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
7012 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
7013 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
7014
7015 if (adapter->fcoe.ddp_pool) {
7016 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
7017 struct ixgbe_fcoe_ddp_pool *ddp_pool;
7018 unsigned int cpu;
7019 u64 noddp = 0, noddp_ext_buff = 0;
7020 for_each_possible_cpu(cpu) {
7021 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
7022 noddp += ddp_pool->noddp;
7023 noddp_ext_buff += ddp_pool->noddp_ext_buff;
7024 }
7025 hwstats->fcoe_noddp = noddp;
7026 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
7027 }
7028#endif
7029 break;
7030 default:
7031 break;
7032 }
7033 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
7034 hwstats->bprc += bprc;
7035 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
7036 if (hw->mac.type == ixgbe_mac_82598EB)
7037 hwstats->mprc -= bprc;
7038 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
7039 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
7040 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
7041 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
7042 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
7043 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
7044 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
7045 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
7046 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
7047 hwstats->lxontxc += lxon;
7048 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
7049 hwstats->lxofftxc += lxoff;
7050 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
7051 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
7052 /*
7053 * 82598 errata - tx of flow control packets is included in tx counters
7054 */
7055 xon_off_tot = lxon + lxoff;
7056 hwstats->gptc -= xon_off_tot;
7057 hwstats->mptc -= xon_off_tot;
7058 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
7059 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
7060 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
7061 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
7062 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
7063 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
7064 hwstats->ptc64 -= xon_off_tot;
7065 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
7066 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
7067 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
7068 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
7069 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
7070 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
7071
	/* Fill out the OS statistics structure */
7073 netdev->stats.multicast = hwstats->mprc;
7074
	/* Rx Errors */
7076 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
7077 netdev->stats.rx_dropped = 0;
7078 netdev->stats.rx_length_errors = hwstats->rlec;
7079 netdev->stats.rx_crc_errors = hwstats->crcerrs;
7080 netdev->stats.rx_missed_errors = total_mpc;
7081}
7082
/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/
7087static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
7088{
7089 struct ixgbe_hw *hw = &adapter->hw;
7090 int i;
7091
7092 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
7093 return;
7094
7095 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
7096
	/* if interface is down do nothing */
7098 if (test_bit(__IXGBE_DOWN, &adapter->state))
7099 return;
7100
	/* do nothing if we are not using signature filters */
7102 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
7103 return;
7104
7105 adapter->fdir_overflow++;
7106
7107 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
7108 for (i = 0; i < adapter->num_tx_queues; i++)
7109 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7110 &(adapter->tx_ring[i]->state));
7111 for (i = 0; i < adapter->num_xdp_queues; i++)
7112 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7113 &adapter->xdp_ring[i]->state);

		/* re-enable flow director interrupts */
7115 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
7116 } else {
		e_err(probe, "failed to finish FDIR re-initialization, ignored adding FDIR ATR filters\n");
7119 }
7120}
7121
/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/
7131static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
7132{
7133 struct ixgbe_hw *hw = &adapter->hw;
7134 u64 eics = 0;
7135 int i;
7136
	/* If we're down, removing or resetting, just bail */
7138 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7139 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7140 test_bit(__IXGBE_RESETTING, &adapter->state))
7141 return;
7142
	/* Force detection of hung controller */
7144 if (netif_carrier_ok(adapter->netdev)) {
7145 for (i = 0; i < adapter->num_tx_queues; i++)
7146 set_check_for_tx_hang(adapter->tx_ring[i]);
7147 for (i = 0; i < adapter->num_xdp_queues; i++)
7148 set_check_for_tx_hang(adapter->xdp_ring[i]);
7149 }
7150
7151 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
7157 IXGBE_WRITE_REG(hw, IXGBE_EICS,
7158 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
7159 } else {
		/* get one bit for every active Tx/Rx interrupt vector */
7161 for (i = 0; i < adapter->num_q_vectors; i++) {
7162 struct ixgbe_q_vector *qv = adapter->q_vector[i];
7163 if (qv->rx.ring || qv->tx.ring)
7164 eics |= BIT_ULL(i);
7165 }
7166 }
7167
	/* Cause software interrupt to ensure rings are cleaned */
7169 ixgbe_irq_rearm_queues(adapter, eics);
7170}
7171
/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
7176static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
7177{
7178 struct ixgbe_hw *hw = &adapter->hw;
7179 u32 link_speed = adapter->link_speed;
7180 bool link_up = adapter->link_up;
7181 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
7182
7183 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
7184 return;
7185
7186 if (hw->mac.ops.check_link) {
7187 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
7188 } else {
		/* always assume link is up, if no check link function */
7190 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
7191 link_up = true;
7192 }
7193
7194 if (adapter->ixgbe_ieee_pfc)
7195 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
7196
7197 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
7198 hw->mac.ops.fc_enable(hw);
7199 ixgbe_set_rx_drop_en(adapter);
7200 }
7201
7202 if (link_up ||
7203 time_after(jiffies, (adapter->link_check_timeout +
7204 IXGBE_TRY_LINK_TIMEOUT))) {
7205 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
7206 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
7207 IXGBE_WRITE_FLUSH(hw);
7208 }
7209
7210 adapter->link_up = link_up;
7211 adapter->link_speed = link_speed;
7212}
7213
7214static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
7215{
7216#ifdef CONFIG_IXGBE_DCB
7217 struct net_device *netdev = adapter->netdev;
7218 struct dcb_app app = {
7219 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
7220 .protocol = 0,
7221 };
7222 u8 up = 0;
7223
7224 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
7225 up = dcb_ieee_getapp_mask(netdev, &app);
7226
7227 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
7228#endif
7229}
7230
/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter: pointer to the device adapter structure
 **/
7236static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
7237{
7238 struct net_device *netdev = adapter->netdev;
7239 struct ixgbe_hw *hw = &adapter->hw;
7240 u32 link_speed = adapter->link_speed;
7241 const char *speed_str;
7242 bool flow_rx, flow_tx;
7243
	/* only continue if link was previously down */
7245 if (netif_carrier_ok(netdev))
7246 return;
7247
7248 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
7249
7250 switch (hw->mac.type) {
7251 case ixgbe_mac_82598EB: {
7252 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
7253 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
7254 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
7255 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
7256 }
7257 break;
7258 case ixgbe_mac_X540:
7259 case ixgbe_mac_X550:
7260 case ixgbe_mac_X550EM_x:
7261 case ixgbe_mac_x550em_a:
7262 case ixgbe_mac_82599EB: {
7263 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
7264 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
7265 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
7266 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
7267 }
7268 break;
7269 default:
7270 flow_tx = false;
7271 flow_rx = false;
7272 break;
7273 }
7274
7275 adapter->last_rx_ptp_check = jiffies;
7276
7277 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7278 ixgbe_ptp_start_cyclecounter(adapter);
7279
7280 switch (link_speed) {
7281 case IXGBE_LINK_SPEED_10GB_FULL:
7282 speed_str = "10 Gbps";
7283 break;
7284 case IXGBE_LINK_SPEED_5GB_FULL:
7285 speed_str = "5 Gbps";
7286 break;
7287 case IXGBE_LINK_SPEED_2_5GB_FULL:
7288 speed_str = "2.5 Gbps";
7289 break;
7290 case IXGBE_LINK_SPEED_1GB_FULL:
7291 speed_str = "1 Gbps";
7292 break;
7293 case IXGBE_LINK_SPEED_100_FULL:
7294 speed_str = "100 Mbps";
7295 break;
7296 case IXGBE_LINK_SPEED_10_FULL:
7297 speed_str = "10 Mbps";
7298 break;
7299 default:
7300 speed_str = "unknown speed";
7301 break;
7302 }
7303 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
7304 ((flow_rx && flow_tx) ? "RX/TX" :
7305 (flow_rx ? "RX" :
7306 (flow_tx ? "TX" : "None"))));
7307
7308 netif_carrier_on(netdev);
7309 ixgbe_check_vf_rate_limit(adapter);
7310
	/* enable transmits */
7312 netif_tx_wake_all_queues(adapter->netdev);
7313
	/* update the default user priority for VFs */
7315 ixgbe_update_default_up(adapter);
7316
	/* ping all the active vfs to let them know link has changed */
7318 ixgbe_ping_all_vfs(adapter);
7319}
7320
/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter: pointer to the adapter structure
 **/
7326static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
7327{
7328 struct net_device *netdev = adapter->netdev;
7329 struct ixgbe_hw *hw = &adapter->hw;
7330
7331 adapter->link_up = false;
7332 adapter->link_speed = 0;
7333
	/* only continue if link was up previously */
7335 if (!netif_carrier_ok(netdev))
7336 return;
7337
	/* poll for SFP+ cable when link is down */
7339 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
7340 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
7341
7342 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7343 ixgbe_ptp_start_cyclecounter(adapter);
7344
7345 e_info(drv, "NIC Link is Down\n");
7346 netif_carrier_off(netdev);
7347
	/* ping all the active vfs to let them know link has changed */
7349 ixgbe_ping_all_vfs(adapter);
7350}
7351
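/* Returns true if any PF Tx or XDP ring still has work queued, i.e.
 * next_to_use has moved past next_to_clean.
 */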
7352static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
7353{
7354 int i;
7355
7356 for (i = 0; i < adapter->num_tx_queues; i++) {
7357 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7358
7359 if (tx_ring->next_to_use != tx_ring->next_to_clean)
7360 return true;
7361 }
7362
7363 for (i = 0; i < adapter->num_xdp_queues; i++) {
7364 struct ixgbe_ring *ring = adapter->xdp_ring[i];
7365
7366 if (ring->next_to_use != ring->next_to_clean)
7367 return true;
7368 }
7369
7370 return false;
7371}
7372
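/* Returns true when any VF Tx queue in a VMDq pool still reports
 * head != tail, meaning a VF has transmits the hardware hasn't drained.
 */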
7373static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
7374{
7375 struct ixgbe_hw *hw = &adapter->hw;
7376 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
7377 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
7379 int i, j;
7380
7381 if (!adapter->num_vfs)
7382 return false;
7383
	/* resetting the PF is only needed for MAC before X550 */
7385 if (hw->mac.type >= ixgbe_mac_X550)
7386 return false;
7387
7388 for (i = 0; i < adapter->num_vfs; i++) {
7389 for (j = 0; j < q_per_pool; j++) {
7390 u32 h, t;
7391
7392 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
7393 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
7394
7395 if (h != t)
7396 return true;
7397 }
7398 }
7399
7400 return false;
7401}
7402
/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/
7407static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
7408{
7409 if (!netif_carrier_ok(adapter->netdev)) {
7410 if (ixgbe_ring_tx_pending(adapter) ||
7411 ixgbe_vf_tx_pending(adapter)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
7417 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
7418 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
7419 }
7420 }
7421}
7422
7423#ifdef CONFIG_PCI_IOV
7424static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
7425{
7426 struct ixgbe_hw *hw = &adapter->hw;
7427 struct pci_dev *pdev = adapter->pdev;
7428 unsigned int vf;
7429 u32 gpc;
7430
7431 if (!(netif_carrier_ok(adapter->netdev)))
7432 return;
7433
7434 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
	if (gpc) /* If incrementing then no need for the check below */
7436 return;
	/* Check to see if a bad DMA write target from an errant or
	 * malicious VF has caused a PCIe error.  If so then we can
	 * issue a VFLR to the offending VF(s) and then resume without
	 * requesting a full slot reset.
	 */
7442
7443 if (!pdev)
7444 return;
7445
	/* check status reg for all VFs owned by this PF */
7447 for (vf = 0; vf < adapter->num_vfs; ++vf) {
7448 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
7449 u16 status_reg;
7450
7451 if (!vfdev)
7452 continue;
7453 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
7454 if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
7455 status_reg & PCI_STATUS_REC_MASTER_ABORT)
7456 pcie_flr(vfdev);
7457 }
7458}
7459
7460static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
7461{
7462 u32 ssvpc;
7463
	/* Do not perform spoof check for 82598 or if not in IOV mode */
7465 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
7466 adapter->num_vfs == 0)
7467 return;
7468
7469 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
7470
	/*
	 * ssvpc register is cleared on read, if zero then no
	 * spoofed packets in the last interval.
	 */
7475 if (!ssvpc)
7476 return;
7477
7478 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
7479}
7480#else
7481static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
7482{
7483}
7484
7485static void
7486ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
7487{
7488}
7489#endif
7490
/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/
7496static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
7497{
	/* if interface is down, removing or resetting, do nothing */
7499 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7500 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7501 test_bit(__IXGBE_RESETTING, &adapter->state))
7502 return;
7503
7504 ixgbe_watchdog_update_link(adapter);
7505
7506 if (adapter->link_up)
7507 ixgbe_watchdog_link_is_up(adapter);
7508 else
7509 ixgbe_watchdog_link_is_down(adapter);
7510
7511 ixgbe_check_for_bad_vf(adapter);
7512 ixgbe_spoof_check(adapter);
7513 ixgbe_update_stats(adapter);
7514
7515 ixgbe_watchdog_flush_tx(adapter);
7516}
7517
/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/
7522static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
7523{
7524 struct ixgbe_hw *hw = &adapter->hw;
7525 s32 err;
7526

	/* not searching for SFP so there is nothing to do here */
7528 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
7529 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7530 return;
7531
7532 if (adapter->sfp_poll_time &&
7533 time_after(adapter->sfp_poll_time, jiffies))
7534 return;
7535
	/* someone else is in init, wait until next service event */
7537 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7538 return;
7539
7540 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
7541
7542 err = hw->phy.ops.identify_sfp(hw);
7543 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7544 goto sfp_out;
7545
7546 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* If no cable is present, then we need to reset
		 * the next time we find a good cable. */
7549 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
7550 }
7551
	/* exit on error */
7553 if (err)
7554 goto sfp_out;
7555
	/* exit if reset not needed */
7557 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7558 goto sfp_out;
7559
7560 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
7561
	/*
	 * A module may be identified correctly, but the EEPROM may not have
	 * support for that module; setup_sfp() will fail in that case, so
	 * we should not allow that module to load.
	 */
7567 if (hw->mac.type == ixgbe_mac_82598EB)
7568 err = hw->phy.ops.reset(hw);
7569 else
7570 err = hw->mac.ops.setup_sfp(hw);
7571
7572 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7573 goto sfp_out;
7574
7575 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
7576 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
7577
7578sfp_out:
7579 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7580
7581 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
7582 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
		e_dev_err("failed to initialize because an unsupported SFP+ module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported module.\n");
7587 unregister_netdev(adapter->netdev);
7588 }
7589}
7590
/**
 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
 * @adapter: the ixgbe adapter structure
 **/
7595static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
7596{
7597 struct ixgbe_hw *hw = &adapter->hw;
7598 u32 cap_speed;
7599 u32 speed;
7600 bool autoneg = false;
7601
7602 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
7603 return;
7604
	/* someone else is in init, wait until next service event */
7606 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7607 return;
7608
7609 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
7610
7611 hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
7612
	/* advertise highest capable link speed */
7614 if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
7615 speed = IXGBE_LINK_SPEED_10GB_FULL;
7616 else
7617 speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
7618 IXGBE_LINK_SPEED_1GB_FULL);
7619
7620 if (hw->mac.ops.setup_link)
7621 hw->mac.ops.setup_link(hw, speed, true);
7622
7623 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
7624 adapter->link_check_timeout = jiffies;
7625 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7626}
7627
/**
 * ixgbe_service_timer - Timer Call-back
 * @t: pointer to timer_list structure
 **/
7632static void ixgbe_service_timer(struct timer_list *t)
7633{
7634 struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
7635 unsigned long next_event_offset;
7636
	/* poll faster when waiting for link */
7638 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
7639 next_event_offset = HZ / 10;
7640 else
7641 next_event_offset = HZ * 2;
7642
	/* Reset the timer */
7644 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7645
7646 ixgbe_service_event_schedule(adapter);
7647}
7648
7649static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
7650{
7651 struct ixgbe_hw *hw = &adapter->hw;
7652 u32 status;
7653
7654 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
7655 return;
7656
7657 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
7658
7659 if (!hw->phy.ops.handle_lasi)
7660 return;
7661
7662 status = hw->phy.ops.handle_lasi(&adapter->hw);
7663 if (status != IXGBE_ERR_OVERTEMP)
7664 return;
7665
7666 e_crit(drv, "%s\n", ixgbe_overheat_msg);
7667}
7668
7669static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
7670{
7671 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
7672 return;
7673
	/* If we're already down, removing or resetting, just bail */
7675 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7676 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7677 test_bit(__IXGBE_RESETTING, &adapter->state))
7678 return;
7679
7680 ixgbe_dump(adapter);
7681 netdev_err(adapter->netdev, "Reset adapter\n");
7682 adapter->tx_timeout_count++;
7683
7684 rtnl_lock();
7685 ixgbe_reinit_locked(adapter);
7686 rtnl_unlock();
7687}
7688
/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
7693static void ixgbe_service_task(struct work_struct *work)
7694{
7695 struct ixgbe_adapter *adapter = container_of(work,
7696 struct ixgbe_adapter,
7697 service_task);
7698 if (ixgbe_removed(adapter->hw.hw_addr)) {
7699 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
7700 rtnl_lock();
7701 ixgbe_down(adapter);
7702 rtnl_unlock();
7703 }
7704 ixgbe_service_event_complete(adapter);
7705 return;
7706 }
7707 if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
7708 rtnl_lock();
7709 adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
7710 udp_tunnel_get_rx_info(adapter->netdev);
7711 rtnl_unlock();
7712 }
7713 ixgbe_reset_subtask(adapter);
7714 ixgbe_phy_interrupt_subtask(adapter);
7715 ixgbe_sfp_detection_subtask(adapter);
7716 ixgbe_sfp_link_config_subtask(adapter);
7717 ixgbe_check_overtemp_subtask(adapter);
7718 ixgbe_watchdog_subtask(adapter);
7719 ixgbe_fdir_reinit_subtask(adapter);
7720 ixgbe_check_hang_subtask(adapter);
7721
7722 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
7723 ixgbe_ptp_overflow_check(adapter);
7724 if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
7725 ixgbe_ptp_rx_hang(adapter);
7726 ixgbe_ptp_tx_hang(adapter);
7727 }
7728
7729 ixgbe_service_event_complete(adapter);
7730}
7731
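/* ixgbe_tso - build the TSO context descriptor for @first
 *
 * Returns 1 when a TSO context descriptor was written, 0 when the skb
 * needs no segmentation offload, or a negative errno on failure.
 */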
7732static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7733 struct ixgbe_tx_buffer *first,
7734 u8 *hdr_len,
7735 struct ixgbe_ipsec_tx_data *itd)
7736{
7737 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
7738 struct sk_buff *skb = first->skb;
7739 union {
7740 struct iphdr *v4;
7741 struct ipv6hdr *v6;
7742 unsigned char *hdr;
7743 } ip;
7744 union {
7745 struct tcphdr *tcp;
7746 unsigned char *hdr;
7747 } l4;
7748 u32 paylen, l4_offset;
7749 u32 fceof_saidx = 0;
7750 int err;
7751
7752 if (skb->ip_summed != CHECKSUM_PARTIAL)
7753 return 0;
7754
7755 if (!skb_is_gso(skb))
7756 return 0;
7757
7758 err = skb_cow_head(skb, 0);
7759 if (err < 0)
7760 return err;
7761
7762 if (eth_p_mpls(first->protocol))
7763 ip.hdr = skb_inner_network_header(skb);
7764 else
7765 ip.hdr = skb_network_header(skb);
7766 l4.hdr = skb_checksum_start(skb);
7767
7768
7769 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
7770
	/* initialize outer IP header fields */
7772 if (ip.v4->version == 4) {
7773 unsigned char *csum_start = skb_checksum_start(skb);
7774 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
7775 int len = csum_start - trans_start;

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header, so set to
		 * a reverse csum if needed, else init check to 0.
		 */
7781 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
7782 csum_fold(csum_partial(trans_start,
7783 len, 0)) : 0;
7784 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7785
7786 ip.v4->tot_len = 0;
7787 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7788 IXGBE_TX_FLAGS_CSUM |
7789 IXGBE_TX_FLAGS_IPV4;
7790 } else {
7791 ip.v6->payload_len = 0;
7792 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7793 IXGBE_TX_FLAGS_CSUM;
7794 }
7795
	/* determine offset of inner transport header */
7797 l4_offset = l4.hdr - skb->data;
7798
	/* compute length of segmentation header */
7800 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
7801
	/* remove payload length from inner checksum */
7803 paylen = skb->len - l4_offset;
7804 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
7805
	/* update gso size and bytecount with header size */
7807 first->gso_segs = skb_shinfo(skb)->gso_segs;
7808 first->bytecount += (first->gso_segs - 1) * *hdr_len;
7809
	/* mss_l4len_id: use 0 as index for TSO */
7811 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
7812 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
7813
7814 fceof_saidx |= itd->sa_idx;
7815 type_tucmd |= itd->flags | itd->trailer_len;
7816
	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
7818 vlan_macip_lens = l4.hdr - ip.hdr;
7819 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
7820 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7821
7822 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
7823 mss_l4len_idx);
7824
7825 return 1;
7826}
7827
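/* Returns true when the skb's checksum start offset lands exactly on an
 * SCTP header found by walking the IPv6 extension-header chain.
 */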
7828static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
7829{
7830 unsigned int offset = 0;
7831
7832 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
7833
7834 return offset == skb_checksum_start_offset(skb);
7835}
7836
7837static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
7838 struct ixgbe_tx_buffer *first,
7839 struct ixgbe_ipsec_tx_data *itd)
7840{
7841 struct sk_buff *skb = first->skb;
7842 u32 vlan_macip_lens = 0;
7843 u32 fceof_saidx = 0;
7844 u32 type_tucmd = 0;
7845
7846 if (skb->ip_summed != CHECKSUM_PARTIAL) {
7847csum_failed:
7848 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
7849 IXGBE_TX_FLAGS_CC)))
7850 return;
7851 goto no_csum;
7852 }
7853
7854 switch (skb->csum_offset) {
7855 case offsetof(struct tcphdr, check):
7856 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
		/* fall through */
7858 case offsetof(struct udphdr, check):
7859 break;
7860 case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
7862 if (((first->protocol == htons(ETH_P_IP)) &&
7863 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
7864 ((first->protocol == htons(ETH_P_IPV6)) &&
7865 ixgbe_ipv6_csum_is_sctp(skb))) {
7866 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
7867 break;
7868 }
		/* fall through */
7870 default:
7871 skb_checksum_help(skb);
7872 goto csum_failed;
7873 }
7874
	/* update TX checksum flag */
7876 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
7877 vlan_macip_lens = skb_checksum_start_offset(skb) -
7878 skb_network_offset(skb);
7879no_csum:
	/* vlan_macip_lens: MACLEN, VLAN tag */
7881 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
7882 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7883
7884 fceof_saidx |= itd->sa_idx;
7885 type_tucmd |= itd->flags | itd->trailer_len;
7886
7887 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
7888}
7889
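/* Scale the bits selected by _flag in _input so they land in the bit
 * position of _result; with constant power-of-two arguments the
 * multiply/divide reduces to a shift at compile time.
 */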
7890#define IXGBE_SET_FLAG(_input, _flag, _result) \
7891 ((_flag <= _result) ? \
7892 ((u32)(_input & _flag) * (_result / _flag)) : \
7893 ((u32)(_input & _flag) / (_flag / _result)))
7894
7895static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
7896{
	/* set type for advanced descriptor with frame checksum insertion */
7898 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
7899 IXGBE_ADVTXD_DCMD_DEXT |
7900 IXGBE_ADVTXD_DCMD_IFCS;
7901
	/* set HW vlan bit if vlan is present */
7903 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
7904 IXGBE_ADVTXD_DCMD_VLE);
7905
	/* set segmentation enable bits for TSO/FSO */
7907 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
7908 IXGBE_ADVTXD_DCMD_TSE);
7909
	/* set timestamp bit if present */
7911 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
7912 IXGBE_ADVTXD_MAC_TSTAMP);
7913
	/* insert frame checksum */
7915 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
7916
7917 return cmd_type;
7918}
7919
7920static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
7921 u32 tx_flags, unsigned int paylen)
7922{
7923 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
7924
	/* enable L4 checksum for TSO and TX checksum offload */
7926 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7927 IXGBE_TX_FLAGS_CSUM,
7928 IXGBE_ADVTXD_POPTS_TXSM);
7929
	/* enable IPv4 checksum for TSO */
7931 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7932 IXGBE_TX_FLAGS_IPV4,
7933 IXGBE_ADVTXD_POPTS_IXSM);
7934
	/* enable IPsec */
7936 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7937 IXGBE_TX_FLAGS_IPSEC,
7938 IXGBE_ADVTXD_POPTS_IPSEC);
7939
	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
7944 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7945 IXGBE_TX_FLAGS_CC,
7946 IXGBE_ADVTXD_CC);
7947
7948 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
7949}
7950
7951static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7952{
7953 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
7959 smp_mb();
7960
	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
7964 if (likely(ixgbe_desc_unused(tx_ring) < size))
7965 return -EBUSY;
7966
	/* A reprieve! - use start_queue because it doesn't call schedule */
7968 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
7969 ++tx_ring->tx_stats.restart_queue;
7970 return 0;
7971}
7972
7973static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7974{
7975 if (likely(ixgbe_desc_unused(tx_ring) >= size))
7976 return 0;
7977
7978 return __ixgbe_maybe_stop_tx(tx_ring, size);
7979}
7980
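/* Bits set on the last data descriptor of every packet: End of Packet
 * plus Report Status so the hardware writes back completion.
 */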
7981#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
7982 IXGBE_TXD_CMD_RS)
7983
7984static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
7985 struct ixgbe_tx_buffer *first,
7986 const u8 hdr_len)
7987{
7988 struct sk_buff *skb = first->skb;
7989 struct ixgbe_tx_buffer *tx_buffer;
7990 union ixgbe_adv_tx_desc *tx_desc;
7991 struct skb_frag_struct *frag;
7992 dma_addr_t dma;
7993 unsigned int data_len, size;
7994 u32 tx_flags = first->tx_flags;
7995 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
7996 u16 i = tx_ring->next_to_use;
7997
7998 tx_desc = IXGBE_TX_DESC(tx_ring, i);
7999
8000 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
8001
8002 size = skb_headlen(skb);
8003 data_len = skb->data_len;
8004
8005#ifdef IXGBE_FCOE
8006 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
8007 if (data_len < sizeof(struct fcoe_crc_eof)) {
8008 size -= sizeof(struct fcoe_crc_eof) - data_len;
8009 data_len = 0;
8010 } else {
8011 data_len -= sizeof(struct fcoe_crc_eof);
8012 }
8013 }
8014
8015#endif
8016 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
8017
8018 tx_buffer = first;
8019
8020 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
8021 if (dma_mapping_error(tx_ring->dev, dma))
8022 goto dma_error;
8023
		/* record length, and DMA address */
8025 dma_unmap_len_set(tx_buffer, len, size);
8026 dma_unmap_addr_set(tx_buffer, dma, dma);
8027
8028 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8029
8030 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
8031 tx_desc->read.cmd_type_len =
8032 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
8033
8034 i++;
8035 tx_desc++;
8036 if (i == tx_ring->count) {
8037 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8038 i = 0;
8039 }
8040 tx_desc->read.olinfo_status = 0;
8041
8042 dma += IXGBE_MAX_DATA_PER_TXD;
8043 size -= IXGBE_MAX_DATA_PER_TXD;
8044
8045 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8046 }
8047
8048 if (likely(!data_len))
8049 break;
8050
8051 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
8052
8053 i++;
8054 tx_desc++;
8055 if (i == tx_ring->count) {
8056 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8057 i = 0;
8058 }
8059 tx_desc->read.olinfo_status = 0;
8060
8061#ifdef IXGBE_FCOE
8062 size = min_t(unsigned int, data_len, skb_frag_size(frag));
8063#else
8064 size = skb_frag_size(frag);
8065#endif
8066 data_len -= size;
8067
8068 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
8069 DMA_TO_DEVICE);
8070
8071 tx_buffer = &tx_ring->tx_buffer_info[i];
8072 }
8073
	/* write last descriptor with RS and EOP bits */
8075 cmd_type |= size | IXGBE_TXD_CMD;
8076 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8077
8078 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
8079
	/* set the timestamp */
8081 first->time_stamp = jiffies;
8082
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
8091 wmb();
8092
	/* set next_to_watch value indicating a packet is present */
8094 first->next_to_watch = tx_desc;
8095
8096 i++;
8097 if (i == tx_ring->count)
8098 i = 0;
8099
8100 tx_ring->next_to_use = i;
8101
8102 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
8103
8104 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
8105 writel(i, tx_ring->tail);
8106
		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
8110 mmiowb();
8111 }
8112
8113 return 0;
8114dma_error:
8115 dev_err(tx_ring->dev, "TX DMA map failed\n");
8116
	/* clear dma mappings for failed tx_buffer_info map */
8118 for (;;) {
8119 tx_buffer = &tx_ring->tx_buffer_info[i];
8120 if (dma_unmap_len(tx_buffer, len))
8121 dma_unmap_page(tx_ring->dev,
8122 dma_unmap_addr(tx_buffer, dma),
8123 dma_unmap_len(tx_buffer, len),
8124 DMA_TO_DEVICE);
8125 dma_unmap_len_set(tx_buffer, len, 0);
8126 if (tx_buffer == first)
8127 break;
8128 if (i == 0)
8129 i += tx_ring->count;
8130 i--;
8131 }
8132
8133 dev_kfree_skb_any(first->skb);
8134 first->skb = NULL;
8135
8136 tx_ring->next_to_use = i;
8137
8138 return -1;
8139}
8140
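/* Sample outgoing TCP flows and, once the ring's sample threshold is hit,
 * program a flow director (ATR) signature filter so receive traffic for
 * the same flow is steered back to this queue.
 */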
8141static void ixgbe_atr(struct ixgbe_ring *ring,
8142 struct ixgbe_tx_buffer *first)
8143{
8144 struct ixgbe_q_vector *q_vector = ring->q_vector;
8145 union ixgbe_atr_hash_dword input = { .dword = 0 };
8146 union ixgbe_atr_hash_dword common = { .dword = 0 };
8147 union {
8148 unsigned char *network;
8149 struct iphdr *ipv4;
8150 struct ipv6hdr *ipv6;
8151 } hdr;
8152 struct tcphdr *th;
8153 unsigned int hlen;
8154 struct sk_buff *skb;
8155 __be16 vlan_id;
8156 int l4_proto;
8157
	/* if ring doesn't have an interrupt vector, cannot perform ATR */
8159 if (!q_vector)
8160 return;
8161
	/* do nothing if sampling is disabled */
8163 if (!ring->atr_sample_rate)
8164 return;
8165
8166 ring->atr_count++;
8167
	/* currently only IPv4/IPv6 with TCP is supported */
8169 if ((first->protocol != htons(ETH_P_IP)) &&
8170 (first->protocol != htons(ETH_P_IPV6)))
8171 return;
8172
	/* snag network header to get L4 type and addresses */
8174 skb = first->skb;
8175 hdr.network = skb_network_header(skb);
8176 if (unlikely(hdr.network <= skb->data))
8177 return;
8178 if (skb->encapsulation &&
8179 first->protocol == htons(ETH_P_IP) &&
8180 hdr.ipv4->protocol == IPPROTO_UDP) {
8181 struct ixgbe_adapter *adapter = q_vector->adapter;
8182
8183 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8184 VXLAN_HEADROOM))
8185 return;
8186
		/* verify the port is recognized as VXLAN or GENEVE */
8188 if (adapter->vxlan_port &&
8189 udp_hdr(skb)->dest == adapter->vxlan_port)
8190 hdr.network = skb_inner_network_header(skb);
8191
8192 if (adapter->geneve_port &&
8193 udp_hdr(skb)->dest == adapter->geneve_port)
8194 hdr.network = skb_inner_network_header(skb);
8195 }
8196
	/* Make sure we have at least [minimum IPv4 header + TCP]
	 * or [IPv6 header] bytes
	 */
8200 if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
8201 return;
8202
	/* Currently only IPv4/IPv6 with TCP is supported */
8204 switch (hdr.ipv4->version) {
8205 case IPVERSION:
		/* access ihl as u8 to avoid unaligned access on ia64 */
8207 hlen = (hdr.network[0] & 0x0F) << 2;
8208 l4_proto = hdr.ipv4->protocol;
8209 break;
8210 case 6:
8211 hlen = hdr.network - skb->data;
8212 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
8213 hlen -= hdr.network - skb->data;
8214 break;
8215 default:
8216 return;
8217 }
8218
8219 if (l4_proto != IPPROTO_TCP)
8220 return;
8221
8222 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8223 hlen + sizeof(struct tcphdr)))
8224 return;
8225
8226 th = (struct tcphdr *)(hdr.network + hlen);
8227
	/* skip this packet since the socket is closing */
8229 if (th->fin)
8230 return;
8231
	/* sample on all syn packets or once every atr sample count */
8233 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
8234 return;
8235
	/* reset sample count */
8237 ring->atr_count = 0;
8238
8239 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
8240
	/*
	 * src and dst are inverted, think how the receiver sees them
	 *
	 * The input is broken into two sections, a non-compressed section
	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
	 * will be encoded using vm_pool as an 8 bit vm pool, the 16 bit
	 * vlan id, and the 4 bit flow_type.
	 */
8248 input.formatted.vlan_id = vlan_id;
8249
	/*
	 * since src port and flex bytes occupy the same word XOR them together
	 * and write the value to source port rather than flex bytes.
	 */
8254 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
8255 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
8256 else
8257 common.port.src ^= th->dest ^ first->protocol;
8258 common.port.dst ^= th->source;
8259
8260 switch (hdr.ipv4->version) {
8261 case IPVERSION:
8262 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
8263 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
8264 break;
8265 case 6:
8266 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
8267 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
8268 hdr.ipv6->saddr.s6_addr32[1] ^
8269 hdr.ipv6->saddr.s6_addr32[2] ^
8270 hdr.ipv6->saddr.s6_addr32[3] ^
8271 hdr.ipv6->daddr.s6_addr32[0] ^
8272 hdr.ipv6->daddr.s6_addr32[1] ^
8273 hdr.ipv6->daddr.s6_addr32[2] ^
8274 hdr.ipv6->daddr.s6_addr32[3];
8275 break;
8276 default:
8277 break;
8278 }
8279
8280 if (hdr.network != skb_network_header(skb))
8281 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
8282
	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
8284 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
8285 input, common, ring->queue_index);
8286}
8287
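/* Queue selection: macvlan offload traffic hashes into its pool's queue
 * range, FCoE/FIP frames (when enabled) stay inside the FCoE queue region,
 * and everything else falls back to the stack's default selection.
 */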
8288static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
8289 void *accel_priv, select_queue_fallback_t fallback)
8290{
8291 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
8292 struct ixgbe_adapter *adapter;
8293 int txq;
8294#ifdef IXGBE_FCOE
8295 struct ixgbe_ring_feature *f;
8296#endif
8297
8298 if (fwd_adapter) {
8299 adapter = netdev_priv(dev);
8300 txq = reciprocal_scale(skb_get_hash(skb),
8301 adapter->num_rx_queues_per_pool);
8302
8303 return txq + fwd_adapter->tx_base_queue;
8304 }
8305
8306#ifdef IXGBE_FCOE

	/*
	 * only execute the code below if protocol is FCoE
	 * or FIP and we have FCoE enabled on the adapter
	 */
8312 switch (vlan_get_protocol(skb)) {
8313 case htons(ETH_P_FCOE):
8314 case htons(ETH_P_FIP):
8315 adapter = netdev_priv(dev);
8316
8317 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
8318 break;
		/* fall through */
8320 default:
8321 return fallback(dev, skb);
8322 }
8323
8324 f = &adapter->ring_feature[RING_F_FCOE];
8325
8326 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
8327 smp_processor_id();
8328
8329 while (txq >= f->indices)
8330 txq -= f->indices;
8331
8332 return txq + f->offset;
8333#else
8334 return fallback(dev, skb);
8335#endif
8336}
8337
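/* Transmit a single XDP buffer on this CPU's XDP ring. Returns
 * IXGBE_XDP_TX on success, or IXGBE_XDP_CONSUMED when the ring is full
 * or DMA mapping fails, in which case the caller drops the buffer.
 */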
8338static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
8339 struct xdp_buff *xdp)
8340{
8341 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
8342 struct ixgbe_tx_buffer *tx_buffer;
8343 union ixgbe_adv_tx_desc *tx_desc;
8344 u32 len, cmd_type;
8345 dma_addr_t dma;
8346 u16 i;
8347
8348 len = xdp->data_end - xdp->data;
8349
8350 if (unlikely(!ixgbe_desc_unused(ring)))
8351 return IXGBE_XDP_CONSUMED;
8352
8353 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
8354 if (dma_mapping_error(ring->dev, dma))
8355 return IXGBE_XDP_CONSUMED;
8356
	/* record the location of the first descriptor for this packet */
8358 tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
8359 tx_buffer->bytecount = len;
8360 tx_buffer->gso_segs = 1;
8361 tx_buffer->protocol = 0;
8362
8363 i = ring->next_to_use;
8364 tx_desc = IXGBE_TX_DESC(ring, i);
8365
8366 dma_unmap_len_set(tx_buffer, len, len);
8367 dma_unmap_addr_set(tx_buffer, dma, dma);
8368 tx_buffer->data = xdp->data;
8369 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8370
	/* put descriptor type bits */
8372 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8373 IXGBE_ADVTXD_DCMD_DEXT |
8374 IXGBE_ADVTXD_DCMD_IFCS;
8375 cmd_type |= len | IXGBE_TXD_CMD;
8376 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8377 tx_desc->read.olinfo_status =
8378 cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
8379
	/* Avoid any potential race with xdp_xmit and cleanup */
8381 smp_wmb();
8382
	/* set next_to_watch value indicating a packet is present */
8384 i++;
8385 if (i == ring->count)
8386 i = 0;
8387
8388 tx_buffer->next_to_watch = tx_desc;
8389 ring->next_to_use = i;
8390
8391 return IXGBE_XDP_TX;
8392}
8393
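/* Main transmit routine: reserves descriptors, resolves VLAN, PTP and
 * DCB flags, applies TSO or Tx checksum offload, optionally samples the
 * flow for ATR, then hands the frame to ixgbe_tx_map().
 */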
8394netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8395 struct ixgbe_adapter *adapter,
8396 struct ixgbe_ring *tx_ring)
8397{
8398 struct ixgbe_tx_buffer *first;
8399 int tso;
8400 u32 tx_flags = 0;
8401 unsigned short f;
8402 u16 count = TXD_USE_COUNT(skb_headlen(skb));
8403 struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
8404 __be16 protocol = skb->protocol;
8405 u8 hdr_len = 0;
8406
	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
8414 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
8415 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
8416
8417 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8418 tx_ring->tx_stats.tx_busy++;
8419 return NETDEV_TX_BUSY;
8420 }
8421
	/* record the location of the first descriptor for this packet */
8423 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
8424 first->skb = skb;
8425 first->bytecount = skb->len;
8426 first->gso_segs = 1;
8427
	/* if we have a HW VLAN tag being added default to the HW one */
8429 if (skb_vlan_tag_present(skb)) {
8430 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
8431 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN check the next protocol and store the tag */
8433 } else if (protocol == htons(ETH_P_8021Q)) {
8434 struct vlan_hdr *vhdr, _vhdr;
8435 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
8436 if (!vhdr)
8437 goto out_drop;
8438
8439 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
8440 IXGBE_TX_FLAGS_VLAN_SHIFT;
8441 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
8442 }
8443 protocol = vlan_get_protocol(skb);
8444
8445 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
8446 adapter->ptp_clock) {
8447 if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
8448 &adapter->state)) {
8449 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8450 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
8451
			/* schedule check for Tx timestamp */
8453 adapter->ptp_tx_skb = skb_get(skb);
8454 adapter->ptp_tx_start = jiffies;
8455 schedule_work(&adapter->ptp_tx_work);
8456 } else {
8457 adapter->tx_hwtstamp_skipped++;
8458 }
8459 }
8460
8461 skb_tx_timestamp(skb);
8462
8463#ifdef CONFIG_PCI_IOV
	/*
	 * Use the l2switch_enable flag - would be false if the DMA
	 * Tx switch had been disabled.
	 */
8468 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8469 tx_flags |= IXGBE_TX_FLAGS_CC;
8470
8471#endif

	/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
8473 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8474 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
8475 (skb->priority != TC_PRIO_CONTROL))) {
8476 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
8477 tx_flags |= (skb->priority & 0x7) <<
8478 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
8479 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
8480 struct vlan_ethhdr *vhdr;
8481
8482 if (skb_cow_head(skb, 0))
8483 goto out_drop;
8484 vhdr = (struct vlan_ethhdr *)skb->data;
8485 vhdr->h_vlan_TCI = htons(tx_flags >>
8486 IXGBE_TX_FLAGS_VLAN_SHIFT);
8487 } else {
8488 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8489 }
8490 }
8491
	/* record initial flags and protocol */
8493 first->tx_flags = tx_flags;
8494 first->protocol = protocol;
8495
8496#ifdef IXGBE_FCOE
	/* setup tx offload for FCoE */
8498 if ((protocol == htons(ETH_P_FCOE)) &&
8499 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
8500 tso = ixgbe_fso(tx_ring, first, &hdr_len);
8501 if (tso < 0)
8502 goto out_drop;
8503
8504 goto xmit_fcoe;
8505 }
8506
8507#endif
8508
8509#ifdef CONFIG_XFRM_OFFLOAD
8510 if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8511 goto out_drop;
8512#endif
8513 tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
8514 if (tso < 0)
8515 goto out_drop;
8516 else if (!tso)
8517 ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
8518
	/* add the ATR filter if ATR is on */
8520 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
8521 ixgbe_atr(tx_ring, first);
8522
8523#ifdef IXGBE_FCOE
8524xmit_fcoe:
8525#endif
8526 if (ixgbe_tx_map(tx_ring, first, hdr_len))
8527 goto cleanup_tx_timestamp;
8528
8529 return NETDEV_TX_OK;
8530
8531out_drop:
8532 dev_kfree_skb_any(first->skb);
8533 first->skb = NULL;
8534cleanup_tx_timestamp:
8535 if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
8536 dev_kfree_skb_any(adapter->ptp_tx_skb);
8537 adapter->ptp_tx_skb = NULL;
8538 cancel_work_sync(&adapter->ptp_tx_work);
8539 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
8540 }
8541
8542 return NETDEV_TX_OK;
8543}
8544
8545static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
8546 struct net_device *netdev,
8547 struct ixgbe_ring *ring)
8548{
8549 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8550 struct ixgbe_ring *tx_ring;
8551
	/*
	 * The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
8556 if (skb_put_padto(skb, 17))
8557 return NETDEV_TX_OK;
8558
8559 tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
8560
8561 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
8562}
8563
8564static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
8565 struct net_device *netdev)
8566{
8567 return __ixgbe_xmit_frame(skb, netdev, NULL);
8568}
8569
/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
8577static int ixgbe_set_mac(struct net_device *netdev, void *p)
8578{
8579 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8580 struct ixgbe_hw *hw = &adapter->hw;
8581 struct sockaddr *addr = p;
8582
8583 if (!is_valid_ether_addr(addr->sa_data))
8584 return -EADDRNOTAVAIL;
8585
8586 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
8587 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
8588
8589 ixgbe_mac_set_default_filter(adapter);
8590
8591 return 0;
8592}
8593
8594static int
8595ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
8596{
8597 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8598 struct ixgbe_hw *hw = &adapter->hw;
8599 u16 value;
8600 int rc;
8601
8602 if (prtad != hw->phy.mdio.prtad)
8603 return -EINVAL;
8604 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
8605 if (!rc)
8606 rc = value;
8607 return rc;
8608}
8609
8610static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
8611 u16 addr, u16 value)
8612{
8613 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8614 struct ixgbe_hw *hw = &adapter->hw;
8615
8616 if (prtad != hw->phy.mdio.prtad)
8617 return -EINVAL;
8618 return hw->phy.ops.write_reg(hw, addr, devad, value);
8619}
8620
8621static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
8622{
8623 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8624
8625 switch (cmd) {
8626 case SIOCSHWTSTAMP:
8627 return ixgbe_ptp_set_ts_config(adapter, req);
8628 case SIOCGHWTSTAMP:
8629 return ixgbe_ptp_get_ts_config(adapter, req);
8630 case SIOCGMIIPHY:
8631 if (!adapter->hw.phy.ops.read_reg)
8632 return -EOPNOTSUPP;
		/* fall through */
8634 default:
8635 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8636 }
8637}
8638
/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
8646static int ixgbe_add_sanmac_netdev(struct net_device *dev)
8647{
8648 int err = 0;
8649 struct ixgbe_adapter *adapter = netdev_priv(dev);
8650 struct ixgbe_hw *hw = &adapter->hw;
8651
8652 if (is_valid_ether_addr(hw->mac.san_addr)) {
8653 rtnl_lock();
8654 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
8655 rtnl_unlock();
8656
		/* update SAN MAC vmdq pool selection */
8658 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
8659 }
8660 return err;
8661}
8662
/**
 * ixgbe_del_sanmac_netdev - Remove the SAN MAC address from the
 * corresponding netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
8670static int ixgbe_del_sanmac_netdev(struct net_device *dev)
8671{
8672 int err = 0;
8673 struct ixgbe_adapter *adapter = netdev_priv(dev);
8674 struct ixgbe_mac_info *mac = &adapter->hw.mac;
8675
8676 if (is_valid_ether_addr(mac->san_addr)) {
8677 rtnl_lock();
8678 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
8679 rtnl_unlock();
8680 }
8681 return err;
8682}
8683
8684#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
8690static void ixgbe_netpoll(struct net_device *netdev)
8691{
8692 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8693 int i;
8694
	/* if interface is down do nothing */
8696 if (test_bit(__IXGBE_DOWN, &adapter->state))
8697 return;
8698
	/* loop through and schedule all active queues */
8700 for (i = 0; i < adapter->num_q_vectors; i++)
8701 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
8702}
8703
8704#endif
8705
8706static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
8707 struct ixgbe_ring *ring)
8708{
8709 u64 bytes, packets;
8710 unsigned int start;
8711
8712 if (ring) {
8713 do {
8714 start = u64_stats_fetch_begin_irq(&ring->syncp);
8715 packets = ring->stats.packets;
8716 bytes = ring->stats.bytes;
8717 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8718 stats->tx_packets += packets;
8719 stats->tx_bytes += bytes;
8720 }
8721}
8722
8723static void ixgbe_get_stats64(struct net_device *netdev,
8724 struct rtnl_link_stats64 *stats)
8725{
8726 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8727 int i;
8728
8729 rcu_read_lock();
8730 for (i = 0; i < adapter->num_rx_queues; i++) {
8731 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
8732 u64 bytes, packets;
8733 unsigned int start;
8734
8735 if (ring) {
8736 do {
8737 start = u64_stats_fetch_begin_irq(&ring->syncp);
8738 packets = ring->stats.packets;
8739 bytes = ring->stats.bytes;
8740 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8741 stats->rx_packets += packets;
8742 stats->rx_bytes += bytes;
8743 }
8744 }
8745
8746 for (i = 0; i < adapter->num_tx_queues; i++) {
8747 struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
8748
8749 ixgbe_get_ring_stats64(stats, ring);
8750 }
8751 for (i = 0; i < adapter->num_xdp_queues; i++) {
8752 struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
8753
8754 ixgbe_get_ring_stats64(stats, ring);
8755 }
8756 rcu_read_unlock();
8757
	/* following stats updated by ixgbe_watchdog_task() */
8759 stats->multicast = netdev->stats.multicast;
8760 stats->rx_errors = netdev->stats.rx_errors;
8761 stats->rx_length_errors = netdev->stats.rx_length_errors;
8762 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
8763 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
8764}
8765
8766#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
 * @adapter: Pointer to adapter struct
 * @tc: number of traffic classes currently enabled
 *
 * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
 * 802.1Q priority maps to a packet buffer that exists.
 */
8775static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
8776{
8777 struct ixgbe_hw *hw = &adapter->hw;
8778 u32 reg, rsave;
8779 int i;
8780
	/* 82598 have a static priority to TC mapping that can not
	 * be changed so no validation is needed.
	 */
8784 if (hw->mac.type == ixgbe_mac_82598EB)
8785 return;
8786
8787 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
8788 rsave = reg;
8789
8790 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8791 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
8792
		/* If up2tc is out of bounds default to zero */
8794 if (up2tc > tc)
8795 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
8796 }
8797
8798 if (reg != rsave)
8799 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
8802}
8803
/**
 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
 * @adapter: Pointer to adapter struct
 *
 * Populate the netdev user priority to tc map
 */
8810static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
8811{
8812 struct net_device *dev = adapter->netdev;
8813 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
8814 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
8815 u8 prio;
8816
8817 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
8818 u8 tc = 0;
8819
8820 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
8821 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
8822 else if (ets)
8823 tc = ets->prio_tc[prio];
8824
8825 netdev_set_prio_tc_map(dev, prio, tc);
8826 }
8827}
8828
8829#endif
8830
/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 */
8836int ixgbe_setup_tc(struct net_device *dev, u8 tc)
8837{
8838 struct ixgbe_adapter *adapter = netdev_priv(dev);
8839 struct ixgbe_hw *hw = &adapter->hw;
8840
	/* Hardware supports up to 8 traffic classes */
8842 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
8843 return -EINVAL;
8844
8845 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
8846 return -EINVAL;
8847
	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
8852 if (netif_running(dev))
8853 ixgbe_close(dev);
8854 else
8855 ixgbe_reset(adapter);
8856
8857 ixgbe_clear_interrupt_scheme(adapter);
8858
8859#ifdef CONFIG_IXGBE_DCB
8860 if (tc) {
8861 netdev_set_num_tc(dev, tc);
8862 ixgbe_set_prio_tc_map(adapter);
8863
8864 adapter->hw_tcs = tc;
8865 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
8866
8867 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
8868 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
8869 adapter->hw.fc.requested_mode = ixgbe_fc_none;
8870 }
8871 } else {
8872 netdev_reset_tc(dev);

		/* To support macvlan offload we have to use num_tc to
		 * restrict the queues that can be used by the device.
		 * By doing this we can avoid reporting a false number of
		 * queues.
		 */
8879 if (!tc && adapter->num_rx_pools > 1)
8880 netdev_set_num_tc(dev, 1);
8881
8882 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
8883 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
8884
8885 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
8886 adapter->hw_tcs = tc;
8887
8888 adapter->temp_dcb_cfg.pfc_mode_enable = false;
8889 adapter->dcb_cfg.pfc_mode_enable = false;
8890 }
8891
8892 ixgbe_validate_rtr(adapter, tc);
8893
8894#endif
8895 ixgbe_init_interrupt_scheme(adapter);
8896
8897 if (netif_running(dev))
8898 return ixgbe_open(dev);
8899
8900 return 0;
8901}
8902
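/* Delete a cls_u32 knode: clear it from its parent link's child map,
 * tear down the whole child hash table when the knode is itself a link,
 * or remove the single matching flow director entry otherwise.
 */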
8903static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
8904 struct tc_cls_u32_offload *cls)
8905{
8906 u32 hdl = cls->knode.handle;
8907 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
8908 u32 loc = cls->knode.handle & 0xfffff;
8909 int err = 0, i, j;
8910 struct ixgbe_jump_table *jump = NULL;
8911
8912 if (loc > IXGBE_MAX_HW_ENTRIES)
8913 return -EINVAL;
8914
8915 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
8916 return -EINVAL;
8917
	/* Clear this filter in the link data it is associated with */
8919 if (uhtid != 0x800) {
8920 jump = adapter->jump_tables[uhtid];
8921 if (!jump)
8922 return -EINVAL;
8923 if (!test_bit(loc - 1, jump->child_loc_map))
8924 return -EINVAL;
8925 clear_bit(loc - 1, jump->child_loc_map);
8926 }
8927
	/* Check if the filter being deleted is a link */
8929 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
8930 jump = adapter->jump_tables[i];
8931 if (jump && jump->link_hdl == hdl) {
			/* Delete filters in the hardware in the child hash
			 * table associated with this link
			 */
8935 for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
8936 if (!test_bit(j, jump->child_loc_map))
8937 continue;
8938 spin_lock(&adapter->fdir_perfect_lock);
8939 err = ixgbe_update_ethtool_fdir_entry(adapter,
8940 NULL,
8941 j + 1);
8942 spin_unlock(&adapter->fdir_perfect_lock);
8943 clear_bit(j, jump->child_loc_map);
8944 }
8945
8946 kfree(jump->input);
8947 kfree(jump->mask);
8948 kfree(jump);
8949 adapter->jump_tables[i] = NULL;
8950 return err;
8951 }
8952 }
8953
8954 spin_lock(&adapter->fdir_perfect_lock);
8955 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
8956 spin_unlock(&adapter->fdir_perfect_lock);
8957 return err;
8958}
8959
8960static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
8961 struct tc_cls_u32_offload *cls)
8962{
8963 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8964
8965 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8966 return -EINVAL;
8967
	/* The ixgbe hardware does not support hash tables at the moment,
	 * so abort when asked to offload one.
	 */
8971 if (cls->hnode.divisor > 0)
8972 return -EINVAL;
8973
8974 set_bit(uhtid - 1, &adapter->tables);
8975 return 0;
8976}
8977
8978static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
8979 struct tc_cls_u32_offload *cls)
8980{
8981 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8982
8983 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8984 return -EINVAL;
8985
8986 clear_bit(uhtid - 1, &adapter->tables);
8987 return 0;
8988}
8989
8990#ifdef CONFIG_NET_CLS_ACT
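/* Context carried through netdev_walk_all_upper_dev_rcu() while looking
 * for the offloaded macvlan that matches @ifindex.
 */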
8991struct upper_walk_data {
8992 struct ixgbe_adapter *adapter;
8993 u64 action;
8994 int ifindex;
8995 u8 queue;
8996};
8997
8998static int get_macvlan_queue(struct net_device *upper, void *_data)
8999{
9000 if (netif_is_macvlan(upper)) {
9001 struct macvlan_dev *dfwd = netdev_priv(upper);
9002 struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
9003 struct upper_walk_data *data = _data;
9004 struct ixgbe_adapter *adapter = data->adapter;
9005 int ifindex = data->ifindex;
9006
9007 if (vadapter && vadapter->netdev->ifindex == ifindex) {
9008 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
9009 data->action = data->queue;
9010 return 1;
9011 }
9012 }
9013
9014 return 0;
9015}
9016
9017static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
9018 u8 *queue, u64 *action)
9019{
9020 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
9021 unsigned int num_vfs = adapter->num_vfs, vf;
9022 struct upper_walk_data data;
9023 struct net_device *upper;
9024
	/* redirect to a SRIOV VF */
9026 for (vf = 0; vf < num_vfs; ++vf) {
9027 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
9028 if (upper->ifindex == ifindex) {
9029 *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
9030 *action = vf + 1;
9031 *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
9032 return 0;
9033 }
9034 }
9035
	/* redirect to an offloaded macvlan netdev */
9037 data.adapter = adapter;
9038 data.ifindex = ifindex;
9039 data.action = 0;
9040 data.queue = 0;
9041 if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
9042 get_macvlan_queue, &data)) {
9043 *action = data.action;
9044 *queue = data.queue;
9045
9046 return 0;
9047 }
9048
9049 return -EINVAL;
9050}
9051
9052static int parse_tc_actions(struct ixgbe_adapter *adapter,
9053 struct tcf_exts *exts, u64 *action, u8 *queue)
9054{
9055 const struct tc_action *a;
9056 LIST_HEAD(actions);
9057
9058 if (!tcf_exts_has_actions(exts))
9059 return -EINVAL;
9060
9061 tcf_exts_to_list(exts, &actions);
9062 list_for_each_entry(a, &actions, list) {
9063
		/* Drop action */
9065 if (is_tcf_gact_shot(a)) {
9066 *action = IXGBE_FDIR_DROP_QUEUE;
9067 *queue = IXGBE_FDIR_DROP_QUEUE;
9068 return 0;
9069 }
9070
		/* Redirect to a VF or an offloaded macvlan */
9072 if (is_tcf_mirred_egress_redirect(a)) {
9073 struct net_device *dev = tcf_mirred_dev(a);
9074
9075 if (!dev)
9076 return -EINVAL;
9077 return handle_redirect_action(adapter, dev->ifindex,
9078 queue, action);
9079 }
9080
9081 return -EINVAL;
9082 }
9083
9084 return -EINVAL;
9085}
9086#else
9087static int parse_tc_actions(struct ixgbe_adapter *adapter,
9088 struct tcf_exts *exts, u64 *action, u8 *queue)
9089{
9090 return -EINVAL;
9091}
9092#endif
9093
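/* Convert the u32 match keys into a flow director input/mask pair using
 * the per-header @field_ptr table; when @nexthdr is supplied the keys
 * must also contain the expected jump key for the link to be valid.
 */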
9094static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
9095 union ixgbe_atr_input *mask,
9096 struct tc_cls_u32_offload *cls,
9097 struct ixgbe_mat_field *field_ptr,
9098 struct ixgbe_nexthdr *nexthdr)
9099{
9100 int i, j, off;
9101 __be32 val, m;
9102 bool found_entry = false, found_jump_field = false;
9103
9104 for (i = 0; i < cls->knode.sel->nkeys; i++) {
9105 off = cls->knode.sel->keys[i].off;
9106 val = cls->knode.sel->keys[i].val;
9107 m = cls->knode.sel->keys[i].mask;
9108
9109 for (j = 0; field_ptr[j].val; j++) {
9110 if (field_ptr[j].off == off) {
9111 field_ptr[j].val(input, mask, val, m);
9112 input->filter.formatted.flow_type |=
9113 field_ptr[j].type;
9114 found_entry = true;
9115 break;
9116 }
9117 }
9118 if (nexthdr) {
9119 if (nexthdr->off == cls->knode.sel->keys[i].off &&
9120 nexthdr->val == cls->knode.sel->keys[i].val &&
9121 nexthdr->mask == cls->knode.sel->keys[i].mask)
9122 found_jump_field = true;
9123 else
9124 continue;
9125 }
9126 }
9127
9128 if (nexthdr && !found_jump_field)
9129 return -EINVAL;
9130
9131 if (!found_entry)
9132 return 0;
9133
9134 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
9135 IXGBE_ATR_L4TYPE_MASK;
9136
9137 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
9138 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
9139
9140 return 0;
9141}
9142
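/* Offload one cls_u32 knode into the flow director: record a link (jump)
 * node for later child matches, or build and write a perfect filter at
 * the requested location.
 */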
9143static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
9144 struct tc_cls_u32_offload *cls)
9145{
9146 __be16 protocol = cls->common.protocol;
9147 u32 loc = cls->knode.handle & 0xfffff;
9148 struct ixgbe_hw *hw = &adapter->hw;
9149 struct ixgbe_mat_field *field_ptr;
9150 struct ixgbe_fdir_filter *input = NULL;
9151 union ixgbe_atr_input *mask = NULL;
9152 struct ixgbe_jump_table *jump = NULL;
9153 int i, err = -EINVAL;
9154 u8 queue;
9155 u32 uhtid, link_uhtid;
9156
9157 uhtid = TC_U32_USERHTID(cls->knode.handle);
9158 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
9159
	/* At the moment cls_u32 jumps to network layer and skips past
	 * L2 headers. The canonical method to match L2 frames is to use
	 * negative values. However this is error prone at best but really
	 * just broken because there is no way to "know" what sort of hdr
	 * is in front of the network layer. Fix cls_u32 to support L2
	 * headers when needed.
	 */
9167 if (protocol != htons(ETH_P_IP))
9168 return err;
9169
9170 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
9171 e_err(drv, "Location out of range\n");
9172 return err;
9173 }
9174
	/* cls u32 is a graph starting at root node 0x800. The driver tracks
	 * links and also the fields used to advance the parser across each
	 * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map
	 * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h
	 * To add support for new nodes update ixgbe_model.h parse structures
	 * this function _should_ be generic try not to hardcode values here.
	 */
9182 if (uhtid == 0x800) {
9183 field_ptr = (adapter->jump_tables[0])->mat;
9184 } else {
9185 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9186 return err;
9187 if (!adapter->jump_tables[uhtid])
9188 return err;
9189 field_ptr = (adapter->jump_tables[uhtid])->mat;
9190 }
9191
9192 if (!field_ptr)
9193 return err;
9194
	/* At this point we know the field_ptr is valid and need to either
	 * build a cls_u32 link or attach a filter. Adding a link to a
	 * handle that does not exist is invalid, and the same goes for
	 * adding rules to handles that don't exist.
	 */
9201 if (link_uhtid) {
9202 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
9203
9204 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
9205 return err;
9206
9207 if (!test_bit(link_uhtid - 1, &adapter->tables))
9208 return err;
9209
		/* Multiple filters as links to a single hash table are not
		 * supported. To add a new filter with link to a single hash
		 * table the prior filter must first be deleted.
		 */
9215 if (adapter->jump_tables[link_uhtid] &&
9216 (adapter->jump_tables[link_uhtid])->link_hdl) {
9217 e_err(drv, "Link filter exists for link: %x\n",
9218 link_uhtid);
9219 return err;
9220 }
9221
9222 for (i = 0; nexthdr[i].jump; i++) {
9223 if (nexthdr[i].o != cls->knode.sel->offoff ||
9224 nexthdr[i].s != cls->knode.sel->offshift ||
9225 nexthdr[i].m != cls->knode.sel->offmask)
9226 return err;
9227
9228 jump = kzalloc(sizeof(*jump), GFP_KERNEL);
9229 if (!jump)
9230 return -ENOMEM;
9231 input = kzalloc(sizeof(*input), GFP_KERNEL);
9232 if (!input) {
9233 err = -ENOMEM;
9234 goto free_jump;
9235 }
9236 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9237 if (!mask) {
9238 err = -ENOMEM;
9239 goto free_input;
9240 }
9241 jump->input = input;
9242 jump->mask = mask;
9243 jump->link_hdl = cls->knode.handle;
9244
9245 err = ixgbe_clsu32_build_input(input, mask, cls,
9246 field_ptr, &nexthdr[i]);
9247 if (!err) {
9248 jump->mat = nexthdr[i].jump;
9249 adapter->jump_tables[link_uhtid] = jump;
9250 break;
9251 }
9252 }
9253 return 0;
9254 }
9255
9256 input = kzalloc(sizeof(*input), GFP_KERNEL);
9257 if (!input)
9258 return -ENOMEM;
9259 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9260 if (!mask) {
9261 err = -ENOMEM;
9262 goto free_input;
9263 }
9264
9265 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
9266 if ((adapter->jump_tables[uhtid])->input)
9267 memcpy(input, (adapter->jump_tables[uhtid])->input,
9268 sizeof(*input));
9269 if ((adapter->jump_tables[uhtid])->mask)
9270 memcpy(mask, (adapter->jump_tables[uhtid])->mask,
9271 sizeof(*mask));
9272
		/* Lookup in all child hash tables if this location is already
		 * filled with a filter
		 */
9276 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9277 struct ixgbe_jump_table *link = adapter->jump_tables[i];
9278
9279 if (link && (test_bit(loc - 1, link->child_loc_map))) {
9280 e_err(drv, "Filter exists in location: %x\n",
9281 loc);
9282 err = -EINVAL;
9283 goto err_out;
9284 }
9285 }
9286 }
9287 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
9288 if (err)
9289 goto err_out;
9290
9291 err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
9292 &queue);
9293 if (err < 0)
9294 goto err_out;
9295
9296 input->sw_idx = loc;
9297
9298 spin_lock(&adapter->fdir_perfect_lock);
9299
9300 if (hlist_empty(&adapter->fdir_filter_list)) {
9301 memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
9302 err = ixgbe_fdir_set_input_mask_82599(hw, mask);
9303 if (err)
9304 goto err_out_w_lock;
9305 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
9306 err = -EINVAL;
9307 goto err_out_w_lock;
9308 }
9309
9310 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
9311 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
9312 input->sw_idx, queue);
9313 if (!err)
9314 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
9315 spin_unlock(&adapter->fdir_perfect_lock);
9316
9317 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
9318 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
9319
9320 kfree(mask);
9321 return err;
9322err_out_w_lock:
9323 spin_unlock(&adapter->fdir_perfect_lock);
9324err_out:
9325 kfree(mask);
9326free_input:
9327 kfree(input);
9328free_jump:
9329 kfree(jump);
9330 return err;
9331}
9332
9333static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
9334 struct tc_cls_u32_offload *cls_u32)
9335{
9336 switch (cls_u32->command) {
9337 case TC_CLSU32_NEW_KNODE:
9338 case TC_CLSU32_REPLACE_KNODE:
9339 return ixgbe_configure_clsu32(adapter, cls_u32);
9340 case TC_CLSU32_DELETE_KNODE:
9341 return ixgbe_delete_clsu32(adapter, cls_u32);
9342 case TC_CLSU32_NEW_HNODE:
9343 case TC_CLSU32_REPLACE_HNODE:
9344 return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32);
9345 case TC_CLSU32_DELETE_HNODE:
9346 return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32);
9347 default:
9348 return -EOPNOTSUPP;
9349 }
9350}
9351
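/* TC block callback: dispatch offloadable classifier commands (currently
 * only cls_u32) for this adapter.
 */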
9352static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9353 void *cb_priv)
9354{
9355 struct ixgbe_adapter *adapter = cb_priv;
9356
9357 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
9358 return -EOPNOTSUPP;
9359
9360 switch (type) {
9361 case TC_SETUP_CLSU32:
9362 return ixgbe_setup_tc_cls_u32(adapter, type_data);
9363 default:
9364 return -EOPNOTSUPP;
9365 }
9366}
9367
9368static int ixgbe_setup_tc_block(struct net_device *dev,
9369 struct tc_block_offload *f)
9370{
9371 struct ixgbe_adapter *adapter = netdev_priv(dev);
9372
9373 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9374 return -EOPNOTSUPP;
9375
9376 switch (f->command) {
9377 case TC_BLOCK_BIND:
9378 return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
9379 adapter, adapter);
9380 case TC_BLOCK_UNBIND:
9381 tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
9382 adapter);
9383 return 0;
9384 default:
9385 return -EOPNOTSUPP;
9386 }
9387}
9388
9389static int ixgbe_setup_tc_mqprio(struct net_device *dev,
9390 struct tc_mqprio_qopt *mqprio)
9391{
9392 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9393 return ixgbe_setup_tc(dev, mqprio->num_tc);
9394}
9395
9396static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
9397 void *type_data)
9398{
9399 switch (type) {
9400 case TC_SETUP_BLOCK:
9401 return ixgbe_setup_tc_block(dev, type_data);
9402 case TC_SETUP_QDISC_MQPRIO:
9403 return ixgbe_setup_tc_mqprio(dev, type_data);
9404 default:
9405 return -EOPNOTSUPP;
9406 }
9407}
9408
9409#ifdef CONFIG_PCI_IOV
9410void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
9411{
9412 struct net_device *netdev = adapter->netdev;
9413
9414 rtnl_lock();
9415 ixgbe_setup_tc(netdev, adapter->hw_tcs);
9416 rtnl_unlock();
9417}
9418
9419#endif
9420void ixgbe_do_reset(struct net_device *netdev)
9421{
9422 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9423
9424 if (netif_running(netdev))
9425 ixgbe_reinit_locked(adapter);
9426 else
9427 ixgbe_reset(adapter);
9428}
9429
9430static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
9431 netdev_features_t features)
9432{
9433 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9434
	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
9436 if (!(features & NETIF_F_RXCSUM))
9437 features &= ~NETIF_F_LRO;
9438
	/* Turn off LRO if not RSC capable */
9440 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
9441 features &= ~NETIF_F_LRO;
9442
9443 return features;
9444}
9445
9446static int ixgbe_set_features(struct net_device *netdev,
9447 netdev_features_t features)
9448{
9449 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9450 netdev_features_t changed = netdev->features ^ features;
9451 bool need_reset = false;
9452
	/* Make sure RSC matches LRO, reset if change */
9454 if (!(features & NETIF_F_LRO)) {
9455 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9456 need_reset = true;
9457 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
9458 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
9459 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
9460 if (adapter->rx_itr_setting == 1 ||
9461 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
9462 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
9463 need_reset = true;
9464 } else if ((changed ^ features) & NETIF_F_LRO) {
			e_info(probe, "rx-usecs set too low, disabling RSC\n");
9467 }
9468 }
9469
	/*
	 * Check if Flow Director n-tuple support or hw_tc support was
	 * enabled or disabled.  If the state changed, we need to reset.
	 */
9474 if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
		/* turn off ATR, enable perfect filters and reset */
9476 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
9477 need_reset = true;
9478
9479 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
9480 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9481 } else {
		/* turn off perfect filters, enable ATR and reset */
9483 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
9484 need_reset = true;
9485
9486 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9487
		/* We cannot enable ATR if SR-IOV is enabled */
		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
		    /* We cannot enable ATR if we have 2 or more tcs */
		    (adapter->hw_tcs > 1) ||
		    /* We cannot enable ATR if RSS is disabled */
		    (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
		    /* A sample rate of 0 indicates ATR disabled */
		    (!adapter->atr_sample_rate))
			; /* do nothing, not supported */
		else /* otherwise supported and set the flag */
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
9499 }
9500
9501 if (changed & NETIF_F_RXALL)
9502 need_reset = true;
9503
9504 netdev->features = features;
9505
9506 if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
9507 if (features & NETIF_F_RXCSUM) {
9508 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9509 } else {
9510 u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9511
9512 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9513 }
9514 }
9515
9516 if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
9517 if (features & NETIF_F_RXCSUM) {
9518 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9519 } else {
9520 u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9521
9522 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9523 }
9524 }
9525
9526 if (need_reset)
9527 ixgbe_do_reset(netdev);
9528 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
9529 NETIF_F_HW_VLAN_CTAG_FILTER))
9530 ixgbe_set_rx_mode(netdev);
9531
9532 return 0;
9533}
9534
/**
 * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
 * @dev: The port's netdev
 * @ti: Tunnel endpoint information
 **/
9540static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
9541 struct udp_tunnel_info *ti)
9542{
9543 struct ixgbe_adapter *adapter = netdev_priv(dev);
9544 struct ixgbe_hw *hw = &adapter->hw;
9545 __be16 port = ti->port;
9546 u32 port_shift = 0;
9547 u32 reg;
9548
9549 if (ti->sa_family != AF_INET)
9550 return;
9551
9552 switch (ti->type) {
9553 case UDP_TUNNEL_TYPE_VXLAN:
9554 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9555 return;
9556
9557 if (adapter->vxlan_port == port)
9558 return;
9559
9560 if (adapter->vxlan_port) {
9561 netdev_info(dev,
9562 "VXLAN port %d set, not adding port %d\n",
9563 ntohs(adapter->vxlan_port),
9564 ntohs(port));
9565 return;
9566 }
9567
9568 adapter->vxlan_port = port;
9569 break;
9570 case UDP_TUNNEL_TYPE_GENEVE:
9571 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9572 return;
9573
9574 if (adapter->geneve_port == port)
9575 return;
9576
9577 if (adapter->geneve_port) {
9578 netdev_info(dev,
9579 "GENEVE port %d set, not adding port %d\n",
9580 ntohs(adapter->geneve_port),
9581 ntohs(port));
9582 return;
9583 }
9584
9585 port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
9586 adapter->geneve_port = port;
9587 break;
9588 default:
9589 return;
9590 }
9591
9592 reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
9593 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
9594}
9595
/**
 * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel
 * ports
 * @dev: The port's netdev
 * @ti: Tunnel endpoint information
 **/
9601static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
9602 struct udp_tunnel_info *ti)
9603{
9604 struct ixgbe_adapter *adapter = netdev_priv(dev);
9605 u32 port_mask;
9606
9607 if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
9608 ti->type != UDP_TUNNEL_TYPE_GENEVE)
9609 return;
9610
9611 if (ti->sa_family != AF_INET)
9612 return;
9613
9614 switch (ti->type) {
9615 case UDP_TUNNEL_TYPE_VXLAN:
9616 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9617 return;
9618
9619 if (adapter->vxlan_port != ti->port) {
9620 netdev_info(dev, "VXLAN port %d not found\n",
9621 ntohs(ti->port));
9622 return;
9623 }
9624
9625 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9626 break;
9627 case UDP_TUNNEL_TYPE_GENEVE:
9628 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9629 return;
9630
9631 if (adapter->geneve_port != ti->port) {
9632 netdev_info(dev, "GENEVE port %d not found\n",
9633 ntohs(ti->port));
9634 return;
9635 }
9636
9637 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9638 break;
9639 default:
9640 return;
9641 }
9642
9643 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9644 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9645}
9646
9647static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9648 struct net_device *dev,
9649 const unsigned char *addr, u16 vid,
9650 u16 flags)
9651{
	/* guarantee we can provide a unique filter for the unicast address */
9653 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
9654 struct ixgbe_adapter *adapter = netdev_priv(dev);
9655 u16 pool = VMDQ_P(0);
9656
9657 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
9658 return -ENOMEM;
9659 }
9660
9661 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
9662}
9663
/**
 * ixgbe_configure_bridge_mode - set various bridge modes
 * @adapter: the private structure
 * @mode: requested bridge mode
 *
 * Configure the hardware settings required for the requested bridge mode.
 **/
9671static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
9672 __u16 mode)
9673{
9674 struct ixgbe_hw *hw = &adapter->hw;
9675 unsigned int p, num_pools;
9676 u32 vmdctl;
9677
9678 switch (mode) {
9679 case BRIDGE_MODE_VEPA:
		/* disable Tx loopback, rely on switch hairpin mode */
9681 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
9682
		/* must enable Rx switching replication to allow multicast
		 * packet reception on all VF pools
		 */
9687 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9688 vmdctl |= IXGBE_VT_CTL_REPLEN;
9689 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9690
		/* enable Rx source address pruning. Note, this requires
		 * replication to be enabled or else it does nothing.
		 */
9694 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9695 for (p = 0; p < num_pools; p++) {
9696 if (hw->mac.ops.set_source_address_pruning)
9697 hw->mac.ops.set_source_address_pruning(hw,
9698 true,
9699 p);
9700 }
9701 break;
9702 case BRIDGE_MODE_VEB:
		/* enable Tx loopback for internal VF/PF communication */
9704 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
9705 IXGBE_PFDTXGSWC_VT_LBEN);
9706
		/* disable Rx switching replication unless we have SR-IOV
		 * virtual functions
		 */
9710 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9711 if (!adapter->num_vfs)
9712 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
9713 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9714
		/* disable Rx source address pruning, since we don't expect to
		 * be receiving external loopback of our transmitted frames
		 */
9718 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9719 for (p = 0; p < num_pools; p++) {
9720 if (hw->mac.ops.set_source_address_pruning)
9721 hw->mac.ops.set_source_address_pruning(hw,
9722 false,
9723 p);
9724 }
9725 break;
9726 default:
9727 return -EINVAL;
9728 }
9729
9730 adapter->bridge_mode = mode;
9731
9732 e_info(drv, "enabling bridge mode: %s\n",
9733 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9734
9735 return 0;
9736}
9737
9738static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
9739 struct nlmsghdr *nlh, u16 flags)
9740{
9741 struct ixgbe_adapter *adapter = netdev_priv(dev);
9742 struct nlattr *attr, *br_spec;
9743 int rem;
9744
9745 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9746 return -EOPNOTSUPP;
9747
9748 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9749 if (!br_spec)
9750 return -EINVAL;
9751
9752 nla_for_each_nested(attr, br_spec, rem) {
9753 int status;
9754 __u16 mode;
9755
9756 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9757 continue;
9758
9759 if (nla_len(attr) < sizeof(mode))
9760 return -EINVAL;
9761
9762 mode = nla_get_u16(attr);
9763 status = ixgbe_configure_bridge_mode(adapter, mode);
9764 if (status)
9765 return status;
9766
9767 break;
9768 }
9769
9770 return 0;
9771}
9772
9773static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9774 struct net_device *dev,
9775 u32 filter_mask, int nlflags)
9776{
9777 struct ixgbe_adapter *adapter = netdev_priv(dev);
9778
9779 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9780 return 0;
9781
9782 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
9783 adapter->bridge_mode, 0, 0, nlflags,
9784 filter_mask, NULL);
9785}
9786
9787static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
9788{
9789 struct ixgbe_fwd_adapter *fwd_adapter = NULL;
9790 struct ixgbe_adapter *adapter = netdev_priv(pdev);
9791 int used_pools = adapter->num_vfs + adapter->num_rx_pools;
9792 int tcs = adapter->hw_tcs ? : 1;
9793 unsigned int limit;
9794 int pool, err;
9795
	/* Hardware has a limited number of available pools. Each VF, and the
	 * PF require a pool. Check to ensure we don't attempt to use more
	 * than the available number of pools.
	 */
9800 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
9801 return ERR_PTR(-EINVAL);
9802
9803 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
9804 adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
9805 (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
9806 return ERR_PTR(-EBUSY);
9807
9808 fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
9809 if (!fwd_adapter)
9810 return ERR_PTR(-ENOMEM);
9811
9812 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9813 set_bit(pool, adapter->fwd_bitmask);
9814 limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
9815
	/* Enable VMDq flag so device will be set in VM mode */
9817 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
9818 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
9819
9820 fwd_adapter->pool = pool;
9821 fwd_adapter->real_adapter = adapter;
9822
	/* Force reinit of ring allocation with VMDQ enabled */
9824 err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
9825
9826 if (!err && netif_running(pdev))
9827 err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
9828
9829 if (!err)
9830 return fwd_adapter;
9831
	/* unwind counter and free adapter struct */
9833 netdev_info(pdev,
9834 "%s: dfwd hardware acceleration failed\n", vdev->name);
9835 clear_bit(pool, adapter->fwd_bitmask);
9836 kfree(fwd_adapter);
9837 return ERR_PTR(err);
9838}
9839
9840static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
9841{
9842 struct ixgbe_fwd_adapter *accel = priv;
9843 struct ixgbe_adapter *adapter = accel->real_adapter;
9844 unsigned int rxbase = accel->rx_base_queue;
9845 unsigned int limit, i;
9846
	/* delete unicast filter associated with offloaded interface */
9848 ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
9849 VMDQ_P(accel->pool));
9850
	/* disable ability to receive packets for this pool */
9852 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
9853
	/* give the hardware time to drain any packets still in flight for
	 * this pool before the rings are flushed and reassigned
	 */
9857 usleep_range(10000, 20000);
9858
9859 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
9860 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
9861 struct ixgbe_q_vector *qv = ring->q_vector;
9862
		/* Make sure we aren't processing any packets and clear
		 * netdev to shut down the ring.
		 */
9866 if (netif_running(adapter->netdev))
9867 napi_synchronize(&qv->napi);
9868 ring->netdev = NULL;
9869 }
9870
9871 clear_bit(accel->pool, adapter->fwd_bitmask);
9872 limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9873 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
9874
	/* go back to full RSS if we're done with our VMQs */
9876 if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
9877 int rss = min_t(int, ixgbe_max_rss_indices(adapter),
9878 num_online_cpus());
9879
9880 adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
9881 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
9882 adapter->ring_feature[RING_F_RSS].limit = rss;
9883 }
9884
9885 ixgbe_setup_tc(pdev, adapter->hw_tcs);
9886 netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
9887 accel->pool, adapter->num_rx_pools,
9888 accel->rx_base_queue,
9889 accel->rx_base_queue +
9890 adapter->num_rx_queues_per_pool);
9891 kfree(accel);
9892}
9893
9894#define IXGBE_MAX_MAC_HDR_LEN 127
9895#define IXGBE_MAX_NETWORK_HDR_LEN 511
9896
9897static netdev_features_t
9898ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
9899 netdev_features_t features)
9900{
9901 unsigned int network_hdr_len, mac_hdr_len;
9902
	/* Make certain the headers can be described by a context descriptor */
9904 mac_hdr_len = skb_network_header(skb) - skb->data;
9905 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
9906 return features & ~(NETIF_F_HW_CSUM |
9907 NETIF_F_SCTP_CRC |
9908 NETIF_F_HW_VLAN_CTAG_TX |
9909 NETIF_F_TSO |
9910 NETIF_F_TSO6);
9911
9912 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
9913 if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
9914 return features & ~(NETIF_F_HW_CSUM |
9915 NETIF_F_SCTP_CRC |
9916 NETIF_F_TSO |
9917 NETIF_F_TSO6);
9918
	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 * IPsec offload sets skb->encapsulation but still can handle
	 * the TSO, so it's the exception.
	 */
9924 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
9925#ifdef CONFIG_XFRM
9926 if (!skb->sp)
9927#endif
9928 features &= ~NETIF_F_TSO;
9929 }
9930
9931 return features;
9932}
9933
9934static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
9935{
9936 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
9937 struct ixgbe_adapter *adapter = netdev_priv(dev);
9938 struct bpf_prog *old_prog;
9939
9940 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
9941 return -EINVAL;
9942
9943 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
9944 return -EINVAL;
9945
	/* verify ixgbe ring attributes are sufficient for XDP */
9947 for (i = 0; i < adapter->num_rx_queues; i++) {
9948 struct ixgbe_ring *ring = adapter->rx_ring[i];
9949
9950 if (ring_is_rsc_enabled(ring))
9951 return -EINVAL;
9952
9953 if (frame_size > ixgbe_rx_bufsz(ring))
9954 return -EINVAL;
9955 }
9956
9957 if (nr_cpu_ids > MAX_XDP_QUEUES)
9958 return -ENOMEM;
9959
9960 old_prog = xchg(&adapter->xdp_prog, prog);
9961
	/* If transitioning XDP modes reconfigure rings */
9963 if (!!prog != !!old_prog) {
9964 int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
9965
9966 if (err) {
9967 rcu_assign_pointer(adapter->xdp_prog, old_prog);
9968 return -EINVAL;
9969 }
9970 } else {
9971 for (i = 0; i < adapter->num_rx_queues; i++)
9972 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
9973 }
9974
9975 if (old_prog)
9976 bpf_prog_put(old_prog);
9977
9978 return 0;
9979}
9980
9981static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
9982{
9983 struct ixgbe_adapter *adapter = netdev_priv(dev);
9984
9985 switch (xdp->command) {
9986 case XDP_SETUP_PROG:
9987 return ixgbe_xdp_setup(dev, xdp->prog);
9988 case XDP_QUERY_PROG:
9989 xdp->prog_attached = !!(adapter->xdp_prog);
9990 xdp->prog_id = adapter->xdp_prog ?
9991 adapter->xdp_prog->aux->id : 0;
9992 return 0;
9993 default:
9994 return -EINVAL;
9995 }
9996}
9997
9998static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
9999{
10000 struct ixgbe_adapter *adapter = netdev_priv(dev);
10001 struct ixgbe_ring *ring;
10002 int err;
10003
10004 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
10005 return -ENETDOWN;
10006
	/* During program transitions it's possible adapter->xdp_prog is
	 * assigned but the ring has not been configured yet. In this case
	 * simply abort the transmit.
	 */
10010 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
10011 if (unlikely(!ring))
10012 return -ENXIO;
10013
10014 err = ixgbe_xmit_xdp_ring(adapter, xdp);
10015 if (err != IXGBE_XDP_TX)
10016 return -ENOSPC;
10017
10018 return 0;
10019}
10020
10021static void ixgbe_xdp_flush(struct net_device *dev)
10022{
10023 struct ixgbe_adapter *adapter = netdev_priv(dev);
10024 struct ixgbe_ring *ring;
10025
	/* During program transitions it's possible adapter->xdp_prog is
	 * assigned but the ring has not been configured yet. In this case
	 * simply abort the flush.
	 */
10029 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
10030 return;
10031
10032 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
10033 if (unlikely(!ring))
10034 return;
10035
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 */
10039 wmb();
10040 writel(ring->next_to_use, ring->tail);
10043}
10044
10045static const struct net_device_ops ixgbe_netdev_ops = {
10046 .ndo_open = ixgbe_open,
10047 .ndo_stop = ixgbe_close,
10048 .ndo_start_xmit = ixgbe_xmit_frame,
10049 .ndo_select_queue = ixgbe_select_queue,
10050 .ndo_set_rx_mode = ixgbe_set_rx_mode,
10051 .ndo_validate_addr = eth_validate_addr,
10052 .ndo_set_mac_address = ixgbe_set_mac,
10053 .ndo_change_mtu = ixgbe_change_mtu,
10054 .ndo_tx_timeout = ixgbe_tx_timeout,
10055 .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
10056 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
10057 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
10058 .ndo_do_ioctl = ixgbe_ioctl,
10059 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
10060 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
10061 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
10062 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
10063 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
10064 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
10065 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
10066 .ndo_get_stats64 = ixgbe_get_stats64,
10067 .ndo_setup_tc = __ixgbe_setup_tc,
10068#ifdef CONFIG_NET_POLL_CONTROLLER
10069 .ndo_poll_controller = ixgbe_netpoll,
10070#endif
10071#ifdef IXGBE_FCOE
10072 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
10073 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
10074 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
10075 .ndo_fcoe_enable = ixgbe_fcoe_enable,
10076 .ndo_fcoe_disable = ixgbe_fcoe_disable,
10077 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
10078 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
10079#endif
10080 .ndo_set_features = ixgbe_set_features,
10081 .ndo_fix_features = ixgbe_fix_features,
10082 .ndo_fdb_add = ixgbe_ndo_fdb_add,
10083 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
10084 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
10085 .ndo_dfwd_add_station = ixgbe_fwd_add,
10086 .ndo_dfwd_del_station = ixgbe_fwd_del,
10087 .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
10088 .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
10089 .ndo_features_check = ixgbe_features_check,
10090 .ndo_bpf = ixgbe_xdp,
10091 .ndo_xdp_xmit = ixgbe_xdp_xmit,
10092 .ndo_xdp_flush = ixgbe_xdp_flush,
10093};
10094
/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * This function enumerates the physical functions co-located on a single
 * slot, in order to determine how many ports a device has. This is most
 * useful in determining the required GT/s of PCIe bandwidth necessary for
 * optimal performance.
 **/
10104static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
10105{
10106 struct pci_dev *entry, *pdev = adapter->pdev;
10107 int physfns = 0;
10108
	/* Some cards can not use the generic count PCIe functions method,
	 * because they are behind a parent switch, so hardcode these to
	 * the correct number of functions.
	 */
10113 if (ixgbe_pcie_from_parent(&adapter->hw))
10114 physfns = 4;
10115
10116 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
		/* don't count virtual functions */
10118 if (entry->is_virtfn)
10119 continue;
10120
		/* When the devices on the bus don't all match our device ID,
		 * we can't reliably determine the correct number of
		 * functions. This can occur if a function has been direct
		 * attached to a virtual machine using VT-d, for example. In
		 * this case, simply return -1 to indicate this.
		 */
10127 if ((entry->vendor != pdev->vendor) ||
10128 (entry->device != pdev->device))
10129 return -1;
10130
10131 physfns++;
10132 }
10133
10134 return physfns;
10135}
10136
/**
 * ixgbe_wol_supported - Check whether device supports WoL
 * @adapter: the adapter private structure
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * This function is used by probe and ethtool to determine
 * which devices have WoL support.
 **/
10147bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
10148 u16 subdevice_id)
10149{
10150 struct ixgbe_hw *hw = &adapter->hw;
10151 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
10152
	/* WOL not supported on 82598 */
10154 if (hw->mac.type == ixgbe_mac_82598EB)
10155 return false;
10156
	/* check eeprom to see if WOL is enabled for X540 and newer */
10158 if (hw->mac.type >= ixgbe_mac_X540) {
10159 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
10160 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
10161 (hw->bus.func == 0)))
10162 return true;
10163 }
10164
	/* WOL is determined based on device IDs for 82599 MACs */
10166 switch (device_id) {
10167 case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices could support WoL */
10169 switch (subdevice_id) {
10170 case IXGBE_SUBDEV_ID_82599_560FLR:
10171 case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
10172 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
10173 case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
			/* only support first port */
10175 if (hw->bus.func != 0)
10176 break;
			/* fall through */
10178 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
10179 case IXGBE_SUBDEV_ID_82599_SFP:
10180 case IXGBE_SUBDEV_ID_82599_RNDC:
10181 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
10182 case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
10183 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
10184 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
10185 return true;
10186 }
10187 break;
10188 case IXGBE_DEV_ID_82599EN_SFP:
		/* Only this subdevice supports WoL */
10190 switch (subdevice_id) {
10191 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
10192 return true;
10193 }
10194 break;
10195 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WoL */
10197 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
10198 return true;
10199 break;
10200 case IXGBE_DEV_ID_82599_KX4:
10201 return true;
10202 default:
10203 break;
10204 }
10205
10206 return false;
10207}
10208
/**
 * ixgbe_set_fw_version - Set FW version
 * @adapter: the adapter private structure
 *
 * This function is used by probe and ethtool to determine the FW version
 * string to display. The FW version is taken from the EEPROM/NVM.
 */
10216static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
10217{
10218 struct ixgbe_hw *hw = &adapter->hw;
10219 struct ixgbe_nvm_version nvm_ver;
10220
10221 ixgbe_get_oem_prod_version(hw, &nvm_ver);
10222 if (nvm_ver.oem_valid) {
10223 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10224 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
10225 nvm_ver.oem_release);
10226 return;
10227 }
10228
10229 ixgbe_get_etk_id(hw, &nvm_ver);
10230 ixgbe_get_orom_version(hw, &nvm_ver);
10231
10232 if (nvm_ver.or_valid) {
10233 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10234 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
10235 nvm_ver.or_build, nvm_ver.or_patch);
10236 return;
10237 }
10238
	/* Set ETrack ID format */
10240 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10241 "0x%08x", nvm_ver.etk_id);
10242}
10243
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
10255static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10256{
10257 struct net_device *netdev;
10258 struct ixgbe_adapter *adapter = NULL;
10259 struct ixgbe_hw *hw;
10260 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
10261 int i, err, pci_using_dac, expected_gts;
10262 unsigned int indices = MAX_TX_QUEUES;
10263 u8 part_str[IXGBE_PBANUM_LENGTH];
10264 bool disable_dev = false;
10265#ifdef IXGBE_FCOE
10266 u16 device_caps;
10267#endif
10268 u32 eec;
10269
	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
10273 if (pdev->is_virtfn) {
10274 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
10275 pci_name(pdev), pdev->vendor, pdev->device);
10276 return -EINVAL;
10277 }
10278
10279 err = pci_enable_device_mem(pdev);
10280 if (err)
10281 return err;
10282
10283 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
10284 pci_using_dac = 1;
10285 } else {
10286 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10287 if (err) {
10288 dev_err(&pdev->dev,
10289 "No usable DMA configuration, aborting\n");
10290 goto err_dma;
10291 }
10292 pci_using_dac = 0;
10293 }
10294
10295 err = pci_request_mem_regions(pdev, ixgbe_driver_name);
10296 if (err) {
10297 dev_err(&pdev->dev,
10298 "pci_request_selected_regions failed 0x%x\n", err);
10299 goto err_pci_reg;
10300 }
10301
10302 pci_enable_pcie_error_reporting(pdev);
10303
10304 pci_set_master(pdev);
10305 pci_save_state(pdev);
10306
10307 if (ii->mac == ixgbe_mac_82598EB) {
10308#ifdef CONFIG_IXGBE_DCB
		/* 8 TC w/ 4 queues per TC */
10310 indices = 4 * MAX_TRAFFIC_CLASS;
10311#else
10312 indices = IXGBE_MAX_RSS_INDICES;
10313#endif
10314 }
10315
10316 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
10317 if (!netdev) {
10318 err = -ENOMEM;
10319 goto err_alloc_etherdev;
10320 }
10321
10322 SET_NETDEV_DEV(netdev, &pdev->dev);
10323
10324 adapter = netdev_priv(netdev);
10325
10326 adapter->netdev = netdev;
10327 adapter->pdev = pdev;
10328 hw = &adapter->hw;
10329 hw->back = adapter;
10330 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
10331
10332 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
10333 pci_resource_len(pdev, 0));
10334 adapter->io_addr = hw->hw_addr;
10335 if (!hw->hw_addr) {
10336 err = -EIO;
10337 goto err_ioremap;
10338 }
10339
10340 netdev->netdev_ops = &ixgbe_netdev_ops;
10341 ixgbe_set_ethtool_ops(netdev);
10342 netdev->watchdog_timeo = 5 * HZ;
10343 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
10344
	/* Setup hw api */
10346 hw->mac.ops = *ii->mac_ops;
10347 hw->mac.type = ii->mac;
10348 hw->mvals = ii->mvals;
10349 if (ii->link_ops)
10350 hw->link.ops = *ii->link_ops;
10351
	/* EEPROM */
10353 hw->eeprom.ops = *ii->eeprom_ops;
10354 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
10355 if (ixgbe_removed(hw->hw_addr)) {
10356 err = -EIO;
10357 goto err_ioremap;
10358 }
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
10360 if (!(eec & BIT(8)))
10361 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
10362
	/* PHY */
10364 hw->phy.ops = *ii->phy_ops;
10365 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
10367 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
10368 hw->phy.mdio.mmds = 0;
10369 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
10370 hw->phy.mdio.dev = netdev;
10371 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
10372 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
10373
	/* setup the private structure */
10375 err = ixgbe_sw_init(adapter, ii);
10376 if (err)
10377 goto err_sw_init;
10378
	/* Make sure the SWFW semaphore is in a valid state */
10380 if (hw->mac.ops.init_swfw_sync)
10381 hw->mac.ops.init_swfw_sync(hw);
10382
	/* Make it possible for the adapter to be woken up via WoL */
10384 switch (adapter->hw.mac.type) {
10385 case ixgbe_mac_82599EB:
10386 case ixgbe_mac_X540:
10387 case ixgbe_mac_X550:
10388 case ixgbe_mac_X550EM_x:
10389 case ixgbe_mac_x550em_a:
10390 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10391 break;
10392 default:
10393 break;
10394 }
10395
	/*
	 * If there is a fan on this device and it has failed, log the
	 * failure.
	 */
10400 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
10401 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
10402 if (esdp & IXGBE_ESDP_SDP1)
10403 e_crit(probe, "Fan has stopped, replace the adapter\n");
10404 }
10405
10406 if (allow_unsupported_sfp)
10407 hw->allow_unsupported_sfp = allow_unsupported_sfp;
10408
	/* reset_hw fills in the perm_addr as well */
10410 hw->phy.reset_if_overtemp = true;
10411 err = hw->mac.ops.reset_hw(hw);
10412 hw->phy.reset_if_overtemp = false;
10413 ixgbe_set_eee_capable(adapter);
10414 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
10415 err = 0;
10416 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
10417 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
10418 e_dev_err("Reload the driver after installing a supported module.\n");
10419 goto err_sw_init;
10420 } else if (err) {
10421 e_dev_err("HW Init failed: %d\n", err);
10422 goto err_sw_init;
10423 }
10424
10425#ifdef CONFIG_PCI_IOV
	/* SR-IOV is not supported on the 82598 */
10427 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
10428 goto skip_sriov;
10429
10430 ixgbe_init_mbx_params_pf(hw);
10431 hw->mbx.ops = ii->mbx_ops;
10432 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
10433 ixgbe_enable_sriov(adapter, max_vfs);
10434skip_sriov:
10435
10436#endif
10437 netdev->features = NETIF_F_SG |
10438 NETIF_F_TSO |
10439 NETIF_F_TSO6 |
10440 NETIF_F_RXHASH |
10441 NETIF_F_RXCSUM |
10442 NETIF_F_HW_CSUM;
10443
10444#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
10445 NETIF_F_GSO_GRE_CSUM | \
10446 NETIF_F_GSO_IPXIP4 | \
10447 NETIF_F_GSO_IPXIP6 | \
10448 NETIF_F_GSO_UDP_TUNNEL | \
10449 NETIF_F_GSO_UDP_TUNNEL_CSUM)
10450
10451 netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
10452 netdev->features |= NETIF_F_GSO_PARTIAL |
10453 IXGBE_GSO_PARTIAL_FEATURES;
10454
10455 if (hw->mac.type >= ixgbe_mac_82599EB)
10456 netdev->features |= NETIF_F_SCTP_CRC;
10457
	/* copy netdev features into list of user selectable features */
10459 netdev->hw_features |= netdev->features |
10460 NETIF_F_HW_VLAN_CTAG_FILTER |
10461 NETIF_F_HW_VLAN_CTAG_RX |
10462 NETIF_F_HW_VLAN_CTAG_TX |
10463 NETIF_F_RXALL |
10464 NETIF_F_HW_L2FW_DOFFLOAD;
10465
10466 if (hw->mac.type >= ixgbe_mac_82599EB)
10467 netdev->hw_features |= NETIF_F_NTUPLE |
10468 NETIF_F_HW_TC;
10469
10470 if (pci_using_dac)
10471 netdev->features |= NETIF_F_HIGHDMA;
10472
10473 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
10474 netdev->hw_enc_features |= netdev->vlan_features;
10475 netdev->mpls_features |= NETIF_F_SG |
10476 NETIF_F_TSO |
10477 NETIF_F_TSO6 |
10478 NETIF_F_HW_CSUM;
10479 netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
10480
	/* set this bit last since it cannot be part of hw_features */
10482 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
10483 NETIF_F_HW_VLAN_CTAG_RX |
10484 NETIF_F_HW_VLAN_CTAG_TX;
10485
10486 netdev->priv_flags |= IFF_UNICAST_FLT;
10487 netdev->priv_flags |= IFF_SUPP_NOFCS;
10488
	/* MTU range: 68 - 9710 */
10490 netdev->min_mtu = ETH_MIN_MTU;
10491 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
10492
10493#ifdef CONFIG_IXGBE_DCB
10494 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
10495 netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
10496#endif
10497
10498#ifdef IXGBE_FCOE
10499 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
10500 unsigned int fcoe_l;
10501
10502 if (hw->mac.ops.get_device_caps) {
10503 hw->mac.ops.get_device_caps(hw, &device_caps);
10504 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
10505 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
10506 }
10507
10508
10509 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
10510 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
10511
10512 netdev->features |= NETIF_F_FSO |
10513 NETIF_F_FCOE_CRC;
10514
10515 netdev->vlan_features |= NETIF_F_FSO |
10516 NETIF_F_FCOE_CRC |
10517 NETIF_F_FCOE_MTU;
10518 }
10519#endif
10520 ixgbe_init_ipsec_offload(adapter);
10521
10522 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
10523 netdev->hw_features |= NETIF_F_LRO;
10524 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
10525 netdev->features |= NETIF_F_LRO;
10526
	/* make sure the EEPROM is good */
10528 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
10529 e_dev_err("The EEPROM Checksum Is Not Valid\n");
10530 err = -EIO;
10531 goto err_sw_init;
10532 }
10533
10534 eth_platform_get_mac_address(&adapter->pdev->dev,
10535 adapter->hw.mac.perm_addr);
10536
10537 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
10538
10539 if (!is_valid_ether_addr(netdev->dev_addr)) {
10540 e_dev_err("invalid MAC address\n");
10541 err = -EIO;
10542 goto err_sw_init;
10543 }
10544
	/* Set hw->mac.addr to permanent MAC address */
10546 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
10547 ixgbe_mac_set_default_filter(adapter);
10548
10549 timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
10550
10551 if (ixgbe_removed(hw->hw_addr)) {
10552 err = -EIO;
10553 goto err_sw_init;
10554 }
10555 INIT_WORK(&adapter->service_task, ixgbe_service_task);
10556 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
10557 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
10558
10559 err = ixgbe_init_interrupt_scheme(adapter);
10560 if (err)
10561 goto err_sw_init;
10562
10563 for (i = 0; i < adapter->num_rx_queues; i++)
10564 u64_stats_init(&adapter->rx_ring[i]->syncp);
10565 for (i = 0; i < adapter->num_tx_queues; i++)
10566 u64_stats_init(&adapter->tx_ring[i]->syncp);
10567 for (i = 0; i < adapter->num_xdp_queues; i++)
10568 u64_stats_init(&adapter->xdp_ring[i]->syncp);
10569
	/* WOL not supported for all devices */
10571 adapter->wol = 0;
10572 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
10573 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
10574 pdev->subsystem_device);
10575 if (hw->wol_enabled)
10576 adapter->wol = IXGBE_WUFC_MAG;
10577
10578 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
10579
	/* save off EEPROM version number */
10581 ixgbe_set_fw_version(adapter);
10582
	/* pick up the PCI bus settings for reporting later */
10584 if (ixgbe_pcie_from_parent(hw))
10585 ixgbe_get_parent_bus_info(adapter);
10586 else
10587 hw->mac.ops.get_bus_info(hw);
10588
	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure no warning is displayed if it can't be fixed.
	 */
10594 switch (hw->mac.type) {
10595 case ixgbe_mac_82598EB:
10596 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
10597 break;
10598 default:
10599 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
10600 break;
10601 }
10602
	/* don't check link if we failed to enumerate functions */
10604 if (expected_gts > 0)
10605 ixgbe_check_minimum_link(adapter, expected_gts);
10606
10607 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
10608 if (err)
10609 strlcpy(part_str, "Unknown", sizeof(part_str));
10610 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
10611 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
10612 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
10613 part_str);
10614 else
10615 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
10616 hw->mac.type, hw->phy.type, part_str);
10617
10618 e_dev_info("%pM\n", netdev->dev_addr);
10619
	/* reset the hardware with the new settings */
10621 err = hw->mac.ops.start_hw(hw);
10622 if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
10630 }
10631 strcpy(netdev->name, "eth%d");
10632 pci_set_drvdata(pdev, adapter);
10633 err = register_netdev(netdev);
10634 if (err)
10635 goto err_register;
10636
	/* power down the optics for 82599 SFP+ fiber */
10639 if (hw->mac.ops.disable_tx_laser)
10640 hw->mac.ops.disable_tx_laser(hw);
10641
	/* carrier off reporting is important to ethtool even BEFORE open */
10643 netif_carrier_off(netdev);
10644
10645#ifdef CONFIG_IXGBE_DCA
10646 if (dca_add_requester(&pdev->dev) == 0) {
10647 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
10648 ixgbe_setup_dca(adapter);
10649 }
10650#endif
10651 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
10652 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
10653 for (i = 0; i < adapter->num_vfs; i++)
10654 ixgbe_vf_configuration(pdev, (i | 0x10000000));
10655 }
10656
	/* firmware requires driver version to be 0xFFFFFFFF
	 * since os does not support feature
	 */
10660 if (hw->mac.ops.set_fw_drv_ver)
10661 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
10662 sizeof(ixgbe_driver_version) - 1,
10663 ixgbe_driver_version);
10664
	/* add san mac addr to netdev */
10666 ixgbe_add_sanmac_netdev(netdev);
10667
10668 e_dev_info("%s\n", ixgbe_default_device_descr);
10669
10670#ifdef CONFIG_IXGBE_HWMON
10671 if (ixgbe_sysfs_init(adapter))
10672 e_err(probe, "failed to allocate sysfs resources\n");
10673#endif
10674
10675 ixgbe_dbg_adapter_init(adapter);
10676
	/* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
10678 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
10679 hw->mac.ops.setup_link(hw,
10680 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
10681 true);
10682
10683 return 0;
10684
10685err_register:
10686 ixgbe_release_hw_control(adapter);
10687 ixgbe_clear_interrupt_scheme(adapter);
10688err_sw_init:
10689 ixgbe_disable_sriov(adapter);
10690 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
10691 iounmap(adapter->io_addr);
10692 kfree(adapter->jump_tables[0]);
10693 kfree(adapter->mac_table);
10694 kfree(adapter->rss_key);
10695err_ioremap:
10696 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
10697 free_netdev(netdev);
10698err_alloc_etherdev:
10699 pci_release_mem_regions(pdev);
10700err_pci_reg:
10701err_dma:
10702 if (!adapter || disable_dev)
10703 pci_disable_device(pdev);
10704 return err;
10705}
10706
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
10716static void ixgbe_remove(struct pci_dev *pdev)
10717{
10718 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10719 struct net_device *netdev;
10720 bool disable_dev;
10721 int i;
10722
	/* if !adapter then we already cleaned up in probe */
10724 if (!adapter)
10725 return;
10726
10727 netdev = adapter->netdev;
10728 ixgbe_dbg_adapter_exit(adapter);
10729
10730 set_bit(__IXGBE_REMOVING, &adapter->state);
10731 cancel_work_sync(&adapter->service_task);
10732
10733
10734#ifdef CONFIG_IXGBE_DCA
10735 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
10736 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
10737 dca_remove_requester(&pdev->dev);
10738 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
10739 IXGBE_DCA_CTRL_DCA_DISABLE);
10740 }
10741
10742#endif
10743#ifdef CONFIG_IXGBE_HWMON
10744 ixgbe_sysfs_exit(adapter);
10745#endif
10746
	/* remove the added san mac */
10748 ixgbe_del_sanmac_netdev(netdev);
10749
10750#ifdef CONFIG_PCI_IOV
10751 ixgbe_disable_sriov(adapter);
10752#endif
10753 if (netdev->reg_state == NETREG_REGISTERED)
10754 unregister_netdev(netdev);
10755
10756 ixgbe_stop_ipsec_offload(adapter);
10757 ixgbe_clear_interrupt_scheme(adapter);
10758
10759 ixgbe_release_hw_control(adapter);
10760
10761#ifdef CONFIG_DCB
10762 kfree(adapter->ixgbe_ieee_pfc);
10763 kfree(adapter->ixgbe_ieee_ets);
10764
10765#endif
10766 iounmap(adapter->io_addr);
10767 pci_release_mem_regions(pdev);
10768
10769 e_dev_info("complete\n");
10770
10771 for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
10772 if (adapter->jump_tables[i]) {
10773 kfree(adapter->jump_tables[i]->input);
10774 kfree(adapter->jump_tables[i]->mask);
10775 }
10776 kfree(adapter->jump_tables[i]);
10777 }
10778
10779 kfree(adapter->mac_table);
10780 kfree(adapter->rss_key);
10781 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
10782 free_netdev(netdev);
10783
10784 pci_disable_pcie_error_reporting(pdev);
10785
10786 if (disable_dev)
10787 pci_disable_device(pdev);
10788}
10789
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
10798static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
10799 pci_channel_state_t state)
10800{
10801 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10802 struct net_device *netdev = adapter->netdev;
10803
10804#ifdef CONFIG_PCI_IOV
10805 struct ixgbe_hw *hw = &adapter->hw;
10806 struct pci_dev *bdev, *vfdev;
10807 u32 dw0, dw1, dw2, dw3;
10808 int vf, pos;
10809 u16 req_id, pf_func;
10810
10811 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
10812 adapter->num_vfs == 0)
10813 goto skip_bad_vf_detection;
10814
10815 bdev = pdev->bus->self;
10816 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
10817 bdev = bdev->bus->self;
10818
10819 if (!bdev)
10820 goto skip_bad_vf_detection;
10821
10822 pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
10823 if (!pos)
10824 goto skip_bad_vf_detection;
10825
10826 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
10827 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
10828 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
10829 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
10830 if (ixgbe_removed(hw->hw_addr))
10831 goto skip_bad_vf_detection;
10832
10833 req_id = dw1 >> 16;
	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
10835 if (!(req_id & 0x0080))
10836 goto skip_bad_vf_detection;
10837
10838 pf_func = req_id & 0x01;
10839 if ((pf_func & 1) == (pdev->devfn & 1)) {
10840 unsigned int device_id;
10841
10842 vf = (req_id & 0x7F) >> 1;
10843 e_dev_err("VF %d has caused a PCIe error\n", vf);
10844 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
10845 "%8.8x\tdw3: %8.8x\n",
10846 dw0, dw1, dw2, dw3);
10847 switch (adapter->hw.mac.type) {
10848 case ixgbe_mac_82599EB:
10849 device_id = IXGBE_82599_VF_DEVICE_ID;
10850 break;
10851 case ixgbe_mac_X540:
10852 device_id = IXGBE_X540_VF_DEVICE_ID;
10853 break;
10854 case ixgbe_mac_X550:
10855 device_id = IXGBE_DEV_ID_X550_VF;
10856 break;
10857 case ixgbe_mac_X550EM_x:
10858 device_id = IXGBE_DEV_ID_X550EM_X_VF;
10859 break;
10860 case ixgbe_mac_x550em_a:
10861 device_id = IXGBE_DEV_ID_X550EM_A_VF;
10862 break;
10863 default:
10864 device_id = 0;
10865 break;
10866 }
10867
		/* Find the pci device of the offending VF */
10869 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
10870 while (vfdev) {
10871 if (vfdev->devfn == (req_id & 0xFF))
10872 break;
10873 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
10874 device_id, vfdev);
10875 }
10876
		/* There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
10881 if (vfdev) {
10882 pcie_flr(vfdev);
			/* Free device reference count */
10884 pci_dev_put(vfdev);
10885 }
10886
10887 pci_cleanup_aer_uncorrect_error_status(pdev);
10888 }
10889
	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
10896 adapter->vferr_refcount++;
10897
10898 return PCI_ERS_RESULT_RECOVERED;
10899
10900skip_bad_vf_detection:
10901#endif
10902 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
10903 return PCI_ERS_RESULT_DISCONNECT;
10904
10905 if (!netif_device_present(netdev))
10906 return PCI_ERS_RESULT_DISCONNECT;
10907
10908 rtnl_lock();
10909 netif_device_detach(netdev);
10910
10911 if (state == pci_channel_io_perm_failure) {
10912 rtnl_unlock();
10913 return PCI_ERS_RESULT_DISCONNECT;
10914 }
10915
10916 if (netif_running(netdev))
10917 ixgbe_close_suspend(adapter);
10918
10919 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
10920 pci_disable_device(pdev);
10921 rtnl_unlock();
10922
	/* Request a slot reset. */
10924 return PCI_ERS_RESULT_NEED_RESET;
10925}
10926
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
10933static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
10934{
10935 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10936 pci_ers_result_t result;
10937 int err;
10938
10939 if (pci_enable_device_mem(pdev)) {
10940 e_err(probe, "Cannot re-enable PCI device after reset.\n");
10941 result = PCI_ERS_RESULT_DISCONNECT;
10942 } else {
10943 smp_mb__before_atomic();
10944 clear_bit(__IXGBE_DISABLED, &adapter->state);
10945 adapter->hw.hw_addr = adapter->io_addr;
10946 pci_set_master(pdev);
10947 pci_restore_state(pdev);
10948 pci_save_state(pdev);
10949
10950 pci_wake_from_d3(pdev, false);
10951
10952 ixgbe_reset(adapter);
10953 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10954 result = PCI_ERS_RESULT_RECOVERED;
10955 }
10956
10957 err = pci_cleanup_aer_uncorrect_error_status(pdev);
10958 if (err) {
10959 e_dev_err("pci_cleanup_aer_uncorrect_error_status "
10960 "failed 0x%0x\n", err);
10961
10962 }
10963
10964 return result;
10965}
10966
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
10974static void ixgbe_io_resume(struct pci_dev *pdev)
10975{
10976 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10977 struct net_device *netdev = adapter->netdev;
10978
10979#ifdef CONFIG_PCI_IOV
10980 if (adapter->vferr_refcount) {
10981 e_info(drv, "Resuming after VF err\n");
10982 adapter->vferr_refcount--;
10983 return;
10984 }
10985
10986#endif
10987 rtnl_lock();
10988 if (netif_running(netdev))
10989 ixgbe_open(netdev);
10990
10991 netif_device_attach(netdev);
10992 rtnl_unlock();
10993}
10994
10995static const struct pci_error_handlers ixgbe_err_handler = {
10996 .error_detected = ixgbe_io_error_detected,
10997 .slot_reset = ixgbe_io_slot_reset,
10998 .resume = ixgbe_io_resume,
10999};
11000
11001static struct pci_driver ixgbe_driver = {
11002 .name = ixgbe_driver_name,
11003 .id_table = ixgbe_pci_tbl,
11004 .probe = ixgbe_probe,
11005 .remove = ixgbe_remove,
11006#ifdef CONFIG_PM
11007 .suspend = ixgbe_suspend,
11008 .resume = ixgbe_resume,
11009#endif
11010 .shutdown = ixgbe_shutdown,
11011 .sriov_configure = ixgbe_pci_sriov_configure,
11012 .err_handler = &ixgbe_err_handler
11013};
11014
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
11021static int __init ixgbe_init_module(void)
11022{
	int ret;

11024 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
11025 pr_info("%s\n", ixgbe_copyright);
11026
11027 ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
11028 if (!ixgbe_wq) {
11029 pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
11030 return -ENOMEM;
11031 }
11032
11033 ixgbe_dbg_init();
11034
11035 ret = pci_register_driver(&ixgbe_driver);
11036 if (ret) {
11037 destroy_workqueue(ixgbe_wq);
11038 ixgbe_dbg_exit();
11039 return ret;
11040 }
11041
11042#ifdef CONFIG_IXGBE_DCA
11043 dca_register_notify(&dca_notifier);
11044#endif
11045
11046 return 0;
11047}
11048
11049module_init(ixgbe_init_module);
11050
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
11057static void __exit ixgbe_exit_module(void)
11058{
11059#ifdef CONFIG_IXGBE_DCA
11060 dca_unregister_notify(&dca_notifier);
11061#endif
11062 pci_unregister_driver(&ixgbe_driver);
11063
11064 ixgbe_dbg_exit();
11065 if (ixgbe_wq) {
11066 destroy_workqueue(ixgbe_wq);
11067 ixgbe_wq = NULL;
11068 }
11069}
11070
11071#ifdef CONFIG_IXGBE_DCA
11072static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
11073 void *p)
11074{
11075 int ret_val;
11076
11077 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
11078 __ixgbe_notify_dca);
11079
11080 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
11081}
11082
11083#endif
11084
11085module_exit(ixgbe_exit_module);