// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2016 Intel Corporation. */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <linux/numa.h>
#include <generated/utsrelease.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>
#include <net/xdp_sock_drv.h>
#include <net/xfrm.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
#include "ixgbe_txrx_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#endif
static const char ixgbe_copyright[] =
				"Copyright (c) 1999-2016 Intel Corporation.";

static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598]		= &ixgbe_82598_info,
	[board_82599]		= &ixgbe_82599_info,
	[board_X540]		= &ixgbe_X540_info,
	[board_X550]		= &ixgbe_X550_info,
	[board_X550EM_x]	= &ixgbe_X550EM_x_info,
	[board_x550em_x_fw]	= &ixgbe_x550em_x_fw_info,
	[board_x550em_a]	= &ixgbe_x550em_a_info,
	[board_x550em_a_fw]	= &ixgbe_x550em_a_fw_info,
};

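/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */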
static const struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");

static struct workqueue_struct *ixgbe_wq;

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);

static const struct net_device_ops ixgbe_netdev_ops;

static bool netif_is_ixgbe(struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
}

static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					  u32 reg, u16 *value)
{
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	if (!pci_is_pcie(parent_dev))
		return -1;

	pcie_capability_read_word(parent_dev, reg, value);
	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
		return -1;
	return 0;
}

static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 link_status = 0;
	int err;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from the PCI config
	 * space of the parent, as this device is behind a switch.
	 * (18 is the offset of PCI_EXP_LNKSTA in the PCIe capability.)
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	return 0;
}

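/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from
 * the device itself.
 */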
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		return true;
	default:
		return false;
	}
}

static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
				     int expected_gts)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev;

	/* Some devices are not connected over PCIe and thus do not negotiate
	 * speed. These devices do not have valid bus info, and thus any report
	 * we generate may not be correct.
	 */
	if (hw->bus.type == ixgbe_bus_type_internal)
		return;

	/* determine whether to use the parent device */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		pdev = adapter->pdev->bus->parent->self;
	else
		pdev = adapter->pdev;

	pcie_print_link_status(pdev);
}

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_bit(__IXGBE_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbe_wq, &adapter->service_task);
}

static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	e_dev_err("Adapter removed\n");
	if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		ixgbe_service_event_schedule(adapter);
}

static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr;
	u32 value;
	int i;

	reg_addr = READ_ONCE(hw->hw_addr);
	if (ixgbe_removed(reg_addr))
		return IXGBE_FAILED_READ_REG;

	/* A register read of all ones can indicate the adapter has been
	 * removed, so perform several status register reads to determine
	 * whether the adapter really is gone.
	 */
	for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
		value = readl(reg_addr + IXGBE_STATUS);
		if (value != IXGBE_FAILED_READ_REG)
			break;
		mdelay(3);
	}

	if (value == IXGBE_FAILED_READ_REG)
		ixgbe_remove_adapter(hw);
	else
		value = readl(reg_addr + reg);
	return value;
}

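/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers. It checks for device
 * removal by confirming any read that returns all ones by checking the
 * status register value for all ones. This function avoids reading from
 * the hardware if a removal was previously detected, in which case it
 * returns IXGBE_FAILED_READ_REG.
 */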
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
	u32 value;

	if (ixgbe_removed(reg_addr))
		return IXGBE_FAILED_READ_REG;
	if (unlikely(hw->phy.nw_mng_if_sel &
		     IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
		struct ixgbe_adapter *adapter;
		int i;

		/* wait for any pending SGMII register writes to complete
		 * before issuing the read
		 */
		for (i = 0; i < 200; ++i) {
			value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
			if (likely(!value))
				goto writes_completed;
			if (value == IXGBE_FAILED_READ_REG) {
				ixgbe_remove_adapter(hw);
				return IXGBE_FAILED_READ_REG;
			}
			udelay(5);
		}

		adapter = hw->back;
		e_warn(hw, "register writes incomplete %08x\n", value);
	}

writes_completed:
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		value = ixgbe_check_remove(hw, reg);
	return value;
}

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
	u16 value;

	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD) {
		ixgbe_remove_adapter(hw);
		return true;
	}
	return false;
}

u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u16 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_WORD;
	pci_read_config_word(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_WORD;
	return value;
}

#ifdef CONFIG_PCI_IOV
static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u32 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_DWORD;
	pci_read_config_dword(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_DWORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_DWORD;
	return value;
}
#endif /* CONFIG_PCI_IOV */

void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (ixgbe_removed(hw->hw_addr))
		return;
	pci_write_config_word(adapter->pdev, reg, value);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{ .name = NULL }
};

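/*
 * ixgbe_regdump - register printout routine
 */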
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n",
			reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	i = 0;
	while (i < 64) {
		int j;
		char buf[9 * 8 + 1];
		char *p = buf;

		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
		for (j = 0; j < 8; j++)
			p += sprintf(p, " %08x", regs[i++]);
		pr_err("%-15s%s\n", rname, buf);
	}
}

static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
{
	struct ixgbe_tx_buffer *tx_buffer;

	tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
	pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
		n, ring->next_to_use, ring->next_to_clean,
		(u64)dma_unmap_addr(tx_buffer, dma),
		dma_unmap_len(tx_buffer, len),
		tx_buffer->next_to_watch,
		(u64)tx_buffer->time_stamp);
}

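/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */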
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *ring;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start\n");
		pr_info("%-15s %016lX %016lX\n",
			netdev->name,
			netdev->state,
			dev_trans_start(netdev));
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info(" %s     %s              %s        %s\n",
		"Queue [NTU] [NTC] [bi(ntc)->dma  ]",
		"leng", "ntw", "timestamp");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		ring = adapter->tx_ring[n];
		ixgbe_print_buffer(ring, n);
	}

	for (n = 0; n < adapter->num_xdp_queues; n++) {
		ring = adapter->xdp_ring[n];
		ixgbe_print_buffer(ring, n);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

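	/* Transmit Descriptor Formats
	 *
	 * Each descriptor below is printed as two 8-byte words, matching
	 * the advanced transmit descriptor layout: a 64-bit buffer address
	 * followed by a 64-bit field carrying the length, offload options,
	 * and status/command bits (the "[PlPOIdStDDt Ln]" column of the
	 * per-queue header printed below).
	 */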
	for (n = 0; n < adapter->num_tx_queues; n++) {
		ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s    %s              %s        %s          %s\n",
			"T [desc]     [address 63:0  ] ",
			"[PlPOIdStDDt Ln] [bi->dma       ] ",
			"leng", "ntw", "timestamp", "bi->skb");

		for (i = 0; ring->desc && (i < ring->count); i++) {
			tx_desc = IXGBE_TX_DESC(ring, i);
			tx_buffer = &ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (dma_unmap_len(tx_buffer, len) > 0) {
				const char *ring_desc;

				if (i == ring->next_to_use &&
				    i == ring->next_to_clean)
					ring_desc = " NTC/U";
				else if (i == ring->next_to_use)
					ring_desc = " NTU";
				else if (i == ring->next_to_clean)
					ring_desc = " NTC";
				else
					ring_desc = "";
				pr_info("T [0x%03X]    %016llX %016llX %016llX %08X %p %016llX %p%s",
					i,
					le64_to_cpu((__force __le64)u0->a),
					le64_to_cpu((__force __le64)u0->b),
					(u64)dma_unmap_addr(tx_buffer, dma),
					dma_unmap_len(tx_buffer, len),
					tx_buffer->next_to_watch,
					(u64)tx_buffer->time_stamp,
					tx_buffer->skb,
					ring_desc);

				if (netif_msg_pktdata(adapter) &&
				    tx_buffer->skb)
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS, 16, 1,
						tx_buffer->skb->data,
						dma_unmap_len(tx_buffer, len),
						true);
			}
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

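	/* Receive Descriptor Formats
	 *
	 * Each descriptor below is printed as two 8-byte words.  In the
	 * "Adv Rx Read" format the words hold the packet-buffer and
	 * header-buffer addresses the hardware will DMA into; once written
	 * back ("Adv Rx Write-Back" format) they instead carry RSS,
	 * checksum, and status information plus the VLAN tag and length,
	 * which is why both layouts are printed for each ring.
	 */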
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s%s\n",
			"R  [desc]      [ PktBuf     A0] ",
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb       ] ",
			"<-- Adv Rx Read format");
		pr_info("%s%s%s\n",
			"RWB[desc]      [PcsmIpSHl PtRs] ",
			"[vl er S cks ln] ---------------- [bi->skb       ] ",
			"<-- Adv Rx Write-Back format");

		for (i = 0; i < rx_ring->count; i++) {
			const char *ring_desc;

			if (i == rx_ring->next_to_use)
				ring_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				ring_desc = " NTC";
			else
				ring_desc = "";

			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			if (rx_desc->wb.upper.length) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX %016llX ---------------- %p%s\n",
					i,
					le64_to_cpu((__force __le64)u0->a),
					le64_to_cpu((__force __le64)u0->b),
					rx_buffer_info->skb,
					ring_desc);
			} else {
				pr_info("R  [0x%03X]     %016llX %016llX %016llX %p%s\n",
					i,
					le64_to_cpu((__force __le64)u0->a),
					le64_to_cpu((__force __le64)u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb,
					ring_desc);

				if (netif_msg_pktdata(adapter) &&
				    rx_buffer_info->dma) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   page_address(rx_buffer_info->page) +
						    rx_buffer_info->page_offset,
					   ixgbe_rx_bufsz(rx_ring), true);
				}
			}
		}
	}
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

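/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */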
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
			    u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	int i;
	u32 data;

	if ((hw->fc.current_mode != ixgbe_fc_full) &&
	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
		return;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		break;
	default:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	}
	hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
	if (!data)
		return;

	for (i = 0; i < adapter->num_tx_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->tx_ring[i]->state);

	for (i = 0; i < adapter->num_xdp_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->xdp_ring[i]->state);
}

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 xoff[8] = {0};
	u8 tc;
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
		ixgbe_update_xoff_rx_lfc(adapter);
		return;
	}

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		u32 pxoffrxc;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += pxoffrxc;
		/* Get the TC for given UP */
		tc = netdev_get_prio_tc_map(adapter->netdev, i);
		xoff[tc] += pxoffrxc;
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

		tc = tx_ring->dcb_tc;
		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];

		tc = xdp_ring->dcb_tc;
		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
	}
}
1008
1009static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
1010{
1011 return ring->stats.packets;
1012}
1013
1014static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
1015{
1016 unsigned int head, tail;
1017
1018 head = ring->next_to_clean;
1019 tail = ring->next_to_use;
1020
1021 return ((head <= tail) ? tail : tail + ring->count) - head;
1022}
1023
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough.  The queue is
	 * considered hung if it has pending descriptors and the
	 * completed-packet count has not advanced since the previous
	 * check.  The ARMED bit makes this a two-strike detection:
	 * the first pass with no progress arms the bit, and only a
	 * second consecutive pass with no progress reports a hang.
	 */
	if (tx_done_old == tx_done && tx_pending)
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
					&tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;
	/* reset the countdown */
	clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);

	return false;
}

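/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/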
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
		e_warn(drv, "initiating reset due to tx timeout\n");
		ixgbe_service_event_schedule(adapter);
	}
}

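/**
 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
 * @netdev: network interface device structure
 * @queue_index: Tx queue to set
 * @maxrate: desired maximum transmit bitrate
 **/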
static int ixgbe_tx_maxrate(struct net_device *netdev,
			    int queue_index, u32 maxrate)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 bcnrc_val = ixgbe_link_mbps(adapter);

	if (!maxrate)
		return 0;

	/* Calculate the rate factor values to set */
	bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
	bcnrc_val /= maxrate;

	/* clear everything but the rate factor */
	bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
		     IXGBE_RTTBCNRC_RF_DEC_MASK;

	/* enable the rate scheduler */
	bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;

	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);

	return 0;
}

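/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/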
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring, int napi_budget)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;
		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
			total_ipsec++;

		/* free the skb */
		if (ring_is_xdp(tx_ring))
			xdp_return_frame(tx_buffer->xdpf);
		else
			napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;
	adapter->tx_ipsec += total_ipsec;

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		struct ixgbe_hw *hw = &adapter->hw;
		e_err(drv, "Detected Tx Unit Hang %s\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			ring_is_xdp(tx_ring) ? "(XDP)" : "",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, i,
			tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		if (!ring_is_xdp(tx_ring))
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		       adapter->tx_timeout_count + 1, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbe_tx_timeout_reset(adapter);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	if (ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index)
		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl = 0;
	u16 reg_offset;

	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		txctrl = dca3_get_tag(tx_ring->dev, cpu);

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
		txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		/* for unknown hardware do not write register */
		return;
	}

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
		  IXGBE_DCA_TXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}

static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl = 0;
	u8 reg_idx = rx_ring->reg_idx;

	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		rxctrl = dca3_get_tag(rx_ring->dev, cpu);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		break;
	}

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_RXCTRL_DATA_DCA_EN |
		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	ixgbe_for_each_ring(ring, q_vector->tx)
		ixgbe_update_tx_dca(adapter, ring, cpu);

	ixgbe_for_each_ring(ring, q_vector->rx)
		ixgbe_update_rx_dca(adapter, ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	/* always use CB2 mode, difference is masked in the CB driver */
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_MODE_CB2);
	else
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_DISABLE);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
					IXGBE_DCA_CTRL_DCA_MODE_CB2);
			break;
		}
		fallthrough; /* DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
					IXGBE_DCA_CTRL_DCA_DISABLE);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */

#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
				 union ixgbe_adv_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
				    union ixgbe_adv_rx_desc *rx_desc)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}

#endif /* IXGBE_FCOE */

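/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/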
static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	bool encap_pkt = false;

	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check for VXLAN or Geneve encapsulated packet */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
		encap_pkt = true;
		skb->encapsulation = 1;
	}

	/* if IP checksum error */
	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		/* 82599 errata, UDP frames with a 0 checksum can be
		 * marked as checksum errors.
		 */
		if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
			return;

		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (encap_pkt) {
		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
			return;

		if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}
		/* If we checked the outer header let the stack know */
		skb->csum_level = 1;
	}
}

static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
}

static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
				    struct ixgbe_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 ixgbe_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IXGBE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbe_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ixgbe_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;
	rx_ring->rx_stats.alloc_rx_page++;

	return true;
}

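/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/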
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	bufsz = ixgbe_rx_bufsz(rx_ring);

	do {
		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}

static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
				   struct sk_buff *skb)
{
	u16 hdr_len = skb_headlen(skb);

	/* set gso_size to avoid messing up TCP MSS */
	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
						 IXGBE_CB(skb)->append_cnt);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}

static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
				   struct sk_buff *skb)
{
	/* if append_cnt is 0 then frame is not RSC */
	if (!IXGBE_CB(skb)->append_cnt)
		return;

	rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
	rx_ring->rx_stats.rsc_flush++;

	ixgbe_set_rsc_gso_size(rx_ring, skb);

	/* gso_size is computed using append_cnt so always clear it last */
	IXGBE_CB(skb)->append_cnt = 0;
}

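/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/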
void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
			      union ixgbe_adv_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	u32 flags = rx_ring->q_vector->adapter->flags;

	ixgbe_update_rsc_stats(rx_ring, skb);

	ixgbe_rx_hash(rx_ring, rx_desc, skb);

	ixgbe_rx_checksum(rx_ring, rx_desc, skb);

	if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
		ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
		ixgbe_ipsec_rx(rx_ring, rx_desc, skb);

	/* record Rx queue, or update macvlan statistics */
	if (netif_is_ixgbe(dev))
		skb_record_rx_queue(skb, rx_ring->queue_index);
	else
		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
				 false);

	skb->protocol = eth_type_trans(skb, dev);
}

void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
		  struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}

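/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/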
static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
			     union ixgbe_adv_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBE_RX_DESC(rx_ring, ntc));

	/* update RSC append count if present */
	if (ring_is_rsc_enabled(rx_ring)) {
		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
				     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);

		if (unlikely(rsc_enabled)) {
			u32 rsc_cnt = le32_to_cpu(rsc_enabled);

			rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;

			/* update ntc based on RSC value */
			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
			ntc &= IXGBE_RXDADV_NEXTP_MASK;
			ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
		}
	}

	/* if we are the last buffer then there is nothing else to do */
	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buffer_info[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

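/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */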
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
			    struct sk_buff *skb)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lomem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	skb_frag_off_add(frag, pull_len);
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

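/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */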
static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
				struct sk_buff *skb)
{
	if (ring_uses_build_skb(rx_ring)) {
		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;

		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      offset,
					      skb_headlen(skb),
					      DMA_FROM_DEVICE);
	} else {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      skb_frag_off(frag),
					      skb_frag_size(frag),
					      DMA_FROM_DEVICE);
	}

	/* If the page was released, just unmap it. */
	if (unlikely(IXGBE_CB(skb)->page_released)) {
		dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
				     ixgbe_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IXGBE_RX_DMA_ATTR);
	}
}

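/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check if the skb is valid; in the XDP case it will be an error pointer.
 * Return true in this case to abort processing and advance to next
 * descriptor.
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/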
bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
			   union ixgbe_adv_rx_desc *rx_desc,
			   struct sk_buff *skb)
{
	struct net_device *netdev = rx_ring->netdev;

	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	/* Verify netdev is present, and that packet does not have any
	 * known errors
	 */
	if (!netdev ||
	    (unlikely(ixgbe_test_staterr(rx_desc,
					 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
	     !(netdev->features & NETIF_F_RXALL)))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* place header in linear portion of buffer */
	if (!skb_headlen(skb))
		ixgbe_pull_tail(rx_ring, skb);

#ifdef IXGBE_FCOE
	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
		return false;

#endif
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

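/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/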
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
				struct ixgbe_rx_buffer *old_buff)
{
	struct ixgbe_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buff->dma		= old_buff->dma;
	new_buff->page		= old_buff->page;
	new_buff->page_offset	= old_buff->page_offset;
	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
}

static inline bool ixgbe_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
				    int rx_buffer_pgcnt)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote pages */
	if (unlikely(ixgbe_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
		return false;
#else
	/* The last offset is a bit aggressive in that we assume the
	 * network stack will take care of freeing the page almost
	 * immediately.  In either case we cannot place an offset past
	 * the end of the usable portion of the page.
	 */
#define IXGBE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
	if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

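/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of data in rx_buffer
 *
 * This function will add the data contained in rx_buffer->page to the skb
 * by attaching the page as a frag, then advance or flip the page offset so
 * the rest of the page can be reused.
 **/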
static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
			      struct ixgbe_rx_buffer *rx_buffer,
			      struct sk_buff *skb,
			      unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
						   union ixgbe_adv_rx_desc *rx_desc,
						   struct sk_buff **skb,
						   const unsigned int size,
						   int *rx_buffer_pgcnt)
{
	struct ixgbe_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buffer->page);
#else
		0;
#endif
	prefetchw(rx_buffer->page);
	*skb = rx_buffer->skb;

	/* Delay unmapping of the first packet. It carries the header
	 * information, HW may still access the header after the writeback.
	 * Only unmap it when EOP is reached
	 */
	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
		if (!*skb)
			goto skip_sync;
	} else {
		if (*skb)
			ixgbe_dma_sync_frag(rx_ring, *skb);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);
skip_sync:
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
				struct ixgbe_rx_buffer *rx_buffer,
				struct sk_buff *skb,
				int rx_buffer_pgcnt)
{
	if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
		/* hand second half of page back to the ring */
		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
			/* the page has been released from the ring */
			IXGBE_CB(skb)->page_released = true;
		} else {
			/* we are not reusing the buffer so unmap it */
			dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
					     ixgbe_rx_pg_size(rx_ring),
					     DMA_FROM_DEVICE,
					     IXGBE_RX_DMA_ATTR);
		}
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
	rx_buffer->skb = NULL;
}

static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
					   struct ixgbe_rx_buffer *rx_buffer,
					   struct xdp_buff *xdp,
					   union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* Note, we get here by enabling legacy-rx via:
	 *
	 *    ethtool --set-priv-flags <dev> legacy-rx on
	 *
	 * In this mode the frame data stays in the page buffer and only
	 * the headers are copied into the skb's linear portion below, so
	 * an skb cannot be built directly around the buffer.
	 */

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;

	if (size > IXGBE_RX_HDR_SIZE) {
		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
			IXGBE_CB(skb)->dma = rx_buffer->dma;

		skb_add_rx_frag(skb, 0, rx_buffer->page,
				xdp->data - page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		/* frame fit entirely in the linear part; the page buffer
		 * was not consumed, so give its reference back
		 */
		memcpy(__skb_put(skb, size),
		       xdp->data, ALIGN(size, sizeof(long)));
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
				       struct ixgbe_rx_buffer *rx_buffer,
				       struct xdp_buff *xdp,
				       union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly at xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);

	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* record DMA address if this is the start of a chain of buffers */
	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
		IXGBE_CB(skb)->dma = rx_buffer->dma;

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *rx_ring,
				     struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	if (!xdp_prog)
		goto xdp_out;

	prefetchw(xdp->data_hard_start);

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			result = IXGBE_XDP_CONSUMED;
			break;
		}
		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
		if (!err)
			result = IXGBE_XDP_REDIR;
		else
			result = IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	}
xdp_out:
	rcu_read_unlock();
	return ERR_PTR(-result);
}

static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
					    unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
	truesize = ring_uses_build_skb(rx_ring) ?
		SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
				 struct ixgbe_rx_buffer *rx_buffer,
				 unsigned int size)
{
	unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

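/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/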
2290static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2291 struct ixgbe_ring *rx_ring,
2292 const int budget)
2293{
2294 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2295 struct ixgbe_adapter *adapter = q_vector->adapter;
2296#ifdef IXGBE_FCOE
2297 int ddp_bytes;
2298 unsigned int mss = 0;
2299#endif
2300 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2301 unsigned int xdp_xmit = 0;
2302 struct xdp_buff xdp;
2303
2304 xdp.rxq = &rx_ring->xdp_rxq;
2305
2306
2307#if (PAGE_SIZE < 8192)
2308 xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
2309#endif
2310
2311 while (likely(total_rx_packets < budget)) {
2312 union ixgbe_adv_rx_desc *rx_desc;
2313 struct ixgbe_rx_buffer *rx_buffer;
2314 struct sk_buff *skb;
2315 int rx_buffer_pgcnt;
2316 unsigned int size;
2317
2318
2319 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2320 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2321 cleaned_count = 0;
2322 }
2323
2324 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2325 size = le16_to_cpu(rx_desc->wb.upper.length);
2326 if (!size)
2327 break;
2328
2329
2330
2331
2332
2333 dma_rmb();
2334
2335 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
2336
2337
2338 if (!skb) {
2339 xdp.data = page_address(rx_buffer->page) +
2340 rx_buffer->page_offset;
2341 xdp.data_meta = xdp.data;
2342 xdp.data_hard_start = xdp.data -
2343 ixgbe_rx_offset(rx_ring);
2344 xdp.data_end = xdp.data + size;
2345#if (PAGE_SIZE > 4096)
2346
2347 xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
2348#endif
2349 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2350 }
2351
2352 if (IS_ERR(skb)) {
2353 unsigned int xdp_res = -PTR_ERR(skb);
2354
2355 if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
2356 xdp_xmit |= xdp_res;
2357 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2358 } else {
2359 rx_buffer->pagecnt_bias++;
2360 }
2361 total_rx_packets++;
2362 total_rx_bytes += size;
2363 } else if (skb) {
2364 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
2365 } else if (ring_uses_build_skb(rx_ring)) {
2366 skb = ixgbe_build_skb(rx_ring, rx_buffer,
2367 &xdp, rx_desc);
2368 } else {
2369 skb = ixgbe_construct_skb(rx_ring, rx_buffer,
2370 &xdp, rx_desc);
2371 }
2372
2373
2374 if (!skb) {
2375 rx_ring->rx_stats.alloc_rx_buff_failed++;
2376 rx_buffer->pagecnt_bias++;
2377 break;
2378 }
2379
2380 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
2381 cleaned_count++;
2382
2383
2384 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2385 continue;
2386
2387
2388 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2389 continue;
2390
2391
2392 total_rx_bytes += skb->len;
2393
2394
2395 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2396
2397#ifdef IXGBE_FCOE
2398
2399 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2400 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2401
2402 if (ddp_bytes > 0) {
2403 if (!mss) {
2404 mss = rx_ring->netdev->mtu -
2405 sizeof(struct fcoe_hdr) -
2406 sizeof(struct fc_frame_header) -
2407 sizeof(struct fcoe_crc_eof);
					if (mss > 512)
						mss &= ~511; /* round down to a 512 byte boundary */
2410 }
2411 total_rx_bytes += ddp_bytes;
2412 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2413 mss);
2414 }
2415 if (!ddp_bytes) {
2416 dev_kfree_skb_any(skb);
2417 continue;
2418 }
2419 }
2420
2421#endif
2422 ixgbe_rx_skb(q_vector, skb);
2423
2424
2425 total_rx_packets++;
2426 }
2427
2428 if (xdp_xmit & IXGBE_XDP_REDIR)
2429 xdp_do_flush_map();
2430
2431 if (xdp_xmit & IXGBE_XDP_TX) {
2432 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
		/* Force memory writes to complete before letting h/w know
		 * there are new descriptors to fetch.
		 */
2437 wmb();
2438 writel(ring->next_to_use, ring->tail);
2439 }
2440
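	/* publish the per-ring counters under the u64 stats seqcount */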
2441 u64_stats_update_begin(&rx_ring->syncp);
2442 rx_ring->stats.packets += total_rx_packets;
2443 rx_ring->stats.bytes += total_rx_bytes;
2444 u64_stats_update_end(&rx_ring->syncp);
2445 q_vector->rx.total_packets += total_rx_packets;
2446 q_vector->rx.total_bytes += total_rx_bytes;
2447
2448 return total_rx_packets;
2449}
2450
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
2458static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2459{
2460 struct ixgbe_q_vector *q_vector;
2461 int v_idx;
2462 u32 mask;
2463
	/* Populate MSIX to EITR Select */
2465 if (adapter->num_vfs > 32) {
2466 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
2467 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2468 }
2469
	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
2474 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2475 struct ixgbe_ring *ring;
2476 q_vector = adapter->q_vector[v_idx];
2477
2478 ixgbe_for_each_ring(ring, q_vector->rx)
2479 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2480
2481 ixgbe_for_each_ring(ring, q_vector->tx)
2482 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2483
2484 ixgbe_write_eitr(q_vector);
2485 }
2486
2487 switch (adapter->hw.mac.type) {
2488 case ixgbe_mac_82598EB:
2489 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2490 v_idx);
2491 break;
2492 case ixgbe_mac_82599EB:
2493 case ixgbe_mac_X540:
2494 case ixgbe_mac_X550:
2495 case ixgbe_mac_X550EM_x:
2496 case ixgbe_mac_x550em_a:
2497 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2498 break;
2499 default:
2500 break;
2501 }
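	/* Fixed moderation value for the "other causes" vector; the exact
	 * interval this encodes depends on the MAC's EITR granularity.
	 */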
2502 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2503
2504
2505 mask = IXGBE_EIMS_ENABLE_MASK;
2506 mask &= ~(IXGBE_EIMS_OTHER |
2507 IXGBE_EIMS_MAILBOX |
2508 IXGBE_EIMS_LSC);
2509
2510 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2511}
2512
/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were computed based on theoretical maximum wire speed and
 * thresholds were set based on testing data as well as attempting to
 * minimize response time while increasing bulk throughput.
 **/
2526static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2527 struct ixgbe_ring_container *ring_container)
2528{
2529 unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
2530 IXGBE_ITR_ADAPTIVE_LATENCY;
2531 unsigned int avg_wire_size, packets, bytes;
2532 unsigned long next_update = jiffies;
2533
	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
2537 if (!ring_container->ring)
2538 return;
2539
	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
2545 if (time_after(next_update, ring_container->next_update))
2546 goto clear_counts;
2547
2548 packets = ring_container->total_packets;
2549
	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * When this occurs just tick up our delay by the minimum value
	 * and hope that this extra delay will prevent us from being called
	 * without any work on our queue.
	 */
2558 if (!packets) {
2559 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2560 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2561 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2562 itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
2563 goto clear_counts;
2564 }
2565
2566 bytes = ring_container->total_bytes;
2567
	/* If packets are less than 4 or bytes are less than 9000 assume
	 * insufficient data to use bulk rate limiting approach. We are
	 * likely latency driven.
	 */
2572 if (packets < 4 && bytes < 9000) {
2573 itr = IXGBE_ITR_ADAPTIVE_LATENCY;
2574 goto adjust_by_size;
2575 }
2576
	/* Between 4 and 48 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
2581 if (packets < 48) {
2582 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2583 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2584 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2585 goto clear_counts;
2586 }
2587
	/* Between 48 and 96 is our "goldilocks" zone where things are working
	 * out "just right". Just report that our current ITR is good for us.
	 */
2591 if (packets < 96) {
2592 itr = q_vector->itr >> 2;
2593 goto clear_counts;
2594 }
2595
	/* If packet count is 96 or greater we are likely looking at a slight
	 * overrun of the delay we want. Try halving our delay to see if that
	 * will cut the number of packets in half per interrupt.
	 */
2600 if (packets < 256) {
2601 itr = q_vector->itr >> 3;
2602 if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
2603 itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
2604 goto clear_counts;
2605 }
2606
	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to apply it.
	 */
2613 itr = IXGBE_ITR_ADAPTIVE_BULK;
2614
2615adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to fine
	 * tune it just use the formula below to try and dial in an exact value
	 * given the current packet size of the frame.
	 */
2621 avg_wire_size = bytes / packets;
2622
	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_intvl
	 *  (desired_pkt_rate / pkt_intvl) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
2638 if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
2640 avg_wire_size = 5120;
2641 } else if (avg_wire_size <= 316) {
		/* 250K ints/sec to 60K ints/sec */
2643 avg_wire_size *= 40;
2644 avg_wire_size += 2720;
2645 } else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 20K ints/sec */
2647 avg_wire_size *= 15;
2648 avg_wire_size += 11452;
2649 } else if (avg_wire_size < 1968) {
		/* 20K ints/sec to 16K ints/sec */
2651 avg_wire_size *= 5;
2652 avg_wire_size += 22420;
2653 } else {
		/* Cap at 16K ints/sec */
2655 avg_wire_size = 32256;
2656 }
2657
	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
2661 if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
2662 avg_wire_size >>= 1;
2663
	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
2671 switch (q_vector->adapter->link_speed) {
2672 case IXGBE_LINK_SPEED_10GB_FULL:
2673 case IXGBE_LINK_SPEED_100_FULL:
2674 default:
2675 itr += DIV_ROUND_UP(avg_wire_size,
2676 IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
2677 IXGBE_ITR_ADAPTIVE_MIN_INC;
2678 break;
2679 case IXGBE_LINK_SPEED_2_5GB_FULL:
2680 case IXGBE_LINK_SPEED_1GB_FULL:
2681 case IXGBE_LINK_SPEED_10_FULL:
2682 if (avg_wire_size > 8064)
2683 avg_wire_size = 8064;
2684 itr += DIV_ROUND_UP(avg_wire_size,
2685 IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
2686 IXGBE_ITR_ADAPTIVE_MIN_INC;
2687 break;
2688 }
2689
2690clear_counts:
2691
2692 ring_container->itr = itr;
2693
2694
2695 ring_container->next_update = next_update + 1;
2696
2697 ring_container->total_bytes = 0;
2698 ring_container->total_packets = 0;
2699}
2700
/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
2709void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2710{
2711 struct ixgbe_adapter *adapter = q_vector->adapter;
2712 struct ixgbe_hw *hw = &adapter->hw;
2713 int v_idx = q_vector->v_idx;
2714 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2715
2716 switch (adapter->hw.mac.type) {
2717 case ixgbe_mac_82598EB:
		/* must write high and low 16 bits to reset counter */
2719 itr_reg |= (itr_reg << 16);
2720 break;
2721 case ixgbe_mac_82599EB:
2722 case ixgbe_mac_X540:
2723 case ixgbe_mac_X550:
2724 case ixgbe_mac_X550EM_x:
2725 case ixgbe_mac_x550em_a:
		/* set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
2730 itr_reg |= IXGBE_EITR_CNT_WDIS;
2731 break;
2732 default:
2733 break;
2734 }
2735 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2736}
2737
2738static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2739{
2740 u32 new_itr;
2741
2742 ixgbe_update_itr(q_vector, &q_vector->tx);
2743 ixgbe_update_itr(q_vector, &q_vector->rx);
2744
2745
2746 new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
2747
2748
2749 new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
2750 new_itr <<= 2;
2751
2752 if (new_itr != q_vector->itr) {
2753
2754 q_vector->itr = new_itr;
2755
2756 ixgbe_write_eitr(q_vector);
2757 }
2758}
2759
/**
 * ixgbe_check_overtemp_subtask - check for over temp events generated by HW
 * @adapter: pointer to the adapter structure
 **/
2764static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2765{
2766 struct ixgbe_hw *hw = &adapter->hw;
2767 u32 eicr = adapter->interrupt_event;
2768 s32 rc;
2769
2770 if (test_bit(__IXGBE_DOWN, &adapter->state))
2771 return;
2772
2773 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2774 return;
2775
2776 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2777
2778 switch (hw->device_id) {
2779 case IXGBE_DEV_ID_82599_T3_LOM:
		/* Since the warning interrupt is for both ports
		 * we don't have to check if:
		 *  - This interrupt wasn't for our port.
		 *  - We may have missed the interrupt so always have to
		 *    check if we got a LSC
		 */
2787 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2788 !(eicr & IXGBE_EICR_LSC))
2789 return;
2790
2791 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2792 u32 speed;
2793 bool link_up = false;
2794
2795 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2796
2797 if (link_up)
2798 return;
2799 }
2800
2801
2802 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2803 return;
2804
2805 break;
2806 case IXGBE_DEV_ID_X550EM_A_1G_T:
2807 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2808 rc = hw->phy.ops.check_overtemp(hw);
2809 if (rc != IXGBE_ERR_OVERTEMP)
2810 return;
2811 break;
2812 default:
2813 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2814 return;
2815 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2816 return;
2817 break;
2818 }
2819 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2820
2821 adapter->interrupt_event = 0;
2822}
2823
2824static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2825{
2826 struct ixgbe_hw *hw = &adapter->hw;
2827
2828 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2829 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2830 e_crit(probe, "Fan has stopped, replace the adapter\n");
2831
2832 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2833 }
2834}
2835
2836static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2837{
2838 struct ixgbe_hw *hw = &adapter->hw;
2839
2840 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2841 return;
2842
2843 switch (adapter->hw.mac.type) {
2844 case ixgbe_mac_82599EB:
		/* Need to check link state so complete overtemp check
		 * on service task
		 */
2849 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2850 (eicr & IXGBE_EICR_LSC)) &&
2851 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2852 adapter->interrupt_event = eicr;
2853 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2854 ixgbe_service_event_schedule(adapter);
2855 return;
2856 }
2857 return;
2858 case ixgbe_mac_x550em_a:
2859 if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
2860 adapter->interrupt_event = eicr;
2861 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2862 ixgbe_service_event_schedule(adapter);
2863 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
2864 IXGBE_EICR_GPI_SDP0_X550EM_a);
2865 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
2866 IXGBE_EICR_GPI_SDP0_X550EM_a);
2867 }
2868 return;
2869 case ixgbe_mac_X550:
2870 case ixgbe_mac_X540:
2871 if (!(eicr & IXGBE_EICR_TS))
2872 return;
2873 break;
2874 default:
2875 return;
2876 }
2877
2878 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2879}
2880
2881static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2882{
2883 switch (hw->mac.type) {
2884 case ixgbe_mac_82598EB:
2885 if (hw->phy.type == ixgbe_phy_nl)
2886 return true;
2887 return false;
2888 case ixgbe_mac_82599EB:
2889 case ixgbe_mac_X550EM_x:
2890 case ixgbe_mac_x550em_a:
2891 switch (hw->mac.ops.get_media_type(hw)) {
2892 case ixgbe_media_type_fiber:
2893 case ixgbe_media_type_fiber_qsfp:
2894 return true;
2895 default:
2896 return false;
2897 }
2898 default:
2899 return false;
2900 }
2901}
2902
2903static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2904{
2905 struct ixgbe_hw *hw = &adapter->hw;
2906 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2907
2908 if (!ixgbe_is_sfp(hw))
2909 return;
2910
	/* Later MACs use a different SDP for the SFP presence interrupt */
2912 if (hw->mac.type >= ixgbe_mac_X540)
2913 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2914
2915 if (eicr & eicr_mask) {
2916
2917 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2918 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2919 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2920 adapter->sfp_poll_time = 0;
2921 ixgbe_service_event_schedule(adapter);
2922 }
2923 }
2924
2925 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2926 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2927
2928 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2929 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2930 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2931 ixgbe_service_event_schedule(adapter);
2932 }
2933 }
2934}
2935
2936static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2937{
2938 struct ixgbe_hw *hw = &adapter->hw;
2939
2940 adapter->lsc_int++;
2941 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2942 adapter->link_check_timeout = jiffies;
2943 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2944 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2945 IXGBE_WRITE_FLUSH(hw);
2946 ixgbe_service_event_schedule(adapter);
2947 }
2948}
2949
2950static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2951 u64 qmask)
2952{
2953 u32 mask;
2954 struct ixgbe_hw *hw = &adapter->hw;
2955
2956 switch (hw->mac.type) {
2957 case ixgbe_mac_82598EB:
2958 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2959 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2960 break;
2961 case ixgbe_mac_82599EB:
2962 case ixgbe_mac_X540:
2963 case ixgbe_mac_X550:
2964 case ixgbe_mac_X550EM_x:
2965 case ixgbe_mac_x550em_a:
2966 mask = (qmask & 0xFFFFFFFF);
2967 if (mask)
2968 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2969 mask = (qmask >> 32);
2970 if (mask)
2971 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2972 break;
2973 default:
2974 break;
2975 }
	/* skip the flush */
2977}
2978
/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @queues: enable irqs for queues
 * @flush: flush register write
 **/
2985static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2986 bool flush)
2987{
2988 struct ixgbe_hw *hw = &adapter->hw;
2989 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2990
2991
2992 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2993 mask &= ~IXGBE_EIMS_LSC;
2994
2995 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2996 switch (adapter->hw.mac.type) {
2997 case ixgbe_mac_82599EB:
2998 mask |= IXGBE_EIMS_GPI_SDP0(hw);
2999 break;
3000 case ixgbe_mac_X540:
3001 case ixgbe_mac_X550:
3002 case ixgbe_mac_X550EM_x:
3003 case ixgbe_mac_x550em_a:
3004 mask |= IXGBE_EIMS_TS;
3005 break;
3006 default:
3007 break;
3008 }
3009 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3010 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3011 switch (adapter->hw.mac.type) {
3012 case ixgbe_mac_82599EB:
3013 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3014 mask |= IXGBE_EIMS_GPI_SDP2(hw);
3015 fallthrough;
3016 case ixgbe_mac_X540:
3017 case ixgbe_mac_X550:
3018 case ixgbe_mac_X550EM_x:
3019 case ixgbe_mac_x550em_a:
3020 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3021 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3022 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
3023 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
3024 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
3025 mask |= IXGBE_EICR_GPI_SDP0_X540;
3026 mask |= IXGBE_EIMS_ECC;
3027 mask |= IXGBE_EIMS_MAILBOX;
3028 break;
3029 default:
3030 break;
3031 }
3032
3033 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
3034 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
3035 mask |= IXGBE_EIMS_FLOW_DIR;
3036
3037 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
3038 if (queues)
3039 ixgbe_irq_enable_queues(adapter, ~0);
3040 if (flush)
3041 IXGBE_WRITE_FLUSH(&adapter->hw);
3042}
3043
3044static irqreturn_t ixgbe_msix_other(int irq, void *data)
3045{
3046 struct ixgbe_adapter *adapter = data;
3047 struct ixgbe_hw *hw = &adapter->hw;
3048 u32 eicr;
3049
	/* Workaround for silicon errata: use clear-by-write instead
	 * of clear-by-read. Reading with EICS will return the
	 * interrupt causes without clearing, which is later done
	 * with the write to EICR.
	 */
3056 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3057
	/* The lower 16 bits of EICR represent the per-queue interrupt
	 * causes, which are serviced by the queue vectors rather than by
	 * this handler. Mask them off so the clearing write below does
	 * not discard events a queue vector still has to observe.
	 */
3065 eicr &= 0xFFFF0000;
3066
3067 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3068
3069 if (eicr & IXGBE_EICR_LSC)
3070 ixgbe_check_lsc(adapter);
3071
3072 if (eicr & IXGBE_EICR_MAILBOX)
3073 ixgbe_msg_task(adapter);
3074
3075 switch (hw->mac.type) {
3076 case ixgbe_mac_82599EB:
3077 case ixgbe_mac_X540:
3078 case ixgbe_mac_X550:
3079 case ixgbe_mac_X550EM_x:
3080 case ixgbe_mac_x550em_a:
3081 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
3082 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3083 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
3084 ixgbe_service_event_schedule(adapter);
3085 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3086 IXGBE_EICR_GPI_SDP0_X540);
3087 }
3088 if (eicr & IXGBE_EICR_ECC) {
3089 e_info(link, "Received ECC Err, initiating reset\n");
3090 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3091 ixgbe_service_event_schedule(adapter);
3092 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3093 }
3094
3095 if (eicr & IXGBE_EICR_FLOW_DIR) {
3096 int reinit_count = 0;
3097 int i;
3098 for (i = 0; i < adapter->num_tx_queues; i++) {
3099 struct ixgbe_ring *ring = adapter->tx_ring[i];
3100 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
3101 &ring->state))
3102 reinit_count++;
3103 }
3104 if (reinit_count) {
				/* no more flow director interrupts until after init */
3106 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3107 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
3108 ixgbe_service_event_schedule(adapter);
3109 }
3110 }
3111 ixgbe_check_sfp_event(adapter, eicr);
3112 ixgbe_check_overtemp_event(adapter, eicr);
3113 break;
3114 default:
3115 break;
3116 }
3117
3118 ixgbe_check_fan_failure(adapter, eicr);
3119
3120 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3121 ixgbe_ptp_check_pps_event(adapter);
3122
3123
3124 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3125 ixgbe_irq_enable(adapter, false, false);
3126
3127 return IRQ_HANDLED;
3128}
3129
3130static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
3131{
3132 struct ixgbe_q_vector *q_vector = data;
3133
	/* EIAM disabled interrupts (on this vector) for us */
3135
3136 if (q_vector->rx.ring || q_vector->tx.ring)
3137 napi_schedule_irqoff(&q_vector->napi);
3138
3139 return IRQ_HANDLED;
3140}
3141
/**
 * ixgbe_poll - NAPI polling RX/TX cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 **/
3149int ixgbe_poll(struct napi_struct *napi, int budget)
3150{
3151 struct ixgbe_q_vector *q_vector =
3152 container_of(napi, struct ixgbe_q_vector, napi);
3153 struct ixgbe_adapter *adapter = q_vector->adapter;
3154 struct ixgbe_ring *ring;
3155 int per_ring_budget, work_done = 0;
3156 bool clean_complete = true;
3157
3158#ifdef CONFIG_IXGBE_DCA
3159 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3160 ixgbe_update_dca(q_vector);
3161#endif
3162
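	/* Rings with an AF_XDP buffer pool attached are cleaned by their
	 * zero-copy handlers, all others by the regular ones.
	 */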
3163 ixgbe_for_each_ring(ring, q_vector->tx) {
3164 bool wd = ring->xsk_pool ?
3165 ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
3166 ixgbe_clean_tx_irq(q_vector, ring, budget);
3167
3168 if (!wd)
3169 clean_complete = false;
3170 }
3171
	/* Exit if we are called by netpoll */
3173 if (budget <= 0)
3174 return budget;
3175
	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
3178 if (q_vector->rx.count > 1)
3179 per_ring_budget = max(budget/q_vector->rx.count, 1);
3180 else
3181 per_ring_budget = budget;
3182
3183 ixgbe_for_each_ring(ring, q_vector->rx) {
3184 int cleaned = ring->xsk_pool ?
3185 ixgbe_clean_rx_irq_zc(q_vector, ring,
3186 per_ring_budget) :
3187 ixgbe_clean_rx_irq(q_vector, ring,
3188 per_ring_budget);
3189
3190 work_done += cleaned;
3191 if (cleaned >= per_ring_budget)
3192 clean_complete = false;
3193 }
3194
3195
3196 if (!clean_complete)
3197 return budget;
3198
3199
3200 if (likely(napi_complete_done(napi, work_done))) {
3201 if (adapter->rx_itr_setting & 1)
3202 ixgbe_set_itr(q_vector);
3203 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3204 ixgbe_irq_enable_queues(adapter,
3205 BIT_ULL(q_vector->v_idx));
3206 }
3207
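	/* Return strictly less than budget so the NAPI core treats this
	 * poll as complete; napi_complete_done() was already called above.
	 */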
3208 return min(work_done, budget - 1);
3209}
3210
3211
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
3218static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
3219{
3220 struct net_device *netdev = adapter->netdev;
3221 unsigned int ri = 0, ti = 0;
3222 int vector, err;
3223
3224 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3225 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3226 struct msix_entry *entry = &adapter->msix_entries[vector];
3227
3228 if (q_vector->tx.ring && q_vector->rx.ring) {
3229 snprintf(q_vector->name, sizeof(q_vector->name),
3230 "%s-TxRx-%u", netdev->name, ri++);
3231 ti++;
3232 } else if (q_vector->rx.ring) {
3233 snprintf(q_vector->name, sizeof(q_vector->name),
3234 "%s-rx-%u", netdev->name, ri++);
3235 } else if (q_vector->tx.ring) {
3236 snprintf(q_vector->name, sizeof(q_vector->name),
3237 "%s-tx-%u", netdev->name, ti++);
3238 } else {
3239
3240 continue;
3241 }
3242 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
3243 q_vector->name, q_vector);
3244 if (err) {
			e_err(probe, "request_irq failed for MSIX interrupt Error: %d\n",
			      err);
3247 goto free_queue_irqs;
3248 }
3249
3250 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3251
3252 irq_set_affinity_hint(entry->vector,
3253 &q_vector->affinity_mask);
3254 }
3255 }
3256
3257 err = request_irq(adapter->msix_entries[vector].vector,
3258 ixgbe_msix_other, 0, netdev->name, adapter);
3259 if (err) {
3260 e_err(probe, "request_irq for msix_other failed: %d\n", err);
3261 goto free_queue_irqs;
3262 }
3263
3264 return 0;
3265
3266free_queue_irqs:
3267 while (vector) {
3268 vector--;
3269 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
3270 NULL);
3271 free_irq(adapter->msix_entries[vector].vector,
3272 adapter->q_vector[vector]);
3273 }
3274 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3275 pci_disable_msix(adapter->pdev);
3276 kfree(adapter->msix_entries);
3277 adapter->msix_entries = NULL;
3278 return err;
3279}
3280
/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
3286static irqreturn_t ixgbe_intr(int irq, void *data)
3287{
3288 struct ixgbe_adapter *adapter = data;
3289 struct ixgbe_hw *hw = &adapter->hw;
3290 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3291 u32 eicr;
3292
	/* Workaround for silicon errata #26 on 82598.  Mask the interrupt
	 * before the read of EICR.
	 */
3297 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3298
	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
3301 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3302 if (!eicr) {
		/* shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM
		 * finish the workaround of silicon errata on 82598.  Unmask
		 * the interrupt that we masked before the EICR read.
		 */
3310 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3311 ixgbe_irq_enable(adapter, true, true);
3312 return IRQ_NONE;
3313 }
3314
3315 if (eicr & IXGBE_EICR_LSC)
3316 ixgbe_check_lsc(adapter);
3317
3318 switch (hw->mac.type) {
3319 case ixgbe_mac_82599EB:
3320 ixgbe_check_sfp_event(adapter, eicr);
3321 fallthrough;
3322 case ixgbe_mac_X540:
3323 case ixgbe_mac_X550:
3324 case ixgbe_mac_X550EM_x:
3325 case ixgbe_mac_x550em_a:
3326 if (eicr & IXGBE_EICR_ECC) {
3327 e_info(link, "Received ECC Err, initiating reset\n");
3328 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3329 ixgbe_service_event_schedule(adapter);
3330 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3331 }
3332 ixgbe_check_overtemp_event(adapter, eicr);
3333 break;
3334 default:
3335 break;
3336 }
3337
3338 ixgbe_check_fan_failure(adapter, eicr);
3339 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3340 ixgbe_ptp_check_pps_event(adapter);
3341
	/* would disable interrupts here but EIAM disabled it */
3343 napi_schedule_irqoff(&q_vector->napi);
3344
	/* re-enable link(maybe) and non-queue interrupts, no flush.
	 * ixgbe_poll will re-enable the queue interrupts
	 */
3349 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3350 ixgbe_irq_enable(adapter, false, false);
3351
3352 return IRQ_HANDLED;
3353}
3354
/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
3362static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
3363{
3364 struct net_device *netdev = adapter->netdev;
3365 int err;
3366
3367 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3368 err = ixgbe_request_msix_irqs(adapter);
3369 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
3370 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
3371 netdev->name, adapter);
3372 else
3373 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
3374 netdev->name, adapter);
3375
3376 if (err)
3377 e_err(probe, "request_irq failed, Error %d\n", err);
3378
3379 return err;
3380}
3381
3382static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
3383{
3384 int vector;
3385
3386 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3387 free_irq(adapter->pdev->irq, adapter);
3388 return;
3389 }
3390
3391 if (!adapter->msix_entries)
3392 return;
3393
3394 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3395 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3396 struct msix_entry *entry = &adapter->msix_entries[vector];
3397
3398
3399 if (!q_vector->rx.ring && !q_vector->tx.ring)
3400 continue;
3401
3402
3403 irq_set_affinity_hint(entry->vector, NULL);
3404
3405 free_irq(entry->vector, q_vector);
3406 }
3407
3408 free_irq(adapter->msix_entries[vector].vector, adapter);
3409}
3410
/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
3415static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3416{
3417 switch (adapter->hw.mac.type) {
3418 case ixgbe_mac_82598EB:
3419 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3420 break;
3421 case ixgbe_mac_82599EB:
3422 case ixgbe_mac_X540:
3423 case ixgbe_mac_X550:
3424 case ixgbe_mac_X550EM_x:
3425 case ixgbe_mac_x550em_a:
3426 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3427 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3428 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3429 break;
3430 default:
3431 break;
3432 }
3433 IXGBE_WRITE_FLUSH(&adapter->hw);
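	/* wait for any handler still running on this adapter's vectors */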
3434 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3435 int vector;
3436
3437 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3438 synchronize_irq(adapter->msix_entries[vector].vector);
3439
3440 synchronize_irq(adapter->msix_entries[vector++].vector);
3441 } else {
3442 synchronize_irq(adapter->pdev->irq);
3443 }
3444}
3445
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
3451static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3452{
3453 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3454
3455 ixgbe_write_eitr(q_vector);
3456
3457 ixgbe_set_ivar(adapter, 0, 0, 0);
3458 ixgbe_set_ivar(adapter, 1, 0, 0);
3459
3460 e_info(hw, "Legacy interrupt IVAR setup done\n");
3461}
3462
/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
3470void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3471 struct ixgbe_ring *ring)
3472{
3473 struct ixgbe_hw *hw = &adapter->hw;
3474 u64 tdba = ring->dma;
3475 int wait_loop = 10;
3476 u32 txdctl = IXGBE_TXDCTL_ENABLE;
3477 u8 reg_idx = ring->reg_idx;
3478
3479 ring->xsk_pool = NULL;
3480 if (ring_is_xdp(ring))
3481 ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
3482
3483
3484 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3485 IXGBE_WRITE_FLUSH(hw);
3486
3487 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3488 (tdba & DMA_BIT_MASK(32)));
3489 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3490 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3491 ring->count * sizeof(union ixgbe_adv_tx_desc));
3492 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3493 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3494 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3495
	/* set WTHRESH to encourage burst writeback, it should not be set
	 * higher than 1 when:
	 * - ITR is 0 as it could cause false TX hangs
	 * - ITR is set to > 100k ints/sec and BQL is enabled
	 *
	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
3506 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
		txdctl |= 1u << 16;	/* WTHRESH = 1 */
3508 else
		txdctl |= 8u << 16;	/* WTHRESH = 8 */
3510
	/* Setting PTHRESH to 32 both improves performance
	 * and avoids a TX hang with DFP enabled
	 */
	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */
3517
3518
3519 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3520 ring->atr_sample_rate = adapter->atr_sample_rate;
3521 ring->atr_count = 0;
3522 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3523 } else {
3524 ring->atr_sample_rate = 0;
3525 }
3526
3527
3528 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3529 struct ixgbe_q_vector *q_vector = ring->q_vector;
3530
3531 if (q_vector)
3532 netif_set_xps_queue(ring->netdev,
3533 &q_vector->affinity_mask,
3534 ring->queue_index);
3535 }
3536
3537 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3538
3539
3540 memset(ring->tx_buffer_info, 0,
3541 sizeof(struct ixgbe_tx_buffer) * ring->count);
3542
3543
3544 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3545
3546
3547 if (hw->mac.type == ixgbe_mac_82598EB &&
3548 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3549 return;
3550
3551
3552 do {
3553 usleep_range(1000, 2000);
3554 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3555 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3556 if (!wait_loop)
3557 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
3558}
3559
3560static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3561{
3562 struct ixgbe_hw *hw = &adapter->hw;
3563 u32 rttdcs, mtqc;
3564 u8 tcs = adapter->hw_tcs;
3565
3566 if (hw->mac.type == ixgbe_mac_82598EB)
3567 return;
3568
3569
3570 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3571 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3572 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3573
3574
3575 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3576 mtqc = IXGBE_MTQC_VT_ENA;
3577 if (tcs > 4)
3578 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3579 else if (tcs > 1)
3580 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3581 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3582 IXGBE_82599_VMDQ_4Q_MASK)
3583 mtqc |= IXGBE_MTQC_32VF;
3584 else
3585 mtqc |= IXGBE_MTQC_64VF;
3586 } else {
3587 if (tcs > 4) {
3588 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3589 } else if (tcs > 1) {
3590 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3591 } else {
3592 u8 max_txq = adapter->num_tx_queues +
3593 adapter->num_xdp_queues;
3594 if (max_txq > 63)
3595 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3596 else
3597 mtqc = IXGBE_MTQC_64Q_1PB;
3598 }
3599 }
3600
3601 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3602
3603
3604 if (tcs) {
3605 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3606 sectx |= IXGBE_SECTX_DCB;
3607 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3608 }
3609
3610
3611 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3612 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3613}
3614
/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
3621static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3622{
3623 struct ixgbe_hw *hw = &adapter->hw;
3624 u32 dmatxctl;
3625 u32 i;
3626
3627 ixgbe_setup_mtqc(adapter);
3628
3629 if (hw->mac.type != ixgbe_mac_82598EB) {
		/* DMATXCTL.TE must be set after all Tx queue config is done */
3631 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3632 dmatxctl |= IXGBE_DMATXCTL_TE;
3633 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3634 }
3635
3636
3637 for (i = 0; i < adapter->num_tx_queues; i++)
3638 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3639 for (i = 0; i < adapter->num_xdp_queues; i++)
3640 ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
3641}
3642
3643static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3644 struct ixgbe_ring *ring)
3645{
3646 struct ixgbe_hw *hw = &adapter->hw;
3647 u8 reg_idx = ring->reg_idx;
3648 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3649
3650 srrctl |= IXGBE_SRRCTL_DROP_EN;
3651
3652 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3653}
3654
3655static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3656 struct ixgbe_ring *ring)
3657{
3658 struct ixgbe_hw *hw = &adapter->hw;
3659 u8 reg_idx = ring->reg_idx;
3660 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3661
3662 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3663
3664 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3665}
3666
3667#ifdef CONFIG_IXGBE_DCB
3668void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3669#else
3670static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3671#endif
3672{
3673 int i;
3674 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3675
3676 if (adapter->ixgbe_ieee_pfc)
3677 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3678
	/* We should set the drop enable bit if:
	 *  SR-IOV is enabled
	 *   or
	 *  Number of Rx queues > 1 and flow control is disabled
	 *
	 * This allows us to avoid head of line blocking for security
	 * and performance reasons.
	 */
3688 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3689 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3690 for (i = 0; i < adapter->num_rx_queues; i++)
3691 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3692 } else {
3693 for (i = 0; i < adapter->num_rx_queues; i++)
3694 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3695 }
3696}
3697
3698#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3699
3700static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3701 struct ixgbe_ring *rx_ring)
3702{
3703 struct ixgbe_hw *hw = &adapter->hw;
3704 u32 srrctl;
3705 u8 reg_idx = rx_ring->reg_idx;
3706
3707 if (hw->mac.type == ixgbe_mac_82598EB) {
3708 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3709
		/* if VMDq is not active we must program one srrctl register
		 * per RSS queue since we have enabled RDRXCTL.MVMEN
		 */
3714 reg_idx &= mask;
3715 }
3716
3717
3718 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3719
3720
3721 if (rx_ring->xsk_pool) {
3722 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
3723
		/* If the MAC supports setting RXDCTL.RLPML, then
		 * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
		 * RXDCTL.RLPML is set to the actual UMEM buffer
		 * size. If not, then we are stuck with a 1k buffer
		 * size resolution. In this case frames larger than
		 * the UMEM buffer size viewed in a 1k resolution will
		 * be dropped.
		 */
3732 if (hw->mac.type != ixgbe_mac_82599EB)
3733 srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3734 else
3735 srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3736 } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
3737 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3738 } else {
3739 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3740 }
3741
3742
3743 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3744
3745 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3746}
3747
/**
 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
 * @adapter: device handle
 *
 *  - 82598/82599/X540:     128
 *  - X550(non-SRIOV mode): 512
 *  - X550(SRIOV mode):     64
 */
3756u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3757{
3758 if (adapter->hw.mac.type < ixgbe_mac_X550)
3759 return 128;
3760 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3761 return 64;
3762 else
3763 return 512;
3764}
3765
/**
 * ixgbe_store_key - Write the RSS key to HW
 * @adapter: device handle
 *
 * Write the RSS key stored in adapter.rss_key to HW.
 */
3772void ixgbe_store_key(struct ixgbe_adapter *adapter)
3773{
3774 struct ixgbe_hw *hw = &adapter->hw;
3775 int i;
3776
3777 for (i = 0; i < 10; i++)
3778 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3779}
3780
/**
 * ixgbe_init_rss_key - Initialize adapter RSS key
 * @adapter: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 **/
3787static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
3788{
3789 u32 *rss_key;
3790
3791 if (!adapter->rss_key) {
3792 rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
3793 if (unlikely(!rss_key))
3794 return -ENOMEM;
3795
3796 netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
3797 adapter->rss_key = rss_key;
3798 }
3799
3800 return 0;
3801}
3802
/**
 * ixgbe_store_reta - Write the RETA table to HW
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl to HW.
 */
3809void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3810{
3811 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3812 struct ixgbe_hw *hw = &adapter->hw;
3813 u32 reta = 0;
3814 u32 indices_multi;
3815 u8 *indir_tbl = adapter->rss_indir_tbl;
3816
	/* Fill out the redirection table as follows:
	 *  - 82598:      8 bit wide entries containing a pair of 4 bit RSS
	 *    indices.
	 *  - 82599/X540: 8 bit wide entries containing a 4 bit RSS index
	 *  - X550:       8 bit wide entries containing a 6 bit RSS index
	 */
3823 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3824 indices_multi = 0x11;
3825 else
3826 indices_multi = 0x1;
3827
3828
3829 for (i = 0; i < reta_entries; i++) {
3830 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3831 if ((i & 3) == 3) {
3832 if (i < 128)
3833 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3834 else
3835 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3836 reta);
3837 reta = 0;
3838 }
3839 }
3840}
3841
/**
 * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl to HW.
 */
3848static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3849{
3850 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3851 struct ixgbe_hw *hw = &adapter->hw;
3852 u32 vfreta = 0;
3853
3854
3855 for (i = 0; i < reta_entries; i++) {
3856 u16 pool = adapter->num_rx_pools;
3857
3858 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3859 if ((i & 3) != 3)
3860 continue;
3861
3862 while (pool--)
3863 IXGBE_WRITE_REG(hw,
3864 IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
3865 vfreta);
3866 vfreta = 0;
3867 }
3868}
3869
3870static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3871{
3872 u32 i, j;
3873 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3874 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3875
	/* Program table for at least 4 queues w/ SR-IOV so that VFs can
	 * make full use of any rings they may have.  We will use the
	 * PSRTYPE register to control how many rings we use within the PF.
	 */
3880 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
3881 rss_i = 4;
3882
3883
3884 ixgbe_store_key(adapter);
3885
3886
3887 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3888
3889 for (i = 0, j = 0; i < reta_entries; i++, j++) {
3890 if (j == rss_i)
3891 j = 0;
3892
3893 adapter->rss_indir_tbl[i] = j;
3894 }
3895
3896 ixgbe_store_reta(adapter);
3897}
3898
3899static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3900{
3901 struct ixgbe_hw *hw = &adapter->hw;
3902 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3903 int i, j;
3904
3905
3906 for (i = 0; i < 10; i++) {
3907 u16 pool = adapter->num_rx_pools;
3908
3909 while (pool--)
3910 IXGBE_WRITE_REG(hw,
3911 IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
3912 *(adapter->rss_key + i));
3913 }
3914
3915
3916 for (i = 0, j = 0; i < 64; i++, j++) {
3917 if (j == rss_i)
3918 j = 0;
3919
3920 adapter->rss_indir_tbl[i] = j;
3921 }
3922
3923 ixgbe_store_vfreta(adapter);
3924}
3925
3926static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3927{
3928 struct ixgbe_hw *hw = &adapter->hw;
3929 u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3930 u32 rxcsum;
3931
3932
3933 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3934 rxcsum |= IXGBE_RXCSUM_PCSD;
3935 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3936
3937 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3938 if (adapter->ring_feature[RING_F_RSS].mask)
3939 mrqc = IXGBE_MRQC_RSSEN;
3940 } else {
3941 u8 tcs = adapter->hw_tcs;
3942
3943 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3944 if (tcs > 4)
3945 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3946 else if (tcs > 1)
3947 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3948 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3949 IXGBE_82599_VMDQ_4Q_MASK)
3950 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3951 else
3952 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3953
			/* Enable L3/L4 for Tx Switched packets only for X550,
			 * older devices do not support this feature
			 */
3957 if (hw->mac.type >= ixgbe_mac_X550)
3958 mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3959 } else {
3960 if (tcs > 4)
3961 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3962 else if (tcs > 1)
3963 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3964 else
3965 mrqc = IXGBE_MRQC_RSSEN;
3966 }
3967 }
3968
3969
3970 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3971 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3972 IXGBE_MRQC_RSS_FIELD_IPV6 |
3973 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3974
3975 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3976 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3977 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3978 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3979
3980 if ((hw->mac.type >= ixgbe_mac_X550) &&
3981 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3982 u16 pool = adapter->num_rx_pools;
3983
3984
3985 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
3986 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3987
3988
3989 ixgbe_setup_vfreta(adapter);
3990 vfmrqc = IXGBE_MRQC_RSSEN;
3991 vfmrqc |= rss_field;
3992
3993 while (pool--)
3994 IXGBE_WRITE_REG(hw,
3995 IXGBE_PFVFMRQC(VMDQ_P(pool)),
3996 vfmrqc);
3997 } else {
3998 ixgbe_setup_reta(adapter);
3999 mrqc |= rss_field;
4000 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4001 }
4002}
4003
/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 **/
4009static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
4010 struct ixgbe_ring *ring)
4011{
4012 struct ixgbe_hw *hw = &adapter->hw;
4013 u32 rscctrl;
4014 u8 reg_idx = ring->reg_idx;
4015
4016 if (!ring_is_rsc_enabled(ring))
4017 return;
4018
4019 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
4020 rscctrl |= IXGBE_RSCCTL_RSCEN;
4021
	/* we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65536
	 */
4026 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
4027 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
4028}
4029
4030#define IXGBE_MAX_RX_DESC_POLL 10
4031static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
4032 struct ixgbe_ring *ring)
4033{
4034 struct ixgbe_hw *hw = &adapter->hw;
4035 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
4036 u32 rxdctl;
4037 u8 reg_idx = ring->reg_idx;
4038
4039 if (ixgbe_removed(hw->hw_addr))
4040 return;
4041
4042 if (hw->mac.type == ixgbe_mac_82598EB &&
4043 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
4044 return;
4045
4046 do {
4047 usleep_range(1000, 2000);
4048 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4049 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4050
4051 if (!wait_loop) {
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n",
		      reg_idx);
4054 }
4055}
4056
4057void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
4058 struct ixgbe_ring *ring)
4059{
4060 struct ixgbe_hw *hw = &adapter->hw;
4061 union ixgbe_adv_rx_desc *rx_desc;
4062 u64 rdba = ring->dma;
4063 u32 rxdctl;
4064 u8 reg_idx = ring->reg_idx;
4065
4066 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4067 ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
4068 if (ring->xsk_pool) {
4069 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4070 MEM_TYPE_XSK_BUFF_POOL,
4071 NULL));
4072 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
4073 } else {
4074 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4075 MEM_TYPE_PAGE_SHARED, NULL));
4076 }
4077
4078
4079 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4080 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4081
4082
4083 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4084 IXGBE_WRITE_FLUSH(hw);
4085
4086 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
4087 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
4088 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
4089 ring->count * sizeof(union ixgbe_adv_rx_desc));
	/* Force flushing of IXGBE_RDLEN to prevent MDD */
4091 IXGBE_WRITE_FLUSH(hw);
4092
4093 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
4094 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
4095 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
4096
4097 ixgbe_configure_srrctl(adapter, ring);
4098 ixgbe_configure_rscctl(adapter, ring);
4099
4100 if (hw->mac.type == ixgbe_mac_82598EB) {
		/* enable cache line friendly hardware writes:
		 * PTHRESH=32 descriptors (half the internal cache),
		 * this also removes ugly rx_no_buffer_count increment
		 * HTHRESH=4 descriptors (to minimize latency on fetch)
		 * WTHRESH=8 descriptors (to minimize latency on fetch)
		 */
4108 rxdctl &= ~0x3FFFFF;
4109 rxdctl |= 0x080420;
4110#if (PAGE_SIZE < 8192)
	/* RXDCTL.RLPML does not work on 82599 */
4112 } else if (hw->mac.type != ixgbe_mac_82599EB) {
4113 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4114 IXGBE_RXDCTL_RLPML_EN);
4115
		/* Limit the maximum frame size so we don't overrun the skb.
		 * This can happen in SRIOV mode when the MTU of the VF is
		 * higher than the MTU of the PF.
		 */
4120 if (ring_uses_build_skb(ring) &&
4121 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
4122 rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
4123 IXGBE_RXDCTL_RLPML_EN;
4124#endif
4125 }
4126
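	/* With an AF_XDP buffer pool attached, clamp the hardware receive
	 * frame length to the pool's per-buffer size via RXDCTL.RLPML
	 * (82599 is excluded since its RLPML does not work, see above).
	 */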
4127 if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
4128 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
4129
4130 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4131 IXGBE_RXDCTL_RLPML_EN);
4132 rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
4133
4134 ring->rx_buf_len = xsk_buf_len;
4135 }
4136
4137
4138 memset(ring->rx_buffer_info, 0,
4139 sizeof(struct ixgbe_rx_buffer) * ring->count);
4140
4141
4142 rx_desc = IXGBE_RX_DESC(ring, 0);
4143 rx_desc->wb.upper.length = 0;
4144
4145
4146 rxdctl |= IXGBE_RXDCTL_ENABLE;
4147 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4148
4149 ixgbe_rx_desc_queue_enable(adapter, ring);
4150 if (ring->xsk_pool)
4151 ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
4152 else
4153 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4154}
4155
4156static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
4157{
4158 struct ixgbe_hw *hw = &adapter->hw;
4159 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
4160 u16 pool = adapter->num_rx_pools;
4161
4162
4163 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4164 IXGBE_PSRTYPE_UDPHDR |
4165 IXGBE_PSRTYPE_IPV4HDR |
4166 IXGBE_PSRTYPE_L2HDR |
4167 IXGBE_PSRTYPE_IPV6HDR;
4168
4169 if (hw->mac.type == ixgbe_mac_82598EB)
4170 return;
4171
4172 if (rss_i > 3)
4173 psrtype |= 2u << 29;
4174 else if (rss_i > 1)
4175 psrtype |= 1u << 29;
4176
4177 while (pool--)
4178 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4179}
4180
4181static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
4182{
4183 struct ixgbe_hw *hw = &adapter->hw;
4184 u16 pool = adapter->num_rx_pools;
4185 u32 reg_offset, vf_shift, vmolr;
4186 u32 gcr_ext, vmdctl;
4187 int i;
4188
4189 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
4190 return;
4191
4192 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4193 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
4194 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
4195 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
4196 vmdctl |= IXGBE_VT_CTL_REPLEN;
4197 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
4198
	/* accept untagged packets until a vlan tag is
	 * specifically set for the VMDQ queue/pool
	 */
4202 vmolr = IXGBE_VMOLR_AUPE;
4203 while (pool--)
4204 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
4205
4206 vf_shift = VMDQ_P(0) % 32;
4207 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
4208
4209
4210 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
4211 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
4212 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
4213 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
4214 if (adapter->bridge_mode == BRIDGE_MODE_VEB)
4215 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4216
4217
4218 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
4219
4220
4221 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4222
	/* Set up VF register offsets for selected VT Mode,
	 * i.e. 32 or 64 VFs for SR-IOV
	 */
4227 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4228 case IXGBE_82599_VMDQ_8Q_MASK:
4229 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
4230 break;
4231 case IXGBE_82599_VMDQ_4Q_MASK:
4232 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
4233 break;
4234 default:
4235 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
4236 break;
4237 }
4238
4239 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4240
4241 for (i = 0; i < adapter->num_vfs; i++) {
4242
4243 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
4244 adapter->vfinfo[i].spoofchk_enabled);
4245
4246
4247 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
4248 adapter->vfinfo[i].rss_query_enabled);
4249 }
4250}
4251
4252static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
4253{
4254 struct ixgbe_hw *hw = &adapter->hw;
4255 struct net_device *netdev = adapter->netdev;
4256 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4257 struct ixgbe_ring *rx_ring;
4258 int i;
4259 u32 mhadd, hlreg0;
4260
4261#ifdef IXGBE_FCOE
4262
4263 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
4264 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
4265 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4266
4267#endif
4268
4269
4270 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4271 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
4272
4273 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4274 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
4275 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4276 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
4277
4278 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4279 }
4280
4281 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4282
4283 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4284 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4285
4286
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
4290 for (i = 0; i < adapter->num_rx_queues; i++) {
4291 rx_ring = adapter->rx_ring[i];
4292
4293 clear_ring_rsc_enabled(rx_ring);
4294 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4295 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4296
4297 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4298 set_ring_rsc_enabled(rx_ring);
4299
4300 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
4301 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4302
4303 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
4304 continue;
4305
4306 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4307
4308#if (PAGE_SIZE < 8192)
4309 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4310 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4311
4312 if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
4313 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
4314 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4315#endif
4316 }
4317}
4318
4319static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
4320{
4321 struct ixgbe_hw *hw = &adapter->hw;
4322 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4323
4324 switch (hw->mac.type) {
4325 case ixgbe_mac_82598EB:
		/* For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 *
		 * also, the manual doesn't mention it clearly but DCA
		 * hints will only use queue 0's tags unless this bit
		 * is set, so keep it enabled here.
		 */
4336 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
4337 break;
4338 case ixgbe_mac_X550:
4339 case ixgbe_mac_X550EM_x:
4340 case ixgbe_mac_x550em_a:
4341 if (adapter->num_vfs)
4342 rdrxctl |= IXGBE_RDRXCTL_PSP;
4343 fallthrough;
4344 case ixgbe_mac_82599EB:
4345 case ixgbe_mac_X540:
4346
4347 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
4348 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
4349 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
		/* hardware requires some bits to be set by default */
4351 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
4352 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4353 break;
4354 default:
4355
4356 return;
4357 }
4358
4359 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4360}
4361
/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
4368static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
4369{
4370 struct ixgbe_hw *hw = &adapter->hw;
4371 int i;
4372 u32 rxctrl, rfctl;
4373
4374
4375 hw->mac.ops.disable_rx(hw);
4376
4377 ixgbe_setup_psrtype(adapter);
4378 ixgbe_setup_rdrxctl(adapter);
4379
4380
4381 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4382 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
4383 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
4384 rfctl |= IXGBE_RFCTL_RSC_DIS;
4385
4386
4387 rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
4388 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4389
4390
4391 ixgbe_setup_mrqc(adapter);
4392
4393
4394 ixgbe_set_rx_buffer_len(adapter);
4395
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
4400 for (i = 0; i < adapter->num_rx_queues; i++)
4401 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
4402
4403 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	/* disable drop enable for 82598 parts */
4405 if (hw->mac.type == ixgbe_mac_82598EB)
4406 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4407
4408
4409 rxctrl |= IXGBE_RXCTRL_RXEN;
4410 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4411}
4412
4413static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
4414 __be16 proto, u16 vid)
4415{
4416 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4417 struct ixgbe_hw *hw = &adapter->hw;
4418
4419
4420 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4421 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
4422
4423 set_bit(vid, adapter->active_vlans);
4424
4425 return 0;
4426}
4427
4428static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
4429{
4430 u32 vlvf;
4431 int idx;
4432
4433
4434 if (vlan == 0)
4435 return 0;
4436
4437
4438 for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
4439 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
4440 if ((vlvf & VLAN_VID_MASK) == vlan)
4441 break;
4442 }
4443
4444 return idx;
4445}
4446
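/* Drop the PF from the VLVF pool entry for @vid and, when no other pool
 * still references the entry, release the VLVF register entirely.
 */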
4447void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
4448{
4449 struct ixgbe_hw *hw = &adapter->hw;
4450 u32 bits, word;
4451 int idx;
4452
4453 idx = ixgbe_find_vlvf_entry(hw, vid);
4454 if (!idx)
4455 return;
4456
	/* See if any other pools are set for this VLAN filter
	 * entry other than the PF.
	 */
4460 word = idx * 2 + (VMDQ_P(0) / 32);
4461 bits = ~BIT(VMDQ_P(0) % 32);
4462 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4463
4464
4465 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
4466 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4467 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
4468 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
4469 }
4470}
4471
4472static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
4473 __be16 proto, u16 vid)
4474{
4475 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4476 struct ixgbe_hw *hw = &adapter->hw;
4477
4478
4479 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4480 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
4481
4482 clear_bit(vid, adapter->active_vlans);
4483
4484 return 0;
4485}
4486
/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */
4491static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
4492{
4493 struct ixgbe_hw *hw = &adapter->hw;
4494 u32 vlnctrl;
4495 int i, j;
4496
4497 switch (hw->mac.type) {
4498 case ixgbe_mac_82598EB:
4499 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4500 vlnctrl &= ~IXGBE_VLNCTRL_VME;
4501 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4502 break;
4503 case ixgbe_mac_82599EB:
4504 case ixgbe_mac_X540:
4505 case ixgbe_mac_X550:
4506 case ixgbe_mac_X550EM_x:
4507 case ixgbe_mac_x550em_a:
4508 for (i = 0; i < adapter->num_rx_queues; i++) {
4509 struct ixgbe_ring *ring = adapter->rx_ring[i];
4510
4511 if (!netif_is_ixgbe(ring->netdev))
4512 continue;
4513
4514 j = ring->reg_idx;
4515 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4516 vlnctrl &= ~IXGBE_RXDCTL_VME;
4517 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4518 }
4519 break;
4520 default:
4521 break;
4522 }
4523}
4524
/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */
4529static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4530{
4531 struct ixgbe_hw *hw = &adapter->hw;
4532 u32 vlnctrl;
4533 int i, j;
4534
4535 switch (hw->mac.type) {
4536 case ixgbe_mac_82598EB:
4537 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4538 vlnctrl |= IXGBE_VLNCTRL_VME;
4539 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4540 break;
4541 case ixgbe_mac_82599EB:
4542 case ixgbe_mac_X540:
4543 case ixgbe_mac_X550:
4544 case ixgbe_mac_X550EM_x:
4545 case ixgbe_mac_x550em_a:
4546 for (i = 0; i < adapter->num_rx_queues; i++) {
4547 struct ixgbe_ring *ring = adapter->rx_ring[i];
4548
4549 if (!netif_is_ixgbe(ring->netdev))
4550 continue;
4551
4552 j = ring->reg_idx;
4553 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4554 vlnctrl |= IXGBE_RXDCTL_VME;
4555 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4556 }
4557 break;
4558 default:
4559 break;
4560 }
4561}
4562
4563static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4564{
4565 struct ixgbe_hw *hw = &adapter->hw;
4566 u32 vlnctrl, i;
4567
4568 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4569
4570 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
4571
4572 vlnctrl |= IXGBE_VLNCTRL_VFE;
4573 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4574 } else {
4575 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4576 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4577 return;
4578 }
4579
4580
4581 if (hw->mac.type == ixgbe_mac_82598EB)
4582 return;
4583
4584
4585 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
4586 return;
4587
4588
4589 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4590
4591
4592 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4593 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4594 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4595
4596 vlvfb |= BIT(VMDQ_P(0) % 32);
4597 IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4598 }
4599
4600
4601 for (i = hw->mac.vft_size; i--;)
4602 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4603}
4604
4605#define VFTA_BLOCK_SIZE 8
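/* Scrub one VFTA_BLOCK_SIZE window of the VLAN filter table: drop the PF
 * from VLVF pool entries whose VLAN is no longer in active_vlans, then
 * rebuild the corresponding VFTA registers from the software table.
 */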
4606static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4607{
4608 struct ixgbe_hw *hw = &adapter->hw;
4609 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4610 u32 vid_start = vfta_offset * 32;
4611 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4612 u32 i, vid, word, bits;
4613
4614 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4615 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
4616
4617
4618 vid = vlvf & VLAN_VID_MASK;
4619
4620
4621 if (vid < vid_start || vid >= vid_end)
4622 continue;
4623
4624 if (vlvf) {
4625
4626 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4627
4628
4629 if (test_bit(vid, adapter->active_vlans))
4630 continue;
4631 }
4632
4633
4634 word = i * 2 + VMDQ_P(0) / 32;
4635 bits = ~BIT(VMDQ_P(0) % 32);
4636 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4637 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4638 }
4639
4640
4641 for (i = VFTA_BLOCK_SIZE; i--;) {
4642 vid = (vfta_offset + i) * 32;
4643 word = vid / BITS_PER_LONG;
4644 bits = vid % BITS_PER_LONG;
4645
4646 vfta[i] |= adapter->active_vlans[word] >> bits;
4647
4648 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4649 }
4650}
4651
4652static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4653{
4654 struct ixgbe_hw *hw = &adapter->hw;
4655 u32 vlnctrl, i;
4656
4657
4658 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4659 vlnctrl |= IXGBE_VLNCTRL_VFE;
4660 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4661
4662 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
4663 hw->mac.type == ixgbe_mac_82598EB)
4664 return;
4665
4666
4667 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4668 return;
4669
4670
4671 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4672
4673 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
4674 ixgbe_scrub_vfta(adapter, i);
4675}
4676
4677static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4678{
4679 u16 vid = 1;
4680
4681 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4682
4683 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
4684 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4685}
4686
/**
 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
4696static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4697{
4698 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4699 struct ixgbe_hw *hw = &adapter->hw;
4700
4701 if (!netif_running(netdev))
4702 return 0;
4703
4704 if (hw->mac.ops.update_mc_addr_list)
4705 hw->mac.ops.update_mc_addr_list(hw, netdev);
4706 else
4707 return -ENOMEM;
4708
4709#ifdef CONFIG_PCI_IOV
4710 ixgbe_restore_vf_multicasts(adapter);
4711#endif
4712
4713 return netdev_mc_count(netdev);
4714}
4715
4716#ifdef CONFIG_PCI_IOV
4717void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4718{
4719 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4720 struct ixgbe_hw *hw = &adapter->hw;
4721 int i;
4722
4723 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4724 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4725
4726 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4727 hw->mac.ops.set_rar(hw, i,
4728 mac_table->addr,
4729 mac_table->pool,
4730 IXGBE_RAH_AV);
4731 else
4732 hw->mac.ops.clear_rar(hw, i);
4733 }
4734}
4735
4736#endif
4737static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4738{
4739 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4740 struct ixgbe_hw *hw = &adapter->hw;
4741 int i;
4742
4743 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4744 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
4745 continue;
4746
4747 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4748
4749 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4750 hw->mac.ops.set_rar(hw, i,
4751 mac_table->addr,
4752 mac_table->pool,
4753 IXGBE_RAH_AV);
4754 else
4755 hw->mac.ops.clear_rar(hw, i);
4756 }
4757}
4758
4759static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4760{
4761 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4762 struct ixgbe_hw *hw = &adapter->hw;
4763 int i;
4764
4765 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4766 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4767 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4768 }
4769
4770 ixgbe_sync_mac_table(adapter);
4771}
4772
4773static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
4774{
4775 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4776 struct ixgbe_hw *hw = &adapter->hw;
4777 int i, count = 0;
4778
	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
		/* do not count default RAR as available */
		if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
			continue;

		/* only count unused and addresses that belong to us */
		if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
			if (mac_table->pool != pool)
				continue;
		}
4789
4790 count++;
4791 }
4792
4793 return count;
4794}
4795
/* this function destroys the first RAR entry */
4797static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
4798{
4799 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4800 struct ixgbe_hw *hw = &adapter->hw;
4801
4802 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
4803 mac_table->pool = VMDQ_P(0);
4804
4805 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
4806
4807 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
4808 IXGBE_RAH_AV);
4809}
4810
4811int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4812 const u8 *addr, u16 pool)
4813{
4814 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4815 struct ixgbe_hw *hw = &adapter->hw;
4816 int i;
4817
4818 if (is_zero_ether_addr(addr))
4819 return -EINVAL;
4820
4821 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4822 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4823 continue;
4824
4825 ether_addr_copy(mac_table->addr, addr);
4826 mac_table->pool = pool;
4827
4828 mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
4829 IXGBE_MAC_STATE_IN_USE;
4830
4831 ixgbe_sync_mac_table(adapter);
4832
4833 return i;
4834 }
4835
4836 return -ENOMEM;
4837}
4838
4839int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
4840 const u8 *addr, u16 pool)
4841{
4842 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4843 struct ixgbe_hw *hw = &adapter->hw;
4844 int i;
4845
4846 if (is_zero_ether_addr(addr))
4847 return -EINVAL;
4848
	/* search table for addr, if found clear IN_USE flag and sync */
	for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
		/* we can only delete an entry if it is in use */
		if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
			continue;

		/* the entry must belong to the given pool */
		if (mac_table->pool != pool)
			continue;

		/* and the address must match the one being removed */
		if (!ether_addr_equal(addr, mac_table->addr))
			continue;
4860
4861 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4862 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4863
4864 ixgbe_sync_mac_table(adapter);
4865
4866 return 0;
4867 }
4868
4869 return -ENOMEM;
4870}
4871
4872static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
4873{
4874 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4875 int ret;
4876
4877 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
4878
4879 return min_t(int, ret, 0);
4880}
4881
4882static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4883{
4884 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4885
4886 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
4887
4888 return 0;
4889}
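
/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/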
4900void ixgbe_set_rx_mode(struct net_device *netdev)
4901{
4902 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4903 struct ixgbe_hw *hw = &adapter->hw;
4904 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4905 netdev_features_t features = netdev->features;
4906 int count;
4907
	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* set all bits that we expect to always be set */
	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	fctrl |= IXGBE_FCTRL_PMCF;

	/* clear the bits we are changing the status of */
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4919 if (netdev->flags & IFF_PROMISC) {
4920 hw->addr_ctrl.user_set_promisc = true;
4921 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4922 vmolr |= IXGBE_VMOLR_MPE;
4923 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4924 } else {
4925 if (netdev->flags & IFF_ALLMULTI) {
4926 fctrl |= IXGBE_FCTRL_MPE;
4927 vmolr |= IXGBE_VMOLR_MPE;
4928 }
4929 hw->addr_ctrl.user_set_promisc = false;
4930 }
4931
	/*
	 * Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4938 fctrl |= IXGBE_FCTRL_UPE;
4939 vmolr |= IXGBE_VMOLR_ROPE;
4940 }
4941
	/* Write addresses to the MTA, if the attempt fails
	 * then we should just turn on promiscuous mode so
	 * that we can at least receive multicast traffic
	 */
	count = ixgbe_write_mc_addr_list(netdev);
4947 if (count < 0) {
4948 fctrl |= IXGBE_FCTRL_MPE;
4949 vmolr |= IXGBE_VMOLR_MPE;
4950 } else if (count) {
4951 vmolr |= IXGBE_VMOLR_ROMPE;
4952 }
4953
4954 if (hw->mac.type != ixgbe_mac_82598EB) {
4955 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4956 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
4957 IXGBE_VMOLR_ROPE);
4958 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
4959 }
4960
	/* This is useful for sniffing bad packets. */
	if (features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic */
		fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
			  IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
			  IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */

		fctrl &= ~(IXGBE_FCTRL_DPF);
		/* NOTE:  VLAN filtering is disabled by setting PROMISC */
	}
4972
4973 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4974
4975 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4976 ixgbe_vlan_strip_enable(adapter);
4977 else
4978 ixgbe_vlan_strip_disable(adapter);
4979
4980 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
4981 ixgbe_vlan_promisc_disable(adapter);
4982 else
4983 ixgbe_vlan_promisc_enable(adapter);
4984}
4985
4986static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4987{
4988 int q_idx;
4989
4990 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
4991 napi_enable(&adapter->q_vector[q_idx]->napi);
4992}
4993
4994static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
4995{
4996 int q_idx;
4997
4998 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
4999 napi_disable(&adapter->q_vector[q_idx]->napi);
5000}
5001
5002static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
5003{
5004 struct ixgbe_adapter *adapter = netdev_priv(dev);
5005 struct ixgbe_hw *hw = &adapter->hw;
5006 struct udp_tunnel_info ti;
5007
5008 udp_tunnel_nic_get_port(dev, table, 0, &ti);
5009 if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
5010 adapter->vxlan_port = ti.port;
5011 else
5012 adapter->geneve_port = ti.port;
5013
5014 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL,
5015 ntohs(adapter->vxlan_port) |
5016 ntohs(adapter->geneve_port) <<
5017 IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT);
5018 return 0;
5019}
5020
5021static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = {
5022 .sync_table = ixgbe_udp_tunnel_sync,
5023 .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
5024 .tables = {
5025 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
5026 },
5027};
5028
5029static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = {
5030 .sync_table = ixgbe_udp_tunnel_sync,
5031 .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
5032 .tables = {
5033 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
5034 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
5035 },
5036};
5037
5038#ifdef CONFIG_IXGBE_DCB
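/**
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * Called by the driver on open to configure the DCB hardware, and by the
 * DCB netlink interface whenever the DCB state is reconfigured.
 */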
5047static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
5048{
5049 struct ixgbe_hw *hw = &adapter->hw;
5050 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
5051
5052 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
5053 if (hw->mac.type == ixgbe_mac_82598EB)
5054 netif_set_gso_max_size(adapter->netdev, 65536);
5055 return;
5056 }
5057
5058 if (hw->mac.type == ixgbe_mac_82598EB)
5059 netif_set_gso_max_size(adapter->netdev, 32768);
5060
5061#ifdef IXGBE_FCOE
5062 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
5063 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
5064#endif
5065
	/* reconfigure the hardware */
	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
5068 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5069 DCB_TX_CONFIG);
5070 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5071 DCB_RX_CONFIG);
5072 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
5073 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
5074 ixgbe_dcb_hw_ets(&adapter->hw,
5075 adapter->ixgbe_ieee_ets,
5076 max_frame);
5077 ixgbe_dcb_hw_pfc_config(&adapter->hw,
5078 adapter->ixgbe_ieee_pfc->pfc_en,
5079 adapter->ixgbe_ieee_ets->prio_tc);
5080 }
5081
	/* Enable RSS Hash per TC */
	if (hw->mac.type != ixgbe_mac_82598EB) {
5084 u32 msb = 0;
5085 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
5086
5087 while (rss_i) {
5088 msb++;
5089 rss_i >>= 1;
5090 }
5091
		/* write msb to all 8 TCs */
		IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
5094 }
5095}
5096#endif
5097
/* Additional bittime to account for IXGBE framing */
5099#define IXGBE_ETH_FRAMING 20
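
/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */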
5107static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
5108{
5109 struct ixgbe_hw *hw = &adapter->hw;
5110 struct net_device *dev = adapter->netdev;
5111 int link, tc, kb, marker;
5112 u32 dv_id, rx_pba;
5113
	/* Calculate max LAN frame size */
	tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
5116
5117#ifdef IXGBE_FCOE
	/* FCoE traffic class uses FCOE jumbo frames */
	if ((dev->features & NETIF_F_FCOE_MTU) &&
5120 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5121 (pb == ixgbe_fcoe_get_tc(adapter)))
5122 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5123#endif
5124
	/* Calculate delay value for device */
	switch (hw->mac.type) {
5127 case ixgbe_mac_X540:
5128 case ixgbe_mac_X550:
5129 case ixgbe_mac_X550EM_x:
5130 case ixgbe_mac_x550em_a:
5131 dv_id = IXGBE_DV_X540(link, tc);
5132 break;
5133 default:
5134 dv_id = IXGBE_DV(link, tc);
5135 break;
5136 }
5137
	/* Loopback switch introduces additional latency */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5140 dv_id += IXGBE_B2BT(tc);
5141
	/* Delay value is calculated in bit times convert to KB */
	kb = IXGBE_BT2KB(dv_id);
5144 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
5145
5146 marker = rx_pba - kb;
5147
	/* It is possible that the packet buffer is not large enough
	 * to provide required headroom. In this case throw an error
	 * to user and do the best we can.
	 */
	if (marker < 0) {
		e_warn(drv, "Packet Buffer(%i) can not provide enough "
		       "headroom to support flow control. "
		       "Decrease MTU or number of traffic classes\n", pb);
		marker = tc + 1;
	}
5158
5159 return marker;
5160}
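
/**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */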
5168static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
5169{
5170 struct ixgbe_hw *hw = &adapter->hw;
5171 struct net_device *dev = adapter->netdev;
5172 int tc;
5173 u32 dv_id;
5174
	/* Calculate max LAN frame size */
	tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
5177
5178#ifdef IXGBE_FCOE
	/* FCoE traffic class uses FCOE jumbo frames */
	if ((dev->features & NETIF_F_FCOE_MTU) &&
5181 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5182 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
5183 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5184#endif
5185
	/* Calculate delay value for device */
	switch (hw->mac.type) {
5188 case ixgbe_mac_X540:
5189 case ixgbe_mac_X550:
5190 case ixgbe_mac_X550EM_x:
5191 case ixgbe_mac_x550em_a:
5192 dv_id = IXGBE_LOW_DV_X540(tc);
5193 break;
5194 default:
5195 dv_id = IXGBE_LOW_DV(tc);
5196 break;
5197 }
5198
	/* Delay value is calculated in bit times convert to KB */
	return IXGBE_BT2KB(dv_id);
5201}
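
/*
 * ixgbe_pbthresh_setup - calculate and setup high low water marks
 */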
5206static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
5207{
5208 struct ixgbe_hw *hw = &adapter->hw;
5209 int num_tc = adapter->hw_tcs;
5210 int i;
5211
5212 if (!num_tc)
5213 num_tc = 1;
5214
5215 for (i = 0; i < num_tc; i++) {
5216 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
5217 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
5218
		/* Low water marks must not be larger than high water marks */
		if (hw->fc.low_water[i] > hw->fc.high_water[i])
5221 hw->fc.low_water[i] = 0;
5222 }
5223
5224 for (; i < MAX_TRAFFIC_CLASS; i++)
5225 hw->fc.high_water[i] = 0;
5226}
5227
5228static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
5229{
5230 struct ixgbe_hw *hw = &adapter->hw;
5231 int hdrm;
5232 u8 tc = adapter->hw_tcs;
5233
5234 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5235 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5236 hdrm = 32 << adapter->fdir_pballoc;
5237 else
5238 hdrm = 0;
5239
5240 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
5241 ixgbe_pbthresh_setup(adapter);
5242}
5243
5244static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
5245{
5246 struct ixgbe_hw *hw = &adapter->hw;
5247 struct hlist_node *node2;
5248 struct ixgbe_fdir_filter *filter;
5249 u8 queue;
5250
5251 spin_lock(&adapter->fdir_perfect_lock);
5252
5253 if (!hlist_empty(&adapter->fdir_filter_list))
5254 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
5255
5256 hlist_for_each_entry_safe(filter, node2,
5257 &adapter->fdir_filter_list, fdir_node) {
5258 if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
5259 queue = IXGBE_FDIR_DROP_QUEUE;
5260 } else {
5261 u32 ring = ethtool_get_flow_spec_ring(filter->action);
5262 u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
5263
5264 if (!vf && (ring >= adapter->num_rx_queues)) {
5265 e_err(drv, "FDIR restore failed without VF, ring: %u\n",
5266 ring);
5267 continue;
5268 } else if (vf &&
5269 ((vf > adapter->num_vfs) ||
5270 ring >= adapter->num_rx_queues_per_pool)) {
5271 e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
5272 vf, ring);
5273 continue;
5274 }
5275
			/* Map the ring onto the absolute queue index */
			if (!vf)
5278 queue = adapter->rx_ring[ring]->reg_idx;
5279 else
5280 queue = ((vf - 1) *
5281 adapter->num_rx_queues_per_pool) + ring;
5282 }
5283
5284 ixgbe_fdir_write_perfect_filter_82599(hw,
5285 &filter->filter, filter->sw_idx, queue);
5286 }
5287
5288 spin_unlock(&adapter->fdir_perfect_lock);
5289}
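
/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/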
5295static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
5296{
5297 u16 i = rx_ring->next_to_clean;
5298 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
5299
5300 if (rx_ring->xsk_pool) {
5301 ixgbe_xsk_clean_rx_ring(rx_ring);
5302 goto skip_free;
5303 }
5304
	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
5307 if (rx_buffer->skb) {
5308 struct sk_buff *skb = rx_buffer->skb;
5309 if (IXGBE_CB(skb)->page_released)
5310 dma_unmap_page_attrs(rx_ring->dev,
5311 IXGBE_CB(skb)->dma,
5312 ixgbe_rx_pg_size(rx_ring),
5313 DMA_FROM_DEVICE,
5314 IXGBE_RX_DMA_ATTR);
5315 dev_kfree_skb(skb);
5316 }
5317
		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
5322 rx_buffer->dma,
5323 rx_buffer->page_offset,
5324 ixgbe_rx_bufsz(rx_ring),
5325 DMA_FROM_DEVICE);
5326
		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
5329 ixgbe_rx_pg_size(rx_ring),
5330 DMA_FROM_DEVICE,
5331 IXGBE_RX_DMA_ATTR);
5332 __page_frag_cache_drain(rx_buffer->page,
5333 rx_buffer->pagecnt_bias);
5334
5335 i++;
5336 rx_buffer++;
5337 if (i == rx_ring->count) {
5338 i = 0;
5339 rx_buffer = rx_ring->rx_buffer_info;
5340 }
5341 }
5342
5343skip_free:
5344 rx_ring->next_to_alloc = 0;
5345 rx_ring->next_to_clean = 0;
5346 rx_ring->next_to_use = 0;
5347}
5348
5349static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
5350 struct ixgbe_fwd_adapter *accel)
5351{
5352 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
5353 int num_tc = netdev_get_num_tc(adapter->netdev);
5354 struct net_device *vdev = accel->netdev;
5355 int i, baseq, err;
5356
5357 baseq = accel->pool * adapter->num_rx_queues_per_pool;
5358 netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
5359 accel->pool, adapter->num_rx_pools,
5360 baseq, baseq + adapter->num_rx_queues_per_pool);
5361
5362 accel->rx_base_queue = baseq;
5363 accel->tx_base_queue = baseq;
5364
5365
5366 for (i = 0; i < num_tc; i++)
5367 netdev_bind_sb_channel_queue(adapter->netdev, vdev,
5368 i, rss_i, baseq + (rss_i * i));
5369
5370 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5371 adapter->rx_ring[baseq + i]->netdev = vdev;
5372
	/* guarantee all rings are updated before we update the
	 * MAC address filter
	 */
	wmb();
5377
5381 err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
5382 VMDQ_P(accel->pool));
5383 if (err >= 0)
5384 return 0;
5385
	/* if we cannot add the MAC rule then disable the offload */
	macvlan_release_l2fw_offload(vdev);
5388
5389 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5390 adapter->rx_ring[baseq + i]->netdev = NULL;
5391
5392 netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
5393
	/* unbind the queues and drop the subordinate channel config */
	netdev_unbind_sb_channel(adapter->netdev, vdev);
5396 netdev_set_sb_channel(vdev, 0);
5397
5398 clear_bit(accel->pool, adapter->fwd_bitmask);
5399 kfree(accel);
5400
5401 return err;
5402}
5403
5404static int ixgbe_macvlan_up(struct net_device *vdev,
5405 struct netdev_nested_priv *priv)
5406{
5407 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
5408 struct ixgbe_fwd_adapter *accel;
5409
5410 if (!netif_is_macvlan(vdev))
5411 return 0;
5412
5413 accel = macvlan_accel_priv(vdev);
5414 if (!accel)
5415 return 0;
5416
5417 ixgbe_fwd_ring_up(adapter, accel);
5418
5419 return 0;
5420}
5421
5422static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
5423{
5424 struct netdev_nested_priv priv = {
5425 .data = (void *)adapter,
5426 };
5427
5428 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5429 ixgbe_macvlan_up, &priv);
5430}
5431
5432static void ixgbe_configure(struct ixgbe_adapter *adapter)
5433{
5434 struct ixgbe_hw *hw = &adapter->hw;
5435
5436 ixgbe_configure_pb(adapter);
5437#ifdef CONFIG_IXGBE_DCB
5438 ixgbe_configure_dcb(adapter);
5439#endif
5440
	/*
	 * We must restore virtualization before VLANs or else
	 * the VLVF registers will not be populated
	 */
	ixgbe_configure_virtualization(adapter);
5445
5446 ixgbe_set_rx_mode(adapter->netdev);
5447 ixgbe_restore_vlan(adapter);
5448 ixgbe_ipsec_restore(adapter);
5449
5450 switch (hw->mac.type) {
5451 case ixgbe_mac_82599EB:
5452 case ixgbe_mac_X540:
5453 hw->mac.ops.disable_rx_buff(hw);
5454 break;
5455 default:
5456 break;
5457 }
5458
5459 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5460 ixgbe_init_fdir_signature_82599(&adapter->hw,
5461 adapter->fdir_pballoc);
5462 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5463 ixgbe_init_fdir_perfect_82599(&adapter->hw,
5464 adapter->fdir_pballoc);
5465 ixgbe_fdir_filter_restore(adapter);
5466 }
5467
5468 switch (hw->mac.type) {
5469 case ixgbe_mac_82599EB:
5470 case ixgbe_mac_X540:
5471 hw->mac.ops.enable_rx_buff(hw);
5472 break;
5473 default:
5474 break;
5475 }
5476
5477#ifdef CONFIG_IXGBE_DCA
	/* configure DCA */
	if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
5480 ixgbe_setup_dca(adapter);
5481#endif
5482
5483#ifdef IXGBE_FCOE
	/* configure FCoE L2 filters, redirection table, and Rx control */
	ixgbe_configure_fcoe(adapter);
5486
5487#endif
5488 ixgbe_configure_tx(adapter);
5489 ixgbe_configure_rx(adapter);
5490 ixgbe_configure_dfwd(adapter);
5491}
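
/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 **/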
5497static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
5498{
	/*
	 * We are assuming the worst case scenario here, and that
	 * is that an SFP was inserted/removed after the reset
	 * but before SFP detection was enabled.  As such the best
	 * solution is to just start searching as soon as we start
	 */
5505 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5506 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5507
5508 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5509 adapter->sfp_poll_time = 0;
5510}
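
/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/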
5518static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
5519{
5520 u32 speed;
5521 bool autoneg, link_up = false;
5522 int ret = IXGBE_ERR_LINK_SETUP;
5523
5524 if (hw->mac.ops.check_link)
5525 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
5526
5527 if (ret)
5528 return ret;
5529
5530 speed = hw->phy.autoneg_advertised;
5531 if (!speed && hw->mac.ops.get_link_capabilities) {
5532 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5533 &autoneg);
		/* remove NBASE-T speeds from default autonegotiation
		 * to accommodate broken network switches in the field
		 * which cannot cope with advertised NBASE-T speeds
		 */
		speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
			   IXGBE_LINK_SPEED_2_5GB_FULL);
5536 }
5537
5538 if (ret)
5539 return ret;
5540
5541 if (hw->mac.ops.setup_link)
5542 ret = hw->mac.ops.setup_link(hw, speed, link_up);
5543
5544 return ret;
5545}
5546
5547static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5548{
5549 struct ixgbe_hw *hw = &adapter->hw;
5550 u32 gpie = 0;
5551
5552 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5553 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5554 IXGBE_GPIE_OCD;
5555 gpie |= IXGBE_GPIE_EIAME;

		/*
		 * use EIAM to auto-mask when MSI-X interrupt is asserted
		 * this saves a register write for every interrupt
		 */
5560 switch (hw->mac.type) {
5561 case ixgbe_mac_82598EB:
5562 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5563 break;
5564 case ixgbe_mac_82599EB:
5565 case ixgbe_mac_X540:
5566 case ixgbe_mac_X550:
5567 case ixgbe_mac_X550EM_x:
5568 case ixgbe_mac_x550em_a:
5569 default:
5570 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5571 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5572 break;
5573 }
5574 } else {
		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
		 * specifically only auto mask tx and rx interrupts */
5577 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5578 }
5579
	/* XXX: to interrupt immediately for EICS writes, enable this */
	/* gpie |= IXGBE_GPIE_EIMEN; */
5583 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
5584 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5585
5586 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
5587 case IXGBE_82599_VMDQ_8Q_MASK:
5588 gpie |= IXGBE_GPIE_VTMODE_16;
5589 break;
5590 case IXGBE_82599_VMDQ_4Q_MASK:
5591 gpie |= IXGBE_GPIE_VTMODE_32;
5592 break;
5593 default:
5594 gpie |= IXGBE_GPIE_VTMODE_64;
5595 break;
5596 }
5597 }
5598
	/* Enable Thermal over heat sensor interrupt */
	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
5601 switch (adapter->hw.mac.type) {
5602 case ixgbe_mac_82599EB:
5603 gpie |= IXGBE_SDP0_GPIEN_8259X;
5604 break;
5605 default:
5606 break;
5607 }
5608 }
5609
	/* Enable fan failure interrupt */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
5612 gpie |= IXGBE_SDP1_GPIEN(hw);
5613
5614 switch (hw->mac.type) {
5615 case ixgbe_mac_82599EB:
5616 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5617 break;
5618 case ixgbe_mac_X550EM_x:
5619 case ixgbe_mac_x550em_a:
5620 gpie |= IXGBE_SDP0_GPIEN_X540;
5621 break;
5622 default:
5623 break;
5624 }
5625
5626 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5627}
5628
5629static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
5630{
5631 struct ixgbe_hw *hw = &adapter->hw;
5632 int err;
5633 u32 ctrl_ext;
5634
5635 ixgbe_get_hw_control(adapter);
5636 ixgbe_setup_gpie(adapter);
5637
5638 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
5639 ixgbe_configure_msix(adapter);
5640 else
5641 ixgbe_configure_msi_and_legacy(adapter);
5642
	/* enable the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.enable_tx_laser)
5645 hw->mac.ops.enable_tx_laser(hw);
5646
5647 if (hw->phy.ops.set_phy_power)
5648 hw->phy.ops.set_phy_power(hw, true);
5649
5650 smp_mb__before_atomic();
5651 clear_bit(__IXGBE_DOWN, &adapter->state);
5652 ixgbe_napi_enable_all(adapter);
5653
5654 if (ixgbe_is_sfp(hw)) {
5655 ixgbe_sfp_link_config(adapter);
5656 } else {
5657 err = ixgbe_non_sfp_link_config(hw);
5658 if (err)
5659 e_err(probe, "link_config FAILED %d\n", err);
5660 }
5661
	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_EICR);
5664 ixgbe_irq_enable(adapter, true, true);
5665
	/*
	 * If this adapter has a fan, check to see if we had a failure
	 * before we enabled the interrupt.
	 */
5670 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5671 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5672 if (esdp & IXGBE_ESDP_SDP1)
5673 e_crit(drv, "Fan has stopped, replace the adapter\n");
5674 }
5675
	/* bring the link up in the watchdog */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5679 adapter->link_check_timeout = jiffies;
5680 mod_timer(&adapter->service_timer, jiffies);
5681
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5684 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5685 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5686}
5687
5688void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5689{
	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(adapter->netdev);
5692
5693 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5694 usleep_range(1000, 2000);
5695 if (adapter->hw.phy.type == ixgbe_phy_fw)
5696 ixgbe_watchdog_link_is_down(adapter);
5697 ixgbe_down(adapter);
5698
	/*
	 * If SR-IOV enabled then wait a bit before bringing the adapter
	 * back up to give the VFs time to respond to the reset.  The
	 * two second wait is based upon the watchdog timer cycle of
	 * the VF driver.
	 */
5704 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5705 msleep(2000);
5706 ixgbe_up(adapter);
5707 clear_bit(__IXGBE_RESETTING, &adapter->state);
5708}
5709
5710void ixgbe_up(struct ixgbe_adapter *adapter)
5711{
5712
5713 ixgbe_configure(adapter);
5714
5715 ixgbe_up_complete(adapter);
5716}
5717
5718static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
5719{
5720 u16 devctl2;
5721
5722 pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
5723
5724 switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
5725 case IXGBE_PCIDEVCTRL2_17_34s:
5726 case IXGBE_PCIDEVCTRL2_4_8s:
		/* For now we cap the upper limit on delay to 2 seconds
		 * as we end up going up to 34 seconds of delay in worst
		 * case timeout value.
		 */
5731 case IXGBE_PCIDEVCTRL2_1_2s:
5732 return 2000000ul;
5733 case IXGBE_PCIDEVCTRL2_260_520ms:
5734 return 520000ul;
5735 case IXGBE_PCIDEVCTRL2_65_130ms:
5736 return 130000ul;
5737 case IXGBE_PCIDEVCTRL2_16_32ms:
5738 return 32000ul;
5739 case IXGBE_PCIDEVCTRL2_1_2ms:
5740 return 2000ul;
5741 case IXGBE_PCIDEVCTRL2_50_100us:
5742 return 100ul;
5743 case IXGBE_PCIDEVCTRL2_16_32ms_def:
5744 return 32000ul;
5745 default:
5746 break;
5747 }
5748
	/* unknown or reserved encoding, fall back to the 16-32ms default */
	return 32000ul;
5753}
5754
5755void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
5756{
5757 unsigned long wait_delay, delay_interval;
5758 struct ixgbe_hw *hw = &adapter->hw;
5759 int i, wait_loop;
5760 u32 rxdctl;
5761
	/* disable receives */
	hw->mac.ops.disable_rx(hw);
5764
5765 if (ixgbe_removed(hw->hw_addr))
5766 return;
5767
	/* disable all enabled Rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++) {
5770 struct ixgbe_ring *ring = adapter->rx_ring[i];
5771 u8 reg_idx = ring->reg_idx;
5772
5773 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5774 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5775 rxdctl |= IXGBE_RXDCTL_SWFLSH;

		/* write value back with RXDCTL.ENABLE bit cleared */
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
5779 }
5780
	/* RXDCTL.EN may not change on 82598 if link is down */
	if (hw->mac.type == ixgbe_mac_82598EB &&
5783 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5784 return;
5785
	/* Determine our minimum delay interval. We will increase this value
	 * with each subsequent test. This way if the device returns quickly
	 * we should spend as little time as possible waiting, however as
	 * the time increases we will wait for larger periods of time.
	 *
	 * The trick here is that we increase the interval using the
	 * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
	 * of that wait is that it totals up to 100x whatever interval we
	 * choose. Since our minimum wait is 100us we can just divide the
	 * total timeout by 100 to get our minimum delay interval.
	 */
5797 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5798
5799 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5800 wait_delay = delay_interval;
5801
5802 while (wait_loop--) {
5803 usleep_range(wait_delay, wait_delay + 10);
5804 wait_delay += delay_interval * 2;
5805 rxdctl = 0;

		/* OR together the reading of all queues */
5812 for (i = 0; i < adapter->num_rx_queues; i++) {
5813 struct ixgbe_ring *ring = adapter->rx_ring[i];
5814 u8 reg_idx = ring->reg_idx;
5815
5816 rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5817 }
5818
5819 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
5820 return;
5821 }
5822
5823 e_err(drv,
5824 "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5825}
5826
5827void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
5828{
5829 unsigned long wait_delay, delay_interval;
5830 struct ixgbe_hw *hw = &adapter->hw;
5831 int i, wait_loop;
5832 u32 txdctl;
5833
5834 if (ixgbe_removed(hw->hw_addr))
5835 return;
5836
	/* disable all enabled Tx queues */
	for (i = 0; i < adapter->num_tx_queues; i++) {
5839 struct ixgbe_ring *ring = adapter->tx_ring[i];
5840 u8 reg_idx = ring->reg_idx;
5841
5842 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5843 }
5844
	/* disable all enabled XDP Tx queues */
	for (i = 0; i < adapter->num_xdp_queues; i++) {
5847 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5848 u8 reg_idx = ring->reg_idx;
5849
5850 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5851 }
5852
	/* If the link is not up there shouldn't be much in the way of
	 * pending transactions. Those that are left will be flushed out
	 * when the reset logic goes through the flush sequence to clean out
	 * the pending Tx transactions.
	 */
5858 if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5859 goto dma_engine_disable;
5860
	/* Determine our minimum delay interval. We will increase this value
	 * with each subsequent test. This way if the device returns quickly
	 * we should spend as little time as possible waiting, however as
	 * the time increases we will wait for larger periods of time.
	 *
	 * The trick here is that we increase the interval using the
	 * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
	 * of that wait is that it totals up to 100x whatever interval we
	 * choose. Since our minimum wait is 100us we can just divide the
	 * total timeout by 100 to get our minimum delay interval.
	 */
5872 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5873
5874 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5875 wait_delay = delay_interval;
5876
5877 while (wait_loop--) {
5878 usleep_range(wait_delay, wait_delay + 10);
5879 wait_delay += delay_interval * 2;
5880 txdctl = 0;

		/* OR together the reading of all queues */
5887 for (i = 0; i < adapter->num_tx_queues; i++) {
5888 struct ixgbe_ring *ring = adapter->tx_ring[i];
5889 u8 reg_idx = ring->reg_idx;
5890
5891 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5892 }
5893 for (i = 0; i < adapter->num_xdp_queues; i++) {
5894 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5895 u8 reg_idx = ring->reg_idx;
5896
5897 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5898 }
5899
5900 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
5901 goto dma_engine_disable;
5902 }
5903
5904 e_err(drv,
5905 "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5906
5907dma_engine_disable:
	/* Disable the Tx DMA engine on 82599 and later MAC */
	switch (hw->mac.type) {
5910 case ixgbe_mac_82599EB:
5911 case ixgbe_mac_X540:
5912 case ixgbe_mac_X550:
5913 case ixgbe_mac_X550EM_x:
5914 case ixgbe_mac_x550em_a:
5915 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5916 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5917 ~IXGBE_DMATXCTL_TE));
5918 fallthrough;
5919 default:
5920 break;
5921 }
5922}
5923
5924void ixgbe_reset(struct ixgbe_adapter *adapter)
5925{
5926 struct ixgbe_hw *hw = &adapter->hw;
5927 struct net_device *netdev = adapter->netdev;
5928 int err;
5929
5930 if (ixgbe_removed(hw->hw_addr))
5931 return;
5932
5933 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5934 usleep_range(1000, 2000);
5935
	/* clear all SFP and link config related flags while holding SFP_INIT */
	adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5938 IXGBE_FLAG2_SFP_NEEDS_RESET);
5939 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5940
5941 err = hw->mac.ops.init_hw(hw);
5942 switch (err) {
5943 case 0:
5944 case IXGBE_ERR_SFP_NOT_PRESENT:
5945 case IXGBE_ERR_SFP_NOT_SUPPORTED:
5946 break;
5947 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5948 e_dev_err("master disable timed out\n");
5949 break;
5950 case IXGBE_ERR_EEPROM_VERSION:
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
5953 "Please be aware there may be issues associated with "
5954 "your hardware. If you are experiencing problems "
5955 "please contact your Intel or hardware "
5956 "representative who provided you with this "
5957 "hardware.\n");
5958 break;
5959 default:
5960 e_dev_err("Hardware Error: %d\n", err);
5961 }
5962
5963 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5964
	/* flush entries out of MAC table */
	ixgbe_flush_sw_mac_table(adapter);
5967 __dev_uc_unsync(netdev, NULL);
5968
	/* do not flush user set addresses */
	ixgbe_mac_set_default_filter(adapter);
5971
	/* update SAN MAC vmdq pool selection */
	if (hw->mac.san_mac_rar_index)
5974 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5975
5976 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5977 ixgbe_ptp_reset(adapter);
5978
5979 if (hw->phy.ops.set_phy_power) {
5980 if (!netif_running(adapter->netdev) && !adapter->wol)
5981 hw->phy.ops.set_phy_power(hw, false);
5982 else
5983 hw->phy.ops.set_phy_power(hw, true);
5984 }
5985}
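
/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/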
5991static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5992{
5993 u16 i = tx_ring->next_to_clean;
5994 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
5995
5996 if (tx_ring->xsk_pool) {
5997 ixgbe_xsk_clean_tx_ring(tx_ring);
5998 goto out;
5999 }
6000
6001 while (i != tx_ring->next_to_use) {
6002 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
6003
		/* Free all the Tx ring sk_buffs */
		if (ring_is_xdp(tx_ring))
6006 xdp_return_frame(tx_buffer->xdpf);
6007 else
6008 dev_kfree_skb_any(tx_buffer->skb);
6009
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
6012 dma_unmap_addr(tx_buffer, dma),
6013 dma_unmap_len(tx_buffer, len),
6014 DMA_TO_DEVICE);
6015
		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
6018 tx_desc = IXGBE_TX_DESC(tx_ring, i);
6019
		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
6022 tx_buffer++;
6023 tx_desc++;
6024 i++;
6025 if (unlikely(i == tx_ring->count)) {
6026 i = 0;
6027 tx_buffer = tx_ring->tx_buffer_info;
6028 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6029 }
6030
			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
6033 dma_unmap_page(tx_ring->dev,
6034 dma_unmap_addr(tx_buffer, dma),
6035 dma_unmap_len(tx_buffer, len),
6036 DMA_TO_DEVICE);
6037 }
6038
		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
6041 i++;
6042 if (unlikely(i == tx_ring->count)) {
6043 i = 0;
6044 tx_buffer = tx_ring->tx_buffer_info;
6045 }
6046 }
6047
	/* reset BQL for queue */
	if (!ring_is_xdp(tx_ring))
6050 netdev_tx_reset_queue(txring_txq(tx_ring));
6051
6052out:
	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
6055 tx_ring->next_to_clean = 0;
6056}
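
/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/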
6062static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
6063{
6064 int i;
6065
6066 for (i = 0; i < adapter->num_rx_queues; i++)
6067 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
6068}
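
/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/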
6074static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
6075{
6076 int i;
6077
6078 for (i = 0; i < adapter->num_tx_queues; i++)
6079 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
6080 for (i = 0; i < adapter->num_xdp_queues; i++)
6081 ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
6082}
6083
6084static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
6085{
6086 struct hlist_node *node2;
6087 struct ixgbe_fdir_filter *filter;
6088
6089 spin_lock(&adapter->fdir_perfect_lock);
6090
6091 hlist_for_each_entry_safe(filter, node2,
6092 &adapter->fdir_filter_list, fdir_node) {
6093 hlist_del(&filter->fdir_node);
6094 kfree(filter);
6095 }
6096 adapter->fdir_filter_count = 0;
6097
6098 spin_unlock(&adapter->fdir_perfect_lock);
6099}
6100
6101void ixgbe_down(struct ixgbe_adapter *adapter)
6102{
6103 struct net_device *netdev = adapter->netdev;
6104 struct ixgbe_hw *hw = &adapter->hw;
6105 int i;
6106
	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
		return; /* do nothing if already down */
6110
	/* Shut off incoming Tx traffic */
	netif_tx_stop_all_queues(netdev);
6113
	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);
6116 netif_tx_disable(netdev);
6117
	/* Disable Rx */
	ixgbe_disable_rx(adapter);
6120
	/* synchronize_rcu() needed for pending XDP buffers to drain */
	if (adapter->xdp_ring[0])
6123 synchronize_rcu();
6124
6125 ixgbe_irq_disable(adapter);
6126
6127 ixgbe_napi_disable_all(adapter);
6128
6129 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
6130 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
6131 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6132
6133 del_timer_sync(&adapter->service_timer);
6134
6135 if (adapter->num_vfs) {
		/* Clear EITR Select mapping */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
6138
		/* Mark all the VFs as inactive */
		for (i = 0 ; i < adapter->num_vfs; i++)
6141 adapter->vfinfo[i].clear_to_send = false;
6142
		/* ping all the active vfs to let them know we are going down */
		ixgbe_ping_all_vfs(adapter);
6145
		/* Disable all VFTE/VFRE TX/RX */
		ixgbe_disable_tx_rx(adapter);
6148 }
6149
	/* disable transmits in the hardware now that interrupts are off */
	ixgbe_disable_tx(adapter);
6152
6153 if (!pci_channel_offline(adapter->pdev))
6154 ixgbe_reset(adapter);
6155
	/* power down the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser)
6158 hw->mac.ops.disable_tx_laser(hw);
6159
6160 ixgbe_clean_all_tx_rings(adapter);
6161 ixgbe_clean_all_rx_rings(adapter);
6162}
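
/**
 * ixgbe_set_eee_capable - helper function to determine EEE support on X550
 * @adapter: board private structure
 */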
6168static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
6169{
6170 struct ixgbe_hw *hw = &adapter->hw;
6171
6172 switch (hw->device_id) {
6173 case IXGBE_DEV_ID_X550EM_A_1G_T:
6174 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6175 if (!hw->phy.eee_speeds_supported)
6176 break;
6177 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
6178 if (!hw->phy.eee_speeds_advertised)
6179 break;
6180 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
6181 break;
6182 default:
6183 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
6184 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
6185 break;
6186 }
6187}
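
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that timed out
 **/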
6194static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
6195{
6196 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6197
	/* Do the reset outside of interrupt context */
	ixgbe_tx_timeout_reset(adapter);
6200}
6201
6202#ifdef CONFIG_IXGBE_DCB
6203static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
6204{
6205 struct ixgbe_hw *hw = &adapter->hw;
6206 struct tc_configuration *tc;
6207 int j;
6208
6209 switch (hw->mac.type) {
6210 case ixgbe_mac_82598EB:
6211 case ixgbe_mac_82599EB:
6212 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
6213 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
6214 break;
6215 case ixgbe_mac_X540:
6216 case ixgbe_mac_X550:
6217 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
6218 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
6219 break;
6220 case ixgbe_mac_X550EM_x:
6221 case ixgbe_mac_x550em_a:
6222 default:
6223 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
6224 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
6225 break;
6226 }
6227
	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
6230 tc = &adapter->dcb_cfg.tc_config[j];
6231 tc->path[DCB_TX_CONFIG].bwg_id = 0;
6232 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
6233 tc->path[DCB_RX_CONFIG].bwg_id = 0;
6234 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
6235 tc->dcb_pfc = pfc_disabled;
6236 }
6237
	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &adapter->dcb_cfg.tc_config[0];
6240 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
6241 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
6242
6243 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
6244 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
6245 adapter->dcb_cfg.pfc_mode_enable = false;
6246 adapter->dcb_set_bitmap = 0x00;
6247 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
6248 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
6249 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
6250 sizeof(adapter->temp_dcb_cfg));
6251}
6252#endif
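
/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 * @ii: pointer to ixgbe_info for device
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/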
6263static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
6264 const struct ixgbe_info *ii)
6265{
6266 struct ixgbe_hw *hw = &adapter->hw;
6267 struct pci_dev *pdev = adapter->pdev;
6268 unsigned int rss, fdir;
6269 u32 fwsm;
6270 int i;
6271
6272
	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
6275 hw->device_id = pdev->device;
6276 hw->revision_id = pdev->revision;
6277 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6278 hw->subsystem_device_id = pdev->subsystem_device;
6279
	/* get_invariants needs the device IDs */
	ii->get_invariants(hw);
6282
	/* Set common capability flags and settings */
	rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
6285 adapter->ring_feature[RING_F_RSS].limit = rss;
6286 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
6287 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
6288 adapter->atr_sample_rate = 20;
6289 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
6290 adapter->ring_feature[RING_F_FDIR].limit = fdir;
6291 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
6292 adapter->ring_feature[RING_F_VMDQ].limit = 1;
6293#ifdef CONFIG_IXGBE_DCA
6294 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
6295#endif
6296#ifdef CONFIG_IXGBE_DCB
6297 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
6298 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6299#endif
6300#ifdef IXGBE_FCOE
6301 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
6302 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6303#ifdef CONFIG_IXGBE_DCB
	/* Default traffic class to use for FCoE */
	adapter->fcoe.up = IXGBE_FCOE_DEFTC;
6306#endif
6307#endif
6308
	/* initialize static ixgbe jump table entries */
	adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
6311 GFP_KERNEL);
6312 if (!adapter->jump_tables[0])
6313 return -ENOMEM;
6314 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
6315
6316 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
6317 adapter->jump_tables[i] = NULL;
6318
6319 adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
6320 sizeof(struct ixgbe_mac_addr),
6321 GFP_KERNEL);
6322 if (!adapter->mac_table)
6323 return -ENOMEM;
6324
6325 if (ixgbe_init_rss_key(adapter))
6326 return -ENOMEM;
6327
6328 adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
6329 if (!adapter->af_xdp_zc_qps)
6330 return -ENOMEM;
6331
	/* Set MAC specific capability flags and exceptions */
	switch (hw->mac.type) {
6334 case ixgbe_mac_82598EB:
6335 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
6336
6337 if (hw->device_id == IXGBE_DEV_ID_82598AT)
6338 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
6339
6340 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
6341 adapter->ring_feature[RING_F_FDIR].limit = 0;
6342 adapter->atr_sample_rate = 0;
6343 adapter->fdir_pballoc = 0;
6344#ifdef IXGBE_FCOE
6345 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6346 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6347#ifdef CONFIG_IXGBE_DCB
6348 adapter->fcoe.up = 0;
6349#endif
6350#endif
6351 break;
6352 case ixgbe_mac_82599EB:
6353 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
6354 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6355 break;
6356 case ixgbe_mac_X540:
6357 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
6358 if (fwsm & IXGBE_FWSM_TS_ENABLED)
6359 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6360 break;
6361 case ixgbe_mac_x550em_a:
6362 switch (hw->device_id) {
6363 case IXGBE_DEV_ID_X550EM_A_1G_T:
6364 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6365 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6366 break;
6367 default:
6368 break;
6369 }
6370 fallthrough;
6371 case ixgbe_mac_X550EM_x:
6372#ifdef CONFIG_IXGBE_DCB
6373 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
6374#endif
6375#ifdef IXGBE_FCOE
6376 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6377#ifdef CONFIG_IXGBE_DCB
6378 adapter->fcoe.up = 0;
6379#endif
6380#endif
6381 fallthrough;
6382 case ixgbe_mac_X550:
6383 if (hw->mac.type == ixgbe_mac_X550)
6384 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6385#ifdef CONFIG_IXGBE_DCA
6386 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
6387#endif
6388 break;
6389 default:
6390 break;
6391 }
6392
6393#ifdef IXGBE_FCOE
	/* FCoE support exists, always init the FCoE lock */
	spin_lock_init(&adapter->fcoe.lock);
6396
6397#endif

	/* n-tuple support exists, always init our spinlock */
	spin_lock_init(&adapter->fdir_perfect_lock);
6400
6401#ifdef CONFIG_IXGBE_DCB
6402 ixgbe_init_dcb(adapter);
6403#endif
6404 ixgbe_init_ipsec_offload(adapter);
6405
	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
6409 ixgbe_pbthresh_setup(adapter);
6410 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
6411 hw->fc.send_xon = true;
6412 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
6413
6414#ifdef CONFIG_PCI_IOV
6415 if (max_vfs > 0)
6416 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
6417
	/* assign number of SR-IOV VFs */
	if (hw->mac.type != ixgbe_mac_82598EB) {
6420 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
6421 max_vfs = 0;
6422 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
6423 }
6424 }
6425#endif
6426
	/* enable itr by default in dynamic mode */
	adapter->rx_itr_setting = 1;
6429 adapter->tx_itr_setting = 1;
6430
	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
6433 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
6434
	/* set default work limits */
	adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
6437
	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
6440 e_dev_err("EEPROM initialization failed\n");
6441 return -EIO;
6442 }
6443
	/* PF holds first pool slot */
	set_bit(0, adapter->fwd_bitmask);
6446 set_bit(__IXGBE_DOWN, &adapter->state);
6447
6448 return 0;
6449}
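
/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/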
6457int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
6458{
6459 struct device *dev = tx_ring->dev;
6460 int orig_node = dev_to_node(dev);
6461 int ring_node = NUMA_NO_NODE;
6462 int size;
6463
6464 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
6465
6466 if (tx_ring->q_vector)
6467 ring_node = tx_ring->q_vector->numa_node;
6468
6469 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
6470 if (!tx_ring->tx_buffer_info)
6471 tx_ring->tx_buffer_info = vmalloc(size);
6472 if (!tx_ring->tx_buffer_info)
6473 goto err;
6474
	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
6478
6479 set_dev_node(dev, ring_node);
6480 tx_ring->desc = dma_alloc_coherent(dev,
6481 tx_ring->size,
6482 &tx_ring->dma,
6483 GFP_KERNEL);
6484 set_dev_node(dev, orig_node);
6485 if (!tx_ring->desc)
6486 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6487 &tx_ring->dma, GFP_KERNEL);
6488 if (!tx_ring->desc)
6489 goto err;
6490
6491 tx_ring->next_to_use = 0;
6492 tx_ring->next_to_clean = 0;
6493 return 0;
6494
6495err:
6496 vfree(tx_ring->tx_buffer_info);
6497 tx_ring->tx_buffer_info = NULL;
6498 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
6499 return -ENOMEM;
6500}
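
/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/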
6512static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
6513{
6514 int i, j = 0, err = 0;
6515
6516 for (i = 0; i < adapter->num_tx_queues; i++) {
6517 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6518 if (!err)
6519 continue;
6520
6521 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
6522 goto err_setup_tx;
6523 }
6524 for (j = 0; j < adapter->num_xdp_queues; j++) {
6525 err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
6526 if (!err)
6527 continue;
6528
6529 e_err(probe, "Allocation for Tx Queue %u failed\n", j);
6530 goto err_setup_tx;
6531 }
6532
6533 return 0;
6534err_setup_tx:
6535
6536 while (j--)
6537 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
6538 while (i--)
6539 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6540 return err;
6541}
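
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: pointer to ixgbe_adapter
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/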
6550int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
6551 struct ixgbe_ring *rx_ring)
6552{
6553 struct device *dev = rx_ring->dev;
6554 int orig_node = dev_to_node(dev);
6555 int ring_node = NUMA_NO_NODE;
6556 int size;
6557
6558 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
6559
6560 if (rx_ring->q_vector)
6561 ring_node = rx_ring->q_vector->numa_node;
6562
6563 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
6564 if (!rx_ring->rx_buffer_info)
6565 rx_ring->rx_buffer_info = vmalloc(size);
6566 if (!rx_ring->rx_buffer_info)
6567 goto err;
6568
	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
6572
6573 set_dev_node(dev, ring_node);
6574 rx_ring->desc = dma_alloc_coherent(dev,
6575 rx_ring->size,
6576 &rx_ring->dma,
6577 GFP_KERNEL);
6578 set_dev_node(dev, orig_node);
6579 if (!rx_ring->desc)
6580 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
6581 &rx_ring->dma, GFP_KERNEL);
6582 if (!rx_ring->desc)
6583 goto err;
6584
6585 rx_ring->next_to_clean = 0;
6586 rx_ring->next_to_use = 0;
6587
	/* XDP RX-queue info */
	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
6590 rx_ring->queue_index, rx_ring->q_vector->napi.napi_id) < 0)
6591 goto err;
6592
6593 rx_ring->xdp_prog = adapter->xdp_prog;
6594
6595 return 0;
6596err:
6597 vfree(rx_ring->rx_buffer_info);
6598 rx_ring->rx_buffer_info = NULL;
6599 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
6600 return -ENOMEM;
6601}
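
/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/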
6613static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
6614{
6615 int i, err = 0;
6616
6617 for (i = 0; i < adapter->num_rx_queues; i++) {
6618 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
6619 if (!err)
6620 continue;
6621
6622 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
6623 goto err_setup_rx;
6624 }
6625
6626#ifdef IXGBE_FCOE
6627 err = ixgbe_setup_fcoe_ddp_resources(adapter);
6628 if (!err)
6629#endif
6630 return 0;
6631err_setup_rx:
6632
6633 while (i--)
6634 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6635 return err;
6636}
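
/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/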
6644void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
6645{
6646 ixgbe_clean_tx_ring(tx_ring);
6647
6648 vfree(tx_ring->tx_buffer_info);
6649 tx_ring->tx_buffer_info = NULL;
6650
	/* if not set, then don't free */
	if (!tx_ring->desc)
6653 return;
6654
6655 dma_free_coherent(tx_ring->dev, tx_ring->size,
6656 tx_ring->desc, tx_ring->dma);
6657
6658 tx_ring->desc = NULL;
6659}
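
/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/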
6667static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
6668{
6669 int i;
6670
6671 for (i = 0; i < adapter->num_tx_queues; i++)
6672 if (adapter->tx_ring[i]->desc)
6673 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6674 for (i = 0; i < adapter->num_xdp_queues; i++)
6675 if (adapter->xdp_ring[i]->desc)
6676 ixgbe_free_tx_resources(adapter->xdp_ring[i]);
6677}
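
/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/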
6685void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
6686{
6687 ixgbe_clean_rx_ring(rx_ring);
6688
6689 rx_ring->xdp_prog = NULL;
6690 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
6691 vfree(rx_ring->rx_buffer_info);
6692 rx_ring->rx_buffer_info = NULL;
6693
	/* if not set, then don't free */
	if (!rx_ring->desc)
6696 return;
6697
6698 dma_free_coherent(rx_ring->dev, rx_ring->size,
6699 rx_ring->desc, rx_ring->dma);
6700
6701 rx_ring->desc = NULL;
6702}
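
/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/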
6710static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
6711{
6712 int i;
6713
6714#ifdef IXGBE_FCOE
6715 ixgbe_free_fcoe_ddp_resources(adapter);
6716
6717#endif
6718 for (i = 0; i < adapter->num_rx_queues; i++)
6719 if (adapter->rx_ring[i]->desc)
6720 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6721}
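
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/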
6730static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6731{
6732 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6733
6734 if (adapter->xdp_prog) {
6735 int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
6736 VLAN_HLEN;
6737 int i;
6738
6739 for (i = 0; i < adapter->num_rx_queues; i++) {
6740 struct ixgbe_ring *ring = adapter->rx_ring[i];
6741
6742 if (new_frame_size > ixgbe_rx_bufsz(ring)) {
6743 e_warn(probe, "Requested MTU size is not supported with XDP\n");
6744 return -EINVAL;
6745 }
6746 }
6747 }
6748
	/*
	 * For 82599EB we cannot allow legacy VFs to enable their receive
	 * paths when MTU greater than 1500 is configured.  So display a
	 * warning that legacy VFs will be disabled.
	 */
	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
6755 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
6756 (new_mtu > ETH_DATA_LEN))
6757 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
6758
6759 netdev_dbg(netdev, "changing MTU from %d to %d\n",
6760 netdev->mtu, new_mtu);
6761
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;
6764
6765 if (netif_running(netdev))
6766 ixgbe_reinit_locked(adapter);
6767
6768 return 0;
6769}
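
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/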
6783int ixgbe_open(struct net_device *netdev)
6784{
6785 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6786 struct ixgbe_hw *hw = &adapter->hw;
6787 int err, queues;
6788
	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
6791 return -EBUSY;
6792
6793 netif_carrier_off(netdev);
6794
	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
6797 if (err)
6798 goto err_setup_tx;
6799
	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
6802 if (err)
6803 goto err_setup_rx;
6804
6805 ixgbe_configure(adapter);
6806
6807 err = ixgbe_request_irq(adapter);
6808 if (err)
6809 goto err_req_irq;
6810
	/* Notify the stack of the actual queue counts. */
	queues = adapter->num_tx_queues;
6813 err = netif_set_real_num_tx_queues(netdev, queues);
6814 if (err)
6815 goto err_set_queues;
6816
6817 queues = adapter->num_rx_queues;
6818 err = netif_set_real_num_rx_queues(netdev, queues);
6819 if (err)
6820 goto err_set_queues;
6821
6822 ixgbe_ptp_init(adapter);
6823
6824 ixgbe_up_complete(adapter);
6825
6826 udp_tunnel_nic_reset_ntf(netdev);
6827
6828 return 0;
6829
6830err_set_queues:
6831 ixgbe_free_irq(adapter);
6832err_req_irq:
6833 ixgbe_free_all_rx_resources(adapter);
6834 if (hw->phy.ops.set_phy_power && !adapter->wol)
6835 hw->phy.ops.set_phy_power(&adapter->hw, false);
6836err_setup_rx:
6837 ixgbe_free_all_tx_resources(adapter);
6838err_setup_tx:
6839 ixgbe_reset(adapter);
6840
6841 return err;
6842}
6843
6844static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6845{
6846 ixgbe_ptp_suspend(adapter);
6847
6848 if (adapter->hw.phy.ops.enter_lplu) {
6849 adapter->hw.phy.reset_disable = true;
6850 ixgbe_down(adapter);
6851 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6852 adapter->hw.phy.reset_disable = false;
6853 } else {
6854 ixgbe_down(adapter);
6855 }
6856
6857 ixgbe_free_irq(adapter);
6858
6859 ixgbe_free_all_tx_resources(adapter);
6860 ixgbe_free_all_rx_resources(adapter);
6861}
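
/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the control of the driver,
 * but needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/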
6874int ixgbe_close(struct net_device *netdev)
6875{
6876 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6877
6878 ixgbe_ptp_stop(adapter);
6879
6880 if (netif_device_present(netdev))
6881 ixgbe_close_suspend(adapter);
6882
6883 ixgbe_fdir_filter_exit(adapter);
6884
6885 ixgbe_release_hw_control(adapter);
6886
6887 return 0;
6888}
6889
6890static int __maybe_unused ixgbe_resume(struct device *dev_d)
6891{
6892 struct pci_dev *pdev = to_pci_dev(dev_d);
6893 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6894 struct net_device *netdev = adapter->netdev;
	int err;
6896
6897 adapter->hw.hw_addr = adapter->io_addr;
6898
6899 smp_mb__before_atomic();
6900 clear_bit(__IXGBE_DISABLED, &adapter->state);
6901 pci_set_master(pdev);
6902
6903 device_wakeup_disable(dev_d);
6904
6905 ixgbe_reset(adapter);
6906
6907 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6908
6909 rtnl_lock();
6910 err = ixgbe_init_interrupt_scheme(adapter);
6911 if (!err && netif_running(netdev))
6912 err = ixgbe_open(netdev);
6913
6914
6915 if (!err)
6916 netif_device_attach(netdev);
6917 rtnl_unlock();
6918
6919 return err;
6920}
6921
6922static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6923{
6924 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6925 struct net_device *netdev = adapter->netdev;
6926 struct ixgbe_hw *hw = &adapter->hw;
6927 u32 ctrl;
6928 u32 wufc = adapter->wol;
6929
6930 rtnl_lock();
6931 netif_device_detach(netdev);
6932
6933 if (netif_running(netdev))
6934 ixgbe_close_suspend(adapter);
6935
6936 ixgbe_clear_interrupt_scheme(adapter);
6937 rtnl_unlock();
6938
6939 if (hw->mac.ops.stop_link_on_d3)
6940 hw->mac.ops.stop_link_on_d3(hw);
6941
6942 if (wufc) {
6943 u32 fctrl;
6944
6945 ixgbe_set_rx_mode(netdev);
6946
		/* enable the optics for 82599 SFP+ fiber as we can WoL */
		if (hw->mac.ops.enable_tx_laser)
6949 hw->mac.ops.enable_tx_laser(hw);
6950
		/* turn on all-multi mode if wake on multicast is enabled */
		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6953 fctrl |= IXGBE_FCTRL_MPE;
6954 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6955
6956 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
6957 ctrl |= IXGBE_CTRL_GIO_DIS;
6958 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
6959
6960 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
6961 } else {
6962 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
6963 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
6964 }
6965
6966 switch (hw->mac.type) {
6967 case ixgbe_mac_82598EB:
6968 pci_wake_from_d3(pdev, false);
6969 break;
6970 case ixgbe_mac_82599EB:
6971 case ixgbe_mac_X540:
6972 case ixgbe_mac_X550:
6973 case ixgbe_mac_X550EM_x:
6974 case ixgbe_mac_x550em_a:
6975 pci_wake_from_d3(pdev, !!wufc);
6976 break;
6977 default:
6978 break;
6979 }
6980
6981 *enable_wake = !!wufc;
6982 if (hw->phy.ops.set_phy_power && !*enable_wake)
6983 hw->phy.ops.set_phy_power(hw, false);
6984
6985 ixgbe_release_hw_control(adapter);
6986
6987 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6988 pci_disable_device(pdev);
6989
6990 return 0;
6991}
6992
6993static int __maybe_unused ixgbe_suspend(struct device *dev_d)
6994{
6995 struct pci_dev *pdev = to_pci_dev(dev_d);
6996 int retval;
6997 bool wake;
6998
6999 retval = __ixgbe_shutdown(pdev, &wake);
7000
7001 device_set_wakeup_enable(dev_d, wake);
7002
7003 return retval;
7004}
7005
7006static void ixgbe_shutdown(struct pci_dev *pdev)
7007{
7008 bool wake;
7009
7010 __ixgbe_shutdown(pdev, &wake);
7011
7012 if (system_state == SYSTEM_POWER_OFF) {
7013 pci_wake_from_d3(pdev, wake);
7014 pci_set_power_state(pdev, PCI_D3hot);
7015 }
7016}
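
/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/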
7022void ixgbe_update_stats(struct ixgbe_adapter *adapter)
7023{
7024 struct net_device *netdev = adapter->netdev;
7025 struct ixgbe_hw *hw = &adapter->hw;
7026 struct ixgbe_hw_stats *hwstats = &adapter->stats;
7027 u64 total_mpc = 0;
7028 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
7029 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
7030 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
7031 u64 alloc_rx_page = 0;
7032 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
7033
7034 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7035 test_bit(__IXGBE_RESETTING, &adapter->state))
7036 return;
7037
7038 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
7039 u64 rsc_count = 0;
7040 u64 rsc_flush = 0;
7041 for (i = 0; i < adapter->num_rx_queues; i++) {
7042 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
7043 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
7044 }
7045 adapter->rsc_total_count = rsc_count;
7046 adapter->rsc_total_flush = rsc_flush;
7047 }
7048
7049 for (i = 0; i < adapter->num_rx_queues; i++) {
7050 struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
7051
7052 if (!rx_ring)
7053 continue;
7054 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
7055 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
7056 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
7057 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
7058 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
7059 bytes += rx_ring->stats.bytes;
7060 packets += rx_ring->stats.packets;
7061 }
7062 adapter->non_eop_descs = non_eop_descs;
7063 adapter->alloc_rx_page = alloc_rx_page;
7064 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
7065 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
7066 adapter->hw_csum_rx_error = hw_csum_rx_error;
7067 netdev->stats.rx_bytes = bytes;
7068 netdev->stats.rx_packets = packets;
7069
7070 bytes = 0;
7071 packets = 0;
7072
7073 for (i = 0; i < adapter->num_tx_queues; i++) {
7074 struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
7075
7076 if (!tx_ring)
7077 continue;
7078 restart_queue += tx_ring->tx_stats.restart_queue;
7079 tx_busy += tx_ring->tx_stats.tx_busy;
7080 bytes += tx_ring->stats.bytes;
7081 packets += tx_ring->stats.packets;
7082 }
7083 for (i = 0; i < adapter->num_xdp_queues; i++) {
7084 struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
7085
7086 if (!xdp_ring)
7087 continue;
7088 restart_queue += xdp_ring->tx_stats.restart_queue;
7089 tx_busy += xdp_ring->tx_stats.tx_busy;
7090 bytes += xdp_ring->stats.bytes;
7091 packets += xdp_ring->stats.packets;
7092 }
7093 adapter->restart_queue = restart_queue;
7094 adapter->tx_busy = tx_busy;
7095 netdev->stats.tx_bytes = bytes;
7096 netdev->stats.tx_packets = packets;
7097
7098 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
7099
	/* 8 register reads */
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
7104 missed_rx += mpc;
7105 hwstats->mpc[i] += mpc;
7106 total_mpc += hwstats->mpc[i];
7107 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
7108 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
7109 switch (hw->mac.type) {
7110 case ixgbe_mac_82598EB:
7111 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
7112 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
7113 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
7114 hwstats->pxonrxc[i] +=
7115 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
7116 break;
7117 case ixgbe_mac_82599EB:
7118 case ixgbe_mac_X540:
7119 case ixgbe_mac_X550:
7120 case ixgbe_mac_X550EM_x:
7121 case ixgbe_mac_x550em_a:
7122 hwstats->pxonrxc[i] +=
7123 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
7124 break;
7125 default:
7126 break;
7127 }
7128 }
7129
 /* 16 register reads */
7131 for (i = 0; i < 16; i++) {
7132 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
7133 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
7134 if ((hw->mac.type == ixgbe_mac_82599EB) ||
7135 (hw->mac.type == ixgbe_mac_X540) ||
7136 (hw->mac.type == ixgbe_mac_X550) ||
7137 (hw->mac.type == ixgbe_mac_X550EM_x) ||
7138 (hw->mac.type == ixgbe_mac_x550em_a)) {
7139 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
7140 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
7141 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
7142 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
7143 }
7144 }
7145
7146 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
 /* work around hardware counting issue */
7148 hwstats->gprc -= missed_rx;
7149
7150 ixgbe_update_xoff_received(adapter);
7151
 /* 82598 hardware only has a 32 bit counter in the high register */
7153 switch (hw->mac.type) {
7154 case ixgbe_mac_82598EB:
7155 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
7156 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
7157 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
7158 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
7159 break;
7160 case ixgbe_mac_X540:
7161 case ixgbe_mac_X550:
7162 case ixgbe_mac_X550EM_x:
7163 case ixgbe_mac_x550em_a:
 /* OS2BMC stats are X540 and later */
7165 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
7166 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
7167 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
7168 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
7169 fallthrough;
7170 case ixgbe_mac_82599EB:
7171 for (i = 0; i < 16; i++)
7172 adapter->hw_rx_no_dma_resources +=
7173 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
7174 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
7175 IXGBE_READ_REG(hw, IXGBE_GORCH);
7176 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
7177 IXGBE_READ_REG(hw, IXGBE_GOTCH);
7178 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
7179 IXGBE_READ_REG(hw, IXGBE_TORH);
7180 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
7181 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
7182 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
7183#ifdef IXGBE_FCOE
7184 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
7185 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
7186 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
7187 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
7188 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
7189 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
 /* Add up per cpu counters for total ddp alloc failures */
7191 if (adapter->fcoe.ddp_pool) {
7192 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
7193 struct ixgbe_fcoe_ddp_pool *ddp_pool;
7194 unsigned int cpu;
7195 u64 noddp = 0, noddp_ext_buff = 0;
7196 for_each_possible_cpu(cpu) {
7197 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
7198 noddp += ddp_pool->noddp;
7199 noddp_ext_buff += ddp_pool->noddp_ext_buff;
7200 }
7201 hwstats->fcoe_noddp = noddp;
7202 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
7203 }
7204#endif
7205 break;
7206 default:
7207 break;
7208 }
7209 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
7210 hwstats->bprc += bprc;
7211 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
7212 if (hw->mac.type == ixgbe_mac_82598EB)
7213 hwstats->mprc -= bprc;
7214 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
7215 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
7216 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
7217 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
7218 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
7219 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
7220 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
7221 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
7222 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
7223 hwstats->lxontxc += lxon;
7224 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
7225 hwstats->lxofftxc += lxoff;
7226 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
7227 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);

 /*
  * 82598 errata - tx of flow control packets is included in tx counters
  */
7231 xon_off_tot = lxon + lxoff;
7232 hwstats->gptc -= xon_off_tot;
7233 hwstats->mptc -= xon_off_tot;
7234 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
7235 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
7236 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
7237 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
7238 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
7239 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
7240 hwstats->ptc64 -= xon_off_tot;
7241 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
7242 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
7243 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
7244 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
7245 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
7246 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
7247
 /* Fill out the OS statistics structure */
7249 netdev->stats.multicast = hwstats->mprc;
7250
 /* Rx Errors */
7252 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
7253 netdev->stats.rx_dropped = 0;
7254 netdev->stats.rx_length_errors = hwstats->rlec;
7255 netdev->stats.rx_crc_errors = hwstats->crcerrs;
7256 netdev->stats.rx_missed_errors = total_mpc;
7257}
7258
/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/
7263static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
7264{
7265 struct ixgbe_hw *hw = &adapter->hw;
7266 int i;
7267
7268 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
7269 return;
7270
7271 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
7272
 /* if interface is down do nothing */
7274 if (test_bit(__IXGBE_DOWN, &adapter->state))
7275 return;
7276
 /* do nothing if we are not using signature filters */
7278 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
7279 return;
7280
7281 adapter->fdir_overflow++;
7282
7283 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
7284 for (i = 0; i < adapter->num_tx_queues; i++)
7285 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7286 &(adapter->tx_ring[i]->state));
7287 for (i = 0; i < adapter->num_xdp_queues; i++)
7288 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7289 &adapter->xdp_ring[i]->state);
 /* re-enable flow director interrupts */
7291 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
7292 } else {
 e_err(probe, "failed to finish FDIR re-initialization, ignored adding FDIR ATR filters\n");
7295 }
7296}
7297
/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/
7307static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
7308{
7309 struct ixgbe_hw *hw = &adapter->hw;
7310 u64 eics = 0;
7311 int i;
7312
 /* If we're down, removing or resetting, just bail */
7314 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7315 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7316 test_bit(__IXGBE_RESETTING, &adapter->state))
7317 return;
7318
 /* Force detection of hung controller */
7320 if (netif_carrier_ok(adapter->netdev)) {
7321 for (i = 0; i < adapter->num_tx_queues; i++)
7322 set_check_for_tx_hang(adapter->tx_ring[i]);
7323 for (i = 0; i < adapter->num_xdp_queues; i++)
7324 set_check_for_tx_hang(adapter->xdp_ring[i]);
7325 }
7326
7327 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
 /*
  * for legacy and MSI interrupts don't set any bits
  * that are enabled for EIAM, because this operation
  * would set *both* EIMS and EICS for any bit in EIAM
  */
7333 IXGBE_WRITE_REG(hw, IXGBE_EICS,
7334 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
7335 } else {
 /* get one bit for every active tx/rx interrupt vector */
7337 for (i = 0; i < adapter->num_q_vectors; i++) {
7338 struct ixgbe_q_vector *qv = adapter->q_vector[i];
7339 if (qv->rx.ring || qv->tx.ring)
7340 eics |= BIT_ULL(i);
7341 }
7342 }
7343
 /* Cause software interrupt to ensure rings are cleaned */
7345 ixgbe_irq_rearm_queues(adapter, eics);
7346}
7347
/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
7352static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
7353{
7354 struct ixgbe_hw *hw = &adapter->hw;
7355 u32 link_speed = adapter->link_speed;
7356 bool link_up = adapter->link_up;
7357 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
7358
7359 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
7360 return;
7361
7362 if (hw->mac.ops.check_link) {
7363 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
7364 } else {
 /* always assume link is up, if no check link function */
7366 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
7367 link_up = true;
7368 }
7369
7370 if (adapter->ixgbe_ieee_pfc)
7371 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
7372
7373 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
7374 hw->mac.ops.fc_enable(hw);
7375 ixgbe_set_rx_drop_en(adapter);
7376 }
7377
7378 if (link_up ||
7379 time_after(jiffies, (adapter->link_check_timeout +
7380 IXGBE_TRY_LINK_TIMEOUT))) {
7381 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
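 /* link state resolved: re-enable the link status change interrupt */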
7382 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
7383 IXGBE_WRITE_FLUSH(hw);
7384 }
7385
7386 adapter->link_up = link_up;
7387 adapter->link_speed = link_speed;
7388}
7389
7390static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
7391{
7392#ifdef CONFIG_IXGBE_DCB
7393 struct net_device *netdev = adapter->netdev;
7394 struct dcb_app app = {
7395 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
7396 .protocol = 0,
7397 };
7398 u8 up = 0;
7399
7400 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
7401 up = dcb_ieee_getapp_mask(netdev, &app);
7402
7403 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
7404#endif
7405}
7406
/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter: pointer to the device adapter structure
 **/
7412static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
7413{
7414 struct net_device *netdev = adapter->netdev;
7415 struct ixgbe_hw *hw = &adapter->hw;
7416 u32 link_speed = adapter->link_speed;
7417 const char *speed_str;
7418 bool flow_rx, flow_tx;
7419
 /* only continue if link was previously down */
7421 if (netif_carrier_ok(netdev))
7422 return;
7423
7424 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
7425
7426 switch (hw->mac.type) {
7427 case ixgbe_mac_82598EB: {
7428 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
7429 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
7430 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
7431 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
7432 }
7433 break;
7434 case ixgbe_mac_X540:
7435 case ixgbe_mac_X550:
7436 case ixgbe_mac_X550EM_x:
7437 case ixgbe_mac_x550em_a:
7438 case ixgbe_mac_82599EB: {
7439 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
7440 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
7441 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
7442 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
7443 }
7444 break;
7445 default:
7446 flow_tx = false;
7447 flow_rx = false;
7448 break;
7449 }
7450
7451 adapter->last_rx_ptp_check = jiffies;
7452
7453 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7454 ixgbe_ptp_start_cyclecounter(adapter);
7455
7456 switch (link_speed) {
7457 case IXGBE_LINK_SPEED_10GB_FULL:
7458 speed_str = "10 Gbps";
7459 break;
7460 case IXGBE_LINK_SPEED_5GB_FULL:
7461 speed_str = "5 Gbps";
7462 break;
7463 case IXGBE_LINK_SPEED_2_5GB_FULL:
7464 speed_str = "2.5 Gbps";
7465 break;
7466 case IXGBE_LINK_SPEED_1GB_FULL:
7467 speed_str = "1 Gbps";
7468 break;
7469 case IXGBE_LINK_SPEED_100_FULL:
7470 speed_str = "100 Mbps";
7471 break;
7472 case IXGBE_LINK_SPEED_10_FULL:
7473 speed_str = "10 Mbps";
7474 break;
7475 default:
7476 speed_str = "unknown speed";
7477 break;
7478 }
7479 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
7480 ((flow_rx && flow_tx) ? "RX/TX" :
7481 (flow_rx ? "RX" :
7482 (flow_tx ? "TX" : "None"))));
7483
7484 netif_carrier_on(netdev);
7485 ixgbe_check_vf_rate_limit(adapter);
7486
 /* enable transmits */
7488 netif_tx_wake_all_queues(adapter->netdev);
7489
 /* update the default user priority for VFs */
7491 ixgbe_update_default_up(adapter);
7492
 /* ping all the active vfs to let them know link has changed */
7494 ixgbe_ping_all_vfs(adapter);
7495}
7496
/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter: pointer to the adapter structure
 **/
7502static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
7503{
7504 struct net_device *netdev = adapter->netdev;
7505 struct ixgbe_hw *hw = &adapter->hw;
7506
7507 adapter->link_up = false;
7508 adapter->link_speed = 0;
7509
 /* only continue if link was up previously */
7511 if (!netif_carrier_ok(netdev))
7512 return;
7513
 /* poll for SFP+ cable when link is down */
7515 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
7516 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
7517
7518 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7519 ixgbe_ptp_start_cyclecounter(adapter);
7520
7521 e_info(drv, "NIC Link is Down\n");
7522 netif_carrier_off(netdev);
7523
 /* ping all the active vfs to let them know link has changed */
7525 ixgbe_ping_all_vfs(adapter);
7526}
7527
7528static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
7529{
7530 int i;
7531
7532 for (i = 0; i < adapter->num_tx_queues; i++) {
7533 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7534
7535 if (tx_ring->next_to_use != tx_ring->next_to_clean)
7536 return true;
7537 }
7538
7539 for (i = 0; i < adapter->num_xdp_queues; i++) {
7540 struct ixgbe_ring *ring = adapter->xdp_ring[i];
7541
7542 if (ring->next_to_use != ring->next_to_clean)
7543 return true;
7544 }
7545
7546 return false;
7547}
7548
7549static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
7550{
7551 struct ixgbe_hw *hw = &adapter->hw;
7552 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 /* number of Tx queues per VMDq pool, derived from the VMDq ring mask */
 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
 int i, j;
7556
7557 if (!adapter->num_vfs)
7558 return false;
7559
 /* resetting the PF is only needed for MACs prior to X550 */
7561 if (hw->mac.type >= ixgbe_mac_X550)
7562 return false;
7563
7564 for (i = 0; i < adapter->num_vfs; i++) {
7565 for (j = 0; j < q_per_pool; j++) {
7566 u32 h, t;
7567
7568 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
7569 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
7570
7571 if (h != t)
7572 return true;
7573 }
7574 }
7575
7576 return false;
7577}
7578
/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/
7583static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
7584{
7585 if (!netif_carrier_ok(adapter->netdev)) {
7586 if (ixgbe_ring_tx_pending(adapter) ||
7587 ixgbe_vf_tx_pending(adapter)) {
 /* We've lost link, so the controller stops DMA,
  * but we've got queued Tx work that's never going
  * to get done, so reset controller to flush Tx.
  * (Do the reset outside of interrupt context).
  */
7593 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
7594 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
7595 }
7596 }
7597}
7598
7599#ifdef CONFIG_PCI_IOV
7600static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
7601{
7602 struct ixgbe_hw *hw = &adapter->hw;
7603 struct pci_dev *pdev = adapter->pdev;
7604 unsigned int vf;
7605 u32 gpc;
7606
7607 if (!(netif_carrier_ok(adapter->netdev)))
7608 return;
7609
7610 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
 if (gpc) /* If incrementing then no need for the check below */
7612 return;
 /* Check to see if a bad DMA write target from an errant or
  * malicious VF has caused a PCIe error.  If so then we can
  * issue a VFLR to the offending VF(s) and then resume without
  * requesting a full slot reset.
  */

7619 if (!pdev)
7620 return;
7621
 /* check status reg for all VFs owned by this PF */
7623 for (vf = 0; vf < adapter->num_vfs; ++vf) {
7624 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
7625 u16 status_reg;
7626
7627 if (!vfdev)
7628 continue;
7629 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
7630 if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
7631 status_reg & PCI_STATUS_REC_MASTER_ABORT)
7632 pcie_flr(vfdev);
7633 }
7634}
7635
7636static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
7637{
7638 u32 ssvpc;
7639
 /* Do not perform spoof check for 82598 or if not in IOV mode */
7641 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
7642 adapter->num_vfs == 0)
7643 return;
7644
7645 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
7646
 /*
  * ssvpc register is cleared on read, if zero then no
  * spoofed packets in the last interval.
  */
7651 if (!ssvpc)
7652 return;
7653
7654 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
7655}
7656#else
7657static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
7658{
7659}
7660
7661static void
7662ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
7663{
7664}
7665#endif
7666
/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/
7672static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
7673{
 /* if interface is down, removing or resetting, do nothing */
7675 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7676 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7677 test_bit(__IXGBE_RESETTING, &adapter->state))
7678 return;
7679
7680 ixgbe_watchdog_update_link(adapter);
7681
7682 if (adapter->link_up)
7683 ixgbe_watchdog_link_is_up(adapter);
7684 else
7685 ixgbe_watchdog_link_is_down(adapter);
7686
7687 ixgbe_check_for_bad_vf(adapter);
7688 ixgbe_spoof_check(adapter);
7689 ixgbe_update_stats(adapter);
7690
7691 ixgbe_watchdog_flush_tx(adapter);
7692}
7693
/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/
7698static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
7699{
7700 struct ixgbe_hw *hw = &adapter->hw;
7701 s32 err;
7702
 /* not searching for SFP so there is nothing to do here */
7704 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
7705 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7706 return;
7707
7708 if (adapter->sfp_poll_time &&
7709 time_after(adapter->sfp_poll_time, jiffies))
7710 return;
7711
 /* someone else is in init, wait until next service event */
7713 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7714 return;
7715
7716 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
7717
7718 err = hw->phy.ops.identify_sfp(hw);
7719 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7720 goto sfp_out;
7721
7722 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
 /* If no cable is present, then we need to reset
  * the next time we find a good cable. */
7725 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
7726 }
7727
 /* exit on error */
7729 if (err)
7730 goto sfp_out;
7731
 /* exit if reset not needed */
7733 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7734 goto sfp_out;
7735
7736 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
7737
 /*
  * A module may be identified correctly, but the EEPROM may not
  * have support for that module; in that case setup_sfp() will
  * fail, so we should not allow that module to load.
  */
7743 if (hw->mac.type == ixgbe_mac_82598EB)
7744 err = hw->phy.ops.reset(hw);
7745 else
7746 err = hw->mac.ops.setup_sfp(hw);
7747
7748 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7749 goto sfp_out;
7750
7751 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
7752 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
7753
7754sfp_out:
7755 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7756
7757 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
7758 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
 e_dev_err("failed to initialize because an unsupported SFP+ module type was detected.\n");
 e_dev_err("Reload the driver after installing a supported module.\n");
7763 unregister_netdev(adapter->netdev);
7764 }
7765}
7766
/**
 * ixgbe_sfp_link_config_subtask - set up link SFP if not already done
 * @adapter: the ixgbe adapter structure
 **/
7771static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
7772{
7773 struct ixgbe_hw *hw = &adapter->hw;
7774 u32 cap_speed;
7775 u32 speed;
7776 bool autoneg = false;
7777
7778 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
7779 return;
7780
 /* someone else is in init, wait until next service event */
7782 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7783 return;
7784
7785 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
7786
7787 hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
7788
 /* advertise highest capable link speed */
7790 if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
7791 speed = IXGBE_LINK_SPEED_10GB_FULL;
7792 else
7793 speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
7794 IXGBE_LINK_SPEED_1GB_FULL);
7795
7796 if (hw->mac.ops.setup_link)
7797 hw->mac.ops.setup_link(hw, speed, true);
7798
7799 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
7800 adapter->link_check_timeout = jiffies;
7801 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7802}
7803
/**
 * ixgbe_service_timer - Timer Call-back
 * @t: pointer to timer_list structure
 **/
7808static void ixgbe_service_timer(struct timer_list *t)
7809{
7810 struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
7811 unsigned long next_event_offset;
7812
 /* poll faster when waiting for link */
7814 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
7815 next_event_offset = HZ / 10;
7816 else
7817 next_event_offset = HZ * 2;
7818
 /* Reset the timer */
7820 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7821
7822 ixgbe_service_event_schedule(adapter);
7823}
7824
7825static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
7826{
7827 struct ixgbe_hw *hw = &adapter->hw;
7828 u32 status;
7829
7830 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
7831 return;
7832
7833 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
7834
7835 if (!hw->phy.ops.handle_lasi)
7836 return;
7837
7838 status = hw->phy.ops.handle_lasi(&adapter->hw);
7839 if (status != IXGBE_ERR_OVERTEMP)
7840 return;
7841
7842 e_crit(drv, "%s\n", ixgbe_overheat_msg);
7843}
7844
7845static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
7846{
7847 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
7848 return;
7849
7850 rtnl_lock();
 /* If we're already down, removing or resetting, just bail */
7852 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7853 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7854 test_bit(__IXGBE_RESETTING, &adapter->state)) {
7855 rtnl_unlock();
7856 return;
7857 }
7858
7859 ixgbe_dump(adapter);
7860 netdev_err(adapter->netdev, "Reset adapter\n");
7861 adapter->tx_timeout_count++;
7862
7863 ixgbe_reinit_locked(adapter);
7864 rtnl_unlock();
7865}
7866
/**
 * ixgbe_check_fw_error - Check firmware for errors
 * @adapter: the adapter private structure
 *
 * Check firmware errors in register FWSM
 **/
7873static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
7874{
7875 struct ixgbe_hw *hw = &adapter->hw;
7876 u32 fwsm;
7877
 /* read fwsm.ext_err_ind register and log errors */
7879 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
7880
7881 if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
7882 !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
7883 e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
7884 fwsm);
7885
7886 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
7887 e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
7888 return true;
7889 }
7890
7891 return false;
7892}
7893
/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
7898static void ixgbe_service_task(struct work_struct *work)
7899{
7900 struct ixgbe_adapter *adapter = container_of(work,
7901 struct ixgbe_adapter,
7902 service_task);
7903 if (ixgbe_removed(adapter->hw.hw_addr)) {
7904 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
7905 rtnl_lock();
7906 ixgbe_down(adapter);
7907 rtnl_unlock();
7908 }
7909 ixgbe_service_event_complete(adapter);
7910 return;
7911 }
7912 if (ixgbe_check_fw_error(adapter)) {
7913 if (!test_bit(__IXGBE_DOWN, &adapter->state))
7914 unregister_netdev(adapter->netdev);
7915 ixgbe_service_event_complete(adapter);
7916 return;
7917 }
7918 ixgbe_reset_subtask(adapter);
7919 ixgbe_phy_interrupt_subtask(adapter);
7920 ixgbe_sfp_detection_subtask(adapter);
7921 ixgbe_sfp_link_config_subtask(adapter);
7922 ixgbe_check_overtemp_subtask(adapter);
7923 ixgbe_watchdog_subtask(adapter);
7924 ixgbe_fdir_reinit_subtask(adapter);
7925 ixgbe_check_hang_subtask(adapter);
7926
7927 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
7928 ixgbe_ptp_overflow_check(adapter);
7929 if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
7930 ixgbe_ptp_rx_hang(adapter);
7931 ixgbe_ptp_tx_hang(adapter);
7932 }
7933
7934 ixgbe_service_event_complete(adapter);
7935}
7936
7937static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7938 struct ixgbe_tx_buffer *first,
7939 u8 *hdr_len,
7940 struct ixgbe_ipsec_tx_data *itd)
7941{
7942 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
7943 struct sk_buff *skb = first->skb;
7944 union {
7945 struct iphdr *v4;
7946 struct ipv6hdr *v6;
7947 unsigned char *hdr;
7948 } ip;
7949 union {
7950 struct tcphdr *tcp;
7951 struct udphdr *udp;
7952 unsigned char *hdr;
7953 } l4;
7954 u32 paylen, l4_offset;
7955 u32 fceof_saidx = 0;
7956 int err;
7957
7958 if (skb->ip_summed != CHECKSUM_PARTIAL)
7959 return 0;
7960
7961 if (!skb_is_gso(skb))
7962 return 0;
7963
7964 err = skb_cow_head(skb, 0);
7965 if (err < 0)
7966 return err;
7967
7968 if (eth_p_mpls(first->protocol))
7969 ip.hdr = skb_inner_network_header(skb);
7970 else
7971 ip.hdr = skb_network_header(skb);
7972 l4.hdr = skb_checksum_start(skb);
7973
 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
7975 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
7976 IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
7977
 /* initialize outer IP header fields */
7979 if (ip.v4->version == 4) {
7980 unsigned char *csum_start = skb_checksum_start(skb);
7981 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
7982 int len = csum_start - trans_start;
7983
 /* IP header will have to cancel out any data that
  * is not a part of the outer IP header, so set to
  * a reverse csum if needed, else init check to 0.
  */
7988 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
7989 csum_fold(csum_partial(trans_start,
7990 len, 0)) : 0;
7991 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7992
7993 ip.v4->tot_len = 0;
7994 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7995 IXGBE_TX_FLAGS_CSUM |
7996 IXGBE_TX_FLAGS_IPV4;
7997 } else {
7998 ip.v6->payload_len = 0;
7999 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
8000 IXGBE_TX_FLAGS_CSUM;
8001 }
8002
 /* determine offset of inner transport header */
8004 l4_offset = l4.hdr - skb->data;
8005
 /* remove payload length from inner checksum */
8007 paylen = skb->len - l4_offset;
8008
8009 if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
 /* compute length of segmentation header */
8011 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
8012 csum_replace_by_diff(&l4.tcp->check,
8013 (__force __wsum)htonl(paylen));
8014 } else {
 /* compute length of segmentation header */
8016 *hdr_len = sizeof(*l4.udp) + l4_offset;
8017 csum_replace_by_diff(&l4.udp->check,
8018 (__force __wsum)htonl(paylen));
8019 }
8020
 /* update gso size and bytecount with header size */
8022 first->gso_segs = skb_shinfo(skb)->gso_segs;
8023 first->bytecount += (first->gso_segs - 1) * *hdr_len;
8024
 /* mss_l4len_id: use 0 as index for TSO */
8026 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
8027 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
8028
8029 fceof_saidx |= itd->sa_idx;
8030 type_tucmd |= itd->flags | itd->trailer_len;
8031
 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
8033 vlan_macip_lens = l4.hdr - ip.hdr;
8034 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
8035 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8036
8037 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
8038 mss_l4len_idx);
8039
8040 return 1;
8041}
8042
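/* helper: returns true when the skb's checksum offload start lines up
 * with an SCTP header found by walking the IPv6 extension header chain
 */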
8043static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
8044{
8045 unsigned int offset = 0;
8046
8047 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
8048
8049 return offset == skb_checksum_start_offset(skb);
8050}
8051
8052static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
8053 struct ixgbe_tx_buffer *first,
8054 struct ixgbe_ipsec_tx_data *itd)
8055{
8056 struct sk_buff *skb = first->skb;
8057 u32 vlan_macip_lens = 0;
8058 u32 fceof_saidx = 0;
8059 u32 type_tucmd = 0;
8060
8061 if (skb->ip_summed != CHECKSUM_PARTIAL) {
8062csum_failed:
8063 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
8064 IXGBE_TX_FLAGS_CC)))
8065 return;
8066 goto no_csum;
8067 }
8068
8069 switch (skb->csum_offset) {
8070 case offsetof(struct tcphdr, check):
8071 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
8072 fallthrough;
8073 case offsetof(struct udphdr, check):
8074 break;
8075 case offsetof(struct sctphdr, checksum):
 /* validate that this is actually an SCTP request */
8077 if (((first->protocol == htons(ETH_P_IP)) &&
8078 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
8079 ((first->protocol == htons(ETH_P_IPV6)) &&
8080 ixgbe_ipv6_csum_is_sctp(skb))) {
8081 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
8082 break;
8083 }
8084 fallthrough;
8085 default:
8086 skb_checksum_help(skb);
8087 goto csum_failed;
8088 }
8089
 /* update TX checksum flag */
8091 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
8092 vlan_macip_lens = skb_checksum_start_offset(skb) -
8093 skb_network_offset(skb);
8094no_csum:
 /* vlan_macip_lens: MACLEN, VLAN tag */
8096 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
8097 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8098
8099 fceof_saidx |= itd->sa_idx;
8100 type_tucmd |= itd->flags | itd->trailer_len;
8101
8102 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
8103}
8104
8105#define IXGBE_SET_FLAG(_input, _flag, _result) \
8106 ((_flag <= _result) ? \
8107 ((u32)(_input & _flag) * (_result / _flag)) : \
8108 ((u32)(_input & _flag) / (_flag / _result)))
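
/* IXGBE_SET_FLAG() maps a flag bit in _input onto the bit position used
 * by _result without branching.  For example, with constant arguments
 * IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO, IXGBE_ADVTXD_DCMD_TSE)
 * reduces to a mask plus a single shift, yielding the TSE command bit
 * iff the TSO flag is set.
 */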
8109
8110static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
8111{
 /* set type for advanced descriptor with frame checksum insertion */
8113 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8114 IXGBE_ADVTXD_DCMD_DEXT |
8115 IXGBE_ADVTXD_DCMD_IFCS;
8116
 /* set HW vlan bit if vlan is present */
8118 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
8119 IXGBE_ADVTXD_DCMD_VLE);
8120
 /* set segmentation enable bits for TSO/FSO */
8122 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
8123 IXGBE_ADVTXD_DCMD_TSE);
8124
 /* set timestamp bit if present */
8126 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
8127 IXGBE_ADVTXD_MAC_TSTAMP);
8128
 /* insert frame checksum */
8130 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
8131
8132 return cmd_type;
8133}
8134
8135static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
8136 u32 tx_flags, unsigned int paylen)
8137{
8138 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
8139
 /* enable L4 checksum for TSO and TX checksum offload */
8141 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8142 IXGBE_TX_FLAGS_CSUM,
8143 IXGBE_ADVTXD_POPTS_TXSM);
8144
 /* enable IPv4 checksum for TSO */
8146 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8147 IXGBE_TX_FLAGS_IPV4,
8148 IXGBE_ADVTXD_POPTS_IXSM);
8149
 /* enable IPsec */
8151 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8152 IXGBE_TX_FLAGS_IPSEC,
8153 IXGBE_ADVTXD_POPTS_IPSEC);
8154
 /*
  * Check Context must be set if Tx switch is enabled, which it
  * always is for case where virtual functions are running
  */
8159 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8160 IXGBE_TX_FLAGS_CC,
8161 IXGBE_ADVTXD_CC);
8162
8163 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
8164}
8165
8166static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8167{
8168 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
8169
 /* Herbert's original patch had:
  *  smp_mb__after_netif_stop_queue();
  * but since that doesn't exist yet, just open code it.
  */
8174 smp_mb();
8175
 /* We need to check again in a case another CPU has just
  * made room available.
  */
8179 if (likely(ixgbe_desc_unused(tx_ring) < size))
8180 return -EBUSY;
8181
 /* A reprieve! - use start_queue because it doesn't call schedule */
8183 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
8184 ++tx_ring->tx_stats.restart_queue;
8185 return 0;
8186}
8187
8188static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8189{
8190 if (likely(ixgbe_desc_unused(tx_ring) >= size))
8191 return 0;
8192
8193 return __ixgbe_maybe_stop_tx(tx_ring, size);
8194}
8195
8196static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
8197 struct ixgbe_tx_buffer *first,
8198 const u8 hdr_len)
8199{
8200 struct sk_buff *skb = first->skb;
8201 struct ixgbe_tx_buffer *tx_buffer;
8202 union ixgbe_adv_tx_desc *tx_desc;
8203 skb_frag_t *frag;
8204 dma_addr_t dma;
8205 unsigned int data_len, size;
8206 u32 tx_flags = first->tx_flags;
8207 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
8208 u16 i = tx_ring->next_to_use;
8209
8210 tx_desc = IXGBE_TX_DESC(tx_ring, i);
8211
8212 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
8213
8214 size = skb_headlen(skb);
8215 data_len = skb->data_len;
8216
8217#ifdef IXGBE_FCOE
8218 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
8219 if (data_len < sizeof(struct fcoe_crc_eof)) {
8220 size -= sizeof(struct fcoe_crc_eof) - data_len;
8221 data_len = 0;
8222 } else {
8223 data_len -= sizeof(struct fcoe_crc_eof);
8224 }
8225 }
8226
8227#endif
8228 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
8229
8230 tx_buffer = first;
8231
8232 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
8233 if (dma_mapping_error(tx_ring->dev, dma))
8234 goto dma_error;
8235
 /* record length, and DMA address */
8237 dma_unmap_len_set(tx_buffer, len, size);
8238 dma_unmap_addr_set(tx_buffer, dma, dma);
8239
8240 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8241
8242 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
8243 tx_desc->read.cmd_type_len =
8244 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
8245
8246 i++;
8247 tx_desc++;
8248 if (i == tx_ring->count) {
8249 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8250 i = 0;
8251 }
8252 tx_desc->read.olinfo_status = 0;
8253
8254 dma += IXGBE_MAX_DATA_PER_TXD;
8255 size -= IXGBE_MAX_DATA_PER_TXD;
8256
8257 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8258 }
8259
8260 if (likely(!data_len))
8261 break;
8262
8263 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
8264
8265 i++;
8266 tx_desc++;
8267 if (i == tx_ring->count) {
8268 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8269 i = 0;
8270 }
8271 tx_desc->read.olinfo_status = 0;
8272
8273#ifdef IXGBE_FCOE
8274 size = min_t(unsigned int, data_len, skb_frag_size(frag));
8275#else
8276 size = skb_frag_size(frag);
8277#endif
8278 data_len -= size;
8279
8280 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
8281 DMA_TO_DEVICE);
8282
8283 tx_buffer = &tx_ring->tx_buffer_info[i];
8284 }
8285
 /* write last descriptor with RS and EOP bits */
8287 cmd_type |= size | IXGBE_TXD_CMD;
8288 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8289
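 /* report the queued bytes to byte queue limits (BQL) */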
8290 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
8291
 /* set the timestamp */
8293 first->time_stamp = jiffies;
8294
8295 skb_tx_timestamp(skb);
8296
 /*
  * Force memory writes to complete before letting h/w know there
  * are new descriptors to fetch.  (Only applicable for weak-ordered
  * memory model archs, such as IA-64).
  *
  * We also need this memory barrier to make certain all of the
  * status bits have been updated before next_to_watch is written.
  */
8305 wmb();
8306
 /* set next_to_watch value indicating a packet is present */
8308 first->next_to_watch = tx_desc;
8309
8310 i++;
8311 if (i == tx_ring->count)
8312 i = 0;
8313
8314 tx_ring->next_to_use = i;
8315
8316 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
8317
8318 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
8319 writel(i, tx_ring->tail);
8320 }
8321
8322 return 0;
8323dma_error:
8324 dev_err(tx_ring->dev, "TX DMA map failed\n");
8325
 /* clear dma mappings for failed tx_buffer_info map */
8327 for (;;) {
8328 tx_buffer = &tx_ring->tx_buffer_info[i];
8329 if (dma_unmap_len(tx_buffer, len))
8330 dma_unmap_page(tx_ring->dev,
8331 dma_unmap_addr(tx_buffer, dma),
8332 dma_unmap_len(tx_buffer, len),
8333 DMA_TO_DEVICE);
8334 dma_unmap_len_set(tx_buffer, len, 0);
8335 if (tx_buffer == first)
8336 break;
8337 if (i == 0)
8338 i += tx_ring->count;
8339 i--;
8340 }
8341
8342 dev_kfree_skb_any(first->skb);
8343 first->skb = NULL;
8344
8345 tx_ring->next_to_use = i;
8346
8347 return -1;
8348}
8349
8350static void ixgbe_atr(struct ixgbe_ring *ring,
8351 struct ixgbe_tx_buffer *first)
8352{
8353 struct ixgbe_q_vector *q_vector = ring->q_vector;
8354 union ixgbe_atr_hash_dword input = { .dword = 0 };
8355 union ixgbe_atr_hash_dword common = { .dword = 0 };
8356 union {
8357 unsigned char *network;
8358 struct iphdr *ipv4;
8359 struct ipv6hdr *ipv6;
8360 } hdr;
8361 struct tcphdr *th;
8362 unsigned int hlen;
8363 struct sk_buff *skb;
8364 __be16 vlan_id;
8365 int l4_proto;
8366
 /* if ring doesn't have an interrupt vector, cannot perform ATR */
8368 if (!q_vector)
8369 return;
8370
 /* do nothing if sampling is disabled */
8372 if (!ring->atr_sample_rate)
8373 return;
8374
8375 ring->atr_count++;
8376
 /* currently only IPv4/IPv6 with TCP is supported */
8378 if ((first->protocol != htons(ETH_P_IP)) &&
8379 (first->protocol != htons(ETH_P_IPV6)))
8380 return;
8381
 /* snag network header to get L4 type and address */
8383 skb = first->skb;
8384 hdr.network = skb_network_header(skb);
8385 if (unlikely(hdr.network <= skb->data))
8386 return;
8387 if (skb->encapsulation &&
8388 first->protocol == htons(ETH_P_IP) &&
8389 hdr.ipv4->protocol == IPPROTO_UDP) {
8390 struct ixgbe_adapter *adapter = q_vector->adapter;
8391
8392 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8393 VXLAN_HEADROOM))
8394 return;
8395
 /* verify the port is recognized as VXLAN or GENEVE */
8397 if (adapter->vxlan_port &&
8398 udp_hdr(skb)->dest == adapter->vxlan_port)
8399 hdr.network = skb_inner_network_header(skb);
8400
8401 if (adapter->geneve_port &&
8402 udp_hdr(skb)->dest == adapter->geneve_port)
8403 hdr.network = skb_inner_network_header(skb);
8404 }
8405
 /* Make sure we have at least [minimum IPv4 header + TCP]
  * or [IPv6 header] bytes
  */
8409 if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
8410 return;
8411
 /* Currently only IPv4/IPv6 with TCP is supported */
8413 switch (hdr.ipv4->version) {
8414 case IPVERSION:
 /* access ihl as u8 to avoid unaligned access on ia64 */
8416 hlen = (hdr.network[0] & 0x0F) << 2;
8417 l4_proto = hdr.ipv4->protocol;
8418 break;
8419 case 6:
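 /* ipv6_find_hdr() works on offsets from skb->data, so pass in
  * the network header offset and convert the result back to an
  * offset from hdr.network
  */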
8420 hlen = hdr.network - skb->data;
8421 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
8422 hlen -= hdr.network - skb->data;
8423 break;
8424 default:
8425 return;
8426 }
8427
8428 if (l4_proto != IPPROTO_TCP)
8429 return;
8430
8431 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8432 hlen + sizeof(struct tcphdr)))
8433 return;
8434
8435 th = (struct tcphdr *)(hdr.network + hlen);
8436
 /* skip this packet since it is invalid or the socket is closing */
8438 if (th->fin)
8439 return;
8440
 /* sample on all syn packets or once every atr sample count */
8442 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
8443 return;
8444
 /* reset sample count */
8446 ring->atr_count = 0;
8447
8448 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
8449
 /*
  * src and dst are inverted, think how the receiver sees them
  *
  * The input is broken into two sections, a non-compressed section
  * containing vm_pool, vlan_id, and flow_type.  The rest of the data
  * we will compress the packet.
  *
  * vlan_id is already in network order
  */
8457 input.formatted.vlan_id = vlan_id;
8458
 /* since src port and flex bytes occupy the same word XOR them together
  * and write the value to source port portion of compressed dword
  */
8463 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
8464 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
8465 else
8466 common.port.src ^= th->dest ^ first->protocol;
8467 common.port.dst ^= th->source;
8468
8469 switch (hdr.ipv4->version) {
8470 case IPVERSION:
8471 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
8472 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
8473 break;
8474 case 6:
8475 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
8476 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
8477 hdr.ipv6->saddr.s6_addr32[1] ^
8478 hdr.ipv6->saddr.s6_addr32[2] ^
8479 hdr.ipv6->saddr.s6_addr32[3] ^
8480 hdr.ipv6->daddr.s6_addr32[0] ^
8481 hdr.ipv6->daddr.s6_addr32[1] ^
8482 hdr.ipv6->daddr.s6_addr32[2] ^
8483 hdr.ipv6->daddr.s6_addr32[3];
8484 break;
8485 default:
8486 break;
8487 }
8488
8489 if (hdr.network != skb_network_header(skb))
8490 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
8491
 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
8493 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
8494 input, common, ring->queue_index);
8495}
8496
8497#ifdef IXGBE_FCOE
8498static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
8499 struct net_device *sb_dev)
8500{
8501 struct ixgbe_adapter *adapter;
8502 struct ixgbe_ring_feature *f;
8503 int txq;
8504
8505 if (sb_dev) {
8506 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
8507 struct net_device *vdev = sb_dev;
8508
8509 txq = vdev->tc_to_txq[tc].offset;
8510 txq += reciprocal_scale(skb_get_hash(skb),
8511 vdev->tc_to_txq[tc].count);
8512
8513 return txq;
8514 }
8515
 /*
  * only execute the code below if protocol is FCoE
  * or FIP and we have FCoE enabled on the adapter
  */
8520 switch (vlan_get_protocol(skb)) {
8521 case htons(ETH_P_FCOE):
8522 case htons(ETH_P_FIP):
8523 adapter = netdev_priv(dev);
8524
8525 if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
8526 break;
8527 fallthrough;
8528 default:
8529 return netdev_pick_tx(dev, skb, sb_dev);
8530 }
8531
8532 f = &adapter->ring_feature[RING_F_FCOE];
8533
8534 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
8535 smp_processor_id();
8536
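 /* fold txq into the number of FCoE queues, then add the region offset */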
8537 while (txq >= f->indices)
8538 txq -= f->indices;
8539
8540 return txq + f->offset;
8541}
8542
8543#endif
8544int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
8545 struct xdp_frame *xdpf)
8546{
8547 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
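 /* one XDP Tx ring is reserved per CPU, so the ring selected by
  * smp_processor_id() is owned exclusively by this context and
  * needs no locking
  */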
8548 struct ixgbe_tx_buffer *tx_buffer;
8549 union ixgbe_adv_tx_desc *tx_desc;
8550 u32 len, cmd_type;
8551 dma_addr_t dma;
8552 u16 i;
8553
8554 len = xdpf->len;
8555
8556 if (unlikely(!ixgbe_desc_unused(ring)))
8557 return IXGBE_XDP_CONSUMED;
8558
8559 dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
8560 if (dma_mapping_error(ring->dev, dma))
8561 return IXGBE_XDP_CONSUMED;
8562
 /* record the location of the first descriptor for this packet */
8564 tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
8565 tx_buffer->bytecount = len;
8566 tx_buffer->gso_segs = 1;
8567 tx_buffer->protocol = 0;
8568
8569 i = ring->next_to_use;
8570 tx_desc = IXGBE_TX_DESC(ring, i);
8571
8572 dma_unmap_len_set(tx_buffer, len, len);
8573 dma_unmap_addr_set(tx_buffer, dma, dma);
8574 tx_buffer->xdpf = xdpf;
8575
8576 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8577
 /* put descriptor type bits */
8579 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8580 IXGBE_ADVTXD_DCMD_DEXT |
8581 IXGBE_ADVTXD_DCMD_IFCS;
8582 cmd_type |= len | IXGBE_TXD_CMD;
8583 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8584 tx_desc->read.olinfo_status =
8585 cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
8586
 /* Avoid any potential race with xdp_xmit and cleanup */
8588 smp_wmb();
8589
 /* set next_to_watch value indicating a packet is present */
8591 i++;
8592 if (i == ring->count)
8593 i = 0;
8594
8595 tx_buffer->next_to_watch = tx_desc;
8596 ring->next_to_use = i;
8597
8598 return IXGBE_XDP_TX;
8599}
8600
8601netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8602 struct ixgbe_adapter *adapter,
8603 struct ixgbe_ring *tx_ring)
8604{
8605 struct ixgbe_tx_buffer *first;
8606 int tso;
8607 u32 tx_flags = 0;
8608 unsigned short f;
8609 u16 count = TXD_USE_COUNT(skb_headlen(skb));
8610 struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
8611 __be16 protocol = skb->protocol;
8612 u8 hdr_len = 0;
8613
 /*
  * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
  *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
  *       + 2 desc gap to keep tail from touching head,
  *       + 1 desc for context descriptor,
  * otherwise try next time
  */
8621 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
8622 count += TXD_USE_COUNT(skb_frag_size(
8623 &skb_shinfo(skb)->frags[f]));
8624
8625 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8626 tx_ring->tx_stats.tx_busy++;
8627 return NETDEV_TX_BUSY;
8628 }
8629
 /* record the location of the first descriptor for this packet */
8631 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
8632 first->skb = skb;
8633 first->bytecount = skb->len;
8634 first->gso_segs = 1;
8635
 /* if we have a HW VLAN tag being added default to the HW one */
8637 if (skb_vlan_tag_present(skb)) {
8638 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
8639 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
 /* else if it is a SW VLAN check the next protocol and store the tag */
8641 } else if (protocol == htons(ETH_P_8021Q)) {
8642 struct vlan_hdr *vhdr, _vhdr;
8643 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
8644 if (!vhdr)
8645 goto out_drop;
8646
8647 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
8648 IXGBE_TX_FLAGS_VLAN_SHIFT;
8649 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
8650 }
8651 protocol = vlan_get_protocol(skb);
8652
8653 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
8654 adapter->ptp_clock) {
8655 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
8656 !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
8657 &adapter->state)) {
8658 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8659 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
8660
 /* schedule check for Tx timestamp */
8662 adapter->ptp_tx_skb = skb_get(skb);
8663 adapter->ptp_tx_start = jiffies;
8664 schedule_work(&adapter->ptp_tx_work);
8665 } else {
8666 adapter->tx_hwtstamp_skipped++;
8667 }
8668 }
8669
8670#ifdef CONFIG_PCI_IOV
 /*
  * Use the l2switch_enable flag - would be false if the DMA
  * Tx switch had been disabled.
  */
8675 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8676 tx_flags |= IXGBE_TX_FLAGS_CC;
8677
8678#endif
 /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
8680 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8681 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
8682 (skb->priority != TC_PRIO_CONTROL))) {
8683 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
8684 tx_flags |= (skb->priority & 0x7) <<
8685 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
8686 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
8687 struct vlan_ethhdr *vhdr;
8688
8689 if (skb_cow_head(skb, 0))
8690 goto out_drop;
8691 vhdr = (struct vlan_ethhdr *)skb->data;
8692 vhdr->h_vlan_TCI = htons(tx_flags >>
8693 IXGBE_TX_FLAGS_VLAN_SHIFT);
8694 } else {
8695 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8696 }
8697 }
8698
 /* record initial flags and protocol */
8700 first->tx_flags = tx_flags;
8701 first->protocol = protocol;
8702
8703#ifdef IXGBE_FCOE
 /* setup tx offload for FCoE */
8705 if ((protocol == htons(ETH_P_FCOE)) &&
8706 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
8707 tso = ixgbe_fso(tx_ring, first, &hdr_len);
8708 if (tso < 0)
8709 goto out_drop;
8710
8711 goto xmit_fcoe;
8712 }
8713
8714#endif
8715
8716#ifdef CONFIG_IXGBE_IPSEC
8717 if (xfrm_offload(skb) &&
8718 !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8719 goto out_drop;
8720#endif
8721 tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
8722 if (tso < 0)
8723 goto out_drop;
8724 else if (!tso)
8725 ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
8726
 /* add the ATR filter if ATR is on */
8728 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
8729 ixgbe_atr(tx_ring, first);
8730
8731#ifdef IXGBE_FCOE
8732xmit_fcoe:
8733#endif
8734 if (ixgbe_tx_map(tx_ring, first, hdr_len))
8735 goto cleanup_tx_timestamp;
8736
8737 return NETDEV_TX_OK;
8738
8739out_drop:
8740 dev_kfree_skb_any(first->skb);
8741 first->skb = NULL;
8742cleanup_tx_timestamp:
8743 if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
8744 dev_kfree_skb_any(adapter->ptp_tx_skb);
8745 adapter->ptp_tx_skb = NULL;
8746 cancel_work_sync(&adapter->ptp_tx_work);
8747 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
8748 }
8749
8750 return NETDEV_TX_OK;
8751}
8752
8753static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
8754 struct net_device *netdev,
8755 struct ixgbe_ring *ring)
8756{
8757 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8758 struct ixgbe_ring *tx_ring;
8759
 /*
  * The minimum packet size for olinfo paylen is 17 so pad the skb
  * in order to meet this minimum size requirement.
  */
8764 if (skb_put_padto(skb, 17))
8765 return NETDEV_TX_OK;
8766
8767 tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
8768 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
8769 return NETDEV_TX_BUSY;
8770
8771 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
8772}
8773
8774static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
8775 struct net_device *netdev)
8776{
8777 return __ixgbe_xmit_frame(skb, netdev, NULL);
8778}
8779
/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
8787static int ixgbe_set_mac(struct net_device *netdev, void *p)
8788{
8789 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8790 struct ixgbe_hw *hw = &adapter->hw;
8791 struct sockaddr *addr = p;
8792
8793 if (!is_valid_ether_addr(addr->sa_data))
8794 return -EADDRNOTAVAIL;
8795
8796 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
8797 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
8798
8799 ixgbe_mac_set_default_filter(adapter);
8800
8801 return 0;
8802}
8803
8804static int
8805ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
8806{
8807 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8808 struct ixgbe_hw *hw = &adapter->hw;
8809 u16 value;
8810 int rc;
8811
8812 if (adapter->mii_bus) {
8813 int regnum = addr;
8814
8815 if (devad != MDIO_DEVAD_NONE)
8816 regnum |= (devad << 16) | MII_ADDR_C45;
8817
8818 return mdiobus_read(adapter->mii_bus, prtad, regnum);
8819 }
8820
8821 if (prtad != hw->phy.mdio.prtad)
8822 return -EINVAL;
8823 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
8824 if (!rc)
8825 rc = value;
8826 return rc;
8827}
8828
8829static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
8830 u16 addr, u16 value)
8831{
8832 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8833 struct ixgbe_hw *hw = &adapter->hw;
8834
8835 if (adapter->mii_bus) {
8836 int regnum = addr;
8837
8838 if (devad != MDIO_DEVAD_NONE)
8839 regnum |= (devad << 16) | MII_ADDR_C45;
8840
8841 return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
8842 }
8843
8844 if (prtad != hw->phy.mdio.prtad)
8845 return -EINVAL;
8846 return hw->phy.ops.write_reg(hw, addr, devad, value);
8847}
8848
8849static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
8850{
8851 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8852
8853 switch (cmd) {
8854 case SIOCSHWTSTAMP:
8855 return ixgbe_ptp_set_ts_config(adapter, req);
8856 case SIOCGHWTSTAMP:
8857 return ixgbe_ptp_get_ts_config(adapter, req);
8858 case SIOCGMIIPHY:
8859 if (!adapter->hw.phy.ops.read_reg)
8860 return -EOPNOTSUPP;
8861 fallthrough;
8862 default:
8863 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8864 }
8865}
8866
/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
8874static int ixgbe_add_sanmac_netdev(struct net_device *dev)
8875{
8876 int err = 0;
8877 struct ixgbe_adapter *adapter = netdev_priv(dev);
8878 struct ixgbe_hw *hw = &adapter->hw;
8879
8880 if (is_valid_ether_addr(hw->mac.san_addr)) {
8881 rtnl_lock();
8882 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
8883 rtnl_unlock();
8884
 /* update SAN MAC vmdq pool selection */
8886 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
8887 }
8888 return err;
8889}
8890
/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
8898static int ixgbe_del_sanmac_netdev(struct net_device *dev)
8899{
8900 int err = 0;
8901 struct ixgbe_adapter *adapter = netdev_priv(dev);
8902 struct ixgbe_mac_info *mac = &adapter->hw.mac;
8903
8904 if (is_valid_ether_addr(mac->san_addr)) {
8905 rtnl_lock();
8906 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
8907 rtnl_unlock();
8908 }
8909 return err;
8910}
8911
8912static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
8913 struct ixgbe_ring *ring)
8914{
8915 u64 bytes, packets;
8916 unsigned int start;
8917
8918 if (ring) {
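 /* u64 counters are not atomic on 32-bit; retry the snapshot
  * if the writer updated the ring stats mid-read
  */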
8919 do {
8920 start = u64_stats_fetch_begin_irq(&ring->syncp);
8921 packets = ring->stats.packets;
8922 bytes = ring->stats.bytes;
8923 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8924 stats->tx_packets += packets;
8925 stats->tx_bytes += bytes;
8926 }
8927}
8928
8929static void ixgbe_get_stats64(struct net_device *netdev,
8930 struct rtnl_link_stats64 *stats)
8931{
8932 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8933 int i;
8934
8935 rcu_read_lock();
8936 for (i = 0; i < adapter->num_rx_queues; i++) {
8937 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
8938 u64 bytes, packets;
8939 unsigned int start;
8940
8941 if (ring) {
8942 do {
8943 start = u64_stats_fetch_begin_irq(&ring->syncp);
8944 packets = ring->stats.packets;
8945 bytes = ring->stats.bytes;
8946 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8947 stats->rx_packets += packets;
8948 stats->rx_bytes += bytes;
8949 }
8950 }
8951
8952 for (i = 0; i < adapter->num_tx_queues; i++) {
8953 struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
8954
8955 ixgbe_get_ring_stats64(stats, ring);
8956 }
8957 for (i = 0; i < adapter->num_xdp_queues; i++) {
8958 struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
8959
8960 ixgbe_get_ring_stats64(stats, ring);
8961 }
8962 rcu_read_unlock();
8963
 /* following stats updated by ixgbe_watchdog_task() */
8965 stats->multicast = netdev->stats.multicast;
8966 stats->rx_errors = netdev->stats.rx_errors;
8967 stats->rx_length_errors = netdev->stats.rx_length_errors;
8968 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
8969 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
8970}
8971
8972#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
 * @adapter: Pointer to adapter struct
 * @tc: number of traffic classes currently enabled
 *
 * Program a proper 802.1Qp to Rx packet buffer mapping ie confirm
 * 802.1Q priority maps to a packet buffer that exists.
 */
8981static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
8982{
8983 struct ixgbe_hw *hw = &adapter->hw;
8984 u32 reg, rsave;
8985 int i;
8986
 /* 82598 have a static priority to TC mapping that can not
  * be changed so no validation is needed.
  */
8990 if (hw->mac.type == ixgbe_mac_82598EB)
8991 return;
8992
8993 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
8994 rsave = reg;
8995
8996 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8997 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
8998
 /* If up2tc is out of bounds default to zero */
9000 if (up2tc > tc)
9001 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
9002 }
9003
9004 if (reg != rsave)
9005 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
9006
9007 return;
9008}
9009
/**
 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
 * @adapter: Pointer to adapter struct
 *
 * Populate the netdev user priority to tc map
 */
9016static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
9017{
9018 struct net_device *dev = adapter->netdev;
9019 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
9020 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
9021 u8 prio;
9022
9023 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
9024 u8 tc = 0;
9025
9026 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
9027 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
9028 else if (ets)
9029 tc = ets->prio_tc[prio];
9030
9031 netdev_set_prio_tc_map(dev, prio, tc);
9032 }
9033}
9034
9035#endif
9036static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
9037 struct netdev_nested_priv *priv)
9038{
9039 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
9040 struct ixgbe_fwd_adapter *accel;
9041 int pool;
9042
 /* we only care about macvlans... */
9044 if (!netif_is_macvlan(vdev))
9045 return 0;
9046
 /* ...that have hardware offload enabled */
9048 accel = macvlan_accel_priv(vdev);
9049 if (!accel)
9050 return 0;
9051
 /* claim the first free pool for this macvlan if one is available */
9053 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9054 if (pool < adapter->num_rx_pools) {
9055 set_bit(pool, adapter->fwd_bitmask);
9056 accel->pool = pool;
9057 return 0;
9058 }
9059
 /* if we cannot find a free pool then disable the offload
  * and remove the macvlan
  */
9061 netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
9062 macvlan_release_l2fw_offload(vdev);
9063
 /* unbind the queues and drop the subordinate channel config */
9065 netdev_unbind_sb_channel(adapter->netdev, vdev);
9066 netdev_set_sb_channel(vdev, 0);
9067
9068 kfree(accel);
9069
9070 return 0;
9071}
9072
9073static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
9074{
9075 struct ixgbe_adapter *adapter = netdev_priv(dev);
9076 struct netdev_nested_priv priv = {
9077 .data = (void *)adapter,
9078 };
9079
 /* flush any stale macvlan pool assignments; pool 0 always belongs
  * to the PF, so its bit is left set
  */
9081 bitmap_clear(adapter->fwd_bitmask, 1, 63);
9082
 /* walk through upper devices reassigning pools */
9084 netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
9085 &priv);
9086}
9087
/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 *
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 */
9094int ixgbe_setup_tc(struct net_device *dev, u8 tc)
9095{
9096 struct ixgbe_adapter *adapter = netdev_priv(dev);
9097 struct ixgbe_hw *hw = &adapter->hw;
9098
 /* Hardware supports up to 8 traffic classes */
9100 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
9101 return -EINVAL;
9102
9103 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
9104 return -EINVAL;
9105
 /* Hardware has to reinitialize queues and interrupts to
  * match packet buffer alignment. Unfortunately, the
  * hardware is not flexible enough to do this dynamically.
  */
9110 if (netif_running(dev))
9111 ixgbe_close(dev);
9112 else
9113 ixgbe_reset(adapter);
9114
9115 ixgbe_clear_interrupt_scheme(adapter);
9116
9117#ifdef CONFIG_IXGBE_DCB
9118 if (tc) {
9119 if (adapter->xdp_prog) {
9120 e_warn(probe, "DCB is not supported with XDP\n");
9121
9122 ixgbe_init_interrupt_scheme(adapter);
9123 if (netif_running(dev))
9124 ixgbe_open(dev);
9125 return -EINVAL;
9126 }
9127
9128 netdev_set_num_tc(dev, tc);
9129 ixgbe_set_prio_tc_map(adapter);
9130
9131 adapter->hw_tcs = tc;
9132 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
9133
9134 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
9135 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
9136 adapter->hw.fc.requested_mode = ixgbe_fc_none;
9137 }
9138 } else {
9139 netdev_reset_tc(dev);
9140
9141 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
9142 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
9143
9144 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
9145 adapter->hw_tcs = tc;
9146
9147 adapter->temp_dcb_cfg.pfc_mode_enable = false;
9148 adapter->dcb_cfg.pfc_mode_enable = false;
9149 }
9150
9151 ixgbe_validate_rtr(adapter, tc);
9152
9153#endif
9154 ixgbe_init_interrupt_scheme(adapter);
9155
9156 ixgbe_defrag_macvlan_pools(dev);
9157
9158 if (netif_running(dev))
9159 return ixgbe_open(dev);
9160
9161 return 0;
9162}
9163
9164static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
9165 struct tc_cls_u32_offload *cls)
9166{
9167 u32 hdl = cls->knode.handle;
9168 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
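 /* the driver uses the low 20 bits of the handle as the filter location */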
9169 u32 loc = cls->knode.handle & 0xfffff;
9170 int err = 0, i, j;
9171 struct ixgbe_jump_table *jump = NULL;
9172
9173 if (loc > IXGBE_MAX_HW_ENTRIES)
9174 return -EINVAL;
9175
9176 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
9177 return -EINVAL;
9178
 /* Clear this filter in the link data it is associated with */
9180 if (uhtid != 0x800) {
9181 jump = adapter->jump_tables[uhtid];
9182 if (!jump)
9183 return -EINVAL;
9184 if (!test_bit(loc - 1, jump->child_loc_map))
9185 return -EINVAL;
9186 clear_bit(loc - 1, jump->child_loc_map);
9187 }
9188
 /* if the filter being deleted is a link, tear down its child table */
9190 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9191 jump = adapter->jump_tables[i];
9192 if (jump && jump->link_hdl == hdl) {
 /* Delete filters in the hardware in the child hash
  * table associated with this link
  */
9196 for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
9197 if (!test_bit(j, jump->child_loc_map))
9198 continue;
9199 spin_lock(&adapter->fdir_perfect_lock);
9200 err = ixgbe_update_ethtool_fdir_entry(adapter,
9201 NULL,
9202 j + 1);
9203 spin_unlock(&adapter->fdir_perfect_lock);
9204 clear_bit(j, jump->child_loc_map);
9205 }
 /* Remove resources for this link */
9207 kfree(jump->input);
9208 kfree(jump->mask);
9209 kfree(jump);
9210 adapter->jump_tables[i] = NULL;
9211 return err;
9212 }
9213 }
9214
9215 spin_lock(&adapter->fdir_perfect_lock);
9216 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
9217 spin_unlock(&adapter->fdir_perfect_lock);
9218 return err;
9219}
9220
9221static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
9222 struct tc_cls_u32_offload *cls)
9223{
9224 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9225
9226 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9227 return -EINVAL;
9228
 /* These ixgbe devices do not support hash tables at the moment
  * so abort when given hash tables.
  */
9232 if (cls->hnode.divisor > 0)
9233 return -EINVAL;
9234
9235 set_bit(uhtid - 1, &adapter->tables);
9236 return 0;
9237}
9238
9239static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
9240 struct tc_cls_u32_offload *cls)
9241{
9242 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9243
9244 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9245 return -EINVAL;
9246
9247 clear_bit(uhtid - 1, &adapter->tables);
9248 return 0;
9249}
9250
9251#ifdef CONFIG_NET_CLS_ACT
9252struct upper_walk_data {
9253 struct ixgbe_adapter *adapter;
9254 u64 action;
9255 int ifindex;
9256 u8 queue;
9257};
9258
9259static int get_macvlan_queue(struct net_device *upper,
9260 struct netdev_nested_priv *priv)
9261{
9262 if (netif_is_macvlan(upper)) {
9263 struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
9264 struct ixgbe_adapter *adapter;
9265 struct upper_walk_data *data;
9266 int ifindex;
9267
9268 data = (struct upper_walk_data *)priv->data;
9269 ifindex = data->ifindex;
9270 adapter = data->adapter;
9271 if (vadapter && upper->ifindex == ifindex) {
9272 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
9273 data->action = data->queue;
9274 return 1;
9275 }
9276 }
9277
9278 return 0;
9279}
9280
9281static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
9282 u8 *queue, u64 *action)
9283{
9284 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
9285 unsigned int num_vfs = adapter->num_vfs, vf;
9286 struct netdev_nested_priv priv;
9287 struct upper_walk_data data;
9288 struct net_device *upper;
9289
 /* redirect to a SRIOV VF */
9291 for (vf = 0; vf < num_vfs; ++vf) {
9292 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
9293 if (upper->ifindex == ifindex) {
9294 *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
9295 *action = vf + 1;
9296 *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
9297 return 0;
9298 }
9299 }
9300
 /* redirect to an offloaded macvlan netdev */
9302 data.adapter = adapter;
9303 data.ifindex = ifindex;
9304 data.action = 0;
9305 data.queue = 0;
9306 priv.data = (void *)&data;
9307 if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
9308 get_macvlan_queue, &priv)) {
9309 *action = data.action;
9310 *queue = data.queue;
9311
9312 return 0;
9313 }
9314
9315 return -EINVAL;
9316}
9317
9318static int parse_tc_actions(struct ixgbe_adapter *adapter,
9319 struct tcf_exts *exts, u64 *action, u8 *queue)
9320{
9321 const struct tc_action *a;
9322 int i;
9323
9324 if (!tcf_exts_has_actions(exts))
9325 return -EINVAL;
9326
9327 tcf_exts_for_each_action(i, a, exts) {
 /* Drop action */
9329 if (is_tcf_gact_shot(a)) {
9330 *action = IXGBE_FDIR_DROP_QUEUE;
9331 *queue = IXGBE_FDIR_DROP_QUEUE;
9332 return 0;
9333 }
9334
 /* Redirect to a VF or an offloaded macvlan */
9336 if (is_tcf_mirred_egress_redirect(a)) {
9337 struct net_device *dev = tcf_mirred_dev(a);
9338
9339 if (!dev)
9340 return -EINVAL;
9341 return handle_redirect_action(adapter, dev->ifindex,
9342 queue, action);
9343 }
9344
9345 return -EINVAL;
9346 }
9347
9348 return -EINVAL;
9349}
9350#else
9351static int parse_tc_actions(struct ixgbe_adapter *adapter,
9352 struct tcf_exts *exts, u64 *action, u8 *queue)
9353{
9354 return -EINVAL;
9355}
9356#endif
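
/* A sketch of the only two action forms parse_tc_actions() accepts
 * (illustrative; "eth0" and "macvlan0" are placeholder netdevs):
 *
 *   tc filter add dev eth0 parent ffff: protocol ip prio 99 u32 \
 *           match ip dst 192.168.1.1/32 action drop
 *   tc filter add dev eth0 parent ffff: protocol ip prio 99 u32 \
 *           match ip dst 192.168.1.1/32 \
 *           action mirred egress redirect dev macvlan0
 *
 * Any other action in the list leaves the filter unoffloaded.
 */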
9357
9358static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
9359 union ixgbe_atr_input *mask,
9360 struct tc_cls_u32_offload *cls,
9361 struct ixgbe_mat_field *field_ptr,
9362 struct ixgbe_nexthdr *nexthdr)
9363{
9364 int i, j, off;
9365 __be32 val, m;
9366 bool found_entry = false, found_jump_field = false;
9367
9368 for (i = 0; i < cls->knode.sel->nkeys; i++) {
9369 off = cls->knode.sel->keys[i].off;
9370 val = cls->knode.sel->keys[i].val;
9371 m = cls->knode.sel->keys[i].mask;
9372
9373 for (j = 0; field_ptr[j].val; j++) {
9374 if (field_ptr[j].off == off) {
9375 field_ptr[j].val(input, mask, (__force u32)val,
9376 (__force u32)m);
9377 input->filter.formatted.flow_type |=
9378 field_ptr[j].type;
9379 found_entry = true;
9380 break;
9381 }
9382 }
9383 if (nexthdr) {
9384 if (nexthdr->off == cls->knode.sel->keys[i].off &&
9385 nexthdr->val ==
9386 (__force u32)cls->knode.sel->keys[i].val &&
9387 nexthdr->mask ==
9388 (__force u32)cls->knode.sel->keys[i].mask)
9389 found_jump_field = true;
9390 else
9391 continue;
9392 }
9393 }
9394
9395 if (nexthdr && !found_jump_field)
9396 return -EINVAL;
9397
9398 if (!found_entry)
9399 return 0;
9400
9401 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
9402 IXGBE_ATR_L4TYPE_MASK;
9403
9404 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
9405 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
9406
9407 return 0;
9408}
9409
9410static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
9411 struct tc_cls_u32_offload *cls)
9412{
9413 __be16 protocol = cls->common.protocol;
9414 u32 loc = cls->knode.handle & 0xfffff;
9415 struct ixgbe_hw *hw = &adapter->hw;
9416 struct ixgbe_mat_field *field_ptr;
9417 struct ixgbe_fdir_filter *input = NULL;
9418 union ixgbe_atr_input *mask = NULL;
9419 struct ixgbe_jump_table *jump = NULL;
9420 int i, err = -EINVAL;
9421 u8 queue;
9422 u32 uhtid, link_uhtid;
9423
9424 uhtid = TC_U32_USERHTID(cls->knode.handle);
9425 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
9426
	/* At the moment cls_u32 jumps to the network layer and skips past
	 * L2 headers. The canonical method to match L2 frames is to use
	 * negative offsets, but that is error prone at best and really just
	 * broken, because there is no way to know what sort of header is in
	 * front of the network layer. Once the cls_u32 library handles
	 * headers with negative offsets this restriction can go away.
	 */
9434 if (protocol != htons(ETH_P_IP))
9435 return err;
9436
9437 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
9438 e_err(drv, "Location out of range\n");
9439 return err;
9440 }
9441
	/* cls u32 is a graph starting at root node 0x800. The driver tracks
	 * links and also the fields used to advance the parser across each
	 * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map
	 * the u32 graph onto the hardware parse graph denoted in
	 * ixgbe_model.h. To add support for new nodes, update the
	 * ixgbe_model.h parse structures; this function should stay generic,
	 * so avoid hardcoding values here.
	 */
9449 if (uhtid == 0x800) {
9450 field_ptr = (adapter->jump_tables[0])->mat;
9451 } else {
9452 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9453 return err;
9454 if (!adapter->jump_tables[uhtid])
9455 return err;
9456 field_ptr = (adapter->jump_tables[uhtid])->mat;
9457 }
9458
9459 if (!field_ptr)
9460 return err;
9461
	/* At this point we know the field_ptr is valid and need to either
	 * build a cls_u32 link or attach a filter. Adding a link to a
	 * handle that does not exist is invalid, and the same goes for
	 * adding rules to handles that don't exist.
	 */
9468 if (link_uhtid) {
9469 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
9470
9471 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
9472 return err;
9473
9474 if (!test_bit(link_uhtid - 1, &adapter->tables))
9475 return err;
9476
		/* Multiple filters as links to a single hash table are not
		 * supported. To add a new filter with a link to an already
		 * linked child hash table, the existing link filter must be
		 * removed first.
		 */
9482 if (adapter->jump_tables[link_uhtid] &&
9483 (adapter->jump_tables[link_uhtid])->link_hdl) {
9484 e_err(drv, "Link filter exists for link: %x\n",
9485 link_uhtid);
9486 return err;
9487 }
9488
9489 for (i = 0; nexthdr[i].jump; i++) {
9490 if (nexthdr[i].o != cls->knode.sel->offoff ||
9491 nexthdr[i].s != cls->knode.sel->offshift ||
9492 nexthdr[i].m !=
9493 (__force u32)cls->knode.sel->offmask)
9494 return err;
9495
9496 jump = kzalloc(sizeof(*jump), GFP_KERNEL);
9497 if (!jump)
9498 return -ENOMEM;
9499 input = kzalloc(sizeof(*input), GFP_KERNEL);
9500 if (!input) {
9501 err = -ENOMEM;
9502 goto free_jump;
9503 }
9504 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9505 if (!mask) {
9506 err = -ENOMEM;
9507 goto free_input;
9508 }
9509 jump->input = input;
9510 jump->mask = mask;
9511 jump->link_hdl = cls->knode.handle;
9512
9513 err = ixgbe_clsu32_build_input(input, mask, cls,
9514 field_ptr, &nexthdr[i]);
9515 if (!err) {
9516 jump->mat = nexthdr[i].jump;
9517 adapter->jump_tables[link_uhtid] = jump;
9518 break;
9519 } else {
9520 kfree(mask);
9521 kfree(input);
9522 kfree(jump);
9523 }
9524 }
9525 return 0;
9526 }
9527
9528 input = kzalloc(sizeof(*input), GFP_KERNEL);
9529 if (!input)
9530 return -ENOMEM;
9531 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9532 if (!mask) {
9533 err = -ENOMEM;
9534 goto free_input;
9535 }
9536
9537 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
9538 if ((adapter->jump_tables[uhtid])->input)
9539 memcpy(input, (adapter->jump_tables[uhtid])->input,
9540 sizeof(*input));
9541 if ((adapter->jump_tables[uhtid])->mask)
9542 memcpy(mask, (adapter->jump_tables[uhtid])->mask,
9543 sizeof(*mask));
9544
		/* Lookup in all child hash tables if this location is
		 * already filled with a filter
		 */
9548 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9549 struct ixgbe_jump_table *link = adapter->jump_tables[i];
9550
9551 if (link && (test_bit(loc - 1, link->child_loc_map))) {
9552 e_err(drv, "Filter exists in location: %x\n",
9553 loc);
9554 err = -EINVAL;
9555 goto err_out;
9556 }
9557 }
9558 }
9559 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
9560 if (err)
9561 goto err_out;
9562
9563 err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
9564 &queue);
9565 if (err < 0)
9566 goto err_out;
9567
9568 input->sw_idx = loc;
9569
9570 spin_lock(&adapter->fdir_perfect_lock);
9571
9572 if (hlist_empty(&adapter->fdir_filter_list)) {
9573 memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
9574 err = ixgbe_fdir_set_input_mask_82599(hw, mask);
9575 if (err)
9576 goto err_out_w_lock;
9577 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
9578 err = -EINVAL;
9579 goto err_out_w_lock;
9580 }
9581
9582 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
9583 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
9584 input->sw_idx, queue);
9585 if (!err)
9586 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
9587 spin_unlock(&adapter->fdir_perfect_lock);
9588
9589 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
9590 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
9591
9592 kfree(mask);
9593 return err;
9594err_out_w_lock:
9595 spin_unlock(&adapter->fdir_perfect_lock);
9596err_out:
9597 kfree(mask);
9598free_input:
9599 kfree(input);
9600free_jump:
9601 kfree(jump);
9602 return err;
9603}
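
/* End-to-end sketch of the link handling in ixgbe_configure_clsu32()
 * (illustrative; names and values are placeholders): a filter in the
 * root table (ht 800:) links to child table 1: and describes how to
 * advance the parser to the next header, then a filter in table 1:
 * does the final match and carries the action:
 *
 *   tc filter add dev eth0 parent ffff: protocol ip prio 99 \
 *           u32 ht 800: link 1: \
 *           offset at 0 mask 0f00 shift 6 plus 0 eat \
 *           match ip protocol 6 ff
 *   tc filter add dev eth0 parent ffff: protocol ip prio 99 \
 *           u32 ht 1: match tcp dst 80 ffff action drop
 *
 * The offset parameters must match an entry in ixgbe_ipv4_jumps for
 * the link to be accepted.
 */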
9604
9605static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
9606 struct tc_cls_u32_offload *cls_u32)
9607{
9608 switch (cls_u32->command) {
9609 case TC_CLSU32_NEW_KNODE:
9610 case TC_CLSU32_REPLACE_KNODE:
9611 return ixgbe_configure_clsu32(adapter, cls_u32);
9612 case TC_CLSU32_DELETE_KNODE:
9613 return ixgbe_delete_clsu32(adapter, cls_u32);
9614 case TC_CLSU32_NEW_HNODE:
9615 case TC_CLSU32_REPLACE_HNODE:
9616 return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32);
9617 case TC_CLSU32_DELETE_HNODE:
9618 return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32);
9619 default:
9620 return -EOPNOTSUPP;
9621 }
9622}
9623
9624static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9625 void *cb_priv)
9626{
9627 struct ixgbe_adapter *adapter = cb_priv;
9628
9629 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
9630 return -EOPNOTSUPP;
9631
9632 switch (type) {
9633 case TC_SETUP_CLSU32:
9634 return ixgbe_setup_tc_cls_u32(adapter, type_data);
9635 default:
9636 return -EOPNOTSUPP;
9637 }
9638}
9639
9640static int ixgbe_setup_tc_mqprio(struct net_device *dev,
9641 struct tc_mqprio_qopt *mqprio)
9642{
9643 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9644 return ixgbe_setup_tc(dev, mqprio->num_tc);
9645}
9646
9647static LIST_HEAD(ixgbe_block_cb_list);
9648
9649static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
9650 void *type_data)
9651{
9652 struct ixgbe_adapter *adapter = netdev_priv(dev);
9653
9654 switch (type) {
9655 case TC_SETUP_BLOCK:
9656 return flow_block_cb_setup_simple(type_data,
9657 &ixgbe_block_cb_list,
9658 ixgbe_setup_tc_block_cb,
9659 adapter, adapter, true);
9660 case TC_SETUP_QDISC_MQPRIO:
9661 return ixgbe_setup_tc_mqprio(dev, type_data);
9662 default:
9663 return -EOPNOTSUPP;
9664 }
9665}
9666
9667#ifdef CONFIG_PCI_IOV
9668void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
9669{
9670 struct net_device *netdev = adapter->netdev;
9671
9672 rtnl_lock();
9673 ixgbe_setup_tc(netdev, adapter->hw_tcs);
9674 rtnl_unlock();
9675}
9676
9677#endif
9678void ixgbe_do_reset(struct net_device *netdev)
9679{
9680 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9681
9682 if (netif_running(netdev))
9683 ixgbe_reinit_locked(adapter);
9684 else
9685 ixgbe_reset(adapter);
9686}
9687
9688static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
9689 netdev_features_t features)
9690{
9691 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9692
	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
9694 if (!(features & NETIF_F_RXCSUM))
9695 features &= ~NETIF_F_LRO;
9696
	/* Turn off LRO if not RSC capable */
9698 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
9699 features &= ~NETIF_F_LRO;
9700
9701 if (adapter->xdp_prog && (features & NETIF_F_LRO)) {
9702 e_dev_err("LRO is not supported with XDP\n");
9703 features &= ~NETIF_F_LRO;
9704 }
9705
9706 return features;
9707}
9708
9709static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
9710{
9711 int rss = min_t(int, ixgbe_max_rss_indices(adapter),
9712 num_online_cpus());
9713
	/* go back to full RSS if we're not running SR-IOV */
9715 if (!adapter->ring_feature[RING_F_VMDQ].offset)
9716 adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
9717 IXGBE_FLAG_SRIOV_ENABLED);
9718
9719 adapter->ring_feature[RING_F_RSS].limit = rss;
9720 adapter->ring_feature[RING_F_VMDQ].limit = 1;
9721
9722 ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
9723}
9724
9725static int ixgbe_set_features(struct net_device *netdev,
9726 netdev_features_t features)
9727{
9728 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9729 netdev_features_t changed = netdev->features ^ features;
9730 bool need_reset = false;
9731
	/* Make sure RSC matches LRO, reset if change */
9733 if (!(features & NETIF_F_LRO)) {
9734 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9735 need_reset = true;
9736 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
9737 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
9738 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
9739 if (adapter->rx_itr_setting == 1 ||
9740 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
9741 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
9742 need_reset = true;
9743 } else if ((changed ^ features) & NETIF_F_LRO) {
9744 e_info(probe, "rx-usecs set too low, "
9745 "disabling RSC\n");
9746 }
9747 }
9748
	/*
	 * Check if Flow Director n-tuple support or hw_tc support was
	 * enabled or disabled.  If the state changed, we need to reset.
	 */
9753 if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
		/* turn off ATR, enable perfect filters and reset */
9755 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
9756 need_reset = true;
9757
9758 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
9759 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9760 } else {
		/* turn off perfect filters, enable ATR and reset */
9762 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
9763 need_reset = true;
9764
9765 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;

		/* We cannot enable ATR if SR-IOV is enabled */
		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
		    /* We cannot enable ATR if we have 2 or more tcs */
		    (adapter->hw_tcs > 1) ||
		    /* We cannot enable ATR if RSS is disabled */
		    (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
		    /* A sample rate of 0 indicates ATR disabled */
		    (!adapter->atr_sample_rate))
			; /* do nothing, not supported */
		else /* otherwise supported and set the flag */
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
9778 }
9779
9780 if (changed & NETIF_F_RXALL)
9781 need_reset = true;
9782
9783 netdev->features = features;
9784
9785 if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
9786 ixgbe_reset_l2fw_offload(adapter);
9787 else if (need_reset)
9788 ixgbe_do_reset(netdev);
9789 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
9790 NETIF_F_HW_VLAN_CTAG_FILTER))
9791 ixgbe_set_rx_mode(netdev);
9792
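	/* Returning 1 rather than 0 tells __netdev_update_features() that
	 * the driver has already committed the new feature set to
	 * netdev->features itself, which is done above.
	 */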
9793 return 1;
9794}
9795
9796static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9797 struct net_device *dev,
9798 const unsigned char *addr, u16 vid,
9799 u16 flags,
9800 struct netlink_ext_ack *extack)
9801{
	/* guarantee we can provide a unique filter for the unicast address */
9803 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
9804 struct ixgbe_adapter *adapter = netdev_priv(dev);
9805 u16 pool = VMDQ_P(0);
9806
9807 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
9808 return -ENOMEM;
9809 }
9810
9811 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
9812}
9813
/**
 * ixgbe_configure_bridge_mode - set various bridge modes
 * @adapter: the private structure
 * @mode: requested bridge mode
 *
 * Configure some settings that need to change when the hardware bridge
 * mode is activated.
 **/
9821static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
9822 __u16 mode)
9823{
9824 struct ixgbe_hw *hw = &adapter->hw;
9825 unsigned int p, num_pools;
9826 u32 vmdctl;
9827
9828 switch (mode) {
9829 case BRIDGE_MODE_VEPA:
		/* disable Tx loopback, rely on switch hairpin mode */
9831 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
9832
		/* must enable Rx switching replication to allow multicast
		 * packet reception on all VFs, and to enable source address
		 * pruning.
		 */
9837 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9838 vmdctl |= IXGBE_VT_CTL_REPLEN;
9839 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9840
		/* enable Rx source address pruning. Note, this requires
		 * replication to be enabled or else it does nothing.
		 */
9844 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9845 for (p = 0; p < num_pools; p++) {
9846 if (hw->mac.ops.set_source_address_pruning)
9847 hw->mac.ops.set_source_address_pruning(hw,
9848 true,
9849 p);
9850 }
9851 break;
9852 case BRIDGE_MODE_VEB:
		/* enable Tx loopback for internal VF/PF communication */
9854 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
9855 IXGBE_PFDTXGSWC_VT_LBEN);
9856
		/* must disable Rx switching replication unless we have
		 * SR-IOV virtual functions enabled
		 */
9860 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9861 if (!adapter->num_vfs)
9862 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
9863 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9864
		/* disable Rx source address pruning, since we don't expect
		 * to be receiving external loopback of our transmitted
		 * frames.
		 */
9868 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9869 for (p = 0; p < num_pools; p++) {
9870 if (hw->mac.ops.set_source_address_pruning)
9871 hw->mac.ops.set_source_address_pruning(hw,
9872 false,
9873 p);
9874 }
9875 break;
9876 default:
9877 return -EINVAL;
9878 }
9879
9880 adapter->bridge_mode = mode;
9881
9882 e_info(drv, "enabling bridge mode: %s\n",
9883 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9884
9885 return 0;
9886}
9887
9888static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
9889 struct nlmsghdr *nlh, u16 flags,
9890 struct netlink_ext_ack *extack)
9891{
9892 struct ixgbe_adapter *adapter = netdev_priv(dev);
9893 struct nlattr *attr, *br_spec;
9894 int rem;
9895
9896 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9897 return -EOPNOTSUPP;
9898
9899 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9900 if (!br_spec)
9901 return -EINVAL;
9902
9903 nla_for_each_nested(attr, br_spec, rem) {
9904 int status;
9905 __u16 mode;
9906
9907 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9908 continue;
9909
9910 if (nla_len(attr) < sizeof(mode))
9911 return -EINVAL;
9912
9913 mode = nla_get_u16(attr);
9914 status = ixgbe_configure_bridge_mode(adapter, mode);
9915 if (status)
9916 return status;
9917
9918 break;
9919 }
9920
9921 return 0;
9922}
9923
9924static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9925 struct net_device *dev,
9926 u32 filter_mask, int nlflags)
9927{
9928 struct ixgbe_adapter *adapter = netdev_priv(dev);
9929
9930 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9931 return 0;
9932
9933 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
9934 adapter->bridge_mode, 0, 0, nlflags,
9935 filter_mask, NULL);
9936}
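
/* Both bridge callbacks above are exercised via the iproute2 bridge
 * tool, e.g. (illustrative; "eth0" is a placeholder):
 *
 *   bridge link set dev eth0 hwmode vepa    # or "veb"
 *   bridge link show dev eth0
 */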
9937
9938static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
9939{
9940 struct ixgbe_adapter *adapter = netdev_priv(pdev);
9941 struct ixgbe_fwd_adapter *accel;
9942 int tcs = adapter->hw_tcs ? : 1;
9943 int pool, err;
9944
9945 if (adapter->xdp_prog) {
9946 e_warn(probe, "L2FW offload is not supported with XDP\n");
9947 return ERR_PTR(-EINVAL);
9948 }
9949
	/* The hardware supported by ixgbe only filters on the destination MAC
	 * address. In order to avoid issues we only support offloading modes
	 * where the hardware can actually provide the functionality.
	 */
9954 if (!macvlan_supports_dest_filter(vdev))
9955 return ERR_PTR(-EMEDIUMTYPE);
9956
	/* We need to lock down the macvlan to be a single queue device so
	 * that we can reuse the tc_to_txq field in the macvlan netdev to
	 * represent the queue mapping to our netdev.
	 */
9961 if (netif_is_multiqueue(vdev))
9962 return ERR_PTR(-ERANGE);
9963
9964 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9965 if (pool == adapter->num_rx_pools) {
9966 u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
9967 u16 reserved_pools;
9968
9969 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
9970 adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
9971 adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
9972 return ERR_PTR(-EBUSY);
9973
		/* Hardware has a limited number of available pools. Each VF
		 * and the PF require a pool. Check to ensure we don't
		 * attempt to use more than the available number of pools.
		 */
9978 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
9979 return ERR_PTR(-EBUSY);
9980
		/* Enable VMDq flag so device will be set in VM mode */
9982 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
9983 IXGBE_FLAG_SRIOV_ENABLED;
9984
		/* Try to reserve as many queues per pool as possible,
		 * we start with the configurations that support 4 queues
		 * per pool, followed by 2, and then by just 1 per pool.
		 */
9989 if (used_pools < 32 && adapter->num_rx_pools < 16)
9990 reserved_pools = min_t(u16,
9991 32 - used_pools,
9992 16 - adapter->num_rx_pools);
9993 else if (adapter->num_rx_pools < 32)
9994 reserved_pools = min_t(u16,
9995 64 - used_pools,
9996 32 - adapter->num_rx_pools);
9997 else
9998 reserved_pools = 64 - used_pools;
9999
10000
10001 if (!reserved_pools)
10002 return ERR_PTR(-EBUSY);
10003
10004 adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
10005
		/* Force reinit of ring allocation with VMDQ enabled */
10007 err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
10008 if (err)
10009 return ERR_PTR(err);
10010
10011 if (pool >= adapter->num_rx_pools)
10012 return ERR_PTR(-ENOMEM);
10013 }
10014
10015 accel = kzalloc(sizeof(*accel), GFP_KERNEL);
10016 if (!accel)
10017 return ERR_PTR(-ENOMEM);
10018
10019 set_bit(pool, adapter->fwd_bitmask);
10020 netdev_set_sb_channel(vdev, pool);
10021 accel->pool = pool;
10022 accel->netdev = vdev;
10023
10024 if (!netif_running(pdev))
10025 return accel;
10026
10027 err = ixgbe_fwd_ring_up(adapter, accel);
10028 if (err)
10029 return ERR_PTR(err);
10030
10031 return accel;
10032}
10033
10034static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
10035{
10036 struct ixgbe_fwd_adapter *accel = priv;
10037 struct ixgbe_adapter *adapter = netdev_priv(pdev);
10038 unsigned int rxbase = accel->rx_base_queue;
10039 unsigned int i;
10040
	/* delete unicast filter associated with offloaded interface */
10042 ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
10043 VMDQ_P(accel->pool));
10044
	/* Allow remaining Rx packets to get flushed out of the
	 * Rx FIFO before we drop the netdev for the ring.
	 */
10048 usleep_range(10000, 20000);
10049
10050 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
10051 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
10052 struct ixgbe_q_vector *qv = ring->q_vector;
10053
		/* Make sure we aren't processing any packets and clear
		 * netdev to shut down the ring.
		 */
10057 if (netif_running(adapter->netdev))
10058 napi_synchronize(&qv->napi);
10059 ring->netdev = NULL;
10060 }
10061
	/* unbind the queues and drop the subordinate channel config */
10063 netdev_unbind_sb_channel(pdev, accel->netdev);
10064 netdev_set_sb_channel(accel->netdev, 0);
10065
10066 clear_bit(accel->pool, adapter->fwd_bitmask);
10067 kfree(accel);
10068}
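
/* The fwd_add/fwd_del pair above backs L2 forwarding (macvlan) offload,
 * reached roughly as follows (illustrative; names are placeholders):
 *
 *   ethtool -K eth0 l2-fwd-offload on
 *   ip link add link eth0 name macvlan0 type macvlan
 */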
10069
10070#define IXGBE_MAX_MAC_HDR_LEN 127
10071#define IXGBE_MAX_NETWORK_HDR_LEN 511
10072
10073static netdev_features_t
10074ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
10075 netdev_features_t features)
10076{
10077 unsigned int network_hdr_len, mac_hdr_len;
10078
	/* Make certain the headers can be described by a context descriptor */
10080 mac_hdr_len = skb_network_header(skb) - skb->data;
10081 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
10082 return features & ~(NETIF_F_HW_CSUM |
10083 NETIF_F_SCTP_CRC |
10084 NETIF_F_GSO_UDP_L4 |
10085 NETIF_F_HW_VLAN_CTAG_TX |
10086 NETIF_F_TSO |
10087 NETIF_F_TSO6);
10088
10089 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
10090 if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
10091 return features & ~(NETIF_F_HW_CSUM |
10092 NETIF_F_SCTP_CRC |
10093 NETIF_F_GSO_UDP_L4 |
10094 NETIF_F_TSO |
10095 NETIF_F_TSO6);
10096
	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 * IPsec offload sets skb->encapsulation but still can handle
	 * the TSO, so it's the exception.
	 */
10102 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
10103#ifdef CONFIG_IXGBE_IPSEC
10104 if (!secpath_exists(skb))
10105#endif
10106 features &= ~NETIF_F_TSO;
10107 }
10108
10109 return features;
10110}
10111
10112static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10113{
10114 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
10115 struct ixgbe_adapter *adapter = netdev_priv(dev);
10116 struct bpf_prog *old_prog;
10117 bool need_reset;
10118
10119 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
10120 return -EINVAL;
10121
10122 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
10123 return -EINVAL;
10124
	/* verify ixgbe ring attributes are sufficient for XDP */
10126 for (i = 0; i < adapter->num_rx_queues; i++) {
10127 struct ixgbe_ring *ring = adapter->rx_ring[i];
10128
10129 if (ring_is_rsc_enabled(ring))
10130 return -EINVAL;
10131
10132 if (frame_size > ixgbe_rx_bufsz(ring))
10133 return -EINVAL;
10134 }
10135
10136 if (nr_cpu_ids > MAX_XDP_QUEUES)
10137 return -ENOMEM;
10138
10139 old_prog = xchg(&adapter->xdp_prog, prog);
10140 need_reset = (!!prog != !!old_prog);
10141
	/* If transitioning XDP modes reconfigure rings */
10143 if (need_reset) {
10144 int err;
10145
10146 if (!prog)
			/* Wait until ndo_xsk_wakeup completes. */
10148 synchronize_rcu();
10149 err = ixgbe_setup_tc(dev, adapter->hw_tcs);
10150
10151 if (err) {
10152 rcu_assign_pointer(adapter->xdp_prog, old_prog);
10153 return -EINVAL;
10154 }
10155 } else {
10156 for (i = 0; i < adapter->num_rx_queues; i++)
10157 (void)xchg(&adapter->rx_ring[i]->xdp_prog,
10158 adapter->xdp_prog);
10159 }
10160
10161 if (old_prog)
10162 bpf_prog_put(old_prog);
10163
	/* Kick start the NAPI context if there is an AF_XDP socket open
	 * on that queue id, so that receiving will start.
	 */
10167 if (need_reset && prog)
10168 for (i = 0; i < adapter->num_rx_queues; i++)
10169 if (adapter->xdp_ring[i]->xsk_pool)
10170 (void)ixgbe_xsk_wakeup(adapter->netdev, i,
10171 XDP_WAKEUP_RX);
10172
10173 return 0;
10174}
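
/* ixgbe_xdp_setup() is reached through the ndo_bpf hook below, e.g.
 * from iproute2 (illustrative; the object and section names are
 * placeholders):
 *
 *   ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 *   ip link set dev eth0 xdp off
 */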
10175
10176static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
10177{
10178 struct ixgbe_adapter *adapter = netdev_priv(dev);
10179
10180 switch (xdp->command) {
10181 case XDP_SETUP_PROG:
10182 return ixgbe_xdp_setup(dev, xdp->prog);
10183 case XDP_SETUP_XSK_POOL:
10184 return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
10185 xdp->xsk.queue_id);
10186
10187 default:
10188 return -EINVAL;
10189 }
10190}
10191
10192void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
10193{
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 */
10197 wmb();
10198 writel(ring->next_to_use, ring->tail);
10199}
10200
10201static int ixgbe_xdp_xmit(struct net_device *dev, int n,
10202 struct xdp_frame **frames, u32 flags)
10203{
10204 struct ixgbe_adapter *adapter = netdev_priv(dev);
10205 struct ixgbe_ring *ring;
10206 int drops = 0;
10207 int i;
10208
10209 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
10210 return -ENETDOWN;
10211
10212 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
10213 return -EINVAL;
10214
	/* During program transitions it's possible adapter->xdp_prog is
	 * assigned but the ring has not been configured yet. In this case
	 * simply abort xmit.
	 */
10218 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
10219 if (unlikely(!ring))
10220 return -ENXIO;
10221
10222 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
10223 return -ENXIO;
10224
10225 for (i = 0; i < n; i++) {
10226 struct xdp_frame *xdpf = frames[i];
10227 int err;
10228
10229 err = ixgbe_xmit_xdp_ring(adapter, xdpf);
10230 if (err != IXGBE_XDP_TX) {
10231 xdp_return_frame_rx_napi(xdpf);
10232 drops++;
10233 }
10234 }
10235
10236 if (unlikely(flags & XDP_XMIT_FLUSH))
10237 ixgbe_xdp_ring_update_tail(ring);
10238
10239 return n - drops;
10240}
10241
10242static const struct net_device_ops ixgbe_netdev_ops = {
10243 .ndo_open = ixgbe_open,
10244 .ndo_stop = ixgbe_close,
10245 .ndo_start_xmit = ixgbe_xmit_frame,
10246 .ndo_set_rx_mode = ixgbe_set_rx_mode,
10247 .ndo_validate_addr = eth_validate_addr,
10248 .ndo_set_mac_address = ixgbe_set_mac,
10249 .ndo_change_mtu = ixgbe_change_mtu,
10250 .ndo_tx_timeout = ixgbe_tx_timeout,
10251 .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
10252 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
10253 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
10254 .ndo_do_ioctl = ixgbe_ioctl,
10255 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
10256 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
10257 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
10258 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
10259 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
10260 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
10261 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
10262 .ndo_get_stats64 = ixgbe_get_stats64,
10263 .ndo_setup_tc = __ixgbe_setup_tc,
10264#ifdef IXGBE_FCOE
10265 .ndo_select_queue = ixgbe_select_queue,
10266 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
10267 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
10268 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
10269 .ndo_fcoe_enable = ixgbe_fcoe_enable,
10270 .ndo_fcoe_disable = ixgbe_fcoe_disable,
10271 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
10272 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
10273#endif
10274 .ndo_set_features = ixgbe_set_features,
10275 .ndo_fix_features = ixgbe_fix_features,
10276 .ndo_fdb_add = ixgbe_ndo_fdb_add,
10277 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
10278 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
10279 .ndo_dfwd_add_station = ixgbe_fwd_add,
10280 .ndo_dfwd_del_station = ixgbe_fwd_del,
10281 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
10282 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
10283 .ndo_features_check = ixgbe_features_check,
10284 .ndo_bpf = ixgbe_xdp,
10285 .ndo_xdp_xmit = ixgbe_xdp_xmit,
10286 .ndo_xsk_wakeup = ixgbe_xsk_wakeup,
10287};
10288
10289static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
10290 struct ixgbe_ring *tx_ring)
10291{
10292 unsigned long wait_delay, delay_interval;
10293 struct ixgbe_hw *hw = &adapter->hw;
10294 u8 reg_idx = tx_ring->reg_idx;
10295 int wait_loop;
10296 u32 txdctl;
10297
10298 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
10299
	/* delay mechanism from ixgbe_disable_tx */
10301 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10302
10303 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10304 wait_delay = delay_interval;
10305
10306 while (wait_loop--) {
10307 usleep_range(wait_delay, wait_delay + 10);
10308 wait_delay += delay_interval * 2;
10309 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
10310
10311 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
10312 return;
10313 }
10314
10315 e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
10316}
10317
10318static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
10319 struct ixgbe_ring *tx_ring)
10320{
10321 set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10322 ixgbe_disable_txr_hw(adapter, tx_ring);
10323}
10324
10325static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
10326 struct ixgbe_ring *rx_ring)
10327{
10328 unsigned long wait_delay, delay_interval;
10329 struct ixgbe_hw *hw = &adapter->hw;
10330 u8 reg_idx = rx_ring->reg_idx;
10331 int wait_loop;
10332 u32 rxdctl;
10333
10334 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10335 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
10336 rxdctl |= IXGBE_RXDCTL_SWFLSH;
10337
	/* write value back with RXDCTL.ENABLE bit cleared */
10339 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
10340
	/* RXDCTL.EN may not change on 82598 if link is down, so skip it */
10342 if (hw->mac.type == ixgbe_mac_82598EB &&
10343 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
10344 return;
10345
	/* delay mechanism from ixgbe_disable_rx */
10347 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10348
10349 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10350 wait_delay = delay_interval;
10351
10352 while (wait_loop--) {
10353 usleep_range(wait_delay, wait_delay + 10);
10354 wait_delay += delay_interval * 2;
10355 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10356
10357 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
10358 return;
10359 }
10360
10361 e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
10362}
10363
10364static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
10365{
10366 memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
10367 memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
10368}
10369
10370static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
10371{
10372 memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
10373 memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
10374}
10375
/**
 * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
 * @adapter: adapter structure
 * @ring: ring index
 *
 * This function disables a certain Rx/Tx/XDP Tx ring. The function
 * assumes that the netdev is running.
 **/
10384void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
10385{
10386 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10387
10388 rx_ring = adapter->rx_ring[ring];
10389 tx_ring = adapter->tx_ring[ring];
10390 xdp_ring = adapter->xdp_ring[ring];
10391
10392 ixgbe_disable_txr(adapter, tx_ring);
10393 if (xdp_ring)
10394 ixgbe_disable_txr(adapter, xdp_ring);
10395 ixgbe_disable_rxr_hw(adapter, rx_ring);
10396
10397 if (xdp_ring)
10398 synchronize_rcu();
10399
	/* Rx/Tx/XDP Tx share the same napi context. */
10401 napi_disable(&rx_ring->q_vector->napi);
10402
10403 ixgbe_clean_tx_ring(tx_ring);
10404 if (xdp_ring)
10405 ixgbe_clean_tx_ring(xdp_ring);
10406 ixgbe_clean_rx_ring(rx_ring);
10407
10408 ixgbe_reset_txr_stats(tx_ring);
10409 if (xdp_ring)
10410 ixgbe_reset_txr_stats(xdp_ring);
10411 ixgbe_reset_rxr_stats(rx_ring);
10412}
10413
/**
 * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
 * @adapter: adapter structure
 * @ring: ring index
 *
 * This function enables a certain Rx/Tx/XDP Tx ring. The function
 * assumes that the netdev is running.
 **/
10422void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
10423{
10424 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10425
10426 rx_ring = adapter->rx_ring[ring];
10427 tx_ring = adapter->tx_ring[ring];
10428 xdp_ring = adapter->xdp_ring[ring];
10429
	/* Rx/Tx/XDP Tx share the same napi context. */
10431 napi_enable(&rx_ring->q_vector->napi);
10432
10433 ixgbe_configure_tx_ring(adapter, tx_ring);
10434 if (xdp_ring)
10435 ixgbe_configure_tx_ring(adapter, xdp_ring);
10436 ixgbe_configure_rx_ring(adapter, rx_ring);
10437
10438 clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10439 if (xdp_ring)
10440 clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
10441}
10442
/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * This function enumerates the physical functions co-located on a single
 * slot, in order to determine how many ports a device has. This is most
 * useful in determining the required GT/s of PCIe bandwidth necessary for
 * optimal performance.
 **/
10452static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
10453{
10454 struct pci_dev *entry, *pdev = adapter->pdev;
10455 int physfns = 0;
10456
	/* Some cards cannot use the generic count PCIe functions method,
	 * because they are behind a parent switch, so we hardcode these
	 * with the correct number of functions.
	 */
10461 if (ixgbe_pcie_from_parent(&adapter->hw))
10462 physfns = 4;
10463
10464 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
		/* don't count virtual functions */
10466 if (entry->is_virtfn)
10467 continue;
10468
		/* When the devices on the bus don't all match our device ID,
		 * we can't reliably determine the correct number of
		 * functions. This can occur if a function has been direct
		 * attached to a virtual machine using VT-d, for example. In
		 * this case, simply return -1 to indicate this.
		 */
10475 if ((entry->vendor != pdev->vendor) ||
10476 (entry->device != pdev->device))
10477 return -1;
10478
10479 physfns++;
10480 }
10481
10482 return physfns;
10483}
10484
/**
 * ixgbe_wol_supported - Check whether device supports WoL
 * @adapter: the adapter private structure
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * This function is used by probe and ethtool to determine
 * which devices have WoL support
 *
 **/
10495bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
10496 u16 subdevice_id)
10497{
10498 struct ixgbe_hw *hw = &adapter->hw;
10499 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
10500
	/* WOL not supported on 82598 */
10502 if (hw->mac.type == ixgbe_mac_82598EB)
10503 return false;
10504
	/* check eeprom to see if WOL is enabled for X540 and newer */
10506 if (hw->mac.type >= ixgbe_mac_X540) {
10507 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
10508 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
10509 (hw->bus.func == 0)))
10510 return true;
10511 }
10512
	/* WOL is determined based on device IDs for 82599 MACs */
10514 switch (device_id) {
10515 case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices can support WOL */
10517 switch (subdevice_id) {
10518 case IXGBE_SUBDEV_ID_82599_560FLR:
10519 case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
10520 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
10521 case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
			/* only support first port */
10523 if (hw->bus.func != 0)
10524 break;
10525 fallthrough;
10526 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
10527 case IXGBE_SUBDEV_ID_82599_SFP:
10528 case IXGBE_SUBDEV_ID_82599_RNDC:
10529 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
10530 case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
10531 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
10532 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
10533 return true;
10534 }
10535 break;
10536 case IXGBE_DEV_ID_82599EN_SFP:
		/* Only these subdevices support WOL */
10538 switch (subdevice_id) {
10539 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
10540 return true;
10541 }
10542 break;
10543 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WOL */
10545 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
10546 return true;
10547 break;
10548 case IXGBE_DEV_ID_82599_KX4:
10549 return true;
10550 default:
10551 break;
10552 }
10553
10554 return false;
10555}
10556
/**
 * ixgbe_set_fw_version - Set FW version
 * @adapter: the adapter private structure
 *
 * This function is used by probe and ethtool to determine the FW version
 * to format and display. The FW version is taken from the EEPROM/NVM.
 **/
10564static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
10565{
10566 struct ixgbe_hw *hw = &adapter->hw;
10567 struct ixgbe_nvm_version nvm_ver;
10568
10569 ixgbe_get_oem_prod_version(hw, &nvm_ver);
10570 if (nvm_ver.oem_valid) {
10571 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10572 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
10573 nvm_ver.oem_release);
10574 return;
10575 }
10576
10577 ixgbe_get_etk_id(hw, &nvm_ver);
10578 ixgbe_get_orom_version(hw, &nvm_ver);
10579
10580 if (nvm_ver.or_valid) {
10581 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10582 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
10583 nvm_ver.or_build, nvm_ver.or_patch);
10584 return;
10585 }
10586
	/* Set ETrack ID format */
10588 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10589 "0x%08x", nvm_ver.etk_id);
10590}
10591
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
10603static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10604{
10605 struct net_device *netdev;
10606 struct ixgbe_adapter *adapter = NULL;
10607 struct ixgbe_hw *hw;
10608 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
10609 int i, err, pci_using_dac, expected_gts;
10610 unsigned int indices = MAX_TX_QUEUES;
10611 u8 part_str[IXGBE_PBANUM_LENGTH];
10612 bool disable_dev = false;
10613#ifdef IXGBE_FCOE
10614 u16 device_caps;
10615#endif
10616 u32 eec;
10617
	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
10621 if (pdev->is_virtfn) {
10622 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
10623 pci_name(pdev), pdev->vendor, pdev->device);
10624 return -EINVAL;
10625 }
10626
10627 err = pci_enable_device_mem(pdev);
10628 if (err)
10629 return err;
10630
10631 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
10632 pci_using_dac = 1;
10633 } else {
10634 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10635 if (err) {
10636 dev_err(&pdev->dev,
10637 "No usable DMA configuration, aborting\n");
10638 goto err_dma;
10639 }
10640 pci_using_dac = 0;
10641 }
10642
10643 err = pci_request_mem_regions(pdev, ixgbe_driver_name);
10644 if (err) {
10645 dev_err(&pdev->dev,
10646 "pci_request_selected_regions failed 0x%x\n", err);
10647 goto err_pci_reg;
10648 }
10649
10650 pci_enable_pcie_error_reporting(pdev);
10651
10652 pci_set_master(pdev);
10653 pci_save_state(pdev);
10654
10655 if (ii->mac == ixgbe_mac_82598EB) {
10656#ifdef CONFIG_IXGBE_DCB
		/* 8 TC w/ 4 queues per TC */
10658 indices = 4 * MAX_TRAFFIC_CLASS;
10659#else
10660 indices = IXGBE_MAX_RSS_INDICES;
10661#endif
10662 }
10663
10664 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
10665 if (!netdev) {
10666 err = -ENOMEM;
10667 goto err_alloc_etherdev;
10668 }
10669
10670 SET_NETDEV_DEV(netdev, &pdev->dev);
10671
10672 adapter = netdev_priv(netdev);
10673
10674 adapter->netdev = netdev;
10675 adapter->pdev = pdev;
10676 hw = &adapter->hw;
10677 hw->back = adapter;
10678 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
10679
10680 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
10681 pci_resource_len(pdev, 0));
10682 adapter->io_addr = hw->hw_addr;
10683 if (!hw->hw_addr) {
10684 err = -EIO;
10685 goto err_ioremap;
10686 }
10687
10688 netdev->netdev_ops = &ixgbe_netdev_ops;
10689 ixgbe_set_ethtool_ops(netdev);
10690 netdev->watchdog_timeo = 5 * HZ;
10691 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
10692
	/* Setup hw api */
10694 hw->mac.ops = *ii->mac_ops;
10695 hw->mac.type = ii->mac;
10696 hw->mvals = ii->mvals;
10697 if (ii->link_ops)
10698 hw->link.ops = *ii->link_ops;
10699
	/* EEPROM */
10701 hw->eeprom.ops = *ii->eeprom_ops;
10702 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
10703 if (ixgbe_removed(hw->hw_addr)) {
10704 err = -EIO;
10705 goto err_ioremap;
10706 }
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
10708 if (!(eec & BIT(8)))
10709 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
10710
	/* PHY */
10712 hw->phy.ops = *ii->phy_ops;
10713 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
10715 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
10716 hw->phy.mdio.mmds = 0;
10717 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
10718 hw->phy.mdio.dev = netdev;
10719 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
10720 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
10721
	/* setup the private structure */
10723 err = ixgbe_sw_init(adapter, ii);
10724 if (err)
10725 goto err_sw_init;
10726
10727 switch (adapter->hw.mac.type) {
10728 case ixgbe_mac_X550:
10729 case ixgbe_mac_X550EM_x:
10730 netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
10731 break;
10732 case ixgbe_mac_x550em_a:
10733 netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a;
10734 break;
10735 default:
10736 break;
10737 }
10738
	/* Make sure the SWFW semaphore is in a valid state */
10740 if (hw->mac.ops.init_swfw_sync)
10741 hw->mac.ops.init_swfw_sync(hw);
10742
	/* Make it possible for the adapter to be woken up via WOL */
10744 switch (adapter->hw.mac.type) {
10745 case ixgbe_mac_82599EB:
10746 case ixgbe_mac_X540:
10747 case ixgbe_mac_X550:
10748 case ixgbe_mac_X550EM_x:
10749 case ixgbe_mac_x550em_a:
10750 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10751 break;
10752 default:
10753 break;
10754 }
10755
	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
10760 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
10761 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
10762 if (esdp & IXGBE_ESDP_SDP1)
10763 e_crit(probe, "Fan has stopped, replace the adapter\n");
10764 }
10765
10766 if (allow_unsupported_sfp)
10767 hw->allow_unsupported_sfp = allow_unsupported_sfp;
10768
	/* reset_hw fills in the perm_addr as well */
10770 hw->phy.reset_if_overtemp = true;
10771 err = hw->mac.ops.reset_hw(hw);
10772 hw->phy.reset_if_overtemp = false;
10773 ixgbe_set_eee_capable(adapter);
10774 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
10775 err = 0;
10776 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
10777 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
10778 e_dev_err("Reload the driver after installing a supported module.\n");
10779 goto err_sw_init;
10780 } else if (err) {
10781 e_dev_err("HW Init failed: %d\n", err);
10782 goto err_sw_init;
10783 }
10784
10785#ifdef CONFIG_PCI_IOV
	/* SR-IOV not supported on the 82598 */
10787 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
10788 goto skip_sriov;
10789
10790 ixgbe_init_mbx_params_pf(hw);
10791 hw->mbx.ops = ii->mbx_ops;
10792 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
10793 ixgbe_enable_sriov(adapter, max_vfs);
10794skip_sriov:
10795
10796#endif
10797 netdev->features = NETIF_F_SG |
10798 NETIF_F_TSO |
10799 NETIF_F_TSO6 |
10800 NETIF_F_RXHASH |
10801 NETIF_F_RXCSUM |
10802 NETIF_F_HW_CSUM;
10803
10804#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
10805 NETIF_F_GSO_GRE_CSUM | \
10806 NETIF_F_GSO_IPXIP4 | \
10807 NETIF_F_GSO_IPXIP6 | \
10808 NETIF_F_GSO_UDP_TUNNEL | \
10809 NETIF_F_GSO_UDP_TUNNEL_CSUM)
10810
10811 netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
10812 netdev->features |= NETIF_F_GSO_PARTIAL |
10813 IXGBE_GSO_PARTIAL_FEATURES;
10814
10815 if (hw->mac.type >= ixgbe_mac_82599EB)
10816 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
10817
10818#ifdef CONFIG_IXGBE_IPSEC
10819#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
10820 NETIF_F_HW_ESP_TX_CSUM | \
10821 NETIF_F_GSO_ESP)
10822
10823 if (adapter->ipsec)
10824 netdev->features |= IXGBE_ESP_FEATURES;
10825#endif
10826
10827 netdev->hw_features |= netdev->features |
10828 NETIF_F_HW_VLAN_CTAG_FILTER |
10829 NETIF_F_HW_VLAN_CTAG_RX |
10830 NETIF_F_HW_VLAN_CTAG_TX |
10831 NETIF_F_RXALL |
10832 NETIF_F_HW_L2FW_DOFFLOAD;
10833
10834 if (hw->mac.type >= ixgbe_mac_82599EB)
10835 netdev->hw_features |= NETIF_F_NTUPLE |
10836 NETIF_F_HW_TC;
10837
10838 if (pci_using_dac)
10839 netdev->features |= NETIF_F_HIGHDMA;
10840
10841 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
10842 netdev->hw_enc_features |= netdev->vlan_features;
10843 netdev->mpls_features |= NETIF_F_SG |
10844 NETIF_F_TSO |
10845 NETIF_F_TSO6 |
10846 NETIF_F_HW_CSUM;
10847 netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
10848
	/* set this bit last since it cannot be part of hw_features */
10850 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
10851 NETIF_F_HW_VLAN_CTAG_RX |
10852 NETIF_F_HW_VLAN_CTAG_TX;
10853
10854 netdev->priv_flags |= IFF_UNICAST_FLT;
10855 netdev->priv_flags |= IFF_SUPP_NOFCS;
10856
	/* MTU range: 68 - 9710 */
10858 netdev->min_mtu = ETH_MIN_MTU;
10859 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
10860
10861#ifdef CONFIG_IXGBE_DCB
10862 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
10863 netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
10864#endif
10865
10866#ifdef IXGBE_FCOE
10867 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
10868 unsigned int fcoe_l;
10869
10870 if (hw->mac.ops.get_device_caps) {
10871 hw->mac.ops.get_device_caps(hw, &device_caps);
10872 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
10873 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
10874 }
10875
10876
10877 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
10878 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
10879
10880 netdev->features |= NETIF_F_FSO |
10881 NETIF_F_FCOE_CRC;
10882
10883 netdev->vlan_features |= NETIF_F_FSO |
10884 NETIF_F_FCOE_CRC |
10885 NETIF_F_FCOE_MTU;
10886 }
10887#endif
10888 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
10889 netdev->hw_features |= NETIF_F_LRO;
10890 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
10891 netdev->features |= NETIF_F_LRO;
10892
10893 if (ixgbe_check_fw_error(adapter)) {
10894 err = -EIO;
10895 goto err_sw_init;
10896 }
10897
	/* make sure the EEPROM is good */
10899 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
10900 e_dev_err("The EEPROM Checksum Is Not Valid\n");
10901 err = -EIO;
10902 goto err_sw_init;
10903 }
10904
10905 eth_platform_get_mac_address(&adapter->pdev->dev,
10906 adapter->hw.mac.perm_addr);
10907
10908 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
10909
10910 if (!is_valid_ether_addr(netdev->dev_addr)) {
10911 e_dev_err("invalid MAC address\n");
10912 err = -EIO;
10913 goto err_sw_init;
10914 }
10915
	/* Set hw->mac.addr to permanent MAC address */
10917 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
10918 ixgbe_mac_set_default_filter(adapter);
10919
10920 timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
10921
10922 if (ixgbe_removed(hw->hw_addr)) {
10923 err = -EIO;
10924 goto err_sw_init;
10925 }
10926 INIT_WORK(&adapter->service_task, ixgbe_service_task);
10927 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
10928 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
10929
10930 err = ixgbe_init_interrupt_scheme(adapter);
10931 if (err)
10932 goto err_sw_init;
10933
10934 for (i = 0; i < adapter->num_rx_queues; i++)
10935 u64_stats_init(&adapter->rx_ring[i]->syncp);
10936 for (i = 0; i < adapter->num_tx_queues; i++)
10937 u64_stats_init(&adapter->tx_ring[i]->syncp);
10938 for (i = 0; i < adapter->num_xdp_queues; i++)
10939 u64_stats_init(&adapter->xdp_ring[i]->syncp);
10940
	/* WOL not supported for all devices */
10942 adapter->wol = 0;
10943 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
10944 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
10945 pdev->subsystem_device);
10946 if (hw->wol_enabled)
10947 adapter->wol = IXGBE_WUFC_MAG;
10948
10949 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
10950
	/* save off EEPROM version number */
10952 ixgbe_set_fw_version(adapter);
10953
	/* pick up the PCI bus settings for reporting later */
10955 if (ixgbe_pcie_from_parent(hw))
10956 ixgbe_get_parent_bus_info(adapter);
10957 else
10958 hw->mac.ops.get_bus_info(hw);
10959
	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure no warning is displayed if it can't be fixed.
	 */
10965 switch (hw->mac.type) {
10966 case ixgbe_mac_82598EB:
10967 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
10968 break;
10969 default:
10970 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
10971 break;
10972 }
10973
	/* don't check link if we failed to enumerate functions */
10975 if (expected_gts > 0)
10976 ixgbe_check_minimum_link(adapter, expected_gts);
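
	/* Worked example (illustrative): a dual-port 82599 enumerates two
	 * physical functions, so expected_gts = 2 * 10 = 20 GT/s.  A PCIe
	 * 2.0 x8 link offers 8 lanes * 5 GT/s = 40 GT/s and passes the
	 * check; a PCIe 2.0 x4 link offers 20 GT/s and may trigger the
	 * bandwidth warning once encoding overhead is accounted for.
	 */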
10977
10978 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
10979 if (err)
10980 strlcpy(part_str, "Unknown", sizeof(part_str));
10981 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
10982 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
10983 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
10984 part_str);
10985 else
10986 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
10987 hw->mac.type, hw->phy.type, part_str);
10988
10989 e_dev_info("%pM\n", netdev->dev_addr);
10990
	/* reset the hardware with the new settings */
10992 err = hw->mac.ops.start_hw(hw);
10993 if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
10995 e_dev_warn("This device is a pre-production adapter/LOM. "
10996 "Please be aware there may be issues associated "
10997 "with your hardware. If you are experiencing "
10998 "problems please contact your Intel or hardware "
10999 "representative who provided you with this "
11000 "hardware.\n");
11001 }
11002 strcpy(netdev->name, "eth%d");
11003 pci_set_drvdata(pdev, adapter);
11004 err = register_netdev(netdev);
11005 if (err)
11006 goto err_register;
11007
	/* power down the optics for 82599 SFP+ fiber */
11010 if (hw->mac.ops.disable_tx_laser)
11011 hw->mac.ops.disable_tx_laser(hw);
11012
	/* carrier off reporting is important to ethtool even BEFORE open */
11014 netif_carrier_off(netdev);
11015
11016#ifdef CONFIG_IXGBE_DCA
11017 if (dca_add_requester(&pdev->dev) == 0) {
11018 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
11019 ixgbe_setup_dca(adapter);
11020 }
11021#endif
11022 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
11023 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
11024 for (i = 0; i < adapter->num_vfs; i++)
11025 ixgbe_vf_configuration(pdev, (i | 0x10000000));
11026 }
11027
	/* firmware requires driver version to be 0xFFFFFFFF
	 * since os does not support feature
	 */
11031 if (hw->mac.ops.set_fw_drv_ver)
11032 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
11033 sizeof(UTS_RELEASE) - 1,
11034 UTS_RELEASE);
11035
11036
	/* add san mac addr to netdev */
11038
11039 e_dev_info("%s\n", ixgbe_default_device_descr);
11040
11041#ifdef CONFIG_IXGBE_HWMON
11042 if (ixgbe_sysfs_init(adapter))
11043 e_err(probe, "failed to allocate sysfs resources\n");
11044#endif
11045
11046 ixgbe_dbg_adapter_init(adapter);
11047
	/* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
11049 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
11050 hw->mac.ops.setup_link(hw,
11051 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
11052 true);
11053
11054 err = ixgbe_mii_bus_init(hw);
11055 if (err)
11056 goto err_netdev;
11057
11058 return 0;
11059
11060err_netdev:
11061 unregister_netdev(netdev);
11062err_register:
11063 ixgbe_release_hw_control(adapter);
11064 ixgbe_clear_interrupt_scheme(adapter);
11065err_sw_init:
11066 ixgbe_disable_sriov(adapter);
11067 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
11068 iounmap(adapter->io_addr);
11069 kfree(adapter->jump_tables[0]);
11070 kfree(adapter->mac_table);
11071 kfree(adapter->rss_key);
11072 bitmap_free(adapter->af_xdp_zc_qps);
11073err_ioremap:
11074 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11075 free_netdev(netdev);
11076err_alloc_etherdev:
11077 pci_release_mem_regions(pdev);
11078err_pci_reg:
11079err_dma:
11080 if (!adapter || disable_dev)
11081 pci_disable_device(pdev);
11082 return err;
11083}
11084
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
11094static void ixgbe_remove(struct pci_dev *pdev)
11095{
11096 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11097 struct net_device *netdev;
11098 bool disable_dev;
11099 int i;
11100
	/* if !adapter then we already cleaned up in probe */
11102 if (!adapter)
11103 return;
11104
11105 netdev = adapter->netdev;
11106 ixgbe_dbg_adapter_exit(adapter);
11107
11108 set_bit(__IXGBE_REMOVING, &adapter->state);
11109 cancel_work_sync(&adapter->service_task);
11110
11111 if (adapter->mii_bus)
11112 mdiobus_unregister(adapter->mii_bus);
11113
11114#ifdef CONFIG_IXGBE_DCA
11115 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
11116 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
11117 dca_remove_requester(&pdev->dev);
11118 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
11119 IXGBE_DCA_CTRL_DCA_DISABLE);
11120 }
11121
11122#endif
11123#ifdef CONFIG_IXGBE_HWMON
11124 ixgbe_sysfs_exit(adapter);
11125#endif
11126
	/* remove the added san mac */
11128 ixgbe_del_sanmac_netdev(netdev);
11129
11130#ifdef CONFIG_PCI_IOV
11131 ixgbe_disable_sriov(adapter);
11132#endif
11133 if (netdev->reg_state == NETREG_REGISTERED)
11134 unregister_netdev(netdev);
11135
11136 ixgbe_stop_ipsec_offload(adapter);
11137 ixgbe_clear_interrupt_scheme(adapter);
11138
11139 ixgbe_release_hw_control(adapter);
11140
11141#ifdef CONFIG_DCB
11142 kfree(adapter->ixgbe_ieee_pfc);
11143 kfree(adapter->ixgbe_ieee_ets);
11144
11145#endif
11146 iounmap(adapter->io_addr);
11147 pci_release_mem_regions(pdev);
11148
11149 e_dev_info("complete\n");
11150
11151 for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
11152 if (adapter->jump_tables[i]) {
11153 kfree(adapter->jump_tables[i]->input);
11154 kfree(adapter->jump_tables[i]->mask);
11155 }
11156 kfree(adapter->jump_tables[i]);
11157 }
11158
11159 kfree(adapter->mac_table);
11160 kfree(adapter->rss_key);
11161 bitmap_free(adapter->af_xdp_zc_qps);
11162 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11163 free_netdev(netdev);
11164
11165 pci_disable_pcie_error_reporting(pdev);
11166
11167 if (disable_dev)
11168 pci_disable_device(pdev);
11169}
11170
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
11179static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
11180 pci_channel_state_t state)
11181{
11182 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11183 struct net_device *netdev = adapter->netdev;
11184
11185#ifdef CONFIG_PCI_IOV
11186 struct ixgbe_hw *hw = &adapter->hw;
11187 struct pci_dev *bdev, *vfdev;
11188 u32 dw0, dw1, dw2, dw3;
11189 int vf, pos;
11190 u16 req_id, pf_func;
11191
11192 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
11193 adapter->num_vfs == 0)
11194 goto skip_bad_vf_detection;
11195
11196 bdev = pdev->bus->self;
11197 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
11198 bdev = bdev->bus->self;
11199
11200 if (!bdev)
11201 goto skip_bad_vf_detection;
11202
11203 pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
11204 if (!pos)
11205 goto skip_bad_vf_detection;
11206
11207 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
11208 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
11209 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
11210 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
11211 if (ixgbe_removed(hw->hw_addr))
11212 goto skip_bad_vf_detection;
11213
11214 req_id = dw1 >> 16;
	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
11216 if (!(req_id & 0x0080))
11217 goto skip_bad_vf_detection;
11218
11219 pf_func = req_id & 0x01;
11220 if ((pf_func & 1) == (pdev->devfn & 1)) {
11221 unsigned int device_id;
11222
11223 vf = (req_id & 0x7F) >> 1;
11224 e_dev_err("VF %d has caused a PCIe error\n", vf);
11225 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
11226 "%8.8x\tdw3: %8.8x\n",
11227 dw0, dw1, dw2, dw3);
11228 switch (adapter->hw.mac.type) {
11229 case ixgbe_mac_82599EB:
11230 device_id = IXGBE_82599_VF_DEVICE_ID;
11231 break;
11232 case ixgbe_mac_X540:
11233 device_id = IXGBE_X540_VF_DEVICE_ID;
11234 break;
11235 case ixgbe_mac_X550:
11236 device_id = IXGBE_DEV_ID_X550_VF;
11237 break;
11238 case ixgbe_mac_X550EM_x:
11239 device_id = IXGBE_DEV_ID_X550EM_X_VF;
11240 break;
11241 case ixgbe_mac_x550em_a:
11242 device_id = IXGBE_DEV_ID_X550EM_A_VF;
11243 break;
11244 default:
11245 device_id = 0;
11246 break;
11247 }
11248
		/* Find the pci device of the offending VF */
11250 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
11251 while (vfdev) {
11252 if (vfdev->devfn == (req_id & 0xFF))
11253 break;
11254 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
11255 device_id, vfdev);
11256 }
11257
		/* There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
11262 if (vfdev) {
11263 pcie_flr(vfdev);
			/* Free device reference count */
11265 pci_dev_put(vfdev);
11266 }
11267 }
11268
	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
11275 adapter->vferr_refcount++;
11276
11277 return PCI_ERS_RESULT_RECOVERED;
11278
11279skip_bad_vf_detection:
11280#endif
11281 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
11282 return PCI_ERS_RESULT_DISCONNECT;
11283
11284 if (!netif_device_present(netdev))
11285 return PCI_ERS_RESULT_DISCONNECT;
11286
11287 rtnl_lock();
11288 netif_device_detach(netdev);
11289
11290 if (netif_running(netdev))
11291 ixgbe_close_suspend(adapter);
11292
11293 if (state == pci_channel_io_perm_failure) {
11294 rtnl_unlock();
11295 return PCI_ERS_RESULT_DISCONNECT;
11296 }
11297
11298 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
11299 pci_disable_device(pdev);
11300 rtnl_unlock();
11301
	/* Request a slot reset. */
11303 return PCI_ERS_RESULT_NEED_RESET;
11304}
11305
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
11312static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
11313{
11314 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11315 pci_ers_result_t result;
11316
11317 if (pci_enable_device_mem(pdev)) {
11318 e_err(probe, "Cannot re-enable PCI device after reset.\n");
11319 result = PCI_ERS_RESULT_DISCONNECT;
11320 } else {
11321 smp_mb__before_atomic();
11322 clear_bit(__IXGBE_DISABLED, &adapter->state);
11323 adapter->hw.hw_addr = adapter->io_addr;
11324 pci_set_master(pdev);
11325 pci_restore_state(pdev);
11326 pci_save_state(pdev);
11327
11328 pci_wake_from_d3(pdev, false);
11329
11330 ixgbe_reset(adapter);
11331 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
11332 result = PCI_ERS_RESULT_RECOVERED;
11333 }
11334
11335 return result;
11336}
11337
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
11345static void ixgbe_io_resume(struct pci_dev *pdev)
11346{
11347 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11348 struct net_device *netdev = adapter->netdev;
11349
11350#ifdef CONFIG_PCI_IOV
11351 if (adapter->vferr_refcount) {
11352 e_info(drv, "Resuming after VF err\n");
11353 adapter->vferr_refcount--;
11354 return;
11355 }
11356
11357#endif
11358 rtnl_lock();
11359 if (netif_running(netdev))
11360 ixgbe_open(netdev);
11361
11362 netif_device_attach(netdev);
11363 rtnl_unlock();
11364}
11365
11366static const struct pci_error_handlers ixgbe_err_handler = {
11367 .error_detected = ixgbe_io_error_detected,
11368 .slot_reset = ixgbe_io_slot_reset,
11369 .resume = ixgbe_io_resume,
11370};
11371
11372static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
11373
11374static struct pci_driver ixgbe_driver = {
11375 .name = ixgbe_driver_name,
11376 .id_table = ixgbe_pci_tbl,
11377 .probe = ixgbe_probe,
11378 .remove = ixgbe_remove,
11379 .driver.pm = &ixgbe_pm_ops,
11380 .shutdown = ixgbe_shutdown,
11381 .sriov_configure = ixgbe_pci_sriov_configure,
11382 .err_handler = &ixgbe_err_handler
11383};
11384
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
11391static int __init ixgbe_init_module(void)
11392{
	int ret;

	pr_info("%s\n", ixgbe_driver_string);
11395 pr_info("%s\n", ixgbe_copyright);
11396
11397 ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
11398 if (!ixgbe_wq) {
11399 pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
11400 return -ENOMEM;
11401 }
11402
11403 ixgbe_dbg_init();
11404
11405 ret = pci_register_driver(&ixgbe_driver);
11406 if (ret) {
11407 destroy_workqueue(ixgbe_wq);
11408 ixgbe_dbg_exit();
11409 return ret;
11410 }
11411
11412#ifdef CONFIG_IXGBE_DCA
11413 dca_register_notify(&dca_notifier);
11414#endif
11415
11416 return 0;
11417}
11418
11419module_init(ixgbe_init_module);
11420
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
11427static void __exit ixgbe_exit_module(void)
11428{
11429#ifdef CONFIG_IXGBE_DCA
11430 dca_unregister_notify(&dca_notifier);
11431#endif
11432 pci_unregister_driver(&ixgbe_driver);
11433
11434 ixgbe_dbg_exit();
11435 if (ixgbe_wq) {
11436 destroy_workqueue(ixgbe_wq);
11437 ixgbe_wq = NULL;
11438 }
11439}
11440
11441#ifdef CONFIG_IXGBE_DCA
11442static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
11443 void *p)
11444{
11445 int ret_val;
11446
11447 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
11448 __ixgbe_notify_dca);
11449
11450 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
11451}
11452
11453#endif
11454
11455module_exit(ixgbe_exit_module);