// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <linux/numa.h>
#include <generated/utsrelease.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>
#include <net/xdp_sock_drv.h>
#include <net/xfrm.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
#include "ixgbe_txrx_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			"Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#endif
static const char ixgbe_copyright[] =
			"Copyright (c) 1999-2016 Intel Corporation.";

static const char ixgbe_overheat_msg[] =
		"Network adapter has been stopped because it has over heated. "
		"Restart the computer. If the problem persists, "
		"power off the system and replace the adapter";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598]		= &ixgbe_82598_info,
	[board_82599]		= &ixgbe_82599_info,
	[board_X540]		= &ixgbe_X540_info,
	[board_X550]		= &ixgbe_X550_info,
	[board_X550EM_x]	= &ixgbe_X550EM_x_info,
	[board_x550em_x_fw]	= &ixgbe_x550em_x_fw_info,
	[board_x550em_a]	= &ixgbe_x550em_a_info,
	[board_x550em_a_fw]	= &ixgbe_x550em_a_fw_info,
};
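
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */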
static const struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");

static struct workqueue_struct *ixgbe_wq;

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);

static const struct net_device_ops ixgbe_netdev_ops;

static bool netif_is_ixgbe(struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
}

static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					  u32 reg, u16 *value)
{
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	if (!pci_is_pcie(parent_dev))
		return -1;

	pcie_capability_read_word(parent_dev, reg, value);
	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
		return -1;
	return 0;
}

static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 link_status = 0;
	int err;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space of the
	 * parent, as this device is behind a switch
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	return 0;
}
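
/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from the
 * device. Used to ensure that various locations all have the correct device ID
 * checks.
 */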
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		return true;
	default:
		return false;
	}
}

static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
				     int expected_gts)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev;

	/* Some devices are not connected over PCIe and thus do not negotiate
	 * speed. These devices do not have valid bus info, and thus any report
	 * we generate may not be correct.
	 */
	if (hw->bus.type == ixgbe_bus_type_internal)
		return;

	/* determine whether to use the parent device */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		pdev = adapter->pdev->bus->parent->self;
	else
		pdev = adapter->pdev;

	pcie_print_link_status(pdev);
}

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_bit(__IXGBE_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbe_wq, &adapter->service_task);
}

static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	e_dev_err("Adapter removed\n");
	if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		ixgbe_service_event_schedule(adapter);
}

static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr;
	u32 value;
	int i;

	reg_addr = READ_ONCE(hw->hw_addr);
	if (ixgbe_removed(reg_addr))
		return IXGBE_FAILED_READ_REG;

	/* Register read of 0xFFFFFFF can indicate the adapter has been
	 * removed, so perform several status register reads to determine if
	 * the adapter has been removed.
	 */
	for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
		value = readl(reg_addr + IXGBE_STATUS);
		if (value != IXGBE_FAILED_READ_REG)
			break;
		mdelay(3);
	}

	if (value == IXGBE_FAILED_READ_REG)
		ixgbe_remove_adapter(hw);
	else
		value = readl(reg_addr + reg);
	return value;
}
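
/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers. It checks for device
 * removal by confirming any read that returns all ones by checking the
 * status register value for all ones. This function avoids reading from
 * the hardware if a removal was previously detected, in which case it
 * returns IXGBE_FAILED_READ_REG.
 */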
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
	u32 value;

	if (ixgbe_removed(reg_addr))
		return IXGBE_FAILED_READ_REG;
	if (unlikely(hw->phy.nw_mng_if_sel &
		     IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
		struct ixgbe_adapter *adapter;
		int i;

		for (i = 0; i < 200; ++i) {
			value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
			if (likely(!value))
				goto writes_completed;
			if (value == IXGBE_FAILED_READ_REG) {
				ixgbe_remove_adapter(hw);
				return IXGBE_FAILED_READ_REG;
			}
			udelay(5);
		}

		adapter = hw->back;
		e_warn(hw, "register writes incomplete %08x\n", value);
	}

writes_completed:
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		value = ixgbe_check_remove(hw, reg);
	return value;
}

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
	u16 value;

	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD) {
		ixgbe_remove_adapter(hw);
		return true;
	}
	return false;
}

u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u16 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_WORD;
	pci_read_config_word(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_WORD;
	return value;
}

#ifdef CONFIG_PCI_IOV
static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u32 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_DWORD;
	pci_read_config_dword(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_DWORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_DWORD;
	return value;
}
#endif /* CONFIG_PCI_IOV */

void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (ixgbe_removed(hw->hw_addr))
		return;
	pci_write_config_word(adapter->pdev, reg, value);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{ .name = NULL }
};
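
/*
 * ixgbe_regdump - register printout routine
 */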
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n",
			reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	i = 0;
	while (i < 64) {
		int j;
		char buf[9 * 8 + 1];
		char *p = buf;

		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
		for (j = 0; j < 8; j++)
			p += sprintf(p, " %08x", regs[i++]);
		pr_err("%-15s%s\n", rname, buf);
	}
}

static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
{
	struct ixgbe_tx_buffer *tx_buffer;

	tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
	pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
		n, ring->next_to_use, ring->next_to_clean,
		(u64)dma_unmap_addr(tx_buffer, dma),
		dma_unmap_len(tx_buffer, len),
		tx_buffer->next_to_watch,
		(u64)tx_buffer->time_stamp);
}
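
/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */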
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *ring;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start\n");
		pr_info("%-15s %016lX %016lX\n",
			netdev->name,
			netdev->state,
			dev_trans_start(netdev));
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info(" %s     %s              %s        %s\n",
		"Queue [NTU] [NTC] [bi(ntc)->dma  ]",
		"leng", "ntw", "timestamp");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		ring = adapter->tx_ring[n];
		ixgbe_print_buffer(ring, n);
	}

	for (n = 0; n < adapter->num_xdp_queues; n++) {
		ring = adapter->xdp_ring[n];
		ixgbe_print_buffer(ring, n);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * 82598 Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN  | POPTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
	 *   +--------------------------------------------------------------+
	 *
	 * The 82599+ descriptor adds CC/MAC fields to the second quadword;
	 * the write-back format reports only the STA field.
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s    %s              %s        %s          %s\n",
			"T [desc]     [address 63:0  ] ",
			"[PlPOIdStDDt Ln] [bi->dma       ] ",
			"leng", "ntw", "timestamp", "bi->skb");

		for (i = 0; ring->desc && (i < ring->count); i++) {
			tx_desc = IXGBE_TX_DESC(ring, i);
			tx_buffer = &ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (dma_unmap_len(tx_buffer, len) > 0) {
				const char *ring_desc;

				if (i == ring->next_to_use &&
				    i == ring->next_to_clean)
					ring_desc = " NTC/U";
				else if (i == ring->next_to_use)
					ring_desc = " NTU";
				else if (i == ring->next_to_clean)
					ring_desc = " NTC";
				else
					ring_desc = "";
				pr_info("T [0x%03X]    %016llX %016llX %016llX %08X %p %016llX %p%s",
					i,
					le64_to_cpu((__force __le64)u0->a),
					le64_to_cpu((__force __le64)u0->b),
					(u64)dma_unmap_addr(tx_buffer, dma),
					dma_unmap_len(tx_buffer, len),
					tx_buffer->next_to_watch,
					(u64)tx_buffer->time_stamp,
					tx_buffer->skb,
					ring_desc);

				if (netif_msg_pktdata(adapter) &&
				    tx_buffer->skb)
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16, 1,
						       tx_buffer->skb->data,
						       dma_unmap_len(tx_buffer, len),
						       true);
			}
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Receive Descriptor Formats
	 *
	 * Advanced Receive Descriptor (Read) Format
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 * The write-back format replaces the buffer addresses with RSS
	 * hash/packet type info, checksum status, length and VLAN tag.
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s%s\n",
			"R  [desc]      [ PktBuf     A0] ",
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb       ] ",
			"<-- Adv Rx Read format");
		pr_info("%s%s%s\n",
			"RWB[desc]      [PcsmIpSHl PtRs] ",
			"[vl er S cks ln] ---------------- [bi->skb       ] ",
			"<-- Adv Rx Write-Back format");

		for (i = 0; i < rx_ring->count; i++) {
			const char *ring_desc;

			if (i == rx_ring->next_to_use)
				ring_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				ring_desc = " NTC";
			else
				ring_desc = "";

			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			if (rx_desc->wb.upper.length) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX %016llX ---------------- %p%s\n",
					i,
					le64_to_cpu((__force __le64)u0->a),
					le64_to_cpu((__force __le64)u0->b),
					rx_buffer_info->skb,
					ring_desc);
			} else {
				pr_info("R  [0x%03X]     %016llX %016llX %016llX %p%s\n",
					i,
					le64_to_cpu((__force __le64)u0->a),
					le64_to_cpu((__force __le64)u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb,
					ring_desc);

				if (netif_msg_pktdata(adapter) &&
				    rx_buffer_info->dma) {
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16, 1,
						       page_address(rx_buffer_info->page) +
							       rx_buffer_info->page_offset,
						       ixgbe_rx_bufsz(rx_ring), true);
				}
			}
		}
	}
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
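
/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */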
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
			    u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	int i;
	u32 data;

	if ((hw->fc.current_mode != ixgbe_fc_full) &&
	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
		return;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		break;
	default:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	}
	hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
	if (!data)
		return;

	for (i = 0; i < adapter->num_tx_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->tx_ring[i]->state);

	for (i = 0; i < adapter->num_xdp_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->xdp_ring[i]->state);
}

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 xoff[8] = {0};
	u8 tc;
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
		ixgbe_update_xoff_rx_lfc(adapter);
		return;
	}

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		u32 pxoffrxc;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += pxoffrxc;
		/* Get the TC for given UP */
		tc = netdev_get_prio_tc_map(adapter->netdev, i);
		xoff[tc] += pxoffrxc;
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

		tc = tx_ring->dcb_tc;
		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];

		tc = xdp_ring->dcb_tc;
		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
	}
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->stats.packets;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	unsigned int head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	return ((head <= tail) ? tail : tail + ring->count) - head;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if (tx_done_old == tx_done && tx_pending)
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
					&tx_ring->state);
	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;
	/* reset the countdown */
	clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);

	return false;
}
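
/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/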
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
		e_warn(drv, "initiating reset due to tx timeout\n");
		ixgbe_service_event_schedule(adapter);
	}
}
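
/**
 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
 * @netdev: network interface device structure
 * @queue_index: Tx queue to set
 * @maxrate: desired maximum transmit bitrate
 **/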
static int ixgbe_tx_maxrate(struct net_device *netdev,
			    int queue_index, u32 maxrate)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 bcnrc_val = ixgbe_link_mbps(adapter);

	if (!maxrate)
		return 0;

	/* Calculate the rate factor values to set */
	bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
	bcnrc_val /= maxrate;

	/* clear everything but the rate factor */
	bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
		     IXGBE_RTTBCNRC_RF_DEC_MASK;

	/* enable the rate scheduler */
	bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;

	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);

	return 0;
}
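
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/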
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring, int napi_budget)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;
		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
			total_ipsec++;

		/* free the skb */
		if (ring_is_xdp(tx_ring))
			xdp_return_frame(tx_buffer->xdpf);
		else
			napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;
	adapter->tx_ipsec += total_ipsec;

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct ixgbe_hw *hw = &adapter->hw;

		e_err(drv, "Detected Tx Unit Hang %s\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			ring_is_xdp(tx_ring) ? "(XDP)" : "",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, i,
			tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		if (!ring_is_xdp(tx_ring))
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		       adapter->tx_timeout_count + 1, tx_ring->queue_index);

		ixgbe_tx_timeout_reset(adapter);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	if (ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index)
		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl = 0;
	u16 reg_offset;

	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		txctrl = dca3_get_tag(tx_ring->dev, cpu);

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
		txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		/* for unknown hardware do not write register */
		return;
	}

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
		  IXGBE_DCA_TXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}

static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl = 0;
	u8 reg_idx = rx_ring->reg_idx;

	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		rxctrl = dca3_get_tag(rx_ring->dev, cpu);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		break;
	}

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_RXCTRL_DATA_DCA_EN |
		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	ixgbe_for_each_ring(ring, q_vector->tx)
		ixgbe_update_tx_dca(adapter, ring, cpu);

	ixgbe_for_each_ring(ring, q_vector->rx)
		ixgbe_update_rx_dca(adapter, ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	/* always use CB2 mode, difference is masked in the CB driver */
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_MODE_CB2);
	else
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_DISABLE);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
					IXGBE_DCA_CTRL_DCA_MODE_CB2);
			break;
		}
		fallthrough; /* DCA is disabled */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
					IXGBE_DCA_CTRL_DCA_DISABLE);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */

#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
				 union ixgbe_adv_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

#ifdef IXGBE_FCOE
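/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */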
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
				    union ixgbe_adv_rx_desc *rx_desc)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}

#endif /* IXGBE_FCOE */
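
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/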
static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	bool encap_pkt = false;

	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check for VXLAN or Geneve packet */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
		encap_pkt = true;
		skb->encapsulation = 1;
	}

	/* if IP and error */
	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		/* 82599 errata: UDP frames with a 0 checksum can be
		 * marked as checksum errors.
		 */
		if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
			return;

		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (encap_pkt) {
		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
			return;

		if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}
		/* If we checked the outer header let the stack know */
		skb->csum_level = 1;
	}
}

static unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
}

static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
				    struct ixgbe_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 ixgbe_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IXGBE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbe_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;
	rx_ring->rx_stats.alloc_rx_page++;

	return true;
}
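
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/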
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	bufsz = ixgbe_rx_bufsz(rx_ring);

	do {
		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}

static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
				   struct sk_buff *skb)
{
	u16 hdr_len = skb_headlen(skb);

	/* set gso_size to avoid messing up TCP MSS */
	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
						 IXGBE_CB(skb)->append_cnt);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}

static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
				   struct sk_buff *skb)
{
	/* if append_cnt is 0 then frame is not RSC */
	if (!IXGBE_CB(skb)->append_cnt)
		return;

	rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
	rx_ring->rx_stats.rsc_flush++;

	ixgbe_set_rsc_gso_size(rx_ring, skb);

	/* gso_size is computed using append_cnt so always clear it last */
	IXGBE_CB(skb)->append_cnt = 0;
}
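
/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/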
void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
			      union ixgbe_adv_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	u32 flags = rx_ring->q_vector->adapter->flags;

	ixgbe_update_rsc_stats(rx_ring, skb);

	ixgbe_rx_hash(rx_ring, rx_desc, skb);

	ixgbe_rx_checksum(rx_ring, rx_desc, skb);

	if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
		ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
		ixgbe_ipsec_rx(rx_ring, rx_desc, skb);

	/* record Rx queue, or update MACVLAN statistics */
	if (netif_is_ixgbe(dev))
		skb_record_rx_queue(skb, rx_ring->queue_index);
	else
		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
				 false);

	skb->protocol = eth_type_trans(skb, dev);
}

void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
		  struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}
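
/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/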
static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
			     union ixgbe_adv_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBE_RX_DESC(rx_ring, ntc));

	/* update RSC append count if present */
	if (ring_is_rsc_enabled(rx_ring)) {
		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
				     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);

		if (unlikely(rsc_enabled)) {
			u32 rsc_cnt = le32_to_cpu(rsc_enabled);

			rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;

			/* update ntc based on RSC value */
			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
			ntc &= IXGBE_RXDADV_NEXTP_MASK;
			ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
		}
	}

	/* if we are the last buffer then there is nothing else to do */
	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buffer_info[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}
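
/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */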
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
			    struct sk_buff *skb)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lomem pool
	 */
	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	skb_frag_off_add(frag, pull_len);
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}
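
/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */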
static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
				struct sk_buff *skb)
{
	if (ring_uses_build_skb(rx_ring)) {
		unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1;
		unsigned long offset = (unsigned long)(skb->data) & mask;

		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      offset,
					      skb_headlen(skb),
					      DMA_FROM_DEVICE);
	} else {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      skb_frag_off(frag),
					      skb_frag_size(frag),
					      DMA_FROM_DEVICE);
	}

	/* If the page was released, just unmap it. */
	if (unlikely(IXGBE_CB(skb)->page_released)) {
		dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
				     ixgbe_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IXGBE_RX_DMA_ATTR);
	}
}
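
/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check if the skb is valid; in the XDP case it will be an error pointer.
 * Return true in this case to abort processing and advance to next
 * descriptor.
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/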
bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
			   union ixgbe_adv_rx_desc *rx_desc,
			   struct sk_buff *skb)
{
	struct net_device *netdev = rx_ring->netdev;

	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	/* Verify netdev is present, and that packet does not have any
	 * errors that would be unacceptable to the netdev.
	 */
	if (!netdev ||
	    (unlikely(ixgbe_test_staterr(rx_desc,
					 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
		      !(netdev->features & NETIF_F_RXALL)))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* place header in linear portion of buffer */
	if (!skb_headlen(skb))
		ixgbe_pull_tail(rx_ring, skb);

#ifdef IXGBE_FCOE
	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
		return false;

#endif
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
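
/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/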
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
				struct ixgbe_rx_buffer *old_buff)
{
	struct ixgbe_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buff->dma		= old_buff->dma;
	new_buff->page		= old_buff->page;
	new_buff->page_offset	= old_buff->page_offset;
	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
}

static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
				    int rx_buffer_pgcnt)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
		return false;
#else
	/* The last offset is a bit aggressive in that we assume the
	 * worst case of FCoE being enabled and using a 3K buffer.
	 * However this should have minimal impact as the 1K extra is
	 * still less than one buffer in size.
	 */
#define IXGBE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
	if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
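
/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of data in rx_buffer
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/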
static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
			      struct ixgbe_rx_buffer *rx_buffer,
			      struct sk_buff *skb,
			      unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = rx_ring->rx_offset ?
				SKB_DATA_ALIGN(rx_ring->rx_offset + size) :
				SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
						   union ixgbe_adv_rx_desc *rx_desc,
						   struct sk_buff **skb,
						   const unsigned int size,
						   int *rx_buffer_pgcnt)
{
	struct ixgbe_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buffer->page);
#else
		0;
#endif
	prefetchw(rx_buffer->page);
	*skb = rx_buffer->skb;

	/* Delay unmapping of the first packet. It carries the header
	 * information, HW may still access the header after the writeback.
	 * Only unmap it when EOP is reached
	 */
	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
		if (!*skb)
			goto skip_sync;
	} else {
		if (*skb)
			ixgbe_dma_sync_frag(rx_ring, *skb);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);
skip_sync:
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
				struct ixgbe_rx_buffer *rx_buffer,
				struct sk_buff *skb,
				int rx_buffer_pgcnt)
{
	if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
		/* hand second half of page back to the ring */
		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
			/* the page has been released from the ring */
			IXGBE_CB(skb)->page_released = true;
		} else {
			/* we are not reusing the buffer so unmap it */
			dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
					     ixgbe_rx_pg_size(rx_ring),
					     DMA_FROM_DEVICE,
					     IXGBE_RX_DMA_ATTR);
		}
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
	rx_buffer->skb = NULL;
}

static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
					   struct ixgbe_rx_buffer *rx_buffer,
					   struct xdp_buff *xdp,
					   union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* Note, we get here by enabling legacy-rx via:
	 *
	 *    ethtool --set-priv-flags <dev> legacy-rx on
	 *
	 * In this mode, a full page is given to the Rx descriptor head and
	 * skb->head is given a fixed size amount of room, with the remainder
	 * of the buffer attached as frags below.
	 */

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;

	if (size > IXGBE_RX_HDR_SIZE) {
		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
			IXGBE_CB(skb)->dma = rx_buffer->dma;

		skb_add_rx_frag(skb, 0, rx_buffer->page,
				xdp->data - page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		memcpy(__skb_put(skb, size),
		       xdp->data, ALIGN(size, sizeof(long)));
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
				       struct ixgbe_rx_buffer *rx_buffer,
				       struct xdp_buff *xdp,
				       union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points to xdp->data, otherwise we likely have
	 * a consumer accessing first few bytes of meta data, and then
	 * actual data.
	 */
	net_prefetch(xdp->data_meta);

	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* record DMA address if this is the start of a chain of buffers */
	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
		IXGBE_CB(skb)->dma = rx_buffer->dma;

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *rx_ring,
				     struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 act;

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	if (!xdp_prog)
		goto xdp_out;

	prefetchw(xdp->data_hard_start); /* xdp_frame write */

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto out_failure;
		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
		if (result == IXGBE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		result = IXGBE_XDP_REDIR;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	}
xdp_out:
	return ERR_PTR(-result);
}

static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
					    unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = rx_ring->rx_offset ?
		   SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		   SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
				 struct ixgbe_rx_buffer *rx_buffer,
				 unsigned int size)
{
	unsigned int truesize = ixgbe_rx_frame_truesize(rx_ring, size);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}
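
/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/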
2284static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2285 struct ixgbe_ring *rx_ring,
2286 const int budget)
2287{
2288 unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
2289 struct ixgbe_adapter *adapter = q_vector->adapter;
2290#ifdef IXGBE_FCOE
2291 int ddp_bytes;
2292 unsigned int mss = 0;
2293#endif
2294 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2295 unsigned int offset = rx_ring->rx_offset;
2296 unsigned int xdp_xmit = 0;
2297 struct xdp_buff xdp;
2298
2299
2300#if (PAGE_SIZE < 8192)
2301 frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
2302#endif
2303 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
2304
2305 while (likely(total_rx_packets < budget)) {
2306 union ixgbe_adv_rx_desc *rx_desc;
2307 struct ixgbe_rx_buffer *rx_buffer;
2308 struct sk_buff *skb;
2309 int rx_buffer_pgcnt;
2310 unsigned int size;
2311
2312
2313 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2314 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2315 cleaned_count = 0;
2316 }
2317
2318 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2319 size = le16_to_cpu(rx_desc->wb.upper.length);
2320 if (!size)
2321 break;
2322
2323
2324
2325
2326
2327 dma_rmb();
2328
2329 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
2330
2331
2332 if (!skb) {
2333 unsigned char *hard_start;
2334
2335 hard_start = page_address(rx_buffer->page) +
2336 rx_buffer->page_offset - offset;
2337 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
2338#if (PAGE_SIZE > 4096)
2339
2340 xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
2341#endif
2342 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2343 }
2344
2345 if (IS_ERR(skb)) {
2346 unsigned int xdp_res = -PTR_ERR(skb);
2347
2348 if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
2349 xdp_xmit |= xdp_res;
2350 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2351 } else {
2352 rx_buffer->pagecnt_bias++;
2353 }
2354 total_rx_packets++;
2355 total_rx_bytes += size;
2356 } else if (skb) {
2357 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
2358 } else if (ring_uses_build_skb(rx_ring)) {
2359 skb = ixgbe_build_skb(rx_ring, rx_buffer,
2360 &xdp, rx_desc);
2361 } else {
2362 skb = ixgbe_construct_skb(rx_ring, rx_buffer,
2363 &xdp, rx_desc);
2364 }
2365
2366
2367 if (!skb) {
2368 rx_ring->rx_stats.alloc_rx_buff_failed++;
2369 rx_buffer->pagecnt_bias++;
2370 break;
2371 }
2372
2373 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
2374 cleaned_count++;
2375
2376
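		/* place incomplete frames back on ring for completion */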
2377 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2378 continue;
2379
2380
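		/* verify the packet layout is correct */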
2381 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2382 continue;
2383
2384
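		/* probably a little skewed due to removing CRC */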
2385 total_rx_bytes += skb->len;
2386
2387
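		/* populate checksum, timestamp, VLAN, and protocol */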
2388 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2389
2390#ifdef IXGBE_FCOE
2391
2392 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2393 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2394
2395 if (ddp_bytes > 0) {
2396 if (!mss) {
2397 mss = rx_ring->netdev->mtu -
2398 sizeof(struct fcoe_hdr) -
2399 sizeof(struct fc_frame_header) -
2400 sizeof(struct fcoe_crc_eof);
2401 if (mss > 512)
2402 mss &= ~511;
2403 }
2404 total_rx_bytes += ddp_bytes;
2405 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2406 mss);
2407 }
2408 if (!ddp_bytes) {
2409 dev_kfree_skb_any(skb);
2410 continue;
2411 }
2412 }
2413
2414#endif
2415 ixgbe_rx_skb(q_vector, skb);
2416
2417
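		/* update budget accounting */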
2418 total_rx_packets++;
2419 }
2420
2421 if (xdp_xmit & IXGBE_XDP_REDIR)
2422 xdp_do_flush_map();
2423
2424 if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
2430 wmb();
2431 writel(ring->next_to_use, ring->tail);
2432 }
2433
2434 u64_stats_update_begin(&rx_ring->syncp);
2435 rx_ring->stats.packets += total_rx_packets;
2436 rx_ring->stats.bytes += total_rx_bytes;
2437 u64_stats_update_end(&rx_ring->syncp);
2438 q_vector->rx.total_packets += total_rx_packets;
2439 q_vector->rx.total_bytes += total_rx_bytes;
2440
2441 return total_rx_packets;
2442}
2443
2444
2445
2446
2447
2448
2449
2450
2451static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2452{
2453 struct ixgbe_q_vector *q_vector;
2454 int v_idx;
2455 u32 mask;
2456
2457
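	/* Populate MSIX to EITR Select */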
2458 if (adapter->num_vfs > 32) {
2459 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
2460 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
	}

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
2467 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2468 struct ixgbe_ring *ring;
2469 q_vector = adapter->q_vector[v_idx];
2470
2471 ixgbe_for_each_ring(ring, q_vector->rx)
2472 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2473
2474 ixgbe_for_each_ring(ring, q_vector->tx)
2475 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2476
2477 ixgbe_write_eitr(q_vector);
2478 }
2479
2480 switch (adapter->hw.mac.type) {
2481 case ixgbe_mac_82598EB:
2482 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2483 v_idx);
2484 break;
2485 case ixgbe_mac_82599EB:
2486 case ixgbe_mac_X540:
2487 case ixgbe_mac_X550:
2488 case ixgbe_mac_X550EM_x:
2489 case ixgbe_mac_x550em_a:
2490 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2491 break;
2492 default:
2493 break;
2494 }
2495 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2496
2497
2498 mask = IXGBE_EIMS_ENABLE_MASK;
2499 mask &= ~(IXGBE_EIMS_OTHER |
2500 IXGBE_EIMS_MAILBOX |
2501 IXGBE_EIMS_LSC);
2502
2503 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were computed based on theoretical maximum wire speed and
 * thresholds were set based on testing data as well as attempting to
 * minimize response time while increasing bulk throughput.
 **/
2519static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2520 struct ixgbe_ring_container *ring_container)
2521{
2522 unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
2523 IXGBE_ITR_ADAPTIVE_LATENCY;
2524 unsigned int avg_wire_size, packets, bytes;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
2530 if (!ring_container->ring)
		return;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
2538 if (time_after(next_update, ring_container->next_update))
2539 goto clear_counts;
2540
	packets = ring_container->total_packets;

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * When this occurs just tick up our delay by the minimum value
	 * and hope that this extra delay will prevent us from being called
	 * too often.
	 */
2551 if (!packets) {
2552 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2553 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2554 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2555 itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
2556 goto clear_counts;
2557 }
2558
	bytes = ring_container->total_bytes;

	/* If we have fewer than 4 packets and less than 9000 bytes assume
	 * insufficient data to use the bulk rate limiting approach. We are
	 * likely latency driven.
	 */
2565 if (packets < 4 && bytes < 9000) {
2566 itr = IXGBE_ITR_ADAPTIVE_LATENCY;
2567 goto adjust_by_size;
	}

	/* Between 4 and 48 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
2574 if (packets < 48) {
2575 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2576 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2577 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2578 goto clear_counts;
	}

	/* Between 48 and 96 packets we are in our "goldilocks" zone where
	 * the current delay is about right, so leave it as is.
	 */
2584 if (packets < 96) {
2585 itr = q_vector->itr >> 2;
2586 goto clear_counts;
	}

	/* If packet count is 96 or greater we are likely looking at a
	 * slight overrun of the delay we want. Try halving our delay to
	 * see if that will cut the number of packets in half per interrupt.
	 */
2593 if (packets < 256) {
2594 itr = q_vector->itr >> 3;
2595 if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
2596 itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
2597 goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to apply it.
	 */
2606 itr = IXGBE_ITR_ADAPTIVE_BULK;
2607
adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to
	 * fine tune it just use the formula below to try and dial in an
	 * exact value given the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 50k ints/sec */
		avg_wire_size = 5120;
	} else if (avg_wire_size <= 316) {
		/* 50K ints/sec to 16K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 2720;
	} else if (avg_wire_size <= 1084) {
		/* 16K ints/sec to 9.2K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size < 1968) {
		/* 9.2K ints/sec to 8K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 8K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
2654 if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
		avg_wire_size >>= 1;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
2664 switch (q_vector->adapter->link_speed) {
2665 case IXGBE_LINK_SPEED_10GB_FULL:
2666 case IXGBE_LINK_SPEED_100_FULL:
2667 default:
2668 itr += DIV_ROUND_UP(avg_wire_size,
2669 IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
2670 IXGBE_ITR_ADAPTIVE_MIN_INC;
2671 break;
2672 case IXGBE_LINK_SPEED_2_5GB_FULL:
2673 case IXGBE_LINK_SPEED_1GB_FULL:
2674 case IXGBE_LINK_SPEED_10_FULL:
2675 if (avg_wire_size > 8064)
2676 avg_wire_size = 8064;
2677 itr += DIV_ROUND_UP(avg_wire_size,
2678 IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
2679 IXGBE_ITR_ADAPTIVE_MIN_INC;
2680 break;
2681 }
2682
2683clear_counts:
2684
2685 ring_container->itr = itr;
2686
2687
2688 ring_container->next_update = next_update + 1;
2689
2690 ring_container->total_bytes = 0;
2691 ring_container->total_packets = 0;
}

/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 **/
2702void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2703{
2704 struct ixgbe_adapter *adapter = q_vector->adapter;
2705 struct ixgbe_hw *hw = &adapter->hw;
2706 int v_idx = q_vector->v_idx;
2707 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2708
2709 switch (adapter->hw.mac.type) {
2710 case ixgbe_mac_82598EB:
2711
2712 itr_reg |= (itr_reg << 16);
2713 break;
2714 case ixgbe_mac_82599EB:
2715 case ixgbe_mac_X540:
2716 case ixgbe_mac_X550:
2717 case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		/* set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
2723 itr_reg |= IXGBE_EITR_CNT_WDIS;
2724 break;
2725 default:
2726 break;
2727 }
2728 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2729}
2730
2731static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2732{
2733 u32 new_itr;
2734
2735 ixgbe_update_itr(q_vector, &q_vector->tx);
2736 ixgbe_update_itr(q_vector, &q_vector->rx);
2737
2738
2739 new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
2740
2741
2742 new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
2743 new_itr <<= 2;
2744
2745 if (new_itr != q_vector->itr) {
2746
2747 q_vector->itr = new_itr;
2748
2749 ixgbe_write_eitr(q_vector);
2750 }
}

/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/
2757static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2758{
2759 struct ixgbe_hw *hw = &adapter->hw;
2760 u32 eicr = adapter->interrupt_event;
2761 s32 rc;
2762
2763 if (test_bit(__IXGBE_DOWN, &adapter->state))
2764 return;
2765
2766 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2767 return;
2768
2769 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2770
2771 switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_T3_LOM:
		/* Since the warning interrupt is for both ports
		 * we don't have to check if:
		 *  - This interrupt wasn't for our port.
		 *  - We may have missed the interrupt so always have to
		 *    check if we got a LSC
		 */
2780 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2781 !(eicr & IXGBE_EICR_LSC))
2782 return;
2783
2784 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2785 u32 speed;
2786 bool link_up = false;
2787
2788 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2789
2790 if (link_up)
2791 return;
2792 }
2793
2794
2795 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2796 return;
2797
2798 break;
2799 case IXGBE_DEV_ID_X550EM_A_1G_T:
2800 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2801 rc = hw->phy.ops.check_overtemp(hw);
2802 if (rc != IXGBE_ERR_OVERTEMP)
2803 return;
2804 break;
2805 default:
2806 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2807 return;
2808 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2809 return;
2810 break;
2811 }
2812 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2813
2814 adapter->interrupt_event = 0;
2815}
2816
2817static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2818{
2819 struct ixgbe_hw *hw = &adapter->hw;
2820
2821 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2822 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2823 e_crit(probe, "Fan has stopped, replace the adapter\n");
2824
2825 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2826 }
2827}
2828
2829static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2830{
2831 struct ixgbe_hw *hw = &adapter->hw;
2832
2833 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2834 return;
2835
2836 switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		/* Need to check link state so complete overtemp check
		 * on service task
		 */
2842 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2843 (eicr & IXGBE_EICR_LSC)) &&
2844 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2845 adapter->interrupt_event = eicr;
2846 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2847 ixgbe_service_event_schedule(adapter);
2848 return;
2849 }
2850 return;
2851 case ixgbe_mac_x550em_a:
2852 if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
2853 adapter->interrupt_event = eicr;
2854 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2855 ixgbe_service_event_schedule(adapter);
2856 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
2857 IXGBE_EICR_GPI_SDP0_X550EM_a);
2858 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
2859 IXGBE_EICR_GPI_SDP0_X550EM_a);
2860 }
2861 return;
2862 case ixgbe_mac_X550:
2863 case ixgbe_mac_X540:
2864 if (!(eicr & IXGBE_EICR_TS))
2865 return;
2866 break;
2867 default:
2868 return;
2869 }
2870
2871 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2872}
2873
2874static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2875{
2876 switch (hw->mac.type) {
2877 case ixgbe_mac_82598EB:
2878 if (hw->phy.type == ixgbe_phy_nl)
2879 return true;
2880 return false;
2881 case ixgbe_mac_82599EB:
2882 case ixgbe_mac_X550EM_x:
2883 case ixgbe_mac_x550em_a:
2884 switch (hw->mac.ops.get_media_type(hw)) {
2885 case ixgbe_media_type_fiber:
2886 case ixgbe_media_type_fiber_qsfp:
2887 return true;
2888 default:
2889 return false;
2890 }
2891 default:
2892 return false;
2893 }
2894}
2895
2896static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2897{
2898 struct ixgbe_hw *hw = &adapter->hw;
2899 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2900
2901 if (!ixgbe_is_sfp(hw))
2902 return;
2903
2904
2905 if (hw->mac.type >= ixgbe_mac_X540)
2906 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2907
2908 if (eicr & eicr_mask) {
2909
2910 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2911 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2912 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2913 adapter->sfp_poll_time = 0;
2914 ixgbe_service_event_schedule(adapter);
2915 }
2916 }
2917
2918 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2919 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2920
2921 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2922 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2923 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2924 ixgbe_service_event_schedule(adapter);
2925 }
2926 }
2927}
2928
2929static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2930{
2931 struct ixgbe_hw *hw = &adapter->hw;
2932
2933 adapter->lsc_int++;
2934 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2935 adapter->link_check_timeout = jiffies;
2936 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2937 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2938 IXGBE_WRITE_FLUSH(hw);
2939 ixgbe_service_event_schedule(adapter);
2940 }
2941}
2942
2943static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2944 u64 qmask)
2945{
2946 u32 mask;
2947 struct ixgbe_hw *hw = &adapter->hw;
2948
2949 switch (hw->mac.type) {
2950 case ixgbe_mac_82598EB:
2951 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2952 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2953 break;
2954 case ixgbe_mac_82599EB:
2955 case ixgbe_mac_X540:
2956 case ixgbe_mac_X550:
2957 case ixgbe_mac_X550EM_x:
2958 case ixgbe_mac_x550em_a:
2959 mask = (qmask & 0xFFFFFFFF);
2960 if (mask)
2961 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2962 mask = (qmask >> 32);
2963 if (mask)
2964 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2965 break;
2966 default:
2967 break;
2968 }
2969
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @queues: enable irqs for queues
 * @flush: flush register write
 **/
2978static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2979 bool flush)
2980{
2981 struct ixgbe_hw *hw = &adapter->hw;
2982 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2983
2984
2985 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2986 mask &= ~IXGBE_EIMS_LSC;
2987
2988 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2989 switch (adapter->hw.mac.type) {
2990 case ixgbe_mac_82599EB:
2991 mask |= IXGBE_EIMS_GPI_SDP0(hw);
2992 break;
2993 case ixgbe_mac_X540:
2994 case ixgbe_mac_X550:
2995 case ixgbe_mac_X550EM_x:
2996 case ixgbe_mac_x550em_a:
2997 mask |= IXGBE_EIMS_TS;
2998 break;
2999 default:
3000 break;
3001 }
3002 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3003 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3004 switch (adapter->hw.mac.type) {
3005 case ixgbe_mac_82599EB:
3006 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3007 mask |= IXGBE_EIMS_GPI_SDP2(hw);
3008 fallthrough;
3009 case ixgbe_mac_X540:
3010 case ixgbe_mac_X550:
3011 case ixgbe_mac_X550EM_x:
3012 case ixgbe_mac_x550em_a:
3013 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3014 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3015 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
3016 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
3017 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
3018 mask |= IXGBE_EICR_GPI_SDP0_X540;
3019 mask |= IXGBE_EIMS_ECC;
3020 mask |= IXGBE_EIMS_MAILBOX;
3021 break;
3022 default:
3023 break;
3024 }
3025
3026 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
3027 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
3028 mask |= IXGBE_EIMS_FLOW_DIR;
3029
3030 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
3031 if (queues)
3032 ixgbe_irq_enable_queues(adapter, ~0);
3033 if (flush)
3034 IXGBE_WRITE_FLUSH(&adapter->hw);
3035}
3036
3037static irqreturn_t ixgbe_msix_other(int irq, void *data)
3038{
3039 struct ixgbe_adapter *adapter = data;
3040 struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/* Workaround for silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which is later done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);

	/* The lower 16bits of the EICR register are for the queue interrupts
	 * which should be masked here in order to not accidentally clear them
	 * if the bits are high when ixgbe_msix_other is called. There is a
	 * race condition otherwise which results in possible performance loss
	 * especially if the ixgbe_msix_other interrupt is triggered by an
	 * SFP+ event.
	 */
3058 eicr &= 0xFFFF0000;
3059
3060 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3061
3062 if (eicr & IXGBE_EICR_LSC)
3063 ixgbe_check_lsc(adapter);
3064
3065 if (eicr & IXGBE_EICR_MAILBOX)
3066 ixgbe_msg_task(adapter);
3067
3068 switch (hw->mac.type) {
3069 case ixgbe_mac_82599EB:
3070 case ixgbe_mac_X540:
3071 case ixgbe_mac_X550:
3072 case ixgbe_mac_X550EM_x:
3073 case ixgbe_mac_x550em_a:
3074 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
3075 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3076 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
3077 ixgbe_service_event_schedule(adapter);
3078 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3079 IXGBE_EICR_GPI_SDP0_X540);
3080 }
3081 if (eicr & IXGBE_EICR_ECC) {
3082 e_info(link, "Received ECC Err, initiating reset\n");
3083 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3084 ixgbe_service_event_schedule(adapter);
3085 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3086 }
3087
3088 if (eicr & IXGBE_EICR_FLOW_DIR) {
3089 int reinit_count = 0;
3090 int i;
3091 for (i = 0; i < adapter->num_tx_queues; i++) {
3092 struct ixgbe_ring *ring = adapter->tx_ring[i];
3093 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
3094 &ring->state))
3095 reinit_count++;
3096 }
3097 if (reinit_count) {
3098
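				/* no more flow director interrupts until after init */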
3099 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3100 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
3101 ixgbe_service_event_schedule(adapter);
3102 }
3103 }
3104 ixgbe_check_sfp_event(adapter, eicr);
3105 ixgbe_check_overtemp_event(adapter, eicr);
3106 break;
3107 default:
3108 break;
3109 }
3110
3111 ixgbe_check_fan_failure(adapter, eicr);
3112
3113 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3114 ixgbe_ptp_check_pps_event(adapter);
3115
3116
3117 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3118 ixgbe_irq_enable(adapter, false, false);
3119
3120 return IRQ_HANDLED;
3121}
3122
3123static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
3124{
3125 struct ixgbe_q_vector *q_vector = data;
3126
3127
3128
3129 if (q_vector->rx.ring || q_vector->tx.ring)
3130 napi_schedule_irqoff(&q_vector->napi);
3131
3132 return IRQ_HANDLED;
}

/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used for legacy and MSI, NAPI mode
 **/
3142int ixgbe_poll(struct napi_struct *napi, int budget)
3143{
3144 struct ixgbe_q_vector *q_vector =
3145 container_of(napi, struct ixgbe_q_vector, napi);
3146 struct ixgbe_adapter *adapter = q_vector->adapter;
3147 struct ixgbe_ring *ring;
3148 int per_ring_budget, work_done = 0;
3149 bool clean_complete = true;
3150
3151#ifdef CONFIG_IXGBE_DCA
3152 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3153 ixgbe_update_dca(q_vector);
3154#endif
3155
3156 ixgbe_for_each_ring(ring, q_vector->tx) {
3157 bool wd = ring->xsk_pool ?
3158 ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
3159 ixgbe_clean_tx_irq(q_vector, ring, budget);
3160
3161 if (!wd)
3162 clean_complete = false;
3163 }
3164
3165
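	/* Exit if we are called by netpoll */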
3166 if (budget <= 0)
3167 return budget;
3168
3169
3170
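	/* attempt to distribute budget to each queue fairly, but don't
	 * allow the budget to go below 1 because we'll exit polling
	 */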
3171 if (q_vector->rx.count > 1)
3172 per_ring_budget = max(budget/q_vector->rx.count, 1);
3173 else
3174 per_ring_budget = budget;
3175
3176 ixgbe_for_each_ring(ring, q_vector->rx) {
3177 int cleaned = ring->xsk_pool ?
3178 ixgbe_clean_rx_irq_zc(q_vector, ring,
3179 per_ring_budget) :
3180 ixgbe_clean_rx_irq(q_vector, ring,
3181 per_ring_budget);
3182
3183 work_done += cleaned;
3184 if (cleaned >= per_ring_budget)
3185 clean_complete = false;
3186 }
3187
3188
3189 if (!clean_complete)
3190 return budget;
3191
3192
3193 if (likely(napi_complete_done(napi, work_done))) {
3194 if (adapter->rx_itr_setting & 1)
3195 ixgbe_set_itr(q_vector);
3196 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3197 ixgbe_irq_enable_queues(adapter,
3198 BIT_ULL(q_vector->v_idx));
3199 }
3200
3201 return min(work_done, budget - 1);
}

/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
3211static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
3212{
3213 struct net_device *netdev = adapter->netdev;
3214 unsigned int ri = 0, ti = 0;
3215 int vector, err;
3216
3217 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3218 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3219 struct msix_entry *entry = &adapter->msix_entries[vector];
3220
3221 if (q_vector->tx.ring && q_vector->rx.ring) {
3222 snprintf(q_vector->name, sizeof(q_vector->name),
3223 "%s-TxRx-%u", netdev->name, ri++);
3224 ti++;
3225 } else if (q_vector->rx.ring) {
3226 snprintf(q_vector->name, sizeof(q_vector->name),
3227 "%s-rx-%u", netdev->name, ri++);
3228 } else if (q_vector->tx.ring) {
3229 snprintf(q_vector->name, sizeof(q_vector->name),
3230 "%s-tx-%u", netdev->name, ti++);
3231 } else {
3232
3233 continue;
3234 }
3235 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
3236 q_vector->name, q_vector);
3237 if (err) {
			e_err(probe, "request_irq failed for MSIX interrupt Error: %d\n",
			      err);
3240 goto free_queue_irqs;
3241 }
3242
3243 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3244
3245 irq_set_affinity_hint(entry->vector,
3246 &q_vector->affinity_mask);
3247 }
3248 }
3249
3250 err = request_irq(adapter->msix_entries[vector].vector,
3251 ixgbe_msix_other, 0, netdev->name, adapter);
3252 if (err) {
3253 e_err(probe, "request_irq for msix_other failed: %d\n", err);
3254 goto free_queue_irqs;
3255 }
3256
3257 return 0;
3258
3259free_queue_irqs:
3260 while (vector) {
3261 vector--;
3262 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
3263 NULL);
3264 free_irq(adapter->msix_entries[vector].vector,
3265 adapter->q_vector[vector]);
3266 }
3267 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3268 pci_disable_msix(adapter->pdev);
3269 kfree(adapter->msix_entries);
3270 adapter->msix_entries = NULL;
3271 return err;
}

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
3279static irqreturn_t ixgbe_intr(int irq, void *data)
3280{
3281 struct ixgbe_adapter *adapter = data;
3282 struct ixgbe_hw *hw = &adapter->hw;
3283 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u32 eicr;

	/* Workaround for silicon errata #26 on 82598.  Mask the interrupt
	 * before the read of EICR.
	 */
3290 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3291
3292
3293
3294 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr) {
		/* shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM
		 * finish the workaround of silicon errata on 82598.  Unmask
		 * the interrupt that we masked before the EICR read.
		 */
3303 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3304 ixgbe_irq_enable(adapter, true, true);
3305 return IRQ_NONE;
3306 }
3307
3308 if (eicr & IXGBE_EICR_LSC)
3309 ixgbe_check_lsc(adapter);
3310
3311 switch (hw->mac.type) {
3312 case ixgbe_mac_82599EB:
3313 ixgbe_check_sfp_event(adapter, eicr);
3314 fallthrough;
3315 case ixgbe_mac_X540:
3316 case ixgbe_mac_X550:
3317 case ixgbe_mac_X550EM_x:
3318 case ixgbe_mac_x550em_a:
3319 if (eicr & IXGBE_EICR_ECC) {
3320 e_info(link, "Received ECC Err, initiating reset\n");
3321 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3322 ixgbe_service_event_schedule(adapter);
3323 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3324 }
3325 ixgbe_check_overtemp_event(adapter, eicr);
3326 break;
3327 default:
3328 break;
3329 }
3330
3331 ixgbe_check_fan_failure(adapter, eicr);
3332 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
		ixgbe_ptp_check_pps_event(adapter);

	/* would disable interrupts here but EIAM disabled it */
	napi_schedule_irqoff(&q_vector->napi);

	/* re-enable link(maybe) and non-queue interrupts, no flush.
	 * ixgbe_poll will re-enable the queue interrupts
	 */
3342 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3343 ixgbe_irq_enable(adapter, false, false);
3344
3345 return IRQ_HANDLED;
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
3355static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
3356{
3357 struct net_device *netdev = adapter->netdev;
3358 int err;
3359
3360 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3361 err = ixgbe_request_msix_irqs(adapter);
3362 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
3363 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
3364 netdev->name, adapter);
3365 else
3366 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
3367 netdev->name, adapter);
3368
3369 if (err)
3370 e_err(probe, "request_irq failed, Error %d\n", err);
3371
3372 return err;
3373}
3374
3375static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
3376{
3377 int vector;
3378
3379 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3380 free_irq(adapter->pdev->irq, adapter);
3381 return;
3382 }
3383
3384 if (!adapter->msix_entries)
3385 return;
3386
3387 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3388 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3389 struct msix_entry *entry = &adapter->msix_entries[vector];
3390
3391
3392 if (!q_vector->rx.ring && !q_vector->tx.ring)
3393 continue;
3394
3395
3396 irq_set_affinity_hint(entry->vector, NULL);
3397
3398 free_irq(entry->vector, q_vector);
3399 }
3400
3401 free_irq(adapter->msix_entries[vector].vector, adapter);
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
3408static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3409{
3410 switch (adapter->hw.mac.type) {
3411 case ixgbe_mac_82598EB:
3412 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3413 break;
3414 case ixgbe_mac_82599EB:
3415 case ixgbe_mac_X540:
3416 case ixgbe_mac_X550:
3417 case ixgbe_mac_X550EM_x:
3418 case ixgbe_mac_x550em_a:
3419 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3420 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3421 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3422 break;
3423 default:
3424 break;
3425 }
3426 IXGBE_WRITE_FLUSH(&adapter->hw);
3427 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3428 int vector;
3429
3430 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3431 synchronize_irq(adapter->msix_entries[vector].vector);
3432
3433 synchronize_irq(adapter->msix_entries[vector++].vector);
3434 } else {
3435 synchronize_irq(adapter->pdev->irq);
3436 }
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
3444static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3445{
3446 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3447
3448 ixgbe_write_eitr(q_vector);
3449
3450 ixgbe_set_ivar(adapter, 0, 0, 0);
3451 ixgbe_set_ivar(adapter, 1, 0, 0);
3452
3453 e_info(hw, "Legacy interrupt IVAR setup done\n");
}

/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
3463void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3464 struct ixgbe_ring *ring)
3465{
3466 struct ixgbe_hw *hw = &adapter->hw;
3467 u64 tdba = ring->dma;
3468 int wait_loop = 10;
3469 u32 txdctl = IXGBE_TXDCTL_ENABLE;
3470 u8 reg_idx = ring->reg_idx;
3471
3472 ring->xsk_pool = NULL;
3473 if (ring_is_xdp(ring))
3474 ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
3475
3476
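	/* disable queue to avoid issues while updating state */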
3477 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3478 IXGBE_WRITE_FLUSH(hw);
3479
3480 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3481 (tdba & DMA_BIT_MASK(32)));
3482 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3483 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3484 ring->count * sizeof(union ixgbe_adv_tx_desc));
3485 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3486 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);

	/* set WTHRESH to encourage burst writeback, it should not be set
	 * higher than 1 when:
	 * - ITR is 0 as it could cause false TX hangs
	 * - ITR is set to > 100k int/sec and BQL is enabled
	 *
	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
		txdctl |= 1u << 16;	/* WTHRESH = 1 */
	else
		txdctl |= 8u << 16;	/* WTHRESH = 8 */

	/* Setting PTHRESH to 32 both improves performance
	 * and avoids a TX hang with DFP enabled
	 */
	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */
3510
3511
3512 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3513 ring->atr_sample_rate = adapter->atr_sample_rate;
3514 ring->atr_count = 0;
3515 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3516 } else {
3517 ring->atr_sample_rate = 0;
3518 }
3519
3520
3521 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3522 struct ixgbe_q_vector *q_vector = ring->q_vector;
3523
3524 if (q_vector)
3525 netif_set_xps_queue(ring->netdev,
3526 &q_vector->affinity_mask,
3527 ring->queue_index);
3528 }
3529
3530 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3531
3532
3533 memset(ring->tx_buffer_info, 0,
3534 sizeof(struct ixgbe_tx_buffer) * ring->count);
3535
3536
3537 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3538
3539
3540 if (hw->mac.type == ixgbe_mac_82598EB &&
3541 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3542 return;
3543
3544
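	/* poll to verify queue is enabled */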
3545 do {
3546 usleep_range(1000, 2000);
3547 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3548 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3549 if (!wait_loop)
3550 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
3551}
3552
3553static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3554{
3555 struct ixgbe_hw *hw = &adapter->hw;
3556 u32 rttdcs, mtqc;
3557 u8 tcs = adapter->hw_tcs;
3558
3559 if (hw->mac.type == ixgbe_mac_82598EB)
3560 return;
3561
3562
3563 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3564 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3565 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3566
3567
3568 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3569 mtqc = IXGBE_MTQC_VT_ENA;
3570 if (tcs > 4)
3571 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3572 else if (tcs > 1)
3573 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3574 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3575 IXGBE_82599_VMDQ_4Q_MASK)
3576 mtqc |= IXGBE_MTQC_32VF;
3577 else
3578 mtqc |= IXGBE_MTQC_64VF;
3579 } else {
3580 if (tcs > 4) {
3581 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3582 } else if (tcs > 1) {
3583 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3584 } else {
3585 u8 max_txq = adapter->num_tx_queues +
3586 adapter->num_xdp_queues;
3587 if (max_txq > 63)
3588 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3589 else
3590 mtqc = IXGBE_MTQC_64Q_1PB;
3591 }
3592 }
3593
3594 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3595
3596
3597 if (tcs) {
3598 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3599 sectx |= IXGBE_SECTX_DCB;
3600 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3601 }
3602
3603
3604 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3605 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
}

/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
3614static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3615{
3616 struct ixgbe_hw *hw = &adapter->hw;
3617 u32 dmatxctl;
3618 u32 i;
3619
3620 ixgbe_setup_mtqc(adapter);
3621
3622 if (hw->mac.type != ixgbe_mac_82598EB) {
3623
3624 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3625 dmatxctl |= IXGBE_DMATXCTL_TE;
3626 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3627 }
3628
3629
3630 for (i = 0; i < adapter->num_tx_queues; i++)
3631 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3632 for (i = 0; i < adapter->num_xdp_queues; i++)
3633 ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
3634}
3635
3636static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3637 struct ixgbe_ring *ring)
3638{
3639 struct ixgbe_hw *hw = &adapter->hw;
3640 u8 reg_idx = ring->reg_idx;
3641 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3642
3643 srrctl |= IXGBE_SRRCTL_DROP_EN;
3644
3645 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3646}
3647
3648static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3649 struct ixgbe_ring *ring)
3650{
3651 struct ixgbe_hw *hw = &adapter->hw;
3652 u8 reg_idx = ring->reg_idx;
3653 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3654
3655 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3656
3657 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3658}
3659
3660#ifdef CONFIG_IXGBE_DCB
3661void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3662#else
3663static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3664#endif
3665{
3666 int i;
3667 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3668
3669 if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	/* We should set the drop enable bit if:
	 *  SR-IOV is enabled
	 *   or
	 *  Number of Rx queues > 1 and flow control is disabled
	 *
	 *  This allows us to avoid head of line blocking for security
	 *  and performance reasons.
	 */
3681 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3682 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3683 for (i = 0; i < adapter->num_rx_queues; i++)
3684 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3685 } else {
3686 for (i = 0; i < adapter->num_rx_queues; i++)
3687 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3688 }
3689}
3690
3691#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3692
3693static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3694 struct ixgbe_ring *rx_ring)
3695{
3696 struct ixgbe_hw *hw = &adapter->hw;
3697 u32 srrctl;
3698 u8 reg_idx = rx_ring->reg_idx;
3699
3700 if (hw->mac.type == ixgbe_mac_82598EB) {
		u16 mask = adapter->ring_feature[RING_F_RSS].mask;

		/* if VMDq is not active we must program one srrctl register
		 * per RSS queue since we have enabled RDRXCTL.MVMEN
		 */
3707 reg_idx &= mask;
3708 }
3709
3710
3711 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3712
3713
3714 if (rx_ring->xsk_pool) {
		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);

		/* If the MAC supports setting RXDCTL.RLPML, the
		 * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
		 * RXDCTL.RLPML is set to the actual UMEM buffer
		 * size. If not, then we are stuck with a 1k buffer
		 * size resolution. In this case frames larger than
		 * the UMEM buffer size viewed in a 1k resolution will
		 * be dropped.
		 */
3725 if (hw->mac.type != ixgbe_mac_82599EB)
3726 srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3727 else
3728 srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3729 } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
3730 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3731 } else {
3732 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3733 }
3734
3735
3736 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3737
3738 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}

/**
 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
 * @adapter: device handle
 *
 *  - 82598/82599/X540:     128
 *  - X550(non-SRIOV mode): 512
 *  - X550(SRIOV mode):     64
 */
3749u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3750{
3751 if (adapter->hw.mac.type < ixgbe_mac_X550)
3752 return 128;
3753 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3754 return 64;
3755 else
3756 return 512;
}

/**
 * ixgbe_store_key - Write the RSS key to HW
 * @adapter: device handle
 *
 * Write the RSS key stored in adapter.rss_key to HW.
 */
3765void ixgbe_store_key(struct ixgbe_adapter *adapter)
3766{
3767 struct ixgbe_hw *hw = &adapter->hw;
3768 int i;
3769
3770 for (i = 0; i < 10; i++)
3771 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
}

/**
 * ixgbe_init_rss_key - Initialize adapter RSS key
 * @adapter: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 **/
3780static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
3781{
3782 u32 *rss_key;
3783
3784 if (!adapter->rss_key) {
3785 rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
3786 if (unlikely(!rss_key))
3787 return -ENOMEM;
3788
3789 netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
3790 adapter->rss_key = rss_key;
3791 }
3792
3793 return 0;
}

/**
 * ixgbe_store_reta - Write the RETA table to HW
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
 */
3802void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3803{
3804 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3805 struct ixgbe_hw *hw = &adapter->hw;
3806 u32 reta = 0;
3807 u32 indices_multi;
	u8 *indir_tbl = adapter->rss_indir_tbl;

	/* Fill out the redirection table as follows:
	 *  - 82598:      8 bit wide entries containing pair of 4 bit RSS
	 *    indices.
	 *  - 82599/X540: 8 bit wide entries containing 4 bit RSS index
	 *  - X550:       8 bit wide entries containing 6 bit RSS index
	 */
3816 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3817 indices_multi = 0x11;
3818 else
3819 indices_multi = 0x1;
3820
3821
3822 for (i = 0; i < reta_entries; i++) {
3823 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3824 if ((i & 3) == 3) {
3825 if (i < 128)
3826 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3827 else
3828 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3829 reta);
3830 reta = 0;
3831 }
3832 }
}

/**
 * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
 */
3841static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3842{
3843 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3844 struct ixgbe_hw *hw = &adapter->hw;
3845 u32 vfreta = 0;
3846
3847
3848 for (i = 0; i < reta_entries; i++) {
3849 u16 pool = adapter->num_rx_pools;
3850
3851 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3852 if ((i & 3) != 3)
3853 continue;
3854
3855 while (pool--)
3856 IXGBE_WRITE_REG(hw,
3857 IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
3858 vfreta);
3859 vfreta = 0;
3860 }
3861}
3862
3863static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3864{
3865 u32 i, j;
3866 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

	/* Program table for at least 4 queues w/ SR-IOV so that VFs can
	 * make full use of any rings they may have.  We will use the
	 * PSRTYPE register to control how many rings we use within the PF.
	 */
3873 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
3874 rss_i = 4;
3875
3876
3877 ixgbe_store_key(adapter);
3878
3879
3880 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3881
3882 for (i = 0, j = 0; i < reta_entries; i++, j++) {
3883 if (j == rss_i)
3884 j = 0;
3885
3886 adapter->rss_indir_tbl[i] = j;
3887 }
3888
3889 ixgbe_store_reta(adapter);
3890}
3891
3892static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3893{
3894 struct ixgbe_hw *hw = &adapter->hw;
3895 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3896 int i, j;
3897
3898
3899 for (i = 0; i < 10; i++) {
3900 u16 pool = adapter->num_rx_pools;
3901
3902 while (pool--)
3903 IXGBE_WRITE_REG(hw,
3904 IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
3905 *(adapter->rss_key + i));
3906 }
3907
3908
3909 for (i = 0, j = 0; i < 64; i++, j++) {
3910 if (j == rss_i)
3911 j = 0;
3912
3913 adapter->rss_indir_tbl[i] = j;
3914 }
3915
3916 ixgbe_store_vfreta(adapter);
3917}
3918
3919static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3920{
3921 struct ixgbe_hw *hw = &adapter->hw;
3922 u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3923 u32 rxcsum;
3924
3925
3926 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3927 rxcsum |= IXGBE_RXCSUM_PCSD;
3928 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3929
3930 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3931 if (adapter->ring_feature[RING_F_RSS].mask)
3932 mrqc = IXGBE_MRQC_RSSEN;
3933 } else {
3934 u8 tcs = adapter->hw_tcs;
3935
3936 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3937 if (tcs > 4)
3938 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3939 else if (tcs > 1)
3940 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3941 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3942 IXGBE_82599_VMDQ_4Q_MASK)
3943 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3944 else
				mrqc = IXGBE_MRQC_VMDQRSS64EN;

			/* Enable L3/L4 for Tx Switched packets only for X550,
			 * older devices do not support this feature
			 */
3950 if (hw->mac.type >= ixgbe_mac_X550)
3951 mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3952 } else {
3953 if (tcs > 4)
3954 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3955 else if (tcs > 1)
3956 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3957 else
3958 mrqc = IXGBE_MRQC_RSSEN;
3959 }
3960 }
3961
3962
3963 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3964 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3965 IXGBE_MRQC_RSS_FIELD_IPV6 |
3966 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3967
3968 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3969 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3970 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3971 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3972
3973 if ((hw->mac.type >= ixgbe_mac_X550) &&
3974 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 pool = adapter->num_rx_pools;

		/* Enable VF RSS mode */
		mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

		/* Setup RSS through the VF registers */
3982 ixgbe_setup_vfreta(adapter);
3983 vfmrqc = IXGBE_MRQC_RSSEN;
3984 vfmrqc |= rss_field;
3985
3986 while (pool--)
3987 IXGBE_WRITE_REG(hw,
3988 IXGBE_PFVFMRQC(VMDQ_P(pool)),
3989 vfmrqc);
3990 } else {
3991 ixgbe_setup_reta(adapter);
3992 mrqc |= rss_field;
3993 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3994 }
}

/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 **/
4002static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
4003 struct ixgbe_ring *ring)
4004{
4005 struct ixgbe_hw *hw = &adapter->hw;
4006 u32 rscctrl;
4007 u8 reg_idx = ring->reg_idx;
4008
4009 if (!ring_is_rsc_enabled(ring))
4010 return;
4011
4012 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
	rscctrl |= IXGBE_RSCCTL_RSCEN;

	/* we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65536
	 */
4019 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
4020 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
4021}
4022
4023#define IXGBE_MAX_RX_DESC_POLL 10
4024static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
4025 struct ixgbe_ring *ring)
4026{
4027 struct ixgbe_hw *hw = &adapter->hw;
4028 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
4029 u32 rxdctl;
4030 u8 reg_idx = ring->reg_idx;
4031
4032 if (ixgbe_removed(hw->hw_addr))
4033 return;
4034
4035 if (hw->mac.type == ixgbe_mac_82598EB &&
4036 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
4037 return;
4038
4039 do {
4040 usleep_range(1000, 2000);
4041 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4042 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4043
	if (!wait_loop)
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n",
		      reg_idx);
4048}
4049
4050void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
4051 struct ixgbe_ring *ring)
4052{
4053 struct ixgbe_hw *hw = &adapter->hw;
4054 union ixgbe_adv_rx_desc *rx_desc;
4055 u64 rdba = ring->dma;
4056 u32 rxdctl;
4057 u8 reg_idx = ring->reg_idx;
4058
4059 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4060 ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
4061 if (ring->xsk_pool) {
4062 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4063 MEM_TYPE_XSK_BUFF_POOL,
4064 NULL));
4065 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
4066 } else {
4067 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4068 MEM_TYPE_PAGE_SHARED, NULL));
4069 }
4070
4071
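	/* disable queue to avoid use of these values while updating state */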
4072 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4073 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4074
4075
4076 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4077 IXGBE_WRITE_FLUSH(hw);
4078
4079 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
4080 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
4081 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
4082 ring->count * sizeof(union ixgbe_adv_rx_desc));
4083
4084 IXGBE_WRITE_FLUSH(hw);
4085
4086 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
4087 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
4088 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
4089
4090 ixgbe_configure_srrctl(adapter, ring);
4091 ixgbe_configure_rscctl(adapter, ring);
4092
	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* enable cache line friendly hardware writes:
		 * PTHRESH=32 descriptors (half the internal cache),
		 * this also removes ugly rx_no_buffer_count increment
		 * HTHRESH=4 descriptors (to minimize latency on fetch)
		 * WTHRESH=8 descriptors (to encourage burst writeback)
		 */
4101 rxdctl &= ~0x3FFFFF;
4102 rxdctl |= 0x080420;
4103#if (PAGE_SIZE < 8192)
4104
4105 } else if (hw->mac.type != ixgbe_mac_82599EB) {
4106 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
			    IXGBE_RXDCTL_RLPML_EN);

		/* Limit the maximum frame size so we don't overrun the skb.
		 * This can happen in SRIOV mode when the MTU of the VF is
		 * higher than the MTU of the PF.
		 */
4113 if (ring_uses_build_skb(ring) &&
4114 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
4115 rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
4116 IXGBE_RXDCTL_RLPML_EN;
4117#endif
4118 }
4119
4120 ring->rx_offset = ixgbe_rx_offset(ring);
4121
4122 if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
4123 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
4124
4125 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4126 IXGBE_RXDCTL_RLPML_EN);
4127 rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
4128
4129 ring->rx_buf_len = xsk_buf_len;
4130 }
4131
4132
4133 memset(ring->rx_buffer_info, 0,
4134 sizeof(struct ixgbe_rx_buffer) * ring->count);
4135
4136
4137 rx_desc = IXGBE_RX_DESC(ring, 0);
4138 rx_desc->wb.upper.length = 0;
4139
4140
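	/* enable receive descriptor ring */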
4141 rxdctl |= IXGBE_RXDCTL_ENABLE;
4142 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4143
4144 ixgbe_rx_desc_queue_enable(adapter, ring);
4145 if (ring->xsk_pool)
4146 ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
4147 else
4148 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4149}
4150
4151static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
4152{
4153 struct ixgbe_hw *hw = &adapter->hw;
4154 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
	u16 pool = adapter->num_rx_pools;

	/* PSRTYPE must be initialized in non 82598 adapters */
4158 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4159 IXGBE_PSRTYPE_UDPHDR |
4160 IXGBE_PSRTYPE_IPV4HDR |
4161 IXGBE_PSRTYPE_L2HDR |
4162 IXGBE_PSRTYPE_IPV6HDR;
4163
4164 if (hw->mac.type == ixgbe_mac_82598EB)
4165 return;
4166
4167 if (rss_i > 3)
4168 psrtype |= 2u << 29;
4169 else if (rss_i > 1)
4170 psrtype |= 1u << 29;
4171
4172 while (pool--)
4173 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4174}
4175
4176static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
4177{
4178 struct ixgbe_hw *hw = &adapter->hw;
4179 u16 pool = adapter->num_rx_pools;
4180 u32 reg_offset, vf_shift, vmolr;
4181 u32 gcr_ext, vmdctl;
4182 int i;
4183
4184 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
4185 return;
4186
4187 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4188 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
4189 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
4190 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
4191 vmdctl |= IXGBE_VT_CTL_REPLEN;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

	/* accept untagged packets until a vlan tag is
	 * specifically set for the VMDQ queue/pool
	 */
4197 vmolr = IXGBE_VMOLR_AUPE;
4198 while (pool--)
4199 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
4200
4201 vf_shift = VMDQ_P(0) % 32;
4202 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
4203
4204
4205 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
4206 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
4207 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
4208 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
4209 if (adapter->bridge_mode == BRIDGE_MODE_VEB)
		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
	hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));

	/* clear VLAN promisc flag so VFTA will be updated if necessary */
	adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;

	/* Set up VF register offsets for selected VT Mode,
	 * i.e. 32 or 64 VFs for SR-IOV
	 */
4222 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4223 case IXGBE_82599_VMDQ_8Q_MASK:
4224 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
4225 break;
4226 case IXGBE_82599_VMDQ_4Q_MASK:
4227 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
4228 break;
4229 default:
4230 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
4231 break;
4232 }
4233
4234 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4235
	for (i = 0; i < adapter->num_vfs; i++) {
		/* configure spoof checking */
		ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
					  adapter->vfinfo[i].spoofchk_enabled);

		/* Enable/Disable RSS query feature */
4242 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
4243 adapter->vfinfo[i].rss_query_enabled);
4244 }
4245}
4246
4247static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
4248{
4249 struct ixgbe_hw *hw = &adapter->hw;
4250 struct net_device *netdev = adapter->netdev;
4251 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4252 struct ixgbe_ring *rx_ring;
4253 int i;
4254 u32 mhadd, hlreg0;
4255
4256#ifdef IXGBE_FCOE
4257
4258 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
4259 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
4260 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4261
4262#endif
4263
4264
4265 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4266 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
4267
4268 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4269 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
4270 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4271 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
4272
4273 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4274 }
4275
4276 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4277
4278 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4279 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4280
4281
4282
4283
4284
4285 for (i = 0; i < adapter->num_rx_queues; i++) {
4286 rx_ring = adapter->rx_ring[i];
4287
4288 clear_ring_rsc_enabled(rx_ring);
4289 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4290 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4291
4292 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4293 set_ring_rsc_enabled(rx_ring);
4294
4295 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
4296 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4297
4298 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
4299 continue;
4300
4301 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4302
4303#if (PAGE_SIZE < 8192)
4304 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4305 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4306
4307 if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
4308 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
4309 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4310#endif
4311 }
4312}
4313
4314static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
4315{
4316 struct ixgbe_hw *hw = &adapter->hw;
4317 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4318
4319 switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 *
		 * also, the manual doesn't mention it clearly but DCA hints
		 * will only use queue 0's tags unless this bit is set
		 */
4331 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
4332 break;
4333 case ixgbe_mac_X550:
4334 case ixgbe_mac_X550EM_x:
4335 case ixgbe_mac_x550em_a:
4336 if (adapter->num_vfs)
4337 rdrxctl |= IXGBE_RDRXCTL_PSP;
4338 fallthrough;
4339 case ixgbe_mac_82599EB:
4340 case ixgbe_mac_X540:
4341
4342 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
4343 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
4344 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4345
4346 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
4347 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4348 break;
4349 default:
4350
4351 return;
4352 }
4353
4354 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}

/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
4363static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
4364{
4365 struct ixgbe_hw *hw = &adapter->hw;
4366 int i;
4367 u32 rxctrl, rfctl;
4368
4369
4370 hw->mac.ops.disable_rx(hw);
4371
4372 ixgbe_setup_psrtype(adapter);
	ixgbe_setup_rdrxctl(adapter);

	/* RSC Setup */
	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	rfctl &= ~IXGBE_RFCTL_RSC_DIS;
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
		rfctl |= IXGBE_RFCTL_RSC_DIS;

	/* disable NFS filtering */
	rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);

	/* Program registers for the distribution of queues */
	ixgbe_setup_mrqc(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbe_set_rx_buffer_len(adapter);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
4395 for (i = 0; i < adapter->num_rx_queues; i++)
4396 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
4397
4398 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4399
4400 if (hw->mac.type == ixgbe_mac_82598EB)
4401 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4402
4403
4404 rxctrl |= IXGBE_RXCTRL_RXEN;
4405 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4406}
4407
4408static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
4409 __be16 proto, u16 vid)
4410{
4411 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4412 struct ixgbe_hw *hw = &adapter->hw;
4413
4414
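	/* add VID to filter table */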
4415 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4416 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
4417
4418 set_bit(vid, adapter->active_vlans);
4419
4420 return 0;
4421}
4422
4423static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
4424{
4425 u32 vlvf;
	int idx;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* Search for the vlan id in the VLVF entries */
4433 for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
4434 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
4435 if ((vlvf & VLAN_VID_MASK) == vlan)
4436 break;
4437 }
4438
4439 return idx;
4440}
4441
4442void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
4443{
4444 struct ixgbe_hw *hw = &adapter->hw;
4445 u32 bits, word;
4446 int idx;
4447
4448 idx = ixgbe_find_vlvf_entry(hw, vid);
4449 if (!idx)
		return;

	/* See if any other pools are set for this VLAN filter
	 * entry other than the PF.
	 */
4455 word = idx * 2 + (VMDQ_P(0) / 32);
4456 bits = ~BIT(VMDQ_P(0) % 32);
4457 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4458
4459
4460 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
4461 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4462 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
4463 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
4464 }
4465}
4466
4467static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
4468 __be16 proto, u16 vid)
4469{
4470 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4471 struct ixgbe_hw *hw = &adapter->hw;
4472
4473
4474 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4475 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
4476
4477 clear_bit(vid, adapter->active_vlans);
4478
4479 return 0;
}

/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */
4486static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
4487{
4488 struct ixgbe_hw *hw = &adapter->hw;
4489 u32 vlnctrl;
4490 int i, j;
4491
4492 switch (hw->mac.type) {
4493 case ixgbe_mac_82598EB:
4494 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4495 vlnctrl &= ~IXGBE_VLNCTRL_VME;
4496 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4497 break;
4498 case ixgbe_mac_82599EB:
4499 case ixgbe_mac_X540:
4500 case ixgbe_mac_X550:
4501 case ixgbe_mac_X550EM_x:
4502 case ixgbe_mac_x550em_a:
4503 for (i = 0; i < adapter->num_rx_queues; i++) {
4504 struct ixgbe_ring *ring = adapter->rx_ring[i];
4505
4506 if (!netif_is_ixgbe(ring->netdev))
4507 continue;
4508
4509 j = ring->reg_idx;
4510 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4511 vlnctrl &= ~IXGBE_RXDCTL_VME;
4512 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4513 }
4514 break;
4515 default:
4516 break;
4517 }
}

/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */
4524static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4525{
4526 struct ixgbe_hw *hw = &adapter->hw;
4527 u32 vlnctrl;
4528 int i, j;
4529
4530 switch (hw->mac.type) {
4531 case ixgbe_mac_82598EB:
4532 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4533 vlnctrl |= IXGBE_VLNCTRL_VME;
4534 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4535 break;
4536 case ixgbe_mac_82599EB:
4537 case ixgbe_mac_X540:
4538 case ixgbe_mac_X550:
4539 case ixgbe_mac_X550EM_x:
4540 case ixgbe_mac_x550em_a:
4541 for (i = 0; i < adapter->num_rx_queues; i++) {
4542 struct ixgbe_ring *ring = adapter->rx_ring[i];
4543
4544 if (!netif_is_ixgbe(ring->netdev))
4545 continue;
4546
4547 j = ring->reg_idx;
4548 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4549 vlnctrl |= IXGBE_RXDCTL_VME;
4550 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4551 }
4552 break;
4553 default:
4554 break;
4555 }
4556}
4557
4558static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4559{
4560 struct ixgbe_hw *hw = &adapter->hw;
4561 u32 vlnctrl, i;
4562
4563 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4564
4565 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
4566
4567 vlnctrl |= IXGBE_VLNCTRL_VFE;
4568 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4569 } else {
4570 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4571 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4572 return;
	}

	/* Nothing to do for 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* We are already in VLAN promisc, nothing to do */
	if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
		return;

	/* Set flag so we don't redo unnecessary work */
	adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;

	/* Add PF to all active pools */
4587 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4588 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4589 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4590
4591 vlvfb |= BIT(VMDQ_P(0) % 32);
4592 IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4593 }
4594
4595
4596 for (i = hw->mac.vft_size; i--;)
4597 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4598}
4599
4600#define VFTA_BLOCK_SIZE 8
4601static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4602{
4603 struct ixgbe_hw *hw = &adapter->hw;
4604 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4605 u32 vid_start = vfta_offset * 32;
4606 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4607 u32 i, vid, word, bits;
4608
4609 for (i = IXGBE_VLVF_ENTRIES; --i;) {
		u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));

		/* pull VLAN ID from VLVF */
		vid = vlvf & VLAN_VID_MASK;

		/* only concern ourselves with a certain range */
		if (vid < vid_start || vid >= vid_end)
			continue;

		if (vlvf) {
			/* record VLAN ID in VFTA */
			vfta[(vid - vid_start) / 32] |= BIT(vid % 32);

			/* if PF is part of this then continue */
			if (test_bit(vid, adapter->active_vlans))
				continue;
		}

		/* remove PF from the pool */
4629 word = i * 2 + VMDQ_P(0) / 32;
4630 bits = ~BIT(VMDQ_P(0) % 32);
4631 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4632 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4633 }
4634
4635
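	/* extract values from active_vlans and write back to VFTA */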
4636 for (i = VFTA_BLOCK_SIZE; i--;) {
4637 vid = (vfta_offset + i) * 32;
4638 word = vid / BITS_PER_LONG;
4639 bits = vid % BITS_PER_LONG;
4640
4641 vfta[i] |= adapter->active_vlans[word] >> bits;
4642
4643 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4644 }
4645}
4646
4647static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4648{
4649 struct ixgbe_hw *hw = &adapter->hw;
4650 u32 vlnctrl, i;
4651
4652
4653 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4654 vlnctrl |= IXGBE_VLNCTRL_VFE;
4655 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4656
4657 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
4658 hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* We are not in VLAN promisc, nothing to do */
	if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
		return;

	/* Set flag so we don't redo unnecessary work */
4666 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4667
4668 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
4669 ixgbe_scrub_vfta(adapter, i);
4670}
4671
4672static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4673{
4674 u16 vid = 1;
4675
4676 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4677
4678 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
4679 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

/**
 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *                0 on no addresses written
 *                X on writing X addresses to MTA
 **/
4691static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4692{
4693 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4694 struct ixgbe_hw *hw = &adapter->hw;
4695
4696 if (!netif_running(netdev))
4697 return 0;
4698
4699 if (hw->mac.ops.update_mc_addr_list)
4700 hw->mac.ops.update_mc_addr_list(hw, netdev);
4701 else
4702 return -ENOMEM;
4703
4704#ifdef CONFIG_PCI_IOV
4705 ixgbe_restore_vf_multicasts(adapter);
4706#endif
4707
4708 return netdev_mc_count(netdev);
4709}
4710
4711#ifdef CONFIG_PCI_IOV
4712void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4713{
4714 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4715 struct ixgbe_hw *hw = &adapter->hw;
4716 int i;
4717
4718 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4719 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4720
4721 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4722 hw->mac.ops.set_rar(hw, i,
4723 mac_table->addr,
4724 mac_table->pool,
4725 IXGBE_RAH_AV);
4726 else
4727 hw->mac.ops.clear_rar(hw, i);
4728 }
4729}
4730
4731#endif
4732static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4733{
4734 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4735 struct ixgbe_hw *hw = &adapter->hw;
4736 int i;
4737
4738 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4739 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
4740 continue;
4741
4742 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4743
4744 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4745 hw->mac.ops.set_rar(hw, i,
4746 mac_table->addr,
4747 mac_table->pool,
4748 IXGBE_RAH_AV);
4749 else
4750 hw->mac.ops.clear_rar(hw, i);
4751 }
4752}
4753
4754static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4755{
4756 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4757 struct ixgbe_hw *hw = &adapter->hw;
4758 int i;
4759
4760 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4761 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4762 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4763 }
4764
4765 ixgbe_sync_mac_table(adapter);
4766}
4767
4768static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
4769{
4770 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4771 struct ixgbe_hw *hw = &adapter->hw;
4772 int i, count = 0;
4773
4774 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4775 /* do not count default RAR as available */
4776 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
4777 continue;
4778
4779 /* only count unused and addresses that belong to us */
4780 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
4781 if (mac_table->pool != pool)
4782 continue;
4783 }
4784
4785 count++;
4786 }
4787
4788 return count;
4789}
4790
4791/* this function destroys the first RAR entry */
4792static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
4793{
4794 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4795 struct ixgbe_hw *hw = &adapter->hw;
4796
4797 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
4798 mac_table->pool = VMDQ_P(0);
4799
4800 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
4801
4802 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
4803 IXGBE_RAH_AV);
4804}
4805
4806int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4807 const u8 *addr, u16 pool)
4808{
4809 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4810 struct ixgbe_hw *hw = &adapter->hw;
4811 int i;
4812
4813 if (is_zero_ether_addr(addr))
4814 return -EINVAL;
4815
4816 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4817 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4818 continue;
4819
4820 ether_addr_copy(mac_table->addr, addr);
4821 mac_table->pool = pool;
4822
4823 mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
4824 IXGBE_MAC_STATE_IN_USE;
4825
4826 ixgbe_sync_mac_table(adapter);
4827
4828 return i;
4829 }
4830
4831 return -ENOMEM;
4832}
4833
4834int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
4835 const u8 *addr, u16 pool)
4836{
4837 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4838 struct ixgbe_hw *hw = &adapter->hw;
4839 int i;
4840
4841 if (is_zero_ether_addr(addr))
4842 return -EINVAL;
4843
4844 /* search table for addr, if found clear IN_USE flag and sync */
4845 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4846 /* we can only delete an entry if it is in use */
4847 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
4848 continue;
4849
4850 if (mac_table->pool != pool)
4851 continue;
4852
4853 if (!ether_addr_equal(addr, mac_table->addr))
4854 continue;
4855
4856 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4857 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4858
4859 ixgbe_sync_mac_table(adapter);
4860
4861 return 0;
4862 }
4863
4864 return -ENOMEM;
4865}
4866
4867static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
4868{
4869 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4870 int ret;
4871
4872 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
4873
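 /* ixgbe_add_mac_filter() returns the RAR index (>= 0) on success;
 * __dev_uc_sync() only cares about failure, so clamp success to 0
 */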
4874 return min_t(int, ret, 0);
4875}
4876
4877static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4878{
4879 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4880
4881 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
4882
4883 return 0;
4884}
4885
4886/**
4887 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
4888 * @netdev: network interface device structure
4889 *
4890 * The set_rx_method entry point is called whenever the unicast/multicast
4891 * address list or the network interface flags are updated.  This routine is
4892 * responsible for configuring the hardware for proper unicast, multicast and
4893 * promiscuous mode.
4894 **/
4895void ixgbe_set_rx_mode(struct net_device *netdev)
4896{
4897 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4898 struct ixgbe_hw *hw = &adapter->hw;
4899 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4900 netdev_features_t features = netdev->features;
4901 int count;
4902
4903 /* Check for Promiscuous and All Multicast modes */
4904 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4905
4906 /* set all bits that we expect to always be set */
4907 fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
4908 fctrl |= IXGBE_FCTRL_BAM;
4909 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
4910 fctrl |= IXGBE_FCTRL_PMCF;
4911
4912 /* clear the bits we are changing the status of */
4913 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4914 if (netdev->flags & IFF_PROMISC) {
4915 hw->addr_ctrl.user_set_promisc = true;
4916 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4917 vmolr |= IXGBE_VMOLR_MPE;
4918 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4919 } else {
4920 if (netdev->flags & IFF_ALLMULTI) {
4921 fctrl |= IXGBE_FCTRL_MPE;
4922 vmolr |= IXGBE_VMOLR_MPE;
4923 }
4924 hw->addr_ctrl.user_set_promisc = false;
4925 }
4926
4927 /* Write addresses to available RAR registers, if there is not
4928 * sufficient space to store all the addresses then enable
4929 * promiscuous mode so that we can receive all packets with the same
4930 * MAC for TX
4931 */
4932 if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4933 fctrl |= IXGBE_FCTRL_UPE;
4934 vmolr |= IXGBE_VMOLR_ROPE;
4935 }
4936
4937 /* Write addresses to the MTA, if the attempt fails
4938 * then we should just turn on promiscuous mode so
4939 * that we can at least receive multicast traffic
4940 */
4941 count = ixgbe_write_mc_addr_list(netdev);
4942 if (count < 0) {
4943 fctrl |= IXGBE_FCTRL_MPE;
4944 vmolr |= IXGBE_VMOLR_MPE;
4945 } else if (count) {
4946 vmolr |= IXGBE_VMOLR_ROMPE;
4947 }
4948
4949 if (hw->mac.type != ixgbe_mac_82598EB) {
4950 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4951 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
4952 IXGBE_VMOLR_ROPE);
4953 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
4954 }
4955
4956 /* This is useful for sniffing bad packets. */
4957 if (features & NETIF_F_RXALL) {
4958 /* UPE and MPE will be handled by the normal
4959 * PROMISC logic above */
4960 fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
4961 IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
4962 IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */
4963
4964 fctrl &= ~(IXGBE_FCTRL_DPF);
4965 /* NOTE: VLAN filtering is disabled by setting PROMISC */
4966 }
4967
4968 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4969
4970 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4971 ixgbe_vlan_strip_enable(adapter);
4972 else
4973 ixgbe_vlan_strip_disable(adapter);
4974
4975 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
4976 ixgbe_vlan_promisc_disable(adapter);
4977 else
4978 ixgbe_vlan_promisc_enable(adapter);
4979}
4980
4981static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4982{
4983 int q_idx;
4984
4985 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
4986 napi_enable(&adapter->q_vector[q_idx]->napi);
4987}
4988
4989static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
4990{
4991 int q_idx;
4992
4993 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
4994 napi_disable(&adapter->q_vector[q_idx]->napi);
4995}
4996
4997static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
4998{
4999 struct ixgbe_adapter *adapter = netdev_priv(dev);
5000 struct ixgbe_hw *hw = &adapter->hw;
5001 struct udp_tunnel_info ti;
5002
5003 udp_tunnel_nic_get_port(dev, table, 0, &ti);
5004 if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
5005 adapter->vxlan_port = ti.port;
5006 else
5007 adapter->geneve_port = ti.port;
5008
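 /* VXLANCTRL holds both tunnel ports: the VXLAN UDP port in the low
 * 16 bits and the GENEVE UDP port shifted into the high 16 bits
 */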
5009 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL,
5010 ntohs(adapter->vxlan_port) |
5011 ntohs(adapter->geneve_port) <<
5012 IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT);
5013 return 0;
5014}
5015
5016static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = {
5017 .sync_table = ixgbe_udp_tunnel_sync,
5018 .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
5019 .tables = {
5020 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
5021 },
5022};
5023
5024static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = {
5025 .sync_table = ixgbe_udp_tunnel_sync,
5026 .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
5027 .tables = {
5028 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
5029 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
5030 },
5031};
5032
5033#ifdef CONFIG_IXGBE_DCB
5034/**
5035 * ixgbe_configure_dcb - configure DCB hardware support
5036 * @adapter: board private structure
5037 *
5038 * Configures the traffic class credits and the per-TC GSO limits
5039 * based on the current DCB configuration (CEE or IEEE mode), and
5040 * programs the RSS hash configuration for each traffic class.
5041 */
5042static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
5043{
5044 struct ixgbe_hw *hw = &adapter->hw;
5045 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
5046
5047 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
5048 if (hw->mac.type == ixgbe_mac_82598EB)
5049 netif_set_gso_max_size(adapter->netdev, 65536);
5050 return;
5051 }
5052
5053 if (hw->mac.type == ixgbe_mac_82598EB)
5054 netif_set_gso_max_size(adapter->netdev, 32768);
5055
5056#ifdef IXGBE_FCOE
5057 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
5058 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
5059#endif
5060
5061 /* reconfigure the hardware */
5062 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
5063 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5064 DCB_TX_CONFIG);
5065 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5066 DCB_RX_CONFIG);
5067 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
5068 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
5069 ixgbe_dcb_hw_ets(&adapter->hw,
5070 adapter->ixgbe_ieee_ets,
5071 max_frame);
5072 ixgbe_dcb_hw_pfc_config(&adapter->hw,
5073 adapter->ixgbe_ieee_pfc->pfc_en,
5074 adapter->ixgbe_ieee_ets->prio_tc);
5075 }
5076
5077 /* Enable RSS Hash per TC */
5078 if (hw->mac.type != ixgbe_mac_82598EB) {
5079 u32 msb = 0;
5080 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
5081
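 /* compute the number of bits needed to represent the highest RSS
 * index (fls of rss_i) for the per-TC RSS queue count
 */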
5082 while (rss_i) {
5083 msb++;
5084 rss_i >>= 1;
5085 }
5086
5087 /* write msb to all 8 TCs in one write */
5088 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
5089 }
5090}
5091#endif
5092
5093/* Additional bittime to account for IXGBE framing */
5094#define IXGBE_ETH_FRAMING 20
5095
5096/**
5097 * ixgbe_hpbthresh - calculate high water mark for flow control
5098 *
5099 * @adapter: board private structure to calculate for
5100 * @pb: packet buffer to calculate
5101 */
5102static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
5103{
5104 struct ixgbe_hw *hw = &adapter->hw;
5105 struct net_device *dev = adapter->netdev;
5106 int link, tc, kb, marker;
5107 u32 dv_id, rx_pba;
5108
5109 /* Calculate max LAN frame size */
5110 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
5111
5112#ifdef IXGBE_FCOE
5113 /* FCoE traffic class uses FCOE jumbo frames */
5114 if ((dev->features & NETIF_F_FCOE_MTU) &&
5115 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5116 (pb == ixgbe_fcoe_get_tc(adapter)))
5117 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5118#endif
5119
5120 /* Calculate delay value for device */
5121 switch (hw->mac.type) {
5122 case ixgbe_mac_X540:
5123 case ixgbe_mac_X550:
5124 case ixgbe_mac_X550EM_x:
5125 case ixgbe_mac_x550em_a:
5126 dv_id = IXGBE_DV_X540(link, tc);
5127 break;
5128 default:
5129 dv_id = IXGBE_DV(link, tc);
5130 break;
5131 }
5132
5133 /* Loopback switch introduces additional latency */
5134 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5135 dv_id += IXGBE_B2BT(tc);
5136
5137 /* Delay value is calculated in bit times convert to KB */
5138 kb = IXGBE_BT2KB(dv_id);
5139 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
5140
5141 marker = rx_pba - kb;
5142
5143 /* It is possible that the packet buffer is not large enough
5144 * to provide required headroom. In this case throw an error
5145 * to user and do the best we can.
5146 */
5147 if (marker < 0) {
5148 e_warn(drv, "Packet Buffer(%i) cannot provide enough "
5149 "headroom to support flow control. "
5150 "Decrease MTU or number of traffic classes\n", pb);
5151 marker = tc + 1;
5152 }
5153
5154 return marker;
5155}
5156
5157/**
5158 * ixgbe_lpbthresh - calculate low water mark for flow control
5159 *
5160 * @adapter: board private structure to calculate for
5161 * @pb: packet buffer to calculate
5162 */
5163static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
5164{
5165 struct ixgbe_hw *hw = &adapter->hw;
5166 struct net_device *dev = adapter->netdev;
5167 int tc;
5168 u32 dv_id;
5169
5170 /* Calculate max LAN frame size */
5171 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
5172
5173#ifdef IXGBE_FCOE
5174 /* FCoE traffic class uses FCOE jumbo frames */
5175 if ((dev->features & NETIF_F_FCOE_MTU) &&
5176 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5177 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
5178 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5179#endif
5180
5181 /* Calculate delay value for device */
5182 switch (hw->mac.type) {
5183 case ixgbe_mac_X540:
5184 case ixgbe_mac_X550:
5185 case ixgbe_mac_X550EM_x:
5186 case ixgbe_mac_x550em_a:
5187 dv_id = IXGBE_LOW_DV_X540(tc);
5188 break;
5189 default:
5190 dv_id = IXGBE_LOW_DV(tc);
5191 break;
5192 }
5193
5194 /* Delay value is calculated in bit times convert to KB */
5195 return IXGBE_BT2KB(dv_id);
5196}
5197
5198/*
5199 * ixgbe_pbthresh_setup - calculate and setup high low water marks
5200 */
5201static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
5202{
5203 struct ixgbe_hw *hw = &adapter->hw;
5204 int num_tc = adapter->hw_tcs;
5205 int i;
5206
5207 if (!num_tc)
5208 num_tc = 1;
5209
5210 for (i = 0; i < num_tc; i++) {
5211 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
5212 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
5213
5214 /* Low water marks must not be larger than high water marks */
5215 if (hw->fc.low_water[i] > hw->fc.high_water[i])
5216 hw->fc.low_water[i] = 0;
5217 }
5218
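 /* zero the high water marks for any remaining traffic classes so
 * stale thresholds from a previous configuration are never used
 */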
5219 for (; i < MAX_TRAFFIC_CLASS; i++)
5220 hw->fc.high_water[i] = 0;
5221}
5222
5223static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
5224{
5225 struct ixgbe_hw *hw = &adapter->hw;
5226 int hdrm;
5227 u8 tc = adapter->hw_tcs;
5228
5229 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5230 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5231 hdrm = 32 << adapter->fdir_pballoc;
5232 else
5233 hdrm = 0;
5234
5235 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
5236 ixgbe_pbthresh_setup(adapter);
5237}
5238
5239static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
5240{
5241 struct ixgbe_hw *hw = &adapter->hw;
5242 struct hlist_node *node2;
5243 struct ixgbe_fdir_filter *filter;
5244 u8 queue;
5245
5246 spin_lock(&adapter->fdir_perfect_lock);
5247
5248 if (!hlist_empty(&adapter->fdir_filter_list))
5249 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
5250
5251 hlist_for_each_entry_safe(filter, node2,
5252 &adapter->fdir_filter_list, fdir_node) {
5253 if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
5254 queue = IXGBE_FDIR_DROP_QUEUE;
5255 } else {
5256 u32 ring = ethtool_get_flow_spec_ring(filter->action);
5257 u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
5258
5259 if (!vf && (ring >= adapter->num_rx_queues)) {
5260 e_err(drv, "FDIR restore failed without VF, ring: %u\n",
5261 ring);
5262 continue;
5263 } else if (vf &&
5264 ((vf > adapter->num_vfs) ||
5265 ring >= adapter->num_rx_queues_per_pool)) {
5266 e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
5267 vf, ring);
5268 continue;
5269 }
5270
5271 /* Map the ring onto the absolute queue index */
5272 if (!vf)
5273 queue = adapter->rx_ring[ring]->reg_idx;
5274 else
5275 queue = ((vf - 1) *
5276 adapter->num_rx_queues_per_pool) + ring;
5277 }
5278
5279 ixgbe_fdir_write_perfect_filter_82599(hw,
5280 &filter->filter, filter->sw_idx, queue);
5281 }
5282
5283 spin_unlock(&adapter->fdir_perfect_lock);
5284}
5285
5286/**
5287 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
5288 * @rx_ring: ring to free buffers from
5289 **/
5290static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
5291{
5292 u16 i = rx_ring->next_to_clean;
5293 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
5294
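 /* AF_XDP zero-copy rings are cleaned via the xsk helpers and skip
 * the page-based teardown below
 */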
5295 if (rx_ring->xsk_pool) {
5296 ixgbe_xsk_clean_rx_ring(rx_ring);
5297 goto skip_free;
5298 }
5299
5300 /* Free all the Rx ring sk_buffs */
5301 while (i != rx_ring->next_to_alloc) {
5302 if (rx_buffer->skb) {
5303 struct sk_buff *skb = rx_buffer->skb;
5304 if (IXGBE_CB(skb)->page_released)
5305 dma_unmap_page_attrs(rx_ring->dev,
5306 IXGBE_CB(skb)->dma,
5307 ixgbe_rx_pg_size(rx_ring),
5308 DMA_FROM_DEVICE,
5309 IXGBE_RX_DMA_ATTR);
5310 dev_kfree_skb(skb);
5311 }
5312
5313 /* Invalidate cache lines that may have been written to by
5314 * device so that we avoid corrupting memory.
5315 */
5316 dma_sync_single_range_for_cpu(rx_ring->dev,
5317 rx_buffer->dma,
5318 rx_buffer->page_offset,
5319 ixgbe_rx_bufsz(rx_ring),
5320 DMA_FROM_DEVICE);
5321
5322 /* free resources associated with mapping */
5323 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
5324 ixgbe_rx_pg_size(rx_ring),
5325 DMA_FROM_DEVICE,
5326 IXGBE_RX_DMA_ATTR);
5327 __page_frag_cache_drain(rx_buffer->page,
5328 rx_buffer->pagecnt_bias);
5329
5330 i++;
5331 rx_buffer++;
5332 if (i == rx_ring->count) {
5333 i = 0;
5334 rx_buffer = rx_ring->rx_buffer_info;
5335 }
5336 }
5337
5338skip_free:
5339 rx_ring->next_to_alloc = 0;
5340 rx_ring->next_to_clean = 0;
5341 rx_ring->next_to_use = 0;
5342}
5343
5344static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
5345 struct ixgbe_fwd_adapter *accel)
5346{
5347 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
5348 int num_tc = netdev_get_num_tc(adapter->netdev);
5349 struct net_device *vdev = accel->netdev;
5350 int i, baseq, err;
5351
5352 baseq = accel->pool * adapter->num_rx_queues_per_pool;
5353 netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
5354 accel->pool, adapter->num_rx_pools,
5355 baseq, baseq + adapter->num_rx_queues_per_pool);
5356
5357 accel->rx_base_queue = baseq;
5358 accel->tx_base_queue = baseq;
5359
5360
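 /* bind one subordinate channel queue group per traffic class so the
 * macvlan's queues map onto this pool's ring range
 */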
5361 for (i = 0; i < num_tc; i++)
5362 netdev_bind_sb_channel_queue(adapter->netdev, vdev,
5363 i, rss_i, baseq + (rss_i * i));
5364
5365 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5366 adapter->rx_ring[baseq + i]->netdev = vdev;
5367
5368 /* guarantee all rings are updated before we update the
5369 * MAC address filter.
5370 */
5371 wmb();
5372
5373 /* ixgbe_add_mac_filter will return an index if it succeeds, so we
5374 * need to only treat it as an error value if it is negative.
5375 */
5376 err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
5377 VMDQ_P(accel->pool));
5378 if (err >= 0)
5379 return 0;
5380
5381 /* if we cannot add the MAC rule then disable the offload */
5382 macvlan_release_l2fw_offload(vdev);
5383
5384 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5385 adapter->rx_ring[baseq + i]->netdev = NULL;
5386
5387 netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
5388
5389 /* unbind the queues and drop the subordinate channel config */
5390 netdev_unbind_sb_channel(adapter->netdev, vdev);
5391 netdev_set_sb_channel(vdev, 0);
5392
5393 clear_bit(accel->pool, adapter->fwd_bitmask);
5394 kfree(accel);
5395
5396 return err;
5397}
5398
5399static int ixgbe_macvlan_up(struct net_device *vdev,
5400 struct netdev_nested_priv *priv)
5401{
5402 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
5403 struct ixgbe_fwd_adapter *accel;
5404
5405 if (!netif_is_macvlan(vdev))
5406 return 0;
5407
5408 accel = macvlan_accel_priv(vdev);
5409 if (!accel)
5410 return 0;
5411
5412 ixgbe_fwd_ring_up(adapter, accel);
5413
5414 return 0;
5415}
5416
5417static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
5418{
5419 struct netdev_nested_priv priv = {
5420 .data = (void *)adapter,
5421 };
5422
5423 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5424 ixgbe_macvlan_up, &priv);
5425}
5426
5427static void ixgbe_configure(struct ixgbe_adapter *adapter)
5428{
5429 struct ixgbe_hw *hw = &adapter->hw;
5430
5431 ixgbe_configure_pb(adapter);
5432#ifdef CONFIG_IXGBE_DCB
5433 ixgbe_configure_dcb(adapter);
5434#endif
5435
5436 /* We must restore virtualization before VLANs or else
5437 * the VLVF registers will not be populated
5438 */
5439 ixgbe_configure_virtualization(adapter);
5440
5441 ixgbe_set_rx_mode(adapter->netdev);
5442 ixgbe_restore_vlan(adapter);
5443 ixgbe_ipsec_restore(adapter);
5444
5445 switch (hw->mac.type) {
5446 case ixgbe_mac_82599EB:
5447 case ixgbe_mac_X540:
5448 hw->mac.ops.disable_rx_buff(hw);
5449 break;
5450 default:
5451 break;
5452 }
5453
5454 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5455 ixgbe_init_fdir_signature_82599(&adapter->hw,
5456 adapter->fdir_pballoc);
5457 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5458 ixgbe_init_fdir_perfect_82599(&adapter->hw,
5459 adapter->fdir_pballoc);
5460 ixgbe_fdir_filter_restore(adapter);
5461 }
5462
5463 switch (hw->mac.type) {
5464 case ixgbe_mac_82599EB:
5465 case ixgbe_mac_X540:
5466 hw->mac.ops.enable_rx_buff(hw);
5467 break;
5468 default:
5469 break;
5470 }
5471
5472#ifdef CONFIG_IXGBE_DCA
5473 /* configure DCA */
5474 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
5475 ixgbe_setup_dca(adapter);
5476#endif
5477
5478#ifdef IXGBE_FCOE
5479 /* configure FCoE L2 filters, redirection table, and Rx control */
5480 ixgbe_configure_fcoe(adapter);
5481
5482#endif
5483 ixgbe_configure_tx(adapter);
5484 ixgbe_configure_rx(adapter);
5485 ixgbe_configure_dfwd(adapter);
5486}
5487
5488/**
5489 * ixgbe_sfp_link_config - set up SFP+ link
5490 * @adapter: pointer to private adapter struct
5491 **/
5492static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
5493{
5494 /*
5495 * We are assuming the worst case scenario here, and that
5496 * is that an SFP was inserted/removed after the reset
5497 * but before SFP detection was enabled. As such the best
5498 * solution is to just start searching as soon as we start
5499 */
5500 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5501 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5502
5503 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5504 adapter->sfp_poll_time = 0;
5505}
5506
5507/**
5508 * ixgbe_non_sfp_link_config - set up non-SFP+ link
5509 * @hw: pointer to private hardware struct
5510 *
5511 * Returns 0 on success, negative on failure
5512 **/
5513static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
5514{
5515 u32 speed;
5516 bool autoneg, link_up = false;
5517 int ret = IXGBE_ERR_LINK_SETUP;
5518
5519 if (hw->mac.ops.check_link)
5520 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
5521
5522 if (ret)
5523 return ret;
5524
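 /* if no speeds are advertised, fall back to the device's link
 * capability set, masking off the unsupported 2.5G and 5G rates
 */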
5525 speed = hw->phy.autoneg_advertised;
5526 if (!speed && hw->mac.ops.get_link_capabilities) {
5527 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5528 &autoneg);
5529 speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
5530 IXGBE_LINK_SPEED_2_5GB_FULL);
5531 }
5532
5533 if (ret)
5534 return ret;
5535
5536 if (hw->mac.ops.setup_link)
5537 ret = hw->mac.ops.setup_link(hw, speed, link_up);
5538
5539 return ret;
5540}
5541
5542static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5543{
5544 struct ixgbe_hw *hw = &adapter->hw;
5545 u32 gpie = 0;
5546
5547 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5548 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5549 IXGBE_GPIE_OCD;
5550 gpie |= IXGBE_GPIE_EIAME;
5551
5552 /* use EIAM to auto-mask when MSI-X interrupt is asserted
5553 * this saves a register write for every interrupt
5554 */
5555 switch (hw->mac.type) {
5556 case ixgbe_mac_82598EB:
5557 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5558 break;
5559 case ixgbe_mac_82599EB:
5560 case ixgbe_mac_X540:
5561 case ixgbe_mac_X550:
5562 case ixgbe_mac_X550EM_x:
5563 case ixgbe_mac_x550em_a:
5564 default:
5565 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5566 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5567 break;
5568 }
5569 } else {
5570 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
5571 * specifically only auto mask tx and rx interrupts */
5572 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5573 }
5574
5575 /* XXX: to interrupt immediately for EICS writes, enable this */
5576 /* gpie |= IXGBE_GPIE_EIMEN; */
5577
5578 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
5579 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5580
5581 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
5582 case IXGBE_82599_VMDQ_8Q_MASK:
5583 gpie |= IXGBE_GPIE_VTMODE_16;
5584 break;
5585 case IXGBE_82599_VMDQ_4Q_MASK:
5586 gpie |= IXGBE_GPIE_VTMODE_32;
5587 break;
5588 default:
5589 gpie |= IXGBE_GPIE_VTMODE_64;
5590 break;
5591 }
5592 }
5593
5594 /* Enable Thermal over heat sensor interrupt */
5595 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
5596 switch (adapter->hw.mac.type) {
5597 case ixgbe_mac_82599EB:
5598 gpie |= IXGBE_SDP0_GPIEN_8259X;
5599 break;
5600 default:
5601 break;
5602 }
5603 }
5604
5605 /* Enable fan failure interrupt */
5606 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
5607 gpie |= IXGBE_SDP1_GPIEN(hw);
5608
5609 switch (hw->mac.type) {
5610 case ixgbe_mac_82599EB:
5611 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5612 break;
5613 case ixgbe_mac_X550EM_x:
5614 case ixgbe_mac_x550em_a:
5615 gpie |= IXGBE_SDP0_GPIEN_X540;
5616 break;
5617 default:
5618 break;
5619 }
5620
5621 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5622}
5623
5624static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
5625{
5626 struct ixgbe_hw *hw = &adapter->hw;
5627 int err;
5628 u32 ctrl_ext;
5629
5630 ixgbe_get_hw_control(adapter);
5631 ixgbe_setup_gpie(adapter);
5632
5633 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
5634 ixgbe_configure_msix(adapter);
5635 else
5636 ixgbe_configure_msi_and_legacy(adapter);
5637
5638 /* enable the optics for 82599 SFP+ fiber */
5639 if (hw->mac.ops.enable_tx_laser)
5640 hw->mac.ops.enable_tx_laser(hw);
5641
5642 if (hw->phy.ops.set_phy_power)
5643 hw->phy.ops.set_phy_power(hw, true);
5644
5645 smp_mb__before_atomic();
5646 clear_bit(__IXGBE_DOWN, &adapter->state);
5647 ixgbe_napi_enable_all(adapter);
5648
5649 if (ixgbe_is_sfp(hw)) {
5650 ixgbe_sfp_link_config(adapter);
5651 } else {
5652 err = ixgbe_non_sfp_link_config(hw);
5653 if (err)
5654 e_err(probe, "link_config FAILED %d\n", err);
5655 }
5656
5657 /* clear any pending interrupts, may auto mask */
5658 IXGBE_READ_REG(hw, IXGBE_EICR);
5659 ixgbe_irq_enable(adapter, true, true);
5660
5661 /*
5662 * If this adapter has a fan, check to see if we had a failure
5663 * before we enabled the interrupt.
5664 */
5665 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5666 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5667 if (esdp & IXGBE_ESDP_SDP1)
5668 e_crit(drv, "Fan has stopped, replace the adapter\n");
5669 }
5670
5671 /* bring the link up in the watchdog, this could race with our first
5672 * link up interrupt but shouldn't be a problem */
5673 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5674 adapter->link_check_timeout = jiffies;
5675 mod_timer(&adapter->service_timer, jiffies);
5676
5677 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
5678 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5679 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5680 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5681}
5682
5683void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5684{
5685 /* put off any impending NetWatchDogTimeout */
5686 netif_trans_update(adapter->netdev);
5687
5688 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5689 usleep_range(1000, 2000);
5690 if (adapter->hw.phy.type == ixgbe_phy_fw)
5691 ixgbe_watchdog_link_is_down(adapter);
5692 ixgbe_down(adapter);
5693
5694 /* If SR-IOV enabled then wait a bit before bringing the adapter
5695 * back up to give the VFs time to respond to the reset. The
5696 * two second wait is based upon the watchdog timer cycle in
5697 * the VF driver.
5698 */
5699 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5700 msleep(2000);
5701 ixgbe_up(adapter);
5702 clear_bit(__IXGBE_RESETTING, &adapter->state);
5703}
5704
5705void ixgbe_up(struct ixgbe_adapter *adapter)
5706{
5707 /* hardware has been reset, we need to reload some things */
5708 ixgbe_configure(adapter);
5709
5710 ixgbe_up_complete(adapter);
5711}
5712
5713static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
5714{
5715 u16 devctl2;
5716
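 /* the PCIe completion timeout from DEVCTL2 bounds how long pending
 * DMA may remain outstanding, so it drives the polling budget below
 */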
5717 pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
5718
5719 switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
5720 case IXGBE_PCIDEVCTRL2_17_34s:
5721 case IXGBE_PCIDEVCTRL2_4_8s:
5722 /* For now we cap the upper limit on delay to 2 seconds
5723 * as we end up going up to 34 seconds of delay in worst
5724 * case timeout value.
5725 */
5726 case IXGBE_PCIDEVCTRL2_1_2s:
5727 return 2000000ul;
5728 case IXGBE_PCIDEVCTRL2_260_520ms:
5729 return 520000ul;
5730 case IXGBE_PCIDEVCTRL2_65_130ms:
5731 return 130000ul;
5732 case IXGBE_PCIDEVCTRL2_16_32ms:
5733 return 32000ul;
5734 case IXGBE_PCIDEVCTRL2_1_2ms:
5735 return 2000ul;
5736 case IXGBE_PCIDEVCTRL2_50_100us:
5737 return 100ul;
5738 case IXGBE_PCIDEVCTRL2_16_32ms_def:
5739 return 32000ul;
5740 default:
5741 break;
5742 }
5743
5744 /* We shouldn't need to hit this path, but just in case
5745 * default to 32 ms, matching the 16-32 ms default range
5746 */
5747 return 32000ul;
5748}
5749
5750void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
5751{
5752 unsigned long wait_delay, delay_interval;
5753 struct ixgbe_hw *hw = &adapter->hw;
5754 int i, wait_loop;
5755 u32 rxdctl;
5756
5757 /* disable receives */
5758 hw->mac.ops.disable_rx(hw);
5759
5760 if (ixgbe_removed(hw->hw_addr))
5761 return;
5762
5763 /* disable all enabled Rx queues */
5764 for (i = 0; i < adapter->num_rx_queues; i++) {
5765 struct ixgbe_ring *ring = adapter->rx_ring[i];
5766 u8 reg_idx = ring->reg_idx;
5767
5768 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5769 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5770 rxdctl |= IXGBE_RXDCTL_SWFLSH;
5771
5772 /* write value back with RXDCTL.ENABLE bit cleared */
5773 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
5774 }
5775
5776 /* RXDCTL.EN may not change on 82598 if link is down, so skip it */
5777 if (hw->mac.type == ixgbe_mac_82598EB &&
5778 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5779 return;
5780
5781 /* Determine our minimum delay interval. We will increase this value
5782 * with each subsequent test. This way if the device returns quickly
5783 * we should spend as little time as possible waiting, however as
5784 * the time increases we will wait for larger periods of time.
5785 *
5786 * The delay interval is based on the PCIe completion timeout read
5787 * above: each loop iteration sleeps for the current wait_delay and
5788 * then grows it by twice the base interval, so the total time spent
5789 * polling is bounded by a small multiple of the completion timeout.
5790 */
5791
5792 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5793
5794 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5795 wait_delay = delay_interval;
5796
5797 while (wait_loop--) {
5798 usleep_range(wait_delay, wait_delay + 10);
5799 wait_delay += delay_interval * 2;
5800 rxdctl = 0;
5801
5802 /* OR together the reading of all queues; if any queue
5803 * still reports RXDCTL.ENABLE set, the Rx path has not yet
5804 * fully quiesced and we must keep polling.
5805 */
5806
5807 for (i = 0; i < adapter->num_rx_queues; i++) {
5808 struct ixgbe_ring *ring = adapter->rx_ring[i];
5809 u8 reg_idx = ring->reg_idx;
5810
5811 rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5812 }
5813
5814 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
5815 return;
5816 }
5817
5818 e_err(drv,
5819 "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5820}
5821
5822void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
5823{
5824 unsigned long wait_delay, delay_interval;
5825 struct ixgbe_hw *hw = &adapter->hw;
5826 int i, wait_loop;
5827 u32 txdctl;
5828
5829 if (ixgbe_removed(hw->hw_addr))
5830 return;
5831
5832 /* disable all enabled Tx queues */
5833 for (i = 0; i < adapter->num_tx_queues; i++) {
5834 struct ixgbe_ring *ring = adapter->tx_ring[i];
5835 u8 reg_idx = ring->reg_idx;
5836
5837 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5838 }
5839
5840 /* disable all enabled XDP Tx queues */
5841 for (i = 0; i < adapter->num_xdp_queues; i++) {
5842 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5843 u8 reg_idx = ring->reg_idx;
5844
5845 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5846 }
5847
5848 /* If the link is not up there shouldn't be much in the way of
5849 * pending transactions. Those that are left will be flushed out
5850 * when the reset logic goes through the flush sequence to clean out
5851 * the pending Tx transactions.
5852 */
5853 if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5854 goto dma_engine_disable;
5855
5856 /* Determine our minimum delay interval. We will increase this value
5857 * with each subsequent test. This way if the device returns quickly
5858 * we should spend as little time as possible waiting, however as
5859 * the time increases we will wait for larger periods of time.
5860 *
5861 * The delay interval is based on the PCIe completion timeout read
5862 * above: each loop iteration sleeps for the current wait_delay and
5863 * then grows it by twice the base interval, so the total time spent
5864 * polling is bounded by a small multiple of the completion timeout.
5865 */
5866
5867 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5868
5869 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5870 wait_delay = delay_interval;
5871
5872 while (wait_loop--) {
5873 usleep_range(wait_delay, wait_delay + 10);
5874 wait_delay += delay_interval * 2;
5875 txdctl = 0;
5876
5877 /* OR together the reading of all queues; if any queue
5878 * still reports TXDCTL.ENABLE set, the Tx path has not yet
5879 * fully quiesced and we must keep polling.
5880 */
5881
5882 for (i = 0; i < adapter->num_tx_queues; i++) {
5883 struct ixgbe_ring *ring = adapter->tx_ring[i];
5884 u8 reg_idx = ring->reg_idx;
5885
5886 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5887 }
5888 for (i = 0; i < adapter->num_xdp_queues; i++) {
5889 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5890 u8 reg_idx = ring->reg_idx;
5891
5892 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5893 }
5894
5895 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
5896 goto dma_engine_disable;
5897 }
5898
5899 e_err(drv,
5900 "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5901
5902dma_engine_disable:
5903 /* Disable the Tx DMA engine on 82599 and later MAC */
5904 switch (hw->mac.type) {
5905 case ixgbe_mac_82599EB:
5906 case ixgbe_mac_X540:
5907 case ixgbe_mac_X550:
5908 case ixgbe_mac_X550EM_x:
5909 case ixgbe_mac_x550em_a:
5910 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5911 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5912 ~IXGBE_DMATXCTL_TE));
5913 fallthrough;
5914 default:
5915 break;
5916 }
5917}
5918
5919void ixgbe_reset(struct ixgbe_adapter *adapter)
5920{
5921 struct ixgbe_hw *hw = &adapter->hw;
5922 struct net_device *netdev = adapter->netdev;
5923 int err;
5924
5925 if (ixgbe_removed(hw->hw_addr))
5926 return;
5927
5928 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5929 usleep_range(1000, 2000);
5930
5931 /* clear all SFP and link config related flags while holding SFP_INIT */
5932 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5933 IXGBE_FLAG2_SFP_NEEDS_RESET);
5934 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5935
5936 err = hw->mac.ops.init_hw(hw);
5937 switch (err) {
5938 case 0:
5939 case IXGBE_ERR_SFP_NOT_PRESENT:
5940 case IXGBE_ERR_SFP_NOT_SUPPORTED:
5941 break;
5942 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5943 e_dev_err("master disable timed out\n");
5944 break;
5945 case IXGBE_ERR_EEPROM_VERSION:
5946 /* We are running on a pre-production device, log a warning */
5947 e_dev_warn("This device is a pre-production adapter/LOM. "
5948 "Please be aware there may be issues associated with "
5949 "your hardware. If you are experiencing problems "
5950 "please contact your Intel or hardware "
5951 "representative who provided you with this "
5952 "hardware.\n");
5953 break;
5954 default:
5955 e_dev_err("Hardware Error: %d\n", err);
5956 }
5957
5958 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5959
5960 /* flush entries out of MAC table */
5961 ixgbe_flush_sw_mac_table(adapter);
5962 __dev_uc_unsync(netdev, NULL);
5963
5964 /* do not flush user set addresses */
5965 ixgbe_mac_set_default_filter(adapter);
5966
5967 /* update SAN MAC vmdq pool selection */
5968 if (hw->mac.san_mac_rar_index)
5969 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5970
5971 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5972 ixgbe_ptp_reset(adapter);
5973
5974 if (hw->phy.ops.set_phy_power) {
5975 if (!netif_running(adapter->netdev) && !adapter->wol)
5976 hw->phy.ops.set_phy_power(hw, false);
5977 else
5978 hw->phy.ops.set_phy_power(hw, true);
5979 }
5980}
5981
5982/**
5983 * ixgbe_clean_tx_ring - Free Tx Buffers
5984 * @tx_ring: ring to be cleaned
5985 **/
5986static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5987{
5988 u16 i = tx_ring->next_to_clean;
5989 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
5990
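 /* AF_XDP zero-copy rings are cleaned via the xsk helpers */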
5991 if (tx_ring->xsk_pool) {
5992 ixgbe_xsk_clean_tx_ring(tx_ring);
5993 goto out;
5994 }
5995
5996 while (i != tx_ring->next_to_use) {
5997 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
5998
5999 /* Free all the Tx ring sk_buffs */
6000 if (ring_is_xdp(tx_ring))
6001 xdp_return_frame(tx_buffer->xdpf);
6002 else
6003 dev_kfree_skb_any(tx_buffer->skb);
6004
6005 /* unmap skb header data */
6006 dma_unmap_single(tx_ring->dev,
6007 dma_unmap_addr(tx_buffer, dma),
6008 dma_unmap_len(tx_buffer, len),
6009 DMA_TO_DEVICE);
6010
6011 /* check for eop_desc to determine the end of the packet */
6012 eop_desc = tx_buffer->next_to_watch;
6013 tx_desc = IXGBE_TX_DESC(tx_ring, i);
6014
6015 /* unmap remaining buffers */
6016 while (tx_desc != eop_desc) {
6017 tx_buffer++;
6018 tx_desc++;
6019 i++;
6020 if (unlikely(i == tx_ring->count)) {
6021 i = 0;
6022 tx_buffer = tx_ring->tx_buffer_info;
6023 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6024 }
6025
6026 /* unmap any remaining paged data */
6027 if (dma_unmap_len(tx_buffer, len))
6028 dma_unmap_page(tx_ring->dev,
6029 dma_unmap_addr(tx_buffer, dma),
6030 dma_unmap_len(tx_buffer, len),
6031 DMA_TO_DEVICE);
6032 }
6033
6034 /* move us one more past the eop_desc for start of next pkt */
6035 tx_buffer++;
6036 i++;
6037 if (unlikely(i == tx_ring->count)) {
6038 i = 0;
6039 tx_buffer = tx_ring->tx_buffer_info;
6040 }
6041 }
6042
6043 /* reset BQL for queue */
6044 if (!ring_is_xdp(tx_ring))
6045 netdev_tx_reset_queue(txring_txq(tx_ring));
6046
6047out:
6048 /* reset next_to_use and next_to_clean */
6049 tx_ring->next_to_use = 0;
6050 tx_ring->next_to_clean = 0;
6051}
6052
6053/**
6054 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
6055 * @adapter: board private structure
6056 **/
6057static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
6058{
6059 int i;
6060
6061 for (i = 0; i < adapter->num_rx_queues; i++)
6062 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
6063}
6064
6065/**
6066 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
6067 * @adapter: board private structure
6068 **/
6069static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
6070{
6071 int i;
6072
6073 for (i = 0; i < adapter->num_tx_queues; i++)
6074 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
6075 for (i = 0; i < adapter->num_xdp_queues; i++)
6076 ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
6077}
6078
6079static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
6080{
6081 struct hlist_node *node2;
6082 struct ixgbe_fdir_filter *filter;
6083
6084 spin_lock(&adapter->fdir_perfect_lock);
6085
6086 hlist_for_each_entry_safe(filter, node2,
6087 &adapter->fdir_filter_list, fdir_node) {
6088 hlist_del(&filter->fdir_node);
6089 kfree(filter);
6090 }
6091 adapter->fdir_filter_count = 0;
6092
6093 spin_unlock(&adapter->fdir_perfect_lock);
6094}
6095
6096void ixgbe_down(struct ixgbe_adapter *adapter)
6097{
6098 struct net_device *netdev = adapter->netdev;
6099 struct ixgbe_hw *hw = &adapter->hw;
6100 int i;
6101
6102 /* signal that we are down to the interrupt handler */
6103 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
6104 return;
6105
6106 /* Shut off incoming Tx traffic */
6107 netif_tx_stop_all_queues(netdev);
6108
6109 /* call carrier off first to avoid false dev_watchdog timeouts */
6110 netif_carrier_off(netdev);
6111 netif_tx_disable(netdev);
6112
6113 /* Disable Rx */
6114 ixgbe_disable_rx(adapter);
6115
6116 /* synchronize_rcu() needed for pending XDP buffers to drain */
6117 if (adapter->xdp_ring[0])
6118 synchronize_rcu();
6119
6120 ixgbe_irq_disable(adapter);
6121
6122 ixgbe_napi_disable_all(adapter);
6123
6124 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
6125 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
6126 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6127
6128 del_timer_sync(&adapter->service_timer);
6129
6130 if (adapter->num_vfs) {
6131 /* Clear EITR Select mapping */
6132 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
6133
6134 /* Mark all the VFs as inactive */
6135 for (i = 0 ; i < adapter->num_vfs; i++)
6136 adapter->vfinfo[i].clear_to_send = false;
6137
6138 /* ping all the active vfs to let them know we are going down */
6139 ixgbe_ping_all_vfs(adapter);
6140
6141 /* Disable all VFTE/VFRE TX/RX */
6142 ixgbe_disable_tx_rx(adapter);
6143 }
6144
6145 /* disable transmits in the hardware now that interrupts are off */
6146 ixgbe_disable_tx(adapter);
6147
6148 if (!pci_channel_offline(adapter->pdev))
6149 ixgbe_reset(adapter);
6150
6151 /* power down the optics for 82599 SFP+ fiber */
6152 if (hw->mac.ops.disable_tx_laser)
6153 hw->mac.ops.disable_tx_laser(hw);
6154
6155 ixgbe_clean_all_tx_rings(adapter);
6156 ixgbe_clean_all_rx_rings(adapter);
6157}
6158
6159/**
6160 * ixgbe_set_eee_capable - helper function to determine EEE support on X550
6161 * @adapter: board private structure
6162 */
6163static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
6164{
6165 struct ixgbe_hw *hw = &adapter->hw;
6166
6167 switch (hw->device_id) {
6168 case IXGBE_DEV_ID_X550EM_A_1G_T:
6169 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6170 if (!hw->phy.eee_speeds_supported)
6171 break;
6172 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
6173 if (!hw->phy.eee_speeds_advertised)
6174 break;
6175 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
6176 break;
6177 default:
6178 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
6179 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
6180 break;
6181 }
6182}
6183
6184/**
6185 * ixgbe_tx_timeout - Respond to a Tx Hang
6186 * @netdev: network interface device structure
6187 * @txqueue: queue number that timed out
6188 **/
6189static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
6190{
6191 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6192
6193 /* Do the reset outside of interrupt context */
6194 ixgbe_tx_timeout_reset(adapter);
6195}
6196
6197#ifdef CONFIG_IXGBE_DCB
6198static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
6199{
6200 struct ixgbe_hw *hw = &adapter->hw;
6201 struct tc_configuration *tc;
6202 int j;
6203
6204 switch (hw->mac.type) {
6205 case ixgbe_mac_82598EB:
6206 case ixgbe_mac_82599EB:
6207 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
6208 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
6209 break;
6210 case ixgbe_mac_X540:
6211 case ixgbe_mac_X550:
6212 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
6213 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
6214 break;
6215 case ixgbe_mac_X550EM_x:
6216 case ixgbe_mac_x550em_a:
6217 default:
6218 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
6219 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
6220 break;
6221 }
6222
6223 /* Configure DCB traffic classes */
6224 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
6225 tc = &adapter->dcb_cfg.tc_config[j];
6226 tc->path[DCB_TX_CONFIG].bwg_id = 0;
6227 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
6228 tc->path[DCB_RX_CONFIG].bwg_id = 0;
6229 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
6230 tc->dcb_pfc = pfc_disabled;
6231 }
6232
6233 /* Initialize default user to priority mapping, UPx->TC0 */
6234 tc = &adapter->dcb_cfg.tc_config[0];
6235 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
6236 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
6237
6238 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
6239 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
6240 adapter->dcb_cfg.pfc_mode_enable = false;
6241 adapter->dcb_set_bitmap = 0x00;
6242 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
6243 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
6244 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
6245 sizeof(adapter->temp_dcb_cfg));
6246}
6247#endif
6248
6249/**
6250 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
6251 * @adapter: board private structure to initialize
6252 * @ii: pointer to ixgbe_info for device
6253 *
6254 * ixgbe_sw_init initializes the Adapter private data structure.
6255 * Fields are initialized based on PCI device information and
6256 * OS network device settings (MTU size).
6257 **/
6258static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
6259 const struct ixgbe_info *ii)
6260{
6261 struct ixgbe_hw *hw = &adapter->hw;
6262 struct pci_dev *pdev = adapter->pdev;
6263 unsigned int rss, fdir;
6264 u32 fwsm;
6265 int i;
6266
6267
6268 /* PCI config space info */
6269 hw->vendor_id = pdev->vendor;
6270 hw->device_id = pdev->device;
6271 hw->revision_id = pdev->revision;
6272 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6273 hw->subsystem_device_id = pdev->subsystem_device;
6274
6275 /* get_invariants needs the device IDs */
6276 ii->get_invariants(hw);
6277
6278 /* Set common capability flags and settings */
6279 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
6280 adapter->ring_feature[RING_F_RSS].limit = rss;
6281 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
6282 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
6283 adapter->atr_sample_rate = 20;
6284 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
6285 adapter->ring_feature[RING_F_FDIR].limit = fdir;
6286 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
6287 adapter->ring_feature[RING_F_VMDQ].limit = 1;
6288#ifdef CONFIG_IXGBE_DCA
6289 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
6290#endif
6291#ifdef CONFIG_IXGBE_DCB
6292 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
6293 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6294#endif
6295#ifdef IXGBE_FCOE
6296 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
6297 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6298#ifdef CONFIG_IXGBE_DCB
6299 /* Default traffic class to use for FCoE */
6300 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
6301#endif
6302#endif
6303
6304 /* initialize static ixgbe jump table entries */
6305 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
6306 GFP_KERNEL);
6307 if (!adapter->jump_tables[0])
6308 return -ENOMEM;
6309 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
6310
6311 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
6312 adapter->jump_tables[i] = NULL;
6313
6314 adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
6315 sizeof(struct ixgbe_mac_addr),
6316 GFP_KERNEL);
6317 if (!adapter->mac_table)
6318 return -ENOMEM;
6319
6320 if (ixgbe_init_rss_key(adapter))
6321 return -ENOMEM;
6322
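 /* one bit per queue pair records which rings run in AF_XDP
 * zero-copy mode
 */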
6323 adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
6324 if (!adapter->af_xdp_zc_qps)
6325 return -ENOMEM;
6326
6327 /* Set MAC specific capability flags and exceptions */
6328 switch (hw->mac.type) {
6329 case ixgbe_mac_82598EB:
6330 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
6331
6332 if (hw->device_id == IXGBE_DEV_ID_82598AT)
6333 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
6334
6335 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
6336 adapter->ring_feature[RING_F_FDIR].limit = 0;
6337 adapter->atr_sample_rate = 0;
6338 adapter->fdir_pballoc = 0;
6339#ifdef IXGBE_FCOE
6340 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6341 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6342#ifdef CONFIG_IXGBE_DCB
6343 adapter->fcoe.up = 0;
6344#endif
6345#endif
6346 break;
6347 case ixgbe_mac_82599EB:
6348 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
6349 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6350 break;
6351 case ixgbe_mac_X540:
6352 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
6353 if (fwsm & IXGBE_FWSM_TS_ENABLED)
6354 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6355 break;
6356 case ixgbe_mac_x550em_a:
6357 switch (hw->device_id) {
6358 case IXGBE_DEV_ID_X550EM_A_1G_T:
6359 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6360 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6361 break;
6362 default:
6363 break;
6364 }
6365 fallthrough;
6366 case ixgbe_mac_X550EM_x:
6367#ifdef CONFIG_IXGBE_DCB
6368 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
6369#endif
6370#ifdef IXGBE_FCOE
6371 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6372#ifdef CONFIG_IXGBE_DCB
6373 adapter->fcoe.up = 0;
6374#endif
6375#endif
6376 fallthrough;
6377 case ixgbe_mac_X550:
6378 if (hw->mac.type == ixgbe_mac_X550)
6379 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6380#ifdef CONFIG_IXGBE_DCA
6381 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
6382#endif
6383 break;
6384 default:
6385 break;
6386 }
6387
6388#ifdef IXGBE_FCOE
6389 /* FCoE support exists, always init the FCoE lock */
6390 spin_lock_init(&adapter->fcoe.lock);
6391
6392#endif
6393 /* n-tuple support exists, always init our spinlock */
6394 spin_lock_init(&adapter->fdir_perfect_lock);
6395
6396#ifdef CONFIG_IXGBE_DCB
6397 ixgbe_init_dcb(adapter);
6398#endif
6399 ixgbe_init_ipsec_offload(adapter);
6400
6401 /* default flow control settings */
6402 hw->fc.requested_mode = ixgbe_fc_full;
6403 hw->fc.current_mode = ixgbe_fc_full;
6404 ixgbe_pbthresh_setup(adapter);
6405 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
6406 hw->fc.send_xon = true;
6407 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
6408
6409#ifdef CONFIG_PCI_IOV
6410 if (max_vfs > 0)
6411 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
6412
6413 /* assign number of SR-IOV VFs */
6414 if (hw->mac.type != ixgbe_mac_82598EB) {
6415 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
6416 max_vfs = 0;
6417 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
6418 }
6419 }
6420#endif
6421
6422 /* enable itr by default in dynamic mode */
6423 adapter->rx_itr_setting = 1;
6424 adapter->tx_itr_setting = 1;
6425
6426 /* set default ring sizes */
6427 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
6428 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
6429
6430 /* set default work limits */
6431 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
6432
6433 /* initialize eeprom parameters */
6434 if (ixgbe_init_eeprom_params_generic(hw)) {
6435 e_dev_err("EEPROM initialization failed\n");
6436 return -EIO;
6437 }
6438
6439 /* PF holds first pool slot */
6440 set_bit(0, adapter->fwd_bitmask);
6441 set_bit(__IXGBE_DOWN, &adapter->state);
6442
6443 return 0;
6444}
6445
6446/**
6447 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
6448 * @tx_ring: tx descriptor ring (for a specific queue) to setup
6449 *
6450 * Return 0 on success, negative on failure
6451 **/
6452int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
6453{
6454 struct device *dev = tx_ring->dev;
6455 int orig_node = dev_to_node(dev);
6456 int ring_node = NUMA_NO_NODE;
6457 int size;
6458
6459 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
6460
6461 if (tx_ring->q_vector)
6462 ring_node = tx_ring->q_vector->numa_node;
6463
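 /* prefer allocating the buffer-info array on the ring's NUMA node,
 * fall back to any node if the node-local allocation fails
 */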
6464 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
6465 if (!tx_ring->tx_buffer_info)
6466 tx_ring->tx_buffer_info = vmalloc(size);
6467 if (!tx_ring->tx_buffer_info)
6468 goto err;
6469
6470 /* round up to nearest 4K */
6471 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
6472 tx_ring->size = ALIGN(tx_ring->size, 4096);
6473
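 /* temporarily set the device's node so the descriptor ring is
 * allocated NUMA-local, restore it, and retry without the node
 * hint on failure
 */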
6474 set_dev_node(dev, ring_node);
6475 tx_ring->desc = dma_alloc_coherent(dev,
6476 tx_ring->size,
6477 &tx_ring->dma,
6478 GFP_KERNEL);
6479 set_dev_node(dev, orig_node);
6480 if (!tx_ring->desc)
6481 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6482 &tx_ring->dma, GFP_KERNEL);
6483 if (!tx_ring->desc)
6484 goto err;
6485
6486 tx_ring->next_to_use = 0;
6487 tx_ring->next_to_clean = 0;
6488 return 0;
6489
6490err:
6491 vfree(tx_ring->tx_buffer_info);
6492 tx_ring->tx_buffer_info = NULL;
6493 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
6494 return -ENOMEM;
6495}
6496
6497/**
6498 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
6499 * @adapter: board private structure
6500 *
6501 * If this function returns with an error, then it's possible one or
6502 * more of the rings is populated (while the rest are not). It is the
6503 * caller's duty to clean those orphaned rings.
6504 *
6505 * Return 0 on success, negative on failure
6506 **/
6507static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
6508{
6509 int i, j = 0, err = 0;
6510
6511 for (i = 0; i < adapter->num_tx_queues; i++) {
6512 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6513 if (!err)
6514 continue;
6515
6516 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
6517 goto err_setup_tx;
6518 }
6519 for (j = 0; j < adapter->num_xdp_queues; j++) {
6520 err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
6521 if (!err)
6522 continue;
6523
6524 e_err(probe, "Allocation for Tx Queue %u failed\n", j);
6525 goto err_setup_tx;
6526 }
6527
6528 return 0;
6529err_setup_tx:
6530 /* rewind the index freeing the rings as we go */
6531 while (j--)
6532 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
6533 while (i--)
6534 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6535 return err;
6536}
6537
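/* helper returning the napi_id used to register XDP Rx queue info;
 * rings without a q_vector report 0
 */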
6538static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring)
6539{
6540 struct ixgbe_q_vector *q_vector = rx_ring->q_vector;
6541
6542 return q_vector ? q_vector->napi.napi_id : 0;
6543}
6544
6545/**
6546 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
6547 * @adapter: pointer to ixgbe_adapter
6548 * @rx_ring: rx descriptor ring (for a specific queue) to setup
6549 *
6550 * Returns 0 on success, negative on failure
6551 **/
6552int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
6553 struct ixgbe_ring *rx_ring)
6554{
6555 struct device *dev = rx_ring->dev;
6556 int orig_node = dev_to_node(dev);
6557 int ring_node = NUMA_NO_NODE;
6558 int size;
6559
6560 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
6561
6562 if (rx_ring->q_vector)
6563 ring_node = rx_ring->q_vector->numa_node;
6564
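 /* same NUMA-aware allocation strategy as the Tx path: prefer the
 * ring's node, fall back to any node
 */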
6565 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
6566 if (!rx_ring->rx_buffer_info)
6567 rx_ring->rx_buffer_info = vmalloc(size);
6568 if (!rx_ring->rx_buffer_info)
6569 goto err;
6570
6571 /* Round up to nearest 4K */
6572 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
6573 rx_ring->size = ALIGN(rx_ring->size, 4096);
6574
6575 set_dev_node(dev, ring_node);
6576 rx_ring->desc = dma_alloc_coherent(dev,
6577 rx_ring->size,
6578 &rx_ring->dma,
6579 GFP_KERNEL);
6580 set_dev_node(dev, orig_node);
6581 if (!rx_ring->desc)
6582 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
6583 &rx_ring->dma, GFP_KERNEL);
6584 if (!rx_ring->desc)
6585 goto err;
6586
6587 rx_ring->next_to_clean = 0;
6588 rx_ring->next_to_use = 0;
6589
6590 /* XDP RX-queue info */
6591 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
6592 rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
6593 goto err;
6594
6595 rx_ring->xdp_prog = adapter->xdp_prog;
6596
6597 return 0;
6598err:
6599 vfree(rx_ring->rx_buffer_info);
6600 rx_ring->rx_buffer_info = NULL;
6601 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
6602 return -ENOMEM;
6603}
6604
6605/**
6606 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
6607 * @adapter: board private structure
6608 *
6609 * If this function returns with an error, then it's possible one or
6610 * more of the rings is populated (while the rest are not). It is the
6611 * caller's duty to clean those orphaned rings.
6612 *
6613 * Return 0 on success, negative on failure
6614 **/
6615static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
6616{
6617 int i, err = 0;
6618
6619 for (i = 0; i < adapter->num_rx_queues; i++) {
6620 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
6621 if (!err)
6622 continue;
6623
6624 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
6625 goto err_setup_rx;
6626 }
6627
6628#ifdef IXGBE_FCOE
6629 err = ixgbe_setup_fcoe_ddp_resources(adapter);
6630 if (!err)
6631#endif
6632 return 0;
6633err_setup_rx:
6634 /* rewind the index freeing the rings as we go */
6635 while (i--)
6636 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6637 return err;
6638}
6639
6640/**
6641 * ixgbe_free_tx_resources - Free Tx Resources per Queue
6642 * @tx_ring: Tx descriptor ring for a specific queue
6643 *
6644 * Free all transmit software resources
6645 **/
6646void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
6647{
6648 ixgbe_clean_tx_ring(tx_ring);
6649
6650 vfree(tx_ring->tx_buffer_info);
6651 tx_ring->tx_buffer_info = NULL;
6652
6653 /* if not set, then don't free */
6654 if (!tx_ring->desc)
6655 return;
6656
6657 dma_free_coherent(tx_ring->dev, tx_ring->size,
6658 tx_ring->desc, tx_ring->dma);
6659
6660 tx_ring->desc = NULL;
6661}
6662
6663/**
6664 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
6665 * @adapter: board private structure
6666 *
6667 * Free all transmit software resources
6668 **/
6669static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
6670{
6671 int i;
6672
6673 for (i = 0; i < adapter->num_tx_queues; i++)
6674 if (adapter->tx_ring[i]->desc)
6675 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6676 for (i = 0; i < adapter->num_xdp_queues; i++)
6677 if (adapter->xdp_ring[i]->desc)
6678 ixgbe_free_tx_resources(adapter->xdp_ring[i]);
6679}
6680
6681/**
6682 * ixgbe_free_rx_resources - Free Rx Resources
6683 * @rx_ring: ring to clean the resources from
6684 *
6685 * Free all receive software resources
6686 **/
6687void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
6688{
6689 ixgbe_clean_rx_ring(rx_ring);
6690
6691 rx_ring->xdp_prog = NULL;
6692 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
6693 vfree(rx_ring->rx_buffer_info);
6694 rx_ring->rx_buffer_info = NULL;
6695
6696 /* if not set, then don't free */
6697 if (!rx_ring->desc)
6698 return;
6699
6700 dma_free_coherent(rx_ring->dev, rx_ring->size,
6701 rx_ring->desc, rx_ring->dma);
6702
6703 rx_ring->desc = NULL;
6704}
6705
6706/**
6707 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
6708 * @adapter: board private structure
6709 *
6710 * Free all receive software resources
6711 **/
6712static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
6713{
6714 int i;
6715
6716#ifdef IXGBE_FCOE
6717 ixgbe_free_fcoe_ddp_resources(adapter);
6718
6719#endif
6720 for (i = 0; i < adapter->num_rx_queues; i++)
6721 if (adapter->rx_ring[i]->desc)
6722 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6723}
6724
6725/**
6726 * ixgbe_change_mtu - Change the Maximum Transfer Unit
6727 * @netdev: network interface device structure
6728 * @new_mtu: new value for maximum frame size
6729 *
6730 * Returns 0 on success, negative on failure
6731 **/
6732static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6733{
6734 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6735
6736 if (adapter->xdp_prog) {
6737 int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
6738 VLAN_HLEN;
6739 int i;
6740
6741 for (i = 0; i < adapter->num_rx_queues; i++) {
6742 struct ixgbe_ring *ring = adapter->rx_ring[i];
6743
6744 if (new_frame_size > ixgbe_rx_bufsz(ring)) {
6745 e_warn(probe, "Requested MTU size is not supported with XDP\n");
6746 return -EINVAL;
6747 }
6748 }
6749 }
6750
6751 /*
6752 * For 82599EB we cannot allow legacy VFs to enable their receive
6753 * paths when MTU greater than 1500 is configured. So display a
6754 * warning that legacy VFs will be disabled.
6755 */
6756 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
6757 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
6758 (new_mtu > ETH_DATA_LEN))
6759 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
6760
6761 netdev_dbg(netdev, "changing MTU from %d to %d\n",
6762 netdev->mtu, new_mtu);
6763
6764 /* must set new MTU before calling down or up */
6765 netdev->mtu = new_mtu;
6766
6767 if (netif_running(netdev))
6768 ixgbe_reinit_locked(adapter);
6769
6770 return 0;
6771}
6772
6773/**
6774 * ixgbe_open - Called when a network interface is made active
6775 * @netdev: network interface device structure
6776 *
6777 * Returns 0 on success, negative value on failure
6778 *
6779 * The open entry point is called when a network interface is made
6780 * active by the system (IP address etc.). At this point all resources
6781 * needed for transmit and receive operations are allocated, the interrupt
6782 * handler is registered with the OS, the watchdog timer is started,
6783 * and the stack is notified that the interface is ready.
6784 **/
6785int ixgbe_open(struct net_device *netdev)
6786{
6787 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6788 struct ixgbe_hw *hw = &adapter->hw;
6789 int err, queues;
6790
6791 /* disallow open during test */
6792 if (test_bit(__IXGBE_TESTING, &adapter->state))
6793 return -EBUSY;
6794
6795 netif_carrier_off(netdev);
6796
6797 /* allocate transmit descriptors */
6798 err = ixgbe_setup_all_tx_resources(adapter);
6799 if (err)
6800 goto err_setup_tx;
6801
6802 /* allocate receive descriptors */
6803 err = ixgbe_setup_all_rx_resources(adapter);
6804 if (err)
6805 goto err_setup_rx;
6806
6807 ixgbe_configure(adapter);
6808
6809 err = ixgbe_request_irq(adapter);
6810 if (err)
6811 goto err_req_irq;
6812
6813 /* Notify the stack of the actual queue counts. */
6814 queues = adapter->num_tx_queues;
6815 err = netif_set_real_num_tx_queues(netdev, queues);
6816 if (err)
6817 goto err_set_queues;
6818
6819 queues = adapter->num_rx_queues;
6820 err = netif_set_real_num_rx_queues(netdev, queues);
6821 if (err)
6822 goto err_set_queues;
6823
6824 ixgbe_ptp_init(adapter);
6825
6826 ixgbe_up_complete(adapter);
6827
6828 udp_tunnel_nic_reset_ntf(netdev);
6829
6830 return 0;
6831
6832err_set_queues:
6833 ixgbe_free_irq(adapter);
6834err_req_irq:
6835 ixgbe_free_all_rx_resources(adapter);
6836 if (hw->phy.ops.set_phy_power && !adapter->wol)
6837 hw->phy.ops.set_phy_power(&adapter->hw, false);
6838err_setup_rx:
6839 ixgbe_free_all_tx_resources(adapter);
6840err_setup_tx:
6841 ixgbe_reset(adapter);
6842
6843 return err;
6844}
6845
6846static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6847{
6848 ixgbe_ptp_suspend(adapter);
6849
6850 if (adapter->hw.phy.ops.enter_lplu) {
6851 adapter->hw.phy.reset_disable = true;
6852 ixgbe_down(adapter);
6853 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6854 adapter->hw.phy.reset_disable = false;
6855 } else {
6856 ixgbe_down(adapter);
6857 }
6858
6859 ixgbe_free_irq(adapter);
6860
6861 ixgbe_free_all_tx_resources(adapter);
6862 ixgbe_free_all_rx_resources(adapter);
6863}
6864
6865/**
6866 * ixgbe_close - Disables a network interface
6867 * @netdev: network interface device structure
6868 *
6869 * Returns 0, this is not allowed to fail
6870 *
6871 * The close entry point is called when an interface is de-activated
6872 * by the OS. The hardware is still under the drivers control, but
6873 * needs to be disabled. A global MAC reset is issued to stop the
6874 * hardware, and all transmit and receive resources are freed.
6875 **/
6876int ixgbe_close(struct net_device *netdev)
6877{
6878 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6879
6880 ixgbe_ptp_stop(adapter);
6881
6882 if (netif_device_present(netdev))
6883 ixgbe_close_suspend(adapter);
6884
6885 ixgbe_fdir_filter_exit(adapter);
6886
6887 ixgbe_release_hw_control(adapter);
6888
6889 return 0;
6890}
6891
6892static int __maybe_unused ixgbe_resume(struct device *dev_d)
6893{
6894 struct pci_dev *pdev = to_pci_dev(dev_d);
6895 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6896 struct net_device *netdev = adapter->netdev;
6897 int err;
6898
6899 adapter->hw.hw_addr = adapter->io_addr;
6900
6901 err = pci_enable_device_mem(pdev);
6902 if (err) {
6903 e_dev_err("Cannot enable PCI device from suspend\n");
6904 return err;
6905 }
6906 smp_mb__before_atomic();
6907 clear_bit(__IXGBE_DISABLED, &adapter->state);
6908 pci_set_master(pdev);
6909
6910 device_wakeup_disable(dev_d);
6911
6912 ixgbe_reset(adapter);
6913
6914 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6915
6916 rtnl_lock();
6917 err = ixgbe_init_interrupt_scheme(adapter);
6918 if (!err && netif_running(netdev))
6919 err = ixgbe_open(netdev);
6920
6921
6922 if (!err)
6923 netif_device_attach(netdev);
6924 rtnl_unlock();
6925
6926 return err;
6927}
6928
6929static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6930{
6931 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6932 struct net_device *netdev = adapter->netdev;
6933 struct ixgbe_hw *hw = &adapter->hw;
6934 u32 ctrl;
6935 u32 wufc = adapter->wol;
6936
6937 rtnl_lock();
6938 netif_device_detach(netdev);
6939
6940 if (netif_running(netdev))
6941 ixgbe_close_suspend(adapter);
6942
6943 ixgbe_clear_interrupt_scheme(adapter);
6944 rtnl_unlock();
6945
6946 if (hw->mac.ops.stop_link_on_d3)
6947 hw->mac.ops.stop_link_on_d3(hw);
6948
6949 if (wufc) {
6950 u32 fctrl;
6951
6952 ixgbe_set_rx_mode(netdev);
6953
6954 /* enable the optics for 82599 SFP+ fiber as we can WoL */
6955 if (hw->mac.ops.enable_tx_laser)
6956 hw->mac.ops.enable_tx_laser(hw);
6957
6958 /* enable the reception of multicast packets */
6959 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6960 fctrl |= IXGBE_FCTRL_MPE;
6961 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6962
6963 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
6964 ctrl |= IXGBE_CTRL_GIO_DIS;
6965 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
6966
6967 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
6968 } else {
6969 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
6970 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
6971 }
6972
6973 switch (hw->mac.type) {
6974 case ixgbe_mac_82598EB:
6975 pci_wake_from_d3(pdev, false);
6976 break;
6977 case ixgbe_mac_82599EB:
6978 case ixgbe_mac_X540:
6979 case ixgbe_mac_X550:
6980 case ixgbe_mac_X550EM_x:
6981 case ixgbe_mac_x550em_a:
6982 pci_wake_from_d3(pdev, !!wufc);
6983 break;
6984 default:
6985 break;
6986 }
6987
6988 *enable_wake = !!wufc;
6989 if (hw->phy.ops.set_phy_power && !*enable_wake)
6990 hw->phy.ops.set_phy_power(hw, false);
6991
6992 ixgbe_release_hw_control(adapter);
6993
6994 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6995 pci_disable_device(pdev);
6996
6997 return 0;
6998}
6999
7000static int __maybe_unused ixgbe_suspend(struct device *dev_d)
7001{
7002 struct pci_dev *pdev = to_pci_dev(dev_d);
7003 int retval;
7004 bool wake;
7005
7006 retval = __ixgbe_shutdown(pdev, &wake);
7007
7008 device_set_wakeup_enable(dev_d, wake);
7009
7010 return retval;
7011}
7012
7013static void ixgbe_shutdown(struct pci_dev *pdev)
7014{
7015 bool wake;
7016
7017 __ixgbe_shutdown(pdev, &wake);
7018
7019 if (system_state == SYSTEM_POWER_OFF) {
7020 pci_wake_from_d3(pdev, wake);
7021 pci_set_power_state(pdev, PCI_D3hot);
7022 }
7023}
7024
7025/**
7026 * ixgbe_update_stats - Update the board statistics counters.
7027 * @adapter: board private structure
7028 **/
7029void ixgbe_update_stats(struct ixgbe_adapter *adapter)
7030{
7031 struct net_device *netdev = adapter->netdev;
7032 struct ixgbe_hw *hw = &adapter->hw;
7033 struct ixgbe_hw_stats *hwstats = &adapter->stats;
7034 u64 total_mpc = 0;
7035 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
7036 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
7037 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
7038 u64 alloc_rx_page = 0;
7039 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
7040
7041 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7042 test_bit(__IXGBE_RESETTING, &adapter->state))
7043 return;
7044
7045 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
7046 u64 rsc_count = 0;
7047 u64 rsc_flush = 0;
7048 for (i = 0; i < adapter->num_rx_queues; i++) {
7049 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
7050 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
7051 }
7052 adapter->rsc_total_count = rsc_count;
7053 adapter->rsc_total_flush = rsc_flush;
7054 }
7055
7056 for (i = 0; i < adapter->num_rx_queues; i++) {
7057 struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
7058
7059 if (!rx_ring)
7060 continue;
7061 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
7062 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
7063 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
7064 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
7065 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
7066 bytes += rx_ring->stats.bytes;
7067 packets += rx_ring->stats.packets;
7068 }
7069 adapter->non_eop_descs = non_eop_descs;
7070 adapter->alloc_rx_page = alloc_rx_page;
7071 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
7072 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
7073 adapter->hw_csum_rx_error = hw_csum_rx_error;
7074 netdev->stats.rx_bytes = bytes;
7075 netdev->stats.rx_packets = packets;
7076
7077 bytes = 0;
7078 packets = 0;
7079
7080 for (i = 0; i < adapter->num_tx_queues; i++) {
7081 struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
7082
7083 if (!tx_ring)
7084 continue;
7085 restart_queue += tx_ring->tx_stats.restart_queue;
7086 tx_busy += tx_ring->tx_stats.tx_busy;
7087 bytes += tx_ring->stats.bytes;
7088 packets += tx_ring->stats.packets;
7089 }
7090 for (i = 0; i < adapter->num_xdp_queues; i++) {
7091 struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
7092
7093 if (!xdp_ring)
7094 continue;
7095 restart_queue += xdp_ring->tx_stats.restart_queue;
7096 tx_busy += xdp_ring->tx_stats.tx_busy;
7097 bytes += xdp_ring->stats.bytes;
7098 packets += xdp_ring->stats.packets;
7099 }
7100 adapter->restart_queue = restart_queue;
7101 adapter->tx_busy = tx_busy;
7102 netdev->stats.tx_bytes = bytes;
7103 netdev->stats.tx_packets = packets;
7104
7105 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
7106
7107 /* 8 register reads */
7108 for (i = 0; i < 8; i++) {
7109 /* for packet buffers not used, the register should read 0 */
7110 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
7111 missed_rx += mpc;
7112 hwstats->mpc[i] += mpc;
7113 total_mpc += hwstats->mpc[i];
7114 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
7115 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
7116 switch (hw->mac.type) {
7117 case ixgbe_mac_82598EB:
7118 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
7119 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
7120 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
7121 hwstats->pxonrxc[i] +=
7122 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
7123 break;
7124 case ixgbe_mac_82599EB:
7125 case ixgbe_mac_X540:
7126 case ixgbe_mac_X550:
7127 case ixgbe_mac_X550EM_x:
7128 case ixgbe_mac_x550em_a:
7129 hwstats->pxonrxc[i] +=
7130 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
7131 break;
7132 default:
7133 break;
7134 }
7135 }
7136
7137
7138 for (i = 0; i < 16; i++) {
7139 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
7140 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
7141 if ((hw->mac.type == ixgbe_mac_82599EB) ||
7142 (hw->mac.type == ixgbe_mac_X540) ||
7143 (hw->mac.type == ixgbe_mac_X550) ||
7144 (hw->mac.type == ixgbe_mac_X550EM_x) ||
7145 (hw->mac.type == ixgbe_mac_x550em_a)) {
7146 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
7147 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
7148 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
7149 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
7150 }
7151 }
7152
7153 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
7154
7155 hwstats->gprc -= missed_rx;
7156
7157 ixgbe_update_xoff_received(adapter);
7158
7159
7160 switch (hw->mac.type) {
7161 case ixgbe_mac_82598EB:
7162 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
7163 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
7164 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
7165 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
7166 break;
7167 case ixgbe_mac_X540:
7168 case ixgbe_mac_X550:
7169 case ixgbe_mac_X550EM_x:
7170 case ixgbe_mac_x550em_a:
7171
7172 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
7173 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
7174 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
7175 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
7176 fallthrough;
7177 case ixgbe_mac_82599EB:
7178 for (i = 0; i < 16; i++)
7179 adapter->hw_rx_no_dma_resources +=
7180 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
7181 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
7182 IXGBE_READ_REG(hw, IXGBE_GORCH);
7183 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
7184 IXGBE_READ_REG(hw, IXGBE_GOTCH);
7185 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
7186 IXGBE_READ_REG(hw, IXGBE_TORH);
7187 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
7188 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
7189 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
7190#ifdef IXGBE_FCOE
7191 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
7192 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
7193 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
7194 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
7195 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
7196 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
7197
7198 if (adapter->fcoe.ddp_pool) {
7199 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
7200 struct ixgbe_fcoe_ddp_pool *ddp_pool;
7201 unsigned int cpu;
7202 u64 noddp = 0, noddp_ext_buff = 0;
7203 for_each_possible_cpu(cpu) {
7204 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
7205 noddp += ddp_pool->noddp;
7206 noddp_ext_buff += ddp_pool->noddp_ext_buff;
7207 }
7208 hwstats->fcoe_noddp = noddp;
7209 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
7210 }
7211#endif
7212 break;
7213 default:
7214 break;
7215 }
7216 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
7217 hwstats->bprc += bprc;
7218 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
7219 if (hw->mac.type == ixgbe_mac_82598EB)
7220 hwstats->mprc -= bprc;
7221 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
7222 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
7223 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
7224 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
7225 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
7226 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
7227 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
7228 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
7229 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
7230 hwstats->lxontxc += lxon;
7231 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
7232 hwstats->lxofftxc += lxoff;
7233 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
7234 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
7235
7236
7237
7238 xon_off_tot = lxon + lxoff;
7239 hwstats->gptc -= xon_off_tot;
7240 hwstats->mptc -= xon_off_tot;
7241 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
7242 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
7243 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
7244 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
7245 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
7246 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
7247 hwstats->ptc64 -= xon_off_tot;
7248 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
7249 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
7250 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
7251 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
7252 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
7253 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
7254
7255
7256 netdev->stats.multicast = hwstats->mprc;
7257
7258
7259 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
7260 netdev->stats.rx_dropped = 0;
7261 netdev->stats.rx_length_errors = hwstats->rlec;
7262 netdev->stats.rx_crc_errors = hwstats->crcerrs;
7263 netdev->stats.rx_missed_errors = total_mpc;
7264}
7265
7266
7267
7268
7269
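
/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/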
static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	/* do nothing if we are not using signature filters */
	if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
		return;

	adapter->fdir_overflow++;

	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
				&(adapter->tx_ring[i]->state));
		for (i = 0; i < adapter->num_xdp_queues; i++)
			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
				&adapter->xdp_ring[i]->state);
		/* re-enable flow director interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
	} else {
		e_err(probe, "failed to finish FDIR re-initialization, ignored adding FDIR ATR filters\n");
	}
}
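
/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 */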
static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/* If we're down, removing or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_check_for_tx_hang(adapter->tx_ring[i]);
		for (i = 0; i < adapter->num_xdp_queues; i++)
			set_check_for_tx_hang(adapter->xdp_ring[i]);
	}

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
				(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
	} else {
		/* get one bit for every active tx/rx interrupt vector */
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct ixgbe_q_vector *qv = adapter->q_vector[i];
			if (qv->rx.ring || qv->tx.ring)
				eics |= BIT_ULL(i);
		}
	}

	/* Cause software interrupt to ensure rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);
}
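
/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/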
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
		return;

	if (hw->mac.ops.check_link) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	} else {
		/* always assume link is up, if no check link function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
		hw->mac.ops.fc_enable(hw);
		ixgbe_set_rx_drop_en(adapter);
	}

	if (link_up ||
	    time_after(jiffies, (adapter->link_check_timeout +
				 IXGBE_TRY_LINK_TIMEOUT))) {
		adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;
}

static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_IXGBE_DCB
	struct net_device *netdev = adapter->netdev;
	struct dcb_app app = {
			      .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
			      .protocol = 0,
			     };
	u8 up = 0;

	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
		up = dcb_ieee_getapp_mask(netdev, &app);

	adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
#endif
}
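
/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter: pointer to the device adapter structure
 **/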
static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	const char *speed_str;
	bool flow_rx, flow_tx;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB: {
		u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
		flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
		flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
	}
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
	case ixgbe_mac_82599EB: {
		u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
		u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
		flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
		flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
	}
		break;
	default:
		flow_tx = false;
		flow_rx = false;
		break;
	}

	adapter->last_rx_ptp_check = jiffies;

	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
		ixgbe_ptp_start_cyclecounter(adapter);

	switch (link_speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		speed_str = "10 Gbps";
		break;
	case IXGBE_LINK_SPEED_5GB_FULL:
		speed_str = "5 Gbps";
		break;
	case IXGBE_LINK_SPEED_2_5GB_FULL:
		speed_str = "2.5 Gbps";
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		speed_str = "1 Gbps";
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		speed_str = "100 Mbps";
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		speed_str = "10 Mbps";
		break;
	default:
		speed_str = "unknown speed";
		break;
	}
	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
	       ((flow_rx && flow_tx) ? "RX/TX" :
	       (flow_rx ? "RX" :
	       (flow_tx ? "TX" : "None"))));

	netif_carrier_on(netdev);
	ixgbe_check_vf_rate_limit(adapter);

	/* enable transmits */
	netif_tx_wake_all_queues(adapter->netdev);

	/* update the default user priority for VFs */
	ixgbe_update_default_up(adapter);

	/* ping all the active vfs to let them know link has changed */
	ixgbe_ping_all_vfs(adapter);
}
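
/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter: pointer to the adapter structure
 **/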
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->link_up = false;
	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	/* poll for SFP+ cable when link is down */
	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;

	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
		ixgbe_ptp_start_cyclecounter(adapter);

	e_info(drv, "NIC Link is Down\n");
	netif_carrier_off(netdev);

	/* ping all the active vfs to let them know link has changed */
	ixgbe_ping_all_vfs(adapter);
}

static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

		if (tx_ring->next_to_use != tx_ring->next_to_clean)
			return true;
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		struct ixgbe_ring *ring = adapter->xdp_ring[i];

		if (ring->next_to_use != ring->next_to_clean)
			return true;
	}

	return false;
}

static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i, j;

	if (!adapter->num_vfs)
		return false;

	/* resetting the PF is only needed for MAC before X550 */
	if (hw->mac.type >= ixgbe_mac_X550)
		return false;

	for (i = 0; i < adapter->num_vfs; i++) {
		for (j = 0; j < q_per_pool; j++) {
			u32 h, t;

			h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
			t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));

			if (h != t)
				return true;
		}
	}

	return false;
}
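
/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/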
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
{
	if (!netif_carrier_ok(adapter->netdev)) {
		if (ixgbe_ring_tx_pending(adapter) ||
		    ixgbe_vf_tx_pending(adapter)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			e_warn(drv, "initiating reset to clear Tx work after link loss\n");
			set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
		}
	}
}

#ifdef CONFIG_PCI_IOV
static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int vf;
	u32 gpc;

	if (!(netif_carrier_ok(adapter->netdev)))
		return;

	gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
	if (gpc) /* If incrementing then no need for the check below */
		return;
	/* Check to see if a bad DMA write target from an errant or
	 * malicious VF has caused a PCIe error.  If so then we can
	 * issue a VFLR to the offending VF(s) and then resume without
	 * requesting a full slot reset.
	 */

	if (!pdev)
		return;

	/* check status reg for all VFs owned by this PF */
	for (vf = 0; vf < adapter->num_vfs; ++vf) {
		struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
		u16 status_reg;

		if (!vfdev)
			continue;
		pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
		if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
		    status_reg & PCI_STATUS_REC_MASTER_ABORT)
			pcie_flr(vfdev);
	}
}

static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
{
	u32 ssvpc;

	/* Do not perform spoof check for 82598 or if not in IOV mode */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		return;

	ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);

	/*
	 * ssvpc register is cleared on read, if zero then no
	 * spoofed packets in the last interval.
	 */
	if (!ssvpc)
		return;

	e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
}
#else
static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
{
}

static void
ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
{
}
#endif /* CONFIG_PCI_IOV */
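
/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/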
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
	/* if interface is down, removing or resetting, do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	ixgbe_watchdog_update_link(adapter);

	if (adapter->link_up)
		ixgbe_watchdog_link_is_up(adapter);
	else
		ixgbe_watchdog_link_is_down(adapter);

	ixgbe_check_for_bad_vf(adapter);
	ixgbe_spoof_check(adapter);
	ixgbe_update_stats(adapter);

	ixgbe_watchdog_flush_tx(adapter);
}
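
/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/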
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	s32 err;

	/* not searching for SFP so there is nothing to do here */
	if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		return;

	if (adapter->sfp_poll_time &&
	    time_after(adapter->sfp_poll_time, jiffies))
		return; /* If not yet time to poll for SFP */

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;

	adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto sfp_out;

	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* If no cable is present, then we need to reset
		 * the next time we find a good cable. */
		adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
	}

	/* exit on error */
	if (err)
		goto sfp_out;

	/* exit if reset not needed */
	if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		goto sfp_out;

	adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;

	/*
	 * A module may be identified correctly, but the EEPROM may not have
	 * support for that module.  setup_sfp() will fail in that case, so
	 * we should not allow that module to load.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto sfp_out;

	adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
	e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);

sfp_out:
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);

	if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
	    (adapter->netdev->reg_state == NETREG_REGISTERED)) {
		e_dev_err("failed to initialize because an unsupported SFP+ module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported module.\n");
		unregister_netdev(adapter->netdev);
	}
}
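
/**
 * ixgbe_sfp_link_config_subtask - set up link SFP if needed
 * @adapter: the ixgbe adapter structure
 **/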
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 cap_speed;
	u32 speed;
	bool autoneg = false;

	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
		return;

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;

	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;

	hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);

	/* advertise highest capable link speed */
	if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
		speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
				     IXGBE_LINK_SPEED_1GB_FULL);

	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, speed, true);

	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}
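
/**
 * ixgbe_service_timer - Timer Call-back
 * @t: pointer to timer_list structure
 **/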
static void ixgbe_service_timer(struct timer_list *t)
{
	struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
	unsigned long next_event_offset;

	/* poll faster when waiting for link */
	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
		next_event_offset = HZ / 10;
	else
		next_event_offset = HZ * 2;

	/* Reset the timer */
	mod_timer(&adapter->service_timer, next_event_offset + jiffies);

	ixgbe_service_event_schedule(adapter);
}

static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 status;

	if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;

	if (!hw->phy.ops.handle_lasi)
		return;

	status = hw->phy.ops.handle_lasi(&adapter->hw);
	if (status != IXGBE_ERR_OVERTEMP)
		return;

	e_crit(drv, "%s\n", ixgbe_overheat_msg);
}

static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
{
	if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
		return;

	rtnl_lock();
	/* If we're already down, removing or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state)) {
		rtnl_unlock();
		return;
	}

	ixgbe_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	adapter->tx_timeout_count++;

	ixgbe_reinit_locked(adapter);
	rtnl_unlock();
}
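
/**
 * ixgbe_check_fw_error - Check firmware for errors
 * @adapter: the adapter private structure
 *
 * Check firmware errors in register FWSM
 */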
static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 fwsm;

	/* read fwsm.ext_err_ind register and log errors */
	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));

	if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
	    !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
		e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
			   fwsm);

	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
		e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
		return true;
	}

	return false;
}
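
/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/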
static void ixgbe_service_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     service_task);
	if (ixgbe_removed(adapter->hw.hw_addr)) {
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbe_down(adapter);
			rtnl_unlock();
		}
		ixgbe_service_event_complete(adapter);
		return;
	}
	if (ixgbe_check_fw_error(adapter)) {
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			unregister_netdev(adapter->netdev);
		ixgbe_service_event_complete(adapter);
		return;
	}
	ixgbe_reset_subtask(adapter);
	ixgbe_phy_interrupt_subtask(adapter);
	ixgbe_sfp_detection_subtask(adapter);
	ixgbe_sfp_link_config_subtask(adapter);
	ixgbe_check_overtemp_subtask(adapter);
	ixgbe_watchdog_subtask(adapter);
	ixgbe_fdir_reinit_subtask(adapter);
	ixgbe_check_hang_subtask(adapter);

	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
		ixgbe_ptp_overflow_check(adapter);
		if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
			ixgbe_ptp_rx_hang(adapter);
		ixgbe_ptp_tx_hang(adapter);
	}

	ixgbe_service_event_complete(adapter);
}

static int ixgbe_tso(struct ixgbe_ring *tx_ring,
		     struct ixgbe_tx_buffer *first,
		     u8 *hdr_len,
		     struct ixgbe_ipsec_tx_data *itd)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	u32 fceof_saidx = 0;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (eth_p_mpls(first->protocol))
		ip.hdr = skb_inner_network_header(skb);
	else
		ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
		IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
		int len = csum_start - trans_start;

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header, so set to
		 * a reverse csum if needed, else init check to 0.
		 */
		ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
					   csum_fold(csum_partial(trans_start,
								  len, 0)) : 0;
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;

	if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
	} else {
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
	}

	/* update gso size and bytecount with header adjustment */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 0 as index for TSO */
	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

	fceof_saidx |= itd->sa_idx;
	type_tucmd |= itd->flags | itd->trailer_len;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
			  mss_l4len_idx);

	return 1;
}

static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
			  struct ixgbe_tx_buffer *first,
			  struct ixgbe_ipsec_tx_data *itd)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 fceof_saidx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
					 IXGBE_TX_FLAGS_CC)))
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	fceof_saidx |= itd->sa_idx;
	type_tucmd |= itd->flags | itd->trailer_len;

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
}

#define IXGBE_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
		       IXGBE_ADVTXD_DCMD_DEXT |
		       IXGBE_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
				   IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
				   IXGBE_ADVTXD_DCMD_TSE);

	/* set timestamp bit if present */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
				   IXGBE_ADVTXD_MAC_TSTAMP);

	/* insert frame checksum */
	cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);

	return cmd_type;
}

static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				   u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* enable L4 checksum for TSO and TX checksum offload */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_CSUM,
					IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_IPV4,
					IXGBE_ADVTXD_POPTS_IXSM);

	/* enable IPsec */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_IPSEC,
					IXGBE_ADVTXD_POPTS_IPSEC);

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_CC,
					IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbe_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
	if (likely(ixgbe_desc_unused(tx_ring) >= size))
		return 0;

	return __ixgbe_maybe_stop_tx(tx_ring, size);
}

static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
			struct ixgbe_tx_buffer *first,
			const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	skb_frag_t *frag;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBE_TX_DESC(tx_ring, i);

	ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

#ifdef IXGBE_FCOE
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		if (data_len < sizeof(struct fcoe_crc_eof)) {
			size -= sizeof(struct fcoe_crc_eof) - data_len;
			data_len = 0;
		} else {
			data_len -= sizeof(struct fcoe_crc_eof);
		}
	}

#endif
	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

#ifdef IXGBE_FCOE
		size = min_t(unsigned int, data_len, skb_frag_size(frag));
#else
		size = skb_frag_size(frag);
#endif
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IXGBE_TXD_CMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return 0;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i += tx_ring->count;
		i--;
	}

	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}

static void ixgbe_atr(struct ixgbe_ring *ring,
		      struct ixgbe_tx_buffer *first)
{
	struct ixgbe_q_vector *q_vector = ring->q_vector;
	union ixgbe_atr_hash_dword input = { .dword = 0 };
	union ixgbe_atr_hash_dword common = { .dword = 0 };
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	struct sk_buff *skb;
	__be16 vlan_id;
	int l4_proto;

	/* if ring doesn't have a interrupt vector, cannot perform ATR */
	if (!q_vector)
		return;

	/* do nothing if sampling is disabled */
	if (!ring->atr_sample_rate)
		return;

	ring->atr_count++;

	/* currently only IPv4/IPv6 with TCP is supported */
	if ((first->protocol != htons(ETH_P_IP)) &&
	    (first->protocol != htons(ETH_P_IPV6)))
		return;

	/* snag network header to get L4 type and address */
	skb = first->skb;
	hdr.network = skb_network_header(skb);
	if (unlikely(hdr.network <= skb->data))
		return;
	if (skb->encapsulation &&
	    first->protocol == htons(ETH_P_IP) &&
	    hdr.ipv4->protocol == IPPROTO_UDP) {
		struct ixgbe_adapter *adapter = q_vector->adapter;

		if (unlikely(skb_tail_pointer(skb) < hdr.network +
			     VXLAN_HEADROOM))
			return;

		/* verify the port is recognized as VXLAN */
		if (adapter->vxlan_port &&
		    udp_hdr(skb)->dest == adapter->vxlan_port)
			hdr.network = skb_inner_network_header(skb);

		if (adapter->geneve_port &&
		    udp_hdr(skb)->dest == adapter->geneve_port)
			hdr.network = skb_inner_network_header(skb);
	}

	/* Make sure we have at least [minimum IPv4 header + TCP]
	 * or [IPv6 header] bytes
	 */
	if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
		return;

	/* Currently only IPv4/IPv6 with TCP is supported */
	switch (hdr.ipv4->version) {
	case IPVERSION:
		/* access ihl as u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;
		l4_proto = hdr.ipv4->protocol;
		break;
	case 6:
		hlen = hdr.network - skb->data;
		l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
		hlen -= hdr.network - skb->data;
		break;
	default:
		return;
	}

	if (l4_proto != IPPROTO_TCP)
		return;

	if (unlikely(skb_tail_pointer(skb) < hdr.network +
		     hlen + sizeof(struct tcphdr)))
		return;

	th = (struct tcphdr *)(hdr.network + hlen);

	/* skip this packet since the socket is closing */
	if (th->fin)
		return;

	/* sample on all syn packets or once every atr sample count */
	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
		return;

	/* reset sample count */
	ring->atr_count = 0;

	vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);

	/*
	 * src and dst are inverted, think how the receiver sees them
	 *
	 * The input is broken into two sections, a non-compressed section
	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
	 * is XORed together and stored in the compressed dword.
	 */
	input.formatted.vlan_id = vlan_id;

	/*
	 * since src port and flex bytes occupy the same word XOR them together
	 * and write the value to source port portion of compressed dword
	 */
	if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
		common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
	else
		common.port.src ^= th->dest ^ first->protocol;
	common.port.dst ^= th->source;

	switch (hdr.ipv4->version) {
	case IPVERSION:
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
		break;
	case 6:
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
			     hdr.ipv6->saddr.s6_addr32[1] ^
			     hdr.ipv6->saddr.s6_addr32[2] ^
			     hdr.ipv6->saddr.s6_addr32[3] ^
			     hdr.ipv6->daddr.s6_addr32[0] ^
			     hdr.ipv6->daddr.s6_addr32[1] ^
			     hdr.ipv6->daddr.s6_addr32[2] ^
			     hdr.ipv6->daddr.s6_addr32[3];
		break;
	default:
		break;
	}

	if (hdr.network != skb_network_header(skb))
		input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
					      input, common, ring->queue_index);
}

#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
			      struct net_device *sb_dev)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_ring_feature *f;
	int txq;

	if (sb_dev) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		struct net_device *vdev = sb_dev;

		txq = vdev->tc_to_txq[tc].offset;
		txq += reciprocal_scale(skb_get_hash(skb),
					vdev->tc_to_txq[tc].count);

		return txq;
	}

	/*
	 * only execute the code below if protocol is FCoE
	 * or FIP and we have FCoE enabled on the adapter
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_FCOE):
	case htons(ETH_P_FIP):
		adapter = netdev_priv(dev);

		if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
			break;
		fallthrough;
	default:
		return netdev_pick_tx(dev, skb, sb_dev);
	}

	f = &adapter->ring_feature[RING_F_FCOE];

	txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
					   smp_processor_id();

	while (txq >= f->indices)
		txq -= f->indices;

	return txq + f->offset;
}

#endif
int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
			struct xdp_frame *xdpf)
{
	struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	u32 len, cmd_type;
	dma_addr_t dma;
	u16 i;

	len = xdpf->len;

	if (unlikely(!ixgbe_desc_unused(ring)))
		return IXGBE_XDP_CONSUMED;

	dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(ring->dev, dma))
		return IXGBE_XDP_CONSUMED;

	/* record the location of the first descriptor for this packet */
	tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
	tx_buffer->bytecount = len;
	tx_buffer->gso_segs = 1;
	tx_buffer->protocol = 0;

	i = ring->next_to_use;
	tx_desc = IXGBE_TX_DESC(ring, i);

	dma_unmap_len_set(tx_buffer, len, len);
	dma_unmap_addr_set(tx_buffer, dma, dma);
	tx_buffer->xdpf = xdpf;

	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	/* put descriptor type bits */
	cmd_type = IXGBE_ADVTXD_DTYP_DATA |
		   IXGBE_ADVTXD_DCMD_DEXT |
		   IXGBE_ADVTXD_DCMD_IFCS;
	cmd_type |= len | IXGBE_TXD_CMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
	tx_desc->read.olinfo_status =
		cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* Avoid any potential race with xdp_xmit and cleanup */
	smp_wmb();

	/* set next_to_watch value indicating a packet is present */
	i++;
	if (i == ring->count)
		i = 0;

	tx_buffer->next_to_watch = tx_desc;
	ring->next_to_use = i;

	return IXGBE_XDP_TX;
}

netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	unsigned short f;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
	__be16 protocol = skb->protocol;
	u8 hdr_len = 0;

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(
						&skb_shinfo(skb)->frags[f]));

	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* if we have a HW VLAN tag being added default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;
		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			goto out_drop;

		tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
				  IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
	}
	protocol = vlan_get_protocol(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    adapter->ptp_clock) {
		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
		    !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IXGBE_TX_FLAGS_TSTAMP;

			/* schedule check for Tx timestamp */
			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
			schedule_work(&adapter->ptp_tx_work);
		} else {
			adapter->tx_hwtstamp_skipped++;
		}
	}

#ifdef CONFIG_PCI_IOV
	/*
	 * Use the l2switch_enable flag - would be false if the DMA
	 * Tx switch had been disabled.
	 */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		tx_flags |= IXGBE_TX_FLAGS_CC;

#endif
	/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
	    ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
	     (skb->priority != TC_PRIO_CONTROL))) {
		tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
					IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;

			if (skb_cow_head(skb, 0))
				goto out_drop;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 IXGBE_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
		}
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

#ifdef IXGBE_FCOE
	/* setup tx offload for FCoE */
	if ((protocol == htons(ETH_P_FCOE)) &&
	    (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
		tso = ixgbe_fso(tx_ring, first, &hdr_len);
		if (tso < 0)
			goto out_drop;

		goto xmit_fcoe;
	}

#endif /* IXGBE_FCOE */

#ifdef CONFIG_IXGBE_IPSEC
	if (xfrm_offload(skb) &&
	    !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
		goto out_drop;
#endif
	tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbe_tx_csum(tx_ring, first, &ipsec_tx);

	/* add the ATR filter if ATR is on */
	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
		ixgbe_atr(tx_ring, first);

#ifdef IXGBE_FCOE
xmit_fcoe:
#endif /* IXGBE_FCOE */
	if (ixgbe_tx_map(tx_ring, first, hdr_len))
		goto cleanup_tx_timestamp;

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
cleanup_tx_timestamp:
	if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
		dev_kfree_skb_any(adapter->ptp_tx_skb);
		adapter->ptp_tx_skb = NULL;
		cancel_work_sync(&adapter->ptp_tx_work);
		clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
	}

	return NETDEV_TX_OK;
}

static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
				      struct net_device *netdev,
				      struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;

	/*
	 * The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb_put_padto(skb, 17))
		return NETDEV_TX_OK;

	tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
	if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
		return NETDEV_TX_BUSY;

	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}

static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	return __ixgbe_xmit_frame(skb, netdev, NULL);
}
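
/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/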
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	ixgbe_mac_set_default_filter(adapter);

	return 0;
}

static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (adapter->mii_bus) {
		int regnum = addr;

		if (devad != MDIO_DEVAD_NONE)
			regnum |= (devad << 16) | MII_ADDR_C45;

		return mdiobus_read(adapter->mii_bus, prtad, regnum);
	}

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (adapter->mii_bus) {
		int regnum = addr;

		if (devad != MDIO_DEVAD_NONE)
			regnum |= (devad << 16) | MII_ADDR_C45;

		return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
	}

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}

static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return ixgbe_ptp_set_ts_config(adapter, req);
	case SIOCGHWTSTAMP:
		return ixgbe_ptp_get_ts_config(adapter, req);
	case SIOCGMIIPHY:
		if (!adapter->hw.phy.ops.read_reg)
			return -EOPNOTSUPP;
		fallthrough;
	default:
		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
	}
}
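
/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/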
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (is_valid_ether_addr(hw->mac.san_addr)) {
		rtnl_lock();
		err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();

		/* update SAN MAC vmdq pool selection */
		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
	}
	return err;
}
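
/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/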
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
				   struct ixgbe_ring *ring)
{
	u64 bytes, packets;
	unsigned int start;

	if (ring) {
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}
}

static void ixgbe_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_irq(&ring->syncp);
				packets = ring->stats.packets;
				bytes = ring->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
			stats->rx_packets += packets;
			stats->rx_bytes += bytes;
		}
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);

		ixgbe_get_ring_stats64(stats, ring);
	}
	for (i = 0; i < adapter->num_xdp_queues; i++) {
		struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);

		ixgbe_get_ring_stats64(stats, ring);
	}
	rcu_read_unlock();

	/* following stats updated by ixgbe_watchdog_task() */
	stats->multicast = netdev->stats.multicast;
	stats->rx_errors = netdev->stats.rx_errors;
	stats->rx_length_errors = netdev->stats.rx_length_errors;
	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
}

#ifdef CONFIG_IXGBE_DCB
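/**
 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
 * @adapter: pointer to adapter struct
 * @tc: number of traffic classes currently enabled
 *
 * Confirm that each user priority maps to a packet buffer (TC) that exists;
 * clear any mapping that references a TC beyond the enabled range.
 */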
static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg, rsave;
	int i;

	/* 82598 have a static priority to TC mapping that can not
	 * be changed so no validation is needed.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	rsave = reg;

	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);

		/* If up2tc is out of bounds default to zero */
		if (up2tc > tc)
			reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
	}

	if (reg != rsave)
		IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

	return;
}
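
/**
 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
 * @adapter: Pointer to adapter struct
 *
 * Populate the netdev user priority to tc map
 */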
static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
	struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
	u8 prio;

	for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
		u8 tc = 0;

		if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
			tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
		else if (ets)
			tc = ets->prio_tc[prio];

		netdev_set_prio_tc_map(dev, prio, tc);
	}
}

#endif /* CONFIG_IXGBE_DCB */
static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
				       struct netdev_nested_priv *priv)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)priv->data;
	struct ixgbe_fwd_adapter *accel;
	int pool;

	/* we only care about macvlans... */
	if (!netif_is_macvlan(vdev))
		return 0;

	/* that have hardware offload enabled... */
	accel = macvlan_accel_priv(vdev);
	if (!accel)
		return 0;

	/* If we can relocate to a different bit do so */
	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
	if (pool < adapter->num_rx_pools) {
		set_bit(pool, adapter->fwd_bitmask);
		accel->pool = pool;
		return 0;
	}

	/* if we cannot find a free pool then disable the offload */
	netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
	macvlan_release_l2fw_offload(vdev);

	/* unbind the queues and drop the subordinate channel config */
	netdev_unbind_sb_channel(adapter->netdev, vdev);
	netdev_set_sb_channel(vdev, 0);

	kfree(accel);

	return 0;
}

static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct netdev_nested_priv priv = {
		.data = (void *)adapter,
	};

	/* flush any stale bits out of the fwd bitmask */
	bitmap_clear(adapter->fwd_bitmask, 1, 63);

	/* walk through upper devices reassigning pools */
	netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
				      &priv);
}
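
/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 */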
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* Hardware supports up to 8 traffic classes */
	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
		return -EINVAL;

	if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
		return -EINVAL;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbe_close(dev);
	else
		ixgbe_reset(adapter);

	ixgbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_IXGBE_DCB
	if (tc) {
		if (adapter->xdp_prog) {
			e_warn(probe, "DCB is not supported with XDP\n");

			ixgbe_init_interrupt_scheme(adapter);
			if (netif_running(dev))
				ixgbe_open(dev);
			return -EINVAL;
		}

		netdev_set_num_tc(dev, tc);
		ixgbe_set_prio_tc_map(adapter);

		adapter->hw_tcs = tc;
		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;

		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
			adapter->hw.fc.requested_mode = ixgbe_fc_none;
		}
	} else {
		netdev_reset_tc(dev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->hw_tcs = tc;

		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	ixgbe_validate_rtr(adapter, tc);

#endif /* CONFIG_IXGBE_DCB */
	ixgbe_init_interrupt_scheme(adapter);

	ixgbe_defrag_macvlan_pools(dev);

	if (netif_running(dev))
		return ixgbe_open(dev);

	return 0;
}

static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
			       struct tc_cls_u32_offload *cls)
{
	u32 hdl = cls->knode.handle;
	u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
	u32 loc = cls->knode.handle & 0xfffff;
	int err = 0, i, j;
	struct ixgbe_jump_table *jump = NULL;

	if (loc > IXGBE_MAX_HW_ENTRIES)
		return -EINVAL;

	if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
		return -EINVAL;

	/* Clear this filter in the link data it is associated with */
	if (uhtid != 0x800) {
		jump = adapter->jump_tables[uhtid];
		if (!jump)
			return -EINVAL;
		if (!test_bit(loc - 1, jump->child_loc_map))
			return -EINVAL;
		clear_bit(loc - 1, jump->child_loc_map);
	}

	/* Check if the filter being deleted is a link */
	for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
		jump = adapter->jump_tables[i];
		if (jump && jump->link_hdl == hdl) {
			/* Delete filters in the hardware in the child hash
			 * table associated with this link
			 */
			for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
				if (!test_bit(j, jump->child_loc_map))
					continue;
				spin_lock(&adapter->fdir_perfect_lock);
				err = ixgbe_update_ethtool_fdir_entry(adapter,
								      NULL,
								      j + 1);
				spin_unlock(&adapter->fdir_perfect_lock);
				clear_bit(j, jump->child_loc_map);
			}
			/* Remove resources for this link */
			kfree(jump->input);
			kfree(jump->mask);
			kfree(jump);
			adapter->jump_tables[i] = NULL;
			return err;
		}
	}

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
	spin_unlock(&adapter->fdir_perfect_lock);
	return err;
}

static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
					    struct tc_cls_u32_offload *cls)
{
	u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);

	if (uhtid >= IXGBE_MAX_LINK_HANDLE)
		return -EINVAL;

	/* These ixgbe devices do not support hash tables at the moment
	 * so abort when given hash tables.
	 */
	if (cls->hnode.divisor > 0)
		return -EINVAL;

	set_bit(uhtid - 1, &adapter->tables);
	return 0;
}

static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
					    struct tc_cls_u32_offload *cls)
{
	u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);

	if (uhtid >= IXGBE_MAX_LINK_HANDLE)
		return -EINVAL;

	clear_bit(uhtid - 1, &adapter->tables);
	return 0;
}

#ifdef CONFIG_NET_CLS_ACT
struct upper_walk_data {
	struct ixgbe_adapter *adapter;
	u64 action;
	int ifindex;
	u8 queue;
};

static int get_macvlan_queue(struct net_device *upper,
			     struct netdev_nested_priv *priv)
{
	if (netif_is_macvlan(upper)) {
		struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
		struct ixgbe_adapter *adapter;
		struct upper_walk_data *data;
		int ifindex;

		data = (struct upper_walk_data *)priv->data;
		ifindex = data->ifindex;
		adapter = data->adapter;
		if (vadapter && upper->ifindex == ifindex) {
			data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
			data->action = data->queue;
			return 1;
		}
	}

	return 0;
}

static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
				  u8 *queue, u64 *action)
{
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	unsigned int num_vfs = adapter->num_vfs, vf;
	struct netdev_nested_priv priv;
	struct upper_walk_data data;
	struct net_device *upper;

	/* redirect to a SRIOV VF */
	for (vf = 0; vf < num_vfs; ++vf) {
		upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
		if (upper->ifindex == ifindex) {
			*queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
			*action = vf + 1;
			*action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
			return 0;
		}
	}

	/* redirect to a offloaded macvlan netdev */
	data.adapter = adapter;
	data.ifindex = ifindex;
	data.action = 0;
	data.queue = 0;
	priv.data = (void *)&data;
	if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
					  get_macvlan_queue, &priv)) {
		*action = data.action;
		*queue = data.queue;

		return 0;
	}

	return -EINVAL;
}

static int parse_tc_actions(struct ixgbe_adapter *adapter,
			    struct tcf_exts *exts, u64 *action, u8 *queue)
{
	const struct tc_action *a;
	int i;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	tcf_exts_for_each_action(i, a, exts) {
		/* Drop action */
		if (is_tcf_gact_shot(a)) {
			*action = IXGBE_FDIR_DROP_QUEUE;
			*queue = IXGBE_FDIR_DROP_QUEUE;
			return 0;
		}

		/* Redirect to a VF or a offloaded macvlan */
		if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *dev = tcf_mirred_dev(a);

			if (!dev)
				return -EINVAL;
			return handle_redirect_action(adapter, dev->ifindex,
						      queue, action);
		}

		return -EINVAL;
	}

	return -EINVAL;
}
#else
static int parse_tc_actions(struct ixgbe_adapter *adapter,
			    struct tcf_exts *exts, u64 *action, u8 *queue)
{
	return -EINVAL;
}
#endif /* CONFIG_NET_CLS_ACT */
9352
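/* Translate the match keys of a cls_u32 filter into a Flow Director
 * input/mask pair using the field_ptr table from ixgbe_model.h. When a
 * jump (nexthdr) is supplied, also verify that the keys contain the
 * expected link field.
 */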
9353static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
9354 union ixgbe_atr_input *mask,
9355 struct tc_cls_u32_offload *cls,
9356 struct ixgbe_mat_field *field_ptr,
9357 struct ixgbe_nexthdr *nexthdr)
9358{
9359 int i, j, off;
9360 __be32 val, m;
9361 bool found_entry = false, found_jump_field = false;
9362
9363 for (i = 0; i < cls->knode.sel->nkeys; i++) {
9364 off = cls->knode.sel->keys[i].off;
9365 val = cls->knode.sel->keys[i].val;
9366 m = cls->knode.sel->keys[i].mask;
9367
9368 for (j = 0; field_ptr[j].val; j++) {
9369 if (field_ptr[j].off == off) {
9370 field_ptr[j].val(input, mask, (__force u32)val,
9371 (__force u32)m);
9372 input->filter.formatted.flow_type |=
9373 field_ptr[j].type;
9374 found_entry = true;
9375 break;
9376 }
9377 }
9378 if (nexthdr) {
9379 if (nexthdr->off == cls->knode.sel->keys[i].off &&
9380 nexthdr->val ==
9381 (__force u32)cls->knode.sel->keys[i].val &&
9382 nexthdr->mask ==
9383 (__force u32)cls->knode.sel->keys[i].mask)
9384 found_jump_field = true;
9385 else
9386 continue;
9387 }
9388 }
9389
9390 if (nexthdr && !found_jump_field)
9391 return -EINVAL;
9392
9393 if (!found_entry)
9394 return 0;
9395
9396 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
9397 IXGBE_ATR_L4TYPE_MASK;
9398
9399 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
9400 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
9401
9402 return 0;
9403}
9404
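/* Offload a cls_u32 filter (knode) as a Flow Director perfect filter,
 * creating a jump table first when the filter links to another u32
 * hash table.
 */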
9405static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
9406 struct tc_cls_u32_offload *cls)
9407{
9408 __be16 protocol = cls->common.protocol;
9409 u32 loc = cls->knode.handle & 0xfffff;
9410 struct ixgbe_hw *hw = &adapter->hw;
9411 struct ixgbe_mat_field *field_ptr;
9412 struct ixgbe_fdir_filter *input = NULL;
9413 union ixgbe_atr_input *mask = NULL;
9414 struct ixgbe_jump_table *jump = NULL;
9415 int i, err = -EINVAL;
9416 u8 queue;
9417 u32 uhtid, link_uhtid;
9418
9419 uhtid = TC_U32_USERHTID(cls->knode.handle);
9420 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
9421
	/* cls_u32 offsets begin at the network header because the u32
	 * parser skips past L2; there is no reliable way to know what
	 * kind of L2 header precedes the network layer, so only IPv4
	 * is supported here.
	 */
9429 if (protocol != htons(ETH_P_IP))
9430 return err;
9431
9432 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
9433 e_err(drv, "Location out of range\n");
9434 return err;
9435 }
9436
	/* cls u32 is a graph starting at root node 0x800. The driver tracks
	 * links and also the fields used to advance the parser across each
	 * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map
	 * the u32 graph onto the hardware parse graph denoted in
	 * ixgbe_model.h. To add support for new nodes, update the
	 * ixgbe_model.h parse structures; keep this function generic.
	 */
9444 if (uhtid == 0x800) {
9445 field_ptr = (adapter->jump_tables[0])->mat;
9446 } else {
9447 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9448 return err;
9449 if (!adapter->jump_tables[uhtid])
9450 return err;
9451 field_ptr = (adapter->jump_tables[uhtid])->mat;
9452 }
9453
9454 if (!field_ptr)
9455 return err;
9456
	/* At this point we know the field_ptr is valid and need to either
	 * build a cls_u32 link or attach a filter. Adding a link to a
	 * handle that does not exist is invalid, and the same goes for
	 * adding rules to handles that don't exist.
	 */
9463 if (link_uhtid) {
9464 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
9465
9466 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
9467 return err;
9468
9469 if (!test_bit(link_uhtid - 1, &adapter->tables))
9470 return err;
9471
		/* Multiple filters as links to a single hash table are not
		 * supported. To add a new filter with the same link is
		 * invalid.
		 */
9477 if (adapter->jump_tables[link_uhtid] &&
9478 (adapter->jump_tables[link_uhtid])->link_hdl) {
9479 e_err(drv, "Link filter exists for link: %x\n",
9480 link_uhtid);
9481 return err;
9482 }
9483
9484 for (i = 0; nexthdr[i].jump; i++) {
9485 if (nexthdr[i].o != cls->knode.sel->offoff ||
9486 nexthdr[i].s != cls->knode.sel->offshift ||
9487 nexthdr[i].m !=
9488 (__force u32)cls->knode.sel->offmask)
9489 return err;
9490
9491 jump = kzalloc(sizeof(*jump), GFP_KERNEL);
9492 if (!jump)
9493 return -ENOMEM;
9494 input = kzalloc(sizeof(*input), GFP_KERNEL);
9495 if (!input) {
9496 err = -ENOMEM;
9497 goto free_jump;
9498 }
9499 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9500 if (!mask) {
9501 err = -ENOMEM;
9502 goto free_input;
9503 }
9504 jump->input = input;
9505 jump->mask = mask;
9506 jump->link_hdl = cls->knode.handle;
9507
9508 err = ixgbe_clsu32_build_input(input, mask, cls,
9509 field_ptr, &nexthdr[i]);
9510 if (!err) {
9511 jump->mat = nexthdr[i].jump;
9512 adapter->jump_tables[link_uhtid] = jump;
9513 break;
9514 } else {
9515 kfree(mask);
9516 kfree(input);
9517 kfree(jump);
9518 }
9519 }
9520 return 0;
9521 }
9522
9523 input = kzalloc(sizeof(*input), GFP_KERNEL);
9524 if (!input)
9525 return -ENOMEM;
9526 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9527 if (!mask) {
9528 err = -ENOMEM;
9529 goto free_input;
9530 }
9531
9532 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
9533 if ((adapter->jump_tables[uhtid])->input)
9534 memcpy(input, (adapter->jump_tables[uhtid])->input,
9535 sizeof(*input));
9536 if ((adapter->jump_tables[uhtid])->mask)
9537 memcpy(mask, (adapter->jump_tables[uhtid])->mask,
9538 sizeof(*mask));
9539
		/* Lookup in all child hash tables if this location is already
		 * filled with a filter.
		 */
9543 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9544 struct ixgbe_jump_table *link = adapter->jump_tables[i];
9545
9546 if (link && (test_bit(loc - 1, link->child_loc_map))) {
9547 e_err(drv, "Filter exists in location: %x\n",
9548 loc);
9549 err = -EINVAL;
9550 goto err_out;
9551 }
9552 }
9553 }
9554 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
9555 if (err)
9556 goto err_out;
9557
9558 err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
9559 &queue);
9560 if (err < 0)
9561 goto err_out;
9562
9563 input->sw_idx = loc;
9564
9565 spin_lock(&adapter->fdir_perfect_lock);
9566
9567 if (hlist_empty(&adapter->fdir_filter_list)) {
9568 memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
9569 err = ixgbe_fdir_set_input_mask_82599(hw, mask);
9570 if (err)
9571 goto err_out_w_lock;
9572 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
9573 err = -EINVAL;
9574 goto err_out_w_lock;
9575 }
9576
9577 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
9578 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
9579 input->sw_idx, queue);
9580 if (err)
9581 goto err_out_w_lock;
9582
9583 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
9584 spin_unlock(&adapter->fdir_perfect_lock);
9585
9586 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
9587 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
9588
9589 kfree(mask);
9590 return err;
9591err_out_w_lock:
9592 spin_unlock(&adapter->fdir_perfect_lock);
9593err_out:
9594 kfree(mask);
9595free_input:
9596 kfree(input);
9597free_jump:
9598 kfree(jump);
9599 return err;
9600}
9601
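/* Dispatch cls_u32 offload commands to the knode/hnode handlers above. */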
9602static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
9603 struct tc_cls_u32_offload *cls_u32)
9604{
9605 switch (cls_u32->command) {
9606 case TC_CLSU32_NEW_KNODE:
9607 case TC_CLSU32_REPLACE_KNODE:
9608 return ixgbe_configure_clsu32(adapter, cls_u32);
9609 case TC_CLSU32_DELETE_KNODE:
9610 return ixgbe_delete_clsu32(adapter, cls_u32);
9611 case TC_CLSU32_NEW_HNODE:
9612 case TC_CLSU32_REPLACE_HNODE:
9613 return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32);
9614 case TC_CLSU32_DELETE_HNODE:
9615 return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32);
9616 default:
9617 return -EOPNOTSUPP;
9618 }
9619}
9620
9621static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9622 void *cb_priv)
9623{
9624 struct ixgbe_adapter *adapter = cb_priv;
9625
9626 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
9627 return -EOPNOTSUPP;
9628
9629 switch (type) {
9630 case TC_SETUP_CLSU32:
9631 return ixgbe_setup_tc_cls_u32(adapter, type_data);
9632 default:
9633 return -EOPNOTSUPP;
9634 }
9635}
9636
9637static int ixgbe_setup_tc_mqprio(struct net_device *dev,
9638 struct tc_mqprio_qopt *mqprio)
9639{
9640 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9641 return ixgbe_setup_tc(dev, mqprio->num_tc);
9642}
9643
9644static LIST_HEAD(ixgbe_block_cb_list);
9645
9646static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
9647 void *type_data)
9648{
9649 struct ixgbe_adapter *adapter = netdev_priv(dev);
9650
9651 switch (type) {
9652 case TC_SETUP_BLOCK:
9653 return flow_block_cb_setup_simple(type_data,
9654 &ixgbe_block_cb_list,
9655 ixgbe_setup_tc_block_cb,
9656 adapter, adapter, true);
9657 case TC_SETUP_QDISC_MQPRIO:
9658 return ixgbe_setup_tc_mqprio(dev, type_data);
9659 default:
9660 return -EOPNOTSUPP;
9661 }
9662}
9663
9664#ifdef CONFIG_PCI_IOV
9665void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
9666{
9667 struct net_device *netdev = adapter->netdev;
9668
9669 rtnl_lock();
9670 ixgbe_setup_tc(netdev, adapter->hw_tcs);
9671 rtnl_unlock();
9672}
9673
9674#endif
9675void ixgbe_do_reset(struct net_device *netdev)
9676{
9677 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9678
9679 if (netif_running(netdev))
9680 ixgbe_reinit_locked(adapter);
9681 else
9682 ixgbe_reset(adapter);
9683}
9684
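/* Drop requested features that the hardware or the current configuration
 * cannot honor; LRO depends on Rx checksumming and RSC support, and is
 * mutually exclusive with an attached XDP program.
 */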
9685static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
9686 netdev_features_t features)
9687{
9688 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9689
	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
9691 if (!(features & NETIF_F_RXCSUM))
9692 features &= ~NETIF_F_LRO;
9693
	/* Turn off LRO if not RSC capable */
9695 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
9696 features &= ~NETIF_F_LRO;
9697
9698 if (adapter->xdp_prog && (features & NETIF_F_LRO)) {
9699 e_dev_err("LRO is not supported with XDP\n");
9700 features &= ~NETIF_F_LRO;
9701 }
9702
9703 return features;
9704}
9705
9706static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
9707{
9708 int rss = min_t(int, ixgbe_max_rss_indices(adapter),
9709 num_online_cpus());
9710
	/* go back to full RSS if we're not running SR-IOV */
9712 if (!adapter->ring_feature[RING_F_VMDQ].offset)
9713 adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
9714 IXGBE_FLAG_SRIOV_ENABLED);
9715
9716 adapter->ring_feature[RING_F_RSS].limit = rss;
9717 adapter->ring_feature[RING_F_VMDQ].limit = 1;
9718
9719 ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
9720}
9721
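/* Apply a new feature set: keep RSC in sync with LRO, switch between ATR
 * and perfect (n-tuple) Flow Director filtering, and reset or reconfigure
 * the device where the hardware requires it.
 */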
9722static int ixgbe_set_features(struct net_device *netdev,
9723 netdev_features_t features)
9724{
9725 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9726 netdev_features_t changed = netdev->features ^ features;
9727 bool need_reset = false;
9728
	/* Make sure RSC matches LRO, reset if change */
9730 if (!(features & NETIF_F_LRO)) {
9731 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9732 need_reset = true;
9733 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
9734 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
9735 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
9736 if (adapter->rx_itr_setting == 1 ||
9737 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
9738 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
9739 need_reset = true;
9740 } else if ((changed ^ features) & NETIF_F_LRO) {
			e_info(probe, "rx-usecs set too low, disabling RSC\n");
9743 }
9744 }
9745
	/* Check if Flow Director n-tuple support or hw_tc support was
	 * enabled or disabled.  If the state changed, we need to reset.
	 */
9750 if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
		/* turn off ATR, enable perfect filters and reset */
9752 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
9753 need_reset = true;
9754
9755 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
9756 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9757 } else {
		/* turn off perfect filters, enable ATR and reset */
9759 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
9760 need_reset = true;
9761
9762 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;

		/* We cannot enable ATR if SR-IOV is enabled */
		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
		    /* We cannot enable ATR if we have 2 or more tcs */
		    (adapter->hw_tcs > 1) ||
		    /* We cannot enable ATR if RSS is disabled */
		    (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
		    /* A sample rate of 0 indicates ATR disabled */
		    (!adapter->atr_sample_rate))
			; /* do nothing, not supported */
		else /* otherwise supported and set the flag */
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
9775 }
9776
9777 if (changed & NETIF_F_RXALL)
9778 need_reset = true;
9779
9780 netdev->features = features;
9781
9782 if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
9783 ixgbe_reset_l2fw_offload(adapter);
9784 else if (need_reset)
9785 ixgbe_do_reset(netdev);
9786 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
9787 NETIF_F_HW_VLAN_CTAG_FILTER))
9788 ixgbe_set_rx_mode(netdev);
9789
9790 return 1;
9791}
9792
9793static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9794 struct net_device *dev,
9795 const unsigned char *addr, u16 vid,
9796 u16 flags,
9797 struct netlink_ext_ack *extack)
9798{
	/* guarantee we can provide a unique filter for the unicast address */
9800 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
9801 struct ixgbe_adapter *adapter = netdev_priv(dev);
9802 u16 pool = VMDQ_P(0);
9803
9804 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
9805 return -ENOMEM;
9806 }
9807
9808 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
9809}
9810
/**
 * ixgbe_configure_bridge_mode - set various bridge modes
 * @adapter: the private structure
 * @mode: requested bridge mode
 *
 * Configure some settings required for various bridge modes.
 **/
9818static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
9819 __u16 mode)
9820{
9821 struct ixgbe_hw *hw = &adapter->hw;
9822 unsigned int p, num_pools;
9823 u32 vmdctl;
9824
9825 switch (mode) {
9826 case BRIDGE_MODE_VEPA:
		/* disable Tx loopback, rely on switch hairpin mode */
9828 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
9829
		/* must enable Rx switching replication to allow multicast
		 * packet reception on all VFs, and to enable source address
		 * pruning.
		 */
9834 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9835 vmdctl |= IXGBE_VT_CTL_REPLEN;
9836 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9837
		/* enable Rx source address pruning. Note that this requires
		 * replication to be enabled or else it does nothing.
		 */
9841 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9842 for (p = 0; p < num_pools; p++) {
9843 if (hw->mac.ops.set_source_address_pruning)
9844 hw->mac.ops.set_source_address_pruning(hw,
9845 true,
9846 p);
9847 }
9848 break;
9849 case BRIDGE_MODE_VEB:
		/* enable transmit loopback for internal VF/PF communication */
9851 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
9852 IXGBE_PFDTXGSWC_VT_LBEN);
9853
		/* disable Rx switching replication unless we have SR-IOV
		 * virtual functions
		 */
9857 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9858 if (!adapter->num_vfs)
9859 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
9860 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9861
		/* disable Rx source address pruning, since we don't expect to
		 * be receiving external loopback of our transmitted frames
		 */
9865 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9866 for (p = 0; p < num_pools; p++) {
9867 if (hw->mac.ops.set_source_address_pruning)
9868 hw->mac.ops.set_source_address_pruning(hw,
9869 false,
9870 p);
9871 }
9872 break;
9873 default:
9874 return -EINVAL;
9875 }
9876
9877 adapter->bridge_mode = mode;
9878
9879 e_info(drv, "enabling bridge mode: %s\n",
9880 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9881
9882 return 0;
9883}
9884
9885static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
9886 struct nlmsghdr *nlh, u16 flags,
9887 struct netlink_ext_ack *extack)
9888{
9889 struct ixgbe_adapter *adapter = netdev_priv(dev);
9890 struct nlattr *attr, *br_spec;
9891 int rem;
9892
9893 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9894 return -EOPNOTSUPP;
9895
9896 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9897 if (!br_spec)
9898 return -EINVAL;
9899
9900 nla_for_each_nested(attr, br_spec, rem) {
9901 int status;
9902 __u16 mode;
9903
9904 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9905 continue;
9906
9907 if (nla_len(attr) < sizeof(mode))
9908 return -EINVAL;
9909
9910 mode = nla_get_u16(attr);
9911 status = ixgbe_configure_bridge_mode(adapter, mode);
9912 if (status)
9913 return status;
9914
9915 break;
9916 }
9917
9918 return 0;
9919}
9920
9921static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9922 struct net_device *dev,
9923 u32 filter_mask, int nlflags)
9924{
9925 struct ixgbe_adapter *adapter = netdev_priv(dev);
9926
9927 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9928 return 0;
9929
9930 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
9931 adapter->bridge_mode, 0, 0, nlflags,
9932 filter_mask, NULL);
9933}
9934
9935static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
9936{
9937 struct ixgbe_adapter *adapter = netdev_priv(pdev);
9938 struct ixgbe_fwd_adapter *accel;
9939 int tcs = adapter->hw_tcs ? : 1;
9940 int pool, err;
9941
9942 if (adapter->xdp_prog) {
9943 e_warn(probe, "L2FW offload is not supported with XDP\n");
9944 return ERR_PTR(-EINVAL);
9945 }
9946
	/* The hardware supported by ixgbe only filters on the destination MAC
	 * address. In order to avoid issues we only support offloading modes
	 * where the hardware can actually provide the functionality.
	 */
9951 if (!macvlan_supports_dest_filter(vdev))
9952 return ERR_PTR(-EMEDIUMTYPE);
9953
	/* We need to lock down the macvlan to be a single queue device so that
	 * we can reuse the tc_to_txq field in the macvlan netdev to represent
	 * the queue mapping to our netdev.
	 */
9958 if (netif_is_multiqueue(vdev))
9959 return ERR_PTR(-ERANGE);
9960
9961 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9962 if (pool == adapter->num_rx_pools) {
9963 u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
9964 u16 reserved_pools;
9965
9966 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
9967 adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
9968 adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
9969 return ERR_PTR(-EBUSY);
9970
		/* Hardware has a limited number of available pools. Each VF,
		 * and the PF require a pool. Check to ensure we don't
		 * attempt to use more than the available number of pools.
		 */
9975 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
9976 return ERR_PTR(-EBUSY);
9977
		/* Enable VMDq flag so device will be set in VM mode */
9979 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
9980 IXGBE_FLAG_SRIOV_ENABLED;
9981
		/* Try to reserve as many queues per pool as possible; we
		 * start with configurations that support 4 queues per pool,
		 * followed by 2, and then by just 1 per pool.
		 */
9986 if (used_pools < 32 && adapter->num_rx_pools < 16)
9987 reserved_pools = min_t(u16,
9988 32 - used_pools,
9989 16 - adapter->num_rx_pools);
9990 else if (adapter->num_rx_pools < 32)
9991 reserved_pools = min_t(u16,
9992 64 - used_pools,
9993 32 - adapter->num_rx_pools);
9994 else
9995 reserved_pools = 64 - used_pools;
9996
9997
9998 if (!reserved_pools)
9999 return ERR_PTR(-EBUSY);
10000
10001 adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
10002
		/* Force reinit of ring allocation with VMDQ enabled */
10004 err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
10005 if (err)
10006 return ERR_PTR(err);
10007
10008 if (pool >= adapter->num_rx_pools)
10009 return ERR_PTR(-ENOMEM);
10010 }
10011
10012 accel = kzalloc(sizeof(*accel), GFP_KERNEL);
10013 if (!accel)
10014 return ERR_PTR(-ENOMEM);
10015
10016 set_bit(pool, adapter->fwd_bitmask);
10017 netdev_set_sb_channel(vdev, pool);
10018 accel->pool = pool;
10019 accel->netdev = vdev;
10020
10021 if (!netif_running(pdev))
10022 return accel;
10023
10024 err = ixgbe_fwd_ring_up(adapter, accel);
10025 if (err)
10026 return ERR_PTR(err);
10027
10028 return accel;
10029}
10030
10031static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
10032{
10033 struct ixgbe_fwd_adapter *accel = priv;
10034 struct ixgbe_adapter *adapter = netdev_priv(pdev);
10035 unsigned int rxbase = accel->rx_base_queue;
10036 unsigned int i;
10037
	/* delete unicast filter associated with offloaded interface */
10039 ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
10040 VMDQ_P(accel->pool));
10041
	/* Allow remaining Rx packets to get flushed out of the
	 * Rx FIFO before we drop the netdev for the ring.
	 */
10045 usleep_range(10000, 20000);
10046
10047 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
10048 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
10049 struct ixgbe_q_vector *qv = ring->q_vector;
10050
		/* Make sure we aren't processing any packets and clear
		 * netdev to shut down the ring.
		 */
10054 if (netif_running(adapter->netdev))
10055 napi_synchronize(&qv->napi);
10056 ring->netdev = NULL;
10057 }
10058
	/* unbind the queues and drop the subordinate channel config */
10060 netdev_unbind_sb_channel(pdev, accel->netdev);
10061 netdev_set_sb_channel(accel->netdev, 0);
10062
10063 clear_bit(accel->pool, adapter->fwd_bitmask);
10064 kfree(accel);
10065}
10066
10067#define IXGBE_MAX_MAC_HDR_LEN 127
10068#define IXGBE_MAX_NETWORK_HDR_LEN 511
10069
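/* A Tx context descriptor can describe at most IXGBE_MAX_MAC_HDR_LEN
 * bytes of MAC header and IXGBE_MAX_NETWORK_HDR_LEN bytes of network
 * header; frames exceeding either limit lose their offload features.
 */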
10070static netdev_features_t
10071ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
10072 netdev_features_t features)
10073{
10074 unsigned int network_hdr_len, mac_hdr_len;
10075
	/* Make certain the headers can be described by a context descriptor */
10077 mac_hdr_len = skb_network_header(skb) - skb->data;
10078 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
10079 return features & ~(NETIF_F_HW_CSUM |
10080 NETIF_F_SCTP_CRC |
10081 NETIF_F_GSO_UDP_L4 |
10082 NETIF_F_HW_VLAN_CTAG_TX |
10083 NETIF_F_TSO |
10084 NETIF_F_TSO6);
10085
10086 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
10087 if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
10088 return features & ~(NETIF_F_HW_CSUM |
10089 NETIF_F_SCTP_CRC |
10090 NETIF_F_GSO_UDP_L4 |
10091 NETIF_F_TSO |
10092 NETIF_F_TSO6);
10093
	/* We can only support IPv4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 * IPsec offload sets skb->encapsulation but still can handle
	 * the TSO, so it's the exception.
	 */
10099 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
10100#ifdef CONFIG_IXGBE_IPSEC
10101 if (!secpath_exists(skb))
10102#endif
10103 features &= ~NETIF_F_TSO;
10104 }
10105
10106 return features;
10107}
10108
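/* Attach or detach an XDP program. XDP cannot coexist with SR-IOV, DCB,
 * RSC, or Rx buffers smaller than the current frame size, and the rings
 * only need reconfiguring when transitioning into or out of XDP mode.
 */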
10109static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10110{
10111 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
10112 struct ixgbe_adapter *adapter = netdev_priv(dev);
10113 struct bpf_prog *old_prog;
10114 bool need_reset;
10115 int num_queues;
10116
10117 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
10118 return -EINVAL;
10119
10120 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
10121 return -EINVAL;
10122
	/* verify ixgbe ring attributes are sufficient for XDP */
10124 for (i = 0; i < adapter->num_rx_queues; i++) {
10125 struct ixgbe_ring *ring = adapter->rx_ring[i];
10126
10127 if (ring_is_rsc_enabled(ring))
10128 return -EINVAL;
10129
10130 if (frame_size > ixgbe_rx_bufsz(ring))
10131 return -EINVAL;
10132 }
10133
10134 if (nr_cpu_ids > MAX_XDP_QUEUES)
10135 return -ENOMEM;
10136
10137 old_prog = xchg(&adapter->xdp_prog, prog);
10138 need_reset = (!!prog != !!old_prog);
10139
	/* If transitioning XDP modes reconfigure rings */
10141 if (need_reset) {
10142 int err;
10143
10144 if (!prog)
			/* Wait until ndo_xsk_wakeup completes. */
10146 synchronize_rcu();
10147 err = ixgbe_setup_tc(dev, adapter->hw_tcs);
10148
10149 if (err) {
10150 rcu_assign_pointer(adapter->xdp_prog, old_prog);
10151 return -EINVAL;
10152 }
10153 } else {
10154 for (i = 0; i < adapter->num_rx_queues; i++)
10155 (void)xchg(&adapter->rx_ring[i]->xdp_prog,
10156 adapter->xdp_prog);
10157 }
10158
10159 if (old_prog)
10160 bpf_prog_put(old_prog);
10161
	/* Kick start the NAPI context if there is an AF_XDP socket open
	 * on that queue id. This so that receiving will start.
	 */
10165 if (need_reset && prog) {
10166 num_queues = min_t(int, adapter->num_rx_queues,
10167 adapter->num_xdp_queues);
10168 for (i = 0; i < num_queues; i++)
10169 if (adapter->xdp_ring[i]->xsk_pool)
10170 (void)ixgbe_xsk_wakeup(adapter->netdev, i,
10171 XDP_WAKEUP_RX);
10172 }
10173
10174 return 0;
10175}
10176
10177static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
10178{
10179 struct ixgbe_adapter *adapter = netdev_priv(dev);
10180
10181 switch (xdp->command) {
10182 case XDP_SETUP_PROG:
10183 return ixgbe_xdp_setup(dev, xdp->prog);
10184 case XDP_SETUP_XSK_POOL:
10185 return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
10186 xdp->xsk.queue_id);
10187
10188 default:
10189 return -EINVAL;
10190 }
10191}
10192
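/* Bump the XDP Tx ring tail; shared with the AF_XDP zero-copy path in
 * ixgbe_xsk.c via ixgbe_txrx_common.h.
 */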
10193void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
10194{
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
10198 wmb();
10199 writel(ring->next_to_use, ring->tail);
10200}
10201
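/* ndo_xdp_xmit handler: queue a batch of XDP frames on the XDP Tx ring
 * bound to the current CPU and return the number actually queued.
 */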
10202static int ixgbe_xdp_xmit(struct net_device *dev, int n,
10203 struct xdp_frame **frames, u32 flags)
10204{
10205 struct ixgbe_adapter *adapter = netdev_priv(dev);
10206 struct ixgbe_ring *ring;
10207 int nxmit = 0;
10208 int i;
10209
10210 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
10211 return -ENETDOWN;
10212
10213 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
10214 return -EINVAL;
10215
	/* During program transitions it's possible adapter->xdp_prog is
	 * assigned but ring has not been configured yet. In this case
	 * simply abort xmit.
	 */
10219 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
10220 if (unlikely(!ring))
10221 return -ENXIO;
10222
10223 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
10224 return -ENXIO;
10225
10226 for (i = 0; i < n; i++) {
10227 struct xdp_frame *xdpf = frames[i];
10228 int err;
10229
10230 err = ixgbe_xmit_xdp_ring(adapter, xdpf);
10231 if (err != IXGBE_XDP_TX)
10232 break;
10233 nxmit++;
10234 }
10235
10236 if (unlikely(flags & XDP_XMIT_FLUSH))
10237 ixgbe_xdp_ring_update_tail(ring);
10238
10239 return nxmit;
10240}
10241
10242static const struct net_device_ops ixgbe_netdev_ops = {
10243 .ndo_open = ixgbe_open,
10244 .ndo_stop = ixgbe_close,
10245 .ndo_start_xmit = ixgbe_xmit_frame,
10246 .ndo_set_rx_mode = ixgbe_set_rx_mode,
10247 .ndo_validate_addr = eth_validate_addr,
10248 .ndo_set_mac_address = ixgbe_set_mac,
10249 .ndo_change_mtu = ixgbe_change_mtu,
10250 .ndo_tx_timeout = ixgbe_tx_timeout,
10251 .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
10252 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
10253 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
10254 .ndo_eth_ioctl = ixgbe_ioctl,
10255 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
10256 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
10257 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
10258 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
10259 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
10260 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
10261 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
10262 .ndo_get_stats64 = ixgbe_get_stats64,
10263 .ndo_setup_tc = __ixgbe_setup_tc,
10264#ifdef IXGBE_FCOE
10265 .ndo_select_queue = ixgbe_select_queue,
10266 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
10267 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
10268 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
10269 .ndo_fcoe_enable = ixgbe_fcoe_enable,
10270 .ndo_fcoe_disable = ixgbe_fcoe_disable,
10271 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
10272 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
10273#endif
10274 .ndo_set_features = ixgbe_set_features,
10275 .ndo_fix_features = ixgbe_fix_features,
10276 .ndo_fdb_add = ixgbe_ndo_fdb_add,
10277 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
10278 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
10279 .ndo_dfwd_add_station = ixgbe_fwd_add,
10280 .ndo_dfwd_del_station = ixgbe_fwd_del,
10281 .ndo_features_check = ixgbe_features_check,
10282 .ndo_bpf = ixgbe_xdp,
10283 .ndo_xdp_xmit = ixgbe_xdp_xmit,
10284 .ndo_xsk_wakeup = ixgbe_xsk_wakeup,
10285};
10286
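/* Disable a hardware Tx ring and poll until TXDCTL.ENABLE is reported
 * clear, warning if the hardware does not complete the flush in time.
 */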
10287static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
10288 struct ixgbe_ring *tx_ring)
10289{
10290 unsigned long wait_delay, delay_interval;
10291 struct ixgbe_hw *hw = &adapter->hw;
10292 u8 reg_idx = tx_ring->reg_idx;
10293 int wait_loop;
10294 u32 txdctl;
10295
10296 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
10297
	/* delay mechanism from ixgbe_disable_tx */
10299 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10300
10301 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10302 wait_delay = delay_interval;
10303
10304 while (wait_loop--) {
10305 usleep_range(wait_delay, wait_delay + 10);
10306 wait_delay += delay_interval * 2;
10307 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
10308
10309 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
10310 return;
10311 }
10312
10313 e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
10314}
10315
10316static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
10317 struct ixgbe_ring *tx_ring)
10318{
10319 set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10320 ixgbe_disable_txr_hw(adapter, tx_ring);
10321}
10322
10323static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
10324 struct ixgbe_ring *rx_ring)
10325{
10326 unsigned long wait_delay, delay_interval;
10327 struct ixgbe_hw *hw = &adapter->hw;
10328 u8 reg_idx = rx_ring->reg_idx;
10329 int wait_loop;
10330 u32 rxdctl;
10331
10332 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10333 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
10334 rxdctl |= IXGBE_RXDCTL_SWFLSH;
10335
	/* write value back with RXDCTL.ENABLE bit cleared */
10337 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
10338
	/* RXDCTL.EN may not change on 82598 if link is down, so skip it */
10340 if (hw->mac.type == ixgbe_mac_82598EB &&
10341 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
10342 return;
10343
	/* delay mechanism from ixgbe_disable_rx */
10345 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10346
10347 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10348 wait_delay = delay_interval;
10349
10350 while (wait_loop--) {
10351 usleep_range(wait_delay, wait_delay + 10);
10352 wait_delay += delay_interval * 2;
10353 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10354
10355 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
10356 return;
10357 }
10358
10359 e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
10360}
10361
10362static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
10363{
10364 memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
10365 memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
10366}
10367
10368static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
10369{
10370 memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
10371 memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
10372}
10373
/**
 * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
 * @adapter: adapter structure
 * @ring: ring index
 *
 * This function disables a certain Rx/Tx/XDP Tx ring. The function
 * assumes that the netdev is running.
 **/
10382void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
10383{
10384 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10385
10386 rx_ring = adapter->rx_ring[ring];
10387 tx_ring = adapter->tx_ring[ring];
10388 xdp_ring = adapter->xdp_ring[ring];
10389
10390 ixgbe_disable_txr(adapter, tx_ring);
10391 if (xdp_ring)
10392 ixgbe_disable_txr(adapter, xdp_ring);
10393 ixgbe_disable_rxr_hw(adapter, rx_ring);
10394
10395 if (xdp_ring)
10396 synchronize_rcu();
10397
	/* Rx/Tx/XDP Tx share the same napi context. */
10399 napi_disable(&rx_ring->q_vector->napi);
10400
10401 ixgbe_clean_tx_ring(tx_ring);
10402 if (xdp_ring)
10403 ixgbe_clean_tx_ring(xdp_ring);
10404 ixgbe_clean_rx_ring(rx_ring);
10405
10406 ixgbe_reset_txr_stats(tx_ring);
10407 if (xdp_ring)
10408 ixgbe_reset_txr_stats(xdp_ring);
10409 ixgbe_reset_rxr_stats(rx_ring);
10410}
10411
/**
 * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
 * @adapter: adapter structure
 * @ring: ring index
 *
 * This function enables a certain Rx/Tx/XDP Tx ring. The function
 * assumes that the netdev is running.
 **/
10420void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
10421{
10422 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10423
10424 rx_ring = adapter->rx_ring[ring];
10425 tx_ring = adapter->tx_ring[ring];
10426 xdp_ring = adapter->xdp_ring[ring];
10427
	/* Rx/Tx/XDP Tx share the same napi context. */
10429 napi_enable(&rx_ring->q_vector->napi);
10430
10431 ixgbe_configure_tx_ring(adapter, tx_ring);
10432 if (xdp_ring)
10433 ixgbe_configure_tx_ring(adapter, xdp_ring);
10434 ixgbe_configure_rx_ring(adapter, rx_ring);
10435
10436 clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10437 if (xdp_ring)
10438 clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
10439}
10440
/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * This function enumerates the physical functions co-located on a single
 * slot, in order to determine how many ports a device has. This is most
 * useful in determining the required GT/s of PCIe bandwidth necessary for
 * optimal performance.
 **/
10450static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
10451{
10452 struct pci_dev *entry, *pdev = adapter->pdev;
10453 int physfns = 0;
10454
	/* Some cards can not use the generic count PCIe functions method,
	 * because they are behind a parent switch, so we hardcode these with
	 * the correct number of functions.
	 */
10459 if (ixgbe_pcie_from_parent(&adapter->hw))
10460 physfns = 4;
10461
10462 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
		/* don't count virtual functions */
10464 if (entry->is_virtfn)
10465 continue;
10466
		/* When the devices on the bus don't all match our device ID,
		 * we can't reliably determine the correct number of
		 * functions. This can occur if a function has been direct
		 * attached to a virtual machine using VT-d, for example. In
		 * this case, simply return -1 to indicate this.
		 */
10473 if ((entry->vendor != pdev->vendor) ||
10474 (entry->device != pdev->device))
10475 return -1;
10476
10477 physfns++;
10478 }
10479
10480 return physfns;
10481}
10482
/**
 * ixgbe_wol_supported - Check whether device supports WoL
 * @adapter: the adapter private structure
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * This function is used by probe and ethtool to determine
 * which devices have WoL support
 *
 **/
10493bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
10494 u16 subdevice_id)
10495{
10496 struct ixgbe_hw *hw = &adapter->hw;
10497 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
10498
	/* WOL not supported on 82598 */
10500 if (hw->mac.type == ixgbe_mac_82598EB)
10501 return false;
10502
	/* check eeprom to see if WOL is enabled for X540 and newer */
10504 if (hw->mac.type >= ixgbe_mac_X540) {
10505 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
10506 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
10507 (hw->bus.func == 0)))
10508 return true;
10509 }
10510
	/* WOL is determined based on device IDs for 82599 MACs */
10512 switch (device_id) {
10513 case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices could support WOL */
10515 switch (subdevice_id) {
10516 case IXGBE_SUBDEV_ID_82599_560FLR:
10517 case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
10518 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
10519 case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
			/* only support first port */
10521 if (hw->bus.func != 0)
10522 break;
10523 fallthrough;
10524 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
10525 case IXGBE_SUBDEV_ID_82599_SFP:
10526 case IXGBE_SUBDEV_ID_82599_RNDC:
10527 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
10528 case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
10529 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
10530 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
10531 return true;
10532 }
10533 break;
10534 case IXGBE_DEV_ID_82599EN_SFP:
		/* Only these subdevices support WOL */
10536 switch (subdevice_id) {
10537 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
10538 return true;
10539 }
10540 break;
10541 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WOL */
10543 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
10544 return true;
10545 break;
10546 case IXGBE_DEV_ID_82599_KX4:
10547 return true;
10548 default:
10549 break;
10550 }
10551
10552 return false;
10553}
10554
/**
 * ixgbe_set_fw_version - Set FW version
 * @adapter: the adapter private structure
 *
 * This function is used by probe and ethtool to determine the FW version
 * to format and display. The FW version is taken from the EEPROM/NVM.
 */
10562static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
10563{
10564 struct ixgbe_hw *hw = &adapter->hw;
10565 struct ixgbe_nvm_version nvm_ver;
10566
10567 ixgbe_get_oem_prod_version(hw, &nvm_ver);
10568 if (nvm_ver.oem_valid) {
10569 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10570 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
10571 nvm_ver.oem_release);
10572 return;
10573 }
10574
10575 ixgbe_get_etk_id(hw, &nvm_ver);
10576 ixgbe_get_orom_version(hw, &nvm_ver);
10577
10578 if (nvm_ver.or_valid) {
10579 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10580 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
10581 nvm_ver.or_build, nvm_ver.or_patch);
10582 return;
10583 }
10584
	/* fall back to formatting the version from the ETrack ID alone */
10586 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10587 "0x%08x", nvm_ver.etk_id);
10588}
10589
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
10601static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10602{
10603 struct net_device *netdev;
10604 struct ixgbe_adapter *adapter = NULL;
10605 struct ixgbe_hw *hw;
10606 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
10607 int i, err, pci_using_dac, expected_gts;
10608 unsigned int indices = MAX_TX_QUEUES;
10609 u8 part_str[IXGBE_PBANUM_LENGTH];
10610 bool disable_dev = false;
10611#ifdef IXGBE_FCOE
10612 u16 device_caps;
10613#endif
10614 u32 eec;
10615
	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
10619 if (pdev->is_virtfn) {
10620 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
10621 pci_name(pdev), pdev->vendor, pdev->device);
10622 return -EINVAL;
10623 }
10624
10625 err = pci_enable_device_mem(pdev);
10626 if (err)
10627 return err;
10628
10629 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
10630 pci_using_dac = 1;
10631 } else {
10632 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10633 if (err) {
10634 dev_err(&pdev->dev,
10635 "No usable DMA configuration, aborting\n");
10636 goto err_dma;
10637 }
10638 pci_using_dac = 0;
10639 }
10640
10641 err = pci_request_mem_regions(pdev, ixgbe_driver_name);
10642 if (err) {
10643 dev_err(&pdev->dev,
10644 "pci_request_selected_regions failed 0x%x\n", err);
10645 goto err_pci_reg;
10646 }
10647
10648 pci_enable_pcie_error_reporting(pdev);
10649
10650 pci_set_master(pdev);
10651 pci_save_state(pdev);
10652
10653 if (ii->mac == ixgbe_mac_82598EB) {
10654#ifdef CONFIG_IXGBE_DCB
		/* 8 TC w/ 4 queues per TC */
10656 indices = 4 * MAX_TRAFFIC_CLASS;
10657#else
10658 indices = IXGBE_MAX_RSS_INDICES;
10659#endif
10660 }
10661
10662 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
10663 if (!netdev) {
10664 err = -ENOMEM;
10665 goto err_alloc_etherdev;
10666 }
10667
10668 SET_NETDEV_DEV(netdev, &pdev->dev);
10669
10670 adapter = netdev_priv(netdev);
10671
10672 adapter->netdev = netdev;
10673 adapter->pdev = pdev;
10674 hw = &adapter->hw;
10675 hw->back = adapter;
10676 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
10677
10678 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
10679 pci_resource_len(pdev, 0));
10680 adapter->io_addr = hw->hw_addr;
10681 if (!hw->hw_addr) {
10682 err = -EIO;
10683 goto err_ioremap;
10684 }
10685
10686 netdev->netdev_ops = &ixgbe_netdev_ops;
10687 ixgbe_set_ethtool_ops(netdev);
10688 netdev->watchdog_timeo = 5 * HZ;
10689 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
10690
	/* Setup hw api */
10692 hw->mac.ops = *ii->mac_ops;
10693 hw->mac.type = ii->mac;
10694 hw->mvals = ii->mvals;
10695 if (ii->link_ops)
10696 hw->link.ops = *ii->link_ops;
10697
	/* EEPROM */
10699 hw->eeprom.ops = *ii->eeprom_ops;
10700 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
10701 if (ixgbe_removed(hw->hw_addr)) {
10702 err = -EIO;
10703 goto err_ioremap;
10704 }
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
10706 if (!(eec & BIT(8)))
10707 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
10708
	/* PHY */
10710 hw->phy.ops = *ii->phy_ops;
10711 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
10713 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
10714 hw->phy.mdio.mmds = 0;
10715 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
10716 hw->phy.mdio.dev = netdev;
10717 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
10718 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
10719
	/* setup the private structure */
10721 err = ixgbe_sw_init(adapter, ii);
10722 if (err)
10723 goto err_sw_init;
10724
10725 switch (adapter->hw.mac.type) {
10726 case ixgbe_mac_X550:
10727 case ixgbe_mac_X550EM_x:
10728 netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
10729 break;
10730 case ixgbe_mac_x550em_a:
10731 netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a;
10732 break;
10733 default:
10734 break;
10735 }
10736
	/* Make sure the SWFW semaphore is in a valid state */
10738 if (hw->mac.ops.init_swfw_sync)
10739 hw->mac.ops.init_swfw_sync(hw);
10740
	/* Make it possible for the adapter to be woken up via WOL */
10742 switch (adapter->hw.mac.type) {
10743 case ixgbe_mac_82599EB:
10744 case ixgbe_mac_X540:
10745 case ixgbe_mac_X550:
10746 case ixgbe_mac_X550EM_x:
10747 case ixgbe_mac_x550em_a:
10748 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10749 break;
10750 default:
10751 break;
10752 }
10753
	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
10758 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
10759 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
10760 if (esdp & IXGBE_ESDP_SDP1)
10761 e_crit(probe, "Fan has stopped, replace the adapter\n");
10762 }
10763
10764 if (allow_unsupported_sfp)
10765 hw->allow_unsupported_sfp = allow_unsupported_sfp;
10766
	/* reset_hw fills in the perm_addr as well */
10768 hw->phy.reset_if_overtemp = true;
10769 err = hw->mac.ops.reset_hw(hw);
10770 hw->phy.reset_if_overtemp = false;
10771 ixgbe_set_eee_capable(adapter);
10772 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
10773 err = 0;
10774 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
10775 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
10776 e_dev_err("Reload the driver after installing a supported module.\n");
10777 goto err_sw_init;
10778 } else if (err) {
10779 e_dev_err("HW Init failed: %d\n", err);
10780 goto err_sw_init;
10781 }
10782
10783#ifdef CONFIG_PCI_IOV
	/* SR-IOV not supported on the 82598 */
10785 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
10786 goto skip_sriov;
10787
10788 ixgbe_init_mbx_params_pf(hw);
10789 hw->mbx.ops = ii->mbx_ops;
10790 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
10791 ixgbe_enable_sriov(adapter, max_vfs);
10792skip_sriov:
10793
10794#endif
10795 netdev->features = NETIF_F_SG |
10796 NETIF_F_TSO |
10797 NETIF_F_TSO6 |
10798 NETIF_F_RXHASH |
10799 NETIF_F_RXCSUM |
10800 NETIF_F_HW_CSUM;
10801
10802#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
10803 NETIF_F_GSO_GRE_CSUM | \
10804 NETIF_F_GSO_IPXIP4 | \
10805 NETIF_F_GSO_IPXIP6 | \
10806 NETIF_F_GSO_UDP_TUNNEL | \
10807 NETIF_F_GSO_UDP_TUNNEL_CSUM)
10808
10809 netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
10810 netdev->features |= NETIF_F_GSO_PARTIAL |
10811 IXGBE_GSO_PARTIAL_FEATURES;
10812
10813 if (hw->mac.type >= ixgbe_mac_82599EB)
10814 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
10815
10816#ifdef CONFIG_IXGBE_IPSEC
10817#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
10818 NETIF_F_HW_ESP_TX_CSUM | \
10819 NETIF_F_GSO_ESP)
10820
10821 if (adapter->ipsec)
10822 netdev->features |= IXGBE_ESP_FEATURES;
10823#endif
10824
10825 netdev->hw_features |= netdev->features |
10826 NETIF_F_HW_VLAN_CTAG_FILTER |
10827 NETIF_F_HW_VLAN_CTAG_RX |
10828 NETIF_F_HW_VLAN_CTAG_TX |
10829 NETIF_F_RXALL |
10830 NETIF_F_HW_L2FW_DOFFLOAD;
10831
10832 if (hw->mac.type >= ixgbe_mac_82599EB)
10833 netdev->hw_features |= NETIF_F_NTUPLE |
10834 NETIF_F_HW_TC;
10835
10836 if (pci_using_dac)
10837 netdev->features |= NETIF_F_HIGHDMA;
10838
10839 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
10840 netdev->hw_enc_features |= netdev->vlan_features;
10841 netdev->mpls_features |= NETIF_F_SG |
10842 NETIF_F_TSO |
10843 NETIF_F_TSO6 |
10844 NETIF_F_HW_CSUM;
10845 netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
10846
	/* set this bit last since it cannot be part of vlan_features */
10848 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
10849 NETIF_F_HW_VLAN_CTAG_RX |
10850 NETIF_F_HW_VLAN_CTAG_TX;
10851
10852 netdev->priv_flags |= IFF_UNICAST_FLT;
10853 netdev->priv_flags |= IFF_SUPP_NOFCS;
10854
	/* MTU range: 68 - 9710 */
10856 netdev->min_mtu = ETH_MIN_MTU;
10857 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
10858
10859#ifdef CONFIG_IXGBE_DCB
10860 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
10861 netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
10862#endif
10863
10864#ifdef IXGBE_FCOE
10865 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
10866 unsigned int fcoe_l;
10867
10868 if (hw->mac.ops.get_device_caps) {
10869 hw->mac.ops.get_device_caps(hw, &device_caps);
10870 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
10871 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
10872 }
10873
10874
10875 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
10876 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
10877
10878 netdev->features |= NETIF_F_FSO |
10879 NETIF_F_FCOE_CRC;
10880
10881 netdev->vlan_features |= NETIF_F_FSO |
10882 NETIF_F_FCOE_CRC |
10883 NETIF_F_FCOE_MTU;
10884 }
10885#endif
10886 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
10887 netdev->hw_features |= NETIF_F_LRO;
10888 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
10889 netdev->features |= NETIF_F_LRO;
10890
10891 if (ixgbe_check_fw_error(adapter)) {
10892 err = -EIO;
10893 goto err_sw_init;
10894 }
10895
	/* make sure the EEPROM is good */
10897 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
10898 e_dev_err("The EEPROM Checksum Is Not Valid\n");
10899 err = -EIO;
10900 goto err_sw_init;
10901 }
10902
10903 eth_platform_get_mac_address(&adapter->pdev->dev,
10904 adapter->hw.mac.perm_addr);
10905
10906 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
10907
10908 if (!is_valid_ether_addr(netdev->dev_addr)) {
10909 e_dev_err("invalid MAC address\n");
10910 err = -EIO;
10911 goto err_sw_init;
10912 }
10913
	/* Set hw->mac.addr to permanent MAC address */
10915 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
10916 ixgbe_mac_set_default_filter(adapter);
10917
10918 timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
10919
10920 if (ixgbe_removed(hw->hw_addr)) {
10921 err = -EIO;
10922 goto err_sw_init;
10923 }
10924 INIT_WORK(&adapter->service_task, ixgbe_service_task);
10925 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
10926 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
10927
10928 err = ixgbe_init_interrupt_scheme(adapter);
10929 if (err)
10930 goto err_sw_init;
10931
10932 for (i = 0; i < adapter->num_rx_queues; i++)
10933 u64_stats_init(&adapter->rx_ring[i]->syncp);
10934 for (i = 0; i < adapter->num_tx_queues; i++)
10935 u64_stats_init(&adapter->tx_ring[i]->syncp);
10936 for (i = 0; i < adapter->num_xdp_queues; i++)
10937 u64_stats_init(&adapter->xdp_ring[i]->syncp);
10938
	/* WOL not supported for all devices */
10940 adapter->wol = 0;
10941 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
10942 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
10943 pdev->subsystem_device);
10944 if (hw->wol_enabled)
10945 adapter->wol = IXGBE_WUFC_MAG;
10946
10947 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
10948
	/* save off EEPROM version number */
10950 ixgbe_set_fw_version(adapter);
10951
	/* pick up the PCI bus settings for reporting later */
10953 if (ixgbe_pcie_from_parent(hw))
10954 ixgbe_get_parent_bus_info(adapter);
10955 else
10956 hw->mac.ops.get_bus_info(hw);
10957
	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure no warning is displayed if it can't be fixed.
	 */
10963 switch (hw->mac.type) {
10964 case ixgbe_mac_82598EB:
10965 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
10966 break;
10967 default:
10968 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
10969 break;
10970 }
10971
	/* don't check link if we failed to enumerate functions */
10973 if (expected_gts > 0)
10974 ixgbe_check_minimum_link(adapter, expected_gts);
10975
10976 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
10977 if (err)
10978 strlcpy(part_str, "Unknown", sizeof(part_str));
10979 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
10980 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
10981 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
10982 part_str);
10983 else
10984 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
10985 hw->mac.type, hw->phy.type, part_str);
10986
10987 e_dev_info("%pM\n", netdev->dev_addr);
10988
	/* reset the hardware with the new settings */
10990 err = hw->mac.ops.start_hw(hw);
10991 if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
10993 e_dev_warn("This device is a pre-production adapter/LOM. "
10994 "Please be aware there may be issues associated "
10995 "with your hardware. If you are experiencing "
10996 "problems please contact your Intel or hardware "
10997 "representative who provided you with this "
10998 "hardware.\n");
10999 }
11000 strcpy(netdev->name, "eth%d");
11001 pci_set_drvdata(pdev, adapter);
11002 err = register_netdev(netdev);
11003 if (err)
11004 goto err_register;
11005
	/* power down the optics for 82599 SFP+ fiber */
11008 if (hw->mac.ops.disable_tx_laser)
11009 hw->mac.ops.disable_tx_laser(hw);
11010
	/* carrier off reporting is important to ethtool even BEFORE open */
11012 netif_carrier_off(netdev);
11013
11014#ifdef CONFIG_IXGBE_DCA
11015 if (dca_add_requester(&pdev->dev) == 0) {
11016 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
11017 ixgbe_setup_dca(adapter);
11018 }
11019#endif
11020 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
11021 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
11022 for (i = 0; i < adapter->num_vfs; i++)
11023 ixgbe_vf_configuration(pdev, (i | 0x10000000));
11024 }
11025
	/* firmware requires driver version to be 0xFFFFFFFF
	 * since os does not support feature
	 */
11029 if (hw->mac.ops.set_fw_drv_ver)
11030 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
11031 sizeof(UTS_RELEASE) - 1,
11032 UTS_RELEASE);
11033
	/* add san mac addr to netdev */
11035 ixgbe_add_sanmac_netdev(netdev);
11036
11037 e_dev_info("%s\n", ixgbe_default_device_descr);
11038
11039#ifdef CONFIG_IXGBE_HWMON
11040 if (ixgbe_sysfs_init(adapter))
11041 e_err(probe, "failed to allocate sysfs resources\n");
11042#endif
11043
11044 ixgbe_dbg_adapter_init(adapter);
11045
	/* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
11047 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
11048 hw->mac.ops.setup_link(hw,
11049 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
11050 true);
11051
11052 err = ixgbe_mii_bus_init(hw);
11053 if (err)
11054 goto err_netdev;
11055
11056 return 0;
11057
11058err_netdev:
11059 unregister_netdev(netdev);
11060err_register:
11061 ixgbe_release_hw_control(adapter);
11062 ixgbe_clear_interrupt_scheme(adapter);
11063err_sw_init:
11064 ixgbe_disable_sriov(adapter);
11065 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
11066 iounmap(adapter->io_addr);
11067 kfree(adapter->jump_tables[0]);
11068 kfree(adapter->mac_table);
11069 kfree(adapter->rss_key);
11070 bitmap_free(adapter->af_xdp_zc_qps);
11071err_ioremap:
11072 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11073 free_netdev(netdev);
11074err_alloc_etherdev:
11075 pci_disable_pcie_error_reporting(pdev);
11076 pci_release_mem_regions(pdev);
11077err_pci_reg:
11078err_dma:
11079 if (!adapter || disable_dev)
11080 pci_disable_device(pdev);
11081 return err;
11082}
11083
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
11093static void ixgbe_remove(struct pci_dev *pdev)
11094{
11095 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11096 struct net_device *netdev;
11097 bool disable_dev;
11098 int i;
11099
	/* if !adapter then we already cleaned up in probe */
11101 if (!adapter)
11102 return;
11103
11104 netdev = adapter->netdev;
11105 ixgbe_dbg_adapter_exit(adapter);
11106
11107 set_bit(__IXGBE_REMOVING, &adapter->state);
11108 cancel_work_sync(&adapter->service_task);
11109
11110 if (adapter->mii_bus)
11111 mdiobus_unregister(adapter->mii_bus);
11112
11113#ifdef CONFIG_IXGBE_DCA
11114 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
11115 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
11116 dca_remove_requester(&pdev->dev);
11117 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
11118 IXGBE_DCA_CTRL_DCA_DISABLE);
11119 }
11120
11121#endif
11122#ifdef CONFIG_IXGBE_HWMON
11123 ixgbe_sysfs_exit(adapter);
11124#endif
11125
	/* remove the added san mac */
11127 ixgbe_del_sanmac_netdev(netdev);
11128
11129#ifdef CONFIG_PCI_IOV
11130 ixgbe_disable_sriov(adapter);
11131#endif
11132 if (netdev->reg_state == NETREG_REGISTERED)
11133 unregister_netdev(netdev);
11134
11135 ixgbe_stop_ipsec_offload(adapter);
11136 ixgbe_clear_interrupt_scheme(adapter);
11137
11138 ixgbe_release_hw_control(adapter);
11139
11140#ifdef CONFIG_DCB
11141 kfree(adapter->ixgbe_ieee_pfc);
11142 kfree(adapter->ixgbe_ieee_ets);
11143
11144#endif
11145 iounmap(adapter->io_addr);
11146 pci_release_mem_regions(pdev);
11147
11148 e_dev_info("complete\n");
11149
11150 for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
11151 if (adapter->jump_tables[i]) {
11152 kfree(adapter->jump_tables[i]->input);
11153 kfree(adapter->jump_tables[i]->mask);
11154 }
11155 kfree(adapter->jump_tables[i]);
11156 }
11157
11158 kfree(adapter->mac_table);
11159 kfree(adapter->rss_key);
11160 bitmap_free(adapter->af_xdp_zc_qps);
11161 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11162 free_netdev(netdev);
11163
11164 pci_disable_pcie_error_reporting(pdev);
11165
11166 if (disable_dev)
11167 pci_disable_device(pdev);
11168}
11169
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
11178static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
11179 pci_channel_state_t state)
11180{
11181 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11182 struct net_device *netdev = adapter->netdev;
11183
11184#ifdef CONFIG_PCI_IOV
11185 struct ixgbe_hw *hw = &adapter->hw;
11186 struct pci_dev *bdev, *vfdev;
11187 u32 dw0, dw1, dw2, dw3;
11188 int vf, pos;
11189 u16 req_id, pf_func;
11190
11191 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
11192 adapter->num_vfs == 0)
11193 goto skip_bad_vf_detection;
11194
11195 bdev = pdev->bus->self;
11196 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
11197 bdev = bdev->bus->self;
11198
11199 if (!bdev)
11200 goto skip_bad_vf_detection;
11201
11202 pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
11203 if (!pos)
11204 goto skip_bad_vf_detection;
11205
11206 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
11207 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
11208 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
11209 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
11210 if (ixgbe_removed(hw->hw_addr))
11211 goto skip_bad_vf_detection;
11212
11213 req_id = dw1 >> 16;
	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
11215 if (!(req_id & 0x0080))
11216 goto skip_bad_vf_detection;
11217
11218 pf_func = req_id & 0x01;
11219 if ((pf_func & 1) == (pdev->devfn & 1)) {
11220 unsigned int device_id;
11221
11222 vf = (req_id & 0x7F) >> 1;
11223 e_dev_err("VF %d has caused a PCIe error\n", vf);
11224 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
11225 "%8.8x\tdw3: %8.8x\n",
11226 dw0, dw1, dw2, dw3);
11227 switch (adapter->hw.mac.type) {
11228 case ixgbe_mac_82599EB:
11229 device_id = IXGBE_82599_VF_DEVICE_ID;
11230 break;
11231 case ixgbe_mac_X540:
11232 device_id = IXGBE_X540_VF_DEVICE_ID;
11233 break;
11234 case ixgbe_mac_X550:
11235 device_id = IXGBE_DEV_ID_X550_VF;
11236 break;
11237 case ixgbe_mac_X550EM_x:
11238 device_id = IXGBE_DEV_ID_X550EM_X_VF;
11239 break;
11240 case ixgbe_mac_x550em_a:
11241 device_id = IXGBE_DEV_ID_X550EM_A_VF;
11242 break;
11243 default:
11244 device_id = 0;
11245 break;
11246 }
11247
		/* Find the pci device of the offending VF */
11249 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
11250 while (vfdev) {
11251 if (vfdev->devfn == (req_id & 0xFF))
11252 break;
11253 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
11254 device_id, vfdev);
11255 }
11256
		/* There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
11261 if (vfdev) {
11262 pcie_flr(vfdev);
			/* Free device reference count */
11264 pci_dev_put(vfdev);
11265 }
11266 }
11267
	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
11274 adapter->vferr_refcount++;
11275
11276 return PCI_ERS_RESULT_RECOVERED;
11277
11278skip_bad_vf_detection:
11279#endif
11280 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
11281 return PCI_ERS_RESULT_DISCONNECT;
11282
11283 if (!netif_device_present(netdev))
11284 return PCI_ERS_RESULT_DISCONNECT;
11285
11286 rtnl_lock();
11287 netif_device_detach(netdev);
11288
11289 if (netif_running(netdev))
11290 ixgbe_close_suspend(adapter);
11291
11292 if (state == pci_channel_io_perm_failure) {
11293 rtnl_unlock();
11294 return PCI_ERS_RESULT_DISCONNECT;
11295 }
11296
11297 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
11298 pci_disable_device(pdev);
11299 rtnl_unlock();
11300
	/* Request a slot reset. */
11302 return PCI_ERS_RESULT_NEED_RESET;
11303}
11304
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
11311static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
11312{
11313 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11314 pci_ers_result_t result;
11315
11316 if (pci_enable_device_mem(pdev)) {
11317 e_err(probe, "Cannot re-enable PCI device after reset.\n");
11318 result = PCI_ERS_RESULT_DISCONNECT;
11319 } else {
11320 smp_mb__before_atomic();
11321 clear_bit(__IXGBE_DISABLED, &adapter->state);
11322 adapter->hw.hw_addr = adapter->io_addr;
11323 pci_set_master(pdev);
11324 pci_restore_state(pdev);
11325 pci_save_state(pdev);
11326
11327 pci_wake_from_d3(pdev, false);
11328
11329 ixgbe_reset(adapter);
11330 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
11331 result = PCI_ERS_RESULT_RECOVERED;
11332 }
11333
11334 return result;
11335}
11336
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
11344static void ixgbe_io_resume(struct pci_dev *pdev)
11345{
11346 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11347 struct net_device *netdev = adapter->netdev;
11348
11349#ifdef CONFIG_PCI_IOV
11350 if (adapter->vferr_refcount) {
11351 e_info(drv, "Resuming after VF err\n");
11352 adapter->vferr_refcount--;
11353 return;
11354 }
11355
11356#endif
11357 rtnl_lock();
11358 if (netif_running(netdev))
11359 ixgbe_open(netdev);
11360
11361 netif_device_attach(netdev);
11362 rtnl_unlock();
11363}
11364
11365static const struct pci_error_handlers ixgbe_err_handler = {
11366 .error_detected = ixgbe_io_error_detected,
11367 .slot_reset = ixgbe_io_slot_reset,
11368 .resume = ixgbe_io_resume,
11369};
11370
11371static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
11372
11373static struct pci_driver ixgbe_driver = {
11374 .name = ixgbe_driver_name,
11375 .id_table = ixgbe_pci_tbl,
11376 .probe = ixgbe_probe,
11377 .remove = ixgbe_remove,
11378 .driver.pm = &ixgbe_pm_ops,
11379 .shutdown = ixgbe_shutdown,
11380 .sriov_configure = ixgbe_pci_sriov_configure,
11381 .err_handler = &ixgbe_err_handler
11382};
11383
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
11390static int __init ixgbe_init_module(void)
11391{
11392 int ret;
11393 pr_info("%s\n", ixgbe_driver_string);
11394 pr_info("%s\n", ixgbe_copyright);
11395
11396 ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
11397 if (!ixgbe_wq) {
11398 pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
11399 return -ENOMEM;
11400 }
11401
11402 ixgbe_dbg_init();
11403
11404 ret = pci_register_driver(&ixgbe_driver);
11405 if (ret) {
11406 destroy_workqueue(ixgbe_wq);
11407 ixgbe_dbg_exit();
11408 return ret;
11409 }
11410
11411#ifdef CONFIG_IXGBE_DCA
11412 dca_register_notify(&dca_notifier);
11413#endif
11414
11415 return 0;
11416}
11417
11418module_init(ixgbe_init_module);
11419
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
11426static void __exit ixgbe_exit_module(void)
11427{
11428#ifdef CONFIG_IXGBE_DCA
11429 dca_unregister_notify(&dca_notifier);
11430#endif
11431 pci_unregister_driver(&ixgbe_driver);
11432
11433 ixgbe_dbg_exit();
11434 if (ixgbe_wq) {
11435 destroy_workqueue(ixgbe_wq);
11436 ixgbe_wq = NULL;
11437 }
11438}
11439
11440#ifdef CONFIG_IXGBE_DCA
11441static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
11442 void *p)
11443{
11444 int ret_val;
11445
11446 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
11447 __ixgbe_notify_dca);
11448
11449 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
11450}
11451
11452#endif
11453
11454module_exit(ixgbe_exit_module);
11455