// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <linux/numa.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>
#include <net/xdp_sock.h>
#include <net/xfrm.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
#include "ixgbe_txrx_common.h"
48
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
	"Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
	"Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "5.1.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
	"Copyright (c) 1999-2016 Intel Corporation.";

static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";
65
66static const struct ixgbe_info *ixgbe_info_tbl[] = {
67 [board_82598] = &ixgbe_82598_info,
68 [board_82599] = &ixgbe_82599_info,
69 [board_X540] = &ixgbe_X540_info,
70 [board_X550] = &ixgbe_X550_info,
71 [board_X550EM_x] = &ixgbe_X550EM_x_info,
72 [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
73 [board_x550em_a] = &ixgbe_x550em_a_info,
74 [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
75};
76
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
85static const struct pci_device_id ixgbe_pci_tbl[] = {
86 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
87 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
88 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
89 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
90 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
91 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
92 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
93 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
94 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
95 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
96 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
97 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
98 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
99 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
102 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
104 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
105 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
106 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
107 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
108 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
115 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
117 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
118 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
119 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
120 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
121 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
122 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
123 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
124 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
125 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
126 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
127 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
128 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
129 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
130 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
131 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
132 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
133
134 {0, }
135};
136MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
137
138#ifdef CONFIG_IXGBE_DCA
139static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
140 void *p);
141static struct notifier_block dca_notifier = {
142 .notifier_call = ixgbe_notify_dca,
143 .next = NULL,
144 .priority = 0
145};
146#endif
147
148#ifdef CONFIG_PCI_IOV
149static unsigned int max_vfs;
150module_param(max_vfs, uint, 0);
151MODULE_PARM_DESC(max_vfs,
152 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
153#endif
154
155static unsigned int allow_unsupported_sfp;
156module_param(allow_unsupported_sfp, uint, 0);
157MODULE_PARM_DESC(allow_unsupported_sfp,
158 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
159
160#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
161static int debug = -1;
162module_param(debug, int, 0);
163MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
164
165MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
166MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
167MODULE_LICENSE("GPL v2");
168MODULE_VERSION(DRV_VERSION);
169
170static struct workqueue_struct *ixgbe_wq;
171
172static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
173static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
174
175static const struct net_device_ops ixgbe_netdev_ops;
176
177static bool netif_is_ixgbe(struct net_device *dev)
178{
179 return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
180}
181
182static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
183 u32 reg, u16 *value)
184{
185 struct pci_dev *parent_dev;
186 struct pci_bus *parent_bus;
187
188 parent_bus = adapter->pdev->bus->parent;
189 if (!parent_bus)
190 return -1;
191
192 parent_dev = parent_bus->self;
193 if (!parent_dev)
194 return -1;
195
196 if (!pci_is_pcie(parent_dev))
197 return -1;
198
199 pcie_capability_read_word(parent_dev, reg, value);
200 if (*value == IXGBE_FAILED_READ_CFG_WORD &&
201 ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
202 return -1;
203 return 0;
204}
205
206static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
207{
208 struct ixgbe_hw *hw = &adapter->hw;
209 u16 link_status = 0;
210 int err;
211
212 hw->bus.type = ixgbe_bus_type_pci_express;
213
 /* Get the negotiated link width and speed from PCI config space of the
  * parent, as this device is behind a switch
  */
 err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);

 /* assume caller will handle error case */
220 if (err)
221 return err;
222
223 hw->bus.width = ixgbe_convert_bus_width(link_status);
224 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
225
226 return 0;
227}
228
/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from the
 * device. Used to ensure that various locations all have the correct device ID
 * checks.
 */
238static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
239{
240 switch (hw->device_id) {
241 case IXGBE_DEV_ID_82599_SFP_SF_QP:
242 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
243 return true;
244 default:
245 return false;
246 }
247}
248
249static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
250 int expected_gts)
251{
252 struct ixgbe_hw *hw = &adapter->hw;
253 struct pci_dev *pdev;
254
 /* Some devices are not connected over PCIe and thus do not negotiate
  * speed. These devices do not have valid bus info, and thus any report
  * we generate may not be correct.
  */
259 if (hw->bus.type == ixgbe_bus_type_internal)
260 return;
261
262
263 if (ixgbe_pcie_from_parent(&adapter->hw))
264 pdev = adapter->pdev->bus->parent->self;
265 else
266 pdev = adapter->pdev;
267
268 pcie_print_link_status(pdev);
269}
270
271static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
272{
273 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
274 !test_bit(__IXGBE_REMOVING, &adapter->state) &&
275 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
276 queue_work(ixgbe_wq, &adapter->service_task);
277}
278
279static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
280{
281 struct ixgbe_adapter *adapter = hw->back;
282
283 if (!hw->hw_addr)
284 return;
285 hw->hw_addr = NULL;
286 e_dev_err("Adapter removed\n");
287 if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
288 ixgbe_service_event_schedule(adapter);
289}
290
291static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
292{
293 u8 __iomem *reg_addr;
294 u32 value;
295 int i;
296
297 reg_addr = READ_ONCE(hw->hw_addr);
298 if (ixgbe_removed(reg_addr))
299 return IXGBE_FAILED_READ_REG;
300
 /* A register read of all ones can indicate the adapter has been removed,
  * so perform several status register reads to confirm the removal before
  * declaring the device gone.
  */
305 for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
306 value = readl(reg_addr + IXGBE_STATUS);
307 if (value != IXGBE_FAILED_READ_REG)
308 break;
309 mdelay(3);
310 }
311
312 if (value == IXGBE_FAILED_READ_REG)
313 ixgbe_remove_adapter(hw);
314 else
315 value = readl(reg_addr + reg);
316 return value;
317}
318
/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers. It checks for device
 * removal by confirming any read that returns all ones by checking the
 * status register value for all ones.  This function avoids reading from
 * the hardware if a removal was previously detected, in which case it
 * returns IXGBE_FAILED_READ_REG.
 */
332u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
333{
334 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
335 u32 value;
336
337 if (ixgbe_removed(reg_addr))
338 return IXGBE_FAILED_READ_REG;
339 if (unlikely(hw->phy.nw_mng_if_sel &
340 IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
341 struct ixgbe_adapter *adapter;
342 int i;
343
344 for (i = 0; i < 200; ++i) {
345 value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
346 if (likely(!value))
347 goto writes_completed;
348 if (value == IXGBE_FAILED_READ_REG) {
349 ixgbe_remove_adapter(hw);
350 return IXGBE_FAILED_READ_REG;
351 }
352 udelay(5);
353 }
354
355 adapter = hw->back;
356 e_warn(hw, "register writes incomplete %08x\n", value);
357 }
358
359writes_completed:
360 value = readl(reg_addr + reg);
361 if (unlikely(value == IXGBE_FAILED_READ_REG))
362 value = ixgbe_check_remove(hw, reg);
363 return value;
364}
365
366static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
367{
368 u16 value;
369
370 pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
371 if (value == IXGBE_FAILED_READ_CFG_WORD) {
372 ixgbe_remove_adapter(hw);
373 return true;
374 }
375 return false;
376}
377
378u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
379{
380 struct ixgbe_adapter *adapter = hw->back;
381 u16 value;
382
383 if (ixgbe_removed(hw->hw_addr))
384 return IXGBE_FAILED_READ_CFG_WORD;
385 pci_read_config_word(adapter->pdev, reg, &value);
386 if (value == IXGBE_FAILED_READ_CFG_WORD &&
387 ixgbe_check_cfg_remove(hw, adapter->pdev))
388 return IXGBE_FAILED_READ_CFG_WORD;
389 return value;
390}
391
392#ifdef CONFIG_PCI_IOV
393static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
394{
395 struct ixgbe_adapter *adapter = hw->back;
396 u32 value;
397
398 if (ixgbe_removed(hw->hw_addr))
399 return IXGBE_FAILED_READ_CFG_DWORD;
400 pci_read_config_dword(adapter->pdev, reg, &value);
401 if (value == IXGBE_FAILED_READ_CFG_DWORD &&
402 ixgbe_check_cfg_remove(hw, adapter->pdev))
403 return IXGBE_FAILED_READ_CFG_DWORD;
404 return value;
405}
406#endif
407
408void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
409{
410 struct ixgbe_adapter *adapter = hw->back;
411
412 if (ixgbe_removed(hw->hw_addr))
413 return;
414 pci_write_config_word(adapter->pdev, reg, value);
415}
416
417static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
418{
419 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
420
 /* flush memory to make sure state is correct before next watchdog */
422 smp_mb__before_atomic();
423 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
424}
425
426struct ixgbe_reg_info {
427 u32 ofs;
428 char *name;
429};
430
431static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

 /* General Registers */
434 {IXGBE_CTRL, "CTRL"},
435 {IXGBE_STATUS, "STATUS"},
436 {IXGBE_CTRL_EXT, "CTRL_EXT"},

 /* Interrupt Registers */
439 {IXGBE_EICR, "EICR"},

 /* Rx Registers */
442 {IXGBE_SRRCTL(0), "SRRCTL"},
443 {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
444 {IXGBE_RDLEN(0), "RDLEN"},
445 {IXGBE_RDH(0), "RDH"},
446 {IXGBE_RDT(0), "RDT"},
447 {IXGBE_RXDCTL(0), "RXDCTL"},
448 {IXGBE_RDBAL(0), "RDBAL"},
449 {IXGBE_RDBAH(0), "RDBAH"},

 /* Tx Registers */
452 {IXGBE_TDBAL(0), "TDBAL"},
453 {IXGBE_TDBAH(0), "TDBAH"},
454 {IXGBE_TDLEN(0), "TDLEN"},
455 {IXGBE_TDH(0), "TDH"},
456 {IXGBE_TDT(0), "TDT"},
457 {IXGBE_TXDCTL(0), "TXDCTL"},

 /* List Terminator */
460 { .name = NULL }
461};

/*
 * ixgbe_regdump - register printout routine
 */
467static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
468{
469 int i;
470 char rname[16];
471 u32 regs[64];
472
473 switch (reginfo->ofs) {
474 case IXGBE_SRRCTL(0):
475 for (i = 0; i < 64; i++)
476 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
477 break;
478 case IXGBE_DCA_RXCTRL(0):
479 for (i = 0; i < 64; i++)
480 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
481 break;
482 case IXGBE_RDLEN(0):
483 for (i = 0; i < 64; i++)
484 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
485 break;
486 case IXGBE_RDH(0):
487 for (i = 0; i < 64; i++)
488 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
489 break;
490 case IXGBE_RDT(0):
491 for (i = 0; i < 64; i++)
492 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
493 break;
494 case IXGBE_RXDCTL(0):
495 for (i = 0; i < 64; i++)
496 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
497 break;
498 case IXGBE_RDBAL(0):
499 for (i = 0; i < 64; i++)
500 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
501 break;
502 case IXGBE_RDBAH(0):
503 for (i = 0; i < 64; i++)
504 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
505 break;
506 case IXGBE_TDBAL(0):
507 for (i = 0; i < 64; i++)
508 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
509 break;
510 case IXGBE_TDBAH(0):
511 for (i = 0; i < 64; i++)
512 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
513 break;
514 case IXGBE_TDLEN(0):
515 for (i = 0; i < 64; i++)
516 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
517 break;
518 case IXGBE_TDH(0):
519 for (i = 0; i < 64; i++)
520 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
521 break;
522 case IXGBE_TDT(0):
523 for (i = 0; i < 64; i++)
524 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
525 break;
526 case IXGBE_TXDCTL(0):
527 for (i = 0; i < 64; i++)
528 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
529 break;
530 default:
531 pr_info("%-15s %08x\n",
532 reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
533 return;
534 }
535
536 i = 0;
537 while (i < 64) {
538 int j;
539 char buf[9 * 8 + 1];
540 char *p = buf;
541
542 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
543 for (j = 0; j < 8; j++)
544 p += sprintf(p, " %08x", regs[i++]);
545 pr_err("%-15s%s\n", rname, buf);
546 }
547
548}
549
550static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
551{
552 struct ixgbe_tx_buffer *tx_buffer;
553
554 tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
555 pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
556 n, ring->next_to_use, ring->next_to_clean,
557 (u64)dma_unmap_addr(tx_buffer, dma),
558 dma_unmap_len(tx_buffer, len),
559 tx_buffer->next_to_watch,
560 (u64)tx_buffer->time_stamp);
561}

/*
 * ixgbe_dump - print the adapter's registers, Tx rings and Rx rings
 */
566static void ixgbe_dump(struct ixgbe_adapter *adapter)
567{
568 struct net_device *netdev = adapter->netdev;
569 struct ixgbe_hw *hw = &adapter->hw;
570 struct ixgbe_reg_info *reginfo;
571 int n = 0;
572 struct ixgbe_ring *ring;
573 struct ixgbe_tx_buffer *tx_buffer;
574 union ixgbe_adv_tx_desc *tx_desc;
575 struct my_u0 { u64 a; u64 b; } *u0;
576 struct ixgbe_ring *rx_ring;
577 union ixgbe_adv_rx_desc *rx_desc;
578 struct ixgbe_rx_buffer *rx_buffer_info;
579 int i = 0;
580
581 if (!netif_msg_hw(adapter))
582 return;
583
584
585 if (netdev) {
586 dev_info(&adapter->pdev->dev, "Net device Info\n");
587 pr_info("Device Name state "
588 "trans_start\n");
589 pr_info("%-15s %016lX %016lX\n",
590 netdev->name,
591 netdev->state,
592 dev_trans_start(netdev));
593 }
594
595
596 dev_info(&adapter->pdev->dev, "Register Dump\n");
597 pr_info(" Register Name Value\n");
598 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
599 reginfo->name; reginfo++) {
600 ixgbe_regdump(hw, reginfo);
601 }
602
603
604 if (!netdev || !netif_running(netdev))
605 return;
606
607 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
608 pr_info(" %s %s %s %s\n",
609 "Queue [NTU] [NTC] [bi(ntc)->dma ]",
610 "leng", "ntw", "timestamp");
611 for (n = 0; n < adapter->num_tx_queues; n++) {
612 ring = adapter->tx_ring[n];
613 ixgbe_print_buffer(ring, n);
614 }
615
616 for (n = 0; n < adapter->num_xdp_queues; n++) {
617 ring = adapter->xdp_ring[n];
618 ixgbe_print_buffer(ring, n);
619 }
620
621
622 if (!netif_msg_tx_done(adapter))
623 goto rx_ring_summary;
624
625 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
 /* Transmit Descriptor Formats
  *
  * The loop below dumps every descriptor in each Tx ring using the
  * advanced transmit descriptor layout (a 64-bit buffer address plus
  * command/length/offload/status fields) and flags the next-to-use
  * (NTU) and next-to-clean (NTC) positions.
  */
662 for (n = 0; n < adapter->num_tx_queues; n++) {
663 ring = adapter->tx_ring[n];
664 pr_info("------------------------------------\n");
665 pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
666 pr_info("------------------------------------\n");
667 pr_info("%s%s %s %s %s %s\n",
668 "T [desc] [address 63:0 ] ",
669 "[PlPOIdStDDt Ln] [bi->dma ] ",
670 "leng", "ntw", "timestamp", "bi->skb");
671
672 for (i = 0; ring->desc && (i < ring->count); i++) {
673 tx_desc = IXGBE_TX_DESC(ring, i);
674 tx_buffer = &ring->tx_buffer_info[i];
675 u0 = (struct my_u0 *)tx_desc;
676 if (dma_unmap_len(tx_buffer, len) > 0) {
677 const char *ring_desc;
678
679 if (i == ring->next_to_use &&
680 i == ring->next_to_clean)
681 ring_desc = " NTC/U";
682 else if (i == ring->next_to_use)
683 ring_desc = " NTU";
684 else if (i == ring->next_to_clean)
685 ring_desc = " NTC";
686 else
687 ring_desc = "";
688 pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
689 i,
690 le64_to_cpu((__force __le64)u0->a),
691 le64_to_cpu((__force __le64)u0->b),
692 (u64)dma_unmap_addr(tx_buffer, dma),
693 dma_unmap_len(tx_buffer, len),
694 tx_buffer->next_to_watch,
695 (u64)tx_buffer->time_stamp,
696 tx_buffer->skb,
697 ring_desc);
698
699 if (netif_msg_pktdata(adapter) &&
700 tx_buffer->skb)
701 print_hex_dump(KERN_INFO, "",
702 DUMP_PREFIX_ADDRESS, 16, 1,
703 tx_buffer->skb->data,
704 dma_unmap_len(tx_buffer, len),
705 true);
706 }
707 }
708 }
709
710
711rx_ring_summary:
712 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
713 pr_info("Queue [NTU] [NTC]\n");
714 for (n = 0; n < adapter->num_rx_queues; n++) {
715 rx_ring = adapter->rx_ring[n];
716 pr_info("%5d %5X %5X\n",
717 n, rx_ring->next_to_use, rx_ring->next_to_clean);
718 }
719
720
721 if (!netif_msg_rx_status(adapter))
722 return;
723
724 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
725
 /* Receive Descriptor Formats
  *
  * The loop below dumps every descriptor in each Rx ring in both the
  * advanced "Rx Read" layout (packet/header buffer addresses) and the
  * "Rx Write-Back" layout (RSS, checksum and status fields), and flags
  * the next-to-use (NTU) and next-to-clean (NTC) positions.
  */
771 for (n = 0; n < adapter->num_rx_queues; n++) {
772 rx_ring = adapter->rx_ring[n];
773 pr_info("------------------------------------\n");
774 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
775 pr_info("------------------------------------\n");
776 pr_info("%s%s%s\n",
777 "R [desc] [ PktBuf A0] ",
778 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
779 "<-- Adv Rx Read format");
780 pr_info("%s%s%s\n",
781 "RWB[desc] [PcsmIpSHl PtRs] ",
782 "[vl er S cks ln] ---------------- [bi->skb ] ",
783 "<-- Adv Rx Write-Back format");
784
785 for (i = 0; i < rx_ring->count; i++) {
786 const char *ring_desc;
787
788 if (i == rx_ring->next_to_use)
789 ring_desc = " NTU";
790 else if (i == rx_ring->next_to_clean)
791 ring_desc = " NTC";
792 else
793 ring_desc = "";
794
795 rx_buffer_info = &rx_ring->rx_buffer_info[i];
796 rx_desc = IXGBE_RX_DESC(rx_ring, i);
797 u0 = (struct my_u0 *)rx_desc;
798 if (rx_desc->wb.upper.length) {
799
800 pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
801 i,
802 le64_to_cpu((__force __le64)u0->a),
803 le64_to_cpu((__force __le64)u0->b),
804 rx_buffer_info->skb,
805 ring_desc);
806 } else {
807 pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
808 i,
809 le64_to_cpu((__force __le64)u0->a),
810 le64_to_cpu((__force __le64)u0->b),
811 (u64)rx_buffer_info->dma,
812 rx_buffer_info->skb,
813 ring_desc);
814
815 if (netif_msg_pktdata(adapter) &&
816 rx_buffer_info->dma) {
817 print_hex_dump(KERN_INFO, "",
818 DUMP_PREFIX_ADDRESS, 16, 1,
819 page_address(rx_buffer_info->page) +
820 rx_buffer_info->page_offset,
821 ixgbe_rx_bufsz(rx_ring), true);
822 }
823 }
824 }
825 }
826}
827
828static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
829{
830 u32 ctrl_ext;
831
832
833 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
834 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
835 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
836}
837
838static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
839{
840 u32 ctrl_ext;
841
842
843 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
844 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
845 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
846}
847
/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for "other" causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
856static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
857 u8 queue, u8 msix_vector)
858{
859 u32 ivar, index;
860 struct ixgbe_hw *hw = &adapter->hw;
861 switch (hw->mac.type) {
862 case ixgbe_mac_82598EB:
863 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
864 if (direction == -1)
865 direction = 0;
866 index = (((direction * 64) + queue) >> 2) & 0x1F;
867 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
868 ivar &= ~(0xFF << (8 * (queue & 0x3)));
869 ivar |= (msix_vector << (8 * (queue & 0x3)));
870 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
871 break;
872 case ixgbe_mac_82599EB:
873 case ixgbe_mac_X540:
874 case ixgbe_mac_X550:
875 case ixgbe_mac_X550EM_x:
876 case ixgbe_mac_x550em_a:
877 if (direction == -1) {
878
879 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
880 index = ((queue & 1) * 8);
881 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
882 ivar &= ~(0xFF << index);
883 ivar |= (msix_vector << index);
884 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
885 break;
886 } else {
887
888 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
889 index = ((16 * (queue & 1)) + (8 * direction));
890 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
891 ivar &= ~(0xFF << index);
892 ivar |= (msix_vector << index);
893 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
894 break;
895 }
896 default:
897 break;
898 }
899}
900
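/* ixgbe_irq_rearm_queues - trigger an immediate interrupt for the queues in
 * @qmask by setting their bits in the EICS (interrupt cause set) register(s).
 */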
901void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
902 u64 qmask)
903{
904 u32 mask;
905
906 switch (adapter->hw.mac.type) {
907 case ixgbe_mac_82598EB:
908 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
909 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
910 break;
911 case ixgbe_mac_82599EB:
912 case ixgbe_mac_X540:
913 case ixgbe_mac_X550:
914 case ixgbe_mac_X550EM_x:
915 case ixgbe_mac_x550em_a:
916 mask = (qmask & 0xFFFFFFFF);
917 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
918 mask = (qmask >> 32);
919 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
920 break;
921 default:
922 break;
923 }
924}
925
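/* Accumulate link-level XOFF (pause frame) Rx statistics and, if any pause
 * frames were received, disarm the Tx hang checks, since a paused queue is
 * not a hung queue.
 */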
926static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
927{
928 struct ixgbe_hw *hw = &adapter->hw;
929 struct ixgbe_hw_stats *hwstats = &adapter->stats;
930 int i;
931 u32 data;
932
933 if ((hw->fc.current_mode != ixgbe_fc_full) &&
934 (hw->fc.current_mode != ixgbe_fc_rx_pause))
935 return;
936
937 switch (hw->mac.type) {
938 case ixgbe_mac_82598EB:
939 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
940 break;
941 default:
942 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
943 }
944 hwstats->lxoffrxc += data;
945
946
947 if (!data)
948 return;
949
950 for (i = 0; i < adapter->num_tx_queues; i++)
951 clear_bit(__IXGBE_HANG_CHECK_ARMED,
952 &adapter->tx_ring[i]->state);
953
954 for (i = 0; i < adapter->num_xdp_queues; i++)
955 clear_bit(__IXGBE_HANG_CHECK_ARMED,
956 &adapter->xdp_ring[i]->state);
957}
958
959static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
960{
961 struct ixgbe_hw *hw = &adapter->hw;
962 struct ixgbe_hw_stats *hwstats = &adapter->stats;
963 u32 xoff[8] = {0};
964 u8 tc;
965 int i;
966 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
967
968 if (adapter->ixgbe_ieee_pfc)
969 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
970
971 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
972 ixgbe_update_xoff_rx_lfc(adapter);
973 return;
974 }
975
976
977 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
978 u32 pxoffrxc;
979
980 switch (hw->mac.type) {
981 case ixgbe_mac_82598EB:
982 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
983 break;
984 default:
985 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
986 }
987 hwstats->pxoffrxc[i] += pxoffrxc;
988
989 tc = netdev_get_prio_tc_map(adapter->netdev, i);
990 xoff[tc] += pxoffrxc;
991 }
992
993
994 for (i = 0; i < adapter->num_tx_queues; i++) {
995 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
996
997 tc = tx_ring->dcb_tc;
998 if (xoff[tc])
999 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1000 }
1001
1002 for (i = 0; i < adapter->num_xdp_queues; i++) {
1003 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
1004
1005 tc = xdp_ring->dcb_tc;
1006 if (xoff[tc])
1007 clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
1008 }
1009}
1010
1011static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
1012{
1013 return ring->stats.packets;
1014}
1015
1016static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
1017{
1018 unsigned int head, tail;
1019
1020 head = ring->next_to_clean;
1021 tail = ring->next_to_use;
1022
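 /* pending work is the distance from next_to_clean to next_to_use,
  * accounting for ring wrap-around
  */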
1023 return ((head <= tail) ? tail : tail + ring->count) - head;
1024}
1025
1026static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1027{
1028 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1029 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1030 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1031
1032 clear_check_for_tx_hang(tx_ring);
1033
 /* Check for a hung queue, but be thorough. This verifies
  * that a transmit has been completed since the previous
  * check AND there is at least one packet pending. The
  * ARMED bit is set to indicate a potential hang. The
  * bit is cleared if a pause frame is received to remove
  * false hang detection due to PFC or 802.3x frames. By
  * requiring this to fail twice we avoid races with
  * pfc clearing the ARMED bit and conditions where we
  * run the check_tx_hang logic with a transmit completion
  * pending but without time to complete it yet.
  */
1046 if (tx_done_old == tx_done && tx_pending)
1047
1048 return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
1049 &tx_ring->state);
1050
1051 tx_ring->tx_stats.tx_done_old = tx_done;
1052
1053 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1054
1055 return false;
1056}

/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
1062static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
1063{
1064
1065
1066 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1067 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
1068 e_warn(drv, "initiating reset due to tx timeout\n");
1069 ixgbe_service_event_schedule(adapter);
1070 }
1071}
1072
/**
 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
 * @netdev: network interface device structure
 * @queue_index: Tx queue to set
 * @maxrate: desired maximum transmit bitrate
 **/
1079static int ixgbe_tx_maxrate(struct net_device *netdev,
1080 int queue_index, u32 maxrate)
1081{
1082 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1083 struct ixgbe_hw *hw = &adapter->hw;
1084 u32 bcnrc_val = ixgbe_link_mbps(adapter);
1085
1086 if (!maxrate)
1087 return 0;
1088
1089
1090 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1091 bcnrc_val /= maxrate;
1092
1093
1094 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1095 IXGBE_RTTBCNRC_RF_DEC_MASK;
1096
1097
1098 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1099
1100 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
1101 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1102
1103 return 0;
1104}
1105
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
1112static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1113 struct ixgbe_ring *tx_ring, int napi_budget)
1114{
1115 struct ixgbe_adapter *adapter = q_vector->adapter;
1116 struct ixgbe_tx_buffer *tx_buffer;
1117 union ixgbe_adv_tx_desc *tx_desc;
1118 unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
1119 unsigned int budget = q_vector->tx.work_limit;
1120 unsigned int i = tx_ring->next_to_clean;
1121
1122 if (test_bit(__IXGBE_DOWN, &adapter->state))
1123 return true;
1124
1125 tx_buffer = &tx_ring->tx_buffer_info[i];
1126 tx_desc = IXGBE_TX_DESC(tx_ring, i);
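 /* track the descriptor index as a negative offset from the end of the
  * ring so the wrap check below reduces to a test against zero
  */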
1127 i -= tx_ring->count;
1128
1129 do {
1130 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
1131
1132
1133 if (!eop_desc)
1134 break;
1135
1136
1137 smp_rmb();
1138
1139
1140 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1141 break;
1142
1143
1144 tx_buffer->next_to_watch = NULL;
1145
1146
1147 total_bytes += tx_buffer->bytecount;
1148 total_packets += tx_buffer->gso_segs;
1149 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
1150 total_ipsec++;
1151
1152
1153 if (ring_is_xdp(tx_ring))
1154 xdp_return_frame(tx_buffer->xdpf);
1155 else
1156 napi_consume_skb(tx_buffer->skb, napi_budget);
1157
1158
1159 dma_unmap_single(tx_ring->dev,
1160 dma_unmap_addr(tx_buffer, dma),
1161 dma_unmap_len(tx_buffer, len),
1162 DMA_TO_DEVICE);
1163
1164
1165 dma_unmap_len_set(tx_buffer, len, 0);
1166
1167
1168 while (tx_desc != eop_desc) {
1169 tx_buffer++;
1170 tx_desc++;
1171 i++;
1172 if (unlikely(!i)) {
1173 i -= tx_ring->count;
1174 tx_buffer = tx_ring->tx_buffer_info;
1175 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1176 }
1177
1178
1179 if (dma_unmap_len(tx_buffer, len)) {
1180 dma_unmap_page(tx_ring->dev,
1181 dma_unmap_addr(tx_buffer, dma),
1182 dma_unmap_len(tx_buffer, len),
1183 DMA_TO_DEVICE);
1184 dma_unmap_len_set(tx_buffer, len, 0);
1185 }
1186 }
1187
1188
1189 tx_buffer++;
1190 tx_desc++;
1191 i++;
1192 if (unlikely(!i)) {
1193 i -= tx_ring->count;
1194 tx_buffer = tx_ring->tx_buffer_info;
1195 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1196 }
1197
1198
1199 prefetch(tx_desc);
1200
1201
1202 budget--;
1203 } while (likely(budget));
1204
1205 i += tx_ring->count;
1206 tx_ring->next_to_clean = i;
1207 u64_stats_update_begin(&tx_ring->syncp);
1208 tx_ring->stats.bytes += total_bytes;
1209 tx_ring->stats.packets += total_packets;
1210 u64_stats_update_end(&tx_ring->syncp);
1211 q_vector->tx.total_bytes += total_bytes;
1212 q_vector->tx.total_packets += total_packets;
1213 adapter->tx_ipsec += total_ipsec;
1214
1215 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1216
1217 struct ixgbe_hw *hw = &adapter->hw;
1218 e_err(drv, "Detected Tx Unit Hang %s\n"
1219 " Tx Queue <%d>\n"
1220 " TDH, TDT <%x>, <%x>\n"
1221 " next_to_use <%x>\n"
1222 " next_to_clean <%x>\n"
1223 "tx_buffer_info[next_to_clean]\n"
1224 " time_stamp <%lx>\n"
1225 " jiffies <%lx>\n",
1226 ring_is_xdp(tx_ring) ? "(XDP)" : "",
1227 tx_ring->queue_index,
1228 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1229 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1230 tx_ring->next_to_use, i,
1231 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1232
1233 if (!ring_is_xdp(tx_ring))
1234 netif_stop_subqueue(tx_ring->netdev,
1235 tx_ring->queue_index);
1236
1237 e_info(probe,
1238 "tx hang %d detected on queue %d, resetting adapter\n",
1239 adapter->tx_timeout_count + 1, tx_ring->queue_index);
1240
1241
1242 ixgbe_tx_timeout_reset(adapter);
1243
1244
1245 return true;
1246 }
1247
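 /* XDP rings have no stack Tx queue to account for or wake up */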
1248 if (ring_is_xdp(tx_ring))
1249 return !!budget;
1250
1251 netdev_tx_completed_queue(txring_txq(tx_ring),
1252 total_packets, total_bytes);
1253
1254#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
1255 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1256 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1257
1258
1259
1260 smp_mb();
1261 if (__netif_subqueue_stopped(tx_ring->netdev,
1262 tx_ring->queue_index)
1263 && !test_bit(__IXGBE_DOWN, &adapter->state)) {
1264 netif_wake_subqueue(tx_ring->netdev,
1265 tx_ring->queue_index);
1266 ++tx_ring->tx_stats.restart_queue;
1267 }
1268 }
1269
1270 return !!budget;
1271}
1272
1273#ifdef CONFIG_IXGBE_DCA
1274static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
1275 struct ixgbe_ring *tx_ring,
1276 int cpu)
1277{
1278 struct ixgbe_hw *hw = &adapter->hw;
1279 u32 txctrl = 0;
1280 u16 reg_offset;
1281
1282 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1283 txctrl = dca3_get_tag(tx_ring->dev, cpu);
1284
1285 switch (hw->mac.type) {
1286 case ixgbe_mac_82598EB:
1287 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1288 break;
1289 case ixgbe_mac_82599EB:
1290 case ixgbe_mac_X540:
1291 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
1292 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
1293 break;
1294 default:
1295
1296 return;
1297 }
1298
1299
1300
1301
1302
1303
1304 txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1305 IXGBE_DCA_TXCTRL_DATA_RRO_EN |
1306 IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1307
1308 IXGBE_WRITE_REG(hw, reg_offset, txctrl);
1309}
1310
1311static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
1312 struct ixgbe_ring *rx_ring,
1313 int cpu)
1314{
1315 struct ixgbe_hw *hw = &adapter->hw;
1316 u32 rxctrl = 0;
1317 u8 reg_idx = rx_ring->reg_idx;
1318
1319 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1320 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
1321
1322 switch (hw->mac.type) {
1323 case ixgbe_mac_82599EB:
1324 case ixgbe_mac_X540:
1325 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
1326 break;
1327 default:
1328 break;
1329 }
1330
1331
1332
1333
1334
1335
1336 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1337 IXGBE_DCA_RXCTRL_DATA_DCA_EN |
1338 IXGBE_DCA_RXCTRL_DESC_DCA_EN;
1339
1340 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
1341}
1342
1343static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1344{
1345 struct ixgbe_adapter *adapter = q_vector->adapter;
1346 struct ixgbe_ring *ring;
1347 int cpu = get_cpu();
1348
1349 if (q_vector->cpu == cpu)
1350 goto out_no_update;
1351
1352 ixgbe_for_each_ring(ring, q_vector->tx)
1353 ixgbe_update_tx_dca(adapter, ring, cpu);
1354
1355 ixgbe_for_each_ring(ring, q_vector->rx)
1356 ixgbe_update_rx_dca(adapter, ring, cpu);
1357
1358 q_vector->cpu = cpu;
1359out_no_update:
1360 put_cpu();
1361}
1362
1363static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1364{
1365 int i;
1366
1367
1368 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1369 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1370 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1371 else
1372 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1373 IXGBE_DCA_CTRL_DCA_DISABLE);
1374
1375 for (i = 0; i < adapter->num_q_vectors; i++) {
1376 adapter->q_vector[i]->cpu = -1;
1377 ixgbe_update_dca(adapter->q_vector[i]);
1378 }
1379}
1380
1381static int __ixgbe_notify_dca(struct device *dev, void *data)
1382{
1383 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1384 unsigned long event = *(unsigned long *)data;
1385
1386 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1387 return 0;
1388
1389 switch (event) {
1390 case DCA_PROVIDER_ADD:
1391
1392 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1393 break;
1394 if (dca_add_requester(dev) == 0) {
1395 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1396 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1397 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1398 break;
1399 }
 /* fall through - DCA is disabled when dca_add_requester() fails */
1401 case DCA_PROVIDER_REMOVE:
1402 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1403 dca_remove_requester(dev);
1404 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1405 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1406 IXGBE_DCA_CTRL_DCA_DISABLE);
1407 }
1408 break;
1409 }
1410
1411 return 0;
1412}
1413
1414#endif
1415
1416#define IXGBE_RSS_L4_TYPES_MASK \
1417 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
1418 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
1419 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
1420 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
1421
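/* Fill in skb->hash from the RSS hash in the Rx descriptor, marking it as an
 * L4 hash only for the TCP/UDP RSS types listed above.
 */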
1422static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1423 union ixgbe_adv_rx_desc *rx_desc,
1424 struct sk_buff *skb)
1425{
1426 u16 rss_type;
1427
1428 if (!(ring->netdev->features & NETIF_F_RXHASH))
1429 return;
1430
1431 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
1432 IXGBE_RXDADV_RSSTYPE_MASK;
1433
1434 if (!rss_type)
1435 return;
1436
1437 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1438 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
1439 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1440}
1441
1442#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
1450static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1451 union ixgbe_adv_rx_desc *rx_desc)
1452{
1453 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1454
1455 return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1456 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1457 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1458 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1459}
1460
1461#endif
1462
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
1468static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1469 union ixgbe_adv_rx_desc *rx_desc,
1470 struct sk_buff *skb)
1471{
1472 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1473 bool encap_pkt = false;
1474
1475 skb_checksum_none_assert(skb);
1476
1477
1478 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1479 return;
1480
1481
1482 if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
1483 encap_pkt = true;
1484 skb->encapsulation = 1;
1485 }
1486
1487
1488 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1489 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1490 ring->rx_stats.csum_err++;
1491 return;
1492 }
1493
1494 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1495 return;
1496
1497 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1498
1499
1500
1501
1502 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1503 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1504 return;
1505
1506 ring->rx_stats.csum_err++;
1507 return;
1508 }
1509
1510
1511 skb->ip_summed = CHECKSUM_UNNECESSARY;
1512 if (encap_pkt) {
1513 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1514 return;
1515
1516 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1517 skb->ip_summed = CHECKSUM_NONE;
1518 return;
1519 }
1520
1521 skb->csum_level = 1;
1522 }
1523}
1524
1525static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
1526{
1527 return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
1528}
1529
1530static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1531 struct ixgbe_rx_buffer *bi)
1532{
1533 struct page *page = bi->page;
1534 dma_addr_t dma;
1535
1536
1537 if (likely(page))
1538 return true;
1539
1540
1541 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
1542 if (unlikely(!page)) {
1543 rx_ring->rx_stats.alloc_rx_page_failed++;
1544 return false;
1545 }
1546
1547
1548 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1549 ixgbe_rx_pg_size(rx_ring),
1550 DMA_FROM_DEVICE,
1551 IXGBE_RX_DMA_ATTR);
1552
1553
1554
1555
1556
1557 if (dma_mapping_error(rx_ring->dev, dma)) {
1558 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1559
1560 rx_ring->rx_stats.alloc_rx_page_failed++;
1561 return false;
1562 }
1563
1564 bi->dma = dma;
1565 bi->page = page;
1566 bi->page_offset = ixgbe_rx_offset(rx_ring);
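 /* take a large up-front page reference and track per-buffer usage with
  * pagecnt_bias so the hot path avoids atomic refcount updates
  */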
1567 page_ref_add(page, USHRT_MAX - 1);
1568 bi->pagecnt_bias = USHRT_MAX;
1569 rx_ring->rx_stats.alloc_rx_page++;
1570
1571 return true;
1572}
1573
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
1579void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1580{
1581 union ixgbe_adv_rx_desc *rx_desc;
1582 struct ixgbe_rx_buffer *bi;
1583 u16 i = rx_ring->next_to_use;
1584 u16 bufsz;
1585
1586
1587 if (!cleaned_count)
1588 return;
1589
1590 rx_desc = IXGBE_RX_DESC(rx_ring, i);
1591 bi = &rx_ring->rx_buffer_info[i];
1592 i -= rx_ring->count;
1593
1594 bufsz = ixgbe_rx_bufsz(rx_ring);
1595
1596 do {
1597 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1598 break;
1599
1600
1601 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1602 bi->page_offset, bufsz,
1603 DMA_FROM_DEVICE);
1604
1605
1606
1607
1608
1609 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1610
1611 rx_desc++;
1612 bi++;
1613 i++;
1614 if (unlikely(!i)) {
1615 rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1616 bi = rx_ring->rx_buffer_info;
1617 i -= rx_ring->count;
1618 }
1619
1620
1621 rx_desc->wb.upper.length = 0;
1622
1623 cleaned_count--;
1624 } while (cleaned_count);
1625
1626 i += rx_ring->count;
1627
1628 if (rx_ring->next_to_use != i) {
1629 rx_ring->next_to_use = i;
1630
1631
1632 rx_ring->next_to_alloc = i;
1633
 /* Force memory writes to complete before letting h/w
  * know there are new descriptors to fetch.  (Only
  * applicable for weak-ordered memory model archs,
  * such as IA-64).
  */
1639 wmb();
1640 writel(i, rx_ring->tail);
1641 }
1642}
1643
1644static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1645 struct sk_buff *skb)
1646{
1647 u16 hdr_len = skb_headlen(skb);
1648
1649
1650 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1651 IXGBE_CB(skb)->append_cnt);
1652 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1653}
1654
1655static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1656 struct sk_buff *skb)
1657{
1658
1659 if (!IXGBE_CB(skb)->append_cnt)
1660 return;
1661
1662 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1663 rx_ring->rx_stats.rsc_flush++;
1664
1665 ixgbe_set_rsc_gso_size(rx_ring, skb);
1666
1667
1668 IXGBE_CB(skb)->append_cnt = 0;
1669}
1670
/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
1681void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1682 union ixgbe_adv_rx_desc *rx_desc,
1683 struct sk_buff *skb)
1684{
1685 struct net_device *dev = rx_ring->netdev;
1686 u32 flags = rx_ring->q_vector->adapter->flags;
1687
1688 ixgbe_update_rsc_stats(rx_ring, skb);
1689
1690 ixgbe_rx_hash(rx_ring, rx_desc, skb);
1691
1692 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1693
1694 if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
1695 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
1696
1697 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1698 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1699 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1700 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1701 }
1702
1703 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
1704 ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
1705
1706
1707 if (netif_is_ixgbe(dev))
1708 skb_record_rx_queue(skb, rx_ring->queue_index);
1709 else
1710 macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
1711 false);
1712
1713 skb->protocol = eth_type_trans(skb, dev);
1714}
1715
1716void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1717 struct sk_buff *skb)
1718{
1719 napi_gro_receive(&q_vector->napi, skb);
1720}
1721
/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
1733static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1734 union ixgbe_adv_rx_desc *rx_desc,
1735 struct sk_buff *skb)
1736{
1737 u32 ntc = rx_ring->next_to_clean + 1;
1738
1739
1740 ntc = (ntc < rx_ring->count) ? ntc : 0;
1741 rx_ring->next_to_clean = ntc;
1742
1743 prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1744
1745
1746 if (ring_is_rsc_enabled(rx_ring)) {
1747 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1748 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1749
1750 if (unlikely(rsc_enabled)) {
1751 u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1752
1753 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1754 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1755
1756
1757 ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1758 ntc &= IXGBE_RXDADV_NEXTP_MASK;
1759 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1760 }
1761 }
1762
1763
1764 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1765 return false;
1766
1767
1768 rx_ring->rx_buffer_info[ntc].skb = skb;
1769 rx_ring->rx_stats.non_eop_descs++;
1770
1771 return true;
1772}
1773
/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
1786static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1787 struct sk_buff *skb)
1788{
1789 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1790 unsigned char *va;
1791 unsigned int pull_len;
1792
1793
1794
1795
1796
1797
1798 va = skb_frag_address(frag);
1799
1800
1801
1802
1803
1804 pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
1805
1806
1807 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1808
1809
1810 skb_frag_size_sub(frag, pull_len);
1811 skb_frag_off_add(frag, pull_len);
1812 skb->data_len -= pull_len;
1813 skb->tail += pull_len;
1814}
1815
/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */
1826static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1827 struct sk_buff *skb)
1828{
1829 if (ring_uses_build_skb(rx_ring)) {
1830 unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
1831
1832 dma_sync_single_range_for_cpu(rx_ring->dev,
1833 IXGBE_CB(skb)->dma,
1834 offset,
1835 skb_headlen(skb),
1836 DMA_FROM_DEVICE);
1837 } else {
1838 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1839
1840 dma_sync_single_range_for_cpu(rx_ring->dev,
1841 IXGBE_CB(skb)->dma,
1842 skb_frag_off(frag),
1843 skb_frag_size(frag),
1844 DMA_FROM_DEVICE);
1845 }
1846
1847
1848 if (unlikely(IXGBE_CB(skb)->page_released)) {
1849 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
1850 ixgbe_rx_pg_size(rx_ring),
1851 DMA_FROM_DEVICE,
1852 IXGBE_RX_DMA_ATTR);
1853 }
1854}
1855
/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check if the skb is valid; in the XDP case it will be an error pointer.
 * Return true in this case to abort processing and advance to the next
 * descriptor.
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
1878bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1879 union ixgbe_adv_rx_desc *rx_desc,
1880 struct sk_buff *skb)
1881{
1882 struct net_device *netdev = rx_ring->netdev;
1883
1884
1885 if (IS_ERR(skb))
1886 return true;
1887
1888
1889
1890
1891 if (!netdev ||
1892 (unlikely(ixgbe_test_staterr(rx_desc,
1893 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1894 !(netdev->features & NETIF_F_RXALL)))) {
1895 dev_kfree_skb_any(skb);
1896 return true;
1897 }
1898
1899
1900 if (!skb_headlen(skb))
1901 ixgbe_pull_tail(rx_ring, skb);
1902
1903#ifdef IXGBE_FCOE
1904
1905 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1906 return false;
1907
1908#endif
1909
1910 if (eth_skb_pad(skb))
1911 return true;
1912
1913 return false;
1914}
1915
/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
1923static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1924 struct ixgbe_rx_buffer *old_buff)
1925{
1926 struct ixgbe_rx_buffer *new_buff;
1927 u16 nta = rx_ring->next_to_alloc;
1928
1929 new_buff = &rx_ring->rx_buffer_info[nta];
1930
1931
1932 nta++;
1933 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1934
1935
1936
1937
1938
1939 new_buff->dma = old_buff->dma;
1940 new_buff->page = old_buff->page;
1941 new_buff->page_offset = old_buff->page_offset;
1942 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1943}
1944
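/* a page is "reserved" (not reusable) if it belongs to a remote NUMA node or
 * was allocated from the pfmemalloc emergency reserves
 */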
1945static inline bool ixgbe_page_is_reserved(struct page *page)
1946{
1947 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1948}
1949
1950static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
1951{
1952 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1953 struct page *page = rx_buffer->page;
1954
1955
1956 if (unlikely(ixgbe_page_is_reserved(page)))
1957 return false;
1958
1959#if (PAGE_SIZE < 8192)
1960
1961 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
1962 return false;
1963#else
1964
1965
1966
1967
1968
1969#define IXGBE_LAST_OFFSET \
1970 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
1971 if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
1972 return false;
1973#endif
1974
1975
1976
1977
1978
1979 if (unlikely(pagecnt_bias == 1)) {
1980 page_ref_add(page, USHRT_MAX - 1);
1981 rx_buffer->pagecnt_bias = USHRT_MAX;
1982 }
1983
1984 return true;
1985}
1986
/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of data in rx_buffer
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
2002static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
2003 struct ixgbe_rx_buffer *rx_buffer,
2004 struct sk_buff *skb,
2005 unsigned int size)
2006{
2007#if (PAGE_SIZE < 8192)
2008 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2009#else
2010 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2011 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
2012 SKB_DATA_ALIGN(size);
2013#endif
2014 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
2015 rx_buffer->page_offset, size, truesize);
2016#if (PAGE_SIZE < 8192)
2017 rx_buffer->page_offset ^= truesize;
2018#else
2019 rx_buffer->page_offset += truesize;
2020#endif
2021}
2022
2023static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
2024 union ixgbe_adv_rx_desc *rx_desc,
2025 struct sk_buff **skb,
2026 const unsigned int size)
2027{
2028 struct ixgbe_rx_buffer *rx_buffer;
2029
2030 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
2031 prefetchw(rx_buffer->page);
2032 *skb = rx_buffer->skb;
2033
2034
2035
2036
2037
2038 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
2039 if (!*skb)
2040 goto skip_sync;
2041 } else {
2042 if (*skb)
2043 ixgbe_dma_sync_frag(rx_ring, *skb);
2044 }
2045
2046
2047 dma_sync_single_range_for_cpu(rx_ring->dev,
2048 rx_buffer->dma,
2049 rx_buffer->page_offset,
2050 size,
2051 DMA_FROM_DEVICE);
2052skip_sync:
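 /* assume one of our page references is handed to the stack for this
  * buffer; callers give it back (pagecnt_bias++) if the buffer is not
  * actually consumed
  */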
2053 rx_buffer->pagecnt_bias--;
2054
2055 return rx_buffer;
2056}
2057
2058static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
2059 struct ixgbe_rx_buffer *rx_buffer,
2060 struct sk_buff *skb)
2061{
2062 if (ixgbe_can_reuse_rx_page(rx_buffer)) {
2063
2064 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2065 } else {
2066 if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
2067
2068 IXGBE_CB(skb)->page_released = true;
2069 } else {
2070
2071 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2072 ixgbe_rx_pg_size(rx_ring),
2073 DMA_FROM_DEVICE,
2074 IXGBE_RX_DMA_ATTR);
2075 }
2076 __page_frag_cache_drain(rx_buffer->page,
2077 rx_buffer->pagecnt_bias);
2078 }
2079
2080
2081 rx_buffer->page = NULL;
2082 rx_buffer->skb = NULL;
2083}
2084
2085static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
2086 struct ixgbe_rx_buffer *rx_buffer,
2087 struct xdp_buff *xdp,
2088 union ixgbe_adv_rx_desc *rx_desc)
2089{
2090 unsigned int size = xdp->data_end - xdp->data;
2091#if (PAGE_SIZE < 8192)
2092 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2093#else
2094 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
2095 xdp->data_hard_start);
2096#endif
2097 struct sk_buff *skb;
2098
2099
2100 prefetch(xdp->data);
2101#if L1_CACHE_BYTES < 128
2102 prefetch(xdp->data + L1_CACHE_BYTES);
2103#endif
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
2122 if (unlikely(!skb))
2123 return NULL;
2124
2125 if (size > IXGBE_RX_HDR_SIZE) {
2126 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2127 IXGBE_CB(skb)->dma = rx_buffer->dma;
2128
2129 skb_add_rx_frag(skb, 0, rx_buffer->page,
2130 xdp->data - page_address(rx_buffer->page),
2131 size, truesize);
2132#if (PAGE_SIZE < 8192)
2133 rx_buffer->page_offset ^= truesize;
2134#else
2135 rx_buffer->page_offset += truesize;
2136#endif
2137 } else {
2138 memcpy(__skb_put(skb, size),
2139 xdp->data, ALIGN(size, sizeof(long)));
2140 rx_buffer->pagecnt_bias++;
2141 }
2142
2143 return skb;
2144}
2145
2146static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
2147 struct ixgbe_rx_buffer *rx_buffer,
2148 struct xdp_buff *xdp,
2149 union ixgbe_adv_rx_desc *rx_desc)
2150{
2151 unsigned int metasize = xdp->data - xdp->data_meta;
2152#if (PAGE_SIZE < 8192)
2153 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2154#else
2155 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2156 SKB_DATA_ALIGN(xdp->data_end -
2157 xdp->data_hard_start);
2158#endif
2159 struct sk_buff *skb;
2160
2161
2162
2163
2164
2165
2166 prefetch(xdp->data_meta);
2167#if L1_CACHE_BYTES < 128
2168 prefetch(xdp->data_meta + L1_CACHE_BYTES);
2169#endif
2170
2171
2172 skb = build_skb(xdp->data_hard_start, truesize);
2173 if (unlikely(!skb))
2174 return NULL;
2175
2176
2177 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2178 __skb_put(skb, xdp->data_end - xdp->data);
2179 if (metasize)
2180 skb_metadata_set(skb, metasize);
2181
2182
2183 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2184 IXGBE_CB(skb)->dma = rx_buffer->dma;
2185
2186
2187#if (PAGE_SIZE < 8192)
2188 rx_buffer->page_offset ^= truesize;
2189#else
2190 rx_buffer->page_offset += truesize;
2191#endif
2192
2193 return skb;
2194}
2195
2196static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
2197 struct ixgbe_ring *rx_ring,
2198 struct xdp_buff *xdp)
2199{
2200 int err, result = IXGBE_XDP_PASS;
2201 struct bpf_prog *xdp_prog;
2202 struct xdp_frame *xdpf;
2203 u32 act;
2204
2205 rcu_read_lock();
2206 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2207
2208 if (!xdp_prog)
2209 goto xdp_out;
2210
2211 prefetchw(xdp->data_hard_start);
2212
2213 act = bpf_prog_run_xdp(xdp_prog, xdp);
2214 switch (act) {
2215 case XDP_PASS:
2216 break;
2217 case XDP_TX:
2218 xdpf = convert_to_xdp_frame(xdp);
2219 if (unlikely(!xdpf)) {
2220 result = IXGBE_XDP_CONSUMED;
2221 break;
2222 }
2223 result = ixgbe_xmit_xdp_ring(adapter, xdpf);
2224 break;
2225 case XDP_REDIRECT:
2226 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
2227 if (!err)
2228 result = IXGBE_XDP_REDIR;
2229 else
2230 result = IXGBE_XDP_CONSUMED;
2231 break;
2232 default:
2233 bpf_warn_invalid_xdp_action(act);
 /* fallthrough */
2235 case XDP_ABORTED:
2236 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 /* fallthrough -- handle aborts by dropping packet */
2238 case XDP_DROP:
2239 result = IXGBE_XDP_CONSUMED;
2240 break;
2241 }
2242xdp_out:
2243 rcu_read_unlock();
2244 return ERR_PTR(-result);
2245}
2246
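/* Advance the buffer offset within the page after it was consumed by
 * XDP_TX/XDP_REDIRECT; for pages split in half the offset simply toggles.
 */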
2247static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
2248 struct ixgbe_rx_buffer *rx_buffer,
2249 unsigned int size)
2250{
2251#if (PAGE_SIZE < 8192)
2252 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2253
2254 rx_buffer->page_offset ^= truesize;
2255#else
2256 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2257 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
2258 SKB_DATA_ALIGN(size);
2259
2260 rx_buffer->page_offset += truesize;
2261#endif
2262}
2263
/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
2277static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2278 struct ixgbe_ring *rx_ring,
2279 const int budget)
2280{
2281 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2282 struct ixgbe_adapter *adapter = q_vector->adapter;
2283#ifdef IXGBE_FCOE
2284 int ddp_bytes;
2285 unsigned int mss = 0;
2286#endif
2287 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2288 unsigned int xdp_xmit = 0;
2289 struct xdp_buff xdp;
2290
2291 xdp.rxq = &rx_ring->xdp_rxq;
2292
2293 while (likely(total_rx_packets < budget)) {
2294 union ixgbe_adv_rx_desc *rx_desc;
2295 struct ixgbe_rx_buffer *rx_buffer;
2296 struct sk_buff *skb;
2297 unsigned int size;
2298
2299
2300 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2301 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2302 cleaned_count = 0;
2303 }
2304
2305 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2306 size = le16_to_cpu(rx_desc->wb.upper.length);
2307 if (!size)
2308 break;
2309
 /* This memory barrier is needed to keep us from reading
  * any other fields out of the rx_desc until we know the
  * descriptor has been written back
  */
2314 dma_rmb();
2315
2316 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
2317
2318
2319 if (!skb) {
2320 xdp.data = page_address(rx_buffer->page) +
2321 rx_buffer->page_offset;
2322 xdp.data_meta = xdp.data;
2323 xdp.data_hard_start = xdp.data -
2324 ixgbe_rx_offset(rx_ring);
2325 xdp.data_end = xdp.data + size;
2326
2327 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2328 }
2329
2330 if (IS_ERR(skb)) {
2331 unsigned int xdp_res = -PTR_ERR(skb);
2332
2333 if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
2334 xdp_xmit |= xdp_res;
2335 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2336 } else {
2337 rx_buffer->pagecnt_bias++;
2338 }
2339 total_rx_packets++;
2340 total_rx_bytes += size;
2341 } else if (skb) {
2342 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
2343 } else if (ring_uses_build_skb(rx_ring)) {
2344 skb = ixgbe_build_skb(rx_ring, rx_buffer,
2345 &xdp, rx_desc);
2346 } else {
2347 skb = ixgbe_construct_skb(rx_ring, rx_buffer,
2348 &xdp, rx_desc);
2349 }
2350
2351
2352 if (!skb) {
2353 rx_ring->rx_stats.alloc_rx_buff_failed++;
2354 rx_buffer->pagecnt_bias++;
2355 break;
2356 }
2357
2358 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
2359 cleaned_count++;
2360
2361
2362 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2363 continue;
2364
2365
2366 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2367 continue;
2368
2369
2370 total_rx_bytes += skb->len;
2371
2372
2373 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2374
2375#ifdef IXGBE_FCOE
2376
2377 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2378 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2379
2380 if (ddp_bytes > 0) {
2381 if (!mss) {
2382 mss = rx_ring->netdev->mtu -
2383 sizeof(struct fcoe_hdr) -
2384 sizeof(struct fc_frame_header) -
2385 sizeof(struct fcoe_crc_eof);
2386 if (mss > 512)
2387 mss &= ~511;
2388 }
2389 total_rx_bytes += ddp_bytes;
2390 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2391 mss);
2392 }
2393 if (!ddp_bytes) {
2394 dev_kfree_skb_any(skb);
2395 continue;
2396 }
2397 }
2398
2399#endif
2400 ixgbe_rx_skb(q_vector, skb);
2401
2402
2403 total_rx_packets++;
2404 }
2405
2406 if (xdp_xmit & IXGBE_XDP_REDIR)
2407 xdp_do_flush_map();
2408
2409 if (xdp_xmit & IXGBE_XDP_TX) {
2410 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
2411
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
2415 wmb();
2416 writel(ring->next_to_use, ring->tail);
2417 }
2418
2419 u64_stats_update_begin(&rx_ring->syncp);
2420 rx_ring->stats.packets += total_rx_packets;
2421 rx_ring->stats.bytes += total_rx_bytes;
2422 u64_stats_update_end(&rx_ring->syncp);
2423 q_vector->rx.total_packets += total_rx_packets;
2424 q_vector->rx.total_bytes += total_rx_bytes;
2425
2426 return total_rx_packets;
2427}
2428
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
2436static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2437{
2438 struct ixgbe_q_vector *q_vector;
2439 int v_idx;
2440 u32 mask;
2441
	/* Populate MSIX to EITR Select */
2443 if (adapter->num_vfs > 32) {
2444 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
2445 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2446 }
2447
	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
2452 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2453 struct ixgbe_ring *ring;
2454 q_vector = adapter->q_vector[v_idx];
2455
2456 ixgbe_for_each_ring(ring, q_vector->rx)
2457 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2458
2459 ixgbe_for_each_ring(ring, q_vector->tx)
2460 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2461
2462 ixgbe_write_eitr(q_vector);
2463 }
2464
2465 switch (adapter->hw.mac.type) {
2466 case ixgbe_mac_82598EB:
2467 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2468 v_idx);
2469 break;
2470 case ixgbe_mac_82599EB:
2471 case ixgbe_mac_X540:
2472 case ixgbe_mac_X550:
2473 case ixgbe_mac_X550EM_x:
2474 case ixgbe_mac_x550em_a:
2475 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2476 break;
2477 default:
2478 break;
2479 }
2480 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2481
2482
2483 mask = IXGBE_EIMS_ENABLE_MASK;
2484 mask &= ~(IXGBE_EIMS_OTHER |
2485 IXGBE_EIMS_MAILBOX |
2486 IXGBE_EIMS_LSC);
2487
2488 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2489}
2490
/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per-interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were computed based on theoretical maximum wire speed and
 * thresholds were set based on testing data as well as attempting to
 * minimize response time while increasing bulk throughput.
 **/
2504static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2505 struct ixgbe_ring_container *ring_container)
2506{
2507 unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
2508 IXGBE_ITR_ADAPTIVE_LATENCY;
2509 unsigned int avg_wire_size, packets, bytes;
2510 unsigned long next_update = jiffies;
2511
	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
2515 if (!ring_container->ring)
2516 return;
2517
2518
2519
2520
2521
2522
2523 if (time_after(next_update, ring_container->next_update))
2524 goto clear_counts;
2525
2526 packets = ring_container->total_packets;
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536 if (!packets) {
2537 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2538 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2539 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2540 itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
2541 goto clear_counts;
2542 }
2543
2544 bytes = ring_container->total_bytes;
2545
2546
2547
2548
2549
2550 if (packets < 4 && bytes < 9000) {
2551 itr = IXGBE_ITR_ADAPTIVE_LATENCY;
2552 goto adjust_by_size;
2553 }
2554
2555
2556
2557
2558
2559 if (packets < 48) {
2560 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2561 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2562 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2563 goto clear_counts;
2564 }
2565
2566
2567
2568
2569 if (packets < 96) {
2570 itr = q_vector->itr >> 2;
2571 goto clear_counts;
2572 }
2573
2574
2575
2576
2577
2578 if (packets < 256) {
2579 itr = q_vector->itr >> 3;
2580 if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
2581 itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
2582 goto clear_counts;
2583 }
2584
2585
2586
2587
2588
2589
2590
2591 itr = IXGBE_ITR_ADAPTIVE_BULK;
2592
2593adjust_by_size:
2594
2595
2596
2597
2598
2599 avg_wire_size = bytes / packets;
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616 if (avg_wire_size <= 60) {
2617
2618 avg_wire_size = 5120;
2619 } else if (avg_wire_size <= 316) {
2620
2621 avg_wire_size *= 40;
2622 avg_wire_size += 2720;
2623 } else if (avg_wire_size <= 1084) {
2624
2625 avg_wire_size *= 15;
2626 avg_wire_size += 11452;
2627 } else if (avg_wire_size < 1968) {
2628
2629 avg_wire_size *= 5;
2630 avg_wire_size += 22420;
2631 } else {
2632
2633 avg_wire_size = 32256;
2634 }
2635
2636
2637
2638
2639 if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
2640 avg_wire_size >>= 1;
2641
2642
2643
2644
2645
2646
2647
2648
2649 switch (q_vector->adapter->link_speed) {
2650 case IXGBE_LINK_SPEED_10GB_FULL:
2651 case IXGBE_LINK_SPEED_100_FULL:
2652 default:
2653 itr += DIV_ROUND_UP(avg_wire_size,
2654 IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
2655 IXGBE_ITR_ADAPTIVE_MIN_INC;
2656 break;
2657 case IXGBE_LINK_SPEED_2_5GB_FULL:
2658 case IXGBE_LINK_SPEED_1GB_FULL:
2659 case IXGBE_LINK_SPEED_10_FULL:
2660 if (avg_wire_size > 8064)
2661 avg_wire_size = 8064;
2662 itr += DIV_ROUND_UP(avg_wire_size,
2663 IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
2664 IXGBE_ITR_ADAPTIVE_MIN_INC;
2665 break;
2666 }
2667
2668clear_counts:
2669
2670 ring_container->itr = itr;
2671
2672
2673 ring_container->next_update = next_update + 1;
2674
2675 ring_container->total_bytes = 0;
2676 ring_container->total_packets = 0;
2677}
2678
/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 **/
2687void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2688{
2689 struct ixgbe_adapter *adapter = q_vector->adapter;
2690 struct ixgbe_hw *hw = &adapter->hw;
2691 int v_idx = q_vector->v_idx;
2692 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2693
2694 switch (adapter->hw.mac.type) {
2695 case ixgbe_mac_82598EB:
2696
2697 itr_reg |= (itr_reg << 16);
2698 break;
2699 case ixgbe_mac_82599EB:
2700 case ixgbe_mac_X540:
2701 case ixgbe_mac_X550:
2702 case ixgbe_mac_X550EM_x:
2703 case ixgbe_mac_x550em_a:
2704
2705
2706
2707
2708 itr_reg |= IXGBE_EITR_CNT_WDIS;
2709 break;
2710 default:
2711 break;
2712 }
2713 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2714}
2715
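/* Recompute the adaptive ITR from the Tx and Rx containers and, if the
 * merged value changed, program it into EITR for this vector.
 */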
2716static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2717{
2718 u32 new_itr;
2719
2720 ixgbe_update_itr(q_vector, &q_vector->tx);
2721 ixgbe_update_itr(q_vector, &q_vector->rx);
2722
2723
2724 new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
2725
2726
2727 new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
2728 new_itr <<= 2;
2729
2730 if (new_itr != q_vector->itr) {
2731
2732 q_vector->itr = new_itr;
2733
2734 ixgbe_write_eitr(q_vector);
2735 }
2736}
2737
/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/
2742static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2743{
2744 struct ixgbe_hw *hw = &adapter->hw;
2745 u32 eicr = adapter->interrupt_event;
2746 s32 rc;
2747
2748 if (test_bit(__IXGBE_DOWN, &adapter->state))
2749 return;
2750
2751 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2752 return;
2753
2754 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2755
2756 switch (hw->device_id) {
2757 case IXGBE_DEV_ID_82599_T3_LOM:
2758
2759
2760
2761
2762
2763
2764
2765 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2766 !(eicr & IXGBE_EICR_LSC))
2767 return;
2768
2769 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2770 u32 speed;
2771 bool link_up = false;
2772
2773 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2774
2775 if (link_up)
2776 return;
2777 }
2778
2779
2780 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2781 return;
2782
2783 break;
2784 case IXGBE_DEV_ID_X550EM_A_1G_T:
2785 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2786 rc = hw->phy.ops.check_overtemp(hw);
2787 if (rc != IXGBE_ERR_OVERTEMP)
2788 return;
2789 break;
2790 default:
2791 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2792 return;
2793 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2794 return;
2795 break;
2796 }
2797 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2798
2799 adapter->interrupt_event = 0;
2800}
2801
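/* Report a fan failure when the adapter is fan-fail capable and the
 * GPI SDP1 cause is set, then clear the cause so it only fires once.
 */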
2802static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2803{
2804 struct ixgbe_hw *hw = &adapter->hw;
2805
2806 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2807 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2808 e_crit(probe, "Fan has stopped, replace the adapter\n");
2809
2810 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2811 }
2812}
2813
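/* Handle a thermal sensor interrupt: either schedule the service task to
 * evaluate the event (82599/x550em_a) or report the overtemp condition
 * directly (X540/X550).
 */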
2814static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2815{
2816 struct ixgbe_hw *hw = &adapter->hw;
2817
2818 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2819 return;
2820
2821 switch (adapter->hw.mac.type) {
2822 case ixgbe_mac_82599EB:
2823
2824
2825
2826
2827 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2828 (eicr & IXGBE_EICR_LSC)) &&
2829 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2830 adapter->interrupt_event = eicr;
2831 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2832 ixgbe_service_event_schedule(adapter);
2833 return;
2834 }
2835 return;
2836 case ixgbe_mac_x550em_a:
2837 if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
2838 adapter->interrupt_event = eicr;
2839 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2840 ixgbe_service_event_schedule(adapter);
2841 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
2842 IXGBE_EICR_GPI_SDP0_X550EM_a);
2843 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
2844 IXGBE_EICR_GPI_SDP0_X550EM_a);
2845 }
2846 return;
2847 case ixgbe_mac_X550:
2848 case ixgbe_mac_X540:
2849 if (!(eicr & IXGBE_EICR_TS))
2850 return;
2851 break;
2852 default:
2853 return;
2854 }
2855
2856 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2857}
2858
2859static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2860{
2861 switch (hw->mac.type) {
2862 case ixgbe_mac_82598EB:
2863 if (hw->phy.type == ixgbe_phy_nl)
2864 return true;
2865 return false;
2866 case ixgbe_mac_82599EB:
2867 case ixgbe_mac_X550EM_x:
2868 case ixgbe_mac_x550em_a:
2869 switch (hw->mac.ops.get_media_type(hw)) {
2870 case ixgbe_media_type_fiber:
2871 case ixgbe_media_type_fiber_qsfp:
2872 return true;
2873 default:
2874 return false;
2875 }
2876 default:
2877 return false;
2878 }
2879}
2880
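/* Handle SFP module plug/unplug and link configuration GPI events by
 * flagging the service task to re-identify the module or set up the link.
 */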
2881static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2882{
2883 struct ixgbe_hw *hw = &adapter->hw;
2884 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2885
2886 if (!ixgbe_is_sfp(hw))
2887 return;
2888
2889
2890 if (hw->mac.type >= ixgbe_mac_X540)
2891 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2892
2893 if (eicr & eicr_mask) {
2894
2895 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2896 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2897 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2898 adapter->sfp_poll_time = 0;
2899 ixgbe_service_event_schedule(adapter);
2900 }
2901 }
2902
2903 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2904 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2905
2906 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2907 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2908 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2909 ixgbe_service_event_schedule(adapter);
2910 }
2911 }
2912}
2913
2914static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2915{
2916 struct ixgbe_hw *hw = &adapter->hw;
2917
2918 adapter->lsc_int++;
2919 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2920 adapter->link_check_timeout = jiffies;
2921 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2922 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2923 IXGBE_WRITE_FLUSH(hw);
2924 ixgbe_service_event_schedule(adapter);
2925 }
2926}
2927
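/* Enable the interrupt causes for the queue vectors selected by qmask. */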
2928static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2929 u64 qmask)
2930{
2931 u32 mask;
2932 struct ixgbe_hw *hw = &adapter->hw;
2933
2934 switch (hw->mac.type) {
2935 case ixgbe_mac_82598EB:
2936 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2937 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2938 break;
2939 case ixgbe_mac_82599EB:
2940 case ixgbe_mac_X540:
2941 case ixgbe_mac_X550:
2942 case ixgbe_mac_X550EM_x:
2943 case ixgbe_mac_x550em_a:
2944 mask = (qmask & 0xFFFFFFFF);
2945 if (mask)
2946 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2947 mask = (qmask >> 32);
2948 if (mask)
2949 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2950 break;
2951 default:
2952 break;
2953 }
2954
2955}
2956
2957static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2958 u64 qmask)
2959{
2960 u32 mask;
2961 struct ixgbe_hw *hw = &adapter->hw;
2962
2963 switch (hw->mac.type) {
2964 case ixgbe_mac_82598EB:
2965 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2966 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2967 break;
2968 case ixgbe_mac_82599EB:
2969 case ixgbe_mac_X540:
2970 case ixgbe_mac_X550:
2971 case ixgbe_mac_X550EM_x:
2972 case ixgbe_mac_x550em_a:
2973 mask = (qmask & 0xFFFFFFFF);
2974 if (mask)
2975 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2976 mask = (qmask >> 32);
2977 if (mask)
2978 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2979 break;
2980 default:
2981 break;
2982 }
2983
2984}
2985
/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @queues: enable irqs for queues
 * @flush: flush register write
 **/
2992static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2993 bool flush)
2994{
2995 struct ixgbe_hw *hw = &adapter->hw;
2996 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2997
2998
2999 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
3000 mask &= ~IXGBE_EIMS_LSC;
3001
3002 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
3003 switch (adapter->hw.mac.type) {
3004 case ixgbe_mac_82599EB:
3005 mask |= IXGBE_EIMS_GPI_SDP0(hw);
3006 break;
3007 case ixgbe_mac_X540:
3008 case ixgbe_mac_X550:
3009 case ixgbe_mac_X550EM_x:
3010 case ixgbe_mac_x550em_a:
3011 mask |= IXGBE_EIMS_TS;
3012 break;
3013 default:
3014 break;
3015 }
3016 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3017 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3018 switch (adapter->hw.mac.type) {
3019 case ixgbe_mac_82599EB:
3020 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3021 mask |= IXGBE_EIMS_GPI_SDP2(hw);
		/* fall through */
3023 case ixgbe_mac_X540:
3024 case ixgbe_mac_X550:
3025 case ixgbe_mac_X550EM_x:
3026 case ixgbe_mac_x550em_a:
3027 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3028 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3029 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
3030 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
3031 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
3032 mask |= IXGBE_EICR_GPI_SDP0_X540;
3033 mask |= IXGBE_EIMS_ECC;
3034 mask |= IXGBE_EIMS_MAILBOX;
3035 break;
3036 default:
3037 break;
3038 }
3039
3040 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
3041 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
3042 mask |= IXGBE_EIMS_FLOW_DIR;
3043
3044 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
3045 if (queues)
3046 ixgbe_irq_enable_queues(adapter, ~0);
3047 if (flush)
3048 IXGBE_WRITE_FLUSH(&adapter->hw);
3049}
3050
3051static irqreturn_t ixgbe_msix_other(int irq, void *data)
3052{
3053 struct ixgbe_adapter *adapter = data;
3054 struct ixgbe_hw *hw = &adapter->hw;
3055 u32 eicr;
3056
	/* Read the causes from EICS (clear-by-write) instead of EICR so the
	 * read itself does not clear them; the causes handled here are
	 * cleared explicitly by the EICR write below.
	 */
3063 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3064
	/* The lower 16 bits of EICR are the queue interrupts, which are
	 * handled by the per-queue vectors.  Mask them off here so this
	 * handler does not accidentally clear them with the EICR write
	 * below while they are still pending.
	 */
3072 eicr &= 0xFFFF0000;
3073
3074 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3075
3076 if (eicr & IXGBE_EICR_LSC)
3077 ixgbe_check_lsc(adapter);
3078
3079 if (eicr & IXGBE_EICR_MAILBOX)
3080 ixgbe_msg_task(adapter);
3081
3082 switch (hw->mac.type) {
3083 case ixgbe_mac_82599EB:
3084 case ixgbe_mac_X540:
3085 case ixgbe_mac_X550:
3086 case ixgbe_mac_X550EM_x:
3087 case ixgbe_mac_x550em_a:
3088 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
3089 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3090 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
3091 ixgbe_service_event_schedule(adapter);
3092 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3093 IXGBE_EICR_GPI_SDP0_X540);
3094 }
3095 if (eicr & IXGBE_EICR_ECC) {
3096 e_info(link, "Received ECC Err, initiating reset\n");
3097 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3098 ixgbe_service_event_schedule(adapter);
3099 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3100 }
3101
3102 if (eicr & IXGBE_EICR_FLOW_DIR) {
3103 int reinit_count = 0;
3104 int i;
3105 for (i = 0; i < adapter->num_tx_queues; i++) {
3106 struct ixgbe_ring *ring = adapter->tx_ring[i];
3107 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
3108 &ring->state))
3109 reinit_count++;
3110 }
3111 if (reinit_count) {
3112
3113 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3114 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
3115 ixgbe_service_event_schedule(adapter);
3116 }
3117 }
3118 ixgbe_check_sfp_event(adapter, eicr);
3119 ixgbe_check_overtemp_event(adapter, eicr);
3120 break;
3121 default:
3122 break;
3123 }
3124
3125 ixgbe_check_fan_failure(adapter, eicr);
3126
3127 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3128 ixgbe_ptp_check_pps_event(adapter);
3129
3130
3131 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3132 ixgbe_irq_enable(adapter, false, false);
3133
3134 return IRQ_HANDLED;
3135}
3136
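/* MSI-X queue interrupt handler: EIAM has already masked this vector,
 * so just schedule NAPI for the rings attached to it.
 */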
3137static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
3138{
3139 struct ixgbe_q_vector *q_vector = data;
3140
3141
3142
3143 if (q_vector->rx.ring || q_vector->tx.ring)
3144 napi_schedule_irqoff(&q_vector->napi);
3145
3146 return IRQ_HANDLED;
3147}
3148
/**
 * ixgbe_poll - NAPI polling RX/TX cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 **/
3156int ixgbe_poll(struct napi_struct *napi, int budget)
3157{
3158 struct ixgbe_q_vector *q_vector =
3159 container_of(napi, struct ixgbe_q_vector, napi);
3160 struct ixgbe_adapter *adapter = q_vector->adapter;
3161 struct ixgbe_ring *ring;
3162 int per_ring_budget, work_done = 0;
3163 bool clean_complete = true;
3164
3165#ifdef CONFIG_IXGBE_DCA
3166 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3167 ixgbe_update_dca(q_vector);
3168#endif
3169
3170 ixgbe_for_each_ring(ring, q_vector->tx) {
3171 bool wd = ring->xsk_umem ?
3172 ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
3173 ixgbe_clean_tx_irq(q_vector, ring, budget);
3174
3175 if (!wd)
3176 clean_complete = false;
3177 }
3178
3179
3180 if (budget <= 0)
3181 return budget;
3182
3183
3184
3185 if (q_vector->rx.count > 1)
3186 per_ring_budget = max(budget/q_vector->rx.count, 1);
3187 else
3188 per_ring_budget = budget;
3189
3190 ixgbe_for_each_ring(ring, q_vector->rx) {
3191 int cleaned = ring->xsk_umem ?
3192 ixgbe_clean_rx_irq_zc(q_vector, ring,
3193 per_ring_budget) :
3194 ixgbe_clean_rx_irq(q_vector, ring,
3195 per_ring_budget);
3196
3197 work_done += cleaned;
3198 if (cleaned >= per_ring_budget)
3199 clean_complete = false;
3200 }
3201
3202
3203 if (!clean_complete)
3204 return budget;
3205
3206
3207 if (likely(napi_complete_done(napi, work_done))) {
3208 if (adapter->rx_itr_setting & 1)
3209 ixgbe_set_itr(q_vector);
3210 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3211 ixgbe_irq_enable_queues(adapter,
3212 BIT_ULL(q_vector->v_idx));
3213 }
3214
3215 return min(work_done, budget - 1);
3216}
3217
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
3225static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
3226{
3227 struct net_device *netdev = adapter->netdev;
3228 unsigned int ri = 0, ti = 0;
3229 int vector, err;
3230
3231 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3232 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3233 struct msix_entry *entry = &adapter->msix_entries[vector];
3234
3235 if (q_vector->tx.ring && q_vector->rx.ring) {
3236 snprintf(q_vector->name, sizeof(q_vector->name),
3237 "%s-TxRx-%u", netdev->name, ri++);
3238 ti++;
3239 } else if (q_vector->rx.ring) {
3240 snprintf(q_vector->name, sizeof(q_vector->name),
3241 "%s-rx-%u", netdev->name, ri++);
3242 } else if (q_vector->tx.ring) {
3243 snprintf(q_vector->name, sizeof(q_vector->name),
3244 "%s-tx-%u", netdev->name, ti++);
3245 } else {
3246
3247 continue;
3248 }
3249 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
3250 q_vector->name, q_vector);
3251 if (err) {
3252 e_err(probe, "request_irq failed for MSIX interrupt "
3253 "Error: %d\n", err);
3254 goto free_queue_irqs;
3255 }
3256
3257 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3258
3259 irq_set_affinity_hint(entry->vector,
3260 &q_vector->affinity_mask);
3261 }
3262 }
3263
3264 err = request_irq(adapter->msix_entries[vector].vector,
3265 ixgbe_msix_other, 0, netdev->name, adapter);
3266 if (err) {
3267 e_err(probe, "request_irq for msix_other failed: %d\n", err);
3268 goto free_queue_irqs;
3269 }
3270
3271 return 0;
3272
3273free_queue_irqs:
3274 while (vector) {
3275 vector--;
3276 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
3277 NULL);
3278 free_irq(adapter->msix_entries[vector].vector,
3279 adapter->q_vector[vector]);
3280 }
3281 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3282 pci_disable_msix(adapter->pdev);
3283 kfree(adapter->msix_entries);
3284 adapter->msix_entries = NULL;
3285 return err;
3286}
3287
/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
3293static irqreturn_t ixgbe_intr(int irq, void *data)
3294{
3295 struct ixgbe_adapter *adapter = data;
3296 struct ixgbe_hw *hw = &adapter->hw;
3297 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3298 u32 eicr;
3299
	/* Workaround for silicon errata on 82598: mask all interrupts
	 * before the read of EICR.
	 */
3304 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3305
3306
3307
3308 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3309 if (!eicr) {
		/* Shared interrupt alert!
		 * Make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM, then finish the
		 * 82598 workaround by unmasking what we masked before the
		 * EICR read.
		 */
3317 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3318 ixgbe_irq_enable(adapter, true, true);
3319 return IRQ_NONE;
3320 }
3321
3322 if (eicr & IXGBE_EICR_LSC)
3323 ixgbe_check_lsc(adapter);
3324
3325 switch (hw->mac.type) {
3326 case ixgbe_mac_82599EB:
3327 ixgbe_check_sfp_event(adapter, eicr);
		/* fall through */
3329 case ixgbe_mac_X540:
3330 case ixgbe_mac_X550:
3331 case ixgbe_mac_X550EM_x:
3332 case ixgbe_mac_x550em_a:
3333 if (eicr & IXGBE_EICR_ECC) {
3334 e_info(link, "Received ECC Err, initiating reset\n");
3335 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3336 ixgbe_service_event_schedule(adapter);
3337 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3338 }
3339 ixgbe_check_overtemp_event(adapter, eicr);
3340 break;
3341 default:
3342 break;
3343 }
3344
3345 ixgbe_check_fan_failure(adapter, eicr);
3346 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3347 ixgbe_ptp_check_pps_event(adapter);
3348
3349
3350 napi_schedule_irqoff(&q_vector->napi);
3351
3352
3353
3354
3355
3356 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3357 ixgbe_irq_enable(adapter, false, false);
3358
3359 return IRQ_HANDLED;
3360}
3361
/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
3369static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
3370{
3371 struct net_device *netdev = adapter->netdev;
3372 int err;
3373
3374 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3375 err = ixgbe_request_msix_irqs(adapter);
3376 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
3377 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
3378 netdev->name, adapter);
3379 else
3380 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
3381 netdev->name, adapter);
3382
3383 if (err)
3384 e_err(probe, "request_irq failed, Error %d\n", err);
3385
3386 return err;
3387}
3388
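/* Release the interrupt(s) obtained by ixgbe_request_irq(), clearing any
 * affinity hints that were set for the MSI-X queue vectors.
 */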
3389static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
3390{
3391 int vector;
3392
3393 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3394 free_irq(adapter->pdev->irq, adapter);
3395 return;
3396 }
3397
3398 if (!adapter->msix_entries)
3399 return;
3400
3401 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3402 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3403 struct msix_entry *entry = &adapter->msix_entries[vector];
3404
3405
3406 if (!q_vector->rx.ring && !q_vector->tx.ring)
3407 continue;
3408
3409
3410 irq_set_affinity_hint(entry->vector, NULL);
3411
3412 free_irq(entry->vector, q_vector);
3413 }
3414
3415 free_irq(adapter->msix_entries[vector].vector, adapter);
3416}
3417
/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
3422static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3423{
3424 switch (adapter->hw.mac.type) {
3425 case ixgbe_mac_82598EB:
3426 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3427 break;
3428 case ixgbe_mac_82599EB:
3429 case ixgbe_mac_X540:
3430 case ixgbe_mac_X550:
3431 case ixgbe_mac_X550EM_x:
3432 case ixgbe_mac_x550em_a:
3433 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3434 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3435 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3436 break;
3437 default:
3438 break;
3439 }
3440 IXGBE_WRITE_FLUSH(&adapter->hw);
3441 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3442 int vector;
3443
3444 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3445 synchronize_irq(adapter->msix_entries[vector].vector);
3446
3447 synchronize_irq(adapter->msix_entries[vector++].vector);
3448 } else {
3449 synchronize_irq(adapter->pdev->irq);
3450 }
3451}
3452
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
3458static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3459{
3460 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3461
3462 ixgbe_write_eitr(q_vector);
3463
3464 ixgbe_set_ivar(adapter, 0, 0, 0);
3465 ixgbe_set_ivar(adapter, 1, 0, 0);
3466
3467 e_info(hw, "Legacy interrupt IVAR setup done\n");
3468}
3469
/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
3477void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3478 struct ixgbe_ring *ring)
3479{
3480 struct ixgbe_hw *hw = &adapter->hw;
3481 u64 tdba = ring->dma;
3482 int wait_loop = 10;
3483 u32 txdctl = IXGBE_TXDCTL_ENABLE;
3484 u8 reg_idx = ring->reg_idx;
3485
3486 ring->xsk_umem = NULL;
3487 if (ring_is_xdp(ring))
3488 ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
3489
3490
3491 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3492 IXGBE_WRITE_FLUSH(hw);
3493
3494 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3495 (tdba & DMA_BIT_MASK(32)));
3496 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3497 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3498 ring->count * sizeof(union ixgbe_adv_tx_desc));
3499 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3500 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3501 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3502
	/* set WTHRESH to encourage burst writeback, it should not be set
	 * higher than 1 when ITR is 0 as it could cause false TX hangs
	 *
	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
3513 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3514 txdctl |= 1u << 16;
3515 else
3516 txdctl |= 8u << 16;
3517
	/*
	 * Setting PTHRESH to 32 both improves performance
	 * and avoids a TX hang with DCB enabled
	 */
3522 txdctl |= (1u << 8) |
3523 32;
3524
3525
3526 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3527 ring->atr_sample_rate = adapter->atr_sample_rate;
3528 ring->atr_count = 0;
3529 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3530 } else {
3531 ring->atr_sample_rate = 0;
3532 }
3533
3534
3535 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3536 struct ixgbe_q_vector *q_vector = ring->q_vector;
3537
3538 if (q_vector)
3539 netif_set_xps_queue(ring->netdev,
3540 &q_vector->affinity_mask,
3541 ring->queue_index);
3542 }
3543
3544 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3545
3546
3547 memset(ring->tx_buffer_info, 0,
3548 sizeof(struct ixgbe_tx_buffer) * ring->count);
3549
3550
3551 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3552
3553
3554 if (hw->mac.type == ixgbe_mac_82598EB &&
3555 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3556 return;
3557
3558
3559 do {
3560 usleep_range(1000, 2000);
3561 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3562 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3563 if (!wait_loop)
3564 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
3565}
3566
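/* Program MTQC to map Tx queues to packet buffers based on the number of
 * traffic classes and the SR-IOV/VMDq configuration.
 */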
3567static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3568{
3569 struct ixgbe_hw *hw = &adapter->hw;
3570 u32 rttdcs, mtqc;
3571 u8 tcs = adapter->hw_tcs;
3572
3573 if (hw->mac.type == ixgbe_mac_82598EB)
3574 return;
3575
3576
3577 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3578 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3579 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3580
3581
3582 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3583 mtqc = IXGBE_MTQC_VT_ENA;
3584 if (tcs > 4)
3585 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3586 else if (tcs > 1)
3587 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3588 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3589 IXGBE_82599_VMDQ_4Q_MASK)
3590 mtqc |= IXGBE_MTQC_32VF;
3591 else
3592 mtqc |= IXGBE_MTQC_64VF;
3593 } else {
3594 if (tcs > 4) {
3595 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3596 } else if (tcs > 1) {
3597 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3598 } else {
3599 u8 max_txq = adapter->num_tx_queues +
3600 adapter->num_xdp_queues;
3601 if (max_txq > 63)
3602 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3603 else
3604 mtqc = IXGBE_MTQC_64Q_1PB;
3605 }
3606 }
3607
3608 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3609
3610
3611 if (tcs) {
3612 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3613 sectx |= IXGBE_SECTX_DCB;
3614 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3615 }
3616
3617
3618 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3619 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3620}
3621
/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
3628static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3629{
3630 struct ixgbe_hw *hw = &adapter->hw;
3631 u32 dmatxctl;
3632 u32 i;
3633
3634 ixgbe_setup_mtqc(adapter);
3635
3636 if (hw->mac.type != ixgbe_mac_82598EB) {
3637
3638 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3639 dmatxctl |= IXGBE_DMATXCTL_TE;
3640 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3641 }
3642
3643
3644 for (i = 0; i < adapter->num_tx_queues; i++)
3645 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3646 for (i = 0; i < adapter->num_xdp_queues; i++)
3647 ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
3648}
3649
3650static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3651 struct ixgbe_ring *ring)
3652{
3653 struct ixgbe_hw *hw = &adapter->hw;
3654 u8 reg_idx = ring->reg_idx;
3655 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3656
3657 srrctl |= IXGBE_SRRCTL_DROP_EN;
3658
3659 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3660}
3661
3662static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3663 struct ixgbe_ring *ring)
3664{
3665 struct ixgbe_hw *hw = &adapter->hw;
3666 u8 reg_idx = ring->reg_idx;
3667 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3668
3669 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3670
3671 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3672}
3673
3674#ifdef CONFIG_IXGBE_DCB
3675void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3676#else
3677static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3678#endif
3679{
3680 int i;
3681 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3682
3683 if (adapter->ixgbe_ieee_pfc)
3684 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3696 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3697 for (i = 0; i < adapter->num_rx_queues; i++)
3698 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3699 } else {
3700 for (i = 0; i < adapter->num_rx_queues; i++)
3701 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3702 }
3703}
3704
3705#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3706
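/* Configure the split receive control register (descriptor type and buffer
 * sizes) for a single Rx ring.
 */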
3707static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3708 struct ixgbe_ring *rx_ring)
3709{
3710 struct ixgbe_hw *hw = &adapter->hw;
3711 u32 srrctl;
3712 u8 reg_idx = rx_ring->reg_idx;
3713
3714 if (hw->mac.type == ixgbe_mac_82598EB) {
3715 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3716
3717
3718
3719
3720
3721 reg_idx &= mask;
3722 }
3723
3724
3725 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3726
3727
3728 if (rx_ring->xsk_umem) {
3729 u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
3730 XDP_PACKET_HEADROOM;
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740 if (hw->mac.type != ixgbe_mac_82599EB)
3741 srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3742 else
3743 srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3744 } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
3745 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3746 } else {
3747 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3748 }
3749
3750
3751 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3752
3753 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3754}
3755
/**
 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
 * @adapter: device handle
 *
 *  - 82598/82599/X540:     128
 *  - X550(non-SRIOV mode): 512
 *  - X550(SRIOV mode):     64
 **/
3764u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3765{
3766 if (adapter->hw.mac.type < ixgbe_mac_X550)
3767 return 128;
3768 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3769 return 64;
3770 else
3771 return 512;
3772}
3773
/**
 * ixgbe_store_key - Write the RSS key to HW
 * @adapter: device handle
 *
 * Write the RSS key stored in adapter.rss_key to HW.
 **/
3780void ixgbe_store_key(struct ixgbe_adapter *adapter)
3781{
3782 struct ixgbe_hw *hw = &adapter->hw;
3783 int i;
3784
3785 for (i = 0; i < 10; i++)
3786 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3787}
3788
/**
 * ixgbe_init_rss_key - Initialize adapter RSS key
 * @adapter: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 **/
3795static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
3796{
3797 u32 *rss_key;
3798
3799 if (!adapter->rss_key) {
3800 rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
3801 if (unlikely(!rss_key))
3802 return -ENOMEM;
3803
3804 netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
3805 adapter->rss_key = rss_key;
3806 }
3807
3808 return 0;
3809}
3810
/**
 * ixgbe_store_reta - Write the RETA table to HW
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
 **/
3817void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3818{
3819 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3820 struct ixgbe_hw *hw = &adapter->hw;
3821 u32 reta = 0;
3822 u32 indices_multi;
3823 u8 *indir_tbl = adapter->rss_indir_tbl;
3824
3825
3826
3827
3828
3829
3830
3831 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3832 indices_multi = 0x11;
3833 else
3834 indices_multi = 0x1;
3835
3836
3837 for (i = 0; i < reta_entries; i++) {
3838 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3839 if ((i & 3) == 3) {
3840 if (i < 128)
3841 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3842 else
3843 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3844 reta);
3845 reta = 0;
3846 }
3847 }
3848}
3849
/**
 * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
 **/
3856static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3857{
3858 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3859 struct ixgbe_hw *hw = &adapter->hw;
3860 u32 vfreta = 0;
3861
3862
3863 for (i = 0; i < reta_entries; i++) {
3864 u16 pool = adapter->num_rx_pools;
3865
3866 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3867 if ((i & 3) != 3)
3868 continue;
3869
3870 while (pool--)
3871 IXGBE_WRITE_REG(hw,
3872 IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
3873 vfreta);
3874 vfreta = 0;
3875 }
3876}
3877
3878static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3879{
3880 u32 i, j;
3881 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3882 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3883
3884
3885
3886
3887
3888 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
3889 rss_i = 4;
3890
3891
3892 ixgbe_store_key(adapter);
3893
3894
3895 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3896
3897 for (i = 0, j = 0; i < reta_entries; i++, j++) {
3898 if (j == rss_i)
3899 j = 0;
3900
3901 adapter->rss_indir_tbl[i] = j;
3902 }
3903
3904 ixgbe_store_reta(adapter);
3905}
3906
3907static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3908{
3909 struct ixgbe_hw *hw = &adapter->hw;
3910 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3911 int i, j;
3912
3913
3914 for (i = 0; i < 10; i++) {
3915 u16 pool = adapter->num_rx_pools;
3916
3917 while (pool--)
3918 IXGBE_WRITE_REG(hw,
3919 IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
3920 *(adapter->rss_key + i));
3921 }
3922
3923
3924 for (i = 0, j = 0; i < 64; i++, j++) {
3925 if (j == rss_i)
3926 j = 0;
3927
3928 adapter->rss_indir_tbl[i] = j;
3929 }
3930
3931 ixgbe_store_vfreta(adapter);
3932}
3933
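/* Configure multiple receive queue control: select RSS/VMDq mode and the
 * hashed fields, then program the RSS key and redirection table.
 */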
3934static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3935{
3936 struct ixgbe_hw *hw = &adapter->hw;
3937 u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3938 u32 rxcsum;
3939
3940
3941 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3942 rxcsum |= IXGBE_RXCSUM_PCSD;
3943 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3944
3945 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3946 if (adapter->ring_feature[RING_F_RSS].mask)
3947 mrqc = IXGBE_MRQC_RSSEN;
3948 } else {
3949 u8 tcs = adapter->hw_tcs;
3950
3951 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3952 if (tcs > 4)
3953 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3954 else if (tcs > 1)
3955 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3956 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3957 IXGBE_82599_VMDQ_4Q_MASK)
3958 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3959 else
3960 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3961
3962
3963
3964
3965 if (hw->mac.type >= ixgbe_mac_X550)
3966 mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3967 } else {
3968 if (tcs > 4)
3969 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3970 else if (tcs > 1)
3971 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3972 else
3973 mrqc = IXGBE_MRQC_RSSEN;
3974 }
3975 }
3976
3977
3978 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3979 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3980 IXGBE_MRQC_RSS_FIELD_IPV6 |
3981 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3982
3983 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3984 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3985 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3986 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3987
3988 if ((hw->mac.type >= ixgbe_mac_X550) &&
3989 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3990 u16 pool = adapter->num_rx_pools;
3991
3992
3993 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
3994 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3995
3996
3997 ixgbe_setup_vfreta(adapter);
3998 vfmrqc = IXGBE_MRQC_RSSEN;
3999 vfmrqc |= rss_field;
4000
4001 while (pool--)
4002 IXGBE_WRITE_REG(hw,
4003 IXGBE_PFVFMRQC(VMDQ_P(pool)),
4004 vfmrqc);
4005 } else {
4006 ixgbe_setup_reta(adapter);
4007 mrqc |= rss_field;
4008 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4009 }
4010}
4011
/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 **/
4017static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
4018 struct ixgbe_ring *ring)
4019{
4020 struct ixgbe_hw *hw = &adapter->hw;
4021 u32 rscctrl;
4022 u8 reg_idx = ring->reg_idx;
4023
4024 if (!ring_is_rsc_enabled(ring))
4025 return;
4026
4027 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
4028 rscctrl |= IXGBE_RSCCTL_RSCEN;
4029
4030
4031
4032
4033
4034 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
4035 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
4036}
4037
4038#define IXGBE_MAX_RX_DESC_POLL 10
4039static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
4040 struct ixgbe_ring *ring)
4041{
4042 struct ixgbe_hw *hw = &adapter->hw;
4043 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
4044 u32 rxdctl;
4045 u8 reg_idx = ring->reg_idx;
4046
4047 if (ixgbe_removed(hw->hw_addr))
4048 return;
4049
4050 if (hw->mac.type == ixgbe_mac_82598EB &&
4051 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
4052 return;
4053
4054 do {
4055 usleep_range(1000, 2000);
4056 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4057 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4058
4059 if (!wait_loop) {
4060 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
4061 "the polling period\n", reg_idx);
4062 }
4063}
4064
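/* Configure a single Rx descriptor ring: program base, length and tail,
 * select buffer sizes, enable the queue and post the initial Rx buffers.
 */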
4065void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
4066 struct ixgbe_ring *ring)
4067{
4068 struct ixgbe_hw *hw = &adapter->hw;
4069 union ixgbe_adv_rx_desc *rx_desc;
4070 u64 rdba = ring->dma;
4071 u32 rxdctl;
4072 u8 reg_idx = ring->reg_idx;
4073
4074 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4075 ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
4076 if (ring->xsk_umem) {
4077 ring->zca.free = ixgbe_zca_free;
4078 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4079 MEM_TYPE_ZERO_COPY,
4080 &ring->zca));
4081
4082 } else {
4083 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4084 MEM_TYPE_PAGE_SHARED, NULL));
4085 }
4086
4087
4088 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4089 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4090
4091
4092 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4093 IXGBE_WRITE_FLUSH(hw);
4094
4095 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
4096 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
4097 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
4098 ring->count * sizeof(union ixgbe_adv_rx_desc));
4099
4100 IXGBE_WRITE_FLUSH(hw);
4101
4102 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
4103 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
4104 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
4105
4106 ixgbe_configure_srrctl(adapter, ring);
4107 ixgbe_configure_rscctl(adapter, ring);
4108
4109 if (hw->mac.type == ixgbe_mac_82598EB) {
4110
4111
4112
4113
4114
4115
4116
4117 rxdctl &= ~0x3FFFFF;
4118 rxdctl |= 0x080420;
4119#if (PAGE_SIZE < 8192)
4120
4121 } else if (hw->mac.type != ixgbe_mac_82599EB) {
4122 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4123 IXGBE_RXDCTL_RLPML_EN);
4124
4125
4126
4127
4128
4129 if (ring_uses_build_skb(ring) &&
4130 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
4131 rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
4132 IXGBE_RXDCTL_RLPML_EN;
4133#endif
4134 }
4135
4136 if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
4137 u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
4138 XDP_PACKET_HEADROOM;
4139
4140 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4141 IXGBE_RXDCTL_RLPML_EN);
4142 rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
4143
4144 ring->rx_buf_len = xsk_buf_len;
4145 }
4146
4147
4148 memset(ring->rx_buffer_info, 0,
4149 sizeof(struct ixgbe_rx_buffer) * ring->count);
4150
4151
4152 rx_desc = IXGBE_RX_DESC(ring, 0);
4153 rx_desc->wb.upper.length = 0;
4154
4155
4156 rxdctl |= IXGBE_RXDCTL_ENABLE;
4157 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4158
4159 ixgbe_rx_desc_queue_enable(adapter, ring);
4160 if (ring->xsk_umem)
4161 ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
4162 else
4163 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4164}
4165
4166static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
4167{
4168 struct ixgbe_hw *hw = &adapter->hw;
4169 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
4170 u16 pool = adapter->num_rx_pools;
4171
4172
4173 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4174 IXGBE_PSRTYPE_UDPHDR |
4175 IXGBE_PSRTYPE_IPV4HDR |
4176 IXGBE_PSRTYPE_L2HDR |
4177 IXGBE_PSRTYPE_IPV6HDR;
4178
4179 if (hw->mac.type == ixgbe_mac_82598EB)
4180 return;
4181
4182 if (rss_i > 3)
4183 psrtype |= 2u << 29;
4184 else if (rss_i > 1)
4185 psrtype |= 1u << 29;
4186
4187 while (pool--)
4188 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4189}
4190
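/* Set up the VMDq/SR-IOV related registers: default pool, per-pool receive
 * and transmit enables, loopback, and per-VF spoof check and RSS query
 * settings.
 */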
4191static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
4192{
4193 struct ixgbe_hw *hw = &adapter->hw;
4194 u16 pool = adapter->num_rx_pools;
4195 u32 reg_offset, vf_shift, vmolr;
4196 u32 gcr_ext, vmdctl;
4197 int i;
4198
4199 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
4200 return;
4201
4202 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4203 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
4204 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
4205 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
4206 vmdctl |= IXGBE_VT_CTL_REPLEN;
4207 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
4208
4209
4210
4211
4212 vmolr = IXGBE_VMOLR_AUPE;
4213 while (pool--)
4214 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
4215
4216 vf_shift = VMDQ_P(0) % 32;
4217 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
4218
4219
4220 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
4221 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
4222 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
4223 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
4224 if (adapter->bridge_mode == BRIDGE_MODE_VEB)
4225 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4226
4227
4228 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
4229
4230
4231 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4232
4233
4234
4235
4236
4237 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4238 case IXGBE_82599_VMDQ_8Q_MASK:
4239 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
4240 break;
4241 case IXGBE_82599_VMDQ_4Q_MASK:
4242 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
4243 break;
4244 default:
4245 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
4246 break;
4247 }
4248
4249 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4250
4251 for (i = 0; i < adapter->num_vfs; i++) {
4252
4253 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
4254 adapter->vfinfo[i].spoofchk_enabled);
4255
4256
4257 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
4258 adapter->vfinfo[i].rss_query_enabled);
4259 }
4260}
4261
4262static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
4263{
4264 struct ixgbe_hw *hw = &adapter->hw;
4265 struct net_device *netdev = adapter->netdev;
4266 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4267 struct ixgbe_ring *rx_ring;
4268 int i;
4269 u32 mhadd, hlreg0;
4270
4271#ifdef IXGBE_FCOE
4272
4273 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
4274 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
4275 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4276
4277#endif
4278
4279
4280 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4281 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
4282
4283 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4284 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
4285 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4286 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
4287
4288 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4289 }
4290
4291 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4292
4293 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4294 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4295
4296
4297
4298
4299
4300 for (i = 0; i < adapter->num_rx_queues; i++) {
4301 rx_ring = adapter->rx_ring[i];
4302
4303 clear_ring_rsc_enabled(rx_ring);
4304 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4305 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4306
4307 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4308 set_ring_rsc_enabled(rx_ring);
4309
4310 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
4311 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4312
4313 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
4314 continue;
4315
4316 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4317
4318#if (PAGE_SIZE < 8192)
4319 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4320 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4321
4322 if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
4323 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
4324 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4325#endif
4326 }
4327}
4328
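/* Apply the MAC-specific RDRXCTL receive DMA settings (CRC stripping,
 * RSC-related fixes, etc.) after reset.
 */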
4329static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
4330{
4331 struct ixgbe_hw *hw = &adapter->hw;
4332 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4333
4334 switch (hw->mac.type) {
4335 case ixgbe_mac_82598EB:
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
4347 break;
4348 case ixgbe_mac_X550:
4349 case ixgbe_mac_X550EM_x:
4350 case ixgbe_mac_x550em_a:
4351 if (adapter->num_vfs)
4352 rdrxctl |= IXGBE_RDRXCTL_PSP;
		/* fall through */
4354 case ixgbe_mac_82599EB:
4355 case ixgbe_mac_X540:
4356
4357 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
4358 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
4359 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4360
4361 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
4362 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4363 break;
4364 default:
4365
4366 return;
4367 }
4368
4369 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4370}
4371
/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
4378static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
4379{
4380 struct ixgbe_hw *hw = &adapter->hw;
4381 int i;
4382 u32 rxctrl, rfctl;
4383
4384
4385 hw->mac.ops.disable_rx(hw);
4386
4387 ixgbe_setup_psrtype(adapter);
4388 ixgbe_setup_rdrxctl(adapter);
4389
4390
4391 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4392 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
4393 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
4394 rfctl |= IXGBE_RFCTL_RSC_DIS;
4395
4396
4397 rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
4398 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4399
4400
4401 ixgbe_setup_mrqc(adapter);
4402
4403
4404 ixgbe_set_rx_buffer_len(adapter);
4405
4406
4407
4408
4409
4410 for (i = 0; i < adapter->num_rx_queues; i++)
4411 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
4412
4413 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4414
4415 if (hw->mac.type == ixgbe_mac_82598EB)
4416 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4417
4418
4419 rxctrl |= IXGBE_RXCTRL_RXEN;
4420 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4421}
4422
4423static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
4424 __be16 proto, u16 vid)
4425{
4426 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4427 struct ixgbe_hw *hw = &adapter->hw;
4428
4429
4430 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4431 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
4432
4433 set_bit(vid, adapter->active_vlans);
4434
4435 return 0;
4436}
4437
4438static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
4439{
4440 u32 vlvf;
4441 int idx;
4442
4443
4444 if (vlan == 0)
4445 return 0;
4446
4447
4448 for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
4449 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
4450 if ((vlvf & VLAN_VID_MASK) == vlan)
4451 break;
4452 }
4453
4454 return idx;
4455}
4456
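/* Clear the PF bit in the VLVF pool bitmap for the given VLAN and release
 * the VLVF entry once no pools reference it anymore.
 */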
4457void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
4458{
4459 struct ixgbe_hw *hw = &adapter->hw;
4460 u32 bits, word;
4461 int idx;
4462
4463 idx = ixgbe_find_vlvf_entry(hw, vid);
4464 if (!idx)
4465 return;
4466
4467
4468
4469
4470 word = idx * 2 + (VMDQ_P(0) / 32);
4471 bits = ~BIT(VMDQ_P(0) % 32);
4472 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4473
4474
4475 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
4476 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4477 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
4478 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
4479 }
4480}
4481
4482static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
4483 __be16 proto, u16 vid)
4484{
4485 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4486 struct ixgbe_hw *hw = &adapter->hw;
4487
4488
4489 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4490 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
4491
4492 clear_bit(vid, adapter->active_vlans);
4493
4494 return 0;
4495}
4496
/**
 * ixgbe_vlan_strip_disable - helper to disable VLAN tag stripping
 * @adapter: driver data
 **/
4501static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
4502{
4503 struct ixgbe_hw *hw = &adapter->hw;
4504 u32 vlnctrl;
4505 int i, j;
4506
4507 switch (hw->mac.type) {
4508 case ixgbe_mac_82598EB:
4509 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4510 vlnctrl &= ~IXGBE_VLNCTRL_VME;
4511 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4512 break;
4513 case ixgbe_mac_82599EB:
4514 case ixgbe_mac_X540:
4515 case ixgbe_mac_X550:
4516 case ixgbe_mac_X550EM_x:
4517 case ixgbe_mac_x550em_a:
4518 for (i = 0; i < adapter->num_rx_queues; i++) {
4519 struct ixgbe_ring *ring = adapter->rx_ring[i];
4520
4521 if (!netif_is_ixgbe(ring->netdev))
4522 continue;
4523
4524 j = ring->reg_idx;
4525 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4526 vlnctrl &= ~IXGBE_RXDCTL_VME;
4527 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4528 }
4529 break;
4530 default:
4531 break;
4532 }
4533}
4534
/**
 * ixgbe_vlan_strip_enable - helper to enable VLAN tag stripping
 * @adapter: driver data
 **/
4539static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4540{
4541 struct ixgbe_hw *hw = &adapter->hw;
4542 u32 vlnctrl;
4543 int i, j;
4544
4545 switch (hw->mac.type) {
4546 case ixgbe_mac_82598EB:
4547 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4548 vlnctrl |= IXGBE_VLNCTRL_VME;
4549 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4550 break;
4551 case ixgbe_mac_82599EB:
4552 case ixgbe_mac_X540:
4553 case ixgbe_mac_X550:
4554 case ixgbe_mac_X550EM_x:
4555 case ixgbe_mac_x550em_a:
4556 for (i = 0; i < adapter->num_rx_queues; i++) {
4557 struct ixgbe_ring *ring = adapter->rx_ring[i];
4558
4559 if (!netif_is_ixgbe(ring->netdev))
4560 continue;
4561
4562 j = ring->reg_idx;
4563 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4564 vlnctrl |= IXGBE_RXDCTL_VME;
4565 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4566 }
4567 break;
4568 default:
4569 break;
4570 }
4571}
4572
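/* Enter VLAN promiscuous mode: keep VLAN filtering on when VMDq is active,
 * but add the PF pool to every VLVF entry and set every bit in the VFTA.
 */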
4573static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4574{
4575 struct ixgbe_hw *hw = &adapter->hw;
4576 u32 vlnctrl, i;
4577
4578 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4579
4580 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
4581
4582 vlnctrl |= IXGBE_VLNCTRL_VFE;
4583 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4584 } else {
4585 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4586 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4587 return;
4588 }
4589
4590
4591 if (hw->mac.type == ixgbe_mac_82598EB)
4592 return;
4593
4594
4595 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
4596 return;
4597
4598
4599 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4600
4601
4602 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4603 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4604 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4605
4606 vlvfb |= BIT(VMDQ_P(0) % 32);
4607 IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4608 }
4609
4610
4611 for (i = hw->mac.vft_size; i--;)
4612 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4613}
4614
4615#define VFTA_BLOCK_SIZE 8
4616static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4617{
4618 struct ixgbe_hw *hw = &adapter->hw;
4619 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4620 u32 vid_start = vfta_offset * 32;
4621 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4622 u32 i, vid, word, bits;
4623
4624 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4625 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
4626
4627
4628 vid = vlvf & VLAN_VID_MASK;
4629
4630
4631 if (vid < vid_start || vid >= vid_end)
4632 continue;
4633
4634 if (vlvf) {
4635
4636 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4637
4638
4639 if (test_bit(vid, adapter->active_vlans))
4640 continue;
4641 }
4642
4643
4644 word = i * 2 + VMDQ_P(0) / 32;
4645 bits = ~BIT(VMDQ_P(0) % 32);
4646 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4647 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4648 }
4649
4650
4651 for (i = VFTA_BLOCK_SIZE; i--;) {
4652 vid = (vfta_offset + i) * 32;
4653 word = vid / BITS_PER_LONG;
4654 bits = vid % BITS_PER_LONG;
4655
4656 vfta[i] |= adapter->active_vlans[word] >> bits;
4657
4658 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4659 }
4660}
4661
4662static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4663{
4664 struct ixgbe_hw *hw = &adapter->hw;
4665 u32 vlnctrl, i;
4666
4667
4668 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4669 vlnctrl |= IXGBE_VLNCTRL_VFE;
4670 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4671
4672 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
4673 hw->mac.type == ixgbe_mac_82598EB)
4674 return;
4675
4676
4677 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4678 return;
4679
4680
4681 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4682
4683 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
4684 ixgbe_scrub_vfta(adapter, i);
4685}
4686
4687static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4688{
4689 u16 vid = 1;
4690
4691 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4692
4693 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
4694 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4695}
4696
/**
 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *                0 on no addresses written
 *                X on writing X addresses to MTA
 **/
4706static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4707{
4708 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4709 struct ixgbe_hw *hw = &adapter->hw;
4710
4711 if (!netif_running(netdev))
4712 return 0;
4713
4714 if (hw->mac.ops.update_mc_addr_list)
4715 hw->mac.ops.update_mc_addr_list(hw, netdev);
4716 else
4717 return -ENOMEM;
4718
4719#ifdef CONFIG_PCI_IOV
4720 ixgbe_restore_vf_multicasts(adapter);
4721#endif
4722
4723 return netdev_mc_count(netdev);
4724}
4725
4726#ifdef CONFIG_PCI_IOV
4727void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4728{
4729 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4730 struct ixgbe_hw *hw = &adapter->hw;
4731 int i;
4732
4733 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4734 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4735
4736 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4737 hw->mac.ops.set_rar(hw, i,
4738 mac_table->addr,
4739 mac_table->pool,
4740 IXGBE_RAH_AV);
4741 else
4742 hw->mac.ops.clear_rar(hw, i);
4743 }
4744}
4745
4746#endif
4747static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4748{
4749 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4750 struct ixgbe_hw *hw = &adapter->hw;
4751 int i;
4752
4753 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4754 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
4755 continue;
4756
4757 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4758
4759 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4760 hw->mac.ops.set_rar(hw, i,
4761 mac_table->addr,
4762 mac_table->pool,
4763 IXGBE_RAH_AV);
4764 else
4765 hw->mac.ops.clear_rar(hw, i);
4766 }
4767}
4768
4769static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4770{
4771 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4772 struct ixgbe_hw *hw = &adapter->hw;
4773 int i;
4774
4775 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4776 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4777 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4778 }
4779
4780 ixgbe_sync_mac_table(adapter);
4781}
4782
4783static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
4784{
4785 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4786 struct ixgbe_hw *hw = &adapter->hw;
4787 int i, count = 0;
4788
4789 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4790
4791 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
4792 continue;
4793
4794
4795 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
4796 if (mac_table->pool != pool)
4797 continue;
4798 }
4799
4800 count++;
4801 }
4802
4803 return count;
4804}
4805
4806
4807static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
4808{
4809 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4810 struct ixgbe_hw *hw = &adapter->hw;
4811
4812 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
4813 mac_table->pool = VMDQ_P(0);
4814
4815 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
4816
4817 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
4818 IXGBE_RAH_AV);
4819}
4820
4821int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4822 const u8 *addr, u16 pool)
4823{
4824 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4825 struct ixgbe_hw *hw = &adapter->hw;
4826 int i;
4827
4828 if (is_zero_ether_addr(addr))
4829 return -EINVAL;
4830
4831 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
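		/* claim the first RAR entry that is not already in use */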
4832 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4833 continue;
4834
4835 ether_addr_copy(mac_table->addr, addr);
4836 mac_table->pool = pool;
4837
4838 mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
4839 IXGBE_MAC_STATE_IN_USE;
4840
4841 ixgbe_sync_mac_table(adapter);
4842
4843 return i;
4844 }
4845
4846 return -ENOMEM;
4847}
4848
4849int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
4850 const u8 *addr, u16 pool)
4851{
4852 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4853 struct ixgbe_hw *hw = &adapter->hw;
4854 int i;
4855
4856 if (is_zero_ether_addr(addr))
4857 return -EINVAL;
4858
4859
4860 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4861
4862 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
4863 continue;
4864
4865 if (mac_table->pool != pool)
4866 continue;
4867
4868 if (!ether_addr_equal(addr, mac_table->addr))
4869 continue;
4870
4871 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4872 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4873
4874 ixgbe_sync_mac_table(adapter);
4875
4876 return 0;
4877 }
4878
4879 return -ENOMEM;
4880}
4881
4882static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
4883{
4884 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4885 int ret;
4886
4887 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
4888
4889 return min_t(int, ret, 0);
4890}
4891
4892static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4893{
4894 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4895
4896 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
4897
4898 return 0;
4899}
4900
/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
4910void ixgbe_set_rx_mode(struct net_device *netdev)
4911{
4912 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4913 struct ixgbe_hw *hw = &adapter->hw;
4914 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4915 netdev_features_t features = netdev->features;
4916 int count;
4917
	/* Check for Promiscuous and All Multicast modes */
4919 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4920
	/* set all bits that we expect to always be set */
4922 fctrl &= ~IXGBE_FCTRL_SBP;
4923 fctrl |= IXGBE_FCTRL_BAM;
4924 fctrl |= IXGBE_FCTRL_DPF;
4925 fctrl |= IXGBE_FCTRL_PMCF;
4926
4927
4928 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4929 if (netdev->flags & IFF_PROMISC) {
4930 hw->addr_ctrl.user_set_promisc = true;
4931 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4932 vmolr |= IXGBE_VMOLR_MPE;
4933 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4934 } else {
4935 if (netdev->flags & IFF_ALLMULTI) {
4936 fctrl |= IXGBE_FCTRL_MPE;
4937 vmolr |= IXGBE_VMOLR_MPE;
4938 }
4939 hw->addr_ctrl.user_set_promisc = false;
4940 }
4941
	/*
	 * Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
4947 if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4948 fctrl |= IXGBE_FCTRL_UPE;
4949 vmolr |= IXGBE_VMOLR_ROPE;
4950 }
4951
	/* Write addresses to the MTA, if the attempt fails
	 * then we should just turn on promiscuous mode so
	 * that we can at least receive multicast traffic
	 */
4956 count = ixgbe_write_mc_addr_list(netdev);
4957 if (count < 0) {
4958 fctrl |= IXGBE_FCTRL_MPE;
4959 vmolr |= IXGBE_VMOLR_MPE;
4960 } else if (count) {
4961 vmolr |= IXGBE_VMOLR_ROMPE;
4962 }
4963
4964 if (hw->mac.type != ixgbe_mac_82598EB) {
4965 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4966 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
4967 IXGBE_VMOLR_ROPE);
4968 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
4969 }
4970
4971
4972 if (features & NETIF_F_RXALL) {
4973
4974
4975 fctrl |= (IXGBE_FCTRL_SBP |
4976 IXGBE_FCTRL_BAM |
4977 IXGBE_FCTRL_PMCF);
4978
4979 fctrl &= ~(IXGBE_FCTRL_DPF);
4980
4981 }
4982
4983 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4984
4985 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4986 ixgbe_vlan_strip_enable(adapter);
4987 else
4988 ixgbe_vlan_strip_disable(adapter);
4989
4990 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
4991 ixgbe_vlan_promisc_disable(adapter);
4992 else
4993 ixgbe_vlan_promisc_enable(adapter);
4994}
4995
4996static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4997{
4998 int q_idx;
4999
5000 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
5001 napi_enable(&adapter->q_vector[q_idx]->napi);
5002}
5003
5004static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
5005{
5006 int q_idx;
5007
5008 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
5009 napi_disable(&adapter->q_vector[q_idx]->napi);
5010}
5011
5012static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
5013{
5014 struct ixgbe_hw *hw = &adapter->hw;
5015 u32 vxlanctrl;
5016
5017 if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
5018 IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
5019 return;
5020
5021 vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
5022 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
5023
5024 if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
5025 adapter->vxlan_port = 0;
5026
5027 if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
5028 adapter->geneve_port = 0;
5029}
5030
5031#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * This is called by the driver on open to configure the DCB hardware.
 * This is also called by the gennetlink interface when reconfiguring
 * the DCB state.
 */
5040static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
5041{
5042 struct ixgbe_hw *hw = &adapter->hw;
5043 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
5044
5045 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
5046 if (hw->mac.type == ixgbe_mac_82598EB)
5047 netif_set_gso_max_size(adapter->netdev, 65536);
5048 return;
5049 }
5050
5051 if (hw->mac.type == ixgbe_mac_82598EB)
5052 netif_set_gso_max_size(adapter->netdev, 32768);
5053
5054#ifdef IXGBE_FCOE
5055 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
5056 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
5057#endif
5058
5059
5060 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
5061 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5062 DCB_TX_CONFIG);
5063 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5064 DCB_RX_CONFIG);
5065 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
5066 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
5067 ixgbe_dcb_hw_ets(&adapter->hw,
5068 adapter->ixgbe_ieee_ets,
5069 max_frame);
5070 ixgbe_dcb_hw_pfc_config(&adapter->hw,
5071 adapter->ixgbe_ieee_pfc->pfc_en,
5072 adapter->ixgbe_ieee_ets->prio_tc);
5073 }
5074
5075
5076 if (hw->mac.type != ixgbe_mac_82598EB) {
5077 u32 msb = 0;
5078 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
5079
5080 while (rss_i) {
5081 msb++;
5082 rss_i >>= 1;
5083 }
5084
5085
5086 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
5087 }
5088}
5089#endif
5090
/* Additional bittime to account for IXGBE framing */
5092#define IXGBE_ETH_FRAMING 20
5093
/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
5100static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
5101{
5102 struct ixgbe_hw *hw = &adapter->hw;
5103 struct net_device *dev = adapter->netdev;
5104 int link, tc, kb, marker;
5105 u32 dv_id, rx_pba;
5106
5107
5108 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
5109
5110#ifdef IXGBE_FCOE
5111
5112 if ((dev->features & NETIF_F_FCOE_MTU) &&
5113 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5114 (pb == ixgbe_fcoe_get_tc(adapter)))
5115 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5116#endif
5117
5118
5119 switch (hw->mac.type) {
5120 case ixgbe_mac_X540:
5121 case ixgbe_mac_X550:
5122 case ixgbe_mac_X550EM_x:
5123 case ixgbe_mac_x550em_a:
5124 dv_id = IXGBE_DV_X540(link, tc);
5125 break;
5126 default:
5127 dv_id = IXGBE_DV(link, tc);
5128 break;
5129 }
5130
5131
5132 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5133 dv_id += IXGBE_B2BT(tc);
5134
5135
5136 kb = IXGBE_BT2KB(dv_id);
5137 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
5138
5139 marker = rx_pba - kb;

	/* It is possible that the packet buffer is not large enough
	 * to provide required headroom. In this case throw an error
	 * to user and do the best we can.
	 */
5145 if (marker < 0) {
		e_warn(drv, "Packet Buffer(%i) can not provide enough "
			    "headroom to support flow control. "
			    "Decrease MTU or number of traffic classes\n", pb);
5149 marker = tc + 1;
5150 }
5151
5152 return marker;
5153}
5154
/**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
5161static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
5162{
5163 struct ixgbe_hw *hw = &adapter->hw;
5164 struct net_device *dev = adapter->netdev;
5165 int tc;
5166 u32 dv_id;
5167
5168
5169 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
5170
5171#ifdef IXGBE_FCOE
5172
5173 if ((dev->features & NETIF_F_FCOE_MTU) &&
5174 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5175 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
5176 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5177#endif
5178
5179
5180 switch (hw->mac.type) {
5181 case ixgbe_mac_X540:
5182 case ixgbe_mac_X550:
5183 case ixgbe_mac_X550EM_x:
5184 case ixgbe_mac_x550em_a:
5185 dv_id = IXGBE_LOW_DV_X540(tc);
5186 break;
5187 default:
5188 dv_id = IXGBE_LOW_DV(tc);
5189 break;
5190 }
5191
5192
5193 return IXGBE_BT2KB(dv_id);
5194}
5195
/*
 * ixgbe_pbthresh_setup - calculate and setup high low water marks
 */
5199static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
5200{
5201 struct ixgbe_hw *hw = &adapter->hw;
5202 int num_tc = adapter->hw_tcs;
5203 int i;
5204
5205 if (!num_tc)
5206 num_tc = 1;
5207
5208 for (i = 0; i < num_tc; i++) {
5209 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
5210 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);

		/* Low water marks must not be larger than high water marks */
5213 if (hw->fc.low_water[i] > hw->fc.high_water[i])
5214 hw->fc.low_water[i] = 0;
5215 }
5216
5217 for (; i < MAX_TRAFFIC_CLASS; i++)
5218 hw->fc.high_water[i] = 0;
5219}
5220
5221static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
5222{
5223 struct ixgbe_hw *hw = &adapter->hw;
5224 int hdrm;
5225 u8 tc = adapter->hw_tcs;
5226
5227 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5228 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5229 hdrm = 32 << adapter->fdir_pballoc;
5230 else
5231 hdrm = 0;
5232
5233 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
5234 ixgbe_pbthresh_setup(adapter);
5235}
5236
5237static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
5238{
5239 struct ixgbe_hw *hw = &adapter->hw;
5240 struct hlist_node *node2;
5241 struct ixgbe_fdir_filter *filter;
5242 u64 action;
5243
5244 spin_lock(&adapter->fdir_perfect_lock);
5245
5246 if (!hlist_empty(&adapter->fdir_filter_list))
5247 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
5248
5249 hlist_for_each_entry_safe(filter, node2,
5250 &adapter->fdir_filter_list, fdir_node) {
5251 action = filter->action;
5252 if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
5253 action =
5254 (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
5255
5256 ixgbe_fdir_write_perfect_filter_82599(hw,
5257 &filter->filter,
5258 filter->sw_idx,
5259 (action == IXGBE_FDIR_DROP_QUEUE) ?
5260 IXGBE_FDIR_DROP_QUEUE :
5261 adapter->rx_ring[action]->reg_idx);
5262 }
5263
5264 spin_unlock(&adapter->fdir_perfect_lock);
5265}
5266
/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
5271static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
5272{
5273 u16 i = rx_ring->next_to_clean;
5274 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
5275
5276 if (rx_ring->xsk_umem) {
5277 ixgbe_xsk_clean_rx_ring(rx_ring);
5278 goto skip_free;
5279 }
5280
5281
5282 while (i != rx_ring->next_to_alloc) {
5283 if (rx_buffer->skb) {
5284 struct sk_buff *skb = rx_buffer->skb;
5285 if (IXGBE_CB(skb)->page_released)
5286 dma_unmap_page_attrs(rx_ring->dev,
5287 IXGBE_CB(skb)->dma,
5288 ixgbe_rx_pg_size(rx_ring),
5289 DMA_FROM_DEVICE,
5290 IXGBE_RX_DMA_ATTR);
5291 dev_kfree_skb(skb);
5292 }
5293
5294
5295
5296
5297 dma_sync_single_range_for_cpu(rx_ring->dev,
5298 rx_buffer->dma,
5299 rx_buffer->page_offset,
5300 ixgbe_rx_bufsz(rx_ring),
5301 DMA_FROM_DEVICE);
5302
5303
5304 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
5305 ixgbe_rx_pg_size(rx_ring),
5306 DMA_FROM_DEVICE,
5307 IXGBE_RX_DMA_ATTR);
5308 __page_frag_cache_drain(rx_buffer->page,
5309 rx_buffer->pagecnt_bias);
5310
5311 i++;
5312 rx_buffer++;
5313 if (i == rx_ring->count) {
5314 i = 0;
5315 rx_buffer = rx_ring->rx_buffer_info;
5316 }
5317 }
5318
5319skip_free:
5320 rx_ring->next_to_alloc = 0;
5321 rx_ring->next_to_clean = 0;
5322 rx_ring->next_to_use = 0;
5323}
5324
5325static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
5326 struct ixgbe_fwd_adapter *accel)
5327{
5328 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
5329 int num_tc = netdev_get_num_tc(adapter->netdev);
5330 struct net_device *vdev = accel->netdev;
5331 int i, baseq, err;
5332
5333 baseq = accel->pool * adapter->num_rx_queues_per_pool;
5334 netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
5335 accel->pool, adapter->num_rx_pools,
5336 baseq, baseq + adapter->num_rx_queues_per_pool);
5337
5338 accel->rx_base_queue = baseq;
5339 accel->tx_base_queue = baseq;
5340
5341
5342 for (i = 0; i < num_tc; i++)
5343 netdev_bind_sb_channel_queue(adapter->netdev, vdev,
5344 i, rss_i, baseq + (rss_i * i));
5345
5346 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5347 adapter->rx_ring[baseq + i]->netdev = vdev;
5348
5349
5350
5351
5352 wmb();
5353
5354
5355
5356
5357 err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
5358 VMDQ_P(accel->pool));
5359 if (err >= 0)
5360 return 0;
5361
5362
5363 macvlan_release_l2fw_offload(vdev);
5364
5365 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5366 adapter->rx_ring[baseq + i]->netdev = NULL;
5367
5368 netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
5369
5370
5371 netdev_unbind_sb_channel(adapter->netdev, vdev);
5372 netdev_set_sb_channel(vdev, 0);
5373
5374 clear_bit(accel->pool, adapter->fwd_bitmask);
5375 kfree(accel);
5376
5377 return err;
5378}
5379
5380static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
5381{
5382 struct ixgbe_adapter *adapter = data;
5383 struct ixgbe_fwd_adapter *accel;
5384
5385 if (!netif_is_macvlan(vdev))
5386 return 0;
5387
5388 accel = macvlan_accel_priv(vdev);
5389 if (!accel)
5390 return 0;
5391
5392 ixgbe_fwd_ring_up(adapter, accel);
5393
5394 return 0;
5395}
5396
5397static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
5398{
5399 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5400 ixgbe_macvlan_up, adapter);
5401}
5402
5403static void ixgbe_configure(struct ixgbe_adapter *adapter)
5404{
5405 struct ixgbe_hw *hw = &adapter->hw;
5406
5407 ixgbe_configure_pb(adapter);
5408#ifdef CONFIG_IXGBE_DCB
5409 ixgbe_configure_dcb(adapter);
5410#endif
5411
5412
5413
5414
5415 ixgbe_configure_virtualization(adapter);
5416
5417 ixgbe_set_rx_mode(adapter->netdev);
5418 ixgbe_restore_vlan(adapter);
5419 ixgbe_ipsec_restore(adapter);
5420
5421 switch (hw->mac.type) {
5422 case ixgbe_mac_82599EB:
5423 case ixgbe_mac_X540:
5424 hw->mac.ops.disable_rx_buff(hw);
5425 break;
5426 default:
5427 break;
5428 }
5429
5430 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5431 ixgbe_init_fdir_signature_82599(&adapter->hw,
5432 adapter->fdir_pballoc);
5433 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5434 ixgbe_init_fdir_perfect_82599(&adapter->hw,
5435 adapter->fdir_pballoc);
5436 ixgbe_fdir_filter_restore(adapter);
5437 }
5438
5439 switch (hw->mac.type) {
5440 case ixgbe_mac_82599EB:
5441 case ixgbe_mac_X540:
5442 hw->mac.ops.enable_rx_buff(hw);
5443 break;
5444 default:
5445 break;
5446 }
5447
5448#ifdef CONFIG_IXGBE_DCA
5449
5450 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
5451 ixgbe_setup_dca(adapter);
5452#endif
5453
5454#ifdef IXGBE_FCOE
5455
5456 ixgbe_configure_fcoe(adapter);
5457
5458#endif
5459 ixgbe_configure_tx(adapter);
5460 ixgbe_configure_rx(adapter);
5461 ixgbe_configure_dfwd(adapter);
5462}
5463
/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 **/
5468static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
5469{
	/*
	 * We are assuming the worst case scenario here, and that
	 * is that an SFP was inserted/removed after the reset
	 * but before SFP detection was enabled.  As such the best
	 * solution is to just start searching as soon as we start
	 */
5476 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5477 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5478
5479 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5480 adapter->sfp_poll_time = 0;
5481}
5482
/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
5489static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
5490{
5491 u32 speed;
5492 bool autoneg, link_up = false;
5493 int ret = IXGBE_ERR_LINK_SETUP;
5494
5495 if (hw->mac.ops.check_link)
5496 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
5497
5498 if (ret)
5499 return ret;
5500
5501 speed = hw->phy.autoneg_advertised;
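	/* if nothing is advertised, fall back to the device's full
	 * link capabilities
	 */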
5502 if ((!speed) && (hw->mac.ops.get_link_capabilities))
5503 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5504 &autoneg);
5505 if (ret)
5506 return ret;
5507
5508 if (hw->mac.ops.setup_link)
5509 ret = hw->mac.ops.setup_link(hw, speed, link_up);
5510
5511 return ret;
5512}
5513
5514static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5515{
5516 struct ixgbe_hw *hw = &adapter->hw;
5517 u32 gpie = 0;
5518
5519 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5520 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5521 IXGBE_GPIE_OCD;
5522 gpie |= IXGBE_GPIE_EIAME;
5523
5524
5525
5526
5527 switch (hw->mac.type) {
5528 case ixgbe_mac_82598EB:
5529 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5530 break;
5531 case ixgbe_mac_82599EB:
5532 case ixgbe_mac_X540:
5533 case ixgbe_mac_X550:
5534 case ixgbe_mac_X550EM_x:
5535 case ixgbe_mac_x550em_a:
5536 default:
5537 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5538 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5539 break;
5540 }
5541 } else {
5542
5543
5544 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5545 }
5546
5547
5548
5549
5550 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
5551 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5552
5553 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
5554 case IXGBE_82599_VMDQ_8Q_MASK:
5555 gpie |= IXGBE_GPIE_VTMODE_16;
5556 break;
5557 case IXGBE_82599_VMDQ_4Q_MASK:
5558 gpie |= IXGBE_GPIE_VTMODE_32;
5559 break;
5560 default:
5561 gpie |= IXGBE_GPIE_VTMODE_64;
5562 break;
5563 }
5564 }
5565
5566
5567 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
5568 switch (adapter->hw.mac.type) {
5569 case ixgbe_mac_82599EB:
5570 gpie |= IXGBE_SDP0_GPIEN_8259X;
5571 break;
5572 default:
5573 break;
5574 }
5575 }
5576
5577
5578 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
5579 gpie |= IXGBE_SDP1_GPIEN(hw);
5580
5581 switch (hw->mac.type) {
5582 case ixgbe_mac_82599EB:
5583 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5584 break;
5585 case ixgbe_mac_X550EM_x:
5586 case ixgbe_mac_x550em_a:
5587 gpie |= IXGBE_SDP0_GPIEN_X540;
5588 break;
5589 default:
5590 break;
5591 }
5592
5593 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5594}
5595
5596static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
5597{
5598 struct ixgbe_hw *hw = &adapter->hw;
5599 int err;
5600 u32 ctrl_ext;
5601
5602 ixgbe_get_hw_control(adapter);
5603 ixgbe_setup_gpie(adapter);
5604
5605 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
5606 ixgbe_configure_msix(adapter);
5607 else
5608 ixgbe_configure_msi_and_legacy(adapter);
5609
	/* enable the optics for 82599 SFP+ fiber */
5611 if (hw->mac.ops.enable_tx_laser)
5612 hw->mac.ops.enable_tx_laser(hw);
5613
5614 if (hw->phy.ops.set_phy_power)
5615 hw->phy.ops.set_phy_power(hw, true);
5616
5617 smp_mb__before_atomic();
5618 clear_bit(__IXGBE_DOWN, &adapter->state);
5619 ixgbe_napi_enable_all(adapter);
5620
5621 if (ixgbe_is_sfp(hw)) {
5622 ixgbe_sfp_link_config(adapter);
5623 } else {
5624 err = ixgbe_non_sfp_link_config(hw);
5625 if (err)
5626 e_err(probe, "link_config FAILED %d\n", err);
5627 }
5628
5629
5630 IXGBE_READ_REG(hw, IXGBE_EICR);
5631 ixgbe_irq_enable(adapter, true, true);
5632
	/*
	 * If this adapter has a fan, check to see if we had a failure
	 * before we enabled the interrupt.
	 */
5637 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5638 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5639 if (esdp & IXGBE_ESDP_SDP1)
5640 e_crit(drv, "Fan has stopped, replace the adapter\n");
5641 }
5642
	/* bring the link up in the watchdog, this could race with our first
	 * open so mark the link as down and schedule the watchdog
	 */
5645 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5646 adapter->link_check_timeout = jiffies;
5647 mod_timer(&adapter->service_timer, jiffies);
5648
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
5650 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5651 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5652 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5653}
5654
5655void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5656{
5657 WARN_ON(in_interrupt());
5658
5659 netif_trans_update(adapter->netdev);
5660
5661 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5662 usleep_range(1000, 2000);
5663 if (adapter->hw.phy.type == ixgbe_phy_fw)
5664 ixgbe_watchdog_link_is_down(adapter);
5665 ixgbe_down(adapter);
5666
	/*
	 * If SR-IOV enabled then wait a bit before bringing the adapter
	 * back up to give the VFs time to respond to the reset.  The
	 * two second wait is based upon the watchdog timer cycle in
	 * the VF driver.
	 */
5672 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5673 msleep(2000);
5674 ixgbe_up(adapter);
5675 clear_bit(__IXGBE_RESETTING, &adapter->state);
5676}
5677
5678void ixgbe_up(struct ixgbe_adapter *adapter)
5679{
5680
5681 ixgbe_configure(adapter);
5682
5683 ixgbe_up_complete(adapter);
5684}
5685
5686static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
5687{
5688 u16 devctl2;
5689
5690 pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
5691
5692 switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
5693 case IXGBE_PCIDEVCTRL2_17_34s:
5694 case IXGBE_PCIDEVCTRL2_4_8s:
		/* For now we cap the upper limit on delay to 2 seconds
		 * as we end up going up to 34 seconds of delay in worst
		 * case timeout value.
		 */
		/* fall through */
5699 case IXGBE_PCIDEVCTRL2_1_2s:
5700 return 2000000ul;
5701 case IXGBE_PCIDEVCTRL2_260_520ms:
5702 return 520000ul;
5703 case IXGBE_PCIDEVCTRL2_65_130ms:
5704 return 130000ul;
5705 case IXGBE_PCIDEVCTRL2_16_32ms:
5706 return 32000ul;
5707 case IXGBE_PCIDEVCTRL2_1_2ms:
5708 return 2000ul;
5709 case IXGBE_PCIDEVCTRL2_50_100us:
5710 return 100ul;
5711 case IXGBE_PCIDEVCTRL2_16_32ms_def:
5712 return 32000ul;
5713 default:
5714 break;
5715 }

	/* We shouldn't need to hit this path, but just in case default */
5720 return 32000ul;
5721}
5722
5723void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
5724{
5725 unsigned long wait_delay, delay_interval;
5726 struct ixgbe_hw *hw = &adapter->hw;
5727 int i, wait_loop;
5728 u32 rxdctl;
5729
5730
5731 hw->mac.ops.disable_rx(hw);
5732
5733 if (ixgbe_removed(hw->hw_addr))
5734 return;
5735
	/* disable all enabled Rx queues */
5737 for (i = 0; i < adapter->num_rx_queues; i++) {
5738 struct ixgbe_ring *ring = adapter->rx_ring[i];
5739 u8 reg_idx = ring->reg_idx;
5740
5741 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5742 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5743 rxdctl |= IXGBE_RXDCTL_SWFLSH;
5744
5745
5746 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
5747 }
5748
5749
5750 if (hw->mac.type == ixgbe_mac_82598EB &&
5751 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5752 return;
5753
	/* Determine our minimum delay interval. We will increase this value
	 * with each subsequent test. This way if the device returns quickly
	 * we should spend as little time as possible waiting, however as
	 * the time increases we will wait for larger periods of time.
	 *
	 * The trick here is that we increase the interval using the
	 * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
	 * of that wait is that it totals up to 100x whatever interval we
	 * choose. Since our minimum wait is 100us we can just divide the
	 * total timeout by 100 to get our minimum delay interval.
	 */
5765 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5766
5767 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5768 wait_delay = delay_interval;
5769
5770 while (wait_loop--) {
5771 usleep_range(wait_delay, wait_delay + 10);
5772 wait_delay += delay_interval * 2;
5773 rxdctl = 0;
5774
5775
5776
5777
5778
5779
5780 for (i = 0; i < adapter->num_rx_queues; i++) {
5781 struct ixgbe_ring *ring = adapter->rx_ring[i];
5782 u8 reg_idx = ring->reg_idx;
5783
5784 rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5785 }
5786
5787 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
5788 return;
5789 }
5790
5791 e_err(drv,
5792 "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5793}
5794
5795void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
5796{
5797 unsigned long wait_delay, delay_interval;
5798 struct ixgbe_hw *hw = &adapter->hw;
5799 int i, wait_loop;
5800 u32 txdctl;
5801
5802 if (ixgbe_removed(hw->hw_addr))
5803 return;
5804
5805
5806 for (i = 0; i < adapter->num_tx_queues; i++) {
5807 struct ixgbe_ring *ring = adapter->tx_ring[i];
5808 u8 reg_idx = ring->reg_idx;
5809
5810 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5811 }
5812
5813
5814 for (i = 0; i < adapter->num_xdp_queues; i++) {
5815 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5816 u8 reg_idx = ring->reg_idx;
5817
5818 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5819 }
5820
	/* If the link is not up there shouldn't be much in the way of
	 * pending transactions. Those that are left will be flushed out
	 * when the reset logic goes through the flush sequence to clean out
	 * the pending Tx transactions.
	 */
5826 if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5827 goto dma_engine_disable;
5828
	/* Determine our minimum delay interval. We will increase this value
	 * with each subsequent test. This way if the device returns quickly
	 * we should spend as little time as possible waiting, however as
	 * the time increases we will wait for larger periods of time.
	 *
	 * The trick here is that we increase the interval using the
	 * following pattern: 1x 3x 5x 7x 9x 11x 13x 15x 17x 19x. The result
	 * of that wait is that it totals up to 100x whatever interval we
	 * choose. Since our minimum wait is 100us we can just divide the
	 * total timeout by 100 to get our minimum delay interval.
	 */
5840 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5841
5842 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5843 wait_delay = delay_interval;
5844
5845 while (wait_loop--) {
5846 usleep_range(wait_delay, wait_delay + 10);
5847 wait_delay += delay_interval * 2;
5848 txdctl = 0;
5849
5850
5851
5852
5853
5854
5855 for (i = 0; i < adapter->num_tx_queues; i++) {
5856 struct ixgbe_ring *ring = adapter->tx_ring[i];
5857 u8 reg_idx = ring->reg_idx;
5858
5859 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5860 }
5861 for (i = 0; i < adapter->num_xdp_queues; i++) {
5862 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5863 u8 reg_idx = ring->reg_idx;
5864
5865 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5866 }
5867
5868 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
5869 goto dma_engine_disable;
5870 }
5871
5872 e_err(drv,
5873 "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5874
5875dma_engine_disable:
	/* Disable the Tx DMA engine on 82599 and later MAC types */
5877 switch (hw->mac.type) {
5878 case ixgbe_mac_82599EB:
5879 case ixgbe_mac_X540:
5880 case ixgbe_mac_X550:
5881 case ixgbe_mac_X550EM_x:
5882 case ixgbe_mac_x550em_a:
5883 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5884 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5885 ~IXGBE_DMATXCTL_TE));
		/* fall through */
5887 default:
5888 break;
5889 }
5890}
5891
5892void ixgbe_reset(struct ixgbe_adapter *adapter)
5893{
5894 struct ixgbe_hw *hw = &adapter->hw;
5895 struct net_device *netdev = adapter->netdev;
5896 int err;
5897
5898 if (ixgbe_removed(hw->hw_addr))
5899 return;
5900
5901 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5902 usleep_range(1000, 2000);
5903
5904
5905 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5906 IXGBE_FLAG2_SFP_NEEDS_RESET);
5907 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5908
5909 err = hw->mac.ops.init_hw(hw);
5910 switch (err) {
5911 case 0:
5912 case IXGBE_ERR_SFP_NOT_PRESENT:
5913 case IXGBE_ERR_SFP_NOT_SUPPORTED:
5914 break;
5915 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5916 e_dev_err("master disable timed out\n");
5917 break;
5918 case IXGBE_ERR_EEPROM_VERSION:
5919
5920 e_dev_warn("This device is a pre-production adapter/LOM. "
5921 "Please be aware there may be issues associated with "
5922 "your hardware. If you are experiencing problems "
5923 "please contact your Intel or hardware "
5924 "representative who provided you with this "
5925 "hardware.\n");
5926 break;
5927 default:
5928 e_dev_err("Hardware Error: %d\n", err);
5929 }
5930
5931 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5932
5933
5934 ixgbe_flush_sw_mac_table(adapter);
5935 __dev_uc_unsync(netdev, NULL);
5936
5937
5938 ixgbe_mac_set_default_filter(adapter);
5939
5940
5941 if (hw->mac.san_mac_rar_index)
5942 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5943
5944 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5945 ixgbe_ptp_reset(adapter);
5946
5947 if (hw->phy.ops.set_phy_power) {
5948 if (!netif_running(adapter->netdev) && !adapter->wol)
5949 hw->phy.ops.set_phy_power(hw, false);
5950 else
5951 hw->phy.ops.set_phy_power(hw, true);
5952 }
5953}
5954
/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
5959static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5960{
5961 u16 i = tx_ring->next_to_clean;
5962 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
5963
5964 if (tx_ring->xsk_umem) {
5965 ixgbe_xsk_clean_tx_ring(tx_ring);
5966 goto out;
5967 }
5968
5969 while (i != tx_ring->next_to_use) {
5970 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
5971
5972
5973 if (ring_is_xdp(tx_ring))
5974 xdp_return_frame(tx_buffer->xdpf);
5975 else
5976 dev_kfree_skb_any(tx_buffer->skb);
5977
5978
5979 dma_unmap_single(tx_ring->dev,
5980 dma_unmap_addr(tx_buffer, dma),
5981 dma_unmap_len(tx_buffer, len),
5982 DMA_TO_DEVICE);
5983
5984
5985 eop_desc = tx_buffer->next_to_watch;
5986 tx_desc = IXGBE_TX_DESC(tx_ring, i);
5987
5988
5989 while (tx_desc != eop_desc) {
5990 tx_buffer++;
5991 tx_desc++;
5992 i++;
5993 if (unlikely(i == tx_ring->count)) {
5994 i = 0;
5995 tx_buffer = tx_ring->tx_buffer_info;
5996 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
5997 }
5998
5999
6000 if (dma_unmap_len(tx_buffer, len))
6001 dma_unmap_page(tx_ring->dev,
6002 dma_unmap_addr(tx_buffer, dma),
6003 dma_unmap_len(tx_buffer, len),
6004 DMA_TO_DEVICE);
6005 }
6006
6007
6008 tx_buffer++;
6009 i++;
6010 if (unlikely(i == tx_ring->count)) {
6011 i = 0;
6012 tx_buffer = tx_ring->tx_buffer_info;
6013 }
6014 }
6015
6016
6017 if (!ring_is_xdp(tx_ring))
6018 netdev_tx_reset_queue(txring_txq(tx_ring));
6019
6020out:
6021
6022 tx_ring->next_to_use = 0;
6023 tx_ring->next_to_clean = 0;
6024}
6025
/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
6030static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
6031{
6032 int i;
6033
6034 for (i = 0; i < adapter->num_rx_queues; i++)
6035 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
6036}
6037
/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
6042static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
6043{
6044 int i;
6045
6046 for (i = 0; i < adapter->num_tx_queues; i++)
6047 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
6048 for (i = 0; i < adapter->num_xdp_queues; i++)
6049 ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
6050}
6051
6052static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
6053{
6054 struct hlist_node *node2;
6055 struct ixgbe_fdir_filter *filter;
6056
6057 spin_lock(&adapter->fdir_perfect_lock);
6058
6059 hlist_for_each_entry_safe(filter, node2,
6060 &adapter->fdir_filter_list, fdir_node) {
6061 hlist_del(&filter->fdir_node);
6062 kfree(filter);
6063 }
6064 adapter->fdir_filter_count = 0;
6065
6066 spin_unlock(&adapter->fdir_perfect_lock);
6067}
6068
6069void ixgbe_down(struct ixgbe_adapter *adapter)
6070{
6071 struct net_device *netdev = adapter->netdev;
6072 struct ixgbe_hw *hw = &adapter->hw;
6073 int i;
6074
	/* signal that we are down to the interrupt handler */
6076 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
6077 return;
6078
6079
6080 netif_tx_stop_all_queues(netdev);
6081
	/* call carrier off first to avoid false dev_watchdog timeouts */
6083 netif_carrier_off(netdev);
6084 netif_tx_disable(netdev);
6085
6086
6087 ixgbe_disable_rx(adapter);

	/* synchronize_rcu() needed for pending XDP buffers to drain */
6090 if (adapter->xdp_ring[0])
6091 synchronize_rcu();
6092
6093 ixgbe_irq_disable(adapter);
6094
6095 ixgbe_napi_disable_all(adapter);
6096
6097 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
6098 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
6099 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6100
6101 del_timer_sync(&adapter->service_timer);
6102
6103 if (adapter->num_vfs) {
6104
6105 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
6106
6107
6108 for (i = 0 ; i < adapter->num_vfs; i++)
6109 adapter->vfinfo[i].clear_to_send = false;
6110
6111
6112 ixgbe_ping_all_vfs(adapter);
6113
6114
6115 ixgbe_disable_tx_rx(adapter);
6116 }
6117
6118
6119 ixgbe_disable_tx(adapter);
6120
6121 if (!pci_channel_offline(adapter->pdev))
6122 ixgbe_reset(adapter);

	/* power down the optics for 82599 SFP+ fiber */
6125 if (hw->mac.ops.disable_tx_laser)
6126 hw->mac.ops.disable_tx_laser(hw);
6127
6128 ixgbe_clean_all_tx_rings(adapter);
6129 ixgbe_clean_all_rx_rings(adapter);
6130}
6131
/**
 * ixgbe_set_eee_capable - helper function to determine EEE support on X550
 * @adapter: board private structure
 */
6136static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
6137{
6138 struct ixgbe_hw *hw = &adapter->hw;
6139
6140 switch (hw->device_id) {
6141 case IXGBE_DEV_ID_X550EM_A_1G_T:
6142 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6143 if (!hw->phy.eee_speeds_supported)
6144 break;
6145 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
6146 if (!hw->phy.eee_speeds_advertised)
6147 break;
6148 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
6149 break;
6150 default:
6151 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
6152 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
6153 break;
6154 }
6155}
6156
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
6161static void ixgbe_tx_timeout(struct net_device *netdev)
6162{
6163 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6164
6165
6166 ixgbe_tx_timeout_reset(adapter);
6167}
6168
6169#ifdef CONFIG_IXGBE_DCB
6170static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
6171{
6172 struct ixgbe_hw *hw = &adapter->hw;
6173 struct tc_configuration *tc;
6174 int j;
6175
6176 switch (hw->mac.type) {
6177 case ixgbe_mac_82598EB:
6178 case ixgbe_mac_82599EB:
6179 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
6180 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
6181 break;
6182 case ixgbe_mac_X540:
6183 case ixgbe_mac_X550:
6184 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
6185 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
6186 break;
6187 case ixgbe_mac_X550EM_x:
6188 case ixgbe_mac_x550em_a:
6189 default:
6190 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
6191 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
6192 break;
6193 }
6194
6195
6196 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
6197 tc = &adapter->dcb_cfg.tc_config[j];
6198 tc->path[DCB_TX_CONFIG].bwg_id = 0;
6199 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
6200 tc->path[DCB_RX_CONFIG].bwg_id = 0;
6201 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
6202 tc->dcb_pfc = pfc_disabled;
6203 }
6204
6205
6206 tc = &adapter->dcb_cfg.tc_config[0];
6207 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
6208 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
6209
6210 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
6211 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
6212 adapter->dcb_cfg.pfc_mode_enable = false;
6213 adapter->dcb_set_bitmap = 0x00;
6214 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
6215 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
6216 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
6217 sizeof(adapter->temp_dcb_cfg));
6218}
6219#endif
6220
/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 * @ii: pointer to ixgbe_info for device
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
6230static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
6231 const struct ixgbe_info *ii)
6232{
6233 struct ixgbe_hw *hw = &adapter->hw;
6234 struct pci_dev *pdev = adapter->pdev;
6235 unsigned int rss, fdir;
6236 u32 fwsm;
6237 int i;
6238
6239
6240
6241 hw->vendor_id = pdev->vendor;
6242 hw->device_id = pdev->device;
6243 hw->revision_id = pdev->revision;
6244 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6245 hw->subsystem_device_id = pdev->subsystem_device;
6246
6247
6248 ii->get_invariants(hw);
6249
6250
6251 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
6252 adapter->ring_feature[RING_F_RSS].limit = rss;
6253 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
6254 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
6255 adapter->atr_sample_rate = 20;
6256 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
6257 adapter->ring_feature[RING_F_FDIR].limit = fdir;
6258 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
6259 adapter->ring_feature[RING_F_VMDQ].limit = 1;
6260#ifdef CONFIG_IXGBE_DCA
6261 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
6262#endif
6263#ifdef CONFIG_IXGBE_DCB
6264 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
6265 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6266#endif
6267#ifdef IXGBE_FCOE
6268 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
6269 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6270#ifdef CONFIG_IXGBE_DCB
6271
6272 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
6273#endif
6274#endif
6275
6276
6277 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
6278 GFP_KERNEL);
6279 if (!adapter->jump_tables[0])
6280 return -ENOMEM;
6281 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
6282
6283 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
6284 adapter->jump_tables[i] = NULL;
6285
6286 adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
6287 sizeof(struct ixgbe_mac_addr),
6288 GFP_KERNEL);
6289 if (!adapter->mac_table)
6290 return -ENOMEM;
6291
6292 if (ixgbe_init_rss_key(adapter))
6293 return -ENOMEM;
6294
6295 adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
6296 if (!adapter->af_xdp_zc_qps)
6297 return -ENOMEM;
6298
6299
6300 switch (hw->mac.type) {
6301 case ixgbe_mac_82598EB:
6302 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
6303
6304 if (hw->device_id == IXGBE_DEV_ID_82598AT)
6305 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
6306
6307 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
6308 adapter->ring_feature[RING_F_FDIR].limit = 0;
6309 adapter->atr_sample_rate = 0;
6310 adapter->fdir_pballoc = 0;
6311#ifdef IXGBE_FCOE
6312 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6313 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6314#ifdef CONFIG_IXGBE_DCB
6315 adapter->fcoe.up = 0;
6316#endif
6317#endif
6318 break;
6319 case ixgbe_mac_82599EB:
6320 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
6321 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6322 break;
6323 case ixgbe_mac_X540:
6324 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
6325 if (fwsm & IXGBE_FWSM_TS_ENABLED)
6326 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6327 break;
6328 case ixgbe_mac_x550em_a:
6329 adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
6330 switch (hw->device_id) {
6331 case IXGBE_DEV_ID_X550EM_A_1G_T:
6332 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6333 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6334 break;
6335 default:
6336 break;
6337 }
		/* fall through */
6339 case ixgbe_mac_X550EM_x:
6340#ifdef CONFIG_IXGBE_DCB
6341 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
6342#endif
6343#ifdef IXGBE_FCOE
6344 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6345#ifdef CONFIG_IXGBE_DCB
6346 adapter->fcoe.up = 0;
6347#endif
6348#endif
		/* fall through */
6350 case ixgbe_mac_X550:
6351 if (hw->mac.type == ixgbe_mac_X550)
6352 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6353#ifdef CONFIG_IXGBE_DCA
6354 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
6355#endif
6356 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
6357 break;
6358 default:
6359 break;
6360 }
6361
6362#ifdef IXGBE_FCOE
6363
6364 spin_lock_init(&adapter->fcoe.lock);
6365
6366#endif
6367
6368 spin_lock_init(&adapter->fdir_perfect_lock);
6369
6370#ifdef CONFIG_IXGBE_DCB
6371 ixgbe_init_dcb(adapter);
6372#endif
6373 ixgbe_init_ipsec_offload(adapter);
6374
6375
6376 hw->fc.requested_mode = ixgbe_fc_full;
6377 hw->fc.current_mode = ixgbe_fc_full;
6378 ixgbe_pbthresh_setup(adapter);
6379 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
6380 hw->fc.send_xon = true;
6381 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
6382
6383#ifdef CONFIG_PCI_IOV
6384 if (max_vfs > 0)
6385 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
6386
6387
6388 if (hw->mac.type != ixgbe_mac_82598EB) {
6389 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
6390 max_vfs = 0;
6391 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
6392 }
6393 }
6394#endif
6395
6396
6397 adapter->rx_itr_setting = 1;
6398 adapter->tx_itr_setting = 1;
6399
6400
6401 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
6402 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
6403
6404
6405 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
6406
6407
6408 if (ixgbe_init_eeprom_params_generic(hw)) {
6409 e_dev_err("EEPROM initialization failed\n");
6410 return -EIO;
6411 }
6412
6413
6414 set_bit(0, adapter->fwd_bitmask);
6415 set_bit(__IXGBE_DOWN, &adapter->state);
6416
6417 return 0;
6418}
6419
/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
6426int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
6427{
6428 struct device *dev = tx_ring->dev;
6429 int orig_node = dev_to_node(dev);
6430 int ring_node = NUMA_NO_NODE;
6431 int size;
6432
6433 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
6434
6435 if (tx_ring->q_vector)
6436 ring_node = tx_ring->q_vector->numa_node;
6437
6438 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
6439 if (!tx_ring->tx_buffer_info)
6440 tx_ring->tx_buffer_info = vmalloc(size);
6441 if (!tx_ring->tx_buffer_info)
6442 goto err;
6443
6444
6445 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
6446 tx_ring->size = ALIGN(tx_ring->size, 4096);
6447
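	/* try the coherent allocation on the ring's preferred NUMA node
	 * first, then retry without a node preference if that fails
	 */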
6448 set_dev_node(dev, ring_node);
6449 tx_ring->desc = dma_alloc_coherent(dev,
6450 tx_ring->size,
6451 &tx_ring->dma,
6452 GFP_KERNEL);
6453 set_dev_node(dev, orig_node);
6454 if (!tx_ring->desc)
6455 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6456 &tx_ring->dma, GFP_KERNEL);
6457 if (!tx_ring->desc)
6458 goto err;
6459
6460 tx_ring->next_to_use = 0;
6461 tx_ring->next_to_clean = 0;
6462 return 0;
6463
6464err:
6465 vfree(tx_ring->tx_buffer_info);
6466 tx_ring->tx_buffer_info = NULL;
6467 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
6468 return -ENOMEM;
6469}
6470
/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
6481static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
6482{
6483 int i, j = 0, err = 0;
6484
6485 for (i = 0; i < adapter->num_tx_queues; i++) {
6486 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6487 if (!err)
6488 continue;
6489
6490 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
6491 goto err_setup_tx;
6492 }
6493 for (j = 0; j < adapter->num_xdp_queues; j++) {
6494 err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
6495 if (!err)
6496 continue;
6497
6498 e_err(probe, "Allocation for Tx Queue %u failed\n", j);
6499 goto err_setup_tx;
6500 }
6501
6502 return 0;
6503err_setup_tx:
6504
6505 while (j--)
6506 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
6507 while (i--)
6508 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6509 return err;
6510}
6511
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: pointer to ixgbe_adapter
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
6519int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
6520 struct ixgbe_ring *rx_ring)
6521{
6522 struct device *dev = rx_ring->dev;
6523 int orig_node = dev_to_node(dev);
6524 int ring_node = NUMA_NO_NODE;
6525 int size;
6526
6527 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
6528
6529 if (rx_ring->q_vector)
6530 ring_node = rx_ring->q_vector->numa_node;
6531
6532 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
6533 if (!rx_ring->rx_buffer_info)
6534 rx_ring->rx_buffer_info = vmalloc(size);
6535 if (!rx_ring->rx_buffer_info)
6536 goto err;
6537
6538
6539 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
6540 rx_ring->size = ALIGN(rx_ring->size, 4096);
6541
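	/* as on the Tx side, prefer the ring's NUMA node for the
	 * descriptor allocation and fall back to any node on failure
	 */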
6542 set_dev_node(dev, ring_node);
6543 rx_ring->desc = dma_alloc_coherent(dev,
6544 rx_ring->size,
6545 &rx_ring->dma,
6546 GFP_KERNEL);
6547 set_dev_node(dev, orig_node);
6548 if (!rx_ring->desc)
6549 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
6550 &rx_ring->dma, GFP_KERNEL);
6551 if (!rx_ring->desc)
6552 goto err;
6553
6554 rx_ring->next_to_clean = 0;
6555 rx_ring->next_to_use = 0;
6556
6557
6558 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
6559 rx_ring->queue_index) < 0)
6560 goto err;
6561
6562 rx_ring->xdp_prog = adapter->xdp_prog;
6563
6564 return 0;
6565err:
6566 vfree(rx_ring->rx_buffer_info);
6567 rx_ring->rx_buffer_info = NULL;
6568 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
6569 return -ENOMEM;
6570}
6571
/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
6582static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
6583{
6584 int i, err = 0;
6585
6586 for (i = 0; i < adapter->num_rx_queues; i++) {
6587 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
6588 if (!err)
6589 continue;
6590
6591 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
6592 goto err_setup_rx;
6593 }
6594
6595#ifdef IXGBE_FCOE
6596 err = ixgbe_setup_fcoe_ddp_resources(adapter);
6597 if (!err)
6598#endif
6599 return 0;
6600err_setup_rx:
6601
6602 while (i--)
6603 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6604 return err;
6605}
6606
/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
6613void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
6614{
6615 ixgbe_clean_tx_ring(tx_ring);
6616
6617 vfree(tx_ring->tx_buffer_info);
6618 tx_ring->tx_buffer_info = NULL;
6619
6620
6621 if (!tx_ring->desc)
6622 return;
6623
6624 dma_free_coherent(tx_ring->dev, tx_ring->size,
6625 tx_ring->desc, tx_ring->dma);
6626
6627 tx_ring->desc = NULL;
6628}
6629
/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
6636static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
6637{
6638 int i;
6639
6640 for (i = 0; i < adapter->num_tx_queues; i++)
6641 if (adapter->tx_ring[i]->desc)
6642 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6643 for (i = 0; i < adapter->num_xdp_queues; i++)
6644 if (adapter->xdp_ring[i]->desc)
6645 ixgbe_free_tx_resources(adapter->xdp_ring[i]);
6646}
6647
/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
6654void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
6655{
6656 ixgbe_clean_rx_ring(rx_ring);
6657
6658 rx_ring->xdp_prog = NULL;
6659 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
6660 vfree(rx_ring->rx_buffer_info);
6661 rx_ring->rx_buffer_info = NULL;
6662
6663
6664 if (!rx_ring->desc)
6665 return;
6666
6667 dma_free_coherent(rx_ring->dev, rx_ring->size,
6668 rx_ring->desc, rx_ring->dma);
6669
6670 rx_ring->desc = NULL;
6671}
6672
/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
6679static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
6680{
6681 int i;
6682
6683#ifdef IXGBE_FCOE
6684 ixgbe_free_fcoe_ddp_resources(adapter);
6685
6686#endif
6687 for (i = 0; i < adapter->num_rx_queues; i++)
6688 if (adapter->rx_ring[i]->desc)
6689 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6690}
6691
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
6699static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6700{
6701 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6702
6703 if (adapter->xdp_prog) {
6704 int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
6705 VLAN_HLEN;
6706 int i;
6707
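		/* with an XDP program attached every frame must fit
		 * within a single Rx buffer; reject MTUs that would not
		 */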
6708 for (i = 0; i < adapter->num_rx_queues; i++) {
6709 struct ixgbe_ring *ring = adapter->rx_ring[i];
6710
6711 if (new_frame_size > ixgbe_rx_bufsz(ring)) {
6712 e_warn(probe, "Requested MTU size is not supported with XDP\n");
6713 return -EINVAL;
6714 }
6715 }
6716 }
6717
6718
6719
6720
6721
6722
6723 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
6724 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
6725 (new_mtu > ETH_DATA_LEN))
6726 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
6727
6728 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
6729
6730
6731 netdev->mtu = new_mtu;
6732
6733 if (netif_running(netdev))
6734 ixgbe_reinit_locked(adapter);
6735
6736 return 0;
6737}
6738
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
6751int ixgbe_open(struct net_device *netdev)
6752{
6753 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6754 struct ixgbe_hw *hw = &adapter->hw;
6755 int err, queues;
6756
6757
6758 if (test_bit(__IXGBE_TESTING, &adapter->state))
6759 return -EBUSY;
6760
6761 netif_carrier_off(netdev);
6762
6763
6764 err = ixgbe_setup_all_tx_resources(adapter);
6765 if (err)
6766 goto err_setup_tx;
6767
6768
6769 err = ixgbe_setup_all_rx_resources(adapter);
6770 if (err)
6771 goto err_setup_rx;
6772
6773 ixgbe_configure(adapter);
6774
6775 err = ixgbe_request_irq(adapter);
6776 if (err)
6777 goto err_req_irq;
6778
6779
6780 queues = adapter->num_tx_queues;
6781 err = netif_set_real_num_tx_queues(netdev, queues);
6782 if (err)
6783 goto err_set_queues;
6784
6785 queues = adapter->num_rx_queues;
6786 err = netif_set_real_num_rx_queues(netdev, queues);
6787 if (err)
6788 goto err_set_queues;
6789
6790 ixgbe_ptp_init(adapter);
6791
6792 ixgbe_up_complete(adapter);
6793
6794 ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
6795 udp_tunnel_get_rx_info(netdev);
6796
6797 return 0;
6798
6799err_set_queues:
6800 ixgbe_free_irq(adapter);
6801err_req_irq:
6802 ixgbe_free_all_rx_resources(adapter);
6803 if (hw->phy.ops.set_phy_power && !adapter->wol)
6804 hw->phy.ops.set_phy_power(&adapter->hw, false);
6805err_setup_rx:
6806 ixgbe_free_all_tx_resources(adapter);
6807err_setup_tx:
6808 ixgbe_reset(adapter);
6809
6810 return err;
6811}
6812
6813static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6814{
6815 ixgbe_ptp_suspend(adapter);
6816
6817 if (adapter->hw.phy.ops.enter_lplu) {
6818 adapter->hw.phy.reset_disable = true;
6819 ixgbe_down(adapter);
6820 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6821 adapter->hw.phy.reset_disable = false;
6822 } else {
6823 ixgbe_down(adapter);
6824 }
6825
6826 ixgbe_free_irq(adapter);
6827
6828 ixgbe_free_all_tx_resources(adapter);
6829 ixgbe_free_all_rx_resources(adapter);
6830}
6831
/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
6843int ixgbe_close(struct net_device *netdev)
6844{
6845 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6846
6847 ixgbe_ptp_stop(adapter);
6848
6849 if (netif_device_present(netdev))
6850 ixgbe_close_suspend(adapter);
6851
6852 ixgbe_fdir_filter_exit(adapter);
6853
6854 ixgbe_release_hw_control(adapter);
6855
6856 return 0;
6857}
6858
6859#ifdef CONFIG_PM
6860static int ixgbe_resume(struct pci_dev *pdev)
6861{
6862 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6863 struct net_device *netdev = adapter->netdev;
6864 u32 err;
6865
6866 adapter->hw.hw_addr = adapter->io_addr;
6867 pci_set_power_state(pdev, PCI_D0);
6868 pci_restore_state(pdev);
6869
	/* pci_restore_state() clears dev->state_saved, so call
	 * pci_save_state() again to restore it.
	 */
6873 pci_save_state(pdev);
6874
6875 err = pci_enable_device_mem(pdev);
6876 if (err) {
6877 e_dev_err("Cannot enable PCI device from suspend\n");
6878 return err;
6879 }
6880 smp_mb__before_atomic();
6881 clear_bit(__IXGBE_DISABLED, &adapter->state);
6882 pci_set_master(pdev);
6883
6884 pci_wake_from_d3(pdev, false);
6885
6886 ixgbe_reset(adapter);
6887
6888 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6889
6890 rtnl_lock();
6891 err = ixgbe_init_interrupt_scheme(adapter);
6892 if (!err && netif_running(netdev))
6893 err = ixgbe_open(netdev);
6894
6895
6896 if (!err)
6897 netif_device_attach(netdev);
6898 rtnl_unlock();
6899
6900 return err;
6901}
6902#endif
6903
6904static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6905{
6906 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6907 struct net_device *netdev = adapter->netdev;
6908 struct ixgbe_hw *hw = &adapter->hw;
6909 u32 ctrl;
6910 u32 wufc = adapter->wol;
6911#ifdef CONFIG_PM
6912 int retval = 0;
6913#endif
6914
6915 rtnl_lock();
6916 netif_device_detach(netdev);
6917
6918 if (netif_running(netdev))
6919 ixgbe_close_suspend(adapter);
6920
6921 ixgbe_clear_interrupt_scheme(adapter);
6922 rtnl_unlock();
6923
6924#ifdef CONFIG_PM
6925 retval = pci_save_state(pdev);
6926 if (retval)
6927 return retval;
6928
6929#endif
6930 if (hw->mac.ops.stop_link_on_d3)
6931 hw->mac.ops.stop_link_on_d3(hw);
6932
6933 if (wufc) {
6934 u32 fctrl;
6935
6936 ixgbe_set_rx_mode(netdev);
6937
6938
6939 if (hw->mac.ops.enable_tx_laser)
6940 hw->mac.ops.enable_tx_laser(hw);
6941
6942
6943 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6944 fctrl |= IXGBE_FCTRL_MPE;
6945 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6946
6947 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
6948 ctrl |= IXGBE_CTRL_GIO_DIS;
6949 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
6950
6951 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
6952 } else {
6953 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
6954 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
6955 }
6956
6957 switch (hw->mac.type) {
6958 case ixgbe_mac_82598EB:
6959 pci_wake_from_d3(pdev, false);
6960 break;
6961 case ixgbe_mac_82599EB:
6962 case ixgbe_mac_X540:
6963 case ixgbe_mac_X550:
6964 case ixgbe_mac_X550EM_x:
6965 case ixgbe_mac_x550em_a:
6966 pci_wake_from_d3(pdev, !!wufc);
6967 break;
6968 default:
6969 break;
6970 }
6971
6972 *enable_wake = !!wufc;
6973 if (hw->phy.ops.set_phy_power && !*enable_wake)
6974 hw->phy.ops.set_phy_power(hw, false);
6975
6976 ixgbe_release_hw_control(adapter);
6977
6978 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6979 pci_disable_device(pdev);
6980
6981 return 0;
6982}
6983
6984#ifdef CONFIG_PM
6985static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
6986{
6987 int retval;
6988 bool wake;
6989
6990 retval = __ixgbe_shutdown(pdev, &wake);
6991 if (retval)
6992 return retval;
6993
6994 if (wake) {
6995 pci_prepare_to_sleep(pdev);
6996 } else {
6997 pci_wake_from_d3(pdev, false);
6998 pci_set_power_state(pdev, PCI_D3hot);
6999 }
7000
7001 return 0;
7002}
7003#endif
7004
7005static void ixgbe_shutdown(struct pci_dev *pdev)
7006{
7007 bool wake;
7008
7009 __ixgbe_shutdown(pdev, &wake);
7010
7011 if (system_state == SYSTEM_POWER_OFF) {
7012 pci_wake_from_d3(pdev, wake);
7013 pci_set_power_state(pdev, PCI_D3hot);
7014 }
7015}
7016
/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
7021void ixgbe_update_stats(struct ixgbe_adapter *adapter)
7022{
7023 struct net_device *netdev = adapter->netdev;
7024 struct ixgbe_hw *hw = &adapter->hw;
7025 struct ixgbe_hw_stats *hwstats = &adapter->stats;
7026 u64 total_mpc = 0;
7027 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
7028 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
7029 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
7030 u64 alloc_rx_page = 0;
7031 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
7032
7033 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7034 test_bit(__IXGBE_RESETTING, &adapter->state))
7035 return;
7036
7037 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
7038 u64 rsc_count = 0;
7039 u64 rsc_flush = 0;
7040 for (i = 0; i < adapter->num_rx_queues; i++) {
7041 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
7042 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
7043 }
7044 adapter->rsc_total_count = rsc_count;
7045 adapter->rsc_total_flush = rsc_flush;
7046 }
7047
7048 for (i = 0; i < adapter->num_rx_queues; i++) {
7049 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
7050 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
7051 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
7052 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
7053 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
7054 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
7055 bytes += rx_ring->stats.bytes;
7056 packets += rx_ring->stats.packets;
7057 }
7058 adapter->non_eop_descs = non_eop_descs;
7059 adapter->alloc_rx_page = alloc_rx_page;
7060 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
7061 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
7062 adapter->hw_csum_rx_error = hw_csum_rx_error;
7063 netdev->stats.rx_bytes = bytes;
7064 netdev->stats.rx_packets = packets;
7065
7066 bytes = 0;
7067 packets = 0;
7068
7069 for (i = 0; i < adapter->num_tx_queues; i++) {
7070 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7071 restart_queue += tx_ring->tx_stats.restart_queue;
7072 tx_busy += tx_ring->tx_stats.tx_busy;
7073 bytes += tx_ring->stats.bytes;
7074 packets += tx_ring->stats.packets;
7075 }
7076 for (i = 0; i < adapter->num_xdp_queues; i++) {
7077 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
7078
7079 restart_queue += xdp_ring->tx_stats.restart_queue;
7080 tx_busy += xdp_ring->tx_stats.tx_busy;
7081 bytes += xdp_ring->stats.bytes;
7082 packets += xdp_ring->stats.packets;
7083 }
7084 adapter->restart_queue = restart_queue;
7085 adapter->tx_busy = tx_busy;
7086 netdev->stats.tx_bytes = bytes;
7087 netdev->stats.tx_packets = packets;
7088
7089 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
7090
7091
7092 for (i = 0; i < 8; i++) {
7093
7094 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
7095 missed_rx += mpc;
7096 hwstats->mpc[i] += mpc;
7097 total_mpc += hwstats->mpc[i];
7098 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
7099 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
7100 switch (hw->mac.type) {
7101 case ixgbe_mac_82598EB:
7102 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
7103 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
7104 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
7105 hwstats->pxonrxc[i] +=
7106 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
7107 break;
7108 case ixgbe_mac_82599EB:
7109 case ixgbe_mac_X540:
7110 case ixgbe_mac_X550:
7111 case ixgbe_mac_X550EM_x:
7112 case ixgbe_mac_x550em_a:
7113 hwstats->pxonrxc[i] +=
7114 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
7115 break;
7116 default:
7117 break;
7118 }
7119 }
7120
7121
7122 for (i = 0; i < 16; i++) {
7123 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
7124 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
7125 if ((hw->mac.type == ixgbe_mac_82599EB) ||
7126 (hw->mac.type == ixgbe_mac_X540) ||
7127 (hw->mac.type == ixgbe_mac_X550) ||
7128 (hw->mac.type == ixgbe_mac_X550EM_x) ||
7129 (hw->mac.type == ixgbe_mac_x550em_a)) {
7130 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
7131 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
7132 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
7133 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
7134 }
7135 }
7136
7137 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
7138
7139 hwstats->gprc -= missed_rx;
7140
7141 ixgbe_update_xoff_received(adapter);
7142
7143
7144 switch (hw->mac.type) {
7145 case ixgbe_mac_82598EB:
7146 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
7147 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
7148 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
7149 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
7150 break;
7151 case ixgbe_mac_X540:
7152 case ixgbe_mac_X550:
7153 case ixgbe_mac_X550EM_x:
7154 case ixgbe_mac_x550em_a:
7155
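/* OS2BMC stats are X540 and later */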
7156 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
7157 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
7158 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
7159 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
7160
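/* fall through */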
7161 case ixgbe_mac_82599EB:
7162 for (i = 0; i < 16; i++)
7163 adapter->hw_rx_no_dma_resources +=
7164 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
7165 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
7166 IXGBE_READ_REG(hw, IXGBE_GORCH);
7167 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
7168 IXGBE_READ_REG(hw, IXGBE_GOTCH);
7169 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
7170 IXGBE_READ_REG(hw, IXGBE_TORH);
7171 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
7172 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
7173 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
7174#ifdef IXGBE_FCOE
7175 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
7176 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
7177 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
7178 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
7179 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
7180 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
7181
7182 if (adapter->fcoe.ddp_pool) {
7183 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
7184 struct ixgbe_fcoe_ddp_pool *ddp_pool;
7185 unsigned int cpu;
7186 u64 noddp = 0, noddp_ext_buff = 0;
7187 for_each_possible_cpu(cpu) {
7188 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
7189 noddp += ddp_pool->noddp;
7190 noddp_ext_buff += ddp_pool->noddp_ext_buff;
7191 }
7192 hwstats->fcoe_noddp = noddp;
7193 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
7194 }
7195#endif
7196 break;
7197 default:
7198 break;
7199 }
7200 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
7201 hwstats->bprc += bprc;
7202 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
7203 if (hw->mac.type == ixgbe_mac_82598EB)
7204 hwstats->mprc -= bprc;
7205 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
7206 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
7207 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
7208 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
7209 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
7210 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
7211 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
7212 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
7213 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
7214 hwstats->lxontxc += lxon;
7215 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
7216 hwstats->lxofftxc += lxoff;
7217 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
7218 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
7219
7220
7221
7222 xon_off_tot = lxon + lxoff;
7223 hwstats->gptc -= xon_off_tot;
7224 hwstats->mptc -= xon_off_tot;
7225 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
7226 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
7227 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
7228 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
7229 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
7230 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
7231 hwstats->ptc64 -= xon_off_tot;
7232 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
7233 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
7234 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
7235 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
7236 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
7237 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
7238
7239
7240 netdev->stats.multicast = hwstats->mprc;
7241
7242
7243 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
7244 netdev->stats.rx_dropped = 0;
7245 netdev->stats.rx_length_errors = hwstats->rlec;
7246 netdev->stats.rx_crc_errors = hwstats->crcerrs;
7247 netdev->stats.rx_missed_errors = total_mpc;
7248}
7249
7250
7251
7252
7253
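/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/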
7254static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
7255{
7256 struct ixgbe_hw *hw = &adapter->hw;
7257 int i;
7258
7259 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
7260 return;
7261
7262 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
7263
7264
7265 if (test_bit(__IXGBE_DOWN, &adapter->state))
7266 return;
7267
7268
7269 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
7270 return;
7271
7272 adapter->fdir_overflow++;
7273
7274 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
7275 for (i = 0; i < adapter->num_tx_queues; i++)
7276 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7277 &(adapter->tx_ring[i]->state));
7278 for (i = 0; i < adapter->num_xdp_queues; i++)
7279 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7280 &adapter->xdp_ring[i]->state);
7281
7282 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
7283 } else {
7284 e_err(probe, "failed to finish FDIR re-initialization, ignored adding FDIR ATR filters\n");
7286 }
7287}
7288
7289
7290
7291
7292
7293
7294
7295
7296
7297
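/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for Tx hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/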
7298static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
7299{
7300 struct ixgbe_hw *hw = &adapter->hw;
7301 u64 eics = 0;
7302 int i;
7303
7304
7305 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7306 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7307 test_bit(__IXGBE_RESETTING, &adapter->state))
7308 return;
7309
7310
7311 if (netif_carrier_ok(adapter->netdev)) {
7312 for (i = 0; i < adapter->num_tx_queues; i++)
7313 set_check_for_tx_hang(adapter->tx_ring[i]);
7314 for (i = 0; i < adapter->num_xdp_queues; i++)
7315 set_check_for_tx_hang(adapter->xdp_ring[i]);
7316 }
7317
7318 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
7319
7320
7321
7322
7323
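/* For legacy and MSI interrupts don't set any bits
 * that are enabled for EIAM, because this operation
 * would set *both* EIMS and EICS for any bit in EIAM.
 */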
7324 IXGBE_WRITE_REG(hw, IXGBE_EICS,
7325 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
7326 } else {
7327
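/* get one bit for every active Tx/Rx interrupt vector */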
7328 for (i = 0; i < adapter->num_q_vectors; i++) {
7329 struct ixgbe_q_vector *qv = adapter->q_vector[i];
7330 if (qv->rx.ring || qv->tx.ring)
7331 eics |= BIT_ULL(i);
7332 }
7333 }
7334
7335
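/* Cause software interrupt to ensure rings are cleaned */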
7336 ixgbe_irq_rearm_queues(adapter, eics);
7337}
7338
7339
7340
7341
7342
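/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/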
7343static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
7344{
7345 struct ixgbe_hw *hw = &adapter->hw;
7346 u32 link_speed = adapter->link_speed;
7347 bool link_up = adapter->link_up;
7348 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
7349
7350 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
7351 return;
7352
7353 if (hw->mac.ops.check_link) {
7354 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
7355 } else {
7356
7357 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
7358 link_up = true;
7359 }
7360
7361 if (adapter->ixgbe_ieee_pfc)
7362 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
7363
7364 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
7365 hw->mac.ops.fc_enable(hw);
7366 ixgbe_set_rx_drop_en(adapter);
7367 }
7368
7369 if (link_up ||
7370 time_after(jiffies, (adapter->link_check_timeout +
7371 IXGBE_TRY_LINK_TIMEOUT))) {
7372 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
7373 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
7374 IXGBE_WRITE_FLUSH(hw);
7375 }
7376
7377 adapter->link_up = link_up;
7378 adapter->link_speed = link_speed;
7379}
7380
7381static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
7382{
7383#ifdef CONFIG_IXGBE_DCB
7384 struct net_device *netdev = adapter->netdev;
7385 struct dcb_app app = {
7386 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
7387 .protocol = 0,
7388 };
7389 u8 up = 0;
7390
7391 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
7392 up = dcb_ieee_getapp_mask(netdev, &app);
7393
7394 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
7395#endif
7396}
7397
7398
7399
7400
7401
7402
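/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print the link up message
 * @adapter: pointer to the device adapter structure
 **/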
7403static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
7404{
7405 struct net_device *netdev = adapter->netdev;
7406 struct ixgbe_hw *hw = &adapter->hw;
7407 u32 link_speed = adapter->link_speed;
7408 const char *speed_str;
7409 bool flow_rx, flow_tx;
7410
7411
7412 if (netif_carrier_ok(netdev))
7413 return;
7414
7415 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
7416
7417 switch (hw->mac.type) {
7418 case ixgbe_mac_82598EB: {
7419 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
7420 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
7421 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
7422 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
7423 }
7424 break;
7425 case ixgbe_mac_X540:
7426 case ixgbe_mac_X550:
7427 case ixgbe_mac_X550EM_x:
7428 case ixgbe_mac_x550em_a:
7429 case ixgbe_mac_82599EB: {
7430 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
7431 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
7432 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
7433 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
7434 }
7435 break;
7436 default:
7437 flow_tx = false;
7438 flow_rx = false;
7439 break;
7440 }
7441
7442 adapter->last_rx_ptp_check = jiffies;
7443
7444 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7445 ixgbe_ptp_start_cyclecounter(adapter);
7446
7447 switch (link_speed) {
7448 case IXGBE_LINK_SPEED_10GB_FULL:
7449 speed_str = "10 Gbps";
7450 break;
7451 case IXGBE_LINK_SPEED_5GB_FULL:
7452 speed_str = "5 Gbps";
7453 break;
7454 case IXGBE_LINK_SPEED_2_5GB_FULL:
7455 speed_str = "2.5 Gbps";
7456 break;
7457 case IXGBE_LINK_SPEED_1GB_FULL:
7458 speed_str = "1 Gbps";
7459 break;
7460 case IXGBE_LINK_SPEED_100_FULL:
7461 speed_str = "100 Mbps";
7462 break;
7463 case IXGBE_LINK_SPEED_10_FULL:
7464 speed_str = "10 Mbps";
7465 break;
7466 default:
7467 speed_str = "unknown speed";
7468 break;
7469 }
7470 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
7471 ((flow_rx && flow_tx) ? "RX/TX" :
7472 (flow_rx ? "RX" :
7473 (flow_tx ? "TX" : "None"))));
7474
7475 netif_carrier_on(netdev);
7476 ixgbe_check_vf_rate_limit(adapter);
7477
7478
7479 netif_tx_wake_all_queues(adapter->netdev);
7480
7481
7482 ixgbe_update_default_up(adapter);
7483
7484
7485 ixgbe_ping_all_vfs(adapter);
7486}
7487
7488
7489
7490
7491
7492
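/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print the link down message
 * @adapter: pointer to the adapter structure
 **/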
7493static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
7494{
7495 struct net_device *netdev = adapter->netdev;
7496 struct ixgbe_hw *hw = &adapter->hw;
7497
7498 adapter->link_up = false;
7499 adapter->link_speed = 0;
7500
7501
7502 if (!netif_carrier_ok(netdev))
7503 return;
7504
7505
7506 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
7507 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
7508
7509 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7510 ixgbe_ptp_start_cyclecounter(adapter);
7511
7512 e_info(drv, "NIC Link is Down\n");
7513 netif_carrier_off(netdev);
7514
7515
7516 ixgbe_ping_all_vfs(adapter);
7517}
7518
7519static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
7520{
7521 int i;
7522
7523 for (i = 0; i < adapter->num_tx_queues; i++) {
7524 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7525
7526 if (tx_ring->next_to_use != tx_ring->next_to_clean)
7527 return true;
7528 }
7529
7530 for (i = 0; i < adapter->num_xdp_queues; i++) {
7531 struct ixgbe_ring *ring = adapter->xdp_ring[i];
7532
7533 if (ring->next_to_use != ring->next_to_clean)
7534 return true;
7535 }
7536
7537 return false;
7538}
7539
7540static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
7541{
7542 struct ixgbe_hw *hw = &adapter->hw;
7543 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
7544 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
7545
7546 int i, j;
7547
7548 if (!adapter->num_vfs)
7549 return false;
7550
7551
7552 if (hw->mac.type >= ixgbe_mac_X550)
7553 return false;
7554
7555 for (i = 0; i < adapter->num_vfs; i++) {
7556 for (j = 0; j < q_per_pool; j++) {
7557 u32 h, t;
7558
7559 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
7560 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
7561
7562 if (h != t)
7563 return true;
7564 }
7565 }
7566
7567 return false;
7568}
7569
7570
7571
7572
7573
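/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/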
7574static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
7575{
7576 if (!netif_carrier_ok(adapter->netdev)) {
7577 if (ixgbe_ring_tx_pending(adapter) ||
7578 ixgbe_vf_tx_pending(adapter)) {
7579
7580
7581
7582
7583
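/* We've lost link, so the controller stops DMA, but we've got
 * queued Tx work that's never going to get done, so reset the
 * controller to flush Tx.  (Do the reset outside of interrupt
 * context.)
 */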
7584 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
7585 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
7586 }
7587 }
7588}
7589
7590#ifdef CONFIG_PCI_IOV
7591static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
7592{
7593 struct ixgbe_hw *hw = &adapter->hw;
7594 struct pci_dev *pdev = adapter->pdev;
7595 unsigned int vf;
7596 u32 gpc;
7597
7598 if (!(netif_carrier_ok(adapter->netdev)))
7599 return;
7600
7601 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
7602 if (gpc)
7603 return;
7604
7605
7606
7607
7608
7609
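/* Check to see if a bad DMA write target from an errant or
 * malicious VF has caused a PCIe error.  If so then we can
 * issue a VFLR to the offending VF(s) and then resume without
 * requesting a full slot reset.
 */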
7610 if (!pdev)
7611 return;
7612
7613
7614 for (vf = 0; vf < adapter->num_vfs; ++vf) {
7615 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
7616 u16 status_reg;
7617
7618 if (!vfdev)
7619 continue;
7620 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
7621 if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
7622 status_reg & PCI_STATUS_REC_MASTER_ABORT)
7623 pcie_flr(vfdev);
7624 }
7625}
7626
7627static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
7628{
7629 u32 ssvpc;
7630
7631
7632 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
7633 adapter->num_vfs == 0)
7634 return;
7635
7636 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
7637
7638
7639
7640
7641
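/* The SSVPC register is cleared on read; if it is zero then no
 * spoofed packets were seen in the last interval.
 */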
7642 if (!ssvpc)
7643 return;
7644
7645 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
7646}
7647#else
7648static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
7649{
7650}
7651
7652static void
7653ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
7654{
7655}
7656#endif
7657
7658
7659
7660
7661
7662
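/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/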
7663static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
7664{
7665
7666 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7667 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7668 test_bit(__IXGBE_RESETTING, &adapter->state))
7669 return;
7670
7671 ixgbe_watchdog_update_link(adapter);
7672
7673 if (adapter->link_up)
7674 ixgbe_watchdog_link_is_up(adapter);
7675 else
7676 ixgbe_watchdog_link_is_down(adapter);
7677
7678 ixgbe_check_for_bad_vf(adapter);
7679 ixgbe_spoof_check(adapter);
7680 ixgbe_update_stats(adapter);
7681
7682 ixgbe_watchdog_flush_tx(adapter);
7683}
7684
7685
7686
7687
7688
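/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cage
 * @adapter: the ixgbe adapter structure
 **/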
7689static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
7690{
7691 struct ixgbe_hw *hw = &adapter->hw;
7692 s32 err;
7693
7694
7695 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
7696 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7697 return;
7698
7699 if (adapter->sfp_poll_time &&
7700 time_after(adapter->sfp_poll_time, jiffies))
7701 return;
7702
7703
7704 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7705 return;
7706
7707 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
7708
7709 err = hw->phy.ops.identify_sfp(hw);
7710 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7711 goto sfp_out;
7712
7713 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
7714
7715
7716 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
7717 }
7718
7719
7720 if (err)
7721 goto sfp_out;
7722
7723
7724 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7725 goto sfp_out;
7726
7727 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
7728
7729
7730
7731
7732
7733
7734 if (hw->mac.type == ixgbe_mac_82598EB)
7735 err = hw->phy.ops.reset(hw);
7736 else
7737 err = hw->mac.ops.setup_sfp(hw);
7738
7739 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7740 goto sfp_out;
7741
7742 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
7743 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
7744
7745sfp_out:
7746 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7747
7748 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
7749 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
7750 e_dev_err("failed to initialize because an unsupported SFP+ module type was detected.\n");
7752 e_dev_err("Reload the driver after installing a supported module.\n");
7754 unregister_netdev(adapter->netdev);
7755 }
7756}
7757
7758
7759
7760
7761
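/**
 * ixgbe_sfp_link_config_subtask - set up link SFP if needed
 * @adapter: the ixgbe adapter structure
 **/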
7762static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
7763{
7764 struct ixgbe_hw *hw = &adapter->hw;
7765 u32 cap_speed;
7766 u32 speed;
7767 bool autoneg = false;
7768
7769 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
7770 return;
7771
7772
7773 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7774 return;
7775
7776 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
7777
7778 hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
7779
7780
7781 if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
7782 speed = IXGBE_LINK_SPEED_10GB_FULL;
7783 else
7784 speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
7785 IXGBE_LINK_SPEED_1GB_FULL);
7786
7787 if (hw->mac.ops.setup_link)
7788 hw->mac.ops.setup_link(hw, speed, true);
7789
7790 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
7791 adapter->link_check_timeout = jiffies;
7792 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7793}
7794
7795
7796
7797
7798
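/**
 * ixgbe_service_timer - Timer Call-back
 * @t: pointer to timer_list structure
 **/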
7799static void ixgbe_service_timer(struct timer_list *t)
7800{
7801 struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
7802 unsigned long next_event_offset;
7803
7804
7805 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
7806 next_event_offset = HZ / 10;
7807 else
7808 next_event_offset = HZ * 2;
7809
7810
7811 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7812
7813 ixgbe_service_event_schedule(adapter);
7814}
7815
7816static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
7817{
7818 struct ixgbe_hw *hw = &adapter->hw;
7819 u32 status;
7820
7821 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
7822 return;
7823
7824 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
7825
7826 if (!hw->phy.ops.handle_lasi)
7827 return;
7828
7829 status = hw->phy.ops.handle_lasi(&adapter->hw);
7830 if (status != IXGBE_ERR_OVERTEMP)
7831 return;
7832
7833 e_crit(drv, "%s\n", ixgbe_overheat_msg);
7834}
7835
7836static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
7837{
7838 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
7839 return;
7840
7841 rtnl_lock();
7842
7843 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7844 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7845 test_bit(__IXGBE_RESETTING, &adapter->state)) {
7846 rtnl_unlock();
7847 return;
7848 }
7849
7850 ixgbe_dump(adapter);
7851 netdev_err(adapter->netdev, "Reset adapter\n");
7852 adapter->tx_timeout_count++;
7853
7854 ixgbe_reinit_locked(adapter);
7855 rtnl_unlock();
7856}
7857
7858
7859
7860
7861
7862
7863
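/**
 * ixgbe_check_fw_error - Check firmware for errors
 * @adapter: the adapter private structure
 *
 * Check the FWSM register for firmware errors and report whether the
 * device has entered firmware recovery mode.
 **/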
7864static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
7865{
7866 struct ixgbe_hw *hw = &adapter->hw;
7867 u32 fwsm;
7868
7869
7870 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
7871
7872 if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
7873 !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
7874 e_dev_warn("Warning: firmware error detected, FWSM: 0x%08X\n", fwsm);
7876
7877 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
7878 e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
7879 return true;
7880 }
7881
7882 return false;
7883}
7884
7885
7886
7887
7888
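/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/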
7889static void ixgbe_service_task(struct work_struct *work)
7890{
7891 struct ixgbe_adapter *adapter = container_of(work,
7892 struct ixgbe_adapter,
7893 service_task);
7894 if (ixgbe_removed(adapter->hw.hw_addr)) {
7895 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
7896 rtnl_lock();
7897 ixgbe_down(adapter);
7898 rtnl_unlock();
7899 }
7900 ixgbe_service_event_complete(adapter);
7901 return;
7902 }
7903 if (ixgbe_check_fw_error(adapter)) {
7904 if (!test_bit(__IXGBE_DOWN, &adapter->state))
7905 unregister_netdev(adapter->netdev);
7906 ixgbe_service_event_complete(adapter);
7907 return;
7908 }
7909 if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
7910 rtnl_lock();
7911 adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
7912 udp_tunnel_get_rx_info(adapter->netdev);
7913 rtnl_unlock();
7914 }
7915 ixgbe_reset_subtask(adapter);
7916 ixgbe_phy_interrupt_subtask(adapter);
7917 ixgbe_sfp_detection_subtask(adapter);
7918 ixgbe_sfp_link_config_subtask(adapter);
7919 ixgbe_check_overtemp_subtask(adapter);
7920 ixgbe_watchdog_subtask(adapter);
7921 ixgbe_fdir_reinit_subtask(adapter);
7922 ixgbe_check_hang_subtask(adapter);
7923
7924 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
7925 ixgbe_ptp_overflow_check(adapter);
7926 if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
7927 ixgbe_ptp_rx_hang(adapter);
7928 ixgbe_ptp_tx_hang(adapter);
7929 }
7930
7931 ixgbe_service_event_complete(adapter);
7932}
7933
7934static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7935 struct ixgbe_tx_buffer *first,
7936 u8 *hdr_len,
7937 struct ixgbe_ipsec_tx_data *itd)
7938{
7939 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
7940 struct sk_buff *skb = first->skb;
7941 union {
7942 struct iphdr *v4;
7943 struct ipv6hdr *v6;
7944 unsigned char *hdr;
7945 } ip;
7946 union {
7947 struct tcphdr *tcp;
7948 unsigned char *hdr;
7949 } l4;
7950 u32 paylen, l4_offset;
7951 u32 fceof_saidx = 0;
7952 int err;
7953
7954 if (skb->ip_summed != CHECKSUM_PARTIAL)
7955 return 0;
7956
7957 if (!skb_is_gso(skb))
7958 return 0;
7959
7960 err = skb_cow_head(skb, 0);
7961 if (err < 0)
7962 return err;
7963
7964 if (eth_p_mpls(first->protocol))
7965 ip.hdr = skb_inner_network_header(skb);
7966 else
7967 ip.hdr = skb_network_header(skb);
7968 l4.hdr = skb_checksum_start(skb);
7969
7970
7971 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
7972
7973
7974 if (ip.v4->version == 4) {
7975 unsigned char *csum_start = skb_checksum_start(skb);
7976 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
7977 int len = csum_start - trans_start;
7978
7979
7980
7981
7982
7983 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
7984 csum_fold(csum_partial(trans_start,
7985 len, 0)) : 0;
7986 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7987
7988 ip.v4->tot_len = 0;
7989 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7990 IXGBE_TX_FLAGS_CSUM |
7991 IXGBE_TX_FLAGS_IPV4;
7992 } else {
7993 ip.v6->payload_len = 0;
7994 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7995 IXGBE_TX_FLAGS_CSUM;
7996 }
7997
7998
7999 l4_offset = l4.hdr - skb->data;
8000
8001
8002 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
8003
8004
8005 paylen = skb->len - l4_offset;
8006 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
8007
8008
8009 first->gso_segs = skb_shinfo(skb)->gso_segs;
8010 first->bytecount += (first->gso_segs - 1) * *hdr_len;
8011
8012
8013 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
8014 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
8015
8016 fceof_saidx |= itd->sa_idx;
8017 type_tucmd |= itd->flags | itd->trailer_len;
8018
8019
8020 vlan_macip_lens = l4.hdr - ip.hdr;
8021 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
8022 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8023
8024 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
8025 mss_l4len_idx);
8026
8027 return 1;
8028}
8029
8030static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
8031{
8032 unsigned int offset = 0;
8033
8034 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
8035
8036 return offset == skb_checksum_start_offset(skb);
8037}
8038
8039static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
8040 struct ixgbe_tx_buffer *first,
8041 struct ixgbe_ipsec_tx_data *itd)
8042{
8043 struct sk_buff *skb = first->skb;
8044 u32 vlan_macip_lens = 0;
8045 u32 fceof_saidx = 0;
8046 u32 type_tucmd = 0;
8047
8048 if (skb->ip_summed != CHECKSUM_PARTIAL) {
8049csum_failed:
8050 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
8051 IXGBE_TX_FLAGS_CC)))
8052 return;
8053 goto no_csum;
8054 }
8055
8056 switch (skb->csum_offset) {
8057 case offsetof(struct tcphdr, check):
8058 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
8059
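/* fall through */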
8060 case offsetof(struct udphdr, check):
8061 break;
8062 case offsetof(struct sctphdr, checksum):
8063
8064 if (((first->protocol == htons(ETH_P_IP)) &&
8065 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
8066 ((first->protocol == htons(ETH_P_IPV6)) &&
8067 ixgbe_ipv6_csum_is_sctp(skb))) {
8068 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
8069 break;
8070 }
8071
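/* fall through */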
8072 default:
8073 skb_checksum_help(skb);
8074 goto csum_failed;
8075 }
8076
8077
8078 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
8079 vlan_macip_lens = skb_checksum_start_offset(skb) -
8080 skb_network_offset(skb);
8081no_csum:
8082
8083 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
8084 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8085
8086 fceof_saidx |= itd->sa_idx;
8087 type_tucmd |= itd->flags | itd->trailer_len;
8088
8089 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
8090}
8091
8092#define IXGBE_SET_FLAG(_input, _flag, _result) \
8093 ((_flag <= _result) ? \
8094 ((u32)(_input & _flag) * (_result / _flag)) : \
8095 ((u32)(_input & _flag) / (_flag / _result)))
8096
8097static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
8098{
8099
8100 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8101 IXGBE_ADVTXD_DCMD_DEXT |
8102 IXGBE_ADVTXD_DCMD_IFCS;
8103
8104
8105 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
8106 IXGBE_ADVTXD_DCMD_VLE);
8107
8108
8109 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
8110 IXGBE_ADVTXD_DCMD_TSE);
8111
8112
8113 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
8114 IXGBE_ADVTXD_MAC_TSTAMP);
8115
8116
8117 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
8118
8119 return cmd_type;
8120}
8121
8122static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
8123 u32 tx_flags, unsigned int paylen)
8124{
8125 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
8126
8127
8128 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8129 IXGBE_TX_FLAGS_CSUM,
8130 IXGBE_ADVTXD_POPTS_TXSM);
8131
8132
8133 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8134 IXGBE_TX_FLAGS_IPV4,
8135 IXGBE_ADVTXD_POPTS_IXSM);
8136
8137
8138 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8139 IXGBE_TX_FLAGS_IPSEC,
8140 IXGBE_ADVTXD_POPTS_IPSEC);
8141
8142
8143
8144
8145
8146 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8147 IXGBE_TX_FLAGS_CC,
8148 IXGBE_ADVTXD_CC);
8149
8150 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
8151}
8152
8153static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8154{
8155 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
8156
8157
8158
8159
8160
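/* Memory barrier: make the stopped queue state visible before
 * re-checking for free descriptors below.
 */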
8161 smp_mb();
8162
8163
8164
8165
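/* We need to check again in case another CPU has just made room available. */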
8166 if (likely(ixgbe_desc_unused(tx_ring) < size))
8167 return -EBUSY;
8168
8169
8170 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
8171 ++tx_ring->tx_stats.restart_queue;
8172 return 0;
8173}
8174
8175static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8176{
8177 if (likely(ixgbe_desc_unused(tx_ring) >= size))
8178 return 0;
8179
8180 return __ixgbe_maybe_stop_tx(tx_ring, size);
8181}
8182
8183static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
8184 struct ixgbe_tx_buffer *first,
8185 const u8 hdr_len)
8186{
8187 struct sk_buff *skb = first->skb;
8188 struct ixgbe_tx_buffer *tx_buffer;
8189 union ixgbe_adv_tx_desc *tx_desc;
8190 skb_frag_t *frag;
8191 dma_addr_t dma;
8192 unsigned int data_len, size;
8193 u32 tx_flags = first->tx_flags;
8194 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
8195 u16 i = tx_ring->next_to_use;
8196
8197 tx_desc = IXGBE_TX_DESC(tx_ring, i);
8198
8199 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
8200
8201 size = skb_headlen(skb);
8202 data_len = skb->data_len;
8203
8204#ifdef IXGBE_FCOE
8205 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
8206 if (data_len < sizeof(struct fcoe_crc_eof)) {
8207 size -= sizeof(struct fcoe_crc_eof) - data_len;
8208 data_len = 0;
8209 } else {
8210 data_len -= sizeof(struct fcoe_crc_eof);
8211 }
8212 }
8213
8214#endif
8215 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
8216
8217 tx_buffer = first;
8218
8219 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
8220 if (dma_mapping_error(tx_ring->dev, dma))
8221 goto dma_error;
8222
8223
8224 dma_unmap_len_set(tx_buffer, len, size);
8225 dma_unmap_addr_set(tx_buffer, dma, dma);
8226
8227 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8228
8229 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
8230 tx_desc->read.cmd_type_len =
8231 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
8232
8233 i++;
8234 tx_desc++;
8235 if (i == tx_ring->count) {
8236 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8237 i = 0;
8238 }
8239 tx_desc->read.olinfo_status = 0;
8240
8241 dma += IXGBE_MAX_DATA_PER_TXD;
8242 size -= IXGBE_MAX_DATA_PER_TXD;
8243
8244 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8245 }
8246
8247 if (likely(!data_len))
8248 break;
8249
8250 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
8251
8252 i++;
8253 tx_desc++;
8254 if (i == tx_ring->count) {
8255 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8256 i = 0;
8257 }
8258 tx_desc->read.olinfo_status = 0;
8259
8260#ifdef IXGBE_FCOE
8261 size = min_t(unsigned int, data_len, skb_frag_size(frag));
8262#else
8263 size = skb_frag_size(frag);
8264#endif
8265 data_len -= size;
8266
8267 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
8268 DMA_TO_DEVICE);
8269
8270 tx_buffer = &tx_ring->tx_buffer_info[i];
8271 }
8272
8273
8274 cmd_type |= size | IXGBE_TXD_CMD;
8275 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8276
8277 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
8278
8279
8280 first->time_stamp = jiffies;
8281
8282 skb_tx_timestamp(skb);
8283
8284
8285
8286
8287
8288
8289
8290
8291
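/* Force memory writes to complete before letting h/w know there
 * are new descriptors to fetch.  (Only applicable for weak-ordered
 * memory model archs, such as IA-64.)
 *
 * We also need this memory barrier to make certain all of the
 * status bits have been updated before next_to_watch is written.
 */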
8292 wmb();
8293
8294
8295 first->next_to_watch = tx_desc;
8296
8297 i++;
8298 if (i == tx_ring->count)
8299 i = 0;
8300
8301 tx_ring->next_to_use = i;
8302
8303 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
8304
8305 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
8306 writel(i, tx_ring->tail);
8307 }
8308
8309 return 0;
8310dma_error:
8311 dev_err(tx_ring->dev, "TX DMA map failed\n");
8312
8313
8314 for (;;) {
8315 tx_buffer = &tx_ring->tx_buffer_info[i];
8316 if (dma_unmap_len(tx_buffer, len))
8317 dma_unmap_page(tx_ring->dev,
8318 dma_unmap_addr(tx_buffer, dma),
8319 dma_unmap_len(tx_buffer, len),
8320 DMA_TO_DEVICE);
8321 dma_unmap_len_set(tx_buffer, len, 0);
8322 if (tx_buffer == first)
8323 break;
8324 if (i == 0)
8325 i += tx_ring->count;
8326 i--;
8327 }
8328
8329 dev_kfree_skb_any(first->skb);
8330 first->skb = NULL;
8331
8332 tx_ring->next_to_use = i;
8333
8334 return -1;
8335}
8336
8337static void ixgbe_atr(struct ixgbe_ring *ring,
8338 struct ixgbe_tx_buffer *first)
8339{
8340 struct ixgbe_q_vector *q_vector = ring->q_vector;
8341 union ixgbe_atr_hash_dword input = { .dword = 0 };
8342 union ixgbe_atr_hash_dword common = { .dword = 0 };
8343 union {
8344 unsigned char *network;
8345 struct iphdr *ipv4;
8346 struct ipv6hdr *ipv6;
8347 } hdr;
8348 struct tcphdr *th;
8349 unsigned int hlen;
8350 struct sk_buff *skb;
8351 __be16 vlan_id;
8352 int l4_proto;
8353
8354
8355 if (!q_vector)
8356 return;
8357
8358
8359 if (!ring->atr_sample_rate)
8360 return;
8361
8362 ring->atr_count++;
8363
8364
8365 if ((first->protocol != htons(ETH_P_IP)) &&
8366 (first->protocol != htons(ETH_P_IPV6)))
8367 return;
8368
8369
8370 skb = first->skb;
8371 hdr.network = skb_network_header(skb);
8372 if (unlikely(hdr.network <= skb->data))
8373 return;
8374 if (skb->encapsulation &&
8375 first->protocol == htons(ETH_P_IP) &&
8376 hdr.ipv4->protocol == IPPROTO_UDP) {
8377 struct ixgbe_adapter *adapter = q_vector->adapter;
8378
8379 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8380 VXLAN_HEADROOM))
8381 return;
8382
8383
8384 if (adapter->vxlan_port &&
8385 udp_hdr(skb)->dest == adapter->vxlan_port)
8386 hdr.network = skb_inner_network_header(skb);
8387
8388 if (adapter->geneve_port &&
8389 udp_hdr(skb)->dest == adapter->geneve_port)
8390 hdr.network = skb_inner_network_header(skb);
8391 }
8392
8393
8394
8395
8396 if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
8397 return;
8398
8399
8400 switch (hdr.ipv4->version) {
8401 case IPVERSION:
8402
8403 hlen = (hdr.network[0] & 0x0F) << 2;
8404 l4_proto = hdr.ipv4->protocol;
8405 break;
8406 case 6:
8407 hlen = hdr.network - skb->data;
8408 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
8409 hlen -= hdr.network - skb->data;
8410 break;
8411 default:
8412 return;
8413 }
8414
8415 if (l4_proto != IPPROTO_TCP)
8416 return;
8417
8418 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8419 hlen + sizeof(struct tcphdr)))
8420 return;
8421
8422 th = (struct tcphdr *)(hdr.network + hlen);
8423
8424
8425 if (th->fin)
8426 return;
8427
8428
8429 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
8430 return;
8431
8432
8433 ring->atr_count = 0;
8434
8435 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
8436
8437
8438
8439
8440
8441
8442
8443
8444 input.formatted.vlan_id = vlan_id;
8445
8446
8447
8448
8449
8450 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
8451 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
8452 else
8453 common.port.src ^= th->dest ^ first->protocol;
8454 common.port.dst ^= th->source;
8455
8456 switch (hdr.ipv4->version) {
8457 case IPVERSION:
8458 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
8459 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
8460 break;
8461 case 6:
8462 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
8463 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
8464 hdr.ipv6->saddr.s6_addr32[1] ^
8465 hdr.ipv6->saddr.s6_addr32[2] ^
8466 hdr.ipv6->saddr.s6_addr32[3] ^
8467 hdr.ipv6->daddr.s6_addr32[0] ^
8468 hdr.ipv6->daddr.s6_addr32[1] ^
8469 hdr.ipv6->daddr.s6_addr32[2] ^
8470 hdr.ipv6->daddr.s6_addr32[3];
8471 break;
8472 default:
8473 break;
8474 }
8475
8476 if (hdr.network != skb_network_header(skb))
8477 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
8478
8479
8480 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
8481 input, common, ring->queue_index);
8482}
8483
8484#ifdef IXGBE_FCOE
8485static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
8486 struct net_device *sb_dev)
8487{
8488 struct ixgbe_adapter *adapter;
8489 struct ixgbe_ring_feature *f;
8490 int txq;
8491
8492 if (sb_dev) {
8493 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
8494 struct net_device *vdev = sb_dev;
8495
8496 txq = vdev->tc_to_txq[tc].offset;
8497 txq += reciprocal_scale(skb_get_hash(skb),
8498 vdev->tc_to_txq[tc].count);
8499
8500 return txq;
8501 }
8502
8503
8504
8505
8506
8507 switch (vlan_get_protocol(skb)) {
8508 case htons(ETH_P_FCOE):
8509 case htons(ETH_P_FIP):
8510 adapter = netdev_priv(dev);
8511
8512 if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
8513 break;
8514
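/* fall through */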
8515 default:
8516 return netdev_pick_tx(dev, skb, sb_dev);
8517 }
8518
8519 f = &adapter->ring_feature[RING_F_FCOE];
8520
8521 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
8522 smp_processor_id();
8523
8524 while (txq >= f->indices)
8525 txq -= f->indices;
8526
8527 return txq + f->offset;
8528}
8529
8530#endif
8531int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
8532 struct xdp_frame *xdpf)
8533{
8534 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
8535 struct ixgbe_tx_buffer *tx_buffer;
8536 union ixgbe_adv_tx_desc *tx_desc;
8537 u32 len, cmd_type;
8538 dma_addr_t dma;
8539 u16 i;
8540
8541 len = xdpf->len;
8542
8543 if (unlikely(!ixgbe_desc_unused(ring)))
8544 return IXGBE_XDP_CONSUMED;
8545
8546 dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
8547 if (dma_mapping_error(ring->dev, dma))
8548 return IXGBE_XDP_CONSUMED;
8549
8550
8551 tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
8552 tx_buffer->bytecount = len;
8553 tx_buffer->gso_segs = 1;
8554 tx_buffer->protocol = 0;
8555
8556 i = ring->next_to_use;
8557 tx_desc = IXGBE_TX_DESC(ring, i);
8558
8559 dma_unmap_len_set(tx_buffer, len, len);
8560 dma_unmap_addr_set(tx_buffer, dma, dma);
8561 tx_buffer->xdpf = xdpf;
8562
8563 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8564
8565
8566 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8567 IXGBE_ADVTXD_DCMD_DEXT |
8568 IXGBE_ADVTXD_DCMD_IFCS;
8569 cmd_type |= len | IXGBE_TXD_CMD;
8570 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8571 tx_desc->read.olinfo_status =
8572 cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
8573
8574
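/* Avoid any potential race with xdp_xmit and cleanup */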
8575 smp_wmb();
8576
8577
8578 i++;
8579 if (i == ring->count)
8580 i = 0;
8581
8582 tx_buffer->next_to_watch = tx_desc;
8583 ring->next_to_use = i;
8584
8585 return IXGBE_XDP_TX;
8586}
8587
8588netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8589 struct ixgbe_adapter *adapter,
8590 struct ixgbe_ring *tx_ring)
8591{
8592 struct ixgbe_tx_buffer *first;
8593 int tso;
8594 u32 tx_flags = 0;
8595 unsigned short f;
8596 u16 count = TXD_USE_COUNT(skb_headlen(skb));
8597 struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
8598 __be16 protocol = skb->protocol;
8599 u8 hdr_len = 0;
8600
8601
8602
8603
8604
8605
8606
8607
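/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
 *       + 2 desc gap to keep tail from touching head,
 *       + 1 desc for context descriptor,
 * otherwise try next time
 */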
8608 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
8609 count += TXD_USE_COUNT(skb_frag_size(
8610 &skb_shinfo(skb)->frags[f]));
8611
8612 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8613 tx_ring->tx_stats.tx_busy++;
8614 return NETDEV_TX_BUSY;
8615 }
8616
8617
8618 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
8619 first->skb = skb;
8620 first->bytecount = skb->len;
8621 first->gso_segs = 1;
8622
8623
8624 if (skb_vlan_tag_present(skb)) {
8625 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
8626 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8627
8628 } else if (protocol == htons(ETH_P_8021Q)) {
8629 struct vlan_hdr *vhdr, _vhdr;
8630 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
8631 if (!vhdr)
8632 goto out_drop;
8633
8634 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
8635 IXGBE_TX_FLAGS_VLAN_SHIFT;
8636 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
8637 }
8638 protocol = vlan_get_protocol(skb);
8639
8640 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
8641 adapter->ptp_clock) {
8642 if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
8643 &adapter->state)) {
8644 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8645 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
8646
8647
8648 adapter->ptp_tx_skb = skb_get(skb);
8649 adapter->ptp_tx_start = jiffies;
8650 schedule_work(&adapter->ptp_tx_work);
8651 } else {
8652 adapter->tx_hwtstamp_skipped++;
8653 }
8654 }
8655
8656#ifdef CONFIG_PCI_IOV
8657
8658
8659
8660
8661 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8662 tx_flags |= IXGBE_TX_FLAGS_CC;
8663
8664#endif
8665
8666 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8667 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
8668 (skb->priority != TC_PRIO_CONTROL))) {
8669 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
8670 tx_flags |= (skb->priority & 0x7) <<
8671 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
8672 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
8673 struct vlan_ethhdr *vhdr;
8674
8675 if (skb_cow_head(skb, 0))
8676 goto out_drop;
8677 vhdr = (struct vlan_ethhdr *)skb->data;
8678 vhdr->h_vlan_TCI = htons(tx_flags >>
8679 IXGBE_TX_FLAGS_VLAN_SHIFT);
8680 } else {
8681 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8682 }
8683 }
8684
8685
8686 first->tx_flags = tx_flags;
8687 first->protocol = protocol;
8688
8689#ifdef IXGBE_FCOE
8690
8691 if ((protocol == htons(ETH_P_FCOE)) &&
8692 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
8693 tso = ixgbe_fso(tx_ring, first, &hdr_len);
8694 if (tso < 0)
8695 goto out_drop;
8696
8697 goto xmit_fcoe;
8698 }
8699
8700#endif
8701
8702#ifdef CONFIG_IXGBE_IPSEC
8703 if (xfrm_offload(skb) &&
8704 !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8705 goto out_drop;
8706#endif
8707 tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
8708 if (tso < 0)
8709 goto out_drop;
8710 else if (!tso)
8711 ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
8712
8713
8714 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
8715 ixgbe_atr(tx_ring, first);
8716
8717#ifdef IXGBE_FCOE
8718xmit_fcoe:
8719#endif
8720 if (ixgbe_tx_map(tx_ring, first, hdr_len))
8721 goto cleanup_tx_timestamp;
8722
8723 return NETDEV_TX_OK;
8724
8725out_drop:
8726 dev_kfree_skb_any(first->skb);
8727 first->skb = NULL;
8728cleanup_tx_timestamp:
8729 if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
8730 dev_kfree_skb_any(adapter->ptp_tx_skb);
8731 adapter->ptp_tx_skb = NULL;
8732 cancel_work_sync(&adapter->ptp_tx_work);
8733 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
8734 }
8735
8736 return NETDEV_TX_OK;
8737}
8738
8739static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
8740 struct net_device *netdev,
8741 struct ixgbe_ring *ring)
8742{
8743 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8744 struct ixgbe_ring *tx_ring;
8745
8746
8747
8748
8749
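/* The minimum packet size for olinfo paylen is 17 so pad the skb
 * in order to meet this minimum size requirement.
 */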
8750 if (skb_put_padto(skb, 17))
8751 return NETDEV_TX_OK;
8752
8753 tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
8754 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
8755 return NETDEV_TX_BUSY;
8756
8757 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
8758}
8759
8760static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
8761 struct net_device *netdev)
8762{
8763 return __ixgbe_xmit_frame(skb, netdev, NULL);
8764}
8765
8766
8767
8768
8769
8770
8771
8772
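/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to a struct sockaddr holding the new address
 *
 * Returns 0 on success, negative on failure
 **/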
8773static int ixgbe_set_mac(struct net_device *netdev, void *p)
8774{
8775 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8776 struct ixgbe_hw *hw = &adapter->hw;
8777 struct sockaddr *addr = p;
8778
8779 if (!is_valid_ether_addr(addr->sa_data))
8780 return -EADDRNOTAVAIL;
8781
8782 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
8783 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
8784
8785 ixgbe_mac_set_default_filter(adapter);
8786
8787 return 0;
8788}
8789
8790static int
8791ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
8792{
8793 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8794 struct ixgbe_hw *hw = &adapter->hw;
8795 u16 value;
8796 int rc;
8797
8798 if (adapter->mii_bus) {
8799 int regnum = addr;
8800
8801 if (devad != MDIO_DEVAD_NONE)
8802 regnum |= (devad << 16) | MII_ADDR_C45;
8803
8804 return mdiobus_read(adapter->mii_bus, prtad, regnum);
8805 }
8806
8807 if (prtad != hw->phy.mdio.prtad)
8808 return -EINVAL;
8809 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
8810 if (!rc)
8811 rc = value;
8812 return rc;
8813}
8814
8815static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
8816 u16 addr, u16 value)
8817{
8818 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8819 struct ixgbe_hw *hw = &adapter->hw;
8820
8821 if (adapter->mii_bus) {
8822 int regnum = addr;
8823
8824 if (devad != MDIO_DEVAD_NONE)
8825 regnum |= (devad << 16) | MII_ADDR_C45;
8826
8827 return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
8828 }
8829
8830 if (prtad != hw->phy.mdio.prtad)
8831 return -EINVAL;
8832 return hw->phy.ops.write_reg(hw, addr, devad, value);
8833}
8834
8835static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
8836{
8837 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8838
8839 switch (cmd) {
8840 case SIOCSHWTSTAMP:
8841 return ixgbe_ptp_set_ts_config(adapter, req);
8842 case SIOCGHWTSTAMP:
8843 return ixgbe_ptp_get_ts_config(adapter, req);
8844 case SIOCGMIIPHY:
8845 if (!adapter->hw.phy.ops.read_reg)
8846 return -EOPNOTSUPP;
8847
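/* fall through */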
8848 default:
8849 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8850 }
8851}
8852
8853
8854
8855
8856
8857
8858
8859
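/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/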
8860static int ixgbe_add_sanmac_netdev(struct net_device *dev)
8861{
8862 int err = 0;
8863 struct ixgbe_adapter *adapter = netdev_priv(dev);
8864 struct ixgbe_hw *hw = &adapter->hw;
8865
8866 if (is_valid_ether_addr(hw->mac.san_addr)) {
8867 rtnl_lock();
8868 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
8869 rtnl_unlock();
8870
8871
8872 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
8873 }
8874 return err;
8875}
8876
8877
8878
8879
8880
8881
8882
8883
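/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/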
8884static int ixgbe_del_sanmac_netdev(struct net_device *dev)
8885{
8886 int err = 0;
8887 struct ixgbe_adapter *adapter = netdev_priv(dev);
8888 struct ixgbe_mac_info *mac = &adapter->hw.mac;
8889
8890 if (is_valid_ether_addr(mac->san_addr)) {
8891 rtnl_lock();
8892 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
8893 rtnl_unlock();
8894 }
8895 return err;
8896}
8897
8898static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
8899 struct ixgbe_ring *ring)
8900{
8901 u64 bytes, packets;
8902 unsigned int start;
8903
8904 if (ring) {
8905 do {
8906 start = u64_stats_fetch_begin_irq(&ring->syncp);
8907 packets = ring->stats.packets;
8908 bytes = ring->stats.bytes;
8909 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8910 stats->tx_packets += packets;
8911 stats->tx_bytes += bytes;
8912 }
8913}
8914
8915static void ixgbe_get_stats64(struct net_device *netdev,
8916 struct rtnl_link_stats64 *stats)
8917{
8918 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8919 int i;
8920
8921 rcu_read_lock();
8922 for (i = 0; i < adapter->num_rx_queues; i++) {
8923 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
8924 u64 bytes, packets;
8925 unsigned int start;
8926
8927 if (ring) {
8928 do {
8929 start = u64_stats_fetch_begin_irq(&ring->syncp);
8930 packets = ring->stats.packets;
8931 bytes = ring->stats.bytes;
8932 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8933 stats->rx_packets += packets;
8934 stats->rx_bytes += bytes;
8935 }
8936 }
8937
8938 for (i = 0; i < adapter->num_tx_queues; i++) {
8939 struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
8940
8941 ixgbe_get_ring_stats64(stats, ring);
8942 }
8943 for (i = 0; i < adapter->num_xdp_queues; i++) {
8944 struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
8945
8946 ixgbe_get_ring_stats64(stats, ring);
8947 }
8948 rcu_read_unlock();
8949
8950
8951 stats->multicast = netdev->stats.multicast;
8952 stats->rx_errors = netdev->stats.rx_errors;
8953 stats->rx_length_errors = netdev->stats.rx_length_errors;
8954 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
8955 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
8956}
8957
8958#ifdef CONFIG_IXGBE_DCB
8959
8960
8961
8962
8963
8964
8965
8966
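/**
 * ixgbe_validate_rtr - verify the 802.1Qp to Rx packet buffer mapping is valid
 * @adapter: Pointer to adapter struct
 * @tc: number of traffic classes currently enabled
 *
 * Confirm each user priority maps to a traffic class that actually exists,
 * clearing any mapping that points beyond the enabled traffic classes.
 **/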
8967static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
8968{
8969 struct ixgbe_hw *hw = &adapter->hw;
8970 u32 reg, rsave;
8971 int i;
8972
8973
8974
8975
8976 if (hw->mac.type == ixgbe_mac_82598EB)
8977 return;
8978
8979 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
8980 rsave = reg;
8981
8982 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8983 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
8984
8985
8986 if (up2tc > tc)
8987 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
8988 }
8989
8990 if (reg != rsave)
8991 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
8994}
8995
8996
8997
8998
8999
9000
9001
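/**
 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
 * @adapter: Pointer to adapter struct
 *
 * Populate the netdev user priority to tc map
 **/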
9002static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
9003{
9004 struct net_device *dev = adapter->netdev;
9005 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
9006 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
9007 u8 prio;
9008
9009 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
9010 u8 tc = 0;
9011
9012 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
9013 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
9014 else if (ets)
9015 tc = ets->prio_tc[prio];
9016
9017 netdev_set_prio_tc_map(dev, prio, tc);
9018 }
9019}
9020
9021#endif
9022static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
9023{
9024 struct ixgbe_adapter *adapter = data;
9025 struct ixgbe_fwd_adapter *accel;
9026 int pool;
9027
9028
9029 if (!netif_is_macvlan(vdev))
9030 return 0;
9031
9032
9033 accel = macvlan_accel_priv(vdev);
9034 if (!accel)
9035 return 0;
9036
9037
9038 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9039 if (pool < adapter->num_rx_pools) {
9040 set_bit(pool, adapter->fwd_bitmask);
9041 accel->pool = pool;
9042 return 0;
9043 }
9044
9045
9046 netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
9047 macvlan_release_l2fw_offload(vdev);
9048
9049
9050 netdev_unbind_sb_channel(adapter->netdev, vdev);
9051 netdev_set_sb_channel(vdev, 0);
9052
9053 kfree(accel);
9054
9055 return 0;
9056}
9057
9058static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
9059{
9060 struct ixgbe_adapter *adapter = netdev_priv(dev);
9061
9062
9063 bitmap_clear(adapter->fwd_bitmask, 1, 63);
9064
9065
9066 netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
9067 adapter);
9068}
9069
9070
9071
9072
9073
9074
9075
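/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 **/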
9076int ixgbe_setup_tc(struct net_device *dev, u8 tc)
9077{
9078 struct ixgbe_adapter *adapter = netdev_priv(dev);
9079 struct ixgbe_hw *hw = &adapter->hw;
9080
9081
9082 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
9083 return -EINVAL;
9084
9085 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
9086 return -EINVAL;
9087
9088
9089
9090
9091
9092 if (netif_running(dev))
9093 ixgbe_close(dev);
9094 else
9095 ixgbe_reset(adapter);
9096
9097 ixgbe_clear_interrupt_scheme(adapter);
9098
9099#ifdef CONFIG_IXGBE_DCB
9100 if (tc) {
9101 if (adapter->xdp_prog) {
9102 e_warn(probe, "DCB is not supported with XDP\n");
9103
9104 ixgbe_init_interrupt_scheme(adapter);
9105 if (netif_running(dev))
9106 ixgbe_open(dev);
9107 return -EINVAL;
9108 }
9109
9110 netdev_set_num_tc(dev, tc);
9111 ixgbe_set_prio_tc_map(adapter);
9112
9113 adapter->hw_tcs = tc;
9114 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
9115
9116 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
9117 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
9118 adapter->hw.fc.requested_mode = ixgbe_fc_none;
9119 }
9120 } else {
9121 netdev_reset_tc(dev);
9122
9123 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
9124 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
9125
9126 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
9127 adapter->hw_tcs = tc;
9128
9129 adapter->temp_dcb_cfg.pfc_mode_enable = false;
9130 adapter->dcb_cfg.pfc_mode_enable = false;
9131 }
9132
9133 ixgbe_validate_rtr(adapter, tc);
9134
9135#endif
9136 ixgbe_init_interrupt_scheme(adapter);
9137
9138 ixgbe_defrag_macvlan_pools(dev);
9139
9140 if (netif_running(dev))
9141 return ixgbe_open(dev);
9142
9143 return 0;
9144}
9145
9146static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
9147 struct tc_cls_u32_offload *cls)
9148{
9149 u32 hdl = cls->knode.handle;
9150 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
9151 u32 loc = cls->knode.handle & 0xfffff;
9152 int err = 0, i, j;
9153 struct ixgbe_jump_table *jump = NULL;
9154
9155 if (loc > IXGBE_MAX_HW_ENTRIES)
9156 return -EINVAL;
9157
9158 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
9159 return -EINVAL;
9160
9161
9162 if (uhtid != 0x800) {
9163 jump = adapter->jump_tables[uhtid];
9164 if (!jump)
9165 return -EINVAL;
9166 if (!test_bit(loc - 1, jump->child_loc_map))
9167 return -EINVAL;
9168 clear_bit(loc - 1, jump->child_loc_map);
9169 }
9170
9171
9172 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9173 jump = adapter->jump_tables[i];
9174 if (jump && jump->link_hdl == hdl) {
9175
9176
9177
9178 for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
9179 if (!test_bit(j, jump->child_loc_map))
9180 continue;
9181 spin_lock(&adapter->fdir_perfect_lock);
9182 err = ixgbe_update_ethtool_fdir_entry(adapter,
9183 NULL,
9184 j + 1);
9185 spin_unlock(&adapter->fdir_perfect_lock);
9186 clear_bit(j, jump->child_loc_map);
9187 }
9188
9189 kfree(jump->input);
9190 kfree(jump->mask);
9191 kfree(jump);
9192 adapter->jump_tables[i] = NULL;
9193 return err;
9194 }
9195 }
9196
9197 spin_lock(&adapter->fdir_perfect_lock);
9198 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
9199 spin_unlock(&adapter->fdir_perfect_lock);
9200 return err;
9201}
9202
9203static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
9204 struct tc_cls_u32_offload *cls)
9205{
9206 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9207
9208 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9209 return -EINVAL;
9210
9211
9212
9213
9214 if (cls->hnode.divisor > 0)
9215 return -EINVAL;
9216
9217 set_bit(uhtid - 1, &adapter->tables);
9218 return 0;
9219}
9220
9221static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
9222 struct tc_cls_u32_offload *cls)
9223{
9224 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9225
9226 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9227 return -EINVAL;
9228
9229 clear_bit(uhtid - 1, &adapter->tables);
9230 return 0;
9231}
9232
9233#ifdef CONFIG_NET_CLS_ACT
9234struct upper_walk_data {
9235 struct ixgbe_adapter *adapter;
9236 u64 action;
9237 int ifindex;
9238 u8 queue;
9239};
9240
9241static int get_macvlan_queue(struct net_device *upper, void *_data)
9242{
9243 if (netif_is_macvlan(upper)) {
9244 struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
9245 struct upper_walk_data *data = _data;
9246 struct ixgbe_adapter *adapter = data->adapter;
9247 int ifindex = data->ifindex;
9248
9249 if (vadapter && upper->ifindex == ifindex) {
9250 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
9251 data->action = data->queue;
9252 return 1;
9253 }
9254 }
9255
9256 return 0;
9257}
9258
9259static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
9260 u8 *queue, u64 *action)
9261{
9262 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
9263 unsigned int num_vfs = adapter->num_vfs, vf;
9264 struct upper_walk_data data;
9265 struct net_device *upper;
9266
9267
9268 for (vf = 0; vf < num_vfs; ++vf) {
9269 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
9270 if (upper->ifindex == ifindex) {
9271 *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
9272 *action = vf + 1;
9273 *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
9274 return 0;
9275 }
9276 }
9277
9278
9279 data.adapter = adapter;
9280 data.ifindex = ifindex;
9281 data.action = 0;
9282 data.queue = 0;
9283 if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
9284 get_macvlan_queue, &data)) {
9285 *action = data.action;
9286 *queue = data.queue;
9287
9288 return 0;
9289 }
9290
9291 return -EINVAL;
9292}
9293
9294static int parse_tc_actions(struct ixgbe_adapter *adapter,
9295 struct tcf_exts *exts, u64 *action, u8 *queue)
9296{
9297 const struct tc_action *a;
9298 int i;
9299
9300 if (!tcf_exts_has_actions(exts))
9301 return -EINVAL;
9302
9303 tcf_exts_for_each_action(i, a, exts) {
9304
9305 if (is_tcf_gact_shot(a)) {
9306 *action = IXGBE_FDIR_DROP_QUEUE;
9307 *queue = IXGBE_FDIR_DROP_QUEUE;
9308 return 0;
9309 }
9310
9311
9312 if (is_tcf_mirred_egress_redirect(a)) {
9313 struct net_device *dev = tcf_mirred_dev(a);
9314
9315 if (!dev)
9316 return -EINVAL;
9317 return handle_redirect_action(adapter, dev->ifindex,
9318 queue, action);
9319 }
9320
9321 return -EINVAL;
9322 }
9323
9324 return -EINVAL;
9325}
9326#else
9327static int parse_tc_actions(struct ixgbe_adapter *adapter,
9328 struct tcf_exts *exts, u64 *action, u8 *queue)
9329{
9330 return -EINVAL;
9331}
9332#endif
9333
9334static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
9335 union ixgbe_atr_input *mask,
9336 struct tc_cls_u32_offload *cls,
9337 struct ixgbe_mat_field *field_ptr,
9338 struct ixgbe_nexthdr *nexthdr)
9339{
9340 int i, j, off;
9341 __be32 val, m;
9342 bool found_entry = false, found_jump_field = false;
9343
9344 for (i = 0; i < cls->knode.sel->nkeys; i++) {
9345 off = cls->knode.sel->keys[i].off;
9346 val = cls->knode.sel->keys[i].val;
9347 m = cls->knode.sel->keys[i].mask;
9348
9349 for (j = 0; field_ptr[j].val; j++) {
9350 if (field_ptr[j].off == off) {
9351 field_ptr[j].val(input, mask, (__force u32)val,
9352 (__force u32)m);
9353 input->filter.formatted.flow_type |=
9354 field_ptr[j].type;
9355 found_entry = true;
9356 break;
9357 }
9358 }
9359 if (nexthdr) {
9360 if (nexthdr->off == cls->knode.sel->keys[i].off &&
9361 nexthdr->val ==
9362 (__force u32)cls->knode.sel->keys[i].val &&
9363 nexthdr->mask ==
9364 (__force u32)cls->knode.sel->keys[i].mask)
9365 found_jump_field = true;
9366 else
9367 continue;
9368 }
9369 }
9370
9371 if (nexthdr && !found_jump_field)
9372 return -EINVAL;
9373
9374 if (!found_entry)
9375 return 0;
9376
9377 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
9378 IXGBE_ATR_L4TYPE_MASK;
9379
9380 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
9381 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
9382
9383 return 0;
9384}
9385
9386static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
9387 struct tc_cls_u32_offload *cls)
9388{
9389 __be16 protocol = cls->common.protocol;
9390 u32 loc = cls->knode.handle & 0xfffff;
9391 struct ixgbe_hw *hw = &adapter->hw;
9392 struct ixgbe_mat_field *field_ptr;
9393 struct ixgbe_fdir_filter *input = NULL;
9394 union ixgbe_atr_input *mask = NULL;
9395 struct ixgbe_jump_table *jump = NULL;
9396 int i, err = -EINVAL;
9397 u8 queue;
9398 u32 uhtid, link_uhtid;
9399
9400 uhtid = TC_U32_USERHTID(cls->knode.handle);
9401 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
9402
	/* u32 filters are only offloaded for plain IPv4 traffic here; the
	 * hardware parse graph starts at the network layer, so L2 matching
	 * and other ethertypes are rejected.
	 */
9410 if (protocol != htons(ETH_P_IP))
9411 return err;
9412
9413 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
9414 e_err(drv, "Location out of range\n");
9415 return err;
9416 }
9417
	/* cls_u32 is a graph rooted at handle 0x800. Root filters use the
	 * base match table for this MAC, while filters attached to a
	 * user-defined hash table (uhtid) use the jump table created when
	 * the corresponding link was added.
	 */
9425 if (uhtid == 0x800) {
9426 field_ptr = (adapter->jump_tables[0])->mat;
9427 } else {
9428 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9429 return err;
9430 if (!adapter->jump_tables[uhtid])
9431 return err;
9432 field_ptr = (adapter->jump_tables[uhtid])->mat;
9433 }
9434
9435 if (!field_ptr)
9436 return err;
9437
	/* A non-zero link handle means this filter creates a link (jump)
	 * into another hash table instead of programming a final rule.
	 */
9444 if (link_uhtid) {
9445 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
9446
9447 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
9448 return err;
9449
9450 if (!test_bit(link_uhtid - 1, &adapter->tables))
9451 return err;
9452
		/* Multiple filters as links to a single hash table are not
		 * supported; reject the request if a link to this table
		 * already exists.
		 */
9458 if (adapter->jump_tables[link_uhtid] &&
9459 (adapter->jump_tables[link_uhtid])->link_hdl) {
9460 e_err(drv, "Link filter exists for link: %x\n",
9461 link_uhtid);
9462 return err;
9463 }
9464
9465 for (i = 0; nexthdr[i].jump; i++) {
9466 if (nexthdr[i].o != cls->knode.sel->offoff ||
9467 nexthdr[i].s != cls->knode.sel->offshift ||
9468 nexthdr[i].m !=
9469 (__force u32)cls->knode.sel->offmask)
9470 return err;
9471
9472 jump = kzalloc(sizeof(*jump), GFP_KERNEL);
9473 if (!jump)
9474 return -ENOMEM;
9475 input = kzalloc(sizeof(*input), GFP_KERNEL);
9476 if (!input) {
9477 err = -ENOMEM;
9478 goto free_jump;
9479 }
9480 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9481 if (!mask) {
9482 err = -ENOMEM;
9483 goto free_input;
9484 }
9485 jump->input = input;
9486 jump->mask = mask;
9487 jump->link_hdl = cls->knode.handle;
9488
9489 err = ixgbe_clsu32_build_input(input, mask, cls,
9490 field_ptr, &nexthdr[i]);
9491 if (!err) {
9492 jump->mat = nexthdr[i].jump;
9493 adapter->jump_tables[link_uhtid] = jump;
9494 break;
9495 } else {
9496 kfree(mask);
9497 kfree(input);
9498 kfree(jump);
9499 }
9500 }
9501 return 0;
9502 }
9503
9504 input = kzalloc(sizeof(*input), GFP_KERNEL);
9505 if (!input)
9506 return -ENOMEM;
9507 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9508 if (!mask) {
9509 err = -ENOMEM;
9510 goto free_input;
9511 }
9512
9513 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
9514 if ((adapter->jump_tables[uhtid])->input)
9515 memcpy(input, (adapter->jump_tables[uhtid])->input,
9516 sizeof(*input));
9517 if ((adapter->jump_tables[uhtid])->mask)
9518 memcpy(mask, (adapter->jump_tables[uhtid])->mask,
9519 sizeof(*mask));

		/* Lookup in all child hash tables whether this location is
		 * already filled with a filter.
		 */
9524 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9525 struct ixgbe_jump_table *link = adapter->jump_tables[i];
9526
9527 if (link && (test_bit(loc - 1, link->child_loc_map))) {
9528 e_err(drv, "Filter exists in location: %x\n",
9529 loc);
9530 err = -EINVAL;
9531 goto err_out;
9532 }
9533 }
9534 }
9535 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
9536 if (err)
9537 goto err_out;
9538
9539 err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
9540 &queue);
9541 if (err < 0)
9542 goto err_out;
9543
9544 input->sw_idx = loc;
9545
9546 spin_lock(&adapter->fdir_perfect_lock);
9547
9548 if (hlist_empty(&adapter->fdir_filter_list)) {
9549 memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
9550 err = ixgbe_fdir_set_input_mask_82599(hw, mask);
9551 if (err)
9552 goto err_out_w_lock;
9553 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
9554 err = -EINVAL;
9555 goto err_out_w_lock;
9556 }
9557
9558 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
9559 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
9560 input->sw_idx, queue);
9561 if (!err)
9562 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
9563 spin_unlock(&adapter->fdir_perfect_lock);
9564
9565 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
9566 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
9567
9568 kfree(mask);
9569 return err;
9570err_out_w_lock:
9571 spin_unlock(&adapter->fdir_perfect_lock);
9572err_out:
9573 kfree(mask);
9574free_input:
9575 kfree(input);
9576free_jump:
9577 kfree(jump);
9578 return err;
9579}
9580
9581static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
9582 struct tc_cls_u32_offload *cls_u32)
9583{
9584 switch (cls_u32->command) {
9585 case TC_CLSU32_NEW_KNODE:
9586 case TC_CLSU32_REPLACE_KNODE:
9587 return ixgbe_configure_clsu32(adapter, cls_u32);
9588 case TC_CLSU32_DELETE_KNODE:
9589 return ixgbe_delete_clsu32(adapter, cls_u32);
9590 case TC_CLSU32_NEW_HNODE:
9591 case TC_CLSU32_REPLACE_HNODE:
9592 return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32);
9593 case TC_CLSU32_DELETE_HNODE:
9594 return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32);
9595 default:
9596 return -EOPNOTSUPP;
9597 }
9598}
9599
9600static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9601 void *cb_priv)
9602{
9603 struct ixgbe_adapter *adapter = cb_priv;
9604
9605 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
9606 return -EOPNOTSUPP;
9607
9608 switch (type) {
9609 case TC_SETUP_CLSU32:
9610 return ixgbe_setup_tc_cls_u32(adapter, type_data);
9611 default:
9612 return -EOPNOTSUPP;
9613 }
9614}
9615
9616static int ixgbe_setup_tc_mqprio(struct net_device *dev,
9617 struct tc_mqprio_qopt *mqprio)
9618{
9619 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9620 return ixgbe_setup_tc(dev, mqprio->num_tc);
9621}
9622
9623static LIST_HEAD(ixgbe_block_cb_list);
9624
9625static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
9626 void *type_data)
9627{
9628 struct ixgbe_adapter *adapter = netdev_priv(dev);
9629
9630 switch (type) {
9631 case TC_SETUP_BLOCK:
9632 return flow_block_cb_setup_simple(type_data,
9633 &ixgbe_block_cb_list,
9634 ixgbe_setup_tc_block_cb,
9635 adapter, adapter, true);
9636 case TC_SETUP_QDISC_MQPRIO:
9637 return ixgbe_setup_tc_mqprio(dev, type_data);
9638 default:
9639 return -EOPNOTSUPP;
9640 }
9641}
9642
9643#ifdef CONFIG_PCI_IOV
9644void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
9645{
9646 struct net_device *netdev = adapter->netdev;
9647
9648 rtnl_lock();
9649 ixgbe_setup_tc(netdev, adapter->hw_tcs);
9650 rtnl_unlock();
9651}
9652
9653#endif
9654void ixgbe_do_reset(struct net_device *netdev)
9655{
9656 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9657
9658 if (netif_running(netdev))
9659 ixgbe_reinit_locked(adapter);
9660 else
9661 ixgbe_reset(adapter);
9662}
9663
9664static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
9665 netdev_features_t features)
9666{
9667 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9668
9669
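	/* if Rx checksum is disabled, LRO must also be disabled */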
9670 if (!(features & NETIF_F_RXCSUM))
9671 features &= ~NETIF_F_LRO;
9672
9673
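	/* turn off LRO if the hardware is not RSC capable */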
9674 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
9675 features &= ~NETIF_F_LRO;
9676
9677 if (adapter->xdp_prog && (features & NETIF_F_LRO)) {
9678 e_dev_err("LRO is not supported with XDP\n");
9679 features &= ~NETIF_F_LRO;
9680 }
9681
9682 return features;
9683}
9684
9685static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
9686{
9687 int rss = min_t(int, ixgbe_max_rss_indices(adapter),
9688 num_online_cpus());
9689
9690
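	/* if no VFs are using the pool offset, drop the VMDq/SR-IOV flags */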
9691 if (!adapter->ring_feature[RING_F_VMDQ].offset)
9692 adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
9693 IXGBE_FLAG_SRIOV_ENABLED);
9694
9695 adapter->ring_feature[RING_F_RSS].limit = rss;
9696 adapter->ring_feature[RING_F_VMDQ].limit = 1;
9697
9698 ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
9699}
9700
9701static int ixgbe_set_features(struct net_device *netdev,
9702 netdev_features_t features)
9703{
9704 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9705 netdev_features_t changed = netdev->features ^ features;
9706 bool need_reset = false;
9707
9708
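	/* make sure the RSC state matches the requested LRO setting */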
9709 if (!(features & NETIF_F_LRO)) {
9710 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9711 need_reset = true;
9712 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
9713 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
9714 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
9715 if (adapter->rx_itr_setting == 1 ||
9716 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
9717 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
9718 need_reset = true;
9719 } else if ((changed ^ features) & NETIF_F_LRO) {
9720 e_info(probe, "rx-usecs set too low, "
9721 "disabling RSC\n");
9722 }
9723 }
9724
	/* Check if Flow Director n-tuple support or hw_tc support was
	 * enabled or disabled.  If the state changed, we need to reset.
	 */
9729 if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
9730
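		/* turn off ATR, enable perfect filters and reset */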
9731 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
9732 need_reset = true;
9733
9734 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
9735 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9736 } else {
9737
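		/* turn off perfect filters, enable ATR and reset */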
9738 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
9739 need_reset = true;
9740
9741 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9742
		/* We cannot enable ATR if SR-IOV is enabled */
9744 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
		    /* We cannot enable ATR if we have 2 or more traffic classes */
9746 (adapter->hw_tcs > 1) ||
		    /* We cannot enable ATR if RSS is disabled */
9748 (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
		    /* A sample rate of 0 indicates ATR disabled */
9750 (!adapter->atr_sample_rate))
9751 ;
9752 else
9753 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
9754 }
9755
9756 if (changed & NETIF_F_RXALL)
9757 need_reset = true;
9758
9759 netdev->features = features;
9760
9761 if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
9762 if (features & NETIF_F_RXCSUM) {
9763 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9764 } else {
9765 u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9766
9767 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9768 }
9769 }
9770
9771 if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
9772 if (features & NETIF_F_RXCSUM) {
9773 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9774 } else {
9775 u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9776
9777 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9778 }
9779 }
9780
9781 if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
9782 ixgbe_reset_l2fw_offload(adapter);
9783 else if (need_reset)
9784 ixgbe_do_reset(netdev);
9785 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
9786 NETIF_F_HW_VLAN_CTAG_FILTER))
9787 ixgbe_set_rx_mode(netdev);
9788
9789 return 1;
9790}
9791
/**
 * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
 * @dev: The port's netdev
 * @ti: Tunnel endpoint information
 **/
9797static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
9798 struct udp_tunnel_info *ti)
9799{
9800 struct ixgbe_adapter *adapter = netdev_priv(dev);
9801 struct ixgbe_hw *hw = &adapter->hw;
9802 __be16 port = ti->port;
9803 u32 port_shift = 0;
9804 u32 reg;
9805
9806 if (ti->sa_family != AF_INET)
9807 return;
9808
9809 switch (ti->type) {
9810 case UDP_TUNNEL_TYPE_VXLAN:
9811 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9812 return;
9813
9814 if (adapter->vxlan_port == port)
9815 return;
9816
9817 if (adapter->vxlan_port) {
9818 netdev_info(dev,
9819 "VXLAN port %d set, not adding port %d\n",
9820 ntohs(adapter->vxlan_port),
9821 ntohs(port));
9822 return;
9823 }
9824
9825 adapter->vxlan_port = port;
9826 break;
9827 case UDP_TUNNEL_TYPE_GENEVE:
9828 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9829 return;
9830
9831 if (adapter->geneve_port == port)
9832 return;
9833
9834 if (adapter->geneve_port) {
9835 netdev_info(dev,
9836 "GENEVE port %d set, not adding port %d\n",
9837 ntohs(adapter->geneve_port),
9838 ntohs(port));
9839 return;
9840 }
9841
9842 port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
9843 adapter->geneve_port = port;
9844 break;
9845 default:
9846 return;
9847 }
9848
9849 reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
9850 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
9851}
9852
/**
 * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
 * @dev: The port's netdev
 * @ti: Tunnel endpoint information
 **/
9858static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
9859 struct udp_tunnel_info *ti)
9860{
9861 struct ixgbe_adapter *adapter = netdev_priv(dev);
9862 u32 port_mask;
9863
9864 if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
9865 ti->type != UDP_TUNNEL_TYPE_GENEVE)
9866 return;
9867
9868 if (ti->sa_family != AF_INET)
9869 return;
9870
9871 switch (ti->type) {
9872 case UDP_TUNNEL_TYPE_VXLAN:
9873 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9874 return;
9875
9876 if (adapter->vxlan_port != ti->port) {
9877 netdev_info(dev, "VXLAN port %d not found\n",
9878 ntohs(ti->port));
9879 return;
9880 }
9881
9882 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9883 break;
9884 case UDP_TUNNEL_TYPE_GENEVE:
9885 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9886 return;
9887
9888 if (adapter->geneve_port != ti->port) {
9889 netdev_info(dev, "GENEVE port %d not found\n",
9890 ntohs(ti->port));
9891 return;
9892 }
9893
9894 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9895 break;
9896 default:
9897 return;
9898 }
9899
9900 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9901 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9902}
9903
9904static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9905 struct net_device *dev,
9906 const unsigned char *addr, u16 vid,
9907 u16 flags,
9908 struct netlink_ext_ack *extack)
9909{
9910
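	/* guarantee we can provide a unique filter for the unicast address */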
9911 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
9912 struct ixgbe_adapter *adapter = netdev_priv(dev);
9913 u16 pool = VMDQ_P(0);
9914
9915 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
9916 return -ENOMEM;
9917 }
9918
9919 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
9920}
9921
/**
 * ixgbe_configure_bridge_mode - set various bridge modes
 * @adapter: the private structure
 * @mode: requested bridge mode
 *
 * Configure the hardware for the requested bridge mode (VEB or VEPA).
 **/
9929static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
9930 __u16 mode)
9931{
9932 struct ixgbe_hw *hw = &adapter->hw;
9933 unsigned int p, num_pools;
9934 u32 vmdctl;
9935
9936 switch (mode) {
9937 case BRIDGE_MODE_VEPA:
9938
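		/* disable Tx loopback for local VEPA */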
9939 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);

		/* must enable Rx switching replication to allow multicast
		 * packet reception on all VFs, and to enable source address
		 * pruning.
		 */
9945 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9946 vmdctl |= IXGBE_VT_CTL_REPLEN;
9947 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9948

		/* enable Rx source address pruning. Note, this requires
		 * replication to be enabled or else it does nothing.
		 */
9952 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9953 for (p = 0; p < num_pools; p++) {
9954 if (hw->mac.ops.set_source_address_pruning)
9955 hw->mac.ops.set_source_address_pruning(hw,
9956 true,
9957 p);
9958 }
9959 break;
9960 case BRIDGE_MODE_VEB:
9961
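		/* enable Tx loopback for internal VEB bridge */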
9962 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
9963 IXGBE_PFDTXGSWC_VT_LBEN);

		/* disable Rx switching replication unless we have SR-IOV
		 * virtual functions
		 */
9968 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9969 if (!adapter->num_vfs)
9970 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
9971 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9972
		/* disable Rx source address pruning, since we don't expect
		 * "spoofed" frames in this mode
		 */
9976 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9977 for (p = 0; p < num_pools; p++) {
9978 if (hw->mac.ops.set_source_address_pruning)
9979 hw->mac.ops.set_source_address_pruning(hw,
9980 false,
9981 p);
9982 }
9983 break;
9984 default:
9985 return -EINVAL;
9986 }
9987
9988 adapter->bridge_mode = mode;
9989
9990 e_info(drv, "enabling bridge mode: %s\n",
9991 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9992
9993 return 0;
9994}
9995
9996static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
9997 struct nlmsghdr *nlh, u16 flags,
9998 struct netlink_ext_ack *extack)
9999{
10000 struct ixgbe_adapter *adapter = netdev_priv(dev);
10001 struct nlattr *attr, *br_spec;
10002 int rem;
10003
10004 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
10005 return -EOPNOTSUPP;
10006
10007 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
10008 if (!br_spec)
10009 return -EINVAL;
10010
10011 nla_for_each_nested(attr, br_spec, rem) {
10012 int status;
10013 __u16 mode;
10014
10015 if (nla_type(attr) != IFLA_BRIDGE_MODE)
10016 continue;
10017
10018 if (nla_len(attr) < sizeof(mode))
10019 return -EINVAL;
10020
10021 mode = nla_get_u16(attr);
10022 status = ixgbe_configure_bridge_mode(adapter, mode);
10023 if (status)
10024 return status;
10025
10026 break;
10027 }
10028
10029 return 0;
10030}
10031
10032static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
10033 struct net_device *dev,
10034 u32 filter_mask, int nlflags)
10035{
10036 struct ixgbe_adapter *adapter = netdev_priv(dev);
10037
10038 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
10039 return 0;
10040
10041 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
10042 adapter->bridge_mode, 0, 0, nlflags,
10043 filter_mask, NULL);
10044}
10045
10046static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
10047{
10048 struct ixgbe_adapter *adapter = netdev_priv(pdev);
10049 struct ixgbe_fwd_adapter *accel;
10050 int tcs = adapter->hw_tcs ? : 1;
10051 int pool, err;
10052
10053 if (adapter->xdp_prog) {
10054 e_warn(probe, "L2FW offload is not supported with XDP\n");
10055 return ERR_PTR(-EINVAL);
10056 }
10057
	/* The hardware only filters on the destination MAC address, so the
	 * macvlan must operate in a mode that can be satisfied by pure
	 * destination filtering.
	 */
10062 if (!macvlan_supports_dest_filter(vdev))
10063 return ERR_PTR(-EMEDIUMTYPE);
10064
	/* The offloaded macvlan must be a single queue device so that its
	 * queue can be mapped directly onto one of our ring pairs.
	 */
10069 if (netif_is_multiqueue(vdev))
10070 return ERR_PTR(-ERANGE);
10071
10072 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
10073 if (pool == adapter->num_rx_pools) {
10074 u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
10075 u16 reserved_pools;
10076
10077 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
10078 adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
10079 adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
10080 return ERR_PTR(-EBUSY);
10081
		/* The hardware has a limited number of pools which are shared
		 * between SR-IOV VFs and macvlan offload; make sure one is
		 * still available.
		 */
10086 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
10087 return ERR_PTR(-EBUSY);
10088
10089
10090 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
10091 IXGBE_FLAG_SRIOV_ENABLED;
10092
		/* Try to reserve as many queues per pool as possible,
		 * starting with the layouts that support 4 queues per pool,
		 * then 2, and finally 1 queue per pool.
		 */
10097 if (used_pools < 32 && adapter->num_rx_pools < 16)
10098 reserved_pools = min_t(u16,
10099 32 - used_pools,
10100 16 - adapter->num_rx_pools);
10101 else if (adapter->num_rx_pools < 32)
10102 reserved_pools = min_t(u16,
10103 64 - used_pools,
10104 32 - adapter->num_rx_pools);
10105 else
10106 reserved_pools = 64 - used_pools;
10107
10108
10109 if (!reserved_pools)
10110 return ERR_PTR(-EBUSY);
10111
10112 adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;

		/* force reinit of the ring allocation with VMDq enabled */
10115 err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
10116 if (err)
10117 return ERR_PTR(err);
10118
10119 if (pool >= adapter->num_rx_pools)
10120 return ERR_PTR(-ENOMEM);
10121 }
10122
10123 accel = kzalloc(sizeof(*accel), GFP_KERNEL);
10124 if (!accel)
10125 return ERR_PTR(-ENOMEM);
10126
10127 set_bit(pool, adapter->fwd_bitmask);
10128 netdev_set_sb_channel(vdev, pool);
10129 accel->pool = pool;
10130 accel->netdev = vdev;
10131
10132 if (!netif_running(pdev))
10133 return accel;
10134
10135 err = ixgbe_fwd_ring_up(adapter, accel);
10136 if (err)
10137 return ERR_PTR(err);
10138
10139 return accel;
10140}
10141
10142static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
10143{
10144 struct ixgbe_fwd_adapter *accel = priv;
10145 struct ixgbe_adapter *adapter = netdev_priv(pdev);
10146 unsigned int rxbase = accel->rx_base_queue;
10147 unsigned int i;
10148
10149
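	/* delete the unicast filter associated with the offloaded interface */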
10150 ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
10151 VMDQ_P(accel->pool));

	/* Allow remaining Rx packets to get flushed out of the
	 * Rx FIFO before we drop the netdev for the ring.
	 */
10156 usleep_range(10000, 20000);
10157
10158 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
10159 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
10160 struct ixgbe_q_vector *qv = ring->q_vector;
10161
		/* Make sure we aren't processing any packets and clear
		 * netdev to shut down the ring.
		 */
10165 if (netif_running(adapter->netdev))
10166 napi_synchronize(&qv->napi);
10167 ring->netdev = NULL;
10168 }
10169
10170
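	/* unbind the queues and drop the subordinate channel config */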
10171 netdev_unbind_sb_channel(pdev, accel->netdev);
10172 netdev_set_sb_channel(accel->netdev, 0);
10173
10174 clear_bit(accel->pool, adapter->fwd_bitmask);
10175 kfree(accel);
10176}
10177
10178#define IXGBE_MAX_MAC_HDR_LEN 127
10179#define IXGBE_MAX_NETWORK_HDR_LEN 511
10180
10181static netdev_features_t
10182ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
10183 netdev_features_t features)
10184{
10185 unsigned int network_hdr_len, mac_hdr_len;
10186
10187
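	/* make certain the headers can be described by a context descriptor */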
10188 mac_hdr_len = skb_network_header(skb) - skb->data;
10189 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
10190 return features & ~(NETIF_F_HW_CSUM |
10191 NETIF_F_SCTP_CRC |
10192 NETIF_F_HW_VLAN_CTAG_TX |
10193 NETIF_F_TSO |
10194 NETIF_F_TSO6);
10195
10196 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
10197 if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
10198 return features & ~(NETIF_F_HW_CSUM |
10199 NETIF_F_SCTP_CRC |
10200 NETIF_F_TSO |
10201 NETIF_F_TSO6);
10202
	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 * IPsec offload sets skb->encapsulation but still can handle
	 * the TSO, so it's the exception.
	 */
10208 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
10209#ifdef CONFIG_IXGBE_IPSEC
10210 if (!secpath_exists(skb))
10211#endif
10212 features &= ~NETIF_F_TSO;
10213 }
10214
10215 return features;
10216}
10217
10218static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10219{
10220 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
10221 struct ixgbe_adapter *adapter = netdev_priv(dev);
10222 struct bpf_prog *old_prog;
10223 bool need_reset;
10224
10225 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
10226 return -EINVAL;
10227
10228 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
10229 return -EINVAL;
10230
10231
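	/* verify each Rx ring is compatible with XDP (no RSC, buffers large enough) */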
10232 for (i = 0; i < adapter->num_rx_queues; i++) {
10233 struct ixgbe_ring *ring = adapter->rx_ring[i];
10234
10235 if (ring_is_rsc_enabled(ring))
10236 return -EINVAL;
10237
10238 if (frame_size > ixgbe_rx_bufsz(ring))
10239 return -EINVAL;
10240 }
10241
10242 if (nr_cpu_ids > MAX_XDP_QUEUES)
10243 return -ENOMEM;
10244
10245 old_prog = xchg(&adapter->xdp_prog, prog);
10246 need_reset = (!!prog != !!old_prog);
10247
10248
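	/* if transitioning XDP modes, reconfigure the rings */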
10249 if (need_reset) {
10250 int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
10251
10252 if (err) {
10253 rcu_assign_pointer(adapter->xdp_prog, old_prog);
10254 return -EINVAL;
10255 }
10256 } else {
10257 for (i = 0; i < adapter->num_rx_queues; i++)
10258 (void)xchg(&adapter->rx_ring[i]->xdp_prog,
10259 adapter->xdp_prog);
10260 }
10261
10262 if (old_prog)
10263 bpf_prog_put(old_prog);
10264
	/* Kick start the NAPI context if there is an AF_XDP socket open
	 * on that queue id, so that receiving will start.
	 */
10268 if (need_reset && prog)
10269 for (i = 0; i < adapter->num_rx_queues; i++)
10270 if (adapter->xdp_ring[i]->xsk_umem)
10271 (void)ixgbe_xsk_wakeup(adapter->netdev, i,
10272 XDP_WAKEUP_RX);
10273
10274 return 0;
10275}
10276
10277static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
10278{
10279 struct ixgbe_adapter *adapter = netdev_priv(dev);
10280
10281 switch (xdp->command) {
10282 case XDP_SETUP_PROG:
10283 return ixgbe_xdp_setup(dev, xdp->prog);
10284 case XDP_QUERY_PROG:
10285 xdp->prog_id = adapter->xdp_prog ?
10286 adapter->xdp_prog->aux->id : 0;
10287 return 0;
10288 case XDP_SETUP_XSK_UMEM:
10289 return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
10290 xdp->xsk.queue_id);
10291
10292 default:
10293 return -EINVAL;
10294 }
10295}
10296
10297void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
10298{
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 */
10302 wmb();
10303 writel(ring->next_to_use, ring->tail);
10304}
10305
10306static int ixgbe_xdp_xmit(struct net_device *dev, int n,
10307 struct xdp_frame **frames, u32 flags)
10308{
10309 struct ixgbe_adapter *adapter = netdev_priv(dev);
10310 struct ixgbe_ring *ring;
10311 int drops = 0;
10312 int i;
10313
10314 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
10315 return -ENETDOWN;
10316
10317 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
10318 return -EINVAL;
10319
	/* During program transitions it is possible adapter->xdp_prog is assigned
	 * but the ring has not been configured yet. In this case simply abort xmit.
	 */
10323 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
10324 if (unlikely(!ring))
10325 return -ENXIO;
10326
10327 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
10328 return -ENXIO;
10329
10330 for (i = 0; i < n; i++) {
10331 struct xdp_frame *xdpf = frames[i];
10332 int err;
10333
10334 err = ixgbe_xmit_xdp_ring(adapter, xdpf);
10335 if (err != IXGBE_XDP_TX) {
10336 xdp_return_frame_rx_napi(xdpf);
10337 drops++;
10338 }
10339 }
10340
10341 if (unlikely(flags & XDP_XMIT_FLUSH))
10342 ixgbe_xdp_ring_update_tail(ring);
10343
10344 return n - drops;
10345}
10346
10347static const struct net_device_ops ixgbe_netdev_ops = {
10348 .ndo_open = ixgbe_open,
10349 .ndo_stop = ixgbe_close,
10350 .ndo_start_xmit = ixgbe_xmit_frame,
10351 .ndo_set_rx_mode = ixgbe_set_rx_mode,
10352 .ndo_validate_addr = eth_validate_addr,
10353 .ndo_set_mac_address = ixgbe_set_mac,
10354 .ndo_change_mtu = ixgbe_change_mtu,
10355 .ndo_tx_timeout = ixgbe_tx_timeout,
10356 .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
10357 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
10358 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
10359 .ndo_do_ioctl = ixgbe_ioctl,
10360 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
10361 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
10362 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
10363 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
10364 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
10365 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
10366 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
10367 .ndo_get_stats64 = ixgbe_get_stats64,
10368 .ndo_setup_tc = __ixgbe_setup_tc,
10369#ifdef IXGBE_FCOE
10370 .ndo_select_queue = ixgbe_select_queue,
10371 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
10372 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
10373 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
10374 .ndo_fcoe_enable = ixgbe_fcoe_enable,
10375 .ndo_fcoe_disable = ixgbe_fcoe_disable,
10376 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
10377 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
10378#endif
10379 .ndo_set_features = ixgbe_set_features,
10380 .ndo_fix_features = ixgbe_fix_features,
10381 .ndo_fdb_add = ixgbe_ndo_fdb_add,
10382 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
10383 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
10384 .ndo_dfwd_add_station = ixgbe_fwd_add,
10385 .ndo_dfwd_del_station = ixgbe_fwd_del,
10386 .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
10387 .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
10388 .ndo_features_check = ixgbe_features_check,
10389 .ndo_bpf = ixgbe_xdp,
10390 .ndo_xdp_xmit = ixgbe_xdp_xmit,
10391 .ndo_xsk_wakeup = ixgbe_xsk_wakeup,
10392};
10393
10394static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
10395 struct ixgbe_ring *tx_ring)
10396{
10397 unsigned long wait_delay, delay_interval;
10398 struct ixgbe_hw *hw = &adapter->hw;
10399 u8 reg_idx = tx_ring->reg_idx;
10400 int wait_loop;
10401 u32 txdctl;
10402
10403 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);

	/* base the polling delay on the PCIe completion timeout */
10406 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10407
10408 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10409 wait_delay = delay_interval;
10410
10411 while (wait_loop--) {
10412 usleep_range(wait_delay, wait_delay + 10);
10413 wait_delay += delay_interval * 2;
10414 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
10415
10416 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
10417 return;
10418 }
10419
10420 e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
10421}
10422
10423static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
10424 struct ixgbe_ring *tx_ring)
10425{
10426 set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10427 ixgbe_disable_txr_hw(adapter, tx_ring);
10428}
10429
10430static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
10431 struct ixgbe_ring *rx_ring)
10432{
10433 unsigned long wait_delay, delay_interval;
10434 struct ixgbe_hw *hw = &adapter->hw;
10435 u8 reg_idx = rx_ring->reg_idx;
10436 int wait_loop;
10437 u32 rxdctl;
10438
10439 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10440 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
10441 rxdctl |= IXGBE_RXDCTL_SWFLSH;
10442
10443
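	/* write value back with RXDCTL.ENABLE bit cleared */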
10444 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
10445
10446
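	/* RXDCTL.EN may not change on 82598 if link is down, so skip the poll */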
10447 if (hw->mac.type == ixgbe_mac_82598EB &&
10448 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
10449 return;

	/* base the polling delay on the PCIe completion timeout */
10452 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10453
10454 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10455 wait_delay = delay_interval;
10456
10457 while (wait_loop--) {
10458 usleep_range(wait_delay, wait_delay + 10);
10459 wait_delay += delay_interval * 2;
10460 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10461
10462 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
10463 return;
10464 }
10465
10466 e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
10467}
10468
10469static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
10470{
10471 memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
10472 memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
10473}
10474
10475static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
10476{
10477 memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
10478 memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
10479}
10480
/**
 * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
 * @adapter: adapter structure
 * @ring: ring index
 *
 * This function disables a certain Rx/Tx/XDP Tx ring. The function
 * assumes that the netdev is running.
 **/
10489void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
10490{
10491 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10492
10493 rx_ring = adapter->rx_ring[ring];
10494 tx_ring = adapter->tx_ring[ring];
10495 xdp_ring = adapter->xdp_ring[ring];
10496
10497 ixgbe_disable_txr(adapter, tx_ring);
10498 if (xdp_ring)
10499 ixgbe_disable_txr(adapter, xdp_ring);
10500 ixgbe_disable_rxr_hw(adapter, rx_ring);
10501
10502 if (xdp_ring)
10503 synchronize_rcu();
10504
10505
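	/* Rx, Tx and XDP Tx share the same napi context */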
10506 napi_disable(&rx_ring->q_vector->napi);
10507
10508 ixgbe_clean_tx_ring(tx_ring);
10509 if (xdp_ring)
10510 ixgbe_clean_tx_ring(xdp_ring);
10511 ixgbe_clean_rx_ring(rx_ring);
10512
10513 ixgbe_reset_txr_stats(tx_ring);
10514 if (xdp_ring)
10515 ixgbe_reset_txr_stats(xdp_ring);
10516 ixgbe_reset_rxr_stats(rx_ring);
10517}
10518
/**
 * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
 * @adapter: adapter structure
 * @ring: ring index
 *
 * This function enables a certain Rx/Tx/XDP Tx ring. The function
 * assumes that the netdev is running.
 **/
10527void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
10528{
10529 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10530
10531 rx_ring = adapter->rx_ring[ring];
10532 tx_ring = adapter->tx_ring[ring];
10533 xdp_ring = adapter->xdp_ring[ring];
10534
10535
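	/* Rx, Tx and XDP Tx share the same napi context */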
10536 napi_enable(&rx_ring->q_vector->napi);
10537
10538 ixgbe_configure_tx_ring(adapter, tx_ring);
10539 if (xdp_ring)
10540 ixgbe_configure_tx_ring(adapter, xdp_ring);
10541 ixgbe_configure_rx_ring(adapter, rx_ring);
10542
10543 clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10544 if (xdp_ring)
10545 clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
10546}
10547
/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * This function enumerates the physical functions co-located on a single slot,
 * in order to determine how many ports a device has. This is most useful in
 * determining the required GT/s of PCIe bandwidth necessary for optimal
 * performance.
 **/
10557static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
10558{
10559 struct pci_dev *entry, *pdev = adapter->pdev;
10560 int physfns = 0;
10561
	/* Some cards can not use the generic count PCIe functions method,
	 * because they are behind a parent switch, so hardcode these to the
	 * correct number of functions.
	 */
10566 if (ixgbe_pcie_from_parent(&adapter->hw))
10567 physfns = 4;
10568
10569 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
10570
10571 if (entry->is_virtfn)
10572 continue;
10573
		/* When the devices on the bus don't all match our device ID
		 * we cannot reliably determine the correct number of
		 * functions. This can occur if a function has been direct
		 * attached to a virtual machine using VT-d, for example. In
		 * this case, simply return -1 to indicate this.
		 */
10580 if ((entry->vendor != pdev->vendor) ||
10581 (entry->device != pdev->device))
10582 return -1;
10583
10584 physfns++;
10585 }
10586
10587 return physfns;
10588}
10589
/**
 * ixgbe_wol_supported - Check whether device supports WoL
 * @adapter: the adapter private structure
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * This function is used by probe and ethtool to determine
 * which devices have WoL support
 **/
10600bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
10601 u16 subdevice_id)
10602{
10603 struct ixgbe_hw *hw = &adapter->hw;
10604 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
10605
10606
10607 if (hw->mac.type == ixgbe_mac_82598EB)
10608 return false;
10609
10610
10611 if (hw->mac.type >= ixgbe_mac_X540) {
10612 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
10613 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
10614 (hw->bus.func == 0)))
10615 return true;
10616 }
10617
10618
10619 switch (device_id) {
10620 case IXGBE_DEV_ID_82599_SFP:
10621
10622 switch (subdevice_id) {
10623 case IXGBE_SUBDEV_ID_82599_560FLR:
10624 case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
10625 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
10626 case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
			/* only support first port */
10628 if (hw->bus.func != 0)
10629 break;
			/* fall through */
10631 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
10632 case IXGBE_SUBDEV_ID_82599_SFP:
10633 case IXGBE_SUBDEV_ID_82599_RNDC:
10634 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
10635 case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
10636 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
10637 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
10638 return true;
10639 }
10640 break;
10641 case IXGBE_DEV_ID_82599EN_SFP:
10642
10643 switch (subdevice_id) {
10644 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
10645 return true;
10646 }
10647 break;
10648 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
10649
10650 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
10651 return true;
10652 break;
10653 case IXGBE_DEV_ID_82599_KX4:
10654 return true;
10655 default:
10656 break;
10657 }
10658
10659 return false;
10660}
10661
/**
 * ixgbe_set_fw_version - Set FW version
 * @adapter: the adapter private structure
 *
 * This function is used by probe and ethtool to determine the FW version to
 * format to display. The FW version is taken from the EEPROM/NVM.
 */
10669static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
10670{
10671 struct ixgbe_hw *hw = &adapter->hw;
10672 struct ixgbe_nvm_version nvm_ver;
10673
10674 ixgbe_get_oem_prod_version(hw, &nvm_ver);
10675 if (nvm_ver.oem_valid) {
10676 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10677 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
10678 nvm_ver.oem_release);
10679 return;
10680 }
10681
10682 ixgbe_get_etk_id(hw, &nvm_ver);
10683 ixgbe_get_orom_version(hw, &nvm_ver);
10684
10685 if (nvm_ver.or_valid) {
10686 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10687 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
10688 nvm_ver.or_build, nvm_ver.or_patch);
10689 return;
10690 }

	/* fall back to the ETrack ID format alone */
10693 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10694 "0x%08x", nvm_ver.etk_id);
10695}
10696
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
10708static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10709{
10710 struct net_device *netdev;
10711 struct ixgbe_adapter *adapter = NULL;
10712 struct ixgbe_hw *hw;
10713 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
10714 int i, err, pci_using_dac, expected_gts;
10715 unsigned int indices = MAX_TX_QUEUES;
10716 u8 part_str[IXGBE_PBANUM_LENGTH];
10717 bool disable_dev = false;
10718#ifdef IXGBE_FCOE
10719 u16 device_caps;
10720#endif
10721 u32 eec;
10722
	/* Catch broken hardware that puts the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
10726 if (pdev->is_virtfn) {
10727 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
10728 pci_name(pdev), pdev->vendor, pdev->device);
10729 return -EINVAL;
10730 }
10731
10732 err = pci_enable_device_mem(pdev);
10733 if (err)
10734 return err;
10735
10736 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
10737 pci_using_dac = 1;
10738 } else {
10739 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10740 if (err) {
10741 dev_err(&pdev->dev,
10742 "No usable DMA configuration, aborting\n");
10743 goto err_dma;
10744 }
10745 pci_using_dac = 0;
10746 }
10747
10748 err = pci_request_mem_regions(pdev, ixgbe_driver_name);
10749 if (err) {
10750 dev_err(&pdev->dev,
10751 "pci_request_selected_regions failed 0x%x\n", err);
10752 goto err_pci_reg;
10753 }
10754
10755 pci_enable_pcie_error_reporting(pdev);
10756
10757 pci_set_master(pdev);
10758 pci_save_state(pdev);
10759
10760 if (ii->mac == ixgbe_mac_82598EB) {
10761#ifdef CONFIG_IXGBE_DCB
10762
10763 indices = 4 * MAX_TRAFFIC_CLASS;
10764#else
10765 indices = IXGBE_MAX_RSS_INDICES;
10766#endif
10767 }
10768
10769 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
10770 if (!netdev) {
10771 err = -ENOMEM;
10772 goto err_alloc_etherdev;
10773 }
10774
10775 SET_NETDEV_DEV(netdev, &pdev->dev);
10776
10777 adapter = netdev_priv(netdev);
10778
10779 adapter->netdev = netdev;
10780 adapter->pdev = pdev;
10781 hw = &adapter->hw;
10782 hw->back = adapter;
10783 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
10784
10785 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
10786 pci_resource_len(pdev, 0));
10787 adapter->io_addr = hw->hw_addr;
10788 if (!hw->hw_addr) {
10789 err = -EIO;
10790 goto err_ioremap;
10791 }
10792
10793 netdev->netdev_ops = &ixgbe_netdev_ops;
10794 ixgbe_set_ethtool_ops(netdev);
10795 netdev->watchdog_timeo = 5 * HZ;
10796 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
10797
10798
10799 hw->mac.ops = *ii->mac_ops;
10800 hw->mac.type = ii->mac;
10801 hw->mvals = ii->mvals;
10802 if (ii->link_ops)
10803 hw->link.ops = *ii->link_ops;
10804
10805
10806 hw->eeprom.ops = *ii->eeprom_ops;
10807 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
10808 if (ixgbe_removed(hw->hw_addr)) {
10809 err = -EIO;
10810 goto err_ioremap;
10811 }
10812
10813 if (!(eec & BIT(8)))
10814 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
10815
10816
10817 hw->phy.ops = *ii->phy_ops;
10818 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
10819
10820 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
10821 hw->phy.mdio.mmds = 0;
10822 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
10823 hw->phy.mdio.dev = netdev;
10824 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
10825 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
10826
10827
10828 err = ixgbe_sw_init(adapter, ii);
10829 if (err)
10830 goto err_sw_init;
10831
10832
10833 if (hw->mac.ops.init_swfw_sync)
10834 hw->mac.ops.init_swfw_sync(hw);
10835
10836
10837 switch (adapter->hw.mac.type) {
10838 case ixgbe_mac_82599EB:
10839 case ixgbe_mac_X540:
10840 case ixgbe_mac_X550:
10841 case ixgbe_mac_X550EM_x:
10842 case ixgbe_mac_x550em_a:
10843 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10844 break;
10845 default:
10846 break;
10847 }

	/*
	 * If there is a fan on this device and it has failed, log the
	 * failure.
	 */
10853 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
10854 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
10855 if (esdp & IXGBE_ESDP_SDP1)
10856 e_crit(probe, "Fan has stopped, replace the adapter\n");
10857 }
10858
10859 if (allow_unsupported_sfp)
10860 hw->allow_unsupported_sfp = allow_unsupported_sfp;
10861
10862
10863 hw->phy.reset_if_overtemp = true;
10864 err = hw->mac.ops.reset_hw(hw);
10865 hw->phy.reset_if_overtemp = false;
10866 ixgbe_set_eee_capable(adapter);
10867 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
10868 err = 0;
10869 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
10870 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
10871 e_dev_err("Reload the driver after installing a supported module.\n");
10872 goto err_sw_init;
10873 } else if (err) {
10874 e_dev_err("HW Init failed: %d\n", err);
10875 goto err_sw_init;
10876 }
10877
10878#ifdef CONFIG_PCI_IOV
10879
10880 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
10881 goto skip_sriov;
10882
10883 ixgbe_init_mbx_params_pf(hw);
10884 hw->mbx.ops = ii->mbx_ops;
10885 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
10886 ixgbe_enable_sriov(adapter, max_vfs);
10887skip_sriov:
10888
10889#endif
10890 netdev->features = NETIF_F_SG |
10891 NETIF_F_TSO |
10892 NETIF_F_TSO6 |
10893 NETIF_F_RXHASH |
10894 NETIF_F_RXCSUM |
10895 NETIF_F_HW_CSUM;
10896
10897#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
10898 NETIF_F_GSO_GRE_CSUM | \
10899 NETIF_F_GSO_IPXIP4 | \
10900 NETIF_F_GSO_IPXIP6 | \
10901 NETIF_F_GSO_UDP_TUNNEL | \
10902 NETIF_F_GSO_UDP_TUNNEL_CSUM)
10903
10904 netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
10905 netdev->features |= NETIF_F_GSO_PARTIAL |
10906 IXGBE_GSO_PARTIAL_FEATURES;
10907
10908 if (hw->mac.type >= ixgbe_mac_82599EB)
10909 netdev->features |= NETIF_F_SCTP_CRC;
10910
10911#ifdef CONFIG_IXGBE_IPSEC
10912#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
10913 NETIF_F_HW_ESP_TX_CSUM | \
10914 NETIF_F_GSO_ESP)
10915
10916 if (adapter->ipsec)
10917 netdev->features |= IXGBE_ESP_FEATURES;
10918#endif
10919
10920 netdev->hw_features |= netdev->features |
10921 NETIF_F_HW_VLAN_CTAG_FILTER |
10922 NETIF_F_HW_VLAN_CTAG_RX |
10923 NETIF_F_HW_VLAN_CTAG_TX |
10924 NETIF_F_RXALL |
10925 NETIF_F_HW_L2FW_DOFFLOAD;
10926
10927 if (hw->mac.type >= ixgbe_mac_82599EB)
10928 netdev->hw_features |= NETIF_F_NTUPLE |
10929 NETIF_F_HW_TC;
10930
10931 if (pci_using_dac)
10932 netdev->features |= NETIF_F_HIGHDMA;
10933
10934 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
10935 netdev->hw_enc_features |= netdev->vlan_features;
10936 netdev->mpls_features |= NETIF_F_SG |
10937 NETIF_F_TSO |
10938 NETIF_F_TSO6 |
10939 NETIF_F_HW_CSUM;
10940 netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
10941
10942
10943 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
10944 NETIF_F_HW_VLAN_CTAG_RX |
10945 NETIF_F_HW_VLAN_CTAG_TX;
10946
10947 netdev->priv_flags |= IFF_UNICAST_FLT;
10948 netdev->priv_flags |= IFF_SUPP_NOFCS;
10949
10950
10951 netdev->min_mtu = ETH_MIN_MTU;
10952 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
10953
10954#ifdef CONFIG_IXGBE_DCB
10955 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
10956 netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
10957#endif
10958
10959#ifdef IXGBE_FCOE
10960 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
10961 unsigned int fcoe_l;
10962
10963 if (hw->mac.ops.get_device_caps) {
10964 hw->mac.ops.get_device_caps(hw, &device_caps);
10965 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
10966 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
10967 }
10968
10969
10970 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
10971 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
10972
10973 netdev->features |= NETIF_F_FSO |
10974 NETIF_F_FCOE_CRC;
10975
10976 netdev->vlan_features |= NETIF_F_FSO |
10977 NETIF_F_FCOE_CRC |
10978 NETIF_F_FCOE_MTU;
10979 }
10980#endif
10981 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
10982 netdev->hw_features |= NETIF_F_LRO;
10983 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
10984 netdev->features |= NETIF_F_LRO;
10985
10986 if (ixgbe_check_fw_error(adapter)) {
10987 err = -EIO;
10988 goto err_sw_init;
10989 }
10990
10991
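	/* make sure the EEPROM is good */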
10992 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
10993 e_dev_err("The EEPROM Checksum Is Not Valid\n");
10994 err = -EIO;
10995 goto err_sw_init;
10996 }
10997
10998 eth_platform_get_mac_address(&adapter->pdev->dev,
10999 adapter->hw.mac.perm_addr);
11000
11001 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
11002
11003 if (!is_valid_ether_addr(netdev->dev_addr)) {
11004 e_dev_err("invalid MAC address\n");
11005 err = -EIO;
11006 goto err_sw_init;
11007 }
11008
11009
11010 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
11011 ixgbe_mac_set_default_filter(adapter);
11012
11013 timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
11014
11015 if (ixgbe_removed(hw->hw_addr)) {
11016 err = -EIO;
11017 goto err_sw_init;
11018 }
11019 INIT_WORK(&adapter->service_task, ixgbe_service_task);
11020 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
11021 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
11022
11023 err = ixgbe_init_interrupt_scheme(adapter);
11024 if (err)
11025 goto err_sw_init;
11026
11027 for (i = 0; i < adapter->num_rx_queues; i++)
11028 u64_stats_init(&adapter->rx_ring[i]->syncp);
11029 for (i = 0; i < adapter->num_tx_queues; i++)
11030 u64_stats_init(&adapter->tx_ring[i]->syncp);
11031 for (i = 0; i < adapter->num_xdp_queues; i++)
11032 u64_stats_init(&adapter->xdp_ring[i]->syncp);
11033
11034
11035 adapter->wol = 0;
11036 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
11037 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
11038 pdev->subsystem_device);
11039 if (hw->wol_enabled)
11040 adapter->wol = IXGBE_WUFC_MAG;
11041
11042 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
11043
11044
11045 ixgbe_set_fw_version(adapter);
11046
11047
11048 if (ixgbe_pcie_from_parent(hw))
11049 ixgbe_get_parent_bus_info(adapter);
11050 else
11051 hw->mac.ops.get_bus_info(hw);
11052
	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure no warning is displayed if it can't be fixed.
	 */
11058 switch (hw->mac.type) {
11059 case ixgbe_mac_82598EB:
11060 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
11061 break;
11062 default:
11063 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
11064 break;
11065 }
11066
11067
11068 if (expected_gts > 0)
11069 ixgbe_check_minimum_link(adapter, expected_gts);
11070
11071 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
11072 if (err)
11073 strlcpy(part_str, "Unknown", sizeof(part_str));
11074 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
11075 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
11076 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
11077 part_str);
11078 else
11079 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
11080 hw->mac.type, hw->phy.type, part_str);
11081
11082 e_dev_info("%pM\n", netdev->dev_addr);
11083
11084
11085 err = hw->mac.ops.start_hw(hw);
11086 if (err == IXGBE_ERR_EEPROM_VERSION) {
11087
11088 e_dev_warn("This device is a pre-production adapter/LOM. "
11089 "Please be aware there may be issues associated "
11090 "with your hardware. If you are experiencing "
11091 "problems please contact your Intel or hardware "
11092 "representative who provided you with this "
11093 "hardware.\n");
11094 }
11095 strcpy(netdev->name, "eth%d");
11096 pci_set_drvdata(pdev, adapter);
11097 err = register_netdev(netdev);
11098 if (err)
11099 goto err_register;
11100

	/* power down the optics for 82599 SFP+ fiber */
11103 if (hw->mac.ops.disable_tx_laser)
11104 hw->mac.ops.disable_tx_laser(hw);
11105
11106
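	/* carrier off reporting is important to ethtool even BEFORE open */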
11107 netif_carrier_off(netdev);
11108
11109#ifdef CONFIG_IXGBE_DCA
11110 if (dca_add_requester(&pdev->dev) == 0) {
11111 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
11112 ixgbe_setup_dca(adapter);
11113 }
11114#endif
11115 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
11116 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
11117 for (i = 0; i < adapter->num_vfs; i++)
11118 ixgbe_vf_configuration(pdev, (i | 0x10000000));
11119 }
11120
	/* firmware expects the driver version fields to be reported as
	 * 0xFFFFFFFF, since the OS-specific version string is passed instead
	 */
11124 if (hw->mac.ops.set_fw_drv_ver)
11125 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
11126 sizeof(ixgbe_driver_version) - 1,
11127 ixgbe_driver_version);
11128
11129
11130 ixgbe_add_sanmac_netdev(netdev);
11131
11132 e_dev_info("%s\n", ixgbe_default_device_descr);
11133
11134#ifdef CONFIG_IXGBE_HWMON
11135 if (ixgbe_sysfs_init(adapter))
11136 e_err(probe, "failed to allocate sysfs resources\n");
11137#endif
11138
11139 ixgbe_dbg_adapter_init(adapter);
11140
11141
11142 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
11143 hw->mac.ops.setup_link(hw,
11144 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
11145 true);
11146
11147 ixgbe_mii_bus_init(hw);
11148
11149 return 0;
11150
11151err_register:
11152 ixgbe_release_hw_control(adapter);
11153 ixgbe_clear_interrupt_scheme(adapter);
11154err_sw_init:
11155 ixgbe_disable_sriov(adapter);
11156 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
11157 iounmap(adapter->io_addr);
11158 kfree(adapter->jump_tables[0]);
11159 kfree(adapter->mac_table);
11160 kfree(adapter->rss_key);
11161 bitmap_free(adapter->af_xdp_zc_qps);
11162err_ioremap:
11163 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11164 free_netdev(netdev);
11165err_alloc_etherdev:
11166 pci_release_mem_regions(pdev);
11167err_pci_reg:
11168err_dma:
11169 if (!adapter || disable_dev)
11170 pci_disable_device(pdev);
11171 return err;
11172}
11173
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
11183static void ixgbe_remove(struct pci_dev *pdev)
11184{
11185 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11186 struct net_device *netdev;
11187 bool disable_dev;
11188 int i;
11189
11190
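	/* if !adapter then we already cleaned up in probe */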
11191 if (!adapter)
11192 return;
11193
11194 netdev = adapter->netdev;
11195 ixgbe_dbg_adapter_exit(adapter);
11196
11197 set_bit(__IXGBE_REMOVING, &adapter->state);
11198 cancel_work_sync(&adapter->service_task);
11199
11200 if (adapter->mii_bus)
11201 mdiobus_unregister(adapter->mii_bus);
11202
11203#ifdef CONFIG_IXGBE_DCA
11204 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
11205 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
11206 dca_remove_requester(&pdev->dev);
11207 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
11208 IXGBE_DCA_CTRL_DCA_DISABLE);
11209 }
11210
11211#endif
11212#ifdef CONFIG_IXGBE_HWMON
11213 ixgbe_sysfs_exit(adapter);
11214#endif
11215
11216
11217 ixgbe_del_sanmac_netdev(netdev);
11218
11219#ifdef CONFIG_PCI_IOV
11220 ixgbe_disable_sriov(adapter);
11221#endif
11222 if (netdev->reg_state == NETREG_REGISTERED)
11223 unregister_netdev(netdev);
11224
11225 ixgbe_stop_ipsec_offload(adapter);
11226 ixgbe_clear_interrupt_scheme(adapter);
11227
11228 ixgbe_release_hw_control(adapter);
11229
11230#ifdef CONFIG_DCB
11231 kfree(adapter->ixgbe_ieee_pfc);
11232 kfree(adapter->ixgbe_ieee_ets);
11233
11234#endif
11235 iounmap(adapter->io_addr);
11236 pci_release_mem_regions(pdev);
11237
11238 e_dev_info("complete\n");
11239
11240 for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
11241 if (adapter->jump_tables[i]) {
11242 kfree(adapter->jump_tables[i]->input);
11243 kfree(adapter->jump_tables[i]->mask);
11244 }
11245 kfree(adapter->jump_tables[i]);
11246 }
11247
11248 kfree(adapter->mac_table);
11249 kfree(adapter->rss_key);
11250 bitmap_free(adapter->af_xdp_zc_qps);
11251 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11252 free_netdev(netdev);
11253
11254 pci_disable_pcie_error_reporting(pdev);
11255
11256 if (disable_dev)
11257 pci_disable_device(pdev);
11258}
11259
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
11268static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
11269 pci_channel_state_t state)
11270{
11271 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11272 struct net_device *netdev = adapter->netdev;
11273
11274#ifdef CONFIG_PCI_IOV
11275 struct ixgbe_hw *hw = &adapter->hw;
11276 struct pci_dev *bdev, *vfdev;
11277 u32 dw0, dw1, dw2, dw3;
11278 int vf, pos;
11279 u16 req_id, pf_func;
11280
11281 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
11282 adapter->num_vfs == 0)
11283 goto skip_bad_vf_detection;
11284
11285 bdev = pdev->bus->self;
11286 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
11287 bdev = bdev->bus->self;
11288
11289 if (!bdev)
11290 goto skip_bad_vf_detection;
11291
11292 pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
11293 if (!pos)
11294 goto skip_bad_vf_detection;
11295
11296 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
11297 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
11298 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
11299 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
11300 if (ixgbe_removed(hw->hw_addr))
11301 goto skip_bad_vf_detection;
11302
11303 req_id = dw1 >> 16;
11304
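	/* a set bit 7 of the requestor ID indicates the error came from a VF */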
11305 if (!(req_id & 0x0080))
11306 goto skip_bad_vf_detection;
11307
11308 pf_func = req_id & 0x01;
11309 if ((pf_func & 1) == (pdev->devfn & 1)) {
11310 unsigned int device_id;
11311
11312 vf = (req_id & 0x7F) >> 1;
11313 e_dev_err("VF %d has caused a PCIe error\n", vf);
11314 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
11315 "%8.8x\tdw3: %8.8x\n",
11316 dw0, dw1, dw2, dw3);
11317 switch (adapter->hw.mac.type) {
11318 case ixgbe_mac_82599EB:
11319 device_id = IXGBE_82599_VF_DEVICE_ID;
11320 break;
11321 case ixgbe_mac_X540:
11322 device_id = IXGBE_X540_VF_DEVICE_ID;
11323 break;
11324 case ixgbe_mac_X550:
11325 device_id = IXGBE_DEV_ID_X550_VF;
11326 break;
11327 case ixgbe_mac_X550EM_x:
11328 device_id = IXGBE_DEV_ID_X550EM_X_VF;
11329 break;
11330 case ixgbe_mac_x550em_a:
11331 device_id = IXGBE_DEV_ID_X550EM_A_VF;
11332 break;
11333 default:
11334 device_id = 0;
11335 break;
11336 }

		/* Find the pci device of the offending VF */
11339 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
11340 while (vfdev) {
11341 if (vfdev->devfn == (req_id & 0xFF))
11342 break;
11343 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
11344 device_id, vfdev);
11345 }
11346
		/* There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
11351 if (vfdev) {
11352 pcie_flr(vfdev);
			/* Free device reference count */
11354 pci_dev_put(vfdev);
11355 }
11356 }
11357

	/* Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
11364 adapter->vferr_refcount++;
11365
11366 return PCI_ERS_RESULT_RECOVERED;
11367
11368skip_bad_vf_detection:
11369#endif
11370 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
11371 return PCI_ERS_RESULT_DISCONNECT;
11372
11373 if (!netif_device_present(netdev))
11374 return PCI_ERS_RESULT_DISCONNECT;
11375
11376 rtnl_lock();
11377 netif_device_detach(netdev);
11378
11379 if (netif_running(netdev))
11380 ixgbe_close_suspend(adapter);
11381
11382 if (state == pci_channel_io_perm_failure) {
11383 rtnl_unlock();
11384 return PCI_ERS_RESULT_DISCONNECT;
11385 }
11386
11387 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
11388 pci_disable_device(pdev);
11389 rtnl_unlock();
11390
	/* Request a slot reset. */
11392 return PCI_ERS_RESULT_NEED_RESET;
11393}
11394
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
11401static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
11402{
11403 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11404 pci_ers_result_t result;
11405
11406 if (pci_enable_device_mem(pdev)) {
11407 e_err(probe, "Cannot re-enable PCI device after reset.\n");
11408 result = PCI_ERS_RESULT_DISCONNECT;
11409 } else {
11410 smp_mb__before_atomic();
11411 clear_bit(__IXGBE_DISABLED, &adapter->state);
11412 adapter->hw.hw_addr = adapter->io_addr;
11413 pci_set_master(pdev);
11414 pci_restore_state(pdev);
11415 pci_save_state(pdev);
11416
11417 pci_wake_from_d3(pdev, false);
11418
11419 ixgbe_reset(adapter);
11420 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
11421 result = PCI_ERS_RESULT_RECOVERED;
11422 }
11423
11424 return result;
11425}
11426
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
11434static void ixgbe_io_resume(struct pci_dev *pdev)
11435{
11436 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11437 struct net_device *netdev = adapter->netdev;
11438
11439#ifdef CONFIG_PCI_IOV
11440 if (adapter->vferr_refcount) {
11441 e_info(drv, "Resuming after VF err\n");
11442 adapter->vferr_refcount--;
11443 return;
11444 }
11445
11446#endif
11447 rtnl_lock();
11448 if (netif_running(netdev))
11449 ixgbe_open(netdev);
11450
11451 netif_device_attach(netdev);
11452 rtnl_unlock();
11453}
11454
11455static const struct pci_error_handlers ixgbe_err_handler = {
11456 .error_detected = ixgbe_io_error_detected,
11457 .slot_reset = ixgbe_io_slot_reset,
11458 .resume = ixgbe_io_resume,
11459};
11460
11461static struct pci_driver ixgbe_driver = {
11462 .name = ixgbe_driver_name,
11463 .id_table = ixgbe_pci_tbl,
11464 .probe = ixgbe_probe,
11465 .remove = ixgbe_remove,
11466#ifdef CONFIG_PM
11467 .suspend = ixgbe_suspend,
11468 .resume = ixgbe_resume,
11469#endif
11470 .shutdown = ixgbe_shutdown,
11471 .sriov_configure = ixgbe_pci_sriov_configure,
11472 .err_handler = &ixgbe_err_handler
11473};
11474
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
11481static int __init ixgbe_init_module(void)
11482{
11483 int ret;
11484 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
11485 pr_info("%s\n", ixgbe_copyright);
11486
11487 ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
11488 if (!ixgbe_wq) {
11489 pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
11490 return -ENOMEM;
11491 }
11492
11493 ixgbe_dbg_init();
11494
11495 ret = pci_register_driver(&ixgbe_driver);
11496 if (ret) {
11497 destroy_workqueue(ixgbe_wq);
11498 ixgbe_dbg_exit();
11499 return ret;
11500 }
11501
11502#ifdef CONFIG_IXGBE_DCA
11503 dca_register_notify(&dca_notifier);
11504#endif
11505
11506 return 0;
11507}
11508
11509module_init(ixgbe_init_module);
11510
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
11517static void __exit ixgbe_exit_module(void)
11518{
11519#ifdef CONFIG_IXGBE_DCA
11520 dca_unregister_notify(&dca_notifier);
11521#endif
11522 pci_unregister_driver(&ixgbe_driver);
11523
11524 ixgbe_dbg_exit();
11525 if (ixgbe_wq) {
11526 destroy_workqueue(ixgbe_wq);
11527 ixgbe_wq = NULL;
11528 }
11529}
11530
11531#ifdef CONFIG_IXGBE_DCA
11532static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
11533 void *p)
11534{
11535 int ret_val;
11536
11537 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
11538 __ixgbe_notify_dca);
11539
11540 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
11541}
11542
11543#endif
11544
11545module_exit(ixgbe_exit_module);
11546
11547
11548