1
2
3
4#include <linux/types.h>
5#include <linux/module.h>
6#include <linux/pci.h>
7#include <linux/netdevice.h>
8#include <linux/vmalloc.h>
9#include <linux/string.h>
10#include <linux/in.h>
11#include <linux/interrupt.h>
12#include <linux/ip.h>
13#include <linux/tcp.h>
14#include <linux/sctp.h>
15#include <linux/pkt_sched.h>
16#include <linux/ipv6.h>
17#include <linux/slab.h>
18#include <net/checksum.h>
19#include <net/ip6_checksum.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
22#include <linux/if.h>
23#include <linux/if_vlan.h>
24#include <linux/if_macvlan.h>
25#include <linux/if_bridge.h>
26#include <linux/prefetch.h>
27#include <linux/bpf.h>
28#include <linux/bpf_trace.h>
29#include <linux/atomic.h>
30#include <linux/numa.h>
31#include <scsi/fc/fc_fcoe.h>
32#include <net/udp_tunnel.h>
33#include <net/pkt_cls.h>
34#include <net/tc_act/tc_gact.h>
35#include <net/tc_act/tc_mirred.h>
36#include <net/vxlan.h>
37#include <net/mpls.h>
38#include <net/xdp_sock.h>
39#include <net/xfrm.h>
40
41#include "ixgbe.h"
42#include "ixgbe_common.h"
43#include "ixgbe_dcb_82599.h"
44#include "ixgbe_phy.h"
45#include "ixgbe_sriov.h"
46#include "ixgbe_model.h"
47#include "ixgbe_txrx_common.h"
48
49char ixgbe_driver_name[] = "ixgbe";
50static const char ixgbe_driver_string[] =
51 "Intel(R) 10 Gigabit PCI Express Network Driver";
52#ifdef IXGBE_FCOE
53char ixgbe_default_device_descr[] =
54 "Intel(R) 10 Gigabit Network Connection";
55#else
56static char ixgbe_default_device_descr[] =
57 "Intel(R) 10 Gigabit Network Connection";
58#endif
59#define DRV_VERSION "5.1.0-k"
60const char ixgbe_driver_version[] = DRV_VERSION;
61static const char ixgbe_copyright[] =
62 "Copyright (c) 1999-2016 Intel Corporation.";
63
64static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
65
66static const struct ixgbe_info *ixgbe_info_tbl[] = {
67 [board_82598] = &ixgbe_82598_info,
68 [board_82599] = &ixgbe_82599_info,
69 [board_X540] = &ixgbe_X540_info,
70 [board_X550] = &ixgbe_X550_info,
71 [board_X550EM_x] = &ixgbe_X550EM_x_info,
72 [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
73 [board_x550em_a] = &ixgbe_x550em_a_info,
74 [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
75};
76
77
78
79
80
81
82
83
84
85static const struct pci_device_id ixgbe_pci_tbl[] = {
86 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
87 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
88 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
89 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
90 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
91 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
92 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
93 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
94 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
95 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
96 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
97 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
98 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
99 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
102 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
104 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
105 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
106 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
107 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
108 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
115 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
117 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
118 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
119 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
120 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
121 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
122 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
123 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
124 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
125 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
126 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
127 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
128 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
129 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
130 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
131 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
132 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
133
134 {0, }
135};
136MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
137
138#ifdef CONFIG_IXGBE_DCA
139static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
140 void *p);
141static struct notifier_block dca_notifier = {
142 .notifier_call = ixgbe_notify_dca,
143 .next = NULL,
144 .priority = 0
145};
146#endif
147
148#ifdef CONFIG_PCI_IOV
149static unsigned int max_vfs;
150module_param(max_vfs, uint, 0);
151MODULE_PARM_DESC(max_vfs,
152 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
153#endif
154
155static unsigned int allow_unsupported_sfp;
156module_param(allow_unsupported_sfp, uint, 0);
157MODULE_PARM_DESC(allow_unsupported_sfp,
158 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
159
160#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
161static int debug = -1;
162module_param(debug, int, 0);
163MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
164
165MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
166MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
167MODULE_LICENSE("GPL v2");
168MODULE_VERSION(DRV_VERSION);
169
170static struct workqueue_struct *ixgbe_wq;
171
172static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
173static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
174
175static const struct net_device_ops ixgbe_netdev_ops;
176
177static bool netif_is_ixgbe(struct net_device *dev)
178{
179 return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
180}
181
182static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
183 u32 reg, u16 *value)
184{
185 struct pci_dev *parent_dev;
186 struct pci_bus *parent_bus;
187
188 parent_bus = adapter->pdev->bus->parent;
189 if (!parent_bus)
190 return -1;
191
192 parent_dev = parent_bus->self;
193 if (!parent_dev)
194 return -1;
195
196 if (!pci_is_pcie(parent_dev))
197 return -1;
198
199 pcie_capability_read_word(parent_dev, reg, value);
200 if (*value == IXGBE_FAILED_READ_CFG_WORD &&
201 ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
202 return -1;
203 return 0;
204}
205
206static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
207{
208 struct ixgbe_hw *hw = &adapter->hw;
209 u16 link_status = 0;
210 int err;
211
212 hw->bus.type = ixgbe_bus_type_pci_express;
213
214
215
216
217 err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);
218
219
220 if (err)
221 return err;
222
223 hw->bus.width = ixgbe_convert_bus_width(link_status);
224 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
225
226 return 0;
227}
228
229
230
231
232
233
234
235
236
237
238static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
239{
240 switch (hw->device_id) {
241 case IXGBE_DEV_ID_82599_SFP_SF_QP:
242 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
243 return true;
244 default:
245 return false;
246 }
247}
248
249static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
250 int expected_gts)
251{
252 struct ixgbe_hw *hw = &adapter->hw;
253 struct pci_dev *pdev;
254
255
256
257
258
259 if (hw->bus.type == ixgbe_bus_type_internal)
260 return;
261
262
263 if (ixgbe_pcie_from_parent(&adapter->hw))
264 pdev = adapter->pdev->bus->parent->self;
265 else
266 pdev = adapter->pdev;
267
268 pcie_print_link_status(pdev);
269}
270
271static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
272{
273 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
274 !test_bit(__IXGBE_REMOVING, &adapter->state) &&
275 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
276 queue_work(ixgbe_wq, &adapter->service_task);
277}
278
279static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
280{
281 struct ixgbe_adapter *adapter = hw->back;
282
283 if (!hw->hw_addr)
284 return;
285 hw->hw_addr = NULL;
286 e_dev_err("Adapter removed\n");
287 if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
288 ixgbe_service_event_schedule(adapter);
289}
290
291static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
292{
293 u8 __iomem *reg_addr;
294 u32 value;
295 int i;
296
297 reg_addr = READ_ONCE(hw->hw_addr);
298 if (ixgbe_removed(reg_addr))
299 return IXGBE_FAILED_READ_REG;
300
301
302
303
304
305 for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
306 value = readl(reg_addr + IXGBE_STATUS);
307 if (value != IXGBE_FAILED_READ_REG)
308 break;
309 mdelay(3);
310 }
311
312 if (value == IXGBE_FAILED_READ_REG)
313 ixgbe_remove_adapter(hw);
314 else
315 value = readl(reg_addr + reg);
316 return value;
317}
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
333{
334 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
335 u32 value;
336
337 if (ixgbe_removed(reg_addr))
338 return IXGBE_FAILED_READ_REG;
339 if (unlikely(hw->phy.nw_mng_if_sel &
340 IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
341 struct ixgbe_adapter *adapter;
342 int i;
343
344 for (i = 0; i < 200; ++i) {
345 value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
346 if (likely(!value))
347 goto writes_completed;
348 if (value == IXGBE_FAILED_READ_REG) {
349 ixgbe_remove_adapter(hw);
350 return IXGBE_FAILED_READ_REG;
351 }
352 udelay(5);
353 }
354
355 adapter = hw->back;
356 e_warn(hw, "register writes incomplete %08x\n", value);
357 }
358
359writes_completed:
360 value = readl(reg_addr + reg);
361 if (unlikely(value == IXGBE_FAILED_READ_REG))
362 value = ixgbe_check_remove(hw, reg);
363 return value;
364}
365
366static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
367{
368 u16 value;
369
370 pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
371 if (value == IXGBE_FAILED_READ_CFG_WORD) {
372 ixgbe_remove_adapter(hw);
373 return true;
374 }
375 return false;
376}
377
378u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
379{
380 struct ixgbe_adapter *adapter = hw->back;
381 u16 value;
382
383 if (ixgbe_removed(hw->hw_addr))
384 return IXGBE_FAILED_READ_CFG_WORD;
385 pci_read_config_word(adapter->pdev, reg, &value);
386 if (value == IXGBE_FAILED_READ_CFG_WORD &&
387 ixgbe_check_cfg_remove(hw, adapter->pdev))
388 return IXGBE_FAILED_READ_CFG_WORD;
389 return value;
390}
391
392#ifdef CONFIG_PCI_IOV
393static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
394{
395 struct ixgbe_adapter *adapter = hw->back;
396 u32 value;
397
398 if (ixgbe_removed(hw->hw_addr))
399 return IXGBE_FAILED_READ_CFG_DWORD;
400 pci_read_config_dword(adapter->pdev, reg, &value);
401 if (value == IXGBE_FAILED_READ_CFG_DWORD &&
402 ixgbe_check_cfg_remove(hw, adapter->pdev))
403 return IXGBE_FAILED_READ_CFG_DWORD;
404 return value;
405}
406#endif
407
408void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
409{
410 struct ixgbe_adapter *adapter = hw->back;
411
412 if (ixgbe_removed(hw->hw_addr))
413 return;
414 pci_write_config_word(adapter->pdev, reg, value);
415}
416
417static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
418{
419 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
420
421
422 smp_mb__before_atomic();
423 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
424}
425
426struct ixgbe_reg_info {
427 u32 ofs;
428 char *name;
429};
430
431static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
432
433
434 {IXGBE_CTRL, "CTRL"},
435 {IXGBE_STATUS, "STATUS"},
436 {IXGBE_CTRL_EXT, "CTRL_EXT"},
437
438
439 {IXGBE_EICR, "EICR"},
440
441
442 {IXGBE_SRRCTL(0), "SRRCTL"},
443 {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
444 {IXGBE_RDLEN(0), "RDLEN"},
445 {IXGBE_RDH(0), "RDH"},
446 {IXGBE_RDT(0), "RDT"},
447 {IXGBE_RXDCTL(0), "RXDCTL"},
448 {IXGBE_RDBAL(0), "RDBAL"},
449 {IXGBE_RDBAH(0), "RDBAH"},
450
451
452 {IXGBE_TDBAL(0), "TDBAL"},
453 {IXGBE_TDBAH(0), "TDBAH"},
454 {IXGBE_TDLEN(0), "TDLEN"},
455 {IXGBE_TDH(0), "TDH"},
456 {IXGBE_TDT(0), "TDT"},
457 {IXGBE_TXDCTL(0), "TXDCTL"},
458
459
460 { .name = NULL }
461};
462
463
464
465
466
467static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
468{
469 int i;
470 char rname[16];
471 u32 regs[64];
472
473 switch (reginfo->ofs) {
474 case IXGBE_SRRCTL(0):
475 for (i = 0; i < 64; i++)
476 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
477 break;
478 case IXGBE_DCA_RXCTRL(0):
479 for (i = 0; i < 64; i++)
480 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
481 break;
482 case IXGBE_RDLEN(0):
483 for (i = 0; i < 64; i++)
484 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
485 break;
486 case IXGBE_RDH(0):
487 for (i = 0; i < 64; i++)
488 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
489 break;
490 case IXGBE_RDT(0):
491 for (i = 0; i < 64; i++)
492 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
493 break;
494 case IXGBE_RXDCTL(0):
495 for (i = 0; i < 64; i++)
496 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
497 break;
498 case IXGBE_RDBAL(0):
499 for (i = 0; i < 64; i++)
500 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
501 break;
502 case IXGBE_RDBAH(0):
503 for (i = 0; i < 64; i++)
504 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
505 break;
506 case IXGBE_TDBAL(0):
507 for (i = 0; i < 64; i++)
508 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
509 break;
510 case IXGBE_TDBAH(0):
511 for (i = 0; i < 64; i++)
512 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
513 break;
514 case IXGBE_TDLEN(0):
515 for (i = 0; i < 64; i++)
516 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
517 break;
518 case IXGBE_TDH(0):
519 for (i = 0; i < 64; i++)
520 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
521 break;
522 case IXGBE_TDT(0):
523 for (i = 0; i < 64; i++)
524 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
525 break;
526 case IXGBE_TXDCTL(0):
527 for (i = 0; i < 64; i++)
528 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
529 break;
530 default:
531 pr_info("%-15s %08x\n",
532 reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
533 return;
534 }
535
536 i = 0;
537 while (i < 64) {
538 int j;
539 char buf[9 * 8 + 1];
540 char *p = buf;
541
542 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
543 for (j = 0; j < 8; j++)
544 p += sprintf(p, " %08x", regs[i++]);
545 pr_err("%-15s%s\n", rname, buf);
546 }
547
548}
549
550static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
551{
552 struct ixgbe_tx_buffer *tx_buffer;
553
554 tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
555 pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
556 n, ring->next_to_use, ring->next_to_clean,
557 (u64)dma_unmap_addr(tx_buffer, dma),
558 dma_unmap_len(tx_buffer, len),
559 tx_buffer->next_to_watch,
560 (u64)tx_buffer->time_stamp);
561}
562
563
564
565
566static void ixgbe_dump(struct ixgbe_adapter *adapter)
567{
568 struct net_device *netdev = adapter->netdev;
569 struct ixgbe_hw *hw = &adapter->hw;
570 struct ixgbe_reg_info *reginfo;
571 int n = 0;
572 struct ixgbe_ring *ring;
573 struct ixgbe_tx_buffer *tx_buffer;
574 union ixgbe_adv_tx_desc *tx_desc;
575 struct my_u0 { u64 a; u64 b; } *u0;
576 struct ixgbe_ring *rx_ring;
577 union ixgbe_adv_rx_desc *rx_desc;
578 struct ixgbe_rx_buffer *rx_buffer_info;
579 int i = 0;
580
581 if (!netif_msg_hw(adapter))
582 return;
583
584
585 if (netdev) {
586 dev_info(&adapter->pdev->dev, "Net device Info\n");
587 pr_info("Device Name state "
588 "trans_start\n");
589 pr_info("%-15s %016lX %016lX\n",
590 netdev->name,
591 netdev->state,
592 dev_trans_start(netdev));
593 }
594
595
596 dev_info(&adapter->pdev->dev, "Register Dump\n");
597 pr_info(" Register Name Value\n");
598 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
599 reginfo->name; reginfo++) {
600 ixgbe_regdump(hw, reginfo);
601 }
602
603
604 if (!netdev || !netif_running(netdev))
605 return;
606
607 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
608 pr_info(" %s %s %s %s\n",
609 "Queue [NTU] [NTC] [bi(ntc)->dma ]",
610 "leng", "ntw", "timestamp");
611 for (n = 0; n < adapter->num_tx_queues; n++) {
612 ring = adapter->tx_ring[n];
613 ixgbe_print_buffer(ring, n);
614 }
615
616 for (n = 0; n < adapter->num_xdp_queues; n++) {
617 ring = adapter->xdp_ring[n];
618 ixgbe_print_buffer(ring, n);
619 }
620
621
622 if (!netif_msg_tx_done(adapter))
623 goto rx_ring_summary;
624
625 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662 for (n = 0; n < adapter->num_tx_queues; n++) {
663 ring = adapter->tx_ring[n];
664 pr_info("------------------------------------\n");
665 pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
666 pr_info("------------------------------------\n");
667 pr_info("%s%s %s %s %s %s\n",
668 "T [desc] [address 63:0 ] ",
669 "[PlPOIdStDDt Ln] [bi->dma ] ",
670 "leng", "ntw", "timestamp", "bi->skb");
671
672 for (i = 0; ring->desc && (i < ring->count); i++) {
673 tx_desc = IXGBE_TX_DESC(ring, i);
674 tx_buffer = &ring->tx_buffer_info[i];
675 u0 = (struct my_u0 *)tx_desc;
676 if (dma_unmap_len(tx_buffer, len) > 0) {
677 const char *ring_desc;
678
679 if (i == ring->next_to_use &&
680 i == ring->next_to_clean)
681 ring_desc = " NTC/U";
682 else if (i == ring->next_to_use)
683 ring_desc = " NTU";
684 else if (i == ring->next_to_clean)
685 ring_desc = " NTC";
686 else
687 ring_desc = "";
688 pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
689 i,
690 le64_to_cpu((__force __le64)u0->a),
691 le64_to_cpu((__force __le64)u0->b),
692 (u64)dma_unmap_addr(tx_buffer, dma),
693 dma_unmap_len(tx_buffer, len),
694 tx_buffer->next_to_watch,
695 (u64)tx_buffer->time_stamp,
696 tx_buffer->skb,
697 ring_desc);
698
699 if (netif_msg_pktdata(adapter) &&
700 tx_buffer->skb)
701 print_hex_dump(KERN_INFO, "",
702 DUMP_PREFIX_ADDRESS, 16, 1,
703 tx_buffer->skb->data,
704 dma_unmap_len(tx_buffer, len),
705 true);
706 }
707 }
708 }
709
710
711rx_ring_summary:
712 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
713 pr_info("Queue [NTU] [NTC]\n");
714 for (n = 0; n < adapter->num_rx_queues; n++) {
715 rx_ring = adapter->rx_ring[n];
716 pr_info("%5d %5X %5X\n",
717 n, rx_ring->next_to_use, rx_ring->next_to_clean);
718 }
719
720
721 if (!netif_msg_rx_status(adapter))
722 return;
723
724 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771 for (n = 0; n < adapter->num_rx_queues; n++) {
772 rx_ring = adapter->rx_ring[n];
773 pr_info("------------------------------------\n");
774 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
775 pr_info("------------------------------------\n");
776 pr_info("%s%s%s\n",
777 "R [desc] [ PktBuf A0] ",
778 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
779 "<-- Adv Rx Read format");
780 pr_info("%s%s%s\n",
781 "RWB[desc] [PcsmIpSHl PtRs] ",
782 "[vl er S cks ln] ---------------- [bi->skb ] ",
783 "<-- Adv Rx Write-Back format");
784
785 for (i = 0; i < rx_ring->count; i++) {
786 const char *ring_desc;
787
788 if (i == rx_ring->next_to_use)
789 ring_desc = " NTU";
790 else if (i == rx_ring->next_to_clean)
791 ring_desc = " NTC";
792 else
793 ring_desc = "";
794
795 rx_buffer_info = &rx_ring->rx_buffer_info[i];
796 rx_desc = IXGBE_RX_DESC(rx_ring, i);
797 u0 = (struct my_u0 *)rx_desc;
798 if (rx_desc->wb.upper.length) {
799
800 pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
801 i,
802 le64_to_cpu((__force __le64)u0->a),
803 le64_to_cpu((__force __le64)u0->b),
804 rx_buffer_info->skb,
805 ring_desc);
806 } else {
807 pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
808 i,
809 le64_to_cpu((__force __le64)u0->a),
810 le64_to_cpu((__force __le64)u0->b),
811 (u64)rx_buffer_info->dma,
812 rx_buffer_info->skb,
813 ring_desc);
814
815 if (netif_msg_pktdata(adapter) &&
816 rx_buffer_info->dma) {
817 print_hex_dump(KERN_INFO, "",
818 DUMP_PREFIX_ADDRESS, 16, 1,
819 page_address(rx_buffer_info->page) +
820 rx_buffer_info->page_offset,
821 ixgbe_rx_bufsz(rx_ring), true);
822 }
823 }
824 }
825 }
826}
827
828static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
829{
830 u32 ctrl_ext;
831
832
833 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
834 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
835 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
836}
837
838static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
839{
840 u32 ctrl_ext;
841
842
843 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
844 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
845 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
846}
847
848
849
850
851
852
853
854
855
856static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
857 u8 queue, u8 msix_vector)
858{
859 u32 ivar, index;
860 struct ixgbe_hw *hw = &adapter->hw;
861 switch (hw->mac.type) {
862 case ixgbe_mac_82598EB:
863 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
864 if (direction == -1)
865 direction = 0;
866 index = (((direction * 64) + queue) >> 2) & 0x1F;
867 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
868 ivar &= ~(0xFF << (8 * (queue & 0x3)));
869 ivar |= (msix_vector << (8 * (queue & 0x3)));
870 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
871 break;
872 case ixgbe_mac_82599EB:
873 case ixgbe_mac_X540:
874 case ixgbe_mac_X550:
875 case ixgbe_mac_X550EM_x:
876 case ixgbe_mac_x550em_a:
877 if (direction == -1) {
878
879 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
880 index = ((queue & 1) * 8);
881 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
882 ivar &= ~(0xFF << index);
883 ivar |= (msix_vector << index);
884 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
885 break;
886 } else {
887
888 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
889 index = ((16 * (queue & 1)) + (8 * direction));
890 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
891 ivar &= ~(0xFF << index);
892 ivar |= (msix_vector << index);
893 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
894 break;
895 }
896 default:
897 break;
898 }
899}
900
901void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
902 u64 qmask)
903{
904 u32 mask;
905
906 switch (adapter->hw.mac.type) {
907 case ixgbe_mac_82598EB:
908 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
909 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
910 break;
911 case ixgbe_mac_82599EB:
912 case ixgbe_mac_X540:
913 case ixgbe_mac_X550:
914 case ixgbe_mac_X550EM_x:
915 case ixgbe_mac_x550em_a:
916 mask = (qmask & 0xFFFFFFFF);
917 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
918 mask = (qmask >> 32);
919 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
920 break;
921 default:
922 break;
923 }
924}
925
926static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
927{
928 struct ixgbe_hw *hw = &adapter->hw;
929 struct ixgbe_hw_stats *hwstats = &adapter->stats;
930 int i;
931 u32 data;
932
933 if ((hw->fc.current_mode != ixgbe_fc_full) &&
934 (hw->fc.current_mode != ixgbe_fc_rx_pause))
935 return;
936
937 switch (hw->mac.type) {
938 case ixgbe_mac_82598EB:
939 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
940 break;
941 default:
942 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
943 }
944 hwstats->lxoffrxc += data;
945
946
947 if (!data)
948 return;
949
950 for (i = 0; i < adapter->num_tx_queues; i++)
951 clear_bit(__IXGBE_HANG_CHECK_ARMED,
952 &adapter->tx_ring[i]->state);
953
954 for (i = 0; i < adapter->num_xdp_queues; i++)
955 clear_bit(__IXGBE_HANG_CHECK_ARMED,
956 &adapter->xdp_ring[i]->state);
957}
958
959static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
960{
961 struct ixgbe_hw *hw = &adapter->hw;
962 struct ixgbe_hw_stats *hwstats = &adapter->stats;
963 u32 xoff[8] = {0};
964 u8 tc;
965 int i;
966 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
967
968 if (adapter->ixgbe_ieee_pfc)
969 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
970
971 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
972 ixgbe_update_xoff_rx_lfc(adapter);
973 return;
974 }
975
976
977 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
978 u32 pxoffrxc;
979
980 switch (hw->mac.type) {
981 case ixgbe_mac_82598EB:
982 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
983 break;
984 default:
985 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
986 }
987 hwstats->pxoffrxc[i] += pxoffrxc;
988
989 tc = netdev_get_prio_tc_map(adapter->netdev, i);
990 xoff[tc] += pxoffrxc;
991 }
992
993
994 for (i = 0; i < adapter->num_tx_queues; i++) {
995 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
996
997 tc = tx_ring->dcb_tc;
998 if (xoff[tc])
999 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1000 }
1001
1002 for (i = 0; i < adapter->num_xdp_queues; i++) {
1003 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
1004
1005 tc = xdp_ring->dcb_tc;
1006 if (xoff[tc])
1007 clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
1008 }
1009}
1010
1011static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
1012{
1013 return ring->stats.packets;
1014}
1015
1016static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
1017{
1018 unsigned int head, tail;
1019
1020 head = ring->next_to_clean;
1021 tail = ring->next_to_use;
1022
1023 return ((head <= tail) ? tail : tail + ring->count) - head;
1024}
1025
1026static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1027{
1028 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1029 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1030 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1031
1032 clear_check_for_tx_hang(tx_ring);
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046 if (tx_done_old == tx_done && tx_pending)
1047
1048 return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
1049 &tx_ring->state);
1050
1051 tx_ring->tx_stats.tx_done_old = tx_done;
1052
1053 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1054
1055 return false;
1056}
1057
1058
1059
1060
1061
1062static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
1063{
1064
1065
1066 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1067 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
1068 e_warn(drv, "initiating reset due to tx timeout\n");
1069 ixgbe_service_event_schedule(adapter);
1070 }
1071}
1072
1073
1074
1075
1076
1077
1078
1079static int ixgbe_tx_maxrate(struct net_device *netdev,
1080 int queue_index, u32 maxrate)
1081{
1082 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1083 struct ixgbe_hw *hw = &adapter->hw;
1084 u32 bcnrc_val = ixgbe_link_mbps(adapter);
1085
1086 if (!maxrate)
1087 return 0;
1088
1089
1090 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1091 bcnrc_val /= maxrate;
1092
1093
1094 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1095 IXGBE_RTTBCNRC_RF_DEC_MASK;
1096
1097
1098 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1099
1100 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
1101 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1102
1103 return 0;
1104}
1105
1106
1107
1108
1109
1110
1111
1112static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1113 struct ixgbe_ring *tx_ring, int napi_budget)
1114{
1115 struct ixgbe_adapter *adapter = q_vector->adapter;
1116 struct ixgbe_tx_buffer *tx_buffer;
1117 union ixgbe_adv_tx_desc *tx_desc;
1118 unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
1119 unsigned int budget = q_vector->tx.work_limit;
1120 unsigned int i = tx_ring->next_to_clean;
1121
1122 if (test_bit(__IXGBE_DOWN, &adapter->state))
1123 return true;
1124
1125 tx_buffer = &tx_ring->tx_buffer_info[i];
1126 tx_desc = IXGBE_TX_DESC(tx_ring, i);
1127 i -= tx_ring->count;
1128
1129 do {
1130 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
1131
1132
1133 if (!eop_desc)
1134 break;
1135
1136
1137 smp_rmb();
1138
1139
1140 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1141 break;
1142
1143
1144 tx_buffer->next_to_watch = NULL;
1145
1146
1147 total_bytes += tx_buffer->bytecount;
1148 total_packets += tx_buffer->gso_segs;
1149 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
1150 total_ipsec++;
1151
1152
1153 if (ring_is_xdp(tx_ring))
1154 xdp_return_frame(tx_buffer->xdpf);
1155 else
1156 napi_consume_skb(tx_buffer->skb, napi_budget);
1157
1158
1159 dma_unmap_single(tx_ring->dev,
1160 dma_unmap_addr(tx_buffer, dma),
1161 dma_unmap_len(tx_buffer, len),
1162 DMA_TO_DEVICE);
1163
1164
1165 dma_unmap_len_set(tx_buffer, len, 0);
1166
1167
1168 while (tx_desc != eop_desc) {
1169 tx_buffer++;
1170 tx_desc++;
1171 i++;
1172 if (unlikely(!i)) {
1173 i -= tx_ring->count;
1174 tx_buffer = tx_ring->tx_buffer_info;
1175 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1176 }
1177
1178
1179 if (dma_unmap_len(tx_buffer, len)) {
1180 dma_unmap_page(tx_ring->dev,
1181 dma_unmap_addr(tx_buffer, dma),
1182 dma_unmap_len(tx_buffer, len),
1183 DMA_TO_DEVICE);
1184 dma_unmap_len_set(tx_buffer, len, 0);
1185 }
1186 }
1187
1188
1189 tx_buffer++;
1190 tx_desc++;
1191 i++;
1192 if (unlikely(!i)) {
1193 i -= tx_ring->count;
1194 tx_buffer = tx_ring->tx_buffer_info;
1195 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1196 }
1197
1198
1199 prefetch(tx_desc);
1200
1201
1202 budget--;
1203 } while (likely(budget));
1204
1205 i += tx_ring->count;
1206 tx_ring->next_to_clean = i;
1207 u64_stats_update_begin(&tx_ring->syncp);
1208 tx_ring->stats.bytes += total_bytes;
1209 tx_ring->stats.packets += total_packets;
1210 u64_stats_update_end(&tx_ring->syncp);
1211 q_vector->tx.total_bytes += total_bytes;
1212 q_vector->tx.total_packets += total_packets;
1213 adapter->tx_ipsec += total_ipsec;
1214
1215 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1216
1217 struct ixgbe_hw *hw = &adapter->hw;
1218 e_err(drv, "Detected Tx Unit Hang %s\n"
1219 " Tx Queue <%d>\n"
1220 " TDH, TDT <%x>, <%x>\n"
1221 " next_to_use <%x>\n"
1222 " next_to_clean <%x>\n"
1223 "tx_buffer_info[next_to_clean]\n"
1224 " time_stamp <%lx>\n"
1225 " jiffies <%lx>\n",
1226 ring_is_xdp(tx_ring) ? "(XDP)" : "",
1227 tx_ring->queue_index,
1228 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1229 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1230 tx_ring->next_to_use, i,
1231 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1232
1233 if (!ring_is_xdp(tx_ring))
1234 netif_stop_subqueue(tx_ring->netdev,
1235 tx_ring->queue_index);
1236
1237 e_info(probe,
1238 "tx hang %d detected on queue %d, resetting adapter\n",
1239 adapter->tx_timeout_count + 1, tx_ring->queue_index);
1240
1241
1242 ixgbe_tx_timeout_reset(adapter);
1243
1244
1245 return true;
1246 }
1247
1248 if (ring_is_xdp(tx_ring))
1249 return !!budget;
1250
1251 netdev_tx_completed_queue(txring_txq(tx_ring),
1252 total_packets, total_bytes);
1253
1254#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
1255 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1256 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1257
1258
1259
1260 smp_mb();
1261 if (__netif_subqueue_stopped(tx_ring->netdev,
1262 tx_ring->queue_index)
1263 && !test_bit(__IXGBE_DOWN, &adapter->state)) {
1264 netif_wake_subqueue(tx_ring->netdev,
1265 tx_ring->queue_index);
1266 ++tx_ring->tx_stats.restart_queue;
1267 }
1268 }
1269
1270 return !!budget;
1271}
1272
1273#ifdef CONFIG_IXGBE_DCA
1274static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
1275 struct ixgbe_ring *tx_ring,
1276 int cpu)
1277{
1278 struct ixgbe_hw *hw = &adapter->hw;
1279 u32 txctrl = 0;
1280 u16 reg_offset;
1281
1282 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1283 txctrl = dca3_get_tag(tx_ring->dev, cpu);
1284
1285 switch (hw->mac.type) {
1286 case ixgbe_mac_82598EB:
1287 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1288 break;
1289 case ixgbe_mac_82599EB:
1290 case ixgbe_mac_X540:
1291 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
1292 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
1293 break;
1294 default:
1295
1296 return;
1297 }
1298
1299
1300
1301
1302
1303
1304 txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1305 IXGBE_DCA_TXCTRL_DATA_RRO_EN |
1306 IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1307
1308 IXGBE_WRITE_REG(hw, reg_offset, txctrl);
1309}
1310
1311static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
1312 struct ixgbe_ring *rx_ring,
1313 int cpu)
1314{
1315 struct ixgbe_hw *hw = &adapter->hw;
1316 u32 rxctrl = 0;
1317 u8 reg_idx = rx_ring->reg_idx;
1318
1319 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1320 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
1321
1322 switch (hw->mac.type) {
1323 case ixgbe_mac_82599EB:
1324 case ixgbe_mac_X540:
1325 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
1326 break;
1327 default:
1328 break;
1329 }
1330
1331
1332
1333
1334
1335
1336 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1337 IXGBE_DCA_RXCTRL_DATA_DCA_EN |
1338 IXGBE_DCA_RXCTRL_DESC_DCA_EN;
1339
1340 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
1341}
1342
1343static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1344{
1345 struct ixgbe_adapter *adapter = q_vector->adapter;
1346 struct ixgbe_ring *ring;
1347 int cpu = get_cpu();
1348
1349 if (q_vector->cpu == cpu)
1350 goto out_no_update;
1351
1352 ixgbe_for_each_ring(ring, q_vector->tx)
1353 ixgbe_update_tx_dca(adapter, ring, cpu);
1354
1355 ixgbe_for_each_ring(ring, q_vector->rx)
1356 ixgbe_update_rx_dca(adapter, ring, cpu);
1357
1358 q_vector->cpu = cpu;
1359out_no_update:
1360 put_cpu();
1361}
1362
1363static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1364{
1365 int i;
1366
1367
1368 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1369 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1370 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1371 else
1372 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1373 IXGBE_DCA_CTRL_DCA_DISABLE);
1374
1375 for (i = 0; i < adapter->num_q_vectors; i++) {
1376 adapter->q_vector[i]->cpu = -1;
1377 ixgbe_update_dca(adapter->q_vector[i]);
1378 }
1379}
1380
1381static int __ixgbe_notify_dca(struct device *dev, void *data)
1382{
1383 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1384 unsigned long event = *(unsigned long *)data;
1385
1386 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1387 return 0;
1388
1389 switch (event) {
1390 case DCA_PROVIDER_ADD:
1391
1392 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1393 break;
1394 if (dca_add_requester(dev) == 0) {
1395 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1396 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1397 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1398 break;
1399 }
1400
1401 case DCA_PROVIDER_REMOVE:
1402 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1403 dca_remove_requester(dev);
1404 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1405 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1406 IXGBE_DCA_CTRL_DCA_DISABLE);
1407 }
1408 break;
1409 }
1410
1411 return 0;
1412}
1413
1414#endif
1415
1416#define IXGBE_RSS_L4_TYPES_MASK \
1417 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
1418 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
1419 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
1420 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
1421
1422static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1423 union ixgbe_adv_rx_desc *rx_desc,
1424 struct sk_buff *skb)
1425{
1426 u16 rss_type;
1427
1428 if (!(ring->netdev->features & NETIF_F_RXHASH))
1429 return;
1430
1431 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
1432 IXGBE_RXDADV_RSSTYPE_MASK;
1433
1434 if (!rss_type)
1435 return;
1436
1437 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1438 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
1439 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1440}
1441
1442#ifdef IXGBE_FCOE
1443
1444
1445
1446
1447
1448
1449
1450static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1451 union ixgbe_adv_rx_desc *rx_desc)
1452{
1453 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1454
1455 return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1456 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1457 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1458 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1459}
1460
1461#endif
1462
1463
1464
1465
1466
1467
1468static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1469 union ixgbe_adv_rx_desc *rx_desc,
1470 struct sk_buff *skb)
1471{
1472 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1473 bool encap_pkt = false;
1474
1475 skb_checksum_none_assert(skb);
1476
1477
1478 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1479 return;
1480
1481
1482 if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
1483 encap_pkt = true;
1484 skb->encapsulation = 1;
1485 }
1486
1487
1488 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1489 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1490 ring->rx_stats.csum_err++;
1491 return;
1492 }
1493
1494 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1495 return;
1496
1497 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1498
1499
1500
1501
1502 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1503 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1504 return;
1505
1506 ring->rx_stats.csum_err++;
1507 return;
1508 }
1509
1510
1511 skb->ip_summed = CHECKSUM_UNNECESSARY;
1512 if (encap_pkt) {
1513 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1514 return;
1515
1516 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1517 skb->ip_summed = CHECKSUM_NONE;
1518 return;
1519 }
1520
1521 skb->csum_level = 1;
1522 }
1523}
1524
1525static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
1526{
1527 return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
1528}
1529
1530static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1531 struct ixgbe_rx_buffer *bi)
1532{
1533 struct page *page = bi->page;
1534 dma_addr_t dma;
1535
1536
1537 if (likely(page))
1538 return true;
1539
1540
1541 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
1542 if (unlikely(!page)) {
1543 rx_ring->rx_stats.alloc_rx_page_failed++;
1544 return false;
1545 }
1546
1547
1548 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1549 ixgbe_rx_pg_size(rx_ring),
1550 DMA_FROM_DEVICE,
1551 IXGBE_RX_DMA_ATTR);
1552
1553
1554
1555
1556
1557 if (dma_mapping_error(rx_ring->dev, dma)) {
1558 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1559
1560 rx_ring->rx_stats.alloc_rx_page_failed++;
1561 return false;
1562 }
1563
1564 bi->dma = dma;
1565 bi->page = page;
1566 bi->page_offset = ixgbe_rx_offset(rx_ring);
1567 page_ref_add(page, USHRT_MAX - 1);
1568 bi->pagecnt_bias = USHRT_MAX;
1569 rx_ring->rx_stats.alloc_rx_page++;
1570
1571 return true;
1572}
1573
1574
1575
1576
1577
1578
1579void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1580{
1581 union ixgbe_adv_rx_desc *rx_desc;
1582 struct ixgbe_rx_buffer *bi;
1583 u16 i = rx_ring->next_to_use;
1584 u16 bufsz;
1585
1586
1587 if (!cleaned_count)
1588 return;
1589
1590 rx_desc = IXGBE_RX_DESC(rx_ring, i);
1591 bi = &rx_ring->rx_buffer_info[i];
1592 i -= rx_ring->count;
1593
1594 bufsz = ixgbe_rx_bufsz(rx_ring);
1595
1596 do {
1597 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1598 break;
1599
1600
1601 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1602 bi->page_offset, bufsz,
1603 DMA_FROM_DEVICE);
1604
1605
1606
1607
1608
1609 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1610
1611 rx_desc++;
1612 bi++;
1613 i++;
1614 if (unlikely(!i)) {
1615 rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1616 bi = rx_ring->rx_buffer_info;
1617 i -= rx_ring->count;
1618 }
1619
1620
1621 rx_desc->wb.upper.length = 0;
1622
1623 cleaned_count--;
1624 } while (cleaned_count);
1625
1626 i += rx_ring->count;
1627
1628 if (rx_ring->next_to_use != i) {
1629 rx_ring->next_to_use = i;
1630
1631
1632 rx_ring->next_to_alloc = i;
1633
1634
1635
1636
1637
1638
1639 wmb();
1640 writel(i, rx_ring->tail);
1641 }
1642}
1643
1644static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1645 struct sk_buff *skb)
1646{
1647 u16 hdr_len = skb_headlen(skb);
1648
1649
1650 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1651 IXGBE_CB(skb)->append_cnt);
1652 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1653}
1654
1655static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1656 struct sk_buff *skb)
1657{
1658
1659 if (!IXGBE_CB(skb)->append_cnt)
1660 return;
1661
1662 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1663 rx_ring->rx_stats.rsc_flush++;
1664
1665 ixgbe_set_rsc_gso_size(rx_ring, skb);
1666
1667
1668 IXGBE_CB(skb)->append_cnt = 0;
1669}
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1682 union ixgbe_adv_rx_desc *rx_desc,
1683 struct sk_buff *skb)
1684{
1685 struct net_device *dev = rx_ring->netdev;
1686 u32 flags = rx_ring->q_vector->adapter->flags;
1687
1688 ixgbe_update_rsc_stats(rx_ring, skb);
1689
1690 ixgbe_rx_hash(rx_ring, rx_desc, skb);
1691
1692 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1693
1694 if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
1695 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
1696
1697 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1698 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1699 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1700 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1701 }
1702
1703 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
1704 ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
1705
1706
1707 if (netif_is_ixgbe(dev))
1708 skb_record_rx_queue(skb, rx_ring->queue_index);
1709 else
1710 macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
1711 false);
1712
1713 skb->protocol = eth_type_trans(skb, dev);
1714}
1715
1716void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1717 struct sk_buff *skb)
1718{
1719 napi_gro_receive(&q_vector->napi, skb);
1720}
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1734 union ixgbe_adv_rx_desc *rx_desc,
1735 struct sk_buff *skb)
1736{
1737 u32 ntc = rx_ring->next_to_clean + 1;
1738
1739
1740 ntc = (ntc < rx_ring->count) ? ntc : 0;
1741 rx_ring->next_to_clean = ntc;
1742
1743 prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1744
1745
1746 if (ring_is_rsc_enabled(rx_ring)) {
1747 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1748 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1749
1750 if (unlikely(rsc_enabled)) {
1751 u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1752
1753 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1754 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1755
1756
1757 ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1758 ntc &= IXGBE_RXDADV_NEXTP_MASK;
1759 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1760 }
1761 }
1762
1763
1764 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1765 return false;
1766
1767
1768 rx_ring->rx_buffer_info[ntc].skb = skb;
1769 rx_ring->rx_stats.non_eop_descs++;
1770
1771 return true;
1772}
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1787 struct sk_buff *skb)
1788{
1789 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1790 unsigned char *va;
1791 unsigned int pull_len;
1792
1793
1794
1795
1796
1797
1798 va = skb_frag_address(frag);
1799
1800
1801
1802
1803
1804 pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
1805
1806
1807 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1808
1809
1810 skb_frag_size_sub(frag, pull_len);
1811 frag->page_offset += pull_len;
1812 skb->data_len -= pull_len;
1813 skb->tail += pull_len;
1814}
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1827 struct sk_buff *skb)
1828{
1829
1830 if (unlikely(IXGBE_CB(skb)->page_released)) {
1831 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
1832 ixgbe_rx_pg_size(rx_ring),
1833 DMA_FROM_DEVICE,
1834 IXGBE_RX_DMA_ATTR);
1835 } else if (ring_uses_build_skb(rx_ring)) {
1836 unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
1837
1838 dma_sync_single_range_for_cpu(rx_ring->dev,
1839 IXGBE_CB(skb)->dma,
1840 offset,
1841 skb_headlen(skb),
1842 DMA_FROM_DEVICE);
1843 } else {
1844 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1845
1846 dma_sync_single_range_for_cpu(rx_ring->dev,
1847 IXGBE_CB(skb)->dma,
1848 frag->page_offset,
1849 skb_frag_size(frag),
1850 DMA_FROM_DEVICE);
1851 }
1852}
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1877 union ixgbe_adv_rx_desc *rx_desc,
1878 struct sk_buff *skb)
1879{
1880 struct net_device *netdev = rx_ring->netdev;
1881
1882
1883 if (IS_ERR(skb))
1884 return true;
1885
1886
1887
1888
1889 if (!netdev ||
1890 (unlikely(ixgbe_test_staterr(rx_desc,
1891 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1892 !(netdev->features & NETIF_F_RXALL)))) {
1893 dev_kfree_skb_any(skb);
1894 return true;
1895 }
1896
1897
1898 if (!skb_headlen(skb))
1899 ixgbe_pull_tail(rx_ring, skb);
1900
1901#ifdef IXGBE_FCOE
1902
1903 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1904 return false;
1905
1906#endif
1907
1908 if (eth_skb_pad(skb))
1909 return true;
1910
1911 return false;
1912}
1913
1914
1915
1916
1917
1918
1919
1920
1921static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1922 struct ixgbe_rx_buffer *old_buff)
1923{
1924 struct ixgbe_rx_buffer *new_buff;
1925 u16 nta = rx_ring->next_to_alloc;
1926
1927 new_buff = &rx_ring->rx_buffer_info[nta];
1928
1929
1930 nta++;
1931 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1932
1933
1934
1935
1936
1937 new_buff->dma = old_buff->dma;
1938 new_buff->page = old_buff->page;
1939 new_buff->page_offset = old_buff->page_offset;
1940 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1941}
1942
1943static inline bool ixgbe_page_is_reserved(struct page *page)
1944{
1945 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1946}
1947
1948static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
1949{
1950 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1951 struct page *page = rx_buffer->page;
1952
1953
1954 if (unlikely(ixgbe_page_is_reserved(page)))
1955 return false;
1956
1957#if (PAGE_SIZE < 8192)
1958
1959 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
1960 return false;
1961#else
1962
1963
1964
1965
1966
1967#define IXGBE_LAST_OFFSET \
1968 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
1969 if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
1970 return false;
1971#endif
1972
1973
1974
1975
1976
1977 if (unlikely(pagecnt_bias == 1)) {
1978 page_ref_add(page, USHRT_MAX - 1);
1979 rx_buffer->pagecnt_bias = USHRT_MAX;
1980 }
1981
1982 return true;
1983}
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
2001 struct ixgbe_rx_buffer *rx_buffer,
2002 struct sk_buff *skb,
2003 unsigned int size)
2004{
2005#if (PAGE_SIZE < 8192)
2006 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2007#else
2008 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2009 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
2010 SKB_DATA_ALIGN(size);
2011#endif
2012 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
2013 rx_buffer->page_offset, size, truesize);
2014#if (PAGE_SIZE < 8192)
2015 rx_buffer->page_offset ^= truesize;
2016#else
2017 rx_buffer->page_offset += truesize;
2018#endif
2019}
2020
2021static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
2022 union ixgbe_adv_rx_desc *rx_desc,
2023 struct sk_buff **skb,
2024 const unsigned int size)
2025{
2026 struct ixgbe_rx_buffer *rx_buffer;
2027
2028 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
2029 prefetchw(rx_buffer->page);
2030 *skb = rx_buffer->skb;
2031
2032
2033
2034
2035
2036 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
2037 if (!*skb)
2038 goto skip_sync;
2039 } else {
2040 if (*skb)
2041 ixgbe_dma_sync_frag(rx_ring, *skb);
2042 }
2043
2044
2045 dma_sync_single_range_for_cpu(rx_ring->dev,
2046 rx_buffer->dma,
2047 rx_buffer->page_offset,
2048 size,
2049 DMA_FROM_DEVICE);
2050skip_sync:
2051 rx_buffer->pagecnt_bias--;
2052
2053 return rx_buffer;
2054}
2055
2056static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
2057 struct ixgbe_rx_buffer *rx_buffer,
2058 struct sk_buff *skb)
2059{
2060 if (ixgbe_can_reuse_rx_page(rx_buffer)) {
2061
2062 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2063 } else {
2064 if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
2065
2066 IXGBE_CB(skb)->page_released = true;
2067 } else {
2068
2069 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2070 ixgbe_rx_pg_size(rx_ring),
2071 DMA_FROM_DEVICE,
2072 IXGBE_RX_DMA_ATTR);
2073 }
2074 __page_frag_cache_drain(rx_buffer->page,
2075 rx_buffer->pagecnt_bias);
2076 }
2077
2078
2079 rx_buffer->page = NULL;
2080 rx_buffer->skb = NULL;
2081}
2082
2083static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
2084 struct ixgbe_rx_buffer *rx_buffer,
2085 struct xdp_buff *xdp,
2086 union ixgbe_adv_rx_desc *rx_desc)
2087{
2088 unsigned int size = xdp->data_end - xdp->data;
2089#if (PAGE_SIZE < 8192)
2090 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2091#else
2092 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
2093 xdp->data_hard_start);
2094#endif
2095 struct sk_buff *skb;
2096
2097
2098 prefetch(xdp->data);
2099#if L1_CACHE_BYTES < 128
2100 prefetch(xdp->data + L1_CACHE_BYTES);
2101#endif
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
2120 if (unlikely(!skb))
2121 return NULL;
2122
2123 if (size > IXGBE_RX_HDR_SIZE) {
2124 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2125 IXGBE_CB(skb)->dma = rx_buffer->dma;
2126
2127 skb_add_rx_frag(skb, 0, rx_buffer->page,
2128 xdp->data - page_address(rx_buffer->page),
2129 size, truesize);
2130#if (PAGE_SIZE < 8192)
2131 rx_buffer->page_offset ^= truesize;
2132#else
2133 rx_buffer->page_offset += truesize;
2134#endif
2135 } else {
2136 memcpy(__skb_put(skb, size),
2137 xdp->data, ALIGN(size, sizeof(long)));
2138 rx_buffer->pagecnt_bias++;
2139 }
2140
2141 return skb;
2142}
2143
2144static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
2145 struct ixgbe_rx_buffer *rx_buffer,
2146 struct xdp_buff *xdp,
2147 union ixgbe_adv_rx_desc *rx_desc)
2148{
2149 unsigned int metasize = xdp->data - xdp->data_meta;
2150#if (PAGE_SIZE < 8192)
2151 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2152#else
2153 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2154 SKB_DATA_ALIGN(xdp->data_end -
2155 xdp->data_hard_start);
2156#endif
2157 struct sk_buff *skb;
2158
2159
2160
2161
2162
2163
2164 prefetch(xdp->data_meta);
2165#if L1_CACHE_BYTES < 128
2166 prefetch(xdp->data_meta + L1_CACHE_BYTES);
2167#endif
2168
2169
2170 skb = build_skb(xdp->data_hard_start, truesize);
2171 if (unlikely(!skb))
2172 return NULL;
2173
2174
2175 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2176 __skb_put(skb, xdp->data_end - xdp->data);
2177 if (metasize)
2178 skb_metadata_set(skb, metasize);
2179
2180
2181 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2182 IXGBE_CB(skb)->dma = rx_buffer->dma;
2183
2184
2185#if (PAGE_SIZE < 8192)
2186 rx_buffer->page_offset ^= truesize;
2187#else
2188 rx_buffer->page_offset += truesize;
2189#endif
2190
2191 return skb;
2192}
2193
2194static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
2195 struct ixgbe_ring *rx_ring,
2196 struct xdp_buff *xdp)
2197{
2198 int err, result = IXGBE_XDP_PASS;
2199 struct bpf_prog *xdp_prog;
2200 struct xdp_frame *xdpf;
2201 u32 act;
2202
2203 rcu_read_lock();
2204 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2205
2206 if (!xdp_prog)
2207 goto xdp_out;
2208
2209 prefetchw(xdp->data_hard_start);
2210
2211 act = bpf_prog_run_xdp(xdp_prog, xdp);
2212 switch (act) {
2213 case XDP_PASS:
2214 break;
2215 case XDP_TX:
2216 xdpf = convert_to_xdp_frame(xdp);
2217 if (unlikely(!xdpf)) {
2218 result = IXGBE_XDP_CONSUMED;
2219 break;
2220 }
2221 result = ixgbe_xmit_xdp_ring(adapter, xdpf);
2222 break;
2223 case XDP_REDIRECT:
2224 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
2225 if (!err)
2226 result = IXGBE_XDP_REDIR;
2227 else
2228 result = IXGBE_XDP_CONSUMED;
2229 break;
2230 default:
2231 bpf_warn_invalid_xdp_action(act);
2232
2233 case XDP_ABORTED:
2234 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2235
2236 case XDP_DROP:
2237 result = IXGBE_XDP_CONSUMED;
2238 break;
2239 }
2240xdp_out:
2241 rcu_read_unlock();
2242 return ERR_PTR(-result);
2243}
2244
2245static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
2246 struct ixgbe_rx_buffer *rx_buffer,
2247 unsigned int size)
2248{
2249#if (PAGE_SIZE < 8192)
2250 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2251
2252 rx_buffer->page_offset ^= truesize;
2253#else
2254 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2255 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
2256 SKB_DATA_ALIGN(size);
2257
2258 rx_buffer->page_offset += truesize;
2259#endif
2260}
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2276 struct ixgbe_ring *rx_ring,
2277 const int budget)
2278{
2279 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2280 struct ixgbe_adapter *adapter = q_vector->adapter;
2281#ifdef IXGBE_FCOE
2282 int ddp_bytes;
2283 unsigned int mss = 0;
2284#endif
2285 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2286 unsigned int xdp_xmit = 0;
2287 struct xdp_buff xdp;
2288
2289 xdp.rxq = &rx_ring->xdp_rxq;
2290
2291 while (likely(total_rx_packets < budget)) {
2292 union ixgbe_adv_rx_desc *rx_desc;
2293 struct ixgbe_rx_buffer *rx_buffer;
2294 struct sk_buff *skb;
2295 unsigned int size;
2296
2297
2298 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2299 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2300 cleaned_count = 0;
2301 }
2302
2303 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2304 size = le16_to_cpu(rx_desc->wb.upper.length);
2305 if (!size)
2306 break;
2307
2308
2309
2310
2311
2312 dma_rmb();
2313
2314 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
2315
2316
2317 if (!skb) {
2318 xdp.data = page_address(rx_buffer->page) +
2319 rx_buffer->page_offset;
2320 xdp.data_meta = xdp.data;
2321 xdp.data_hard_start = xdp.data -
2322 ixgbe_rx_offset(rx_ring);
2323 xdp.data_end = xdp.data + size;
2324
2325 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2326 }
2327
2328 if (IS_ERR(skb)) {
2329 unsigned int xdp_res = -PTR_ERR(skb);
2330
2331 if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
2332 xdp_xmit |= xdp_res;
2333 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2334 } else {
2335 rx_buffer->pagecnt_bias++;
2336 }
2337 total_rx_packets++;
2338 total_rx_bytes += size;
2339 } else if (skb) {
2340 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
2341 } else if (ring_uses_build_skb(rx_ring)) {
2342 skb = ixgbe_build_skb(rx_ring, rx_buffer,
2343 &xdp, rx_desc);
2344 } else {
2345 skb = ixgbe_construct_skb(rx_ring, rx_buffer,
2346 &xdp, rx_desc);
2347 }
2348
2349
2350 if (!skb) {
2351 rx_ring->rx_stats.alloc_rx_buff_failed++;
2352 rx_buffer->pagecnt_bias++;
2353 break;
2354 }
2355
2356 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
2357 cleaned_count++;
2358
2359
2360 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2361 continue;
2362
2363
2364 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2365 continue;
2366
2367
2368 total_rx_bytes += skb->len;
2369
2370
2371 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2372
2373#ifdef IXGBE_FCOE
2374
2375 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2376 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2377
2378 if (ddp_bytes > 0) {
2379 if (!mss) {
2380 mss = rx_ring->netdev->mtu -
2381 sizeof(struct fcoe_hdr) -
2382 sizeof(struct fc_frame_header) -
2383 sizeof(struct fcoe_crc_eof);
2384 if (mss > 512)
2385 mss &= ~511;
2386 }
2387 total_rx_bytes += ddp_bytes;
2388 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2389 mss);
2390 }
2391 if (!ddp_bytes) {
2392 dev_kfree_skb_any(skb);
2393 continue;
2394 }
2395 }
2396
2397#endif
2398 ixgbe_rx_skb(q_vector, skb);
2399
2400
2401 total_rx_packets++;
2402 }
2403
2404 if (xdp_xmit & IXGBE_XDP_REDIR)
2405 xdp_do_flush_map();
2406
2407 if (xdp_xmit & IXGBE_XDP_TX) {
2408 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
2409
2410
2411
2412
2413 wmb();
2414 writel(ring->next_to_use, ring->tail);
2415 }
2416
2417 u64_stats_update_begin(&rx_ring->syncp);
2418 rx_ring->stats.packets += total_rx_packets;
2419 rx_ring->stats.bytes += total_rx_bytes;
2420 u64_stats_update_end(&rx_ring->syncp);
2421 q_vector->rx.total_packets += total_rx_packets;
2422 q_vector->rx.total_bytes += total_rx_bytes;
2423
2424 return total_rx_packets;
2425}
2426
2427
2428
2429
2430
2431
2432
2433
2434static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2435{
2436 struct ixgbe_q_vector *q_vector;
2437 int v_idx;
2438 u32 mask;
2439
2440
2441 if (adapter->num_vfs > 32) {
2442 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
2443 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2444 }
2445
2446
2447
2448
2449
2450 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2451 struct ixgbe_ring *ring;
2452 q_vector = adapter->q_vector[v_idx];
2453
2454 ixgbe_for_each_ring(ring, q_vector->rx)
2455 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2456
2457 ixgbe_for_each_ring(ring, q_vector->tx)
2458 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2459
2460 ixgbe_write_eitr(q_vector);
2461 }
2462
2463 switch (adapter->hw.mac.type) {
2464 case ixgbe_mac_82598EB:
2465 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2466 v_idx);
2467 break;
2468 case ixgbe_mac_82599EB:
2469 case ixgbe_mac_X540:
2470 case ixgbe_mac_X550:
2471 case ixgbe_mac_X550EM_x:
2472 case ixgbe_mac_x550em_a:
2473 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2474 break;
2475 default:
2476 break;
2477 }
2478 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2479
2480
2481 mask = IXGBE_EIMS_ENABLE_MASK;
2482 mask &= ~(IXGBE_EIMS_OTHER |
2483 IXGBE_EIMS_MAILBOX |
2484 IXGBE_EIMS_LSC);
2485
2486 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2487}
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2503 struct ixgbe_ring_container *ring_container)
2504{
2505 unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
2506 IXGBE_ITR_ADAPTIVE_LATENCY;
2507 unsigned int avg_wire_size, packets, bytes;
2508 unsigned long next_update = jiffies;
2509
2510
2511
2512
2513 if (!ring_container->ring)
2514 return;
2515
2516
2517
2518
2519
2520
2521 if (time_after(next_update, ring_container->next_update))
2522 goto clear_counts;
2523
2524 packets = ring_container->total_packets;
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534 if (!packets) {
2535 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2536 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2537 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2538 itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
2539 goto clear_counts;
2540 }
2541
2542 bytes = ring_container->total_bytes;
2543
2544
2545
2546
2547
2548 if (packets < 4 && bytes < 9000) {
2549 itr = IXGBE_ITR_ADAPTIVE_LATENCY;
2550 goto adjust_by_size;
2551 }
2552
2553
2554
2555
2556
2557 if (packets < 48) {
2558 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2559 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2560 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2561 goto clear_counts;
2562 }
2563
2564
2565
2566
2567 if (packets < 96) {
2568 itr = q_vector->itr >> 2;
2569 goto clear_counts;
2570 }
2571
2572
2573
2574
2575
2576 if (packets < 256) {
2577 itr = q_vector->itr >> 3;
2578 if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
2579 itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
2580 goto clear_counts;
2581 }
2582
2583
2584
2585
2586
2587
2588
2589 itr = IXGBE_ITR_ADAPTIVE_BULK;
2590
2591adjust_by_size:
2592
2593
2594
2595
2596
2597 avg_wire_size = bytes / packets;
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614 if (avg_wire_size <= 60) {
2615
2616 avg_wire_size = 5120;
2617 } else if (avg_wire_size <= 316) {
2618
2619 avg_wire_size *= 40;
2620 avg_wire_size += 2720;
2621 } else if (avg_wire_size <= 1084) {
2622
2623 avg_wire_size *= 15;
2624 avg_wire_size += 11452;
2625 } else if (avg_wire_size < 1968) {
2626
2627 avg_wire_size *= 5;
2628 avg_wire_size += 22420;
2629 } else {
2630
2631 avg_wire_size = 32256;
2632 }
2633
2634
2635
2636
2637 if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
2638 avg_wire_size >>= 1;
2639
2640
2641
2642
2643
2644
2645
2646
2647 switch (q_vector->adapter->link_speed) {
2648 case IXGBE_LINK_SPEED_10GB_FULL:
2649 case IXGBE_LINK_SPEED_100_FULL:
2650 default:
2651 itr += DIV_ROUND_UP(avg_wire_size,
2652 IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
2653 IXGBE_ITR_ADAPTIVE_MIN_INC;
2654 break;
2655 case IXGBE_LINK_SPEED_2_5GB_FULL:
2656 case IXGBE_LINK_SPEED_1GB_FULL:
2657 case IXGBE_LINK_SPEED_10_FULL:
2658 if (avg_wire_size > 8064)
2659 avg_wire_size = 8064;
2660 itr += DIV_ROUND_UP(avg_wire_size,
2661 IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
2662 IXGBE_ITR_ADAPTIVE_MIN_INC;
2663 break;
2664 }
2665
2666clear_counts:
2667
2668 ring_container->itr = itr;
2669
2670
2671 ring_container->next_update = next_update + 1;
2672
2673 ring_container->total_bytes = 0;
2674 ring_container->total_packets = 0;
2675}
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2686{
2687 struct ixgbe_adapter *adapter = q_vector->adapter;
2688 struct ixgbe_hw *hw = &adapter->hw;
2689 int v_idx = q_vector->v_idx;
2690 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2691
2692 switch (adapter->hw.mac.type) {
2693 case ixgbe_mac_82598EB:
2694
2695 itr_reg |= (itr_reg << 16);
2696 break;
2697 case ixgbe_mac_82599EB:
2698 case ixgbe_mac_X540:
2699 case ixgbe_mac_X550:
2700 case ixgbe_mac_X550EM_x:
2701 case ixgbe_mac_x550em_a:
2702
2703
2704
2705
2706 itr_reg |= IXGBE_EITR_CNT_WDIS;
2707 break;
2708 default:
2709 break;
2710 }
2711 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2712}
2713
2714static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2715{
2716 u32 new_itr;
2717
2718 ixgbe_update_itr(q_vector, &q_vector->tx);
2719 ixgbe_update_itr(q_vector, &q_vector->rx);
2720
2721
2722 new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
2723
2724
2725 new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
2726 new_itr <<= 2;
2727
2728 if (new_itr != q_vector->itr) {
2729
2730 q_vector->itr = new_itr;
2731
2732 ixgbe_write_eitr(q_vector);
2733 }
2734}
2735
2736
2737
2738
2739
2740static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2741{
2742 struct ixgbe_hw *hw = &adapter->hw;
2743 u32 eicr = adapter->interrupt_event;
2744 s32 rc;
2745
2746 if (test_bit(__IXGBE_DOWN, &adapter->state))
2747 return;
2748
2749 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2750 return;
2751
2752 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2753
2754 switch (hw->device_id) {
2755 case IXGBE_DEV_ID_82599_T3_LOM:
2756
2757
2758
2759
2760
2761
2762
2763 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2764 !(eicr & IXGBE_EICR_LSC))
2765 return;
2766
2767 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2768 u32 speed;
2769 bool link_up = false;
2770
2771 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2772
2773 if (link_up)
2774 return;
2775 }
2776
2777
2778 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2779 return;
2780
2781 break;
2782 case IXGBE_DEV_ID_X550EM_A_1G_T:
2783 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2784 rc = hw->phy.ops.check_overtemp(hw);
2785 if (rc != IXGBE_ERR_OVERTEMP)
2786 return;
2787 break;
2788 default:
2789 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2790 return;
2791 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2792 return;
2793 break;
2794 }
2795 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2796
2797 adapter->interrupt_event = 0;
2798}
2799
2800static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2801{
2802 struct ixgbe_hw *hw = &adapter->hw;
2803
2804 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2805 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2806 e_crit(probe, "Fan has stopped, replace the adapter\n");
2807
2808 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2809 }
2810}
2811
2812static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2813{
2814 struct ixgbe_hw *hw = &adapter->hw;
2815
2816 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2817 return;
2818
2819 switch (adapter->hw.mac.type) {
2820 case ixgbe_mac_82599EB:
2821
2822
2823
2824
2825 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2826 (eicr & IXGBE_EICR_LSC)) &&
2827 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2828 adapter->interrupt_event = eicr;
2829 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2830 ixgbe_service_event_schedule(adapter);
2831 return;
2832 }
2833 return;
2834 case ixgbe_mac_x550em_a:
2835 if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
2836 adapter->interrupt_event = eicr;
2837 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2838 ixgbe_service_event_schedule(adapter);
2839 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
2840 IXGBE_EICR_GPI_SDP0_X550EM_a);
2841 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
2842 IXGBE_EICR_GPI_SDP0_X550EM_a);
2843 }
2844 return;
2845 case ixgbe_mac_X550:
2846 case ixgbe_mac_X540:
2847 if (!(eicr & IXGBE_EICR_TS))
2848 return;
2849 break;
2850 default:
2851 return;
2852 }
2853
2854 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2855}
2856
2857static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2858{
2859 switch (hw->mac.type) {
2860 case ixgbe_mac_82598EB:
2861 if (hw->phy.type == ixgbe_phy_nl)
2862 return true;
2863 return false;
2864 case ixgbe_mac_82599EB:
2865 case ixgbe_mac_X550EM_x:
2866 case ixgbe_mac_x550em_a:
2867 switch (hw->mac.ops.get_media_type(hw)) {
2868 case ixgbe_media_type_fiber:
2869 case ixgbe_media_type_fiber_qsfp:
2870 return true;
2871 default:
2872 return false;
2873 }
2874 default:
2875 return false;
2876 }
2877}
2878
2879static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2880{
2881 struct ixgbe_hw *hw = &adapter->hw;
2882 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2883
2884 if (!ixgbe_is_sfp(hw))
2885 return;
2886
2887
2888 if (hw->mac.type >= ixgbe_mac_X540)
2889 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2890
2891 if (eicr & eicr_mask) {
2892
2893 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2894 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2895 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2896 adapter->sfp_poll_time = 0;
2897 ixgbe_service_event_schedule(adapter);
2898 }
2899 }
2900
2901 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2902 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2903
2904 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2905 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2906 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2907 ixgbe_service_event_schedule(adapter);
2908 }
2909 }
2910}
2911
2912static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2913{
2914 struct ixgbe_hw *hw = &adapter->hw;
2915
2916 adapter->lsc_int++;
2917 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2918 adapter->link_check_timeout = jiffies;
2919 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2920 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2921 IXGBE_WRITE_FLUSH(hw);
2922 ixgbe_service_event_schedule(adapter);
2923 }
2924}
2925
2926static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2927 u64 qmask)
2928{
2929 u32 mask;
2930 struct ixgbe_hw *hw = &adapter->hw;
2931
2932 switch (hw->mac.type) {
2933 case ixgbe_mac_82598EB:
2934 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2935 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2936 break;
2937 case ixgbe_mac_82599EB:
2938 case ixgbe_mac_X540:
2939 case ixgbe_mac_X550:
2940 case ixgbe_mac_X550EM_x:
2941 case ixgbe_mac_x550em_a:
2942 mask = (qmask & 0xFFFFFFFF);
2943 if (mask)
2944 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2945 mask = (qmask >> 32);
2946 if (mask)
2947 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2948 break;
2949 default:
2950 break;
2951 }
2952
2953}
2954
2955static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2956 u64 qmask)
2957{
2958 u32 mask;
2959 struct ixgbe_hw *hw = &adapter->hw;
2960
2961 switch (hw->mac.type) {
2962 case ixgbe_mac_82598EB:
2963 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2964 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2965 break;
2966 case ixgbe_mac_82599EB:
2967 case ixgbe_mac_X540:
2968 case ixgbe_mac_X550:
2969 case ixgbe_mac_X550EM_x:
2970 case ixgbe_mac_x550em_a:
2971 mask = (qmask & 0xFFFFFFFF);
2972 if (mask)
2973 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2974 mask = (qmask >> 32);
2975 if (mask)
2976 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2977 break;
2978 default:
2979 break;
2980 }
2981
2982}
2983
2984
2985
2986
2987
2988
2989
2990static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2991 bool flush)
2992{
2993 struct ixgbe_hw *hw = &adapter->hw;
2994 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2995
2996
2997 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2998 mask &= ~IXGBE_EIMS_LSC;
2999
3000 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
3001 switch (adapter->hw.mac.type) {
3002 case ixgbe_mac_82599EB:
3003 mask |= IXGBE_EIMS_GPI_SDP0(hw);
3004 break;
3005 case ixgbe_mac_X540:
3006 case ixgbe_mac_X550:
3007 case ixgbe_mac_X550EM_x:
3008 case ixgbe_mac_x550em_a:
3009 mask |= IXGBE_EIMS_TS;
3010 break;
3011 default:
3012 break;
3013 }
3014 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3015 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3016 switch (adapter->hw.mac.type) {
3017 case ixgbe_mac_82599EB:
3018 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3019 mask |= IXGBE_EIMS_GPI_SDP2(hw);
3020
3021 case ixgbe_mac_X540:
3022 case ixgbe_mac_X550:
3023 case ixgbe_mac_X550EM_x:
3024 case ixgbe_mac_x550em_a:
3025 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3026 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3027 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
3028 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
3029 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
3030 mask |= IXGBE_EICR_GPI_SDP0_X540;
3031 mask |= IXGBE_EIMS_ECC;
3032 mask |= IXGBE_EIMS_MAILBOX;
3033 break;
3034 default:
3035 break;
3036 }
3037
3038 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
3039 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
3040 mask |= IXGBE_EIMS_FLOW_DIR;
3041
3042 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
3043 if (queues)
3044 ixgbe_irq_enable_queues(adapter, ~0);
3045 if (flush)
3046 IXGBE_WRITE_FLUSH(&adapter->hw);
3047}
3048
3049static irqreturn_t ixgbe_msix_other(int irq, void *data)
3050{
3051 struct ixgbe_adapter *adapter = data;
3052 struct ixgbe_hw *hw = &adapter->hw;
3053 u32 eicr;
3054
3055
3056
3057
3058
3059
3060
3061 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3062
3063
3064
3065
3066
3067
3068
3069
3070 eicr &= 0xFFFF0000;
3071
3072 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3073
3074 if (eicr & IXGBE_EICR_LSC)
3075 ixgbe_check_lsc(adapter);
3076
3077 if (eicr & IXGBE_EICR_MAILBOX)
3078 ixgbe_msg_task(adapter);
3079
3080 switch (hw->mac.type) {
3081 case ixgbe_mac_82599EB:
3082 case ixgbe_mac_X540:
3083 case ixgbe_mac_X550:
3084 case ixgbe_mac_X550EM_x:
3085 case ixgbe_mac_x550em_a:
3086 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
3087 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3088 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
3089 ixgbe_service_event_schedule(adapter);
3090 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3091 IXGBE_EICR_GPI_SDP0_X540);
3092 }
3093 if (eicr & IXGBE_EICR_ECC) {
3094 e_info(link, "Received ECC Err, initiating reset\n");
3095 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3096 ixgbe_service_event_schedule(adapter);
3097 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3098 }
3099
3100 if (eicr & IXGBE_EICR_FLOW_DIR) {
3101 int reinit_count = 0;
3102 int i;
3103 for (i = 0; i < adapter->num_tx_queues; i++) {
3104 struct ixgbe_ring *ring = adapter->tx_ring[i];
3105 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
3106 &ring->state))
3107 reinit_count++;
3108 }
3109 if (reinit_count) {
3110
3111 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3112 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
3113 ixgbe_service_event_schedule(adapter);
3114 }
3115 }
3116 ixgbe_check_sfp_event(adapter, eicr);
3117 ixgbe_check_overtemp_event(adapter, eicr);
3118 break;
3119 default:
3120 break;
3121 }
3122
3123 ixgbe_check_fan_failure(adapter, eicr);
3124
3125 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3126 ixgbe_ptp_check_pps_event(adapter);
3127
3128
3129 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3130 ixgbe_irq_enable(adapter, false, false);
3131
3132 return IRQ_HANDLED;
3133}
3134
3135static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
3136{
3137 struct ixgbe_q_vector *q_vector = data;
3138
3139
3140
3141 if (q_vector->rx.ring || q_vector->tx.ring)
3142 napi_schedule_irqoff(&q_vector->napi);
3143
3144 return IRQ_HANDLED;
3145}
3146
3147
3148
3149
3150
3151
3152
3153
3154int ixgbe_poll(struct napi_struct *napi, int budget)
3155{
3156 struct ixgbe_q_vector *q_vector =
3157 container_of(napi, struct ixgbe_q_vector, napi);
3158 struct ixgbe_adapter *adapter = q_vector->adapter;
3159 struct ixgbe_ring *ring;
3160 int per_ring_budget, work_done = 0;
3161 bool clean_complete = true;
3162
3163#ifdef CONFIG_IXGBE_DCA
3164 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3165 ixgbe_update_dca(q_vector);
3166#endif
3167
3168 ixgbe_for_each_ring(ring, q_vector->tx) {
3169 bool wd = ring->xsk_umem ?
3170 ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
3171 ixgbe_clean_tx_irq(q_vector, ring, budget);
3172
3173 if (!wd)
3174 clean_complete = false;
3175 }
3176
3177
3178 if (budget <= 0)
3179 return budget;
3180
3181
3182
3183 if (q_vector->rx.count > 1)
3184 per_ring_budget = max(budget/q_vector->rx.count, 1);
3185 else
3186 per_ring_budget = budget;
3187
3188 ixgbe_for_each_ring(ring, q_vector->rx) {
3189 int cleaned = ring->xsk_umem ?
3190 ixgbe_clean_rx_irq_zc(q_vector, ring,
3191 per_ring_budget) :
3192 ixgbe_clean_rx_irq(q_vector, ring,
3193 per_ring_budget);
3194
3195 work_done += cleaned;
3196 if (cleaned >= per_ring_budget)
3197 clean_complete = false;
3198 }
3199
3200
3201 if (!clean_complete)
3202 return budget;
3203
3204
3205 if (likely(napi_complete_done(napi, work_done))) {
3206 if (adapter->rx_itr_setting & 1)
3207 ixgbe_set_itr(q_vector);
3208 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3209 ixgbe_irq_enable_queues(adapter,
3210 BIT_ULL(q_vector->v_idx));
3211 }
3212
3213 return min(work_done, budget - 1);
3214}
3215
3216
3217
3218
3219
3220
3221
3222
3223static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
3224{
3225 struct net_device *netdev = adapter->netdev;
3226 unsigned int ri = 0, ti = 0;
3227 int vector, err;
3228
3229 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3230 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3231 struct msix_entry *entry = &adapter->msix_entries[vector];
3232
3233 if (q_vector->tx.ring && q_vector->rx.ring) {
3234 snprintf(q_vector->name, sizeof(q_vector->name),
3235 "%s-TxRx-%u", netdev->name, ri++);
3236 ti++;
3237 } else if (q_vector->rx.ring) {
3238 snprintf(q_vector->name, sizeof(q_vector->name),
3239 "%s-rx-%u", netdev->name, ri++);
3240 } else if (q_vector->tx.ring) {
3241 snprintf(q_vector->name, sizeof(q_vector->name),
3242 "%s-tx-%u", netdev->name, ti++);
3243 } else {
3244
3245 continue;
3246 }
3247 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
3248 q_vector->name, q_vector);
3249 if (err) {
3250 e_err(probe, "request_irq failed for MSIX interrupt "
3251 "Error: %d\n", err);
3252 goto free_queue_irqs;
3253 }
3254
3255 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3256
3257 irq_set_affinity_hint(entry->vector,
3258 &q_vector->affinity_mask);
3259 }
3260 }
3261
3262 err = request_irq(adapter->msix_entries[vector].vector,
3263 ixgbe_msix_other, 0, netdev->name, adapter);
3264 if (err) {
3265 e_err(probe, "request_irq for msix_other failed: %d\n", err);
3266 goto free_queue_irqs;
3267 }
3268
3269 return 0;
3270
3271free_queue_irqs:
3272 while (vector) {
3273 vector--;
3274 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
3275 NULL);
3276 free_irq(adapter->msix_entries[vector].vector,
3277 adapter->q_vector[vector]);
3278 }
3279 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3280 pci_disable_msix(adapter->pdev);
3281 kfree(adapter->msix_entries);
3282 adapter->msix_entries = NULL;
3283 return err;
3284}
3285
3286
3287
3288
3289
3290
3291static irqreturn_t ixgbe_intr(int irq, void *data)
3292{
3293 struct ixgbe_adapter *adapter = data;
3294 struct ixgbe_hw *hw = &adapter->hw;
3295 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3296 u32 eicr;
3297
3298
3299
3300
3301
3302 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3303
3304
3305
3306 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3307 if (!eicr) {
3308
3309
3310
3311
3312
3313
3314
3315 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3316 ixgbe_irq_enable(adapter, true, true);
3317 return IRQ_NONE;
3318 }
3319
3320 if (eicr & IXGBE_EICR_LSC)
3321 ixgbe_check_lsc(adapter);
3322
3323 switch (hw->mac.type) {
3324 case ixgbe_mac_82599EB:
3325 ixgbe_check_sfp_event(adapter, eicr);
3326
3327 case ixgbe_mac_X540:
3328 case ixgbe_mac_X550:
3329 case ixgbe_mac_X550EM_x:
3330 case ixgbe_mac_x550em_a:
3331 if (eicr & IXGBE_EICR_ECC) {
3332 e_info(link, "Received ECC Err, initiating reset\n");
3333 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3334 ixgbe_service_event_schedule(adapter);
3335 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3336 }
3337 ixgbe_check_overtemp_event(adapter, eicr);
3338 break;
3339 default:
3340 break;
3341 }
3342
3343 ixgbe_check_fan_failure(adapter, eicr);
3344 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3345 ixgbe_ptp_check_pps_event(adapter);
3346
3347
3348 napi_schedule_irqoff(&q_vector->napi);
3349
3350
3351
3352
3353
3354 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3355 ixgbe_irq_enable(adapter, false, false);
3356
3357 return IRQ_HANDLED;
3358}
3359
3360
3361
3362
3363
3364
3365
3366
3367static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
3368{
3369 struct net_device *netdev = adapter->netdev;
3370 int err;
3371
3372 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3373 err = ixgbe_request_msix_irqs(adapter);
3374 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
3375 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
3376 netdev->name, adapter);
3377 else
3378 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
3379 netdev->name, adapter);
3380
3381 if (err)
3382 e_err(probe, "request_irq failed, Error %d\n", err);
3383
3384 return err;
3385}
3386
3387static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
3388{
3389 int vector;
3390
3391 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3392 free_irq(adapter->pdev->irq, adapter);
3393 return;
3394 }
3395
3396 if (!adapter->msix_entries)
3397 return;
3398
3399 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3400 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3401 struct msix_entry *entry = &adapter->msix_entries[vector];
3402
3403
3404 if (!q_vector->rx.ring && !q_vector->tx.ring)
3405 continue;
3406
3407
3408 irq_set_affinity_hint(entry->vector, NULL);
3409
3410 free_irq(entry->vector, q_vector);
3411 }
3412
3413 free_irq(adapter->msix_entries[vector].vector, adapter);
3414}
3415
3416
3417
3418
3419
3420static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3421{
3422 switch (adapter->hw.mac.type) {
3423 case ixgbe_mac_82598EB:
3424 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3425 break;
3426 case ixgbe_mac_82599EB:
3427 case ixgbe_mac_X540:
3428 case ixgbe_mac_X550:
3429 case ixgbe_mac_X550EM_x:
3430 case ixgbe_mac_x550em_a:
3431 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3432 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3433 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3434 break;
3435 default:
3436 break;
3437 }
3438 IXGBE_WRITE_FLUSH(&adapter->hw);
3439 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3440 int vector;
3441
3442 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3443 synchronize_irq(adapter->msix_entries[vector].vector);
3444
3445 synchronize_irq(adapter->msix_entries[vector++].vector);
3446 } else {
3447 synchronize_irq(adapter->pdev->irq);
3448 }
3449}
3450
3451
3452
3453
3454
3455
3456static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3457{
3458 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3459
3460 ixgbe_write_eitr(q_vector);
3461
3462 ixgbe_set_ivar(adapter, 0, 0, 0);
3463 ixgbe_set_ivar(adapter, 1, 0, 0);
3464
3465 e_info(hw, "Legacy interrupt IVAR setup done\n");
3466}
3467
3468
3469
3470
3471
3472
3473
3474
3475void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3476 struct ixgbe_ring *ring)
3477{
3478 struct ixgbe_hw *hw = &adapter->hw;
3479 u64 tdba = ring->dma;
3480 int wait_loop = 10;
3481 u32 txdctl = IXGBE_TXDCTL_ENABLE;
3482 u8 reg_idx = ring->reg_idx;
3483
3484 ring->xsk_umem = NULL;
3485 if (ring_is_xdp(ring))
3486 ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
3487
3488
3489 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3490 IXGBE_WRITE_FLUSH(hw);
3491
3492 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3493 (tdba & DMA_BIT_MASK(32)));
3494 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3495 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3496 ring->count * sizeof(union ixgbe_adv_tx_desc));
3497 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3498 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3499 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3512 txdctl |= 1u << 16;
3513 else
3514 txdctl |= 8u << 16;
3515
3516
3517
3518
3519
3520 txdctl |= (1u << 8) |
3521 32;
3522
3523
3524 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3525 ring->atr_sample_rate = adapter->atr_sample_rate;
3526 ring->atr_count = 0;
3527 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3528 } else {
3529 ring->atr_sample_rate = 0;
3530 }
3531
3532
3533 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3534 struct ixgbe_q_vector *q_vector = ring->q_vector;
3535
3536 if (q_vector)
3537 netif_set_xps_queue(ring->netdev,
3538 &q_vector->affinity_mask,
3539 ring->queue_index);
3540 }
3541
3542 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3543
3544
3545 memset(ring->tx_buffer_info, 0,
3546 sizeof(struct ixgbe_tx_buffer) * ring->count);
3547
3548
3549 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3550
3551
3552 if (hw->mac.type == ixgbe_mac_82598EB &&
3553 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3554 return;
3555
3556
3557 do {
3558 usleep_range(1000, 2000);
3559 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3560 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3561 if (!wait_loop)
3562 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
3563}
3564
3565static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3566{
3567 struct ixgbe_hw *hw = &adapter->hw;
3568 u32 rttdcs, mtqc;
3569 u8 tcs = adapter->hw_tcs;
3570
3571 if (hw->mac.type == ixgbe_mac_82598EB)
3572 return;
3573
3574
3575 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3576 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3577 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3578
3579
3580 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3581 mtqc = IXGBE_MTQC_VT_ENA;
3582 if (tcs > 4)
3583 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3584 else if (tcs > 1)
3585 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3586 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3587 IXGBE_82599_VMDQ_4Q_MASK)
3588 mtqc |= IXGBE_MTQC_32VF;
3589 else
3590 mtqc |= IXGBE_MTQC_64VF;
3591 } else {
3592 if (tcs > 4) {
3593 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3594 } else if (tcs > 1) {
3595 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3596 } else {
3597 u8 max_txq = adapter->num_tx_queues +
3598 adapter->num_xdp_queues;
3599 if (max_txq > 63)
3600 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3601 else
3602 mtqc = IXGBE_MTQC_64Q_1PB;
3603 }
3604 }
3605
3606 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3607
3608
3609 if (tcs) {
3610 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3611 sectx |= IXGBE_SECTX_DCB;
3612 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3613 }
3614
3615
3616 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3617 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3618}
3619
3620
3621
3622
3623
3624
3625
3626static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3627{
3628 struct ixgbe_hw *hw = &adapter->hw;
3629 u32 dmatxctl;
3630 u32 i;
3631
3632 ixgbe_setup_mtqc(adapter);
3633
3634 if (hw->mac.type != ixgbe_mac_82598EB) {
3635
3636 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3637 dmatxctl |= IXGBE_DMATXCTL_TE;
3638 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3639 }
3640
3641
3642 for (i = 0; i < adapter->num_tx_queues; i++)
3643 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3644 for (i = 0; i < adapter->num_xdp_queues; i++)
3645 ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
3646}
3647
3648static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3649 struct ixgbe_ring *ring)
3650{
3651 struct ixgbe_hw *hw = &adapter->hw;
3652 u8 reg_idx = ring->reg_idx;
3653 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3654
3655 srrctl |= IXGBE_SRRCTL_DROP_EN;
3656
3657 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3658}
3659
3660static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3661 struct ixgbe_ring *ring)
3662{
3663 struct ixgbe_hw *hw = &adapter->hw;
3664 u8 reg_idx = ring->reg_idx;
3665 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3666
3667 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3668
3669 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3670}
3671
3672#ifdef CONFIG_IXGBE_DCB
3673void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3674#else
3675static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3676#endif
3677{
3678 int i;
3679 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3680
3681 if (adapter->ixgbe_ieee_pfc)
3682 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3694 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3695 for (i = 0; i < adapter->num_rx_queues; i++)
3696 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3697 } else {
3698 for (i = 0; i < adapter->num_rx_queues; i++)
3699 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3700 }
3701}
3702
3703#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3704
3705static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3706 struct ixgbe_ring *rx_ring)
3707{
3708 struct ixgbe_hw *hw = &adapter->hw;
3709 u32 srrctl;
3710 u8 reg_idx = rx_ring->reg_idx;
3711
3712 if (hw->mac.type == ixgbe_mac_82598EB) {
3713 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3714
3715
3716
3717
3718
3719 reg_idx &= mask;
3720 }
3721
3722
3723 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3724
3725
3726 if (rx_ring->xsk_umem) {
3727 u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
3728 XDP_PACKET_HEADROOM;
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738 if (hw->mac.type != ixgbe_mac_82599EB)
3739 srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3740 else
3741 srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3742 } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
3743 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3744 } else {
3745 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3746 }
3747
3748
3749 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3750
3751 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3752}
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3763{
3764 if (adapter->hw.mac.type < ixgbe_mac_X550)
3765 return 128;
3766 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3767 return 64;
3768 else
3769 return 512;
3770}
3771
3772
3773
3774
3775
3776
3777
3778void ixgbe_store_key(struct ixgbe_adapter *adapter)
3779{
3780 struct ixgbe_hw *hw = &adapter->hw;
3781 int i;
3782
3783 for (i = 0; i < 10; i++)
3784 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3785}
3786
3787
3788
3789
3790
3791
3792
3793static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
3794{
3795 u32 *rss_key;
3796
3797 if (!adapter->rss_key) {
3798 rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
3799 if (unlikely(!rss_key))
3800 return -ENOMEM;
3801
3802 netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
3803 adapter->rss_key = rss_key;
3804 }
3805
3806 return 0;
3807}
3808
3809
3810
3811
3812
3813
3814
3815void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3816{
3817 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3818 struct ixgbe_hw *hw = &adapter->hw;
3819 u32 reta = 0;
3820 u32 indices_multi;
3821 u8 *indir_tbl = adapter->rss_indir_tbl;
3822
3823
3824
3825
3826
3827
3828
3829 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3830 indices_multi = 0x11;
3831 else
3832 indices_multi = 0x1;
3833
3834
3835 for (i = 0; i < reta_entries; i++) {
3836 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3837 if ((i & 3) == 3) {
3838 if (i < 128)
3839 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3840 else
3841 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3842 reta);
3843 reta = 0;
3844 }
3845 }
3846}
3847
3848
3849
3850
3851
3852
3853
3854static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3855{
3856 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3857 struct ixgbe_hw *hw = &adapter->hw;
3858 u32 vfreta = 0;
3859
3860
3861 for (i = 0; i < reta_entries; i++) {
3862 u16 pool = adapter->num_rx_pools;
3863
3864 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3865 if ((i & 3) != 3)
3866 continue;
3867
3868 while (pool--)
3869 IXGBE_WRITE_REG(hw,
3870 IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
3871 vfreta);
3872 vfreta = 0;
3873 }
3874}
3875
3876static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3877{
3878 u32 i, j;
3879 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3880 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3881
3882
3883
3884
3885
3886 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
3887 rss_i = 4;
3888
3889
3890 ixgbe_store_key(adapter);
3891
3892
3893 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3894
3895 for (i = 0, j = 0; i < reta_entries; i++, j++) {
3896 if (j == rss_i)
3897 j = 0;
3898
3899 adapter->rss_indir_tbl[i] = j;
3900 }
3901
3902 ixgbe_store_reta(adapter);
3903}
3904
3905static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3906{
3907 struct ixgbe_hw *hw = &adapter->hw;
3908 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3909 int i, j;
3910
3911
3912 for (i = 0; i < 10; i++) {
3913 u16 pool = adapter->num_rx_pools;
3914
3915 while (pool--)
3916 IXGBE_WRITE_REG(hw,
3917 IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
3918 *(adapter->rss_key + i));
3919 }
3920
3921
3922 for (i = 0, j = 0; i < 64; i++, j++) {
3923 if (j == rss_i)
3924 j = 0;
3925
3926 adapter->rss_indir_tbl[i] = j;
3927 }
3928
3929 ixgbe_store_vfreta(adapter);
3930}
3931
3932static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3933{
3934 struct ixgbe_hw *hw = &adapter->hw;
3935 u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3936 u32 rxcsum;
3937
3938
3939 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3940 rxcsum |= IXGBE_RXCSUM_PCSD;
3941 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3942
3943 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3944 if (adapter->ring_feature[RING_F_RSS].mask)
3945 mrqc = IXGBE_MRQC_RSSEN;
3946 } else {
3947 u8 tcs = adapter->hw_tcs;
3948
3949 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3950 if (tcs > 4)
3951 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3952 else if (tcs > 1)
3953 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3954 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3955 IXGBE_82599_VMDQ_4Q_MASK)
3956 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3957 else
3958 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3959
3960
3961
3962
3963 if (hw->mac.type >= ixgbe_mac_X550)
3964 mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3965 } else {
3966 if (tcs > 4)
3967 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3968 else if (tcs > 1)
3969 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3970 else
3971 mrqc = IXGBE_MRQC_RSSEN;
3972 }
3973 }
3974
3975
3976 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3977 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3978 IXGBE_MRQC_RSS_FIELD_IPV6 |
3979 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3980
3981 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3982 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3983 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3984 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3985
3986 if ((hw->mac.type >= ixgbe_mac_X550) &&
3987 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3988 u16 pool = adapter->num_rx_pools;
3989
3990
3991 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
3992 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3993
3994
3995 ixgbe_setup_vfreta(adapter);
3996 vfmrqc = IXGBE_MRQC_RSSEN;
3997 vfmrqc |= rss_field;
3998
3999 while (pool--)
4000 IXGBE_WRITE_REG(hw,
4001 IXGBE_PFVFMRQC(VMDQ_P(pool)),
4002 vfmrqc);
4003 } else {
4004 ixgbe_setup_reta(adapter);
4005 mrqc |= rss_field;
4006 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4007 }
4008}
4009
4010
4011
4012
4013
4014
4015static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
4016 struct ixgbe_ring *ring)
4017{
4018 struct ixgbe_hw *hw = &adapter->hw;
4019 u32 rscctrl;
4020 u8 reg_idx = ring->reg_idx;
4021
4022 if (!ring_is_rsc_enabled(ring))
4023 return;
4024
4025 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
4026 rscctrl |= IXGBE_RSCCTL_RSCEN;
4027
4028
4029
4030
4031
4032 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
4033 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
4034}
4035
4036#define IXGBE_MAX_RX_DESC_POLL 10
4037static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
4038 struct ixgbe_ring *ring)
4039{
4040 struct ixgbe_hw *hw = &adapter->hw;
4041 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
4042 u32 rxdctl;
4043 u8 reg_idx = ring->reg_idx;
4044
4045 if (ixgbe_removed(hw->hw_addr))
4046 return;
4047
4048 if (hw->mac.type == ixgbe_mac_82598EB &&
4049 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
4050 return;
4051
4052 do {
4053 usleep_range(1000, 2000);
4054 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4055 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4056
4057 if (!wait_loop) {
4058 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
4059 "the polling period\n", reg_idx);
4060 }
4061}
4062
4063void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
4064 struct ixgbe_ring *ring)
4065{
4066 struct ixgbe_hw *hw = &adapter->hw;
4067 union ixgbe_adv_rx_desc *rx_desc;
4068 u64 rdba = ring->dma;
4069 u32 rxdctl;
4070 u8 reg_idx = ring->reg_idx;
4071
4072 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4073 ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
4074 if (ring->xsk_umem) {
4075 ring->zca.free = ixgbe_zca_free;
4076 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4077 MEM_TYPE_ZERO_COPY,
4078 &ring->zca));
4079
4080 } else {
4081 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4082 MEM_TYPE_PAGE_SHARED, NULL));
4083 }
4084
4085
4086 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4087 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4088
4089
4090 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4091 IXGBE_WRITE_FLUSH(hw);
4092
4093 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
4094 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
4095 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
4096 ring->count * sizeof(union ixgbe_adv_rx_desc));
4097
4098 IXGBE_WRITE_FLUSH(hw);
4099
4100 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
4101 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
4102 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
4103
4104 ixgbe_configure_srrctl(adapter, ring);
4105 ixgbe_configure_rscctl(adapter, ring);
4106
4107 if (hw->mac.type == ixgbe_mac_82598EB) {
4108
4109
4110
4111
4112
4113
4114
4115 rxdctl &= ~0x3FFFFF;
4116 rxdctl |= 0x080420;
4117#if (PAGE_SIZE < 8192)
4118
4119 } else if (hw->mac.type != ixgbe_mac_82599EB) {
4120 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4121 IXGBE_RXDCTL_RLPML_EN);
4122
4123
4124
4125
4126
4127 if (ring_uses_build_skb(ring) &&
4128 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
4129 rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
4130 IXGBE_RXDCTL_RLPML_EN;
4131#endif
4132 }
4133
4134 if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
4135 u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
4136 XDP_PACKET_HEADROOM;
4137
4138 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4139 IXGBE_RXDCTL_RLPML_EN);
4140 rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
4141
4142 ring->rx_buf_len = xsk_buf_len;
4143 }
4144
4145
4146 memset(ring->rx_buffer_info, 0,
4147 sizeof(struct ixgbe_rx_buffer) * ring->count);
4148
4149
4150 rx_desc = IXGBE_RX_DESC(ring, 0);
4151 rx_desc->wb.upper.length = 0;
4152
4153
4154 rxdctl |= IXGBE_RXDCTL_ENABLE;
4155 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4156
4157 ixgbe_rx_desc_queue_enable(adapter, ring);
4158 if (ring->xsk_umem)
4159 ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
4160 else
4161 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4162}
4163
4164static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
4165{
4166 struct ixgbe_hw *hw = &adapter->hw;
4167 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
4168 u16 pool = adapter->num_rx_pools;
4169
4170
4171 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4172 IXGBE_PSRTYPE_UDPHDR |
4173 IXGBE_PSRTYPE_IPV4HDR |
4174 IXGBE_PSRTYPE_L2HDR |
4175 IXGBE_PSRTYPE_IPV6HDR;
4176
4177 if (hw->mac.type == ixgbe_mac_82598EB)
4178 return;
4179
4180 if (rss_i > 3)
4181 psrtype |= 2u << 29;
4182 else if (rss_i > 1)
4183 psrtype |= 1u << 29;
4184
4185 while (pool--)
4186 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4187}
4188
4189static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
4190{
4191 struct ixgbe_hw *hw = &adapter->hw;
4192 u16 pool = adapter->num_rx_pools;
4193 u32 reg_offset, vf_shift, vmolr;
4194 u32 gcr_ext, vmdctl;
4195 int i;
4196
4197 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
4198 return;
4199
4200 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4201 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
4202 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
4203 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
4204 vmdctl |= IXGBE_VT_CTL_REPLEN;
4205 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
4206
4207
4208
4209
4210 vmolr = IXGBE_VMOLR_AUPE;
4211 while (pool--)
4212 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
4213
4214 vf_shift = VMDQ_P(0) % 32;
4215 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
4216
4217
4218 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
4219 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
4220 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
4221 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
4222 if (adapter->bridge_mode == BRIDGE_MODE_VEB)
4223 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4224
4225
4226 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
4227
4228
4229 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4230
4231
4232
4233
4234
4235 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4236 case IXGBE_82599_VMDQ_8Q_MASK:
4237 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
4238 break;
4239 case IXGBE_82599_VMDQ_4Q_MASK:
4240 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
4241 break;
4242 default:
4243 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
4244 break;
4245 }
4246
4247 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4248
4249 for (i = 0; i < adapter->num_vfs; i++) {
4250
4251 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
4252 adapter->vfinfo[i].spoofchk_enabled);
4253
4254
4255 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
4256 adapter->vfinfo[i].rss_query_enabled);
4257 }
4258}
4259
4260static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
4261{
4262 struct ixgbe_hw *hw = &adapter->hw;
4263 struct net_device *netdev = adapter->netdev;
4264 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4265 struct ixgbe_ring *rx_ring;
4266 int i;
4267 u32 mhadd, hlreg0;
4268
4269#ifdef IXGBE_FCOE
4270
4271 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
4272 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
4273 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4274
4275#endif
4276
4277
4278 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4279 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
4280
4281 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4282 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
4283 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4284 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
4285
4286 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4287 }
4288
4289 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4290
4291 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4292 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4293
4294
4295
4296
4297
4298 for (i = 0; i < adapter->num_rx_queues; i++) {
4299 rx_ring = adapter->rx_ring[i];
4300
4301 clear_ring_rsc_enabled(rx_ring);
4302 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4303 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4304
4305 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4306 set_ring_rsc_enabled(rx_ring);
4307
4308 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
4309 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4310
4311 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4312 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
4313 continue;
4314
4315 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4316
4317#if (PAGE_SIZE < 8192)
4318 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4319 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4320
4321 if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
4322 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
4323 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4324#endif
4325 }
4326}
4327
4328static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
4329{
4330 struct ixgbe_hw *hw = &adapter->hw;
4331 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4332
4333 switch (hw->mac.type) {
4334 case ixgbe_mac_82598EB:
4335
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
4346 break;
4347 case ixgbe_mac_X550:
4348 case ixgbe_mac_X550EM_x:
4349 case ixgbe_mac_x550em_a:
4350 if (adapter->num_vfs)
4351 rdrxctl |= IXGBE_RDRXCTL_PSP;
4352
4353 case ixgbe_mac_82599EB:
4354 case ixgbe_mac_X540:
4355
4356 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
4357 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
4358 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4359
4360 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
4361 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4362 break;
4363 default:
4364
4365 return;
4366 }
4367
4368 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4369}
4370
4371
4372
4373
4374
4375
4376
4377static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
4378{
4379 struct ixgbe_hw *hw = &adapter->hw;
4380 int i;
4381 u32 rxctrl, rfctl;
4382
4383
4384 hw->mac.ops.disable_rx(hw);
4385
4386 ixgbe_setup_psrtype(adapter);
4387 ixgbe_setup_rdrxctl(adapter);
4388
4389
4390 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4391 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
4392 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
4393 rfctl |= IXGBE_RFCTL_RSC_DIS;
4394
4395
4396 rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
4397 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4398
4399
4400 ixgbe_setup_mrqc(adapter);
4401
4402
4403 ixgbe_set_rx_buffer_len(adapter);
4404
4405
4406
4407
4408
4409 for (i = 0; i < adapter->num_rx_queues; i++)
4410 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
4411
4412 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4413
4414 if (hw->mac.type == ixgbe_mac_82598EB)
4415 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4416
4417
4418 rxctrl |= IXGBE_RXCTRL_RXEN;
4419 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4420}
4421
4422static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
4423 __be16 proto, u16 vid)
4424{
4425 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4426 struct ixgbe_hw *hw = &adapter->hw;
4427
4428
4429 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4430 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
4431
4432 set_bit(vid, adapter->active_vlans);
4433
4434 return 0;
4435}
4436
4437static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
4438{
4439 u32 vlvf;
4440 int idx;
4441
4442
4443 if (vlan == 0)
4444 return 0;
4445
4446
4447 for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
4448 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
4449 if ((vlvf & VLAN_VID_MASK) == vlan)
4450 break;
4451 }
4452
4453 return idx;
4454}
4455
4456void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
4457{
4458 struct ixgbe_hw *hw = &adapter->hw;
4459 u32 bits, word;
4460 int idx;
4461
4462 idx = ixgbe_find_vlvf_entry(hw, vid);
4463 if (!idx)
4464 return;
4465
4466
4467
4468
4469 word = idx * 2 + (VMDQ_P(0) / 32);
4470 bits = ~BIT(VMDQ_P(0) % 32);
4471 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4472
4473
4474 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
4475 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4476 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
4477 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
4478 }
4479}
4480
4481static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
4482 __be16 proto, u16 vid)
4483{
4484 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4485 struct ixgbe_hw *hw = &adapter->hw;
4486
4487
4488 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4489 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
4490
4491 clear_bit(vid, adapter->active_vlans);
4492
4493 return 0;
4494}
4495
4496
4497
4498
4499
4500static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
4501{
4502 struct ixgbe_hw *hw = &adapter->hw;
4503 u32 vlnctrl;
4504 int i, j;
4505
4506 switch (hw->mac.type) {
4507 case ixgbe_mac_82598EB:
4508 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4509 vlnctrl &= ~IXGBE_VLNCTRL_VME;
4510 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4511 break;
4512 case ixgbe_mac_82599EB:
4513 case ixgbe_mac_X540:
4514 case ixgbe_mac_X550:
4515 case ixgbe_mac_X550EM_x:
4516 case ixgbe_mac_x550em_a:
4517 for (i = 0; i < adapter->num_rx_queues; i++) {
4518 struct ixgbe_ring *ring = adapter->rx_ring[i];
4519
4520 if (!netif_is_ixgbe(ring->netdev))
4521 continue;
4522
4523 j = ring->reg_idx;
4524 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4525 vlnctrl &= ~IXGBE_RXDCTL_VME;
4526 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4527 }
4528 break;
4529 default:
4530 break;
4531 }
4532}
4533
4534
4535
4536
4537
4538static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4539{
4540 struct ixgbe_hw *hw = &adapter->hw;
4541 u32 vlnctrl;
4542 int i, j;
4543
4544 switch (hw->mac.type) {
4545 case ixgbe_mac_82598EB:
4546 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4547 vlnctrl |= IXGBE_VLNCTRL_VME;
4548 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4549 break;
4550 case ixgbe_mac_82599EB:
4551 case ixgbe_mac_X540:
4552 case ixgbe_mac_X550:
4553 case ixgbe_mac_X550EM_x:
4554 case ixgbe_mac_x550em_a:
4555 for (i = 0; i < adapter->num_rx_queues; i++) {
4556 struct ixgbe_ring *ring = adapter->rx_ring[i];
4557
4558 if (!netif_is_ixgbe(ring->netdev))
4559 continue;
4560
4561 j = ring->reg_idx;
4562 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4563 vlnctrl |= IXGBE_RXDCTL_VME;
4564 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4565 }
4566 break;
4567 default:
4568 break;
4569 }
4570}
4571
4572static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4573{
4574 struct ixgbe_hw *hw = &adapter->hw;
4575 u32 vlnctrl, i;
4576
4577 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4578
4579 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
4580
4581 vlnctrl |= IXGBE_VLNCTRL_VFE;
4582 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4583 } else {
4584 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4585 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4586 return;
4587 }
4588
4589
4590 if (hw->mac.type == ixgbe_mac_82598EB)
4591 return;
4592
4593
4594 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
4595 return;
4596
4597
4598 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4599
4600
4601 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4602 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4603 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4604
4605 vlvfb |= BIT(VMDQ_P(0) % 32);
4606 IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4607 }
4608
4609
4610 for (i = hw->mac.vft_size; i--;)
4611 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4612}
4613
4614#define VFTA_BLOCK_SIZE 8
4615static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4616{
4617 struct ixgbe_hw *hw = &adapter->hw;
4618 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4619 u32 vid_start = vfta_offset * 32;
4620 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4621 u32 i, vid, word, bits;
4622
4623 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4624 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
4625
4626
4627 vid = vlvf & VLAN_VID_MASK;
4628
4629
4630 if (vid < vid_start || vid >= vid_end)
4631 continue;
4632
4633 if (vlvf) {
4634
4635 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4636
4637
4638 if (test_bit(vid, adapter->active_vlans))
4639 continue;
4640 }
4641
4642
4643 word = i * 2 + VMDQ_P(0) / 32;
4644 bits = ~BIT(VMDQ_P(0) % 32);
4645 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4646 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4647 }
4648
4649
4650 for (i = VFTA_BLOCK_SIZE; i--;) {
4651 vid = (vfta_offset + i) * 32;
4652 word = vid / BITS_PER_LONG;
4653 bits = vid % BITS_PER_LONG;
4654
4655 vfta[i] |= adapter->active_vlans[word] >> bits;
4656
4657 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4658 }
4659}
4660
4661static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4662{
4663 struct ixgbe_hw *hw = &adapter->hw;
4664 u32 vlnctrl, i;
4665
4666
4667 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4668 vlnctrl |= IXGBE_VLNCTRL_VFE;
4669 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4670
4671 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
4672 hw->mac.type == ixgbe_mac_82598EB)
4673 return;
4674
4675
4676 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4677 return;
4678
4679
4680 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4681
4682 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
4683 ixgbe_scrub_vfta(adapter, i);
4684}
4685
4686static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4687{
4688 u16 vid = 1;
4689
4690 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4691
4692 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
4693 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4694}
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704
4705static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4706{
4707 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4708 struct ixgbe_hw *hw = &adapter->hw;
4709
4710 if (!netif_running(netdev))
4711 return 0;
4712
4713 if (hw->mac.ops.update_mc_addr_list)
4714 hw->mac.ops.update_mc_addr_list(hw, netdev);
4715 else
4716 return -ENOMEM;
4717
4718#ifdef CONFIG_PCI_IOV
4719 ixgbe_restore_vf_multicasts(adapter);
4720#endif
4721
4722 return netdev_mc_count(netdev);
4723}
4724
4725#ifdef CONFIG_PCI_IOV
4726void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4727{
4728 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4729 struct ixgbe_hw *hw = &adapter->hw;
4730 int i;
4731
4732 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4733 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4734
4735 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4736 hw->mac.ops.set_rar(hw, i,
4737 mac_table->addr,
4738 mac_table->pool,
4739 IXGBE_RAH_AV);
4740 else
4741 hw->mac.ops.clear_rar(hw, i);
4742 }
4743}
4744
4745#endif
4746static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4747{
4748 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4749 struct ixgbe_hw *hw = &adapter->hw;
4750 int i;
4751
4752 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4753 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
4754 continue;
4755
4756 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4757
4758 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4759 hw->mac.ops.set_rar(hw, i,
4760 mac_table->addr,
4761 mac_table->pool,
4762 IXGBE_RAH_AV);
4763 else
4764 hw->mac.ops.clear_rar(hw, i);
4765 }
4766}
4767
4768static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4769{
4770 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4771 struct ixgbe_hw *hw = &adapter->hw;
4772 int i;
4773
4774 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4775 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4776 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4777 }
4778
4779 ixgbe_sync_mac_table(adapter);
4780}
4781
4782static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
4783{
4784 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4785 struct ixgbe_hw *hw = &adapter->hw;
4786 int i, count = 0;
4787
4788 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4789
4790 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
4791 continue;
4792
4793
4794 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
4795 if (mac_table->pool != pool)
4796 continue;
4797 }
4798
4799 count++;
4800 }
4801
4802 return count;
4803}
4804
4805
4806static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
4807{
4808 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4809 struct ixgbe_hw *hw = &adapter->hw;
4810
4811 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
4812 mac_table->pool = VMDQ_P(0);
4813
4814 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
4815
4816 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
4817 IXGBE_RAH_AV);
4818}
4819
4820int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4821 const u8 *addr, u16 pool)
4822{
4823 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4824 struct ixgbe_hw *hw = &adapter->hw;
4825 int i;
4826
4827 if (is_zero_ether_addr(addr))
4828 return -EINVAL;
4829
4830 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4831 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4832 continue;
4833
4834 ether_addr_copy(mac_table->addr, addr);
4835 mac_table->pool = pool;
4836
4837 mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
4838 IXGBE_MAC_STATE_IN_USE;
4839
4840 ixgbe_sync_mac_table(adapter);
4841
4842 return i;
4843 }
4844
4845 return -ENOMEM;
4846}
4847
4848int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
4849 const u8 *addr, u16 pool)
4850{
4851 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4852 struct ixgbe_hw *hw = &adapter->hw;
4853 int i;
4854
4855 if (is_zero_ether_addr(addr))
4856 return -EINVAL;
4857
4858
4859 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4860
4861 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
4862 continue;
4863
4864 if (mac_table->pool != pool)
4865 continue;
4866
4867 if (!ether_addr_equal(addr, mac_table->addr))
4868 continue;
4869
4870 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4871 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4872
4873 ixgbe_sync_mac_table(adapter);
4874
4875 return 0;
4876 }
4877
4878 return -ENOMEM;
4879}
4880
4881static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
4882{
4883 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4884 int ret;
4885
4886 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
4887
4888 return min_t(int, ret, 0);
4889}
4890
4891static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4892{
4893 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4894
4895 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
4896
4897 return 0;
4898}
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909void ixgbe_set_rx_mode(struct net_device *netdev)
4910{
4911 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4912 struct ixgbe_hw *hw = &adapter->hw;
4913 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4914 netdev_features_t features = netdev->features;
4915 int count;
4916
4917
4918 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4919
4920
4921 fctrl &= ~IXGBE_FCTRL_SBP;
4922 fctrl |= IXGBE_FCTRL_BAM;
4923 fctrl |= IXGBE_FCTRL_DPF;
4924 fctrl |= IXGBE_FCTRL_PMCF;
4925
4926
4927 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4928 if (netdev->flags & IFF_PROMISC) {
4929 hw->addr_ctrl.user_set_promisc = true;
4930 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4931 vmolr |= IXGBE_VMOLR_MPE;
4932 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4933 } else {
4934 if (netdev->flags & IFF_ALLMULTI) {
4935 fctrl |= IXGBE_FCTRL_MPE;
4936 vmolr |= IXGBE_VMOLR_MPE;
4937 }
4938 hw->addr_ctrl.user_set_promisc = false;
4939 }
4940
4941
4942
4943
4944
4945
4946 if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4947 fctrl |= IXGBE_FCTRL_UPE;
4948 vmolr |= IXGBE_VMOLR_ROPE;
4949 }
4950
4951
4952
4953
4954
4955 count = ixgbe_write_mc_addr_list(netdev);
4956 if (count < 0) {
4957 fctrl |= IXGBE_FCTRL_MPE;
4958 vmolr |= IXGBE_VMOLR_MPE;
4959 } else if (count) {
4960 vmolr |= IXGBE_VMOLR_ROMPE;
4961 }
4962
4963 if (hw->mac.type != ixgbe_mac_82598EB) {
4964 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4965 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
4966 IXGBE_VMOLR_ROPE);
4967 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
4968 }
4969
4970
4971 if (features & NETIF_F_RXALL) {
4972
4973
4974 fctrl |= (IXGBE_FCTRL_SBP |
4975 IXGBE_FCTRL_BAM |
4976 IXGBE_FCTRL_PMCF);
4977
4978 fctrl &= ~(IXGBE_FCTRL_DPF);
4979
4980 }
4981
4982 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4983
4984 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4985 ixgbe_vlan_strip_enable(adapter);
4986 else
4987 ixgbe_vlan_strip_disable(adapter);
4988
4989 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
4990 ixgbe_vlan_promisc_disable(adapter);
4991 else
4992 ixgbe_vlan_promisc_enable(adapter);
4993}
4994
4995static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4996{
4997 int q_idx;
4998
4999 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
5000 napi_enable(&adapter->q_vector[q_idx]->napi);
5001}
5002
5003static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
5004{
5005 int q_idx;
5006
5007 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
5008 napi_disable(&adapter->q_vector[q_idx]->napi);
5009}
5010
5011static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
5012{
5013 struct ixgbe_hw *hw = &adapter->hw;
5014 u32 vxlanctrl;
5015
5016 if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
5017 IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
5018 return;
5019
5020 vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
5021 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
5022
5023 if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
5024 adapter->vxlan_port = 0;
5025
5026 if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
5027 adapter->geneve_port = 0;
5028}
5029
5030#ifdef CONFIG_IXGBE_DCB
5031
5032
5033
5034
5035
5036
5037
5038
5039static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
5040{
5041 struct ixgbe_hw *hw = &adapter->hw;
5042 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
5043
5044 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
5045 if (hw->mac.type == ixgbe_mac_82598EB)
5046 netif_set_gso_max_size(adapter->netdev, 65536);
5047 return;
5048 }
5049
5050 if (hw->mac.type == ixgbe_mac_82598EB)
5051 netif_set_gso_max_size(adapter->netdev, 32768);
5052
5053#ifdef IXGBE_FCOE
5054 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
5055 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
5056#endif
5057
5058
5059 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
5060 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5061 DCB_TX_CONFIG);
5062 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5063 DCB_RX_CONFIG);
5064 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
5065 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
5066 ixgbe_dcb_hw_ets(&adapter->hw,
5067 adapter->ixgbe_ieee_ets,
5068 max_frame);
5069 ixgbe_dcb_hw_pfc_config(&adapter->hw,
5070 adapter->ixgbe_ieee_pfc->pfc_en,
5071 adapter->ixgbe_ieee_ets->prio_tc);
5072 }
5073
5074
5075 if (hw->mac.type != ixgbe_mac_82598EB) {
5076 u32 msb = 0;
5077 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
5078
5079 while (rss_i) {
5080 msb++;
5081 rss_i >>= 1;
5082 }
5083
5084
5085 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
5086 }
5087}
5088#endif
5089
5090
5091#define IXGBE_ETH_FRAMING 20
5092
5093
5094
5095
5096
5097
5098
5099static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
5100{
5101 struct ixgbe_hw *hw = &adapter->hw;
5102 struct net_device *dev = adapter->netdev;
5103 int link, tc, kb, marker;
5104 u32 dv_id, rx_pba;
5105
5106
5107 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
5108
5109#ifdef IXGBE_FCOE
5110
5111 if ((dev->features & NETIF_F_FCOE_MTU) &&
5112 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5113 (pb == ixgbe_fcoe_get_tc(adapter)))
5114 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5115#endif
5116
5117
5118 switch (hw->mac.type) {
5119 case ixgbe_mac_X540:
5120 case ixgbe_mac_X550:
5121 case ixgbe_mac_X550EM_x:
5122 case ixgbe_mac_x550em_a:
5123 dv_id = IXGBE_DV_X540(link, tc);
5124 break;
5125 default:
5126 dv_id = IXGBE_DV(link, tc);
5127 break;
5128 }
5129
5130
5131 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5132 dv_id += IXGBE_B2BT(tc);
5133
5134
5135 kb = IXGBE_BT2KB(dv_id);
5136 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
5137
5138 marker = rx_pba - kb;
5139
5140
5141
5142
5143
5144 if (marker < 0) {
5145 e_warn(drv, "Packet Buffer(%i) can not provide enough"
5146 "headroom to support flow control."
5147 "Decrease MTU or number of traffic classes\n", pb);
5148 marker = tc + 1;
5149 }
5150
5151 return marker;
5152}
5153
5154
5155
5156
5157
5158
5159
5160static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
5161{
5162 struct ixgbe_hw *hw = &adapter->hw;
5163 struct net_device *dev = adapter->netdev;
5164 int tc;
5165 u32 dv_id;
5166
5167
5168 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
5169
5170#ifdef IXGBE_FCOE
5171
5172 if ((dev->features & NETIF_F_FCOE_MTU) &&
5173 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5174 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
5175 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5176#endif
5177
5178
5179 switch (hw->mac.type) {
5180 case ixgbe_mac_X540:
5181 case ixgbe_mac_X550:
5182 case ixgbe_mac_X550EM_x:
5183 case ixgbe_mac_x550em_a:
5184 dv_id = IXGBE_LOW_DV_X540(tc);
5185 break;
5186 default:
5187 dv_id = IXGBE_LOW_DV(tc);
5188 break;
5189 }
5190
5191
5192 return IXGBE_BT2KB(dv_id);
5193}
5194
5195
5196
5197
5198static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
5199{
5200 struct ixgbe_hw *hw = &adapter->hw;
5201 int num_tc = adapter->hw_tcs;
5202 int i;
5203
5204 if (!num_tc)
5205 num_tc = 1;
5206
5207 for (i = 0; i < num_tc; i++) {
5208 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
5209 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
5210
5211
5212 if (hw->fc.low_water[i] > hw->fc.high_water[i])
5213 hw->fc.low_water[i] = 0;
5214 }
5215
5216 for (; i < MAX_TRAFFIC_CLASS; i++)
5217 hw->fc.high_water[i] = 0;
5218}
5219
5220static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
5221{
5222 struct ixgbe_hw *hw = &adapter->hw;
5223 int hdrm;
5224 u8 tc = adapter->hw_tcs;
5225
5226 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5227 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5228 hdrm = 32 << adapter->fdir_pballoc;
5229 else
5230 hdrm = 0;
5231
5232 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
5233 ixgbe_pbthresh_setup(adapter);
5234}
5235
5236static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
5237{
5238 struct ixgbe_hw *hw = &adapter->hw;
5239 struct hlist_node *node2;
5240 struct ixgbe_fdir_filter *filter;
5241 u64 action;
5242
5243 spin_lock(&adapter->fdir_perfect_lock);
5244
5245 if (!hlist_empty(&adapter->fdir_filter_list))
5246 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
5247
5248 hlist_for_each_entry_safe(filter, node2,
5249 &adapter->fdir_filter_list, fdir_node) {
5250 action = filter->action;
5251 if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
5252 action =
5253 (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
5254
5255 ixgbe_fdir_write_perfect_filter_82599(hw,
5256 &filter->filter,
5257 filter->sw_idx,
5258 (action == IXGBE_FDIR_DROP_QUEUE) ?
5259 IXGBE_FDIR_DROP_QUEUE :
5260 adapter->rx_ring[action]->reg_idx);
5261 }
5262
5263 spin_unlock(&adapter->fdir_perfect_lock);
5264}
5265
5266
5267
5268
5269
5270static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
5271{
5272 u16 i = rx_ring->next_to_clean;
5273 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
5274
5275 if (rx_ring->xsk_umem) {
5276 ixgbe_xsk_clean_rx_ring(rx_ring);
5277 goto skip_free;
5278 }
5279
5280
5281 while (i != rx_ring->next_to_alloc) {
5282 if (rx_buffer->skb) {
5283 struct sk_buff *skb = rx_buffer->skb;
5284 if (IXGBE_CB(skb)->page_released)
5285 dma_unmap_page_attrs(rx_ring->dev,
5286 IXGBE_CB(skb)->dma,
5287 ixgbe_rx_pg_size(rx_ring),
5288 DMA_FROM_DEVICE,
5289 IXGBE_RX_DMA_ATTR);
5290 dev_kfree_skb(skb);
5291 }
5292
5293
5294
5295
5296 dma_sync_single_range_for_cpu(rx_ring->dev,
5297 rx_buffer->dma,
5298 rx_buffer->page_offset,
5299 ixgbe_rx_bufsz(rx_ring),
5300 DMA_FROM_DEVICE);
5301
5302
5303 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
5304 ixgbe_rx_pg_size(rx_ring),
5305 DMA_FROM_DEVICE,
5306 IXGBE_RX_DMA_ATTR);
5307 __page_frag_cache_drain(rx_buffer->page,
5308 rx_buffer->pagecnt_bias);
5309
5310 i++;
5311 rx_buffer++;
5312 if (i == rx_ring->count) {
5313 i = 0;
5314 rx_buffer = rx_ring->rx_buffer_info;
5315 }
5316 }
5317
5318skip_free:
5319 rx_ring->next_to_alloc = 0;
5320 rx_ring->next_to_clean = 0;
5321 rx_ring->next_to_use = 0;
5322}
5323
5324static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
5325 struct ixgbe_fwd_adapter *accel)
5326{
5327 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
5328 int num_tc = netdev_get_num_tc(adapter->netdev);
5329 struct net_device *vdev = accel->netdev;
5330 int i, baseq, err;
5331
5332 baseq = accel->pool * adapter->num_rx_queues_per_pool;
5333 netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
5334 accel->pool, adapter->num_rx_pools,
5335 baseq, baseq + adapter->num_rx_queues_per_pool);
5336
5337 accel->rx_base_queue = baseq;
5338 accel->tx_base_queue = baseq;
5339
5340
5341 for (i = 0; i < num_tc; i++)
5342 netdev_bind_sb_channel_queue(adapter->netdev, vdev,
5343 i, rss_i, baseq + (rss_i * i));
5344
5345 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5346 adapter->rx_ring[baseq + i]->netdev = vdev;
5347
5348
5349
5350
5351 wmb();
5352
5353
5354
5355
5356 err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
5357 VMDQ_P(accel->pool));
5358 if (err >= 0)
5359 return 0;
5360
5361
5362 macvlan_release_l2fw_offload(vdev);
5363
5364 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5365 adapter->rx_ring[baseq + i]->netdev = NULL;
5366
5367 netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
5368
5369
5370 netdev_unbind_sb_channel(adapter->netdev, vdev);
5371 netdev_set_sb_channel(vdev, 0);
5372
5373 clear_bit(accel->pool, adapter->fwd_bitmask);
5374 kfree(accel);
5375
5376 return err;
5377}
5378
5379static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
5380{
5381 struct ixgbe_adapter *adapter = data;
5382 struct ixgbe_fwd_adapter *accel;
5383
5384 if (!netif_is_macvlan(vdev))
5385 return 0;
5386
5387 accel = macvlan_accel_priv(vdev);
5388 if (!accel)
5389 return 0;
5390
5391 ixgbe_fwd_ring_up(adapter, accel);
5392
5393 return 0;
5394}
5395
5396static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
5397{
5398 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5399 ixgbe_macvlan_up, adapter);
5400}
5401
5402static void ixgbe_configure(struct ixgbe_adapter *adapter)
5403{
5404 struct ixgbe_hw *hw = &adapter->hw;
5405
5406 ixgbe_configure_pb(adapter);
5407#ifdef CONFIG_IXGBE_DCB
5408 ixgbe_configure_dcb(adapter);
5409#endif
5410
5411
5412
5413
5414 ixgbe_configure_virtualization(adapter);
5415
5416 ixgbe_set_rx_mode(adapter->netdev);
5417 ixgbe_restore_vlan(adapter);
5418 ixgbe_ipsec_restore(adapter);
5419
5420 switch (hw->mac.type) {
5421 case ixgbe_mac_82599EB:
5422 case ixgbe_mac_X540:
5423 hw->mac.ops.disable_rx_buff(hw);
5424 break;
5425 default:
5426 break;
5427 }
5428
5429 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5430 ixgbe_init_fdir_signature_82599(&adapter->hw,
5431 adapter->fdir_pballoc);
5432 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5433 ixgbe_init_fdir_perfect_82599(&adapter->hw,
5434 adapter->fdir_pballoc);
5435 ixgbe_fdir_filter_restore(adapter);
5436 }
5437
5438 switch (hw->mac.type) {
5439 case ixgbe_mac_82599EB:
5440 case ixgbe_mac_X540:
5441 hw->mac.ops.enable_rx_buff(hw);
5442 break;
5443 default:
5444 break;
5445 }
5446
5447#ifdef CONFIG_IXGBE_DCA
5448
5449 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
5450 ixgbe_setup_dca(adapter);
5451#endif
5452
5453#ifdef IXGBE_FCOE
5454
5455 ixgbe_configure_fcoe(adapter);
5456
5457#endif
5458 ixgbe_configure_tx(adapter);
5459 ixgbe_configure_rx(adapter);
5460 ixgbe_configure_dfwd(adapter);
5461}
5462
5463
5464
5465
5466
5467static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
5468{
5469
5470
5471
5472
5473
5474
5475 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5476 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5477
5478 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5479 adapter->sfp_poll_time = 0;
5480}
5481
5482
5483
5484
5485
5486
5487
5488static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
5489{
5490 u32 speed;
5491 bool autoneg, link_up = false;
5492 int ret = IXGBE_ERR_LINK_SETUP;
5493
5494 if (hw->mac.ops.check_link)
5495 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
5496
5497 if (ret)
5498 return ret;
5499
5500 speed = hw->phy.autoneg_advertised;
5501 if ((!speed) && (hw->mac.ops.get_link_capabilities))
5502 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5503 &autoneg);
5504 if (ret)
5505 return ret;
5506
5507 if (hw->mac.ops.setup_link)
5508 ret = hw->mac.ops.setup_link(hw, speed, link_up);
5509
5510 return ret;
5511}
5512
5513static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5514{
5515 struct ixgbe_hw *hw = &adapter->hw;
5516 u32 gpie = 0;
5517
5518 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5519 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5520 IXGBE_GPIE_OCD;
5521 gpie |= IXGBE_GPIE_EIAME;
5522
5523
5524
5525
5526 switch (hw->mac.type) {
5527 case ixgbe_mac_82598EB:
5528 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5529 break;
5530 case ixgbe_mac_82599EB:
5531 case ixgbe_mac_X540:
5532 case ixgbe_mac_X550:
5533 case ixgbe_mac_X550EM_x:
5534 case ixgbe_mac_x550em_a:
5535 default:
5536 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5537 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5538 break;
5539 }
5540 } else {
5541
5542
5543 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5544 }
5545
5546
5547
5548
5549 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
5550 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5551
5552 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
5553 case IXGBE_82599_VMDQ_8Q_MASK:
5554 gpie |= IXGBE_GPIE_VTMODE_16;
5555 break;
5556 case IXGBE_82599_VMDQ_4Q_MASK:
5557 gpie |= IXGBE_GPIE_VTMODE_32;
5558 break;
5559 default:
5560 gpie |= IXGBE_GPIE_VTMODE_64;
5561 break;
5562 }
5563 }
5564
5565
5566 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
5567 switch (adapter->hw.mac.type) {
5568 case ixgbe_mac_82599EB:
5569 gpie |= IXGBE_SDP0_GPIEN_8259X;
5570 break;
5571 default:
5572 break;
5573 }
5574 }
5575
5576
5577 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
5578 gpie |= IXGBE_SDP1_GPIEN(hw);
5579
5580 switch (hw->mac.type) {
5581 case ixgbe_mac_82599EB:
5582 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5583 break;
5584 case ixgbe_mac_X550EM_x:
5585 case ixgbe_mac_x550em_a:
5586 gpie |= IXGBE_SDP0_GPIEN_X540;
5587 break;
5588 default:
5589 break;
5590 }
5591
5592 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5593}
5594
5595static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
5596{
5597 struct ixgbe_hw *hw = &adapter->hw;
5598 int err;
5599 u32 ctrl_ext;
5600
5601 ixgbe_get_hw_control(adapter);
5602 ixgbe_setup_gpie(adapter);
5603
5604 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
5605 ixgbe_configure_msix(adapter);
5606 else
5607 ixgbe_configure_msi_and_legacy(adapter);
5608
5609
5610 if (hw->mac.ops.enable_tx_laser)
5611 hw->mac.ops.enable_tx_laser(hw);
5612
5613 if (hw->phy.ops.set_phy_power)
5614 hw->phy.ops.set_phy_power(hw, true);
5615
5616 smp_mb__before_atomic();
5617 clear_bit(__IXGBE_DOWN, &adapter->state);
5618 ixgbe_napi_enable_all(adapter);
5619
5620 if (ixgbe_is_sfp(hw)) {
5621 ixgbe_sfp_link_config(adapter);
5622 } else {
5623 err = ixgbe_non_sfp_link_config(hw);
5624 if (err)
5625 e_err(probe, "link_config FAILED %d\n", err);
5626 }
5627
5628
5629 IXGBE_READ_REG(hw, IXGBE_EICR);
5630 ixgbe_irq_enable(adapter, true, true);
5631
5632
5633
5634
5635
5636 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5637 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5638 if (esdp & IXGBE_ESDP_SDP1)
5639 e_crit(drv, "Fan has stopped, replace the adapter\n");
5640 }
5641
5642
5643
5644 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5645 adapter->link_check_timeout = jiffies;
5646 mod_timer(&adapter->service_timer, jiffies);
5647
5648
5649 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5650 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5651 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5652}
5653
5654void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5655{
5656 WARN_ON(in_interrupt());
5657
5658 netif_trans_update(adapter->netdev);
5659
5660 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5661 usleep_range(1000, 2000);
5662 if (adapter->hw.phy.type == ixgbe_phy_fw)
5663 ixgbe_watchdog_link_is_down(adapter);
5664 ixgbe_down(adapter);
5665
5666
5667
5668
5669
5670
5671 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5672 msleep(2000);
5673 ixgbe_up(adapter);
5674 clear_bit(__IXGBE_RESETTING, &adapter->state);
5675}
5676
5677void ixgbe_up(struct ixgbe_adapter *adapter)
5678{
5679
5680 ixgbe_configure(adapter);
5681
5682 ixgbe_up_complete(adapter);
5683}
5684
5685static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
5686{
5687 u16 devctl2;
5688
5689 pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
5690
5691 switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
5692 case IXGBE_PCIDEVCTRL2_17_34s:
5693 case IXGBE_PCIDEVCTRL2_4_8s:
5694
5695
5696
5697
5698 case IXGBE_PCIDEVCTRL2_1_2s:
5699 return 2000000ul;
5700 case IXGBE_PCIDEVCTRL2_260_520ms:
5701 return 520000ul;
5702 case IXGBE_PCIDEVCTRL2_65_130ms:
5703 return 130000ul;
5704 case IXGBE_PCIDEVCTRL2_16_32ms:
5705 return 32000ul;
5706 case IXGBE_PCIDEVCTRL2_1_2ms:
5707 return 2000ul;
5708 case IXGBE_PCIDEVCTRL2_50_100us:
5709 return 100ul;
5710 case IXGBE_PCIDEVCTRL2_16_32ms_def:
5711 return 32000ul;
5712 default:
5713 break;
5714 }
5715
5716
5717
5718
5719 return 32000ul;
5720}
5721
5722void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
5723{
5724 unsigned long wait_delay, delay_interval;
5725 struct ixgbe_hw *hw = &adapter->hw;
5726 int i, wait_loop;
5727 u32 rxdctl;
5728
5729
5730 hw->mac.ops.disable_rx(hw);
5731
5732 if (ixgbe_removed(hw->hw_addr))
5733 return;
5734
5735
5736 for (i = 0; i < adapter->num_rx_queues; i++) {
5737 struct ixgbe_ring *ring = adapter->rx_ring[i];
5738 u8 reg_idx = ring->reg_idx;
5739
5740 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5741 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5742 rxdctl |= IXGBE_RXDCTL_SWFLSH;
5743
5744
5745 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
5746 }
5747
5748
5749 if (hw->mac.type == ixgbe_mac_82598EB &&
5750 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5751 return;
5752
5753
5754
5755
5756
5757
5758
5759
5760
5761
5762
5763
5764 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5765
5766 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5767 wait_delay = delay_interval;
5768
5769 while (wait_loop--) {
5770 usleep_range(wait_delay, wait_delay + 10);
5771 wait_delay += delay_interval * 2;
5772 rxdctl = 0;
5773
5774
5775
5776
5777
5778
5779 for (i = 0; i < adapter->num_rx_queues; i++) {
5780 struct ixgbe_ring *ring = adapter->rx_ring[i];
5781 u8 reg_idx = ring->reg_idx;
5782
5783 rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5784 }
5785
5786 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
5787 return;
5788 }
5789
5790 e_err(drv,
5791 "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5792}
5793
5794void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
5795{
5796 unsigned long wait_delay, delay_interval;
5797 struct ixgbe_hw *hw = &adapter->hw;
5798 int i, wait_loop;
5799 u32 txdctl;
5800
5801 if (ixgbe_removed(hw->hw_addr))
5802 return;
5803
5804
5805 for (i = 0; i < adapter->num_tx_queues; i++) {
5806 struct ixgbe_ring *ring = adapter->tx_ring[i];
5807 u8 reg_idx = ring->reg_idx;
5808
5809 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5810 }
5811
5812
5813 for (i = 0; i < adapter->num_xdp_queues; i++) {
5814 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5815 u8 reg_idx = ring->reg_idx;
5816
5817 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5818 }
5819
5820
5821
5822
5823
5824
5825 if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5826 goto dma_engine_disable;
5827
5828
5829
5830
5831
5832
5833
5834
5835
5836
5837
5838
5839 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5840
5841 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5842 wait_delay = delay_interval;
5843
5844 while (wait_loop--) {
5845 usleep_range(wait_delay, wait_delay + 10);
5846 wait_delay += delay_interval * 2;
5847 txdctl = 0;
5848
5849
5850
5851
5852
5853
5854 for (i = 0; i < adapter->num_tx_queues; i++) {
5855 struct ixgbe_ring *ring = adapter->tx_ring[i];
5856 u8 reg_idx = ring->reg_idx;
5857
5858 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5859 }
5860 for (i = 0; i < adapter->num_xdp_queues; i++) {
5861 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5862 u8 reg_idx = ring->reg_idx;
5863
5864 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5865 }
5866
5867 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
5868 goto dma_engine_disable;
5869 }
5870
5871 e_err(drv,
5872 "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5873
5874dma_engine_disable:
5875
5876 switch (hw->mac.type) {
5877 case ixgbe_mac_82599EB:
5878 case ixgbe_mac_X540:
5879 case ixgbe_mac_X550:
5880 case ixgbe_mac_X550EM_x:
5881 case ixgbe_mac_x550em_a:
5882 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5883 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5884 ~IXGBE_DMATXCTL_TE));
5885
5886 default:
5887 break;
5888 }
5889}
5890
5891void ixgbe_reset(struct ixgbe_adapter *adapter)
5892{
5893 struct ixgbe_hw *hw = &adapter->hw;
5894 struct net_device *netdev = adapter->netdev;
5895 int err;
5896
5897 if (ixgbe_removed(hw->hw_addr))
5898 return;
5899
5900 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5901 usleep_range(1000, 2000);
5902
5903
5904 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5905 IXGBE_FLAG2_SFP_NEEDS_RESET);
5906 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5907
5908 err = hw->mac.ops.init_hw(hw);
5909 switch (err) {
5910 case 0:
5911 case IXGBE_ERR_SFP_NOT_PRESENT:
5912 case IXGBE_ERR_SFP_NOT_SUPPORTED:
5913 break;
5914 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5915 e_dev_err("master disable timed out\n");
5916 break;
5917 case IXGBE_ERR_EEPROM_VERSION:
5918
5919 e_dev_warn("This device is a pre-production adapter/LOM. "
5920 "Please be aware there may be issues associated with "
5921 "your hardware. If you are experiencing problems "
5922 "please contact your Intel or hardware "
5923 "representative who provided you with this "
5924 "hardware.\n");
5925 break;
5926 default:
5927 e_dev_err("Hardware Error: %d\n", err);
5928 }
5929
5930 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5931
5932
5933 ixgbe_flush_sw_mac_table(adapter);
5934 __dev_uc_unsync(netdev, NULL);
5935
5936
5937 ixgbe_mac_set_default_filter(adapter);
5938
5939
5940 if (hw->mac.san_mac_rar_index)
5941 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5942
5943 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5944 ixgbe_ptp_reset(adapter);
5945
5946 if (hw->phy.ops.set_phy_power) {
5947 if (!netif_running(adapter->netdev) && !adapter->wol)
5948 hw->phy.ops.set_phy_power(hw, false);
5949 else
5950 hw->phy.ops.set_phy_power(hw, true);
5951 }
5952}
5953
5954
5955
5956
5957
5958static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5959{
5960 u16 i = tx_ring->next_to_clean;
5961 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
5962
5963 if (tx_ring->xsk_umem) {
5964 ixgbe_xsk_clean_tx_ring(tx_ring);
5965 goto out;
5966 }
5967
5968 while (i != tx_ring->next_to_use) {
5969 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
5970
5971
5972 if (ring_is_xdp(tx_ring))
5973 xdp_return_frame(tx_buffer->xdpf);
5974 else
5975 dev_kfree_skb_any(tx_buffer->skb);
5976
5977
5978 dma_unmap_single(tx_ring->dev,
5979 dma_unmap_addr(tx_buffer, dma),
5980 dma_unmap_len(tx_buffer, len),
5981 DMA_TO_DEVICE);
5982
5983
5984 eop_desc = tx_buffer->next_to_watch;
5985 tx_desc = IXGBE_TX_DESC(tx_ring, i);
5986
5987
5988 while (tx_desc != eop_desc) {
5989 tx_buffer++;
5990 tx_desc++;
5991 i++;
5992 if (unlikely(i == tx_ring->count)) {
5993 i = 0;
5994 tx_buffer = tx_ring->tx_buffer_info;
5995 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
5996 }
5997
5998
5999 if (dma_unmap_len(tx_buffer, len))
6000 dma_unmap_page(tx_ring->dev,
6001 dma_unmap_addr(tx_buffer, dma),
6002 dma_unmap_len(tx_buffer, len),
6003 DMA_TO_DEVICE);
6004 }
6005
6006
6007 tx_buffer++;
6008 i++;
6009 if (unlikely(i == tx_ring->count)) {
6010 i = 0;
6011 tx_buffer = tx_ring->tx_buffer_info;
6012 }
6013 }
6014
6015
6016 if (!ring_is_xdp(tx_ring))
6017 netdev_tx_reset_queue(txring_txq(tx_ring));
6018
6019out:
6020
6021 tx_ring->next_to_use = 0;
6022 tx_ring->next_to_clean = 0;
6023}
6024
6025
6026
6027
6028
6029static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
6030{
6031 int i;
6032
6033 for (i = 0; i < adapter->num_rx_queues; i++)
6034 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
6035}
6036
6037
6038
6039
6040
6041static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
6042{
6043 int i;
6044
6045 for (i = 0; i < adapter->num_tx_queues; i++)
6046 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
6047 for (i = 0; i < adapter->num_xdp_queues; i++)
6048 ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
6049}
6050
6051static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
6052{
6053 struct hlist_node *node2;
6054 struct ixgbe_fdir_filter *filter;
6055
6056 spin_lock(&adapter->fdir_perfect_lock);
6057
6058 hlist_for_each_entry_safe(filter, node2,
6059 &adapter->fdir_filter_list, fdir_node) {
6060 hlist_del(&filter->fdir_node);
6061 kfree(filter);
6062 }
6063 adapter->fdir_filter_count = 0;
6064
6065 spin_unlock(&adapter->fdir_perfect_lock);
6066}
6067
6068void ixgbe_down(struct ixgbe_adapter *adapter)
6069{
6070 struct net_device *netdev = adapter->netdev;
6071 struct ixgbe_hw *hw = &adapter->hw;
6072 int i;
6073
6074
6075 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
6076 return;
6077
6078
6079 netif_tx_stop_all_queues(netdev);
6080
6081
6082 netif_carrier_off(netdev);
6083 netif_tx_disable(netdev);
6084
6085
6086 ixgbe_disable_rx(adapter);
6087
6088
6089 if (adapter->xdp_ring[0])
6090 synchronize_rcu();
6091
6092 ixgbe_irq_disable(adapter);
6093
6094 ixgbe_napi_disable_all(adapter);
6095
6096 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
6097 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
6098 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6099
6100 del_timer_sync(&adapter->service_timer);
6101
6102 if (adapter->num_vfs) {
6103
6104 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
6105
6106
6107 for (i = 0 ; i < adapter->num_vfs; i++)
6108 adapter->vfinfo[i].clear_to_send = false;
6109
6110
6111 ixgbe_ping_all_vfs(adapter);
6112
6113
6114 ixgbe_disable_tx_rx(adapter);
6115 }
6116
6117
6118 ixgbe_disable_tx(adapter);
6119
6120 if (!pci_channel_offline(adapter->pdev))
6121 ixgbe_reset(adapter);
6122
6123
6124 if (hw->mac.ops.disable_tx_laser)
6125 hw->mac.ops.disable_tx_laser(hw);
6126
6127 ixgbe_clean_all_tx_rings(adapter);
6128 ixgbe_clean_all_rx_rings(adapter);
6129}
6130
6131
6132
6133
6134
6135static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
6136{
6137 struct ixgbe_hw *hw = &adapter->hw;
6138
6139 switch (hw->device_id) {
6140 case IXGBE_DEV_ID_X550EM_A_1G_T:
6141 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6142 if (!hw->phy.eee_speeds_supported)
6143 break;
6144 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
6145 if (!hw->phy.eee_speeds_advertised)
6146 break;
6147 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
6148 break;
6149 default:
6150 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
6151 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
6152 break;
6153 }
6154}
6155
6156
6157
6158
6159
6160static void ixgbe_tx_timeout(struct net_device *netdev)
6161{
6162 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6163
6164
6165 ixgbe_tx_timeout_reset(adapter);
6166}
6167
6168#ifdef CONFIG_IXGBE_DCB
6169static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
6170{
6171 struct ixgbe_hw *hw = &adapter->hw;
6172 struct tc_configuration *tc;
6173 int j;
6174
6175 switch (hw->mac.type) {
6176 case ixgbe_mac_82598EB:
6177 case ixgbe_mac_82599EB:
6178 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
6179 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
6180 break;
6181 case ixgbe_mac_X540:
6182 case ixgbe_mac_X550:
6183 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
6184 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
6185 break;
6186 case ixgbe_mac_X550EM_x:
6187 case ixgbe_mac_x550em_a:
6188 default:
6189 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
6190 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
6191 break;
6192 }
6193
6194
6195 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
6196 tc = &adapter->dcb_cfg.tc_config[j];
6197 tc->path[DCB_TX_CONFIG].bwg_id = 0;
6198 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
6199 tc->path[DCB_RX_CONFIG].bwg_id = 0;
6200 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
6201 tc->dcb_pfc = pfc_disabled;
6202 }
6203
6204
6205 tc = &adapter->dcb_cfg.tc_config[0];
6206 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
6207 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
6208
6209 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
6210 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
6211 adapter->dcb_cfg.pfc_mode_enable = false;
6212 adapter->dcb_set_bitmap = 0x00;
6213 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
6214 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
6215 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
6216 sizeof(adapter->temp_dcb_cfg));
6217}
6218#endif
6219
6220
6221
6222
6223
6224
6225
6226
6227
6228
6229static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
6230 const struct ixgbe_info *ii)
6231{
6232 struct ixgbe_hw *hw = &adapter->hw;
6233 struct pci_dev *pdev = adapter->pdev;
6234 unsigned int rss, fdir;
6235 u32 fwsm;
6236 int i;
6237
6238
6239
6240 hw->vendor_id = pdev->vendor;
6241 hw->device_id = pdev->device;
6242 hw->revision_id = pdev->revision;
6243 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6244 hw->subsystem_device_id = pdev->subsystem_device;
6245
6246
6247 ii->get_invariants(hw);
6248
6249
6250 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
6251 adapter->ring_feature[RING_F_RSS].limit = rss;
6252 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
6253 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
6254 adapter->atr_sample_rate = 20;
6255 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
6256 adapter->ring_feature[RING_F_FDIR].limit = fdir;
6257 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
6258 adapter->ring_feature[RING_F_VMDQ].limit = 1;
6259#ifdef CONFIG_IXGBE_DCA
6260 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
6261#endif
6262#ifdef CONFIG_IXGBE_DCB
6263 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
6264 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6265#endif
6266#ifdef IXGBE_FCOE
6267 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
6268 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6269#ifdef CONFIG_IXGBE_DCB
6270
6271 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
6272#endif
6273#endif
6274
6275
6276 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
6277 GFP_KERNEL);
6278 if (!adapter->jump_tables[0])
6279 return -ENOMEM;
6280 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
6281
6282 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
6283 adapter->jump_tables[i] = NULL;
6284
6285 adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
6286 sizeof(struct ixgbe_mac_addr),
6287 GFP_KERNEL);
6288 if (!adapter->mac_table)
6289 return -ENOMEM;
6290
6291 if (ixgbe_init_rss_key(adapter))
6292 return -ENOMEM;
6293
6294 adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
6295 if (!adapter->af_xdp_zc_qps)
6296 return -ENOMEM;
6297
6298
6299 switch (hw->mac.type) {
6300 case ixgbe_mac_82598EB:
6301 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
6302
6303 if (hw->device_id == IXGBE_DEV_ID_82598AT)
6304 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
6305
6306 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
6307 adapter->ring_feature[RING_F_FDIR].limit = 0;
6308 adapter->atr_sample_rate = 0;
6309 adapter->fdir_pballoc = 0;
6310#ifdef IXGBE_FCOE
6311 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6312 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6313#ifdef CONFIG_IXGBE_DCB
6314 adapter->fcoe.up = 0;
6315#endif
6316#endif
6317 break;
6318 case ixgbe_mac_82599EB:
6319 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
6320 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6321 break;
6322 case ixgbe_mac_X540:
6323 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
6324 if (fwsm & IXGBE_FWSM_TS_ENABLED)
6325 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6326 break;
6327 case ixgbe_mac_x550em_a:
6328 adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
6329 switch (hw->device_id) {
6330 case IXGBE_DEV_ID_X550EM_A_1G_T:
6331 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6332 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6333 break;
6334 default:
6335 break;
6336 }
6337
6338 case ixgbe_mac_X550EM_x:
6339#ifdef CONFIG_IXGBE_DCB
6340 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
6341#endif
6342#ifdef IXGBE_FCOE
6343 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6344#ifdef CONFIG_IXGBE_DCB
6345 adapter->fcoe.up = 0;
6346#endif
6347#endif
6348
6349 case ixgbe_mac_X550:
6350 if (hw->mac.type == ixgbe_mac_X550)
6351 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6352#ifdef CONFIG_IXGBE_DCA
6353 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
6354#endif
6355 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
6356 break;
6357 default:
6358 break;
6359 }
6360
6361#ifdef IXGBE_FCOE
6362
6363 spin_lock_init(&adapter->fcoe.lock);
6364
6365#endif
6366
6367 spin_lock_init(&adapter->fdir_perfect_lock);
6368
6369#ifdef CONFIG_IXGBE_DCB
6370 ixgbe_init_dcb(adapter);
6371#endif
6372 ixgbe_init_ipsec_offload(adapter);
6373
6374
6375 hw->fc.requested_mode = ixgbe_fc_full;
6376 hw->fc.current_mode = ixgbe_fc_full;
6377 ixgbe_pbthresh_setup(adapter);
6378 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
6379 hw->fc.send_xon = true;
6380 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
6381
6382#ifdef CONFIG_PCI_IOV
6383 if (max_vfs > 0)
6384 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
6385
6386
6387 if (hw->mac.type != ixgbe_mac_82598EB) {
6388 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
6389 max_vfs = 0;
6390 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
6391 }
6392 }
6393#endif
6394
6395
6396 adapter->rx_itr_setting = 1;
6397 adapter->tx_itr_setting = 1;
6398
6399
6400 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
6401 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
6402
6403
6404 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
6405
6406
6407 if (ixgbe_init_eeprom_params_generic(hw)) {
6408 e_dev_err("EEPROM initialization failed\n");
6409 return -EIO;
6410 }
6411
6412
6413 set_bit(0, adapter->fwd_bitmask);
6414 set_bit(__IXGBE_DOWN, &adapter->state);
6415
6416 return 0;
6417}
6418
6419
6420
6421
6422
6423
6424
6425int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
6426{
6427 struct device *dev = tx_ring->dev;
6428 int orig_node = dev_to_node(dev);
6429 int ring_node = NUMA_NO_NODE;
6430 int size;
6431
6432 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
6433
6434 if (tx_ring->q_vector)
6435 ring_node = tx_ring->q_vector->numa_node;
6436
6437 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
6438 if (!tx_ring->tx_buffer_info)
6439 tx_ring->tx_buffer_info = vmalloc(size);
6440 if (!tx_ring->tx_buffer_info)
6441 goto err;
6442
6443
6444 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
6445 tx_ring->size = ALIGN(tx_ring->size, 4096);
6446
6447 set_dev_node(dev, ring_node);
6448 tx_ring->desc = dma_alloc_coherent(dev,
6449 tx_ring->size,
6450 &tx_ring->dma,
6451 GFP_KERNEL);
6452 set_dev_node(dev, orig_node);
6453 if (!tx_ring->desc)
6454 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6455 &tx_ring->dma, GFP_KERNEL);
6456 if (!tx_ring->desc)
6457 goto err;
6458
6459 tx_ring->next_to_use = 0;
6460 tx_ring->next_to_clean = 0;
6461 return 0;
6462
6463err:
6464 vfree(tx_ring->tx_buffer_info);
6465 tx_ring->tx_buffer_info = NULL;
6466 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
6467 return -ENOMEM;
6468}
6469
6470
6471
6472
6473
6474
6475
6476
6477
6478
6479
6480static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
6481{
6482 int i, j = 0, err = 0;
6483
6484 for (i = 0; i < adapter->num_tx_queues; i++) {
6485 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6486 if (!err)
6487 continue;
6488
6489 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
6490 goto err_setup_tx;
6491 }
6492 for (j = 0; j < adapter->num_xdp_queues; j++) {
6493 err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
6494 if (!err)
6495 continue;
6496
6497 e_err(probe, "Allocation for Tx Queue %u failed\n", j);
6498 goto err_setup_tx;
6499 }
6500
6501 return 0;
6502err_setup_tx:
6503
6504 while (j--)
6505 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
6506 while (i--)
6507 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6508 return err;
6509}
6510
6511
6512
6513
6514
6515
6516
6517
6518int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
6519 struct ixgbe_ring *rx_ring)
6520{
6521 struct device *dev = rx_ring->dev;
6522 int orig_node = dev_to_node(dev);
6523 int ring_node = NUMA_NO_NODE;
6524 int size;
6525
6526 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
6527
6528 if (rx_ring->q_vector)
6529 ring_node = rx_ring->q_vector->numa_node;
6530
6531 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
6532 if (!rx_ring->rx_buffer_info)
6533 rx_ring->rx_buffer_info = vmalloc(size);
6534 if (!rx_ring->rx_buffer_info)
6535 goto err;
6536
6537
6538 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
6539 rx_ring->size = ALIGN(rx_ring->size, 4096);
6540
6541 set_dev_node(dev, ring_node);
6542 rx_ring->desc = dma_alloc_coherent(dev,
6543 rx_ring->size,
6544 &rx_ring->dma,
6545 GFP_KERNEL);
6546 set_dev_node(dev, orig_node);
6547 if (!rx_ring->desc)
6548 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
6549 &rx_ring->dma, GFP_KERNEL);
6550 if (!rx_ring->desc)
6551 goto err;
6552
6553 rx_ring->next_to_clean = 0;
6554 rx_ring->next_to_use = 0;
6555
6556
6557 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
6558 rx_ring->queue_index) < 0)
6559 goto err;
6560
6561 rx_ring->xdp_prog = adapter->xdp_prog;
6562
6563 return 0;
6564err:
6565 vfree(rx_ring->rx_buffer_info);
6566 rx_ring->rx_buffer_info = NULL;
6567 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
6568 return -ENOMEM;
6569}
6570
6571
6572
6573
6574
6575
6576
6577
6578
6579
6580
6581static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
6582{
6583 int i, err = 0;
6584
6585 for (i = 0; i < adapter->num_rx_queues; i++) {
6586 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
6587 if (!err)
6588 continue;
6589
6590 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
6591 goto err_setup_rx;
6592 }
6593
6594#ifdef IXGBE_FCOE
6595 err = ixgbe_setup_fcoe_ddp_resources(adapter);
6596 if (!err)
6597#endif
6598 return 0;
6599err_setup_rx:
6600
6601 while (i--)
6602 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6603 return err;
6604}
6605
6606
6607
6608
6609
6610
6611
6612void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
6613{
6614 ixgbe_clean_tx_ring(tx_ring);
6615
6616 vfree(tx_ring->tx_buffer_info);
6617 tx_ring->tx_buffer_info = NULL;
6618
6619
6620 if (!tx_ring->desc)
6621 return;
6622
6623 dma_free_coherent(tx_ring->dev, tx_ring->size,
6624 tx_ring->desc, tx_ring->dma);
6625
6626 tx_ring->desc = NULL;
6627}
6628
6629
6630
6631
6632
6633
6634
6635static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
6636{
6637 int i;
6638
6639 for (i = 0; i < adapter->num_tx_queues; i++)
6640 if (adapter->tx_ring[i]->desc)
6641 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6642 for (i = 0; i < adapter->num_xdp_queues; i++)
6643 if (adapter->xdp_ring[i]->desc)
6644 ixgbe_free_tx_resources(adapter->xdp_ring[i]);
6645}
6646
6647
6648
6649
6650
6651
6652
6653void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
6654{
6655 ixgbe_clean_rx_ring(rx_ring);
6656
6657 rx_ring->xdp_prog = NULL;
6658 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
6659 vfree(rx_ring->rx_buffer_info);
6660 rx_ring->rx_buffer_info = NULL;
6661
6662
6663 if (!rx_ring->desc)
6664 return;
6665
6666 dma_free_coherent(rx_ring->dev, rx_ring->size,
6667 rx_ring->desc, rx_ring->dma);
6668
6669 rx_ring->desc = NULL;
6670}
6671
6672
6673
6674
6675
6676
6677
6678static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
6679{
6680 int i;
6681
6682#ifdef IXGBE_FCOE
6683 ixgbe_free_fcoe_ddp_resources(adapter);
6684
6685#endif
6686 for (i = 0; i < adapter->num_rx_queues; i++)
6687 if (adapter->rx_ring[i]->desc)
6688 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6689}
6690
6691
6692
6693
6694
6695
6696
6697
6698static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6699{
6700 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6701
6702 if (adapter->xdp_prog) {
6703 int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
6704 VLAN_HLEN;
6705 int i;
6706
6707 for (i = 0; i < adapter->num_rx_queues; i++) {
6708 struct ixgbe_ring *ring = adapter->rx_ring[i];
6709
6710 if (new_frame_size > ixgbe_rx_bufsz(ring)) {
6711 e_warn(probe, "Requested MTU size is not supported with XDP\n");
6712 return -EINVAL;
6713 }
6714 }
6715 }
6716
6717
6718
6719
6720
6721
6722 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
6723 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
6724 (new_mtu > ETH_DATA_LEN))
6725 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
6726
6727 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
6728
6729
6730 netdev->mtu = new_mtu;
6731
6732 if (netif_running(netdev))
6733 ixgbe_reinit_locked(adapter);
6734
6735 return 0;
6736}
6737
6738
6739
6740
6741
6742
6743
6744
6745
6746
6747
6748
6749
6750int ixgbe_open(struct net_device *netdev)
6751{
6752 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6753 struct ixgbe_hw *hw = &adapter->hw;
6754 int err, queues;
6755
6756
6757 if (test_bit(__IXGBE_TESTING, &adapter->state))
6758 return -EBUSY;
6759
6760 netif_carrier_off(netdev);
6761
6762
6763 err = ixgbe_setup_all_tx_resources(adapter);
6764 if (err)
6765 goto err_setup_tx;
6766
6767
6768 err = ixgbe_setup_all_rx_resources(adapter);
6769 if (err)
6770 goto err_setup_rx;
6771
6772 ixgbe_configure(adapter);
6773
6774 err = ixgbe_request_irq(adapter);
6775 if (err)
6776 goto err_req_irq;
6777
6778
6779 queues = adapter->num_tx_queues;
6780 err = netif_set_real_num_tx_queues(netdev, queues);
6781 if (err)
6782 goto err_set_queues;
6783
6784 queues = adapter->num_rx_queues;
6785 err = netif_set_real_num_rx_queues(netdev, queues);
6786 if (err)
6787 goto err_set_queues;
6788
6789 ixgbe_ptp_init(adapter);
6790
6791 ixgbe_up_complete(adapter);
6792
6793 ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
6794 udp_tunnel_get_rx_info(netdev);
6795
6796 return 0;
6797
6798err_set_queues:
6799 ixgbe_free_irq(adapter);
6800err_req_irq:
6801 ixgbe_free_all_rx_resources(adapter);
6802 if (hw->phy.ops.set_phy_power && !adapter->wol)
6803 hw->phy.ops.set_phy_power(&adapter->hw, false);
6804err_setup_rx:
6805 ixgbe_free_all_tx_resources(adapter);
6806err_setup_tx:
6807 ixgbe_reset(adapter);
6808
6809 return err;
6810}
6811
6812static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6813{
6814 ixgbe_ptp_suspend(adapter);
6815
6816 if (adapter->hw.phy.ops.enter_lplu) {
6817 adapter->hw.phy.reset_disable = true;
6818 ixgbe_down(adapter);
6819 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6820 adapter->hw.phy.reset_disable = false;
6821 } else {
6822 ixgbe_down(adapter);
6823 }
6824
6825 ixgbe_free_irq(adapter);
6826
6827 ixgbe_free_all_tx_resources(adapter);
6828 ixgbe_free_all_rx_resources(adapter);
6829}
6830
6831
6832
6833
6834
6835
6836
6837
6838
6839
6840
6841
6842int ixgbe_close(struct net_device *netdev)
6843{
6844 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6845
6846 ixgbe_ptp_stop(adapter);
6847
6848 if (netif_device_present(netdev))
6849 ixgbe_close_suspend(adapter);
6850
6851 ixgbe_fdir_filter_exit(adapter);
6852
6853 ixgbe_release_hw_control(adapter);
6854
6855 return 0;
6856}
6857
6858#ifdef CONFIG_PM
6859static int ixgbe_resume(struct pci_dev *pdev)
6860{
6861 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6862 struct net_device *netdev = adapter->netdev;
6863 u32 err;
6864
6865 adapter->hw.hw_addr = adapter->io_addr;
6866 pci_set_power_state(pdev, PCI_D0);
6867 pci_restore_state(pdev);
6868
6869
6870
6871
6872 pci_save_state(pdev);
6873
6874 err = pci_enable_device_mem(pdev);
6875 if (err) {
6876 e_dev_err("Cannot enable PCI device from suspend\n");
6877 return err;
6878 }
6879 smp_mb__before_atomic();
6880 clear_bit(__IXGBE_DISABLED, &adapter->state);
6881 pci_set_master(pdev);
6882
6883 pci_wake_from_d3(pdev, false);
6884
6885 ixgbe_reset(adapter);
6886
6887 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6888
6889 rtnl_lock();
6890 err = ixgbe_init_interrupt_scheme(adapter);
6891 if (!err && netif_running(netdev))
6892 err = ixgbe_open(netdev);
6893
6894
6895 if (!err)
6896 netif_device_attach(netdev);
6897 rtnl_unlock();
6898
6899 return err;
6900}
6901#endif
6902
6903static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6904{
6905 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6906 struct net_device *netdev = adapter->netdev;
6907 struct ixgbe_hw *hw = &adapter->hw;
6908 u32 ctrl;
6909 u32 wufc = adapter->wol;
6910#ifdef CONFIG_PM
6911 int retval = 0;
6912#endif
6913
6914 rtnl_lock();
6915 netif_device_detach(netdev);
6916
6917 if (netif_running(netdev))
6918 ixgbe_close_suspend(adapter);
6919
6920 ixgbe_clear_interrupt_scheme(adapter);
6921 rtnl_unlock();
6922
6923#ifdef CONFIG_PM
6924 retval = pci_save_state(pdev);
6925 if (retval)
6926 return retval;
6927
6928#endif
6929 if (hw->mac.ops.stop_link_on_d3)
6930 hw->mac.ops.stop_link_on_d3(hw);
6931
6932 if (wufc) {
6933 u32 fctrl;
6934
6935 ixgbe_set_rx_mode(netdev);
6936
6937
6938 if (hw->mac.ops.enable_tx_laser)
6939 hw->mac.ops.enable_tx_laser(hw);
6940
6941
6942 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6943 fctrl |= IXGBE_FCTRL_MPE;
6944 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6945
6946 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
6947 ctrl |= IXGBE_CTRL_GIO_DIS;
6948 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
6949
6950 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
6951 } else {
6952 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
6953 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
6954 }
6955
6956 switch (hw->mac.type) {
6957 case ixgbe_mac_82598EB:
6958 pci_wake_from_d3(pdev, false);
6959 break;
6960 case ixgbe_mac_82599EB:
6961 case ixgbe_mac_X540:
6962 case ixgbe_mac_X550:
6963 case ixgbe_mac_X550EM_x:
6964 case ixgbe_mac_x550em_a:
6965 pci_wake_from_d3(pdev, !!wufc);
6966 break;
6967 default:
6968 break;
6969 }
6970
6971 *enable_wake = !!wufc;
6972 if (hw->phy.ops.set_phy_power && !*enable_wake)
6973 hw->phy.ops.set_phy_power(hw, false);
6974
6975 ixgbe_release_hw_control(adapter);
6976
6977 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6978 pci_disable_device(pdev);
6979
6980 return 0;
6981}
6982
6983#ifdef CONFIG_PM
6984static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
6985{
6986 int retval;
6987 bool wake;
6988
6989 retval = __ixgbe_shutdown(pdev, &wake);
6990 if (retval)
6991 return retval;
6992
6993 if (wake) {
6994 pci_prepare_to_sleep(pdev);
6995 } else {
6996 pci_wake_from_d3(pdev, false);
6997 pci_set_power_state(pdev, PCI_D3hot);
6998 }
6999
7000 return 0;
7001}
7002#endif
7003
7004static void ixgbe_shutdown(struct pci_dev *pdev)
7005{
7006 bool wake;
7007
7008 __ixgbe_shutdown(pdev, &wake);
7009
7010 if (system_state == SYSTEM_POWER_OFF) {
7011 pci_wake_from_d3(pdev, wake);
7012 pci_set_power_state(pdev, PCI_D3hot);
7013 }
7014}
7015
7016
7017
7018
7019
7020void ixgbe_update_stats(struct ixgbe_adapter *adapter)
7021{
7022 struct net_device *netdev = adapter->netdev;
7023 struct ixgbe_hw *hw = &adapter->hw;
7024 struct ixgbe_hw_stats *hwstats = &adapter->stats;
7025 u64 total_mpc = 0;
7026 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
7027 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
7028 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
7029 u64 alloc_rx_page = 0;
7030 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
7031
7032 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7033 test_bit(__IXGBE_RESETTING, &adapter->state))
7034 return;
7035
7036 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
7037 u64 rsc_count = 0;
7038 u64 rsc_flush = 0;
7039 for (i = 0; i < adapter->num_rx_queues; i++) {
7040 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
7041 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
7042 }
7043 adapter->rsc_total_count = rsc_count;
7044 adapter->rsc_total_flush = rsc_flush;
7045 }
7046
7047 for (i = 0; i < adapter->num_rx_queues; i++) {
7048 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
7049 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
7050 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
7051 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
7052 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
7053 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
7054 bytes += rx_ring->stats.bytes;
7055 packets += rx_ring->stats.packets;
7056 }
7057 adapter->non_eop_descs = non_eop_descs;
7058 adapter->alloc_rx_page = alloc_rx_page;
7059 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
7060 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
7061 adapter->hw_csum_rx_error = hw_csum_rx_error;
7062 netdev->stats.rx_bytes = bytes;
7063 netdev->stats.rx_packets = packets;
7064
7065 bytes = 0;
7066 packets = 0;
7067
7068 for (i = 0; i < adapter->num_tx_queues; i++) {
7069 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7070 restart_queue += tx_ring->tx_stats.restart_queue;
7071 tx_busy += tx_ring->tx_stats.tx_busy;
7072 bytes += tx_ring->stats.bytes;
7073 packets += tx_ring->stats.packets;
7074 }
7075 for (i = 0; i < adapter->num_xdp_queues; i++) {
7076 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
7077
7078 restart_queue += xdp_ring->tx_stats.restart_queue;
7079 tx_busy += xdp_ring->tx_stats.tx_busy;
7080 bytes += xdp_ring->stats.bytes;
7081 packets += xdp_ring->stats.packets;
7082 }
7083 adapter->restart_queue = restart_queue;
7084 adapter->tx_busy = tx_busy;
7085 netdev->stats.tx_bytes = bytes;
7086 netdev->stats.tx_packets = packets;
7087
7088 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
7089
7090
7091 for (i = 0; i < 8; i++) {
7092
7093 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
7094 missed_rx += mpc;
7095 hwstats->mpc[i] += mpc;
7096 total_mpc += hwstats->mpc[i];
7097 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
7098 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
7099 switch (hw->mac.type) {
7100 case ixgbe_mac_82598EB:
7101 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
7102 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
7103 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
7104 hwstats->pxonrxc[i] +=
7105 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
7106 break;
7107 case ixgbe_mac_82599EB:
7108 case ixgbe_mac_X540:
7109 case ixgbe_mac_X550:
7110 case ixgbe_mac_X550EM_x:
7111 case ixgbe_mac_x550em_a:
7112 hwstats->pxonrxc[i] +=
7113 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
7114 break;
7115 default:
7116 break;
7117 }
7118 }
7119
7120
7121 for (i = 0; i < 16; i++) {
7122 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
7123 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
7124 if ((hw->mac.type == ixgbe_mac_82599EB) ||
7125 (hw->mac.type == ixgbe_mac_X540) ||
7126 (hw->mac.type == ixgbe_mac_X550) ||
7127 (hw->mac.type == ixgbe_mac_X550EM_x) ||
7128 (hw->mac.type == ixgbe_mac_x550em_a)) {
7129 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
7130 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
7131 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
7132 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
7133 }
7134 }
7135
7136 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
7137
7138 hwstats->gprc -= missed_rx;
7139
7140 ixgbe_update_xoff_received(adapter);
7141
7142
7143 switch (hw->mac.type) {
7144 case ixgbe_mac_82598EB:
7145 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
7146 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
7147 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
7148 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
7149 break;
7150 case ixgbe_mac_X540:
7151 case ixgbe_mac_X550:
7152 case ixgbe_mac_X550EM_x:
7153 case ixgbe_mac_x550em_a:
7154
7155 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
7156 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
7157 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
7158 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
7159
7160 case ixgbe_mac_82599EB:
7161 for (i = 0; i < 16; i++)
7162 adapter->hw_rx_no_dma_resources +=
7163 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
7164 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
7165 IXGBE_READ_REG(hw, IXGBE_GORCH);
7166 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
7167 IXGBE_READ_REG(hw, IXGBE_GOTCH);
7168 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
7169 IXGBE_READ_REG(hw, IXGBE_TORH);
7170 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
7171 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
7172 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
7173#ifdef IXGBE_FCOE
7174 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
7175 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
7176 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
7177 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
7178 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
7179 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
7180
7181 if (adapter->fcoe.ddp_pool) {
7182 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
7183 struct ixgbe_fcoe_ddp_pool *ddp_pool;
7184 unsigned int cpu;
7185 u64 noddp = 0, noddp_ext_buff = 0;
7186 for_each_possible_cpu(cpu) {
7187 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
7188 noddp += ddp_pool->noddp;
7189 noddp_ext_buff += ddp_pool->noddp_ext_buff;
7190 }
7191 hwstats->fcoe_noddp = noddp;
7192 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
7193 }
7194#endif
7195 break;
7196 default:
7197 break;
7198 }
7199 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
7200 hwstats->bprc += bprc;
7201 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
7202 if (hw->mac.type == ixgbe_mac_82598EB)
7203 hwstats->mprc -= bprc;
7204 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
7205 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
7206 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
7207 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
7208 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
7209 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
7210 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
7211 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
7212 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
7213 hwstats->lxontxc += lxon;
7214 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
7215 hwstats->lxofftxc += lxoff;
7216 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
7217 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
7218
7219
7220
7221 xon_off_tot = lxon + lxoff;
7222 hwstats->gptc -= xon_off_tot;
7223 hwstats->mptc -= xon_off_tot;
7224 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
7225 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
7226 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
7227 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
7228 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
7229 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
7230 hwstats->ptc64 -= xon_off_tot;
7231 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
7232 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
7233 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
7234 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
7235 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
7236 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
7237
7238
7239 netdev->stats.multicast = hwstats->mprc;
7240
7241
7242 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
7243 netdev->stats.rx_dropped = 0;
7244 netdev->stats.rx_length_errors = hwstats->rlec;
7245 netdev->stats.rx_crc_errors = hwstats->crcerrs;
7246 netdev->stats.rx_missed_errors = total_mpc;
7247}
7248
7249
7250
7251
7252
7253static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
7254{
7255 struct ixgbe_hw *hw = &adapter->hw;
7256 int i;
7257
7258 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
7259 return;
7260
7261 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
7262
7263
7264 if (test_bit(__IXGBE_DOWN, &adapter->state))
7265 return;
7266
7267
7268 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
7269 return;
7270
7271 adapter->fdir_overflow++;
7272
7273 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
7274 for (i = 0; i < adapter->num_tx_queues; i++)
7275 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7276 &(adapter->tx_ring[i]->state));
7277 for (i = 0; i < adapter->num_xdp_queues; i++)
7278 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7279 &adapter->xdp_ring[i]->state);
7280
7281 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
7282 } else {
7283 e_err(probe, "failed to finish FDIR re-initialization, "
7284 "ignored adding FDIR ATR filters\n");
7285 }
7286}
7287
7288
7289
7290
7291
7292
7293
7294
7295
7296
7297static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
7298{
7299 struct ixgbe_hw *hw = &adapter->hw;
7300 u64 eics = 0;
7301 int i;
7302
7303
7304 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7305 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7306 test_bit(__IXGBE_RESETTING, &adapter->state))
7307 return;
7308
7309
7310 if (netif_carrier_ok(adapter->netdev)) {
7311 for (i = 0; i < adapter->num_tx_queues; i++)
7312 set_check_for_tx_hang(adapter->tx_ring[i]);
7313 for (i = 0; i < adapter->num_xdp_queues; i++)
7314 set_check_for_tx_hang(adapter->xdp_ring[i]);
7315 }
7316
7317 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
7318
7319
7320
7321
7322
7323 IXGBE_WRITE_REG(hw, IXGBE_EICS,
7324 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
7325 } else {
7326
7327 for (i = 0; i < adapter->num_q_vectors; i++) {
7328 struct ixgbe_q_vector *qv = adapter->q_vector[i];
7329 if (qv->rx.ring || qv->tx.ring)
7330 eics |= BIT_ULL(i);
7331 }
7332 }
7333
7334
7335 ixgbe_irq_rearm_queues(adapter, eics);
7336}
7337
7338
7339
7340
7341
7342static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
7343{
7344 struct ixgbe_hw *hw = &adapter->hw;
7345 u32 link_speed = adapter->link_speed;
7346 bool link_up = adapter->link_up;
7347 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
7348
7349 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
7350 return;
7351
7352 if (hw->mac.ops.check_link) {
7353 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
7354 } else {
7355
7356 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
7357 link_up = true;
7358 }
7359
7360 if (adapter->ixgbe_ieee_pfc)
7361 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
7362
7363 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
7364 hw->mac.ops.fc_enable(hw);
7365 ixgbe_set_rx_drop_en(adapter);
7366 }
7367
7368 if (link_up ||
7369 time_after(jiffies, (adapter->link_check_timeout +
7370 IXGBE_TRY_LINK_TIMEOUT))) {
7371 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
7372 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
7373 IXGBE_WRITE_FLUSH(hw);
7374 }
7375
7376 adapter->link_up = link_up;
7377 adapter->link_speed = link_speed;
7378}
7379
7380static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
7381{
7382#ifdef CONFIG_IXGBE_DCB
7383 struct net_device *netdev = adapter->netdev;
7384 struct dcb_app app = {
7385 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
7386 .protocol = 0,
7387 };
7388 u8 up = 0;
7389
7390 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
7391 up = dcb_ieee_getapp_mask(netdev, &app);
7392
7393 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
7394#endif
7395}
7396
7397
7398
7399
7400
7401
7402static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
7403{
7404 struct net_device *netdev = adapter->netdev;
7405 struct ixgbe_hw *hw = &adapter->hw;
7406 u32 link_speed = adapter->link_speed;
7407 const char *speed_str;
7408 bool flow_rx, flow_tx;
7409
7410
7411 if (netif_carrier_ok(netdev))
7412 return;
7413
7414 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
7415
7416 switch (hw->mac.type) {
7417 case ixgbe_mac_82598EB: {
7418 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
7419 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
7420 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
7421 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
7422 }
7423 break;
7424 case ixgbe_mac_X540:
7425 case ixgbe_mac_X550:
7426 case ixgbe_mac_X550EM_x:
7427 case ixgbe_mac_x550em_a:
7428 case ixgbe_mac_82599EB: {
7429 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
7430 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
7431 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
7432 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
7433 }
7434 break;
7435 default:
7436 flow_tx = false;
7437 flow_rx = false;
7438 break;
7439 }
7440
7441 adapter->last_rx_ptp_check = jiffies;
7442
7443 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7444 ixgbe_ptp_start_cyclecounter(adapter);
7445
7446 switch (link_speed) {
7447 case IXGBE_LINK_SPEED_10GB_FULL:
7448 speed_str = "10 Gbps";
7449 break;
7450 case IXGBE_LINK_SPEED_5GB_FULL:
7451 speed_str = "5 Gbps";
7452 break;
7453 case IXGBE_LINK_SPEED_2_5GB_FULL:
7454 speed_str = "2.5 Gbps";
7455 break;
7456 case IXGBE_LINK_SPEED_1GB_FULL:
7457 speed_str = "1 Gbps";
7458 break;
7459 case IXGBE_LINK_SPEED_100_FULL:
7460 speed_str = "100 Mbps";
7461 break;
7462 case IXGBE_LINK_SPEED_10_FULL:
7463 speed_str = "10 Mbps";
7464 break;
7465 default:
7466 speed_str = "unknown speed";
7467 break;
7468 }
7469 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
7470 ((flow_rx && flow_tx) ? "RX/TX" :
7471 (flow_rx ? "RX" :
7472 (flow_tx ? "TX" : "None"))));
7473
7474 netif_carrier_on(netdev);
7475 ixgbe_check_vf_rate_limit(adapter);
7476
7477
7478 netif_tx_wake_all_queues(adapter->netdev);
7479
7480
7481 ixgbe_update_default_up(adapter);
7482
7483
7484 ixgbe_ping_all_vfs(adapter);
7485}
7486
7487
7488
7489
7490
7491
7492static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
7493{
7494 struct net_device *netdev = adapter->netdev;
7495 struct ixgbe_hw *hw = &adapter->hw;
7496
7497 adapter->link_up = false;
7498 adapter->link_speed = 0;
7499
7500
7501 if (!netif_carrier_ok(netdev))
7502 return;
7503
7504
7505 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
7506 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
7507
7508 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7509 ixgbe_ptp_start_cyclecounter(adapter);
7510
7511 e_info(drv, "NIC Link is Down\n");
7512 netif_carrier_off(netdev);
7513
7514
7515 ixgbe_ping_all_vfs(adapter);
7516}
7517
7518static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
7519{
7520 int i;
7521
7522 for (i = 0; i < adapter->num_tx_queues; i++) {
7523 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7524
7525 if (tx_ring->next_to_use != tx_ring->next_to_clean)
7526 return true;
7527 }
7528
7529 for (i = 0; i < adapter->num_xdp_queues; i++) {
7530 struct ixgbe_ring *ring = adapter->xdp_ring[i];
7531
7532 if (ring->next_to_use != ring->next_to_clean)
7533 return true;
7534 }
7535
7536 return false;
7537}
7538
7539static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
7540{
7541 struct ixgbe_hw *hw = &adapter->hw;
7542 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
7543 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
7544
7545 int i, j;
7546
7547 if (!adapter->num_vfs)
7548 return false;
7549
7550
7551 if (hw->mac.type >= ixgbe_mac_X550)
7552 return false;
7553
7554 for (i = 0; i < adapter->num_vfs; i++) {
7555 for (j = 0; j < q_per_pool; j++) {
7556 u32 h, t;
7557
7558 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
7559 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
7560
7561 if (h != t)
7562 return true;
7563 }
7564 }
7565
7566 return false;
7567}
7568
7569
7570
7571
7572
7573static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
7574{
7575 if (!netif_carrier_ok(adapter->netdev)) {
7576 if (ixgbe_ring_tx_pending(adapter) ||
7577 ixgbe_vf_tx_pending(adapter)) {
7578
7579
7580
7581
7582
7583 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
7584 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
7585 }
7586 }
7587}
7588
7589#ifdef CONFIG_PCI_IOV
7590static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
7591{
7592 struct ixgbe_hw *hw = &adapter->hw;
7593 struct pci_dev *pdev = adapter->pdev;
7594 unsigned int vf;
7595 u32 gpc;
7596
7597 if (!(netif_carrier_ok(adapter->netdev)))
7598 return;
7599
7600 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
7601 if (gpc)
7602 return;
7603
7604
7605
7606
7607
7608
7609 if (!pdev)
7610 return;
7611
7612
7613 for (vf = 0; vf < adapter->num_vfs; ++vf) {
7614 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
7615 u16 status_reg;
7616
7617 if (!vfdev)
7618 continue;
7619 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
7620 if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
7621 status_reg & PCI_STATUS_REC_MASTER_ABORT)
7622 pcie_flr(vfdev);
7623 }
7624}
7625
7626static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
7627{
7628 u32 ssvpc;
7629
7630
7631 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
7632 adapter->num_vfs == 0)
7633 return;
7634
7635 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
7636
7637
7638
7639
7640
7641 if (!ssvpc)
7642 return;
7643
7644 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
7645}
7646#else
7647static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
7648{
7649}
7650
7651static void
7652ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
7653{
7654}
7655#endif
7656
7657
7658
7659
7660
7661
7662static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
7663{
7664
7665 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7666 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7667 test_bit(__IXGBE_RESETTING, &adapter->state))
7668 return;
7669
7670 ixgbe_watchdog_update_link(adapter);
7671
7672 if (adapter->link_up)
7673 ixgbe_watchdog_link_is_up(adapter);
7674 else
7675 ixgbe_watchdog_link_is_down(adapter);
7676
7677 ixgbe_check_for_bad_vf(adapter);
7678 ixgbe_spoof_check(adapter);
7679 ixgbe_update_stats(adapter);
7680
7681 ixgbe_watchdog_flush_tx(adapter);
7682}
7683
7684
7685
7686
7687
7688static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
7689{
7690 struct ixgbe_hw *hw = &adapter->hw;
7691 s32 err;
7692
7693
7694 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
7695 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7696 return;
7697
7698 if (adapter->sfp_poll_time &&
7699 time_after(adapter->sfp_poll_time, jiffies))
7700 return;
7701
7702
7703 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7704 return;
7705
7706 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
7707
7708 err = hw->phy.ops.identify_sfp(hw);
7709 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7710 goto sfp_out;
7711
7712 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
7713
7714
7715 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
7716 }
7717
7718
7719 if (err)
7720 goto sfp_out;
7721
7722
7723 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7724 goto sfp_out;
7725
7726 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
7727
7728
7729
7730
7731
7732
7733 if (hw->mac.type == ixgbe_mac_82598EB)
7734 err = hw->phy.ops.reset(hw);
7735 else
7736 err = hw->mac.ops.setup_sfp(hw);
7737
7738 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7739 goto sfp_out;
7740
7741 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
7742 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
7743
7744sfp_out:
7745 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7746
7747 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
7748 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
7749 e_dev_err("failed to initialize because an unsupported "
7750 "SFP+ module type was detected.\n");
7751 e_dev_err("Reload the driver after installing a "
7752 "supported module.\n");
7753 unregister_netdev(adapter->netdev);
7754 }
7755}
7756
7757
7758
7759
7760
7761static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
7762{
7763 struct ixgbe_hw *hw = &adapter->hw;
7764 u32 cap_speed;
7765 u32 speed;
7766 bool autoneg = false;
7767
7768 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
7769 return;
7770
7771
7772 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7773 return;
7774
7775 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
7776
7777 hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
7778
7779
7780 if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
7781 speed = IXGBE_LINK_SPEED_10GB_FULL;
7782 else
7783 speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
7784 IXGBE_LINK_SPEED_1GB_FULL);
7785
7786 if (hw->mac.ops.setup_link)
7787 hw->mac.ops.setup_link(hw, speed, true);
7788
7789 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
7790 adapter->link_check_timeout = jiffies;
7791 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7792}
7793
7794
7795
7796
7797
7798static void ixgbe_service_timer(struct timer_list *t)
7799{
7800 struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
7801 unsigned long next_event_offset;
7802
7803
7804 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
7805 next_event_offset = HZ / 10;
7806 else
7807 next_event_offset = HZ * 2;
7808
7809
7810 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7811
7812 ixgbe_service_event_schedule(adapter);
7813}
7814
7815static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
7816{
7817 struct ixgbe_hw *hw = &adapter->hw;
7818 u32 status;
7819
7820 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
7821 return;
7822
7823 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
7824
7825 if (!hw->phy.ops.handle_lasi)
7826 return;
7827
7828 status = hw->phy.ops.handle_lasi(&adapter->hw);
7829 if (status != IXGBE_ERR_OVERTEMP)
7830 return;
7831
7832 e_crit(drv, "%s\n", ixgbe_overheat_msg);
7833}
7834
7835static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
7836{
7837 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
7838 return;
7839
7840 rtnl_lock();
7841
7842 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7843 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7844 test_bit(__IXGBE_RESETTING, &adapter->state)) {
7845 rtnl_unlock();
7846 return;
7847 }
7848
7849 ixgbe_dump(adapter);
7850 netdev_err(adapter->netdev, "Reset adapter\n");
7851 adapter->tx_timeout_count++;
7852
7853 ixgbe_reinit_locked(adapter);
7854 rtnl_unlock();
7855}
7856
7857
7858
7859
7860
7861
7862
7863static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
7864{
7865 struct ixgbe_hw *hw = &adapter->hw;
7866 u32 fwsm;
7867
7868
7869 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
7870
7871 if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
7872 !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
7873 e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
7874 fwsm);
7875
7876 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
7877 e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
7878 return true;
7879 }
7880
7881 return false;
7882}
7883
7884
7885
7886
7887
7888static void ixgbe_service_task(struct work_struct *work)
7889{
7890 struct ixgbe_adapter *adapter = container_of(work,
7891 struct ixgbe_adapter,
7892 service_task);
7893 if (ixgbe_removed(adapter->hw.hw_addr)) {
7894 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
7895 rtnl_lock();
7896 ixgbe_down(adapter);
7897 rtnl_unlock();
7898 }
7899 ixgbe_service_event_complete(adapter);
7900 return;
7901 }
7902 if (ixgbe_check_fw_error(adapter)) {
7903 if (!test_bit(__IXGBE_DOWN, &adapter->state))
7904 unregister_netdev(adapter->netdev);
7905 ixgbe_service_event_complete(adapter);
7906 return;
7907 }
7908 if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
7909 rtnl_lock();
7910 adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
7911 udp_tunnel_get_rx_info(adapter->netdev);
7912 rtnl_unlock();
7913 }
7914 ixgbe_reset_subtask(adapter);
7915 ixgbe_phy_interrupt_subtask(adapter);
7916 ixgbe_sfp_detection_subtask(adapter);
7917 ixgbe_sfp_link_config_subtask(adapter);
7918 ixgbe_check_overtemp_subtask(adapter);
7919 ixgbe_watchdog_subtask(adapter);
7920 ixgbe_fdir_reinit_subtask(adapter);
7921 ixgbe_check_hang_subtask(adapter);
7922
7923 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
7924 ixgbe_ptp_overflow_check(adapter);
7925 if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
7926 ixgbe_ptp_rx_hang(adapter);
7927 ixgbe_ptp_tx_hang(adapter);
7928 }
7929
7930 ixgbe_service_event_complete(adapter);
7931}
7932
7933static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7934 struct ixgbe_tx_buffer *first,
7935 u8 *hdr_len,
7936 struct ixgbe_ipsec_tx_data *itd)
7937{
7938 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
7939 struct sk_buff *skb = first->skb;
7940 union {
7941 struct iphdr *v4;
7942 struct ipv6hdr *v6;
7943 unsigned char *hdr;
7944 } ip;
7945 union {
7946 struct tcphdr *tcp;
7947 unsigned char *hdr;
7948 } l4;
7949 u32 paylen, l4_offset;
7950 u32 fceof_saidx = 0;
7951 int err;
7952
7953 if (skb->ip_summed != CHECKSUM_PARTIAL)
7954 return 0;
7955
7956 if (!skb_is_gso(skb))
7957 return 0;
7958
7959 err = skb_cow_head(skb, 0);
7960 if (err < 0)
7961 return err;
7962
7963 if (eth_p_mpls(first->protocol))
7964 ip.hdr = skb_inner_network_header(skb);
7965 else
7966 ip.hdr = skb_network_header(skb);
7967 l4.hdr = skb_checksum_start(skb);
7968
7969
7970 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
7971
7972
7973 if (ip.v4->version == 4) {
7974 unsigned char *csum_start = skb_checksum_start(skb);
7975 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
7976 int len = csum_start - trans_start;
7977
7978
7979
7980
7981
7982 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
7983 csum_fold(csum_partial(trans_start,
7984 len, 0)) : 0;
7985 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7986
7987 ip.v4->tot_len = 0;
7988 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7989 IXGBE_TX_FLAGS_CSUM |
7990 IXGBE_TX_FLAGS_IPV4;
7991 } else {
7992 ip.v6->payload_len = 0;
7993 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7994 IXGBE_TX_FLAGS_CSUM;
7995 }
7996
7997
7998 l4_offset = l4.hdr - skb->data;
7999
8000
8001 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
8002
8003
8004 paylen = skb->len - l4_offset;
8005 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
8006
8007
8008 first->gso_segs = skb_shinfo(skb)->gso_segs;
8009 first->bytecount += (first->gso_segs - 1) * *hdr_len;
8010
8011
8012 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
8013 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
8014
8015 fceof_saidx |= itd->sa_idx;
8016 type_tucmd |= itd->flags | itd->trailer_len;
8017
8018
8019 vlan_macip_lens = l4.hdr - ip.hdr;
8020 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
8021 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8022
8023 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
8024 mss_l4len_idx);
8025
8026 return 1;
8027}
8028
8029static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
8030{
8031 unsigned int offset = 0;
8032
8033 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
8034
8035 return offset == skb_checksum_start_offset(skb);
8036}
8037
8038static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
8039 struct ixgbe_tx_buffer *first,
8040 struct ixgbe_ipsec_tx_data *itd)
8041{
8042 struct sk_buff *skb = first->skb;
8043 u32 vlan_macip_lens = 0;
8044 u32 fceof_saidx = 0;
8045 u32 type_tucmd = 0;
8046
8047 if (skb->ip_summed != CHECKSUM_PARTIAL) {
8048csum_failed:
8049 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
8050 IXGBE_TX_FLAGS_CC)))
8051 return;
8052 goto no_csum;
8053 }
8054
8055 switch (skb->csum_offset) {
8056 case offsetof(struct tcphdr, check):
8057 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
8058
8059 case offsetof(struct udphdr, check):
8060 break;
8061 case offsetof(struct sctphdr, checksum):
8062
8063 if (((first->protocol == htons(ETH_P_IP)) &&
8064 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
8065 ((first->protocol == htons(ETH_P_IPV6)) &&
8066 ixgbe_ipv6_csum_is_sctp(skb))) {
8067 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
8068 break;
8069 }
8070
8071 default:
8072 skb_checksum_help(skb);
8073 goto csum_failed;
8074 }
8075
8076
8077 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
8078 vlan_macip_lens = skb_checksum_start_offset(skb) -
8079 skb_network_offset(skb);
8080no_csum:
8081
8082 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
8083 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8084
8085 fceof_saidx |= itd->sa_idx;
8086 type_tucmd |= itd->flags | itd->trailer_len;
8087
8088 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
8089}
8090
8091#define IXGBE_SET_FLAG(_input, _flag, _result) \
8092 ((_flag <= _result) ? \
8093 ((u32)(_input & _flag) * (_result / _flag)) : \
8094 ((u32)(_input & _flag) / (_flag / _result)))
8095
8096static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
8097{
8098
8099 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8100 IXGBE_ADVTXD_DCMD_DEXT |
8101 IXGBE_ADVTXD_DCMD_IFCS;
8102
8103
8104 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
8105 IXGBE_ADVTXD_DCMD_VLE);
8106
8107
8108 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
8109 IXGBE_ADVTXD_DCMD_TSE);
8110
8111
8112 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
8113 IXGBE_ADVTXD_MAC_TSTAMP);
8114
8115
8116 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
8117
8118 return cmd_type;
8119}
8120
8121static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
8122 u32 tx_flags, unsigned int paylen)
8123{
8124 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
8125
8126
8127 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8128 IXGBE_TX_FLAGS_CSUM,
8129 IXGBE_ADVTXD_POPTS_TXSM);
8130
8131
8132 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8133 IXGBE_TX_FLAGS_IPV4,
8134 IXGBE_ADVTXD_POPTS_IXSM);
8135
8136
8137 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8138 IXGBE_TX_FLAGS_IPSEC,
8139 IXGBE_ADVTXD_POPTS_IPSEC);
8140
8141
8142
8143
8144
8145 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8146 IXGBE_TX_FLAGS_CC,
8147 IXGBE_ADVTXD_CC);
8148
8149 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
8150}
8151
8152static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8153{
8154 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
8155
8156
8157
8158
8159
8160 smp_mb();
8161
8162
8163
8164
8165 if (likely(ixgbe_desc_unused(tx_ring) < size))
8166 return -EBUSY;
8167
8168
8169 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
8170 ++tx_ring->tx_stats.restart_queue;
8171 return 0;
8172}
8173
8174static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8175{
8176 if (likely(ixgbe_desc_unused(tx_ring) >= size))
8177 return 0;
8178
8179 return __ixgbe_maybe_stop_tx(tx_ring, size);
8180}
8181
8182static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
8183 struct ixgbe_tx_buffer *first,
8184 const u8 hdr_len)
8185{
8186 struct sk_buff *skb = first->skb;
8187 struct ixgbe_tx_buffer *tx_buffer;
8188 union ixgbe_adv_tx_desc *tx_desc;
8189 struct skb_frag_struct *frag;
8190 dma_addr_t dma;
8191 unsigned int data_len, size;
8192 u32 tx_flags = first->tx_flags;
8193 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
8194 u16 i = tx_ring->next_to_use;
8195
8196 tx_desc = IXGBE_TX_DESC(tx_ring, i);
8197
8198 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
8199
8200 size = skb_headlen(skb);
8201 data_len = skb->data_len;
8202
8203#ifdef IXGBE_FCOE
8204 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
8205 if (data_len < sizeof(struct fcoe_crc_eof)) {
8206 size -= sizeof(struct fcoe_crc_eof) - data_len;
8207 data_len = 0;
8208 } else {
8209 data_len -= sizeof(struct fcoe_crc_eof);
8210 }
8211 }
8212
8213#endif
8214 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
8215
8216 tx_buffer = first;
8217
8218 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
8219 if (dma_mapping_error(tx_ring->dev, dma))
8220 goto dma_error;
8221
8222
8223 dma_unmap_len_set(tx_buffer, len, size);
8224 dma_unmap_addr_set(tx_buffer, dma, dma);
8225
8226 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8227
8228 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
8229 tx_desc->read.cmd_type_len =
8230 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
8231
8232 i++;
8233 tx_desc++;
8234 if (i == tx_ring->count) {
8235 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8236 i = 0;
8237 }
8238 tx_desc->read.olinfo_status = 0;
8239
8240 dma += IXGBE_MAX_DATA_PER_TXD;
8241 size -= IXGBE_MAX_DATA_PER_TXD;
8242
8243 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8244 }
8245
8246 if (likely(!data_len))
8247 break;
8248
8249 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
8250
8251 i++;
8252 tx_desc++;
8253 if (i == tx_ring->count) {
8254 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8255 i = 0;
8256 }
8257 tx_desc->read.olinfo_status = 0;
8258
8259#ifdef IXGBE_FCOE
8260 size = min_t(unsigned int, data_len, skb_frag_size(frag));
8261#else
8262 size = skb_frag_size(frag);
8263#endif
8264 data_len -= size;
8265
8266 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
8267 DMA_TO_DEVICE);
8268
8269 tx_buffer = &tx_ring->tx_buffer_info[i];
8270 }
8271
8272
8273 cmd_type |= size | IXGBE_TXD_CMD;
8274 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8275
8276 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
8277
8278
8279 first->time_stamp = jiffies;
8280
8281 skb_tx_timestamp(skb);
8282
8283
8284
8285
8286
8287
8288
8289
8290
8291 wmb();
8292
8293
8294 first->next_to_watch = tx_desc;
8295
8296 i++;
8297 if (i == tx_ring->count)
8298 i = 0;
8299
8300 tx_ring->next_to_use = i;
8301
8302 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
8303
8304 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
8305 writel(i, tx_ring->tail);
8306 }
8307
8308 return 0;
8309dma_error:
8310 dev_err(tx_ring->dev, "TX DMA map failed\n");
8311
8312
8313 for (;;) {
8314 tx_buffer = &tx_ring->tx_buffer_info[i];
8315 if (dma_unmap_len(tx_buffer, len))
8316 dma_unmap_page(tx_ring->dev,
8317 dma_unmap_addr(tx_buffer, dma),
8318 dma_unmap_len(tx_buffer, len),
8319 DMA_TO_DEVICE);
8320 dma_unmap_len_set(tx_buffer, len, 0);
8321 if (tx_buffer == first)
8322 break;
8323 if (i == 0)
8324 i += tx_ring->count;
8325 i--;
8326 }
8327
8328 dev_kfree_skb_any(first->skb);
8329 first->skb = NULL;
8330
8331 tx_ring->next_to_use = i;
8332
8333 return -1;
8334}
8335
8336static void ixgbe_atr(struct ixgbe_ring *ring,
8337 struct ixgbe_tx_buffer *first)
8338{
8339 struct ixgbe_q_vector *q_vector = ring->q_vector;
8340 union ixgbe_atr_hash_dword input = { .dword = 0 };
8341 union ixgbe_atr_hash_dword common = { .dword = 0 };
8342 union {
8343 unsigned char *network;
8344 struct iphdr *ipv4;
8345 struct ipv6hdr *ipv6;
8346 } hdr;
8347 struct tcphdr *th;
8348 unsigned int hlen;
8349 struct sk_buff *skb;
8350 __be16 vlan_id;
8351 int l4_proto;
8352
8353
8354 if (!q_vector)
8355 return;
8356
8357
8358 if (!ring->atr_sample_rate)
8359 return;
8360
8361 ring->atr_count++;
8362
8363
8364 if ((first->protocol != htons(ETH_P_IP)) &&
8365 (first->protocol != htons(ETH_P_IPV6)))
8366 return;
8367
8368
8369 skb = first->skb;
8370 hdr.network = skb_network_header(skb);
8371 if (unlikely(hdr.network <= skb->data))
8372 return;
8373 if (skb->encapsulation &&
8374 first->protocol == htons(ETH_P_IP) &&
8375 hdr.ipv4->protocol == IPPROTO_UDP) {
8376 struct ixgbe_adapter *adapter = q_vector->adapter;
8377
8378 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8379 VXLAN_HEADROOM))
8380 return;
8381
8382
8383 if (adapter->vxlan_port &&
8384 udp_hdr(skb)->dest == adapter->vxlan_port)
8385 hdr.network = skb_inner_network_header(skb);
8386
8387 if (adapter->geneve_port &&
8388 udp_hdr(skb)->dest == adapter->geneve_port)
8389 hdr.network = skb_inner_network_header(skb);
8390 }
8391
8392
8393
8394
8395 if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
8396 return;
8397
8398
8399 switch (hdr.ipv4->version) {
8400 case IPVERSION:
8401
8402 hlen = (hdr.network[0] & 0x0F) << 2;
8403 l4_proto = hdr.ipv4->protocol;
8404 break;
8405 case 6:
8406 hlen = hdr.network - skb->data;
8407 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
8408 hlen -= hdr.network - skb->data;
8409 break;
8410 default:
8411 return;
8412 }
8413
8414 if (l4_proto != IPPROTO_TCP)
8415 return;
8416
8417 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8418 hlen + sizeof(struct tcphdr)))
8419 return;
8420
8421 th = (struct tcphdr *)(hdr.network + hlen);
8422
8423
8424 if (th->fin)
8425 return;
8426
8427
8428 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
8429 return;
8430
8431
8432 ring->atr_count = 0;
8433
8434 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
8435
8436
8437
8438
8439
8440
8441
8442
8443 input.formatted.vlan_id = vlan_id;
8444
8445
8446
8447
8448
8449 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
8450 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
8451 else
8452 common.port.src ^= th->dest ^ first->protocol;
8453 common.port.dst ^= th->source;
8454
8455 switch (hdr.ipv4->version) {
8456 case IPVERSION:
8457 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
8458 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
8459 break;
8460 case 6:
8461 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
8462 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
8463 hdr.ipv6->saddr.s6_addr32[1] ^
8464 hdr.ipv6->saddr.s6_addr32[2] ^
8465 hdr.ipv6->saddr.s6_addr32[3] ^
8466 hdr.ipv6->daddr.s6_addr32[0] ^
8467 hdr.ipv6->daddr.s6_addr32[1] ^
8468 hdr.ipv6->daddr.s6_addr32[2] ^
8469 hdr.ipv6->daddr.s6_addr32[3];
8470 break;
8471 default:
8472 break;
8473 }
8474
8475 if (hdr.network != skb_network_header(skb))
8476 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
8477
8478
8479 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
8480 input, common, ring->queue_index);
8481}
8482
8483#ifdef IXGBE_FCOE
8484static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
8485 struct net_device *sb_dev)
8486{
8487 struct ixgbe_adapter *adapter;
8488 struct ixgbe_ring_feature *f;
8489 int txq;
8490
8491 if (sb_dev) {
8492 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
8493 struct net_device *vdev = sb_dev;
8494
8495 txq = vdev->tc_to_txq[tc].offset;
8496 txq += reciprocal_scale(skb_get_hash(skb),
8497 vdev->tc_to_txq[tc].count);
8498
8499 return txq;
8500 }
8501
8502
8503
8504
8505
8506 switch (vlan_get_protocol(skb)) {
8507 case htons(ETH_P_FCOE):
8508 case htons(ETH_P_FIP):
8509 adapter = netdev_priv(dev);
8510
8511 if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
8512 break;
8513
8514 default:
8515 return netdev_pick_tx(dev, skb, sb_dev);
8516 }
8517
8518 f = &adapter->ring_feature[RING_F_FCOE];
8519
8520 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
8521 smp_processor_id();
8522
8523 while (txq >= f->indices)
8524 txq -= f->indices;
8525
8526 return txq + f->offset;
8527}
8528
8529#endif
8530int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
8531 struct xdp_frame *xdpf)
8532{
8533 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
8534 struct ixgbe_tx_buffer *tx_buffer;
8535 union ixgbe_adv_tx_desc *tx_desc;
8536 u32 len, cmd_type;
8537 dma_addr_t dma;
8538 u16 i;
8539
8540 len = xdpf->len;
8541
8542 if (unlikely(!ixgbe_desc_unused(ring)))
8543 return IXGBE_XDP_CONSUMED;
8544
8545 dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
8546 if (dma_mapping_error(ring->dev, dma))
8547 return IXGBE_XDP_CONSUMED;
8548
8549
8550 tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
8551 tx_buffer->bytecount = len;
8552 tx_buffer->gso_segs = 1;
8553 tx_buffer->protocol = 0;
8554
8555 i = ring->next_to_use;
8556 tx_desc = IXGBE_TX_DESC(ring, i);
8557
8558 dma_unmap_len_set(tx_buffer, len, len);
8559 dma_unmap_addr_set(tx_buffer, dma, dma);
8560 tx_buffer->xdpf = xdpf;
8561
8562 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8563
8564
8565 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8566 IXGBE_ADVTXD_DCMD_DEXT |
8567 IXGBE_ADVTXD_DCMD_IFCS;
8568 cmd_type |= len | IXGBE_TXD_CMD;
8569 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8570 tx_desc->read.olinfo_status =
8571 cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
8572
8573
8574 smp_wmb();
8575
8576
8577 i++;
8578 if (i == ring->count)
8579 i = 0;
8580
8581 tx_buffer->next_to_watch = tx_desc;
8582 ring->next_to_use = i;
8583
8584 return IXGBE_XDP_TX;
8585}
8586
8587netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8588 struct ixgbe_adapter *adapter,
8589 struct ixgbe_ring *tx_ring)
8590{
8591 struct ixgbe_tx_buffer *first;
8592 int tso;
8593 u32 tx_flags = 0;
8594 unsigned short f;
8595 u16 count = TXD_USE_COUNT(skb_headlen(skb));
8596 struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
8597 __be16 protocol = skb->protocol;
8598 u8 hdr_len = 0;
8599
8600
8601
8602
8603
8604
8605
8606
8607 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
8608 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
8609
8610 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8611 tx_ring->tx_stats.tx_busy++;
8612 return NETDEV_TX_BUSY;
8613 }
8614
8615
8616 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
8617 first->skb = skb;
8618 first->bytecount = skb->len;
8619 first->gso_segs = 1;
8620
8621
8622 if (skb_vlan_tag_present(skb)) {
8623 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
8624 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8625
8626 } else if (protocol == htons(ETH_P_8021Q)) {
8627 struct vlan_hdr *vhdr, _vhdr;
8628 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
8629 if (!vhdr)
8630 goto out_drop;
8631
8632 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
8633 IXGBE_TX_FLAGS_VLAN_SHIFT;
8634 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
8635 }
8636 protocol = vlan_get_protocol(skb);
8637
8638 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
8639 adapter->ptp_clock) {
8640 if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
8641 &adapter->state)) {
8642 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8643 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
8644
8645
8646 adapter->ptp_tx_skb = skb_get(skb);
8647 adapter->ptp_tx_start = jiffies;
8648 schedule_work(&adapter->ptp_tx_work);
8649 } else {
8650 adapter->tx_hwtstamp_skipped++;
8651 }
8652 }
8653
8654#ifdef CONFIG_PCI_IOV
8655
8656
8657
8658
8659 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8660 tx_flags |= IXGBE_TX_FLAGS_CC;
8661
8662#endif
8663
8664 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8665 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
8666 (skb->priority != TC_PRIO_CONTROL))) {
8667 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
8668 tx_flags |= (skb->priority & 0x7) <<
8669 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
8670 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
8671 struct vlan_ethhdr *vhdr;
8672
8673 if (skb_cow_head(skb, 0))
8674 goto out_drop;
8675 vhdr = (struct vlan_ethhdr *)skb->data;
8676 vhdr->h_vlan_TCI = htons(tx_flags >>
8677 IXGBE_TX_FLAGS_VLAN_SHIFT);
8678 } else {
8679 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8680 }
8681 }
8682
8683
8684 first->tx_flags = tx_flags;
8685 first->protocol = protocol;
8686
8687#ifdef IXGBE_FCOE
8688
8689 if ((protocol == htons(ETH_P_FCOE)) &&
8690 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
8691 tso = ixgbe_fso(tx_ring, first, &hdr_len);
8692 if (tso < 0)
8693 goto out_drop;
8694
8695 goto xmit_fcoe;
8696 }
8697
8698#endif
8699
8700#ifdef CONFIG_IXGBE_IPSEC
8701 if (xfrm_offload(skb) &&
8702 !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8703 goto out_drop;
8704#endif
8705 tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
8706 if (tso < 0)
8707 goto out_drop;
8708 else if (!tso)
8709 ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
8710
8711
8712 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
8713 ixgbe_atr(tx_ring, first);
8714
8715#ifdef IXGBE_FCOE
8716xmit_fcoe:
8717#endif
8718 if (ixgbe_tx_map(tx_ring, first, hdr_len))
8719 goto cleanup_tx_timestamp;
8720
8721 return NETDEV_TX_OK;
8722
8723out_drop:
8724 dev_kfree_skb_any(first->skb);
8725 first->skb = NULL;
8726cleanup_tx_timestamp:
8727 if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
8728 dev_kfree_skb_any(adapter->ptp_tx_skb);
8729 adapter->ptp_tx_skb = NULL;
8730 cancel_work_sync(&adapter->ptp_tx_work);
8731 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
8732 }
8733
8734 return NETDEV_TX_OK;
8735}
8736
8737static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
8738 struct net_device *netdev,
8739 struct ixgbe_ring *ring)
8740{
8741 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8742 struct ixgbe_ring *tx_ring;
8743
8744
8745
8746
8747
8748 if (skb_put_padto(skb, 17))
8749 return NETDEV_TX_OK;
8750
8751 tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
8752 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
8753 return NETDEV_TX_BUSY;
8754
8755 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
8756}
8757
8758static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
8759 struct net_device *netdev)
8760{
8761 return __ixgbe_xmit_frame(skb, netdev, NULL);
8762}
8763
8764
8765
8766
8767
8768
8769
8770
8771static int ixgbe_set_mac(struct net_device *netdev, void *p)
8772{
8773 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8774 struct ixgbe_hw *hw = &adapter->hw;
8775 struct sockaddr *addr = p;
8776
8777 if (!is_valid_ether_addr(addr->sa_data))
8778 return -EADDRNOTAVAIL;
8779
8780 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
8781 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
8782
8783 ixgbe_mac_set_default_filter(adapter);
8784
8785 return 0;
8786}
8787
8788static int
8789ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
8790{
8791 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8792 struct ixgbe_hw *hw = &adapter->hw;
8793 u16 value;
8794 int rc;
8795
8796 if (adapter->mii_bus) {
8797 int regnum = addr;
8798
8799 if (devad != MDIO_DEVAD_NONE)
8800 regnum |= (devad << 16) | MII_ADDR_C45;
8801
8802 return mdiobus_read(adapter->mii_bus, prtad, regnum);
8803 }
8804
8805 if (prtad != hw->phy.mdio.prtad)
8806 return -EINVAL;
8807 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
8808 if (!rc)
8809 rc = value;
8810 return rc;
8811}
8812
8813static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
8814 u16 addr, u16 value)
8815{
8816 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8817 struct ixgbe_hw *hw = &adapter->hw;
8818
8819 if (adapter->mii_bus) {
8820 int regnum = addr;
8821
8822 if (devad != MDIO_DEVAD_NONE)
8823 regnum |= (devad << 16) | MII_ADDR_C45;
8824
8825 return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
8826 }
8827
8828 if (prtad != hw->phy.mdio.prtad)
8829 return -EINVAL;
8830 return hw->phy.ops.write_reg(hw, addr, devad, value);
8831}
8832
8833static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
8834{
8835 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8836
8837 switch (cmd) {
8838 case SIOCSHWTSTAMP:
8839 return ixgbe_ptp_set_ts_config(adapter, req);
8840 case SIOCGHWTSTAMP:
8841 return ixgbe_ptp_get_ts_config(adapter, req);
8842 case SIOCGMIIPHY:
8843 if (!adapter->hw.phy.ops.read_reg)
8844 return -EOPNOTSUPP;
8845
8846 default:
8847 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8848 }
8849}
8850
8851
8852
8853
8854
8855
8856
8857
8858static int ixgbe_add_sanmac_netdev(struct net_device *dev)
8859{
8860 int err = 0;
8861 struct ixgbe_adapter *adapter = netdev_priv(dev);
8862 struct ixgbe_hw *hw = &adapter->hw;
8863
8864 if (is_valid_ether_addr(hw->mac.san_addr)) {
8865 rtnl_lock();
8866 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
8867 rtnl_unlock();
8868
8869
8870 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
8871 }
8872 return err;
8873}
8874
8875
8876
8877
8878
8879
8880
8881
8882static int ixgbe_del_sanmac_netdev(struct net_device *dev)
8883{
8884 int err = 0;
8885 struct ixgbe_adapter *adapter = netdev_priv(dev);
8886 struct ixgbe_mac_info *mac = &adapter->hw.mac;
8887
8888 if (is_valid_ether_addr(mac->san_addr)) {
8889 rtnl_lock();
8890 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
8891 rtnl_unlock();
8892 }
8893 return err;
8894}
8895
8896static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
8897 struct ixgbe_ring *ring)
8898{
8899 u64 bytes, packets;
8900 unsigned int start;
8901
8902 if (ring) {
8903 do {
8904 start = u64_stats_fetch_begin_irq(&ring->syncp);
8905 packets = ring->stats.packets;
8906 bytes = ring->stats.bytes;
8907 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8908 stats->tx_packets += packets;
8909 stats->tx_bytes += bytes;
8910 }
8911}
8912
8913static void ixgbe_get_stats64(struct net_device *netdev,
8914 struct rtnl_link_stats64 *stats)
8915{
8916 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8917 int i;
8918
8919 rcu_read_lock();
8920 for (i = 0; i < adapter->num_rx_queues; i++) {
8921 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
8922 u64 bytes, packets;
8923 unsigned int start;
8924
8925 if (ring) {
8926 do {
8927 start = u64_stats_fetch_begin_irq(&ring->syncp);
8928 packets = ring->stats.packets;
8929 bytes = ring->stats.bytes;
8930 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8931 stats->rx_packets += packets;
8932 stats->rx_bytes += bytes;
8933 }
8934 }
8935
8936 for (i = 0; i < adapter->num_tx_queues; i++) {
8937 struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
8938
8939 ixgbe_get_ring_stats64(stats, ring);
8940 }
8941 for (i = 0; i < adapter->num_xdp_queues; i++) {
8942 struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
8943
8944 ixgbe_get_ring_stats64(stats, ring);
8945 }
8946 rcu_read_unlock();
8947
8948
8949 stats->multicast = netdev->stats.multicast;
8950 stats->rx_errors = netdev->stats.rx_errors;
8951 stats->rx_length_errors = netdev->stats.rx_length_errors;
8952 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
8953 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
8954}
8955
8956#ifdef CONFIG_IXGBE_DCB
8957
8958
8959
8960
8961
8962
8963
8964
8965static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
8966{
8967 struct ixgbe_hw *hw = &adapter->hw;
8968 u32 reg, rsave;
8969 int i;
8970
8971
8972
8973
8974 if (hw->mac.type == ixgbe_mac_82598EB)
8975 return;
8976
8977 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
8978 rsave = reg;
8979
8980 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8981 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
8982
8983
8984 if (up2tc > tc)
8985 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
8986 }
8987
8988 if (reg != rsave)
8989 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
8990
8991 return;
8992}
8993
8994
8995
8996
8997
8998
8999
9000static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
9001{
9002 struct net_device *dev = adapter->netdev;
9003 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
9004 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
9005 u8 prio;
9006
9007 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
9008 u8 tc = 0;
9009
9010 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
9011 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
9012 else if (ets)
9013 tc = ets->prio_tc[prio];
9014
9015 netdev_set_prio_tc_map(dev, prio, tc);
9016 }
9017}
9018
9019#endif
9020static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
9021{
9022 struct ixgbe_adapter *adapter = data;
9023 struct ixgbe_fwd_adapter *accel;
9024 int pool;
9025
9026
9027 if (!netif_is_macvlan(vdev))
9028 return 0;
9029
9030
9031 accel = macvlan_accel_priv(vdev);
9032 if (!accel)
9033 return 0;
9034
9035
9036 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9037 if (pool < adapter->num_rx_pools) {
9038 set_bit(pool, adapter->fwd_bitmask);
9039 accel->pool = pool;
9040 return 0;
9041 }
9042
9043
9044 netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
9045 macvlan_release_l2fw_offload(vdev);
9046
9047
9048 netdev_unbind_sb_channel(adapter->netdev, vdev);
9049 netdev_set_sb_channel(vdev, 0);
9050
9051 kfree(accel);
9052
9053 return 0;
9054}
9055
9056static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
9057{
9058 struct ixgbe_adapter *adapter = netdev_priv(dev);
9059
9060
9061 bitmap_clear(adapter->fwd_bitmask, 1, 63);
9062
9063
9064 netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
9065 adapter);
9066}
9067
9068
9069
9070
9071
9072
9073
9074int ixgbe_setup_tc(struct net_device *dev, u8 tc)
9075{
9076 struct ixgbe_adapter *adapter = netdev_priv(dev);
9077 struct ixgbe_hw *hw = &adapter->hw;
9078
9079
9080 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
9081 return -EINVAL;
9082
9083 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
9084 return -EINVAL;
9085
9086
9087
9088
9089
9090 if (netif_running(dev))
9091 ixgbe_close(dev);
9092 else
9093 ixgbe_reset(adapter);
9094
9095 ixgbe_clear_interrupt_scheme(adapter);
9096
9097#ifdef CONFIG_IXGBE_DCB
9098 if (tc) {
9099 if (adapter->xdp_prog) {
9100 e_warn(probe, "DCB is not supported with XDP\n");
9101
9102 ixgbe_init_interrupt_scheme(adapter);
9103 if (netif_running(dev))
9104 ixgbe_open(dev);
9105 return -EINVAL;
9106 }
9107
9108 netdev_set_num_tc(dev, tc);
9109 ixgbe_set_prio_tc_map(adapter);
9110
9111 adapter->hw_tcs = tc;
9112 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
9113
9114 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
9115 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
9116 adapter->hw.fc.requested_mode = ixgbe_fc_none;
9117 }
9118 } else {
9119 netdev_reset_tc(dev);
9120
9121 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
9122 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
9123
9124 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
9125 adapter->hw_tcs = tc;
9126
9127 adapter->temp_dcb_cfg.pfc_mode_enable = false;
9128 adapter->dcb_cfg.pfc_mode_enable = false;
9129 }
9130
9131 ixgbe_validate_rtr(adapter, tc);
9132
9133#endif
9134 ixgbe_init_interrupt_scheme(adapter);
9135
9136 ixgbe_defrag_macvlan_pools(dev);
9137
9138 if (netif_running(dev))
9139 return ixgbe_open(dev);
9140
9141 return 0;
9142}
9143
9144static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
9145 struct tc_cls_u32_offload *cls)
9146{
9147 u32 hdl = cls->knode.handle;
9148 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
9149 u32 loc = cls->knode.handle & 0xfffff;
9150 int err = 0, i, j;
9151 struct ixgbe_jump_table *jump = NULL;
9152
9153 if (loc > IXGBE_MAX_HW_ENTRIES)
9154 return -EINVAL;
9155
9156 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
9157 return -EINVAL;
9158
9159
9160 if (uhtid != 0x800) {
9161 jump = adapter->jump_tables[uhtid];
9162 if (!jump)
9163 return -EINVAL;
9164 if (!test_bit(loc - 1, jump->child_loc_map))
9165 return -EINVAL;
9166 clear_bit(loc - 1, jump->child_loc_map);
9167 }
9168
9169
9170 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9171 jump = adapter->jump_tables[i];
9172 if (jump && jump->link_hdl == hdl) {
9173
9174
9175
9176 for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
9177 if (!test_bit(j, jump->child_loc_map))
9178 continue;
9179 spin_lock(&adapter->fdir_perfect_lock);
9180 err = ixgbe_update_ethtool_fdir_entry(adapter,
9181 NULL,
9182 j + 1);
9183 spin_unlock(&adapter->fdir_perfect_lock);
9184 clear_bit(j, jump->child_loc_map);
9185 }
9186
9187 kfree(jump->input);
9188 kfree(jump->mask);
9189 kfree(jump);
9190 adapter->jump_tables[i] = NULL;
9191 return err;
9192 }
9193 }
9194
9195 spin_lock(&adapter->fdir_perfect_lock);
9196 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
9197 spin_unlock(&adapter->fdir_perfect_lock);
9198 return err;
9199}
9200
9201static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
9202 struct tc_cls_u32_offload *cls)
9203{
9204 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9205
9206 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9207 return -EINVAL;
9208
9209
9210
9211
9212 if (cls->hnode.divisor > 0)
9213 return -EINVAL;
9214
9215 set_bit(uhtid - 1, &adapter->tables);
9216 return 0;
9217}
9218
9219static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
9220 struct tc_cls_u32_offload *cls)
9221{
9222 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9223
9224 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9225 return -EINVAL;
9226
9227 clear_bit(uhtid - 1, &adapter->tables);
9228 return 0;
9229}
9230
9231#ifdef CONFIG_NET_CLS_ACT
9232struct upper_walk_data {
9233 struct ixgbe_adapter *adapter;
9234 u64 action;
9235 int ifindex;
9236 u8 queue;
9237};
9238
9239static int get_macvlan_queue(struct net_device *upper, void *_data)
9240{
9241 if (netif_is_macvlan(upper)) {
9242 struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
9243 struct upper_walk_data *data = _data;
9244 struct ixgbe_adapter *adapter = data->adapter;
9245 int ifindex = data->ifindex;
9246
9247 if (vadapter && upper->ifindex == ifindex) {
9248 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
9249 data->action = data->queue;
9250 return 1;
9251 }
9252 }
9253
9254 return 0;
9255}
9256
9257static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
9258 u8 *queue, u64 *action)
9259{
9260 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
9261 unsigned int num_vfs = adapter->num_vfs, vf;
9262 struct upper_walk_data data;
9263 struct net_device *upper;
9264
9265
9266 for (vf = 0; vf < num_vfs; ++vf) {
9267 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
9268 if (upper->ifindex == ifindex) {
9269 *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
9270 *action = vf + 1;
9271 *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
9272 return 0;
9273 }
9274 }
9275
9276
9277 data.adapter = adapter;
9278 data.ifindex = ifindex;
9279 data.action = 0;
9280 data.queue = 0;
9281 if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
9282 get_macvlan_queue, &data)) {
9283 *action = data.action;
9284 *queue = data.queue;
9285
9286 return 0;
9287 }
9288
9289 return -EINVAL;
9290}
9291
9292static int parse_tc_actions(struct ixgbe_adapter *adapter,
9293 struct tcf_exts *exts, u64 *action, u8 *queue)
9294{
9295 const struct tc_action *a;
9296 int i;
9297
9298 if (!tcf_exts_has_actions(exts))
9299 return -EINVAL;
9300
9301 tcf_exts_for_each_action(i, a, exts) {
9302
9303 if (is_tcf_gact_shot(a)) {
9304 *action = IXGBE_FDIR_DROP_QUEUE;
9305 *queue = IXGBE_FDIR_DROP_QUEUE;
9306 return 0;
9307 }
9308
9309
9310 if (is_tcf_mirred_egress_redirect(a)) {
9311 struct net_device *dev = tcf_mirred_dev(a);
9312
9313 if (!dev)
9314 return -EINVAL;
9315 return handle_redirect_action(adapter, dev->ifindex,
9316 queue, action);
9317 }
9318
9319 return -EINVAL;
9320 }
9321
9322 return -EINVAL;
9323}
9324#else
9325static int parse_tc_actions(struct ixgbe_adapter *adapter,
9326 struct tcf_exts *exts, u64 *action, u8 *queue)
9327{
9328 return -EINVAL;
9329}
9330#endif
9331
9332static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
9333 union ixgbe_atr_input *mask,
9334 struct tc_cls_u32_offload *cls,
9335 struct ixgbe_mat_field *field_ptr,
9336 struct ixgbe_nexthdr *nexthdr)
9337{
9338 int i, j, off;
9339 __be32 val, m;
9340 bool found_entry = false, found_jump_field = false;
9341
9342 for (i = 0; i < cls->knode.sel->nkeys; i++) {
9343 off = cls->knode.sel->keys[i].off;
9344 val = cls->knode.sel->keys[i].val;
9345 m = cls->knode.sel->keys[i].mask;
9346
9347 for (j = 0; field_ptr[j].val; j++) {
9348 if (field_ptr[j].off == off) {
9349 field_ptr[j].val(input, mask, (__force u32)val,
9350 (__force u32)m);
9351 input->filter.formatted.flow_type |=
9352 field_ptr[j].type;
9353 found_entry = true;
9354 break;
9355 }
9356 }
9357 if (nexthdr) {
9358 if (nexthdr->off == cls->knode.sel->keys[i].off &&
9359 nexthdr->val ==
9360 (__force u32)cls->knode.sel->keys[i].val &&
9361 nexthdr->mask ==
9362 (__force u32)cls->knode.sel->keys[i].mask)
9363 found_jump_field = true;
9364 else
9365 continue;
9366 }
9367 }
9368
9369 if (nexthdr && !found_jump_field)
9370 return -EINVAL;
9371
9372 if (!found_entry)
9373 return 0;
9374
9375 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
9376 IXGBE_ATR_L4TYPE_MASK;
9377
9378 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
9379 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
9380
9381 return 0;
9382}
9383
9384static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
9385 struct tc_cls_u32_offload *cls)
9386{
9387 __be16 protocol = cls->common.protocol;
9388 u32 loc = cls->knode.handle & 0xfffff;
9389 struct ixgbe_hw *hw = &adapter->hw;
9390 struct ixgbe_mat_field *field_ptr;
9391 struct ixgbe_fdir_filter *input = NULL;
9392 union ixgbe_atr_input *mask = NULL;
9393 struct ixgbe_jump_table *jump = NULL;
9394 int i, err = -EINVAL;
9395 u8 queue;
9396 u32 uhtid, link_uhtid;
9397
9398 uhtid = TC_U32_USERHTID(cls->knode.handle);
9399 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
9400
9401
9402
9403
9404
9405
9406
9407
9408 if (protocol != htons(ETH_P_IP))
9409 return err;
9410
9411 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
9412 e_err(drv, "Location out of range\n");
9413 return err;
9414 }
9415
9416
9417
9418
9419
9420
9421
9422
9423 if (uhtid == 0x800) {
9424 field_ptr = (adapter->jump_tables[0])->mat;
9425 } else {
9426 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9427 return err;
9428 if (!adapter->jump_tables[uhtid])
9429 return err;
9430 field_ptr = (adapter->jump_tables[uhtid])->mat;
9431 }
9432
9433 if (!field_ptr)
9434 return err;
9435
9436
9437
9438
9439
9440
9441
9442 if (link_uhtid) {
9443 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
9444
9445 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
9446 return err;
9447
9448 if (!test_bit(link_uhtid - 1, &adapter->tables))
9449 return err;
9450
9451
9452
9453
9454
9455
9456 if (adapter->jump_tables[link_uhtid] &&
9457 (adapter->jump_tables[link_uhtid])->link_hdl) {
9458 e_err(drv, "Link filter exists for link: %x\n",
9459 link_uhtid);
9460 return err;
9461 }
9462
9463 for (i = 0; nexthdr[i].jump; i++) {
9464 if (nexthdr[i].o != cls->knode.sel->offoff ||
9465 nexthdr[i].s != cls->knode.sel->offshift ||
9466 nexthdr[i].m !=
9467 (__force u32)cls->knode.sel->offmask)
9468 return err;
9469
9470 jump = kzalloc(sizeof(*jump), GFP_KERNEL);
9471 if (!jump)
9472 return -ENOMEM;
9473 input = kzalloc(sizeof(*input), GFP_KERNEL);
9474 if (!input) {
9475 err = -ENOMEM;
9476 goto free_jump;
9477 }
9478 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9479 if (!mask) {
9480 err = -ENOMEM;
9481 goto free_input;
9482 }
9483 jump->input = input;
9484 jump->mask = mask;
9485 jump->link_hdl = cls->knode.handle;
9486
9487 err = ixgbe_clsu32_build_input(input, mask, cls,
9488 field_ptr, &nexthdr[i]);
9489 if (!err) {
9490 jump->mat = nexthdr[i].jump;
9491 adapter->jump_tables[link_uhtid] = jump;
9492 break;
9493 }
9494 }
9495 return 0;
9496 }
9497
9498 input = kzalloc(sizeof(*input), GFP_KERNEL);
9499 if (!input)
9500 return -ENOMEM;
9501 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9502 if (!mask) {
9503 err = -ENOMEM;
9504 goto free_input;
9505 }
9506
9507 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
9508 if ((adapter->jump_tables[uhtid])->input)
9509 memcpy(input, (adapter->jump_tables[uhtid])->input,
9510 sizeof(*input));
9511 if ((adapter->jump_tables[uhtid])->mask)
9512 memcpy(mask, (adapter->jump_tables[uhtid])->mask,
9513 sizeof(*mask));
9514
9515
9516
9517
9518 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9519 struct ixgbe_jump_table *link = adapter->jump_tables[i];
9520
9521 if (link && (test_bit(loc - 1, link->child_loc_map))) {
9522 e_err(drv, "Filter exists in location: %x\n",
9523 loc);
9524 err = -EINVAL;
9525 goto err_out;
9526 }
9527 }
9528 }
9529 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
9530 if (err)
9531 goto err_out;
9532
9533 err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
9534 &queue);
9535 if (err < 0)
9536 goto err_out;
9537
9538 input->sw_idx = loc;
9539
9540 spin_lock(&adapter->fdir_perfect_lock);
9541
9542 if (hlist_empty(&adapter->fdir_filter_list)) {
9543 memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
9544 err = ixgbe_fdir_set_input_mask_82599(hw, mask);
9545 if (err)
9546 goto err_out_w_lock;
9547 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
9548 err = -EINVAL;
9549 goto err_out_w_lock;
9550 }
9551
9552 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
9553 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
9554 input->sw_idx, queue);
9555 if (!err)
9556 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
9557 spin_unlock(&adapter->fdir_perfect_lock);
9558
9559 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
9560 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
9561
9562 kfree(mask);
9563 return err;
9564err_out_w_lock:
9565 spin_unlock(&adapter->fdir_perfect_lock);
9566err_out:
9567 kfree(mask);
9568free_input:
9569 kfree(input);
9570free_jump:
9571 kfree(jump);
9572 return err;
9573}
9574
9575static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
9576 struct tc_cls_u32_offload *cls_u32)
9577{
9578 switch (cls_u32->command) {
9579 case TC_CLSU32_NEW_KNODE:
9580 case TC_CLSU32_REPLACE_KNODE:
9581 return ixgbe_configure_clsu32(adapter, cls_u32);
9582 case TC_CLSU32_DELETE_KNODE:
9583 return ixgbe_delete_clsu32(adapter, cls_u32);
9584 case TC_CLSU32_NEW_HNODE:
9585 case TC_CLSU32_REPLACE_HNODE:
9586 return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32);
9587 case TC_CLSU32_DELETE_HNODE:
9588 return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32);
9589 default:
9590 return -EOPNOTSUPP;
9591 }
9592}
9593
9594static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9595 void *cb_priv)
9596{
9597 struct ixgbe_adapter *adapter = cb_priv;
9598
9599 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
9600 return -EOPNOTSUPP;
9601
9602 switch (type) {
9603 case TC_SETUP_CLSU32:
9604 return ixgbe_setup_tc_cls_u32(adapter, type_data);
9605 default:
9606 return -EOPNOTSUPP;
9607 }
9608}
9609
9610static int ixgbe_setup_tc_mqprio(struct net_device *dev,
9611 struct tc_mqprio_qopt *mqprio)
9612{
9613 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9614 return ixgbe_setup_tc(dev, mqprio->num_tc);
9615}
9616
9617static LIST_HEAD(ixgbe_block_cb_list);
9618
9619static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
9620 void *type_data)
9621{
9622 struct ixgbe_adapter *adapter = netdev_priv(dev);
9623
9624 switch (type) {
9625 case TC_SETUP_BLOCK:
9626 return flow_block_cb_setup_simple(type_data,
9627 &ixgbe_block_cb_list,
9628 ixgbe_setup_tc_block_cb,
9629 adapter, adapter, true);
9630 case TC_SETUP_QDISC_MQPRIO:
9631 return ixgbe_setup_tc_mqprio(dev, type_data);
9632 default:
9633 return -EOPNOTSUPP;
9634 }
9635}
9636
9637#ifdef CONFIG_PCI_IOV
9638void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
9639{
9640 struct net_device *netdev = adapter->netdev;
9641
9642 rtnl_lock();
9643 ixgbe_setup_tc(netdev, adapter->hw_tcs);
9644 rtnl_unlock();
9645}
9646
9647#endif
9648void ixgbe_do_reset(struct net_device *netdev)
9649{
9650 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9651
9652 if (netif_running(netdev))
9653 ixgbe_reinit_locked(adapter);
9654 else
9655 ixgbe_reset(adapter);
9656}
9657
9658static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
9659 netdev_features_t features)
9660{
9661 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9662
9663
9664 if (!(features & NETIF_F_RXCSUM))
9665 features &= ~NETIF_F_LRO;
9666
9667
9668 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
9669 features &= ~NETIF_F_LRO;
9670
9671 if (adapter->xdp_prog && (features & NETIF_F_LRO)) {
9672 e_dev_err("LRO is not supported with XDP\n");
9673 features &= ~NETIF_F_LRO;
9674 }
9675
9676 return features;
9677}
9678
9679static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
9680{
9681 int rss = min_t(int, ixgbe_max_rss_indices(adapter),
9682 num_online_cpus());
9683
9684
9685 if (!adapter->ring_feature[RING_F_VMDQ].offset)
9686 adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
9687 IXGBE_FLAG_SRIOV_ENABLED);
9688
9689 adapter->ring_feature[RING_F_RSS].limit = rss;
9690 adapter->ring_feature[RING_F_VMDQ].limit = 1;
9691
9692 ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
9693}
9694
9695static int ixgbe_set_features(struct net_device *netdev,
9696 netdev_features_t features)
9697{
9698 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9699 netdev_features_t changed = netdev->features ^ features;
9700 bool need_reset = false;
9701
9702
9703 if (!(features & NETIF_F_LRO)) {
9704 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9705 need_reset = true;
9706 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
9707 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
9708 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
9709 if (adapter->rx_itr_setting == 1 ||
9710 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
9711 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
9712 need_reset = true;
9713 } else if ((changed ^ features) & NETIF_F_LRO) {
9714 e_info(probe, "rx-usecs set too low, "
9715 "disabling RSC\n");
9716 }
9717 }
9718
9719
9720
9721
9722
9723 if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
9724
9725 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
9726 need_reset = true;
9727
9728 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
9729 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9730 } else {
9731
9732 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
9733 need_reset = true;
9734
9735 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9736
9737
9738 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
9739
9740 (adapter->hw_tcs > 1) ||
9741
9742 (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
9743
9744 (!adapter->atr_sample_rate))
9745 ;
9746 else
9747 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
9748 }
9749
9750 if (changed & NETIF_F_RXALL)
9751 need_reset = true;
9752
9753 netdev->features = features;
9754
9755 if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
9756 if (features & NETIF_F_RXCSUM) {
9757 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9758 } else {
9759 u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9760
9761 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9762 }
9763 }
9764
9765 if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
9766 if (features & NETIF_F_RXCSUM) {
9767 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9768 } else {
9769 u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9770
9771 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9772 }
9773 }
9774
9775 if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
9776 ixgbe_reset_l2fw_offload(adapter);
9777 else if (need_reset)
9778 ixgbe_do_reset(netdev);
9779 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
9780 NETIF_F_HW_VLAN_CTAG_FILTER))
9781 ixgbe_set_rx_mode(netdev);
9782
9783 return 1;
9784}
9785
9786
9787
9788
9789
9790
9791static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
9792 struct udp_tunnel_info *ti)
9793{
9794 struct ixgbe_adapter *adapter = netdev_priv(dev);
9795 struct ixgbe_hw *hw = &adapter->hw;
9796 __be16 port = ti->port;
9797 u32 port_shift = 0;
9798 u32 reg;
9799
9800 if (ti->sa_family != AF_INET)
9801 return;
9802
9803 switch (ti->type) {
9804 case UDP_TUNNEL_TYPE_VXLAN:
9805 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9806 return;
9807
9808 if (adapter->vxlan_port == port)
9809 return;
9810
9811 if (adapter->vxlan_port) {
9812 netdev_info(dev,
9813 "VXLAN port %d set, not adding port %d\n",
9814 ntohs(adapter->vxlan_port),
9815 ntohs(port));
9816 return;
9817 }
9818
9819 adapter->vxlan_port = port;
9820 break;
9821 case UDP_TUNNEL_TYPE_GENEVE:
9822 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9823 return;
9824
9825 if (adapter->geneve_port == port)
9826 return;
9827
9828 if (adapter->geneve_port) {
9829 netdev_info(dev,
9830 "GENEVE port %d set, not adding port %d\n",
9831 ntohs(adapter->geneve_port),
9832 ntohs(port));
9833 return;
9834 }
9835
9836 port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
9837 adapter->geneve_port = port;
9838 break;
9839 default:
9840 return;
9841 }
9842
9843 reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
9844 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
9845}
9846
9847
9848
9849
9850
9851
9852static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
9853 struct udp_tunnel_info *ti)
9854{
9855 struct ixgbe_adapter *adapter = netdev_priv(dev);
9856 u32 port_mask;
9857
9858 if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
9859 ti->type != UDP_TUNNEL_TYPE_GENEVE)
9860 return;
9861
9862 if (ti->sa_family != AF_INET)
9863 return;
9864
9865 switch (ti->type) {
9866 case UDP_TUNNEL_TYPE_VXLAN:
9867 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9868 return;
9869
9870 if (adapter->vxlan_port != ti->port) {
9871 netdev_info(dev, "VXLAN port %d not found\n",
9872 ntohs(ti->port));
9873 return;
9874 }
9875
9876 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9877 break;
9878 case UDP_TUNNEL_TYPE_GENEVE:
9879 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9880 return;
9881
9882 if (adapter->geneve_port != ti->port) {
9883 netdev_info(dev, "GENEVE port %d not found\n",
9884 ntohs(ti->port));
9885 return;
9886 }
9887
9888 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9889 break;
9890 default:
9891 return;
9892 }
9893
9894 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9895 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9896}
9897
9898static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9899 struct net_device *dev,
9900 const unsigned char *addr, u16 vid,
9901 u16 flags,
9902 struct netlink_ext_ack *extack)
9903{
9904
9905 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
9906 struct ixgbe_adapter *adapter = netdev_priv(dev);
9907 u16 pool = VMDQ_P(0);
9908
9909 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
9910 return -ENOMEM;
9911 }
9912
9913 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
9914}
9915
9916
9917
9918
9919
9920
9921
9922
9923static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
9924 __u16 mode)
9925{
9926 struct ixgbe_hw *hw = &adapter->hw;
9927 unsigned int p, num_pools;
9928 u32 vmdctl;
9929
9930 switch (mode) {
9931 case BRIDGE_MODE_VEPA:
9932
9933 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
9934
9935
9936
9937
9938
9939 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9940 vmdctl |= IXGBE_VT_CTL_REPLEN;
9941 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9942
9943
9944
9945
9946 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9947 for (p = 0; p < num_pools; p++) {
9948 if (hw->mac.ops.set_source_address_pruning)
9949 hw->mac.ops.set_source_address_pruning(hw,
9950 true,
9951 p);
9952 }
9953 break;
9954 case BRIDGE_MODE_VEB:
9955
9956 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
9957 IXGBE_PFDTXGSWC_VT_LBEN);
9958
9959
9960
9961
9962 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9963 if (!adapter->num_vfs)
9964 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
9965 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9966
9967
9968
9969
9970 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9971 for (p = 0; p < num_pools; p++) {
9972 if (hw->mac.ops.set_source_address_pruning)
9973 hw->mac.ops.set_source_address_pruning(hw,
9974 false,
9975 p);
9976 }
9977 break;
9978 default:
9979 return -EINVAL;
9980 }
9981
9982 adapter->bridge_mode = mode;
9983
9984 e_info(drv, "enabling bridge mode: %s\n",
9985 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9986
9987 return 0;
9988}
9989
9990static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
9991 struct nlmsghdr *nlh, u16 flags,
9992 struct netlink_ext_ack *extack)
9993{
9994 struct ixgbe_adapter *adapter = netdev_priv(dev);
9995 struct nlattr *attr, *br_spec;
9996 int rem;
9997
9998 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9999 return -EOPNOTSUPP;
10000
10001 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
10002 if (!br_spec)
10003 return -EINVAL;
10004
10005 nla_for_each_nested(attr, br_spec, rem) {
10006 int status;
10007 __u16 mode;
10008
10009 if (nla_type(attr) != IFLA_BRIDGE_MODE)
10010 continue;
10011
10012 if (nla_len(attr) < sizeof(mode))
10013 return -EINVAL;
10014
10015 mode = nla_get_u16(attr);
10016 status = ixgbe_configure_bridge_mode(adapter, mode);
10017 if (status)
10018 return status;
10019
10020 break;
10021 }
10022
10023 return 0;
10024}
10025
10026static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
10027 struct net_device *dev,
10028 u32 filter_mask, int nlflags)
10029{
10030 struct ixgbe_adapter *adapter = netdev_priv(dev);
10031
10032 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
10033 return 0;
10034
10035 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
10036 adapter->bridge_mode, 0, 0, nlflags,
10037 filter_mask, NULL);
10038}
10039
10040static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
10041{
10042 struct ixgbe_adapter *adapter = netdev_priv(pdev);
10043 struct ixgbe_fwd_adapter *accel;
10044 int tcs = adapter->hw_tcs ? : 1;
10045 int pool, err;
10046
10047 if (adapter->xdp_prog) {
10048 e_warn(probe, "L2FW offload is not supported with XDP\n");
10049 return ERR_PTR(-EINVAL);
10050 }
10051
10052
10053
10054
10055
10056 if (!macvlan_supports_dest_filter(vdev))
10057 return ERR_PTR(-EMEDIUMTYPE);
10058
10059
10060
10061
10062
10063 if (netif_is_multiqueue(vdev))
10064 return ERR_PTR(-ERANGE);
10065
10066 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
10067 if (pool == adapter->num_rx_pools) {
10068 u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
10069 u16 reserved_pools;
10070
10071 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
10072 adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
10073 adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
10074 return ERR_PTR(-EBUSY);
10075
10076
10077
10078
10079
10080 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
10081 return ERR_PTR(-EBUSY);
10082
10083
10084 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
10085 IXGBE_FLAG_SRIOV_ENABLED;
10086
10087
10088
10089
10090
10091 if (used_pools < 32 && adapter->num_rx_pools < 16)
10092 reserved_pools = min_t(u16,
10093 32 - used_pools,
10094 16 - adapter->num_rx_pools);
10095 else if (adapter->num_rx_pools < 32)
10096 reserved_pools = min_t(u16,
10097 64 - used_pools,
10098 32 - adapter->num_rx_pools);
10099 else
10100 reserved_pools = 64 - used_pools;
10101
10102
10103 if (!reserved_pools)
10104 return ERR_PTR(-EBUSY);
10105
10106 adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
10107
10108
10109 err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
10110 if (err)
10111 return ERR_PTR(err);
10112
10113 if (pool >= adapter->num_rx_pools)
10114 return ERR_PTR(-ENOMEM);
10115 }
10116
10117 accel = kzalloc(sizeof(*accel), GFP_KERNEL);
10118 if (!accel)
10119 return ERR_PTR(-ENOMEM);
10120
10121 set_bit(pool, adapter->fwd_bitmask);
10122 netdev_set_sb_channel(vdev, pool);
10123 accel->pool = pool;
10124 accel->netdev = vdev;
10125
10126 if (!netif_running(pdev))
10127 return accel;
10128
10129 err = ixgbe_fwd_ring_up(adapter, accel);
10130 if (err)
10131 return ERR_PTR(err);
10132
10133 return accel;
10134}
10135
10136static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
10137{
10138 struct ixgbe_fwd_adapter *accel = priv;
10139 struct ixgbe_adapter *adapter = netdev_priv(pdev);
10140 unsigned int rxbase = accel->rx_base_queue;
10141 unsigned int i;
10142
10143
10144 ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
10145 VMDQ_P(accel->pool));
10146
10147
10148
10149
10150 usleep_range(10000, 20000);
10151
10152 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
10153 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
10154 struct ixgbe_q_vector *qv = ring->q_vector;
10155
10156
10157
10158
10159 if (netif_running(adapter->netdev))
10160 napi_synchronize(&qv->napi);
10161 ring->netdev = NULL;
10162 }
10163
10164
10165 netdev_unbind_sb_channel(pdev, accel->netdev);
10166 netdev_set_sb_channel(accel->netdev, 0);
10167
10168 clear_bit(accel->pool, adapter->fwd_bitmask);
10169 kfree(accel);
10170}
10171
10172#define IXGBE_MAX_MAC_HDR_LEN 127
10173#define IXGBE_MAX_NETWORK_HDR_LEN 511
10174
10175static netdev_features_t
10176ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
10177 netdev_features_t features)
10178{
10179 unsigned int network_hdr_len, mac_hdr_len;
10180
10181
10182 mac_hdr_len = skb_network_header(skb) - skb->data;
10183 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
10184 return features & ~(NETIF_F_HW_CSUM |
10185 NETIF_F_SCTP_CRC |
10186 NETIF_F_HW_VLAN_CTAG_TX |
10187 NETIF_F_TSO |
10188 NETIF_F_TSO6);
10189
10190 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
10191 if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
10192 return features & ~(NETIF_F_HW_CSUM |
10193 NETIF_F_SCTP_CRC |
10194 NETIF_F_TSO |
10195 NETIF_F_TSO6);
10196
10197
10198
10199
10200
10201
10202 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
10203#ifdef CONFIG_IXGBE_IPSEC
10204 if (!secpath_exists(skb))
10205#endif
10206 features &= ~NETIF_F_TSO;
10207 }
10208
10209 return features;
10210}
10211
10212static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10213{
10214 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
10215 struct ixgbe_adapter *adapter = netdev_priv(dev);
10216 struct bpf_prog *old_prog;
10217 bool need_reset;
10218
10219 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
10220 return -EINVAL;
10221
10222 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
10223 return -EINVAL;
10224
10225
10226 for (i = 0; i < adapter->num_rx_queues; i++) {
10227 struct ixgbe_ring *ring = adapter->rx_ring[i];
10228
10229 if (ring_is_rsc_enabled(ring))
10230 return -EINVAL;
10231
10232 if (frame_size > ixgbe_rx_bufsz(ring))
10233 return -EINVAL;
10234 }
10235
10236 if (nr_cpu_ids > MAX_XDP_QUEUES)
10237 return -ENOMEM;
10238
10239 old_prog = xchg(&adapter->xdp_prog, prog);
10240 need_reset = (!!prog != !!old_prog);
10241
10242
10243 if (need_reset) {
10244 int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
10245
10246 if (err) {
10247 rcu_assign_pointer(adapter->xdp_prog, old_prog);
10248 return -EINVAL;
10249 }
10250 } else {
10251 for (i = 0; i < adapter->num_rx_queues; i++)
10252 (void)xchg(&adapter->rx_ring[i]->xdp_prog,
10253 adapter->xdp_prog);
10254 }
10255
10256 if (old_prog)
10257 bpf_prog_put(old_prog);
10258
10259
10260
10261
10262 if (need_reset && prog)
10263 for (i = 0; i < adapter->num_rx_queues; i++)
10264 if (adapter->xdp_ring[i]->xsk_umem)
10265 (void)ixgbe_xsk_async_xmit(adapter->netdev, i);
10266
10267 return 0;
10268}
10269
10270static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
10271{
10272 struct ixgbe_adapter *adapter = netdev_priv(dev);
10273
10274 switch (xdp->command) {
10275 case XDP_SETUP_PROG:
10276 return ixgbe_xdp_setup(dev, xdp->prog);
10277 case XDP_QUERY_PROG:
10278 xdp->prog_id = adapter->xdp_prog ?
10279 adapter->xdp_prog->aux->id : 0;
10280 return 0;
10281 case XDP_SETUP_XSK_UMEM:
10282 return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
10283 xdp->xsk.queue_id);
10284
10285 default:
10286 return -EINVAL;
10287 }
10288}
10289
10290void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
10291{
10292
10293
10294
10295 wmb();
10296 writel(ring->next_to_use, ring->tail);
10297}
10298
10299static int ixgbe_xdp_xmit(struct net_device *dev, int n,
10300 struct xdp_frame **frames, u32 flags)
10301{
10302 struct ixgbe_adapter *adapter = netdev_priv(dev);
10303 struct ixgbe_ring *ring;
10304 int drops = 0;
10305 int i;
10306
10307 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
10308 return -ENETDOWN;
10309
10310 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
10311 return -EINVAL;
10312
10313
10314
10315
10316 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
10317 if (unlikely(!ring))
10318 return -ENXIO;
10319
10320 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
10321 return -ENXIO;
10322
10323 for (i = 0; i < n; i++) {
10324 struct xdp_frame *xdpf = frames[i];
10325 int err;
10326
10327 err = ixgbe_xmit_xdp_ring(adapter, xdpf);
10328 if (err != IXGBE_XDP_TX) {
10329 xdp_return_frame_rx_napi(xdpf);
10330 drops++;
10331 }
10332 }
10333
10334 if (unlikely(flags & XDP_XMIT_FLUSH))
10335 ixgbe_xdp_ring_update_tail(ring);
10336
10337 return n - drops;
10338}
10339
10340static const struct net_device_ops ixgbe_netdev_ops = {
10341 .ndo_open = ixgbe_open,
10342 .ndo_stop = ixgbe_close,
10343 .ndo_start_xmit = ixgbe_xmit_frame,
10344 .ndo_set_rx_mode = ixgbe_set_rx_mode,
10345 .ndo_validate_addr = eth_validate_addr,
10346 .ndo_set_mac_address = ixgbe_set_mac,
10347 .ndo_change_mtu = ixgbe_change_mtu,
10348 .ndo_tx_timeout = ixgbe_tx_timeout,
10349 .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
10350 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
10351 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
10352 .ndo_do_ioctl = ixgbe_ioctl,
10353 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
10354 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
10355 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
10356 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
10357 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
10358 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
10359 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
10360 .ndo_get_stats64 = ixgbe_get_stats64,
10361 .ndo_setup_tc = __ixgbe_setup_tc,
10362#ifdef IXGBE_FCOE
10363 .ndo_select_queue = ixgbe_select_queue,
10364 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
10365 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
10366 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
10367 .ndo_fcoe_enable = ixgbe_fcoe_enable,
10368 .ndo_fcoe_disable = ixgbe_fcoe_disable,
10369 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
10370 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
10371#endif
10372 .ndo_set_features = ixgbe_set_features,
10373 .ndo_fix_features = ixgbe_fix_features,
10374 .ndo_fdb_add = ixgbe_ndo_fdb_add,
10375 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
10376 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
10377 .ndo_dfwd_add_station = ixgbe_fwd_add,
10378 .ndo_dfwd_del_station = ixgbe_fwd_del,
10379 .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
10380 .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
10381 .ndo_features_check = ixgbe_features_check,
10382 .ndo_bpf = ixgbe_xdp,
10383 .ndo_xdp_xmit = ixgbe_xdp_xmit,
10384 .ndo_xsk_async_xmit = ixgbe_xsk_async_xmit,
10385};
10386
10387static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
10388 struct ixgbe_ring *tx_ring)
10389{
10390 unsigned long wait_delay, delay_interval;
10391 struct ixgbe_hw *hw = &adapter->hw;
10392 u8 reg_idx = tx_ring->reg_idx;
10393 int wait_loop;
10394 u32 txdctl;
10395
10396 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
10397
10398
10399 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10400
10401 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10402 wait_delay = delay_interval;
10403
10404 while (wait_loop--) {
10405 usleep_range(wait_delay, wait_delay + 10);
10406 wait_delay += delay_interval * 2;
10407 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
10408
10409 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
10410 return;
10411 }
10412
10413 e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
10414}
10415
10416static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
10417 struct ixgbe_ring *tx_ring)
10418{
10419 set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10420 ixgbe_disable_txr_hw(adapter, tx_ring);
10421}
10422
10423static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
10424 struct ixgbe_ring *rx_ring)
10425{
10426 unsigned long wait_delay, delay_interval;
10427 struct ixgbe_hw *hw = &adapter->hw;
10428 u8 reg_idx = rx_ring->reg_idx;
10429 int wait_loop;
10430 u32 rxdctl;
10431
10432 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10433 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
10434 rxdctl |= IXGBE_RXDCTL_SWFLSH;
10435
10436
10437 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
10438
10439
10440 if (hw->mac.type == ixgbe_mac_82598EB &&
10441 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
10442 return;
10443
10444
10445 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10446
10447 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10448 wait_delay = delay_interval;
10449
10450 while (wait_loop--) {
10451 usleep_range(wait_delay, wait_delay + 10);
10452 wait_delay += delay_interval * 2;
10453 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10454
10455 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
10456 return;
10457 }
10458
10459 e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
10460}
10461
10462static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
10463{
10464 memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
10465 memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
10466}
10467
10468static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
10469{
10470 memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
10471 memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
10472}
10473
10474
10475
10476
10477
10478
10479
10480
10481
10482void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
10483{
10484 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10485
10486 rx_ring = adapter->rx_ring[ring];
10487 tx_ring = adapter->tx_ring[ring];
10488 xdp_ring = adapter->xdp_ring[ring];
10489
10490 ixgbe_disable_txr(adapter, tx_ring);
10491 if (xdp_ring)
10492 ixgbe_disable_txr(adapter, xdp_ring);
10493 ixgbe_disable_rxr_hw(adapter, rx_ring);
10494
10495 if (xdp_ring)
10496 synchronize_rcu();
10497
10498
10499 napi_disable(&rx_ring->q_vector->napi);
10500
10501 ixgbe_clean_tx_ring(tx_ring);
10502 if (xdp_ring)
10503 ixgbe_clean_tx_ring(xdp_ring);
10504 ixgbe_clean_rx_ring(rx_ring);
10505
10506 ixgbe_reset_txr_stats(tx_ring);
10507 if (xdp_ring)
10508 ixgbe_reset_txr_stats(xdp_ring);
10509 ixgbe_reset_rxr_stats(rx_ring);
10510}
10511
10512
10513
10514
10515
10516
10517
10518
10519
10520void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
10521{
10522 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10523
10524 rx_ring = adapter->rx_ring[ring];
10525 tx_ring = adapter->tx_ring[ring];
10526 xdp_ring = adapter->xdp_ring[ring];
10527
10528
10529 napi_enable(&rx_ring->q_vector->napi);
10530
10531 ixgbe_configure_tx_ring(adapter, tx_ring);
10532 if (xdp_ring)
10533 ixgbe_configure_tx_ring(adapter, xdp_ring);
10534 ixgbe_configure_rx_ring(adapter, rx_ring);
10535
10536 clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10537 if (xdp_ring)
10538 clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
10539}
10540
10541
10542
10543
10544
10545
10546
10547
10548
10549
10550static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
10551{
10552 struct pci_dev *entry, *pdev = adapter->pdev;
10553 int physfns = 0;
10554
10555
10556
10557
10558
10559 if (ixgbe_pcie_from_parent(&adapter->hw))
10560 physfns = 4;
10561
10562 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
10563
10564 if (entry->is_virtfn)
10565 continue;
10566
10567
10568
10569
10570
10571
10572
10573 if ((entry->vendor != pdev->vendor) ||
10574 (entry->device != pdev->device))
10575 return -1;
10576
10577 physfns++;
10578 }
10579
10580 return physfns;
10581}
10582
10583
10584
10585
10586
10587
10588
10589
10590
10591
10592
10593bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
10594 u16 subdevice_id)
10595{
10596 struct ixgbe_hw *hw = &adapter->hw;
10597 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
10598
10599
10600 if (hw->mac.type == ixgbe_mac_82598EB)
10601 return false;
10602
10603
10604 if (hw->mac.type >= ixgbe_mac_X540) {
10605 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
10606 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
10607 (hw->bus.func == 0)))
10608 return true;
10609 }
10610
10611
10612 switch (device_id) {
10613 case IXGBE_DEV_ID_82599_SFP:
10614
10615 switch (subdevice_id) {
10616 case IXGBE_SUBDEV_ID_82599_560FLR:
10617 case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
10618 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
10619 case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
10620
10621 if (hw->bus.func != 0)
10622 break;
10623
10624 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
10625 case IXGBE_SUBDEV_ID_82599_SFP:
10626 case IXGBE_SUBDEV_ID_82599_RNDC:
10627 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
10628 case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
10629 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
10630 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
10631 return true;
10632 }
10633 break;
10634 case IXGBE_DEV_ID_82599EN_SFP:
10635
10636 switch (subdevice_id) {
10637 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
10638 return true;
10639 }
10640 break;
10641 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
10642
10643 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
10644 return true;
10645 break;
10646 case IXGBE_DEV_ID_82599_KX4:
10647 return true;
10648 default:
10649 break;
10650 }
10651
10652 return false;
10653}
10654
10655
10656
10657
10658
10659
10660
10661
10662static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
10663{
10664 struct ixgbe_hw *hw = &adapter->hw;
10665 struct ixgbe_nvm_version nvm_ver;
10666
10667 ixgbe_get_oem_prod_version(hw, &nvm_ver);
10668 if (nvm_ver.oem_valid) {
10669 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10670 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
10671 nvm_ver.oem_release);
10672 return;
10673 }
10674
10675 ixgbe_get_etk_id(hw, &nvm_ver);
10676 ixgbe_get_orom_version(hw, &nvm_ver);
10677
10678 if (nvm_ver.or_valid) {
10679 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10680 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
10681 nvm_ver.or_build, nvm_ver.or_patch);
10682 return;
10683 }
10684
10685
10686 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10687 "0x%08x", nvm_ver.etk_id);
10688}
10689
10690
10691
10692
10693
10694
10695
10696
10697
10698
10699
10700
10701static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10702{
10703 struct net_device *netdev;
10704 struct ixgbe_adapter *adapter = NULL;
10705 struct ixgbe_hw *hw;
10706 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
10707 int i, err, pci_using_dac, expected_gts;
10708 unsigned int indices = MAX_TX_QUEUES;
10709 u8 part_str[IXGBE_PBANUM_LENGTH];
10710 bool disable_dev = false;
10711#ifdef IXGBE_FCOE
10712 u16 device_caps;
10713#endif
10714 u32 eec;
10715
10716
10717
10718
10719 if (pdev->is_virtfn) {
10720 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
10721 pci_name(pdev), pdev->vendor, pdev->device);
10722 return -EINVAL;
10723 }
10724
10725 err = pci_enable_device_mem(pdev);
10726 if (err)
10727 return err;
10728
10729 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
10730 pci_using_dac = 1;
10731 } else {
10732 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10733 if (err) {
10734 dev_err(&pdev->dev,
10735 "No usable DMA configuration, aborting\n");
10736 goto err_dma;
10737 }
10738 pci_using_dac = 0;
10739 }
10740
10741 err = pci_request_mem_regions(pdev, ixgbe_driver_name);
10742 if (err) {
10743 dev_err(&pdev->dev,
10744 "pci_request_selected_regions failed 0x%x\n", err);
10745 goto err_pci_reg;
10746 }
10747
10748 pci_enable_pcie_error_reporting(pdev);
10749
10750 pci_set_master(pdev);
10751 pci_save_state(pdev);
10752
10753 if (ii->mac == ixgbe_mac_82598EB) {
10754#ifdef CONFIG_IXGBE_DCB
10755
10756 indices = 4 * MAX_TRAFFIC_CLASS;
10757#else
10758 indices = IXGBE_MAX_RSS_INDICES;
10759#endif
10760 }
10761
10762 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
10763 if (!netdev) {
10764 err = -ENOMEM;
10765 goto err_alloc_etherdev;
10766 }
10767
10768 SET_NETDEV_DEV(netdev, &pdev->dev);
10769
10770 adapter = netdev_priv(netdev);
10771
10772 adapter->netdev = netdev;
10773 adapter->pdev = pdev;
10774 hw = &adapter->hw;
10775 hw->back = adapter;
10776 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
10777
10778 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
10779 pci_resource_len(pdev, 0));
10780 adapter->io_addr = hw->hw_addr;
10781 if (!hw->hw_addr) {
10782 err = -EIO;
10783 goto err_ioremap;
10784 }
10785
10786 netdev->netdev_ops = &ixgbe_netdev_ops;
10787 ixgbe_set_ethtool_ops(netdev);
10788 netdev->watchdog_timeo = 5 * HZ;
10789 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
10790
10791
10792 hw->mac.ops = *ii->mac_ops;
10793 hw->mac.type = ii->mac;
10794 hw->mvals = ii->mvals;
10795 if (ii->link_ops)
10796 hw->link.ops = *ii->link_ops;
10797
10798
10799 hw->eeprom.ops = *ii->eeprom_ops;
10800 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
10801 if (ixgbe_removed(hw->hw_addr)) {
10802 err = -EIO;
10803 goto err_ioremap;
10804 }
10805
10806 if (!(eec & BIT(8)))
10807 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
10808
10809
10810 hw->phy.ops = *ii->phy_ops;
10811 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
10812
10813 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
10814 hw->phy.mdio.mmds = 0;
10815 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
10816 hw->phy.mdio.dev = netdev;
10817 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
10818 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
10819
10820
10821 err = ixgbe_sw_init(adapter, ii);
10822 if (err)
10823 goto err_sw_init;
10824
10825
10826 if (hw->mac.ops.init_swfw_sync)
10827 hw->mac.ops.init_swfw_sync(hw);
10828
10829
10830 switch (adapter->hw.mac.type) {
10831 case ixgbe_mac_82599EB:
10832 case ixgbe_mac_X540:
10833 case ixgbe_mac_X550:
10834 case ixgbe_mac_X550EM_x:
10835 case ixgbe_mac_x550em_a:
10836 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10837 break;
10838 default:
10839 break;
10840 }
10841
10842
10843
10844
10845
10846 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
10847 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
10848 if (esdp & IXGBE_ESDP_SDP1)
10849 e_crit(probe, "Fan has stopped, replace the adapter\n");
10850 }
10851
10852 if (allow_unsupported_sfp)
10853 hw->allow_unsupported_sfp = allow_unsupported_sfp;
10854
10855
10856 hw->phy.reset_if_overtemp = true;
10857 err = hw->mac.ops.reset_hw(hw);
10858 hw->phy.reset_if_overtemp = false;
10859 ixgbe_set_eee_capable(adapter);
10860 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
10861 err = 0;
10862 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
10863 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
10864 e_dev_err("Reload the driver after installing a supported module.\n");
10865 goto err_sw_init;
10866 } else if (err) {
10867 e_dev_err("HW Init failed: %d\n", err);
10868 goto err_sw_init;
10869 }
10870
10871#ifdef CONFIG_PCI_IOV
10872
10873 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
10874 goto skip_sriov;
10875
10876 ixgbe_init_mbx_params_pf(hw);
10877 hw->mbx.ops = ii->mbx_ops;
10878 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
10879 ixgbe_enable_sriov(adapter, max_vfs);
10880skip_sriov:
10881
10882#endif
10883 netdev->features = NETIF_F_SG |
10884 NETIF_F_TSO |
10885 NETIF_F_TSO6 |
10886 NETIF_F_RXHASH |
10887 NETIF_F_RXCSUM |
10888 NETIF_F_HW_CSUM;
10889
10890#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
10891 NETIF_F_GSO_GRE_CSUM | \
10892 NETIF_F_GSO_IPXIP4 | \
10893 NETIF_F_GSO_IPXIP6 | \
10894 NETIF_F_GSO_UDP_TUNNEL | \
10895 NETIF_F_GSO_UDP_TUNNEL_CSUM)
10896
10897 netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
10898 netdev->features |= NETIF_F_GSO_PARTIAL |
10899 IXGBE_GSO_PARTIAL_FEATURES;
10900
10901 if (hw->mac.type >= ixgbe_mac_82599EB)
10902 netdev->features |= NETIF_F_SCTP_CRC;
10903
10904#ifdef CONFIG_IXGBE_IPSEC
10905#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
10906 NETIF_F_HW_ESP_TX_CSUM | \
10907 NETIF_F_GSO_ESP)
10908
10909 if (adapter->ipsec)
10910 netdev->features |= IXGBE_ESP_FEATURES;
10911#endif
10912
10913 netdev->hw_features |= netdev->features |
10914 NETIF_F_HW_VLAN_CTAG_FILTER |
10915 NETIF_F_HW_VLAN_CTAG_RX |
10916 NETIF_F_HW_VLAN_CTAG_TX |
10917 NETIF_F_RXALL |
10918 NETIF_F_HW_L2FW_DOFFLOAD;
10919
10920 if (hw->mac.type >= ixgbe_mac_82599EB)
10921 netdev->hw_features |= NETIF_F_NTUPLE |
10922 NETIF_F_HW_TC;
10923
10924 if (pci_using_dac)
10925 netdev->features |= NETIF_F_HIGHDMA;
10926
10927 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
10928 netdev->hw_enc_features |= netdev->vlan_features;
10929 netdev->mpls_features |= NETIF_F_SG |
10930 NETIF_F_TSO |
10931 NETIF_F_TSO6 |
10932 NETIF_F_HW_CSUM;
10933 netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
10934
10935
10936 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
10937 NETIF_F_HW_VLAN_CTAG_RX |
10938 NETIF_F_HW_VLAN_CTAG_TX;
10939
10940 netdev->priv_flags |= IFF_UNICAST_FLT;
10941 netdev->priv_flags |= IFF_SUPP_NOFCS;
10942
10943
10944 netdev->min_mtu = ETH_MIN_MTU;
10945 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
10946
10947#ifdef CONFIG_IXGBE_DCB
10948 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
10949 netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
10950#endif
10951
10952#ifdef IXGBE_FCOE
10953 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
10954 unsigned int fcoe_l;
10955
10956 if (hw->mac.ops.get_device_caps) {
10957 hw->mac.ops.get_device_caps(hw, &device_caps);
10958 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
10959 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
10960 }
10961
10962
10963 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
10964 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
10965
10966 netdev->features |= NETIF_F_FSO |
10967 NETIF_F_FCOE_CRC;
10968
10969 netdev->vlan_features |= NETIF_F_FSO |
10970 NETIF_F_FCOE_CRC |
10971 NETIF_F_FCOE_MTU;
10972 }
10973#endif
10974 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
10975 netdev->hw_features |= NETIF_F_LRO;
10976 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
10977 netdev->features |= NETIF_F_LRO;
10978
10979 if (ixgbe_check_fw_error(adapter)) {
10980 err = -EIO;
10981 goto err_sw_init;
10982 }
10983
10984
10985 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
10986 e_dev_err("The EEPROM Checksum Is Not Valid\n");
10987 err = -EIO;
10988 goto err_sw_init;
10989 }
10990
10991 eth_platform_get_mac_address(&adapter->pdev->dev,
10992 adapter->hw.mac.perm_addr);
10993
10994 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
10995
10996 if (!is_valid_ether_addr(netdev->dev_addr)) {
10997 e_dev_err("invalid MAC address\n");
10998 err = -EIO;
10999 goto err_sw_init;
11000 }
11001
11002
11003 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
11004 ixgbe_mac_set_default_filter(adapter);
11005
11006 timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
11007
11008 if (ixgbe_removed(hw->hw_addr)) {
11009 err = -EIO;
11010 goto err_sw_init;
11011 }
11012 INIT_WORK(&adapter->service_task, ixgbe_service_task);
11013 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
11014 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
11015
11016 err = ixgbe_init_interrupt_scheme(adapter);
11017 if (err)
11018 goto err_sw_init;
11019
11020 for (i = 0; i < adapter->num_rx_queues; i++)
11021 u64_stats_init(&adapter->rx_ring[i]->syncp);
11022 for (i = 0; i < adapter->num_tx_queues; i++)
11023 u64_stats_init(&adapter->tx_ring[i]->syncp);
11024 for (i = 0; i < adapter->num_xdp_queues; i++)
11025 u64_stats_init(&adapter->xdp_ring[i]->syncp);
11026
11027
11028 adapter->wol = 0;
11029 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
11030 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
11031 pdev->subsystem_device);
11032 if (hw->wol_enabled)
11033 adapter->wol = IXGBE_WUFC_MAG;
11034
11035 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
11036
11037
11038 ixgbe_set_fw_version(adapter);
11039
11040
11041 if (ixgbe_pcie_from_parent(hw))
11042 ixgbe_get_parent_bus_info(adapter);
11043 else
11044 hw->mac.ops.get_bus_info(hw);
11045
11046
11047
11048
11049
11050
11051 switch (hw->mac.type) {
11052 case ixgbe_mac_82598EB:
11053 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
11054 break;
11055 default:
11056 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
11057 break;
11058 }
11059
11060
11061 if (expected_gts > 0)
11062 ixgbe_check_minimum_link(adapter, expected_gts);
11063
11064 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
11065 if (err)
11066 strlcpy(part_str, "Unknown", sizeof(part_str));
11067 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
11068 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
11069 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
11070 part_str);
11071 else
11072 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
11073 hw->mac.type, hw->phy.type, part_str);
11074
11075 e_dev_info("%pM\n", netdev->dev_addr);
11076
11077
11078 err = hw->mac.ops.start_hw(hw);
11079 if (err == IXGBE_ERR_EEPROM_VERSION) {
11080
11081 e_dev_warn("This device is a pre-production adapter/LOM. "
11082 "Please be aware there may be issues associated "
11083 "with your hardware. If you are experiencing "
11084 "problems please contact your Intel or hardware "
11085 "representative who provided you with this "
11086 "hardware.\n");
11087 }
11088 strcpy(netdev->name, "eth%d");
11089 pci_set_drvdata(pdev, adapter);
11090 err = register_netdev(netdev);
11091 if (err)
11092 goto err_register;
11093
11094
11095
11096 if (hw->mac.ops.disable_tx_laser)
11097 hw->mac.ops.disable_tx_laser(hw);
11098
11099
11100 netif_carrier_off(netdev);
11101
11102#ifdef CONFIG_IXGBE_DCA
11103 if (dca_add_requester(&pdev->dev) == 0) {
11104 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
11105 ixgbe_setup_dca(adapter);
11106 }
11107#endif
11108 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
11109 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
11110 for (i = 0; i < adapter->num_vfs; i++)
11111 ixgbe_vf_configuration(pdev, (i | 0x10000000));
11112 }
11113
11114
11115
11116
11117 if (hw->mac.ops.set_fw_drv_ver)
11118 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
11119 sizeof(ixgbe_driver_version) - 1,
11120 ixgbe_driver_version);
11121
11122
11123 ixgbe_add_sanmac_netdev(netdev);
11124
11125 e_dev_info("%s\n", ixgbe_default_device_descr);
11126
11127#ifdef CONFIG_IXGBE_HWMON
11128 if (ixgbe_sysfs_init(adapter))
11129 e_err(probe, "failed to allocate sysfs resources\n");
11130#endif
11131
11132 ixgbe_dbg_adapter_init(adapter);
11133
11134
11135 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
11136 hw->mac.ops.setup_link(hw,
11137 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
11138 true);
11139
11140 ixgbe_mii_bus_init(hw);
11141
11142 return 0;
11143
11144err_register:
11145 ixgbe_release_hw_control(adapter);
11146 ixgbe_clear_interrupt_scheme(adapter);
11147err_sw_init:
11148 ixgbe_disable_sriov(adapter);
11149 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
11150 iounmap(adapter->io_addr);
11151 kfree(adapter->jump_tables[0]);
11152 kfree(adapter->mac_table);
11153 kfree(adapter->rss_key);
11154 bitmap_free(adapter->af_xdp_zc_qps);
11155err_ioremap:
11156 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11157 free_netdev(netdev);
11158err_alloc_etherdev:
11159 pci_release_mem_regions(pdev);
11160err_pci_reg:
11161err_dma:
11162 if (!adapter || disable_dev)
11163 pci_disable_device(pdev);
11164 return err;
11165}
11166
11167
11168
11169
11170
11171
11172
11173
11174
11175
11176static void ixgbe_remove(struct pci_dev *pdev)
11177{
11178 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11179 struct net_device *netdev;
11180 bool disable_dev;
11181 int i;
11182
11183
11184 if (!adapter)
11185 return;
11186
11187 netdev = adapter->netdev;
11188 ixgbe_dbg_adapter_exit(adapter);
11189
11190 set_bit(__IXGBE_REMOVING, &adapter->state);
11191 cancel_work_sync(&adapter->service_task);
11192
11193 if (adapter->mii_bus)
11194 mdiobus_unregister(adapter->mii_bus);
11195
11196#ifdef CONFIG_IXGBE_DCA
11197 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
11198 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
11199 dca_remove_requester(&pdev->dev);
11200 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
11201 IXGBE_DCA_CTRL_DCA_DISABLE);
11202 }
11203
11204#endif
11205#ifdef CONFIG_IXGBE_HWMON
11206 ixgbe_sysfs_exit(adapter);
11207#endif
11208
11209
11210 ixgbe_del_sanmac_netdev(netdev);
11211
11212#ifdef CONFIG_PCI_IOV
11213 ixgbe_disable_sriov(adapter);
11214#endif
11215 if (netdev->reg_state == NETREG_REGISTERED)
11216 unregister_netdev(netdev);
11217
11218 ixgbe_stop_ipsec_offload(adapter);
11219 ixgbe_clear_interrupt_scheme(adapter);
11220
11221 ixgbe_release_hw_control(adapter);
11222
11223#ifdef CONFIG_DCB
11224 kfree(adapter->ixgbe_ieee_pfc);
11225 kfree(adapter->ixgbe_ieee_ets);
11226
11227#endif
11228 iounmap(adapter->io_addr);
11229 pci_release_mem_regions(pdev);
11230
11231 e_dev_info("complete\n");
11232
11233 for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
11234 if (adapter->jump_tables[i]) {
11235 kfree(adapter->jump_tables[i]->input);
11236 kfree(adapter->jump_tables[i]->mask);
11237 }
11238 kfree(adapter->jump_tables[i]);
11239 }
11240
11241 kfree(adapter->mac_table);
11242 kfree(adapter->rss_key);
11243 bitmap_free(adapter->af_xdp_zc_qps);
11244 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11245 free_netdev(netdev);
11246
11247 pci_disable_pcie_error_reporting(pdev);
11248
11249 if (disable_dev)
11250 pci_disable_device(pdev);
11251}
11252
11253
11254
11255
11256
11257
11258
11259
11260
11261static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
11262 pci_channel_state_t state)
11263{
11264 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11265 struct net_device *netdev = adapter->netdev;
11266
11267#ifdef CONFIG_PCI_IOV
11268 struct ixgbe_hw *hw = &adapter->hw;
11269 struct pci_dev *bdev, *vfdev;
11270 u32 dw0, dw1, dw2, dw3;
11271 int vf, pos;
11272 u16 req_id, pf_func;
11273
11274 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
11275 adapter->num_vfs == 0)
11276 goto skip_bad_vf_detection;
11277
11278 bdev = pdev->bus->self;
11279 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
11280 bdev = bdev->bus->self;
11281
11282 if (!bdev)
11283 goto skip_bad_vf_detection;
11284
11285 pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
11286 if (!pos)
11287 goto skip_bad_vf_detection;
11288
11289 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
11290 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
11291 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
11292 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
11293 if (ixgbe_removed(hw->hw_addr))
11294 goto skip_bad_vf_detection;
11295
11296 req_id = dw1 >> 16;
11297
11298 if (!(req_id & 0x0080))
11299 goto skip_bad_vf_detection;
11300
11301 pf_func = req_id & 0x01;
11302 if ((pf_func & 1) == (pdev->devfn & 1)) {
11303 unsigned int device_id;
11304
11305 vf = (req_id & 0x7F) >> 1;
11306 e_dev_err("VF %d has caused a PCIe error\n", vf);
11307 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
11308 "%8.8x\tdw3: %8.8x\n",
11309 dw0, dw1, dw2, dw3);
11310 switch (adapter->hw.mac.type) {
11311 case ixgbe_mac_82599EB:
11312 device_id = IXGBE_82599_VF_DEVICE_ID;
11313 break;
11314 case ixgbe_mac_X540:
11315 device_id = IXGBE_X540_VF_DEVICE_ID;
11316 break;
11317 case ixgbe_mac_X550:
11318 device_id = IXGBE_DEV_ID_X550_VF;
11319 break;
11320 case ixgbe_mac_X550EM_x:
11321 device_id = IXGBE_DEV_ID_X550EM_X_VF;
11322 break;
11323 case ixgbe_mac_x550em_a:
11324 device_id = IXGBE_DEV_ID_X550EM_A_VF;
11325 break;
11326 default:
11327 device_id = 0;
11328 break;
11329 }
11330
11331
11332 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
11333 while (vfdev) {
11334 if (vfdev->devfn == (req_id & 0xFF))
11335 break;
11336 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
11337 device_id, vfdev);
11338 }
11339
11340
11341
11342
11343
11344 if (vfdev) {
11345 pcie_flr(vfdev);
11346
11347 pci_dev_put(vfdev);
11348 }
11349 }
11350
11351
11352
11353
11354
11355
11356
11357 adapter->vferr_refcount++;
11358
11359 return PCI_ERS_RESULT_RECOVERED;
11360
11361skip_bad_vf_detection:
11362#endif
11363 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
11364 return PCI_ERS_RESULT_DISCONNECT;
11365
11366 if (!netif_device_present(netdev))
11367 return PCI_ERS_RESULT_DISCONNECT;
11368
11369 rtnl_lock();
11370 netif_device_detach(netdev);
11371
11372 if (netif_running(netdev))
11373 ixgbe_close_suspend(adapter);
11374
11375 if (state == pci_channel_io_perm_failure) {
11376 rtnl_unlock();
11377 return PCI_ERS_RESULT_DISCONNECT;
11378 }
11379
11380 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
11381 pci_disable_device(pdev);
11382 rtnl_unlock();
11383
11384
11385 return PCI_ERS_RESULT_NEED_RESET;
11386}
11387
11388
11389
11390
11391
11392
11393
11394static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
11395{
11396 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11397 pci_ers_result_t result;
11398
11399 if (pci_enable_device_mem(pdev)) {
11400 e_err(probe, "Cannot re-enable PCI device after reset.\n");
11401 result = PCI_ERS_RESULT_DISCONNECT;
11402 } else {
11403 smp_mb__before_atomic();
11404 clear_bit(__IXGBE_DISABLED, &adapter->state);
11405 adapter->hw.hw_addr = adapter->io_addr;
11406 pci_set_master(pdev);
11407 pci_restore_state(pdev);
11408 pci_save_state(pdev);
11409
11410 pci_wake_from_d3(pdev, false);
11411
11412 ixgbe_reset(adapter);
11413 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
11414 result = PCI_ERS_RESULT_RECOVERED;
11415 }
11416
11417 return result;
11418}
11419
11420
11421
11422
11423
11424
11425
11426
11427static void ixgbe_io_resume(struct pci_dev *pdev)
11428{
11429 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11430 struct net_device *netdev = adapter->netdev;
11431
11432#ifdef CONFIG_PCI_IOV
11433 if (adapter->vferr_refcount) {
11434 e_info(drv, "Resuming after VF err\n");
11435 adapter->vferr_refcount--;
11436 return;
11437 }
11438
11439#endif
11440 rtnl_lock();
11441 if (netif_running(netdev))
11442 ixgbe_open(netdev);
11443
11444 netif_device_attach(netdev);
11445 rtnl_unlock();
11446}
11447
11448static const struct pci_error_handlers ixgbe_err_handler = {
11449 .error_detected = ixgbe_io_error_detected,
11450 .slot_reset = ixgbe_io_slot_reset,
11451 .resume = ixgbe_io_resume,
11452};
11453
11454static struct pci_driver ixgbe_driver = {
11455 .name = ixgbe_driver_name,
11456 .id_table = ixgbe_pci_tbl,
11457 .probe = ixgbe_probe,
11458 .remove = ixgbe_remove,
11459#ifdef CONFIG_PM
11460 .suspend = ixgbe_suspend,
11461 .resume = ixgbe_resume,
11462#endif
11463 .shutdown = ixgbe_shutdown,
11464 .sriov_configure = ixgbe_pci_sriov_configure,
11465 .err_handler = &ixgbe_err_handler
11466};
11467
11468
11469
11470
11471
11472
11473
11474static int __init ixgbe_init_module(void)
11475{
11476 int ret;
11477 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
11478 pr_info("%s\n", ixgbe_copyright);
11479
11480 ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
11481 if (!ixgbe_wq) {
11482 pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
11483 return -ENOMEM;
11484 }
11485
11486 ixgbe_dbg_init();
11487
11488 ret = pci_register_driver(&ixgbe_driver);
11489 if (ret) {
11490 destroy_workqueue(ixgbe_wq);
11491 ixgbe_dbg_exit();
11492 return ret;
11493 }
11494
11495#ifdef CONFIG_IXGBE_DCA
11496 dca_register_notify(&dca_notifier);
11497#endif
11498
11499 return 0;
11500}
11501
11502module_init(ixgbe_init_module);
11503
11504
11505
11506
11507
11508
11509
11510static void __exit ixgbe_exit_module(void)
11511{
11512#ifdef CONFIG_IXGBE_DCA
11513 dca_unregister_notify(&dca_notifier);
11514#endif
11515 pci_unregister_driver(&ixgbe_driver);
11516
11517 ixgbe_dbg_exit();
11518 if (ixgbe_wq) {
11519 destroy_workqueue(ixgbe_wq);
11520 ixgbe_wq = NULL;
11521 }
11522}
11523
11524#ifdef CONFIG_IXGBE_DCA
11525static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
11526 void *p)
11527{
11528 int ret_val;
11529
11530 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
11531 __ixgbe_notify_dca);
11532
11533 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
11534}
11535
11536#endif
11537
11538module_exit(ixgbe_exit_module);
11539
11540
11541