// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 1999-2016 Intel Corporation. */

4#include <linux/types.h>
5#include <linux/module.h>
6#include <linux/pci.h>
7#include <linux/netdevice.h>
8#include <linux/vmalloc.h>
9#include <linux/string.h>
10#include <linux/in.h>
11#include <linux/interrupt.h>
12#include <linux/ip.h>
13#include <linux/tcp.h>
14#include <linux/sctp.h>
15#include <linux/pkt_sched.h>
16#include <linux/ipv6.h>
17#include <linux/slab.h>
18#include <net/checksum.h>
19#include <net/ip6_checksum.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
22#include <linux/if.h>
23#include <linux/if_vlan.h>
24#include <linux/if_macvlan.h>
25#include <linux/if_bridge.h>
26#include <linux/prefetch.h>
27#include <linux/bpf.h>
28#include <linux/bpf_trace.h>
29#include <linux/atomic.h>
30#include <linux/numa.h>
31#include <scsi/fc/fc_fcoe.h>
32#include <net/udp_tunnel.h>
33#include <net/pkt_cls.h>
34#include <net/tc_act/tc_gact.h>
35#include <net/tc_act/tc_mirred.h>
36#include <net/vxlan.h>
37#include <net/mpls.h>
38#include <net/xdp_sock.h>
39#include <net/xfrm.h>
40
41#include "ixgbe.h"
42#include "ixgbe_common.h"
43#include "ixgbe_dcb_82599.h"
44#include "ixgbe_phy.h"
45#include "ixgbe_sriov.h"
46#include "ixgbe_model.h"
47#include "ixgbe_txrx_common.h"
48
49char ixgbe_driver_name[] = "ixgbe";
50static const char ixgbe_driver_string[] =
51 "Intel(R) 10 Gigabit PCI Express Network Driver";
52#ifdef IXGBE_FCOE
53char ixgbe_default_device_descr[] =
54 "Intel(R) 10 Gigabit Network Connection";
55#else
56static char ixgbe_default_device_descr[] =
57 "Intel(R) 10 Gigabit Network Connection";
58#endif
59#define DRV_VERSION "5.1.0-k"
60const char ixgbe_driver_version[] = DRV_VERSION;
61static const char ixgbe_copyright[] =
62 "Copyright (c) 1999-2016 Intel Corporation.";
63
64static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
65
66static const struct ixgbe_info *ixgbe_info_tbl[] = {
67 [board_82598] = &ixgbe_82598_info,
68 [board_82599] = &ixgbe_82599_info,
69 [board_X540] = &ixgbe_X540_info,
70 [board_X550] = &ixgbe_X550_info,
71 [board_X550EM_x] = &ixgbe_X550EM_x_info,
72 [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
73 [board_x550em_a] = &ixgbe_x550em_a_info,
74 [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
75};
76
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
85static const struct pci_device_id ixgbe_pci_tbl[] = {
86 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
87 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
88 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
89 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
90 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
91 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
92 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
93 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
94 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
95 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
96 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
97 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
98 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
99 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
102 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
104 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
105 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
106 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
107 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
108 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
115 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
117 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
118 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
119 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
120 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
121 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
122 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
123 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
124 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
125 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
126 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
127 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
128 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
129 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
130 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
131 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
132 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
133
134 {0, }
135};
136MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
137
138#ifdef CONFIG_IXGBE_DCA
139static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
140 void *p);
141static struct notifier_block dca_notifier = {
142 .notifier_call = ixgbe_notify_dca,
143 .next = NULL,
144 .priority = 0
145};
146#endif
147
148#ifdef CONFIG_PCI_IOV
149static unsigned int max_vfs;
150module_param(max_vfs, uint, 0);
151MODULE_PARM_DESC(max_vfs,
152 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
153#endif
154
155static unsigned int allow_unsupported_sfp;
156module_param(allow_unsupported_sfp, uint, 0);
157MODULE_PARM_DESC(allow_unsupported_sfp,
158 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
159
160#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
161static int debug = -1;
162module_param(debug, int, 0);
163MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
164
165MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
166MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
167MODULE_LICENSE("GPL v2");
168MODULE_VERSION(DRV_VERSION);
169
170static struct workqueue_struct *ixgbe_wq;
171
172static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
173static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
174
175static const struct net_device_ops ixgbe_netdev_ops;
176
177static bool netif_is_ixgbe(struct net_device *dev)
178{
179 return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
180}
181
182static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
183 u32 reg, u16 *value)
184{
185 struct pci_dev *parent_dev;
186 struct pci_bus *parent_bus;
187
188 parent_bus = adapter->pdev->bus->parent;
189 if (!parent_bus)
190 return -1;
191
192 parent_dev = parent_bus->self;
193 if (!parent_dev)
194 return -1;
195
196 if (!pci_is_pcie(parent_dev))
197 return -1;
198
199 pcie_capability_read_word(parent_dev, reg, value);
200 if (*value == IXGBE_FAILED_READ_CFG_WORD &&
201 ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
202 return -1;
203 return 0;
204}
205
206static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
207{
208 struct ixgbe_hw *hw = &adapter->hw;
209 u16 link_status = 0;
210 int err;
211
212 hw->bus.type = ixgbe_bus_type_pci_express;
213
	/* Get the negotiated link width and speed from the PCI config
	 * space of the parent, as this device sits behind a switch.
	 * Offset 18 (0x12) is the PCIe Link Status register
	 * (PCI_EXP_LNKSTA) within the PCIe capability structure.
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);
218
	/* assume caller will handle error case */
220 if (err)
221 return err;
222
223 hw->bus.width = ixgbe_convert_bus_width(link_status);
224 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
225
226 return 0;
227}
228
/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from
 * the device itself, as is the case for devices that sit behind an internal
 * switch.
 */
238static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
239{
240 switch (hw->device_id) {
241 case IXGBE_DEV_ID_82599_SFP_SF_QP:
242 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
243 return true;
244 default:
245 return false;
246 }
247}
248
249static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
250 int expected_gts)
251{
252 struct ixgbe_hw *hw = &adapter->hw;
253 struct pci_dev *pdev;
254
	/* Some devices are not connected over PCIe and thus do not
	 * negotiate speed.  These devices do not have valid bus info,
	 * and thus any report we generate may not be correct.
	 */
259 if (hw->bus.type == ixgbe_bus_type_internal)
260 return;
261
262
263 if (ixgbe_pcie_from_parent(&adapter->hw))
264 pdev = adapter->pdev->bus->parent->self;
265 else
266 pdev = adapter->pdev;
267
268 pcie_print_link_status(pdev);
269}
270
271static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
272{
273 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
274 !test_bit(__IXGBE_REMOVING, &adapter->state) &&
275 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
276 queue_work(ixgbe_wq, &adapter->service_task);
277}
278
279static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
280{
281 struct ixgbe_adapter *adapter = hw->back;
282
283 if (!hw->hw_addr)
284 return;
285 hw->hw_addr = NULL;
286 e_dev_err("Adapter removed\n");
287 if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
288 ixgbe_service_event_schedule(adapter);
289}
290
291static u32 ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
292{
293 u8 __iomem *reg_addr;
294 u32 value;
295 int i;
296
297 reg_addr = READ_ONCE(hw->hw_addr);
298 if (ixgbe_removed(reg_addr))
299 return IXGBE_FAILED_READ_REG;
300
	/* A register read of all ones can indicate the adapter has been
	 * removed, so perform several status register reads to determine
	 * whether the adapter has actually been removed.
	 */
305 for (i = 0; i < IXGBE_FAILED_READ_RETRIES; i++) {
306 value = readl(reg_addr + IXGBE_STATUS);
307 if (value != IXGBE_FAILED_READ_REG)
308 break;
309 mdelay(3);
310 }
311
312 if (value == IXGBE_FAILED_READ_REG)
313 ixgbe_remove_adapter(hw);
314 else
315 value = readl(reg_addr + reg);
316 return value;
317}
318
/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers.  It checks for device
 * removal by confirming any read that returns all ones by checking the
 * status register value for all ones.  This function avoids reading from
 * the hardware if a removal was previously detected, in which case it
 * returns IXGBE_FAILED_READ_REG (all ones).
 */
332u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
333{
334 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
335 u32 value;
336
337 if (ixgbe_removed(reg_addr))
338 return IXGBE_FAILED_READ_REG;
339 if (unlikely(hw->phy.nw_mng_if_sel &
340 IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
341 struct ixgbe_adapter *adapter;
342 int i;
343
344 for (i = 0; i < 200; ++i) {
345 value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
346 if (likely(!value))
347 goto writes_completed;
348 if (value == IXGBE_FAILED_READ_REG) {
349 ixgbe_remove_adapter(hw);
350 return IXGBE_FAILED_READ_REG;
351 }
352 udelay(5);
353 }
354
355 adapter = hw->back;
356 e_warn(hw, "register writes incomplete %08x\n", value);
357 }
358
359writes_completed:
360 value = readl(reg_addr + reg);
361 if (unlikely(value == IXGBE_FAILED_READ_REG))
362 value = ixgbe_check_remove(hw, reg);
363 return value;
364}
365
366static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
367{
368 u16 value;
369
370 pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
371 if (value == IXGBE_FAILED_READ_CFG_WORD) {
372 ixgbe_remove_adapter(hw);
373 return true;
374 }
375 return false;
376}
377
378u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
379{
380 struct ixgbe_adapter *adapter = hw->back;
381 u16 value;
382
383 if (ixgbe_removed(hw->hw_addr))
384 return IXGBE_FAILED_READ_CFG_WORD;
385 pci_read_config_word(adapter->pdev, reg, &value);
386 if (value == IXGBE_FAILED_READ_CFG_WORD &&
387 ixgbe_check_cfg_remove(hw, adapter->pdev))
388 return IXGBE_FAILED_READ_CFG_WORD;
389 return value;
390}
391
392#ifdef CONFIG_PCI_IOV
393static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
394{
395 struct ixgbe_adapter *adapter = hw->back;
396 u32 value;
397
398 if (ixgbe_removed(hw->hw_addr))
399 return IXGBE_FAILED_READ_CFG_DWORD;
400 pci_read_config_dword(adapter->pdev, reg, &value);
401 if (value == IXGBE_FAILED_READ_CFG_DWORD &&
402 ixgbe_check_cfg_remove(hw, adapter->pdev))
403 return IXGBE_FAILED_READ_CFG_DWORD;
404 return value;
405}
406#endif
407
408void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
409{
410 struct ixgbe_adapter *adapter = hw->back;
411
412 if (ixgbe_removed(hw->hw_addr))
413 return;
414 pci_write_config_word(adapter->pdev, reg, value);
415}
416
417static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
418{
419 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
420
	/* flush memory to make sure state is correct before next watchdog */
422 smp_mb__before_atomic();
423 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
424}
425
426struct ixgbe_reg_info {
427 u32 ofs;
428 char *name;
429};
430
431static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
432
433
434 {IXGBE_CTRL, "CTRL"},
435 {IXGBE_STATUS, "STATUS"},
436 {IXGBE_CTRL_EXT, "CTRL_EXT"},
437
438
439 {IXGBE_EICR, "EICR"},
440
441
442 {IXGBE_SRRCTL(0), "SRRCTL"},
443 {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
444 {IXGBE_RDLEN(0), "RDLEN"},
445 {IXGBE_RDH(0), "RDH"},
446 {IXGBE_RDT(0), "RDT"},
447 {IXGBE_RXDCTL(0), "RXDCTL"},
448 {IXGBE_RDBAL(0), "RDBAL"},
449 {IXGBE_RDBAH(0), "RDBAH"},
450
451
452 {IXGBE_TDBAL(0), "TDBAL"},
453 {IXGBE_TDBAH(0), "TDBAH"},
454 {IXGBE_TDLEN(0), "TDLEN"},
455 {IXGBE_TDH(0), "TDH"},
456 {IXGBE_TDT(0), "TDT"},
457 {IXGBE_TXDCTL(0), "TXDCTL"},
458
459
460 { .name = NULL }
461};
462

/* ixgbe_regdump - register printout routine */
467static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
468{
469 int i;
470 char rname[16];
471 u32 regs[64];
472
473 switch (reginfo->ofs) {
474 case IXGBE_SRRCTL(0):
475 for (i = 0; i < 64; i++)
476 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
477 break;
478 case IXGBE_DCA_RXCTRL(0):
479 for (i = 0; i < 64; i++)
480 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
481 break;
482 case IXGBE_RDLEN(0):
483 for (i = 0; i < 64; i++)
484 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
485 break;
486 case IXGBE_RDH(0):
487 for (i = 0; i < 64; i++)
488 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
489 break;
490 case IXGBE_RDT(0):
491 for (i = 0; i < 64; i++)
492 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
493 break;
494 case IXGBE_RXDCTL(0):
495 for (i = 0; i < 64; i++)
496 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
497 break;
498 case IXGBE_RDBAL(0):
499 for (i = 0; i < 64; i++)
500 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
501 break;
502 case IXGBE_RDBAH(0):
503 for (i = 0; i < 64; i++)
504 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
505 break;
506 case IXGBE_TDBAL(0):
507 for (i = 0; i < 64; i++)
508 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
509 break;
510 case IXGBE_TDBAH(0):
511 for (i = 0; i < 64; i++)
512 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
513 break;
514 case IXGBE_TDLEN(0):
515 for (i = 0; i < 64; i++)
516 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
517 break;
518 case IXGBE_TDH(0):
519 for (i = 0; i < 64; i++)
520 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
521 break;
522 case IXGBE_TDT(0):
523 for (i = 0; i < 64; i++)
524 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
525 break;
526 case IXGBE_TXDCTL(0):
527 for (i = 0; i < 64; i++)
528 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
529 break;
530 default:
531 pr_info("%-15s %08x\n",
532 reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
533 return;
534 }
535
536 i = 0;
537 while (i < 64) {
538 int j;
539 char buf[9 * 8 + 1];
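		/* 8 registers, 9 characters each (" %08x"), plus a NUL */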
540 char *p = buf;
541
542 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
543 for (j = 0; j < 8; j++)
544 p += sprintf(p, " %08x", regs[i++]);
545 pr_err("%-15s%s\n", rname, buf);
546 }
547
548}
549
550static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
551{
552 struct ixgbe_tx_buffer *tx_buffer;
553
554 tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
555 pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
556 n, ring->next_to_use, ring->next_to_clean,
557 (u64)dma_unmap_addr(tx_buffer, dma),
558 dma_unmap_len(tx_buffer, len),
559 tx_buffer->next_to_watch,
560 (u64)tx_buffer->time_stamp);
561}
562

/* ixgbe_dump - Print registers, tx-rings and rx-rings */
566static void ixgbe_dump(struct ixgbe_adapter *adapter)
567{
568 struct net_device *netdev = adapter->netdev;
569 struct ixgbe_hw *hw = &adapter->hw;
570 struct ixgbe_reg_info *reginfo;
571 int n = 0;
572 struct ixgbe_ring *ring;
573 struct ixgbe_tx_buffer *tx_buffer;
574 union ixgbe_adv_tx_desc *tx_desc;
575 struct my_u0 { u64 a; u64 b; } *u0;
576 struct ixgbe_ring *rx_ring;
577 union ixgbe_adv_rx_desc *rx_desc;
578 struct ixgbe_rx_buffer *rx_buffer_info;
579 int i = 0;
580
581 if (!netif_msg_hw(adapter))
582 return;
583
584
585 if (netdev) {
586 dev_info(&adapter->pdev->dev, "Net device Info\n");
587 pr_info("Device Name state "
588 "trans_start\n");
589 pr_info("%-15s %016lX %016lX\n",
590 netdev->name,
591 netdev->state,
592 dev_trans_start(netdev));
593 }
594
595
596 dev_info(&adapter->pdev->dev, "Register Dump\n");
597 pr_info(" Register Name Value\n");
598 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
599 reginfo->name; reginfo++) {
600 ixgbe_regdump(hw, reginfo);
601 }
602
603
604 if (!netdev || !netif_running(netdev))
605 return;
606
607 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
608 pr_info(" %s %s %s %s\n",
609 "Queue [NTU] [NTC] [bi(ntc)->dma ]",
610 "leng", "ntw", "timestamp");
611 for (n = 0; n < adapter->num_tx_queues; n++) {
612 ring = adapter->tx_ring[n];
613 ixgbe_print_buffer(ring, n);
614 }
615
616 for (n = 0; n < adapter->num_xdp_queues; n++) {
617 ring = adapter->xdp_ring[n];
618 ixgbe_print_buffer(ring, n);
619 }
620
621
622 if (!netif_msg_tx_done(adapter))
623 goto rx_ring_summary;
624
625 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * The per-descriptor dump below prints the two 64-bit words of
	 * each advanced transmit descriptor (buffer address and the packed
	 * cmd/status/length fields) together with the driver's
	 * tx_buffer_info bookkeeping (DMA address, length, next_to_watch,
	 * timestamp and skb pointer) for that slot.
	 */
662 for (n = 0; n < adapter->num_tx_queues; n++) {
663 ring = adapter->tx_ring[n];
664 pr_info("------------------------------------\n");
665 pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
666 pr_info("------------------------------------\n");
667 pr_info("%s%s %s %s %s %s\n",
668 "T [desc] [address 63:0 ] ",
669 "[PlPOIdStDDt Ln] [bi->dma ] ",
670 "leng", "ntw", "timestamp", "bi->skb");
671
672 for (i = 0; ring->desc && (i < ring->count); i++) {
673 tx_desc = IXGBE_TX_DESC(ring, i);
674 tx_buffer = &ring->tx_buffer_info[i];
675 u0 = (struct my_u0 *)tx_desc;
676 if (dma_unmap_len(tx_buffer, len) > 0) {
677 const char *ring_desc;
678
679 if (i == ring->next_to_use &&
680 i == ring->next_to_clean)
681 ring_desc = " NTC/U";
682 else if (i == ring->next_to_use)
683 ring_desc = " NTU";
684 else if (i == ring->next_to_clean)
685 ring_desc = " NTC";
686 else
687 ring_desc = "";
688 pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
689 i,
690 le64_to_cpu((__force __le64)u0->a),
691 le64_to_cpu((__force __le64)u0->b),
692 (u64)dma_unmap_addr(tx_buffer, dma),
693 dma_unmap_len(tx_buffer, len),
694 tx_buffer->next_to_watch,
695 (u64)tx_buffer->time_stamp,
696 tx_buffer->skb,
697 ring_desc);
698
699 if (netif_msg_pktdata(adapter) &&
700 tx_buffer->skb)
701 print_hex_dump(KERN_INFO, "",
702 DUMP_PREFIX_ADDRESS, 16, 1,
703 tx_buffer->skb->data,
704 dma_unmap_len(tx_buffer, len),
705 true);
706 }
707 }
708 }
709
710
711rx_ring_summary:
712 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
713 pr_info("Queue [NTU] [NTC]\n");
714 for (n = 0; n < adapter->num_rx_queues; n++) {
715 rx_ring = adapter->rx_ring[n];
716 pr_info("%5d %5X %5X\n",
717 n, rx_ring->next_to_use, rx_ring->next_to_clean);
718 }
719
720
721 if (!netif_msg_rx_status(adapter))
722 return;
723
724 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
725
	/* Receive Descriptor Formats
	 *
	 * The per-descriptor dump below prints each advanced receive
	 * descriptor in both the "Adv Rx Read" layout (packet buffer and
	 * header buffer addresses written by software) and the
	 * "Adv Rx Write-Back" layout (status, checksum and length fields
	 * written back by hardware), along with the driver's
	 * rx_buffer_info state for that slot.
	 */
771 for (n = 0; n < adapter->num_rx_queues; n++) {
772 rx_ring = adapter->rx_ring[n];
773 pr_info("------------------------------------\n");
774 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
775 pr_info("------------------------------------\n");
776 pr_info("%s%s%s\n",
777 "R [desc] [ PktBuf A0] ",
778 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
779 "<-- Adv Rx Read format");
780 pr_info("%s%s%s\n",
781 "RWB[desc] [PcsmIpSHl PtRs] ",
782 "[vl er S cks ln] ---------------- [bi->skb ] ",
783 "<-- Adv Rx Write-Back format");
784
785 for (i = 0; i < rx_ring->count; i++) {
786 const char *ring_desc;
787
788 if (i == rx_ring->next_to_use)
789 ring_desc = " NTU";
790 else if (i == rx_ring->next_to_clean)
791 ring_desc = " NTC";
792 else
793 ring_desc = "";
794
795 rx_buffer_info = &rx_ring->rx_buffer_info[i];
796 rx_desc = IXGBE_RX_DESC(rx_ring, i);
797 u0 = (struct my_u0 *)rx_desc;
798 if (rx_desc->wb.upper.length) {
799
800 pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
801 i,
802 le64_to_cpu((__force __le64)u0->a),
803 le64_to_cpu((__force __le64)u0->b),
804 rx_buffer_info->skb,
805 ring_desc);
806 } else {
807 pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
808 i,
809 le64_to_cpu((__force __le64)u0->a),
810 le64_to_cpu((__force __le64)u0->b),
811 (u64)rx_buffer_info->dma,
812 rx_buffer_info->skb,
813 ring_desc);
814
815 if (netif_msg_pktdata(adapter) &&
816 rx_buffer_info->dma) {
817 print_hex_dump(KERN_INFO, "",
818 DUMP_PREFIX_ADDRESS, 16, 1,
819 page_address(rx_buffer_info->page) +
820 rx_buffer_info->page_offset,
821 ixgbe_rx_bufsz(rx_ring), true);
822 }
823 }
824 }
825 }
826}
827
828static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
829{
830 u32 ctrl_ext;
831
832
833 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
834 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
835 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
836}
837
838static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
839{
840 u32 ctrl_ext;
841
842
843 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
844 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
845 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
846}
847
/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
856static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
857 u8 queue, u8 msix_vector)
858{
859 u32 ivar, index;
860 struct ixgbe_hw *hw = &adapter->hw;
861 switch (hw->mac.type) {
862 case ixgbe_mac_82598EB:
863 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
864 if (direction == -1)
865 direction = 0;
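		/* 82598: each IVAR register holds four 8-bit entries; the
		 * entry number is direction * 64 + queue, so Rx queues use
		 * entries 0-63 and Tx queues entries 64-127.
		 */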
866 index = (((direction * 64) + queue) >> 2) & 0x1F;
867 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
868 ivar &= ~(0xFF << (8 * (queue & 0x3)));
869 ivar |= (msix_vector << (8 * (queue & 0x3)));
870 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
871 break;
872 case ixgbe_mac_82599EB:
873 case ixgbe_mac_X540:
874 case ixgbe_mac_X550:
875 case ixgbe_mac_X550EM_x:
876 case ixgbe_mac_x550em_a:
877 if (direction == -1) {
878
879 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
880 index = ((queue & 1) * 8);
881 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
882 ivar &= ~(0xFF << index);
883 ivar |= (msix_vector << index);
884 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
885 break;
886 } else {
887
888 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
889 index = ((16 * (queue & 1)) + (8 * direction));
890 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
891 ivar &= ~(0xFF << index);
892 ivar |= (msix_vector << index);
893 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
894 break;
895 }
896 default:
897 break;
898 }
899}
900
901void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
902 u64 qmask)
903{
904 u32 mask;
905
906 switch (adapter->hw.mac.type) {
907 case ixgbe_mac_82598EB:
908 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
909 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
910 break;
911 case ixgbe_mac_82599EB:
912 case ixgbe_mac_X540:
913 case ixgbe_mac_X550:
914 case ixgbe_mac_X550EM_x:
915 case ixgbe_mac_x550em_a:
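		/* the 64 possible queue interrupts are spread across two
		 * 32-bit EICS registers
		 */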
916 mask = (qmask & 0xFFFFFFFF);
917 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
918 mask = (qmask >> 32);
919 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
920 break;
921 default:
922 break;
923 }
924}
925
926static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
927{
928 struct ixgbe_hw *hw = &adapter->hw;
929 struct ixgbe_hw_stats *hwstats = &adapter->stats;
930 int i;
931 u32 data;
932
933 if ((hw->fc.current_mode != ixgbe_fc_full) &&
934 (hw->fc.current_mode != ixgbe_fc_rx_pause))
935 return;
936
937 switch (hw->mac.type) {
938 case ixgbe_mac_82598EB:
939 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
940 break;
941 default:
942 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
943 }
944 hwstats->lxoffrxc += data;
945
946
947 if (!data)
948 return;
949
950 for (i = 0; i < adapter->num_tx_queues; i++)
951 clear_bit(__IXGBE_HANG_CHECK_ARMED,
952 &adapter->tx_ring[i]->state);
953
954 for (i = 0; i < adapter->num_xdp_queues; i++)
955 clear_bit(__IXGBE_HANG_CHECK_ARMED,
956 &adapter->xdp_ring[i]->state);
957}
958
959static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
960{
961 struct ixgbe_hw *hw = &adapter->hw;
962 struct ixgbe_hw_stats *hwstats = &adapter->stats;
963 u32 xoff[8] = {0};
964 u8 tc;
965 int i;
966 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
967
968 if (adapter->ixgbe_ieee_pfc)
969 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
970
971 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
972 ixgbe_update_xoff_rx_lfc(adapter);
973 return;
974 }
975
976
977 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
978 u32 pxoffrxc;
979
980 switch (hw->mac.type) {
981 case ixgbe_mac_82598EB:
982 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
983 break;
984 default:
985 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
986 }
987 hwstats->pxoffrxc[i] += pxoffrxc;
988
989 tc = netdev_get_prio_tc_map(adapter->netdev, i);
990 xoff[tc] += pxoffrxc;
991 }
992
993
994 for (i = 0; i < adapter->num_tx_queues; i++) {
995 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
996
997 tc = tx_ring->dcb_tc;
998 if (xoff[tc])
999 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1000 }
1001
1002 for (i = 0; i < adapter->num_xdp_queues; i++) {
1003 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
1004
1005 tc = xdp_ring->dcb_tc;
1006 if (xoff[tc])
1007 clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
1008 }
1009}
1010
1011static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
1012{
1013 return ring->stats.packets;
1014}
1015
1016static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
1017{
1018 unsigned int head, tail;
1019
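	/* pending work is the distance from next_to_clean to next_to_use,
	 * accounting for wrap-around of the ring
	 */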
1020 head = ring->next_to_clean;
1021 tail = ring->next_to_use;
1022
1023 return ((head <= tail) ? tail : tail + ring->count) - head;
1024}
1025
1026static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1027{
1028 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1029 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1030 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1031
1032 clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough.  This verifies that a
	 * transmit has been completed since the previous check AND there
	 * is at least one packet pending.  The ARMED bit is set to
	 * indicate a potential hang; it is cleared again when a pause
	 * frame is received so that PFC or 802.3x flow control does not
	 * trigger false hang detection.  Requiring the condition to hold
	 * for two checks in a row avoids races with the pause path
	 * clearing the ARMED bit.
	 */
1046 if (tx_done_old == tx_done && tx_pending)
1047
1048 return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
1049 &tx_ring->state);
1050
1051 tx_ring->tx_stats.tx_done_old = tx_done;
1052
1053 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1054
1055 return false;
1056}
1057
/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
1062static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
1063{
1064
1065
1066 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1067 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
1068 e_warn(drv, "initiating reset due to tx timeout\n");
1069 ixgbe_service_event_schedule(adapter);
1070 }
1071}
1072
/**
 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
 * @netdev: network interface device structure
 * @queue_index: Tx queue to set
 * @maxrate: desired maximum transmit bitrate
 **/
1079static int ixgbe_tx_maxrate(struct net_device *netdev,
1080 int queue_index, u32 maxrate)
1081{
1082 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1083 struct ixgbe_hw *hw = &adapter->hw;
1084 u32 bcnrc_val = ixgbe_link_mbps(adapter);
1085
1086 if (!maxrate)
1087 return 0;
1088
1089
1090 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1091 bcnrc_val /= maxrate;
1092
1093
1094 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1095 IXGBE_RTTBCNRC_RF_DEC_MASK;
1096
1097
1098 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1099
1100 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
1101 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1102
1103 return 0;
1104}
1105
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
1112static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1113 struct ixgbe_ring *tx_ring, int napi_budget)
1114{
1115 struct ixgbe_adapter *adapter = q_vector->adapter;
1116 struct ixgbe_tx_buffer *tx_buffer;
1117 union ixgbe_adv_tx_desc *tx_desc;
1118 unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
1119 unsigned int budget = q_vector->tx.work_limit;
1120 unsigned int i = tx_ring->next_to_clean;
1121
1122 if (test_bit(__IXGBE_DOWN, &adapter->state))
1123 return true;
1124
1125 tx_buffer = &tx_ring->tx_buffer_info[i];
1126 tx_desc = IXGBE_TX_DESC(tx_ring, i);
1127 i -= tx_ring->count;
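	/* i is kept as an offset relative to the end of the ring so the
	 * wrap-around check below is a simple test against zero
	 */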
1128
1129 do {
1130 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
1131
1132
1133 if (!eop_desc)
1134 break;
1135
		/* prevent any other reads prior to eop_desc */
1137 smp_rmb();
1138
1139
1140 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1141 break;
1142
1143
1144 tx_buffer->next_to_watch = NULL;
1145
1146
1147 total_bytes += tx_buffer->bytecount;
1148 total_packets += tx_buffer->gso_segs;
1149 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
1150 total_ipsec++;
1151
1152
1153 if (ring_is_xdp(tx_ring))
1154 xdp_return_frame(tx_buffer->xdpf);
1155 else
1156 napi_consume_skb(tx_buffer->skb, napi_budget);
1157
1158
1159 dma_unmap_single(tx_ring->dev,
1160 dma_unmap_addr(tx_buffer, dma),
1161 dma_unmap_len(tx_buffer, len),
1162 DMA_TO_DEVICE);
1163
1164
1165 dma_unmap_len_set(tx_buffer, len, 0);
1166
1167
1168 while (tx_desc != eop_desc) {
1169 tx_buffer++;
1170 tx_desc++;
1171 i++;
1172 if (unlikely(!i)) {
1173 i -= tx_ring->count;
1174 tx_buffer = tx_ring->tx_buffer_info;
1175 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1176 }
1177
1178
1179 if (dma_unmap_len(tx_buffer, len)) {
1180 dma_unmap_page(tx_ring->dev,
1181 dma_unmap_addr(tx_buffer, dma),
1182 dma_unmap_len(tx_buffer, len),
1183 DMA_TO_DEVICE);
1184 dma_unmap_len_set(tx_buffer, len, 0);
1185 }
1186 }
1187
1188
1189 tx_buffer++;
1190 tx_desc++;
1191 i++;
1192 if (unlikely(!i)) {
1193 i -= tx_ring->count;
1194 tx_buffer = tx_ring->tx_buffer_info;
1195 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1196 }
1197
1198
1199 prefetch(tx_desc);
1200
1201
1202 budget--;
1203 } while (likely(budget));
1204
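	/* convert i back from an end-relative offset to a ring index */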
1205 i += tx_ring->count;
1206 tx_ring->next_to_clean = i;
1207 u64_stats_update_begin(&tx_ring->syncp);
1208 tx_ring->stats.bytes += total_bytes;
1209 tx_ring->stats.packets += total_packets;
1210 u64_stats_update_end(&tx_ring->syncp);
1211 q_vector->tx.total_bytes += total_bytes;
1212 q_vector->tx.total_packets += total_packets;
1213 adapter->tx_ipsec += total_ipsec;
1214
1215 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1216
1217 struct ixgbe_hw *hw = &adapter->hw;
1218 e_err(drv, "Detected Tx Unit Hang %s\n"
1219 " Tx Queue <%d>\n"
1220 " TDH, TDT <%x>, <%x>\n"
1221 " next_to_use <%x>\n"
1222 " next_to_clean <%x>\n"
1223 "tx_buffer_info[next_to_clean]\n"
1224 " time_stamp <%lx>\n"
1225 " jiffies <%lx>\n",
1226 ring_is_xdp(tx_ring) ? "(XDP)" : "",
1227 tx_ring->queue_index,
1228 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1229 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1230 tx_ring->next_to_use, i,
1231 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1232
1233 if (!ring_is_xdp(tx_ring))
1234 netif_stop_subqueue(tx_ring->netdev,
1235 tx_ring->queue_index);
1236
1237 e_info(probe,
1238 "tx hang %d detected on queue %d, resetting adapter\n",
1239 adapter->tx_timeout_count + 1, tx_ring->queue_index);
1240
1241
1242 ixgbe_tx_timeout_reset(adapter);
1243
1244
1245 return true;
1246 }
1247
1248 if (ring_is_xdp(tx_ring))
1249 return !!budget;
1250
1251 netdev_tx_completed_queue(txring_txq(tx_ring),
1252 total_packets, total_bytes);
1253
1254#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
1255 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1256 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
1260 smp_mb();
1261 if (__netif_subqueue_stopped(tx_ring->netdev,
1262 tx_ring->queue_index)
1263 && !test_bit(__IXGBE_DOWN, &adapter->state)) {
1264 netif_wake_subqueue(tx_ring->netdev,
1265 tx_ring->queue_index);
1266 ++tx_ring->tx_stats.restart_queue;
1267 }
1268 }
1269
1270 return !!budget;
1271}
1272
1273#ifdef CONFIG_IXGBE_DCA
1274static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
1275 struct ixgbe_ring *tx_ring,
1276 int cpu)
1277{
1278 struct ixgbe_hw *hw = &adapter->hw;
1279 u32 txctrl = 0;
1280 u16 reg_offset;
1281
1282 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1283 txctrl = dca3_get_tag(tx_ring->dev, cpu);
1284
1285 switch (hw->mac.type) {
1286 case ixgbe_mac_82598EB:
1287 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1288 break;
1289 case ixgbe_mac_82599EB:
1290 case ixgbe_mac_X540:
1291 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
1292 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
1293 break;
1294 default:
1295
1296 return;
1297 }
1298
	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
1304 txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1305 IXGBE_DCA_TXCTRL_DATA_RRO_EN |
1306 IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1307
1308 IXGBE_WRITE_REG(hw, reg_offset, txctrl);
1309}
1310
1311static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
1312 struct ixgbe_ring *rx_ring,
1313 int cpu)
1314{
1315 struct ixgbe_hw *hw = &adapter->hw;
1316 u32 rxctrl = 0;
1317 u8 reg_idx = rx_ring->reg_idx;
1318
1319 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1320 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
1321
1322 switch (hw->mac.type) {
1323 case ixgbe_mac_82599EB:
1324 case ixgbe_mac_X540:
1325 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
1326 break;
1327 default:
1328 break;
1329 }
1330
	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
1336 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1337 IXGBE_DCA_RXCTRL_DATA_DCA_EN |
1338 IXGBE_DCA_RXCTRL_DESC_DCA_EN;
1339
1340 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
1341}
1342
1343static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1344{
1345 struct ixgbe_adapter *adapter = q_vector->adapter;
1346 struct ixgbe_ring *ring;
1347 int cpu = get_cpu();
1348
1349 if (q_vector->cpu == cpu)
1350 goto out_no_update;
1351
1352 ixgbe_for_each_ring(ring, q_vector->tx)
1353 ixgbe_update_tx_dca(adapter, ring, cpu);
1354
1355 ixgbe_for_each_ring(ring, q_vector->rx)
1356 ixgbe_update_rx_dca(adapter, ring, cpu);
1357
1358 q_vector->cpu = cpu;
1359out_no_update:
1360 put_cpu();
1361}
1362
1363static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1364{
1365 int i;
1366
1367
1368 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1369 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1370 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1371 else
1372 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1373 IXGBE_DCA_CTRL_DCA_DISABLE);
1374
1375 for (i = 0; i < adapter->num_q_vectors; i++) {
1376 adapter->q_vector[i]->cpu = -1;
1377 ixgbe_update_dca(adapter->q_vector[i]);
1378 }
1379}
1380
1381static int __ixgbe_notify_dca(struct device *dev, void *data)
1382{
1383 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1384 unsigned long event = *(unsigned long *)data;
1385
1386 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1387 return 0;
1388
1389 switch (event) {
1390 case DCA_PROVIDER_ADD:
1391
1392 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1393 break;
1394 if (dca_add_requester(dev) == 0) {
1395 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1396 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1397 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1398 break;
1399 }
		/* fall through - DCA is disabled */
1401 case DCA_PROVIDER_REMOVE:
1402 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1403 dca_remove_requester(dev);
1404 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1405 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1406 IXGBE_DCA_CTRL_DCA_DISABLE);
1407 }
1408 break;
1409 }
1410
1411 return 0;
1412}
1413
1414#endif
1415
1416#define IXGBE_RSS_L4_TYPES_MASK \
1417 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
1418 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
1419 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
1420 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
1421
1422static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1423 union ixgbe_adv_rx_desc *rx_desc,
1424 struct sk_buff *skb)
1425{
1426 u16 rss_type;
1427
1428 if (!(ring->netdev->features & NETIF_F_RXHASH))
1429 return;
1430
1431 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
1432 IXGBE_RXDADV_RSSTYPE_MASK;
1433
1434 if (!rss_type)
1435 return;
1436
1437 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1438 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
1439 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1440}
1441
1442#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
1450static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1451 union ixgbe_adv_rx_desc *rx_desc)
1452{
1453 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1454
1455 return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1456 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1457 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1458 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1459}
1460
1461#endif
1462
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
1468static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1469 union ixgbe_adv_rx_desc *rx_desc,
1470 struct sk_buff *skb)
1471{
1472 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1473 bool encap_pkt = false;
1474
1475 skb_checksum_none_assert(skb);
1476
1477
1478 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1479 return;
1480
1481
1482 if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
1483 encap_pkt = true;
1484 skb->encapsulation = 1;
1485 }
1486
1487
1488 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1489 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1490 ring->rx_stats.csum_err++;
1491 return;
1492 }
1493
1494 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1495 return;
1496
1497 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		/* 82599 errata: UDP frames with a zero checksum can be
		 * marked as checksum errors, so ignore the error when the
		 * ring is flagged for it.
		 */
1502 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1503 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1504 return;
1505
1506 ring->rx_stats.csum_err++;
1507 return;
1508 }
1509
1510
1511 skb->ip_summed = CHECKSUM_UNNECESSARY;
1512 if (encap_pkt) {
1513 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1514 return;
1515
1516 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1517 skb->ip_summed = CHECKSUM_NONE;
1518 return;
1519 }
1520
1521 skb->csum_level = 1;
1522 }
1523}
1524
1525static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
1526{
1527 return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
1528}
1529
1530static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1531 struct ixgbe_rx_buffer *bi)
1532{
1533 struct page *page = bi->page;
1534 dma_addr_t dma;
1535
1536
1537 if (likely(page))
1538 return true;
1539
1540
1541 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
1542 if (unlikely(!page)) {
1543 rx_ring->rx_stats.alloc_rx_page_failed++;
1544 return false;
1545 }
1546
1547
1548 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1549 ixgbe_rx_pg_size(rx_ring),
1550 DMA_FROM_DEVICE,
1551 IXGBE_RX_DMA_ATTR);
1552
1553
1554
1555
1556
1557 if (dma_mapping_error(rx_ring->dev, dma)) {
1558 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1559
1560 rx_ring->rx_stats.alloc_rx_page_failed++;
1561 return false;
1562 }
1563
1564 bi->dma = dma;
1565 bi->page = page;
1566 bi->page_offset = ixgbe_rx_offset(rx_ring);
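	/* Take a large page reference up front and track our share in
	 * pagecnt_bias so that reusing the buffer avoids an atomic
	 * refcount update in the hot path.
	 */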
1567 page_ref_add(page, USHRT_MAX - 1);
1568 bi->pagecnt_bias = USHRT_MAX;
1569 rx_ring->rx_stats.alloc_rx_page++;
1570
1571 return true;
1572}
1573
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
1579void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1580{
1581 union ixgbe_adv_rx_desc *rx_desc;
1582 struct ixgbe_rx_buffer *bi;
1583 u16 i = rx_ring->next_to_use;
1584 u16 bufsz;
1585
1586
1587 if (!cleaned_count)
1588 return;
1589
1590 rx_desc = IXGBE_RX_DESC(rx_ring, i);
1591 bi = &rx_ring->rx_buffer_info[i];
1592 i -= rx_ring->count;
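	/* work with an index relative to the end of the ring so the
	 * wrap check below is a simple test against zero
	 */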
1593
1594 bufsz = ixgbe_rx_bufsz(rx_ring);
1595
1596 do {
1597 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1598 break;
1599
1600
1601 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1602 bi->page_offset, bufsz,
1603 DMA_FROM_DEVICE);
1604
		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
1609 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1610
1611 rx_desc++;
1612 bi++;
1613 i++;
1614 if (unlikely(!i)) {
1615 rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1616 bi = rx_ring->rx_buffer_info;
1617 i -= rx_ring->count;
1618 }
1619
1620
1621 rx_desc->wb.upper.length = 0;
1622
1623 cleaned_count--;
1624 } while (cleaned_count);
1625
1626 i += rx_ring->count;
1627
1628 if (rx_ring->next_to_use != i) {
1629 rx_ring->next_to_use = i;
1630
1631
1632 rx_ring->next_to_alloc = i;
1633
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
1639 wmb();
1640 writel(i, rx_ring->tail);
1641 }
1642}
1643
1644static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1645 struct sk_buff *skb)
1646{
1647 u16 hdr_len = skb_headlen(skb);
1648
1649
1650 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1651 IXGBE_CB(skb)->append_cnt);
1652 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1653}
1654
1655static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1656 struct sk_buff *skb)
1657{
1658
1659 if (!IXGBE_CB(skb)->append_cnt)
1660 return;
1661
1662 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1663 rx_ring->rx_stats.rsc_flush++;
1664
1665 ixgbe_set_rsc_gso_size(rx_ring, skb);
1666
1667
1668 IXGBE_CB(skb)->append_cnt = 0;
1669}
1670
/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
1681void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1682 union ixgbe_adv_rx_desc *rx_desc,
1683 struct sk_buff *skb)
1684{
1685 struct net_device *dev = rx_ring->netdev;
1686 u32 flags = rx_ring->q_vector->adapter->flags;
1687
1688 ixgbe_update_rsc_stats(rx_ring, skb);
1689
1690 ixgbe_rx_hash(rx_ring, rx_desc, skb);
1691
1692 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1693
1694 if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
1695 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
1696
1697 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1698 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1699 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1700 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1701 }
1702
1703 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
1704 ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
1705
1706
1707 if (netif_is_ixgbe(dev))
1708 skb_record_rx_queue(skb, rx_ring->queue_index);
1709 else
1710 macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
1711 false);
1712
1713 skb->protocol = eth_type_trans(skb, dev);
1714}
1715
1716void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1717 struct sk_buff *skb)
1718{
1719 napi_gro_receive(&q_vector->napi, skb);
1720}
1721
/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
1733static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1734 union ixgbe_adv_rx_desc *rx_desc,
1735 struct sk_buff *skb)
1736{
1737 u32 ntc = rx_ring->next_to_clean + 1;
1738
1739
1740 ntc = (ntc < rx_ring->count) ? ntc : 0;
1741 rx_ring->next_to_clean = ntc;
1742
1743 prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1744
1745
1746 if (ring_is_rsc_enabled(rx_ring)) {
1747 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1748 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1749
1750 if (unlikely(rsc_enabled)) {
1751 u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1752
1753 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1754 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1755
1756
1757 ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1758 ntc &= IXGBE_RXDADV_NEXTP_MASK;
1759 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1760 }
1761 }
1762
1763
1764 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1765 return false;
1766
1767
1768 rx_ring->rx_buffer_info[ntc].skb = skb;
1769 rx_ring->rx_stats.non_eop_descs++;
1770
1771 return true;
1772}
1773
/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
1786static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1787 struct sk_buff *skb)
1788{
1789 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1790 unsigned char *va;
1791 unsigned int pull_len;
1792
1793
1794
1795
1796
1797
1798 va = skb_frag_address(frag);
1799
1800
1801
1802
1803
1804 pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
1805
1806
1807 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1808
1809
1810 skb_frag_size_sub(frag, pull_len);
1811 skb_frag_off_add(frag, pull_len);
1812 skb->data_len -= pull_len;
1813 skb->tail += pull_len;
1814}
1815
/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */
1826static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1827 struct sk_buff *skb)
1828{
1829 if (ring_uses_build_skb(rx_ring)) {
1830 unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
1831
1832 dma_sync_single_range_for_cpu(rx_ring->dev,
1833 IXGBE_CB(skb)->dma,
1834 offset,
1835 skb_headlen(skb),
1836 DMA_FROM_DEVICE);
1837 } else {
1838 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1839
1840 dma_sync_single_range_for_cpu(rx_ring->dev,
1841 IXGBE_CB(skb)->dma,
1842 skb_frag_off(frag),
1843 skb_frag_size(frag),
1844 DMA_FROM_DEVICE);
1845 }
1846
1847
1848 if (unlikely(IXGBE_CB(skb)->page_released)) {
1849 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
1850 ixgbe_rx_pg_size(rx_ring),
1851 DMA_FROM_DEVICE,
1852 IXGBE_RX_DMA_ATTR);
1853 }
1854}
1855
/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check if the skb is valid; in the XDP case it will be an error pointer,
 * so return true to abort processing and advance to the next descriptor.
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
1878bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1879 union ixgbe_adv_rx_desc *rx_desc,
1880 struct sk_buff *skb)
1881{
1882 struct net_device *netdev = rx_ring->netdev;
1883
1884
1885 if (IS_ERR(skb))
1886 return true;
1887
1888
1889
1890
1891 if (!netdev ||
1892 (unlikely(ixgbe_test_staterr(rx_desc,
1893 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1894 !(netdev->features & NETIF_F_RXALL)))) {
1895 dev_kfree_skb_any(skb);
1896 return true;
1897 }
1898
1899
1900 if (!skb_headlen(skb))
1901 ixgbe_pull_tail(rx_ring, skb);
1902
1903#ifdef IXGBE_FCOE
1904
1905 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1906 return false;
1907
1908#endif
1909
1910 if (eth_skb_pad(skb))
1911 return true;
1912
1913 return false;
1914}
1915
/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
1923static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1924 struct ixgbe_rx_buffer *old_buff)
1925{
1926 struct ixgbe_rx_buffer *new_buff;
1927 u16 nta = rx_ring->next_to_alloc;
1928
1929 new_buff = &rx_ring->rx_buffer_info[nta];
1930
1931
1932 nta++;
1933 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1934
1935
1936
1937
1938
1939 new_buff->dma = old_buff->dma;
1940 new_buff->page = old_buff->page;
1941 new_buff->page_offset = old_buff->page_offset;
1942 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1943}
1944
1945static inline bool ixgbe_page_is_reserved(struct page *page)
1946{
1947 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1948}
1949
1950static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
1951{
1952 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1953 struct page *page = rx_buffer->page;
1954
1955
1956 if (unlikely(ixgbe_page_is_reserved(page)))
1957 return false;
1958
1959#if (PAGE_SIZE < 8192)
	/* if we are the only owner of the page we can reuse it */
1961 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
1962 return false;
1963#else
	/* The last offset is a bit aggressive in that we assume the
	 * worst case of FCoE being enabled and using a 3K buffer.
	 * However this should have minimal impact as the 1K extra is
	 * still less than one buffer in size.
	 */
1969#define IXGBE_LAST_OFFSET \
1970 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
1971 if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
1972 return false;
1973#endif
1974
	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
1979 if (unlikely(pagecnt_bias == 1)) {
1980 page_ref_add(page, USHRT_MAX - 1);
1981 rx_buffer->pagecnt_bias = USHRT_MAX;
1982 }
1983
1984 return true;
1985}
1986
/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of data in rx_buffer
 *
 * This function attaches the page held by rx_buffer to the skb as a
 * fragment and then advances (or flips) the page offset so the remaining
 * portion of the page can be reused for a future buffer.
 **/
2002static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
2003 struct ixgbe_rx_buffer *rx_buffer,
2004 struct sk_buff *skb,
2005 unsigned int size)
2006{
2007#if (PAGE_SIZE < 8192)
2008 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2009#else
2010 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2011 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
2012 SKB_DATA_ALIGN(size);
2013#endif
2014 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
2015 rx_buffer->page_offset, size, truesize);
2016#if (PAGE_SIZE < 8192)
2017 rx_buffer->page_offset ^= truesize;
2018#else
2019 rx_buffer->page_offset += truesize;
2020#endif
2021}
2022
2023static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
2024 union ixgbe_adv_rx_desc *rx_desc,
2025 struct sk_buff **skb,
2026 const unsigned int size)
2027{
2028 struct ixgbe_rx_buffer *rx_buffer;
2029
2030 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
2031 prefetchw(rx_buffer->page);
2032 *skb = rx_buffer->skb;
	/* Delay unmapping of the first packet.  It carries the header
	 * information, and HW may still access the header after the
	 * writeback.  Only unmap it when EOP is reached.
	 */
2038 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
2039 if (!*skb)
2040 goto skip_sync;
2041 } else {
2042 if (*skb)
2043 ixgbe_dma_sync_frag(rx_ring, *skb);
2044 }
2045
2046
2047 dma_sync_single_range_for_cpu(rx_ring->dev,
2048 rx_buffer->dma,
2049 rx_buffer->page_offset,
2050 size,
2051 DMA_FROM_DEVICE);
2052skip_sync:
2053 rx_buffer->pagecnt_bias--;
2054
2055 return rx_buffer;
2056}
2057
2058static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
2059 struct ixgbe_rx_buffer *rx_buffer,
2060 struct sk_buff *skb)
2061{
2062 if (ixgbe_can_reuse_rx_page(rx_buffer)) {
2063
2064 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2065 } else {
2066 if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
2067
2068 IXGBE_CB(skb)->page_released = true;
2069 } else {
2070
2071 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2072 ixgbe_rx_pg_size(rx_ring),
2073 DMA_FROM_DEVICE,
2074 IXGBE_RX_DMA_ATTR);
2075 }
2076 __page_frag_cache_drain(rx_buffer->page,
2077 rx_buffer->pagecnt_bias);
2078 }
2079
2080
2081 rx_buffer->page = NULL;
2082 rx_buffer->skb = NULL;
2083}
2084
2085static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
2086 struct ixgbe_rx_buffer *rx_buffer,
2087 struct xdp_buff *xdp,
2088 union ixgbe_adv_rx_desc *rx_desc)
2089{
2090 unsigned int size = xdp->data_end - xdp->data;
2091#if (PAGE_SIZE < 8192)
2092 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2093#else
2094 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
2095 xdp->data_hard_start);
2096#endif
2097 struct sk_buff *skb;
2098
2099
2100 prefetch(xdp->data);
2101#if L1_CACHE_BYTES < 128
2102 prefetch(xdp->data + L1_CACHE_BYTES);
2103#endif
	/* Allocate a small skb for the headers.  Frames larger than
	 * IXGBE_RX_HDR_SIZE keep their data in the page, which is attached
	 * as a fragment; smaller frames are copied into the skb so the
	 * page can be recycled immediately.
	 */
2121 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
2122 if (unlikely(!skb))
2123 return NULL;
2124
2125 if (size > IXGBE_RX_HDR_SIZE) {
2126 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2127 IXGBE_CB(skb)->dma = rx_buffer->dma;
2128
2129 skb_add_rx_frag(skb, 0, rx_buffer->page,
2130 xdp->data - page_address(rx_buffer->page),
2131 size, truesize);
2132#if (PAGE_SIZE < 8192)
2133 rx_buffer->page_offset ^= truesize;
2134#else
2135 rx_buffer->page_offset += truesize;
2136#endif
2137 } else {
2138 memcpy(__skb_put(skb, size),
2139 xdp->data, ALIGN(size, sizeof(long)));
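		/* the frame was copied into the skb, so hand our page
		 * reference back by bumping pagecnt_bias
		 */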
2140 rx_buffer->pagecnt_bias++;
2141 }
2142
2143 return skb;
2144}
2145
2146static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
2147 struct ixgbe_rx_buffer *rx_buffer,
2148 struct xdp_buff *xdp,
2149 union ixgbe_adv_rx_desc *rx_desc)
2150{
2151 unsigned int metasize = xdp->data - xdp->data_meta;
2152#if (PAGE_SIZE < 8192)
2153 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2154#else
2155 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2156 SKB_DATA_ALIGN(xdp->data_end -
2157 xdp->data_hard_start);
2158#endif
2159 struct sk_buff *skb;
2160
	/* Prefetch first cache line of first page.  If xdp->data_meta
	 * is unused, this points exactly at xdp->data; otherwise a
	 * consumer is likely to access the metadata bytes immediately
	 * before the packet data.
	 */
2166 prefetch(xdp->data_meta);
2167#if L1_CACHE_BYTES < 128
2168 prefetch(xdp->data_meta + L1_CACHE_BYTES);
2169#endif
2170
2171
2172 skb = build_skb(xdp->data_hard_start, truesize);
2173 if (unlikely(!skb))
2174 return NULL;
2175
2176
2177 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2178 __skb_put(skb, xdp->data_end - xdp->data);
2179 if (metasize)
2180 skb_metadata_set(skb, metasize);
2181
2182
2183 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2184 IXGBE_CB(skb)->dma = rx_buffer->dma;
2185
2186
2187#if (PAGE_SIZE < 8192)
2188 rx_buffer->page_offset ^= truesize;
2189#else
2190 rx_buffer->page_offset += truesize;
2191#endif
2192
2193 return skb;
2194}
2195
2196static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
2197 struct ixgbe_ring *rx_ring,
2198 struct xdp_buff *xdp)
2199{
2200 int err, result = IXGBE_XDP_PASS;
2201 struct bpf_prog *xdp_prog;
2202 struct xdp_frame *xdpf;
2203 u32 act;
2204
2205 rcu_read_lock();
2206 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2207
2208 if (!xdp_prog)
2209 goto xdp_out;
2210
2211 prefetchw(xdp->data_hard_start);
2212
2213 act = bpf_prog_run_xdp(xdp_prog, xdp);
2214 switch (act) {
2215 case XDP_PASS:
2216 break;
2217 case XDP_TX:
2218 xdpf = convert_to_xdp_frame(xdp);
2219 if (unlikely(!xdpf)) {
2220 result = IXGBE_XDP_CONSUMED;
2221 break;
2222 }
2223 result = ixgbe_xmit_xdp_ring(adapter, xdpf);
2224 break;
2225 case XDP_REDIRECT:
2226 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
2227 if (!err)
2228 result = IXGBE_XDP_REDIR;
2229 else
2230 result = IXGBE_XDP_CONSUMED;
2231 break;
2232 default:
2233 bpf_warn_invalid_xdp_action(act);
		/* fall through */
2235 case XDP_ABORTED:
2236 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fall through */
2238 case XDP_DROP:
2239 result = IXGBE_XDP_CONSUMED;
2240 break;
2241 }
2242xdp_out:
2243 rcu_read_unlock();
2244 return ERR_PTR(-result);
2245}
2246
2247static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
2248 struct ixgbe_rx_buffer *rx_buffer,
2249 unsigned int size)
2250{
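	/* for small pages the buffer is split in half and we flip between
	 * the halves; for larger pages we simply advance the offset
	 */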
2251#if (PAGE_SIZE < 8192)
2252 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2253
2254 rx_buffer->page_offset ^= truesize;
2255#else
2256 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2257 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
2258 SKB_DATA_ALIGN(size);
2259
2260 rx_buffer->page_offset += truesize;
2261#endif
2262}
2263
/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  Completed descriptors are turned into skbs built from page
 * fragments, run through any attached XDP program, and then handed up the
 * stack via GRO.
 *
 * Returns amount of work completed
 **/
2277static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2278 struct ixgbe_ring *rx_ring,
2279 const int budget)
2280{
2281 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2282 struct ixgbe_adapter *adapter = q_vector->adapter;
2283#ifdef IXGBE_FCOE
2284 int ddp_bytes;
2285 unsigned int mss = 0;
2286#endif
2287 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2288 unsigned int xdp_xmit = 0;
2289 struct xdp_buff xdp;
2290
2291 xdp.rxq = &rx_ring->xdp_rxq;
2292
2293 while (likely(total_rx_packets < budget)) {
2294 union ixgbe_adv_rx_desc *rx_desc;
2295 struct ixgbe_rx_buffer *rx_buffer;
2296 struct sk_buff *skb;
2297 unsigned int size;
2298
2299
2300 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2301 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2302 cleaned_count = 0;
2303 }
2304
2305 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2306 size = le16_to_cpu(rx_desc->wb.upper.length);
2307 if (!size)
2308 break;
2309
		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
2314 dma_rmb();
2315
2316 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
2317
2318
2319 if (!skb) {
2320 xdp.data = page_address(rx_buffer->page) +
2321 rx_buffer->page_offset;
2322 xdp.data_meta = xdp.data;
2323 xdp.data_hard_start = xdp.data -
2324 ixgbe_rx_offset(rx_ring);
2325 xdp.data_end = xdp.data + size;
2326
2327 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2328 }
2329
2330 if (IS_ERR(skb)) {
2331 unsigned int xdp_res = -PTR_ERR(skb);
2332
2333 if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
2334 xdp_xmit |= xdp_res;
2335 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2336 } else {
2337 rx_buffer->pagecnt_bias++;
2338 }
2339 total_rx_packets++;
2340 total_rx_bytes += size;
2341 } else if (skb) {
2342 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
2343 } else if (ring_uses_build_skb(rx_ring)) {
2344 skb = ixgbe_build_skb(rx_ring, rx_buffer,
2345 &xdp, rx_desc);
2346 } else {
2347 skb = ixgbe_construct_skb(rx_ring, rx_buffer,
2348 &xdp, rx_desc);
2349 }
2350
2351
2352 if (!skb) {
2353 rx_ring->rx_stats.alloc_rx_buff_failed++;
2354 rx_buffer->pagecnt_bias++;
2355 break;
2356 }
2357
2358 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
2359 cleaned_count++;
2360
2361
2362 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2363 continue;
2364
2365
2366 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2367 continue;
2368
2369
2370 total_rx_bytes += skb->len;
2371
2372
2373 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2374
2375#ifdef IXGBE_FCOE
2376
2377 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2378 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2379
2380 if (ddp_bytes > 0) {
2381 if (!mss) {
2382 mss = rx_ring->netdev->mtu -
2383 sizeof(struct fcoe_hdr) -
2384 sizeof(struct fc_frame_header) -
2385 sizeof(struct fcoe_crc_eof);
2386 if (mss > 512)
2387 mss &= ~511;
2388 }
2389 total_rx_bytes += ddp_bytes;
2390 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2391 mss);
2392 }
2393 if (!ddp_bytes) {
2394 dev_kfree_skb_any(skb);
2395 continue;
2396 }
2397 }
2398
2399#endif
2400 ixgbe_rx_skb(q_vector, skb);
2401
2402
2403 total_rx_packets++;
2404 }
2405
2406 if (xdp_xmit & IXGBE_XDP_REDIR)
2407 xdp_do_flush_map();
2408
2409 if (xdp_xmit & IXGBE_XDP_TX) {
2410 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
2411
 /* Force memory writes to complete before letting h/w
  * know there are new descriptors to fetch.
  */
2415 wmb();
2416 writel(ring->next_to_use, ring->tail);
2417 }
2418
2419 u64_stats_update_begin(&rx_ring->syncp);
2420 rx_ring->stats.packets += total_rx_packets;
2421 rx_ring->stats.bytes += total_rx_bytes;
2422 u64_stats_update_end(&rx_ring->syncp);
2423 q_vector->rx.total_packets += total_rx_packets;
2424 q_vector->rx.total_bytes += total_rx_bytes;
2425
2426 return total_rx_packets;
2427}
2428
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
2436static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2437{
2438 struct ixgbe_q_vector *q_vector;
2439 int v_idx;
2440 u32 mask;
2441
 /* Populate MSIX to EITR Select */
2443 if (adapter->num_vfs > 32) {
2444 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
2445 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2446 }
2447
 /*
  * Populate the IVAR table and set the ITR values to the
  * corresponding register.
  */
2452 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2453 struct ixgbe_ring *ring;
2454 q_vector = adapter->q_vector[v_idx];
2455
2456 ixgbe_for_each_ring(ring, q_vector->rx)
2457 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2458
2459 ixgbe_for_each_ring(ring, q_vector->tx)
2460 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2461
2462 ixgbe_write_eitr(q_vector);
2463 }
2464
2465 switch (adapter->hw.mac.type) {
2466 case ixgbe_mac_82598EB:
2467 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2468 v_idx);
2469 break;
2470 case ixgbe_mac_82599EB:
2471 case ixgbe_mac_X540:
2472 case ixgbe_mac_X550:
2473 case ixgbe_mac_X550EM_x:
2474 case ixgbe_mac_x550em_a:
2475 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2476 break;
2477 default:
2478 break;
2479 }
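 /* throttle the "other causes" vector (v_idx == num_q_vectors here) */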
2480 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2481
2482
2483 mask = IXGBE_EIMS_ENABLE_MASK;
2484 mask &= ~(IXGBE_EIMS_OTHER |
2485 IXGBE_EIMS_MAILBOX |
2486 IXGBE_EIMS_LSC);
2487
2488 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2489}
2490
/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per-interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were computed based on theoretical maximum wire speed and
 * thresholds were set based on testing data as well as attempting to
 * minimize response time while increasing bulk throughput.
 **/
2504static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2505 struct ixgbe_ring_container *ring_container)
2506{
2507 unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
2508 IXGBE_ITR_ADAPTIVE_LATENCY;
2509 unsigned int avg_wire_size, packets, bytes;
2510 unsigned long next_update = jiffies;
2511
2512
2513
2514
2515 if (!ring_container->ring)
2516 return;
2517
 /* If we didn't update within up to 1 - 2 jiffies we can assume
  * that either packets are coming in so slow there hasn't been
  * any work, or that there is so much work that NAPI is dealing
  * with interrupt moderation and we don't need to do anything.
  */
2523 if (time_after(next_update, ring_container->next_update))
2524 goto clear_counts;
2525
2526 packets = ring_container->total_packets;
2527
 /* We have no packets to actually measure against. This means
  * either one of the other queues on this vector is active or
  * we are a Tx queue doing TSO with too high of an interrupt rate.
  *
  * When this occurs just tick up our delay by the minimum value
  * and hope that this extra delay will prevent us from being called
  * too soon until we are back to interrupting at our normal rate.
  */
2536 if (!packets) {
2537 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2538 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2539 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2540 itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
2541 goto clear_counts;
2542 }
2543
2544 bytes = ring_container->total_bytes;
2545
 /* If packets are less than 4 or bytes are less than 9000 assume
  * insufficient data to use bulk rate limiting approach. We are
  * likely latency driven.
  */
2550 if (packets < 4 && bytes < 9000) {
2551 itr = IXGBE_ITR_ADAPTIVE_LATENCY;
2552 goto adjust_by_size;
2553 }
2554
2555
2556
2557
2558
2559 if (packets < 48) {
2560 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2561 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2562 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2563 goto clear_counts;
2564 }
2565
2566
2567
2568
2569 if (packets < 96) {
2570 itr = q_vector->itr >> 2;
2571 goto clear_counts;
2572 }
2573
 /* If packet count is 96 or greater we are likely looking at a
  * slight overrun of the delay we want. Try halving our delay to
  * see if that will cut the number of packets in half per interrupt.
  */
2578 if (packets < 256) {
2579 itr = q_vector->itr >> 3;
2580 if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
2581 itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
2582 goto clear_counts;
2583 }
2584
2585
2586
2587
2588
2589
2590
2591 itr = IXGBE_ITR_ADAPTIVE_BULK;
2592
2593adjust_by_size:
2594
2595
2596
2597
2598
2599 avg_wire_size = bytes / packets;
2600
 /* The following is a crude approximation of:
  *  wmem_default / (size + overhead) = desired_pkts_per_int
  *  rate / bits_per_byte / (size + ethernet overhead) = pkt_intvl
  *  (desired_pkt_rate / pkt_intvl) * usecs_per_sec = ITR value
  *
  * Assuming wmem_default is 212992 and overhead is 640 bytes per
  * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
  * formula down to
  *
  *  (170 * (size + 24)) / (size + 640) = ITR
  *
  * We first do some math on the packet size and then finally bitshift
  * by 8 after rounding up. We also have to account for link speed
  * differences as the ITR scaling below depends on it.
  */
2616 if (avg_wire_size <= 60) {
2617
2618 avg_wire_size = 5120;
2619 } else if (avg_wire_size <= 316) {
2620
2621 avg_wire_size *= 40;
2622 avg_wire_size += 2720;
2623 } else if (avg_wire_size <= 1084) {
2624
2625 avg_wire_size *= 15;
2626 avg_wire_size += 11452;
2627 } else if (avg_wire_size < 1968) {
2628
2629 avg_wire_size *= 5;
2630 avg_wire_size += 22420;
2631 } else {
2632
2633 avg_wire_size = 32256;
2634 }
2635
2636
2637
2638
2639 if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
2640 avg_wire_size >>= 1;
2641
2642
2643
2644
2645
2646
2647
2648
2649 switch (q_vector->adapter->link_speed) {
2650 case IXGBE_LINK_SPEED_10GB_FULL:
2651 case IXGBE_LINK_SPEED_100_FULL:
2652 default:
2653 itr += DIV_ROUND_UP(avg_wire_size,
2654 IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
2655 IXGBE_ITR_ADAPTIVE_MIN_INC;
2656 break;
2657 case IXGBE_LINK_SPEED_2_5GB_FULL:
2658 case IXGBE_LINK_SPEED_1GB_FULL:
2659 case IXGBE_LINK_SPEED_10_FULL:
2660 if (avg_wire_size > 8064)
2661 avg_wire_size = 8064;
2662 itr += DIV_ROUND_UP(avg_wire_size,
2663 IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
2664 IXGBE_ITR_ADAPTIVE_MIN_INC;
2665 break;
2666 }
2667
2668clear_counts:
2669
2670 ring_container->itr = itr;
2671
2672
2673 ring_container->next_update = next_update + 1;
2674
2675 ring_container->total_bytes = 0;
2676 ring_container->total_packets = 0;
2677}
2678
2679
/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update the EITR register for a vector at runtime.
 **/
2687void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2688{
2689 struct ixgbe_adapter *adapter = q_vector->adapter;
2690 struct ixgbe_hw *hw = &adapter->hw;
2691 int v_idx = q_vector->v_idx;
2692 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2693
2694 switch (adapter->hw.mac.type) {
2695 case ixgbe_mac_82598EB:
2696
2697 itr_reg |= (itr_reg << 16);
2698 break;
2699 case ixgbe_mac_82599EB:
2700 case ixgbe_mac_X540:
2701 case ixgbe_mac_X550:
2702 case ixgbe_mac_X550EM_x:
2703 case ixgbe_mac_x550em_a:
2704
2705
2706
2707
2708 itr_reg |= IXGBE_EITR_CNT_WDIS;
2709 break;
2710 default:
2711 break;
2712 }
2713 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2714}
2715
2716static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2717{
2718 u32 new_itr;
2719
2720 ixgbe_update_itr(q_vector, &q_vector->tx);
2721 ixgbe_update_itr(q_vector, &q_vector->rx);
2722
2723
2724 new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
2725
2726
2727 new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
2728 new_itr <<= 2;
2729
2730 if (new_itr != q_vector->itr) {
2731
2732 q_vector->itr = new_itr;
2733
2734 ixgbe_write_eitr(q_vector);
2735 }
2736}
2737
/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/
2742static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2743{
2744 struct ixgbe_hw *hw = &adapter->hw;
2745 u32 eicr = adapter->interrupt_event;
2746 s32 rc;
2747
2748 if (test_bit(__IXGBE_DOWN, &adapter->state))
2749 return;
2750
2751 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2752 return;
2753
2754 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2755
2756 switch (hw->device_id) {
2757 case IXGBE_DEV_ID_82599_T3_LOM:
2758
2759
2760
2761
2762
2763
2764
2765 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2766 !(eicr & IXGBE_EICR_LSC))
2767 return;
2768
2769 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2770 u32 speed;
2771 bool link_up = false;
2772
2773 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2774
2775 if (link_up)
2776 return;
2777 }
2778
2779
2780 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2781 return;
2782
2783 break;
2784 case IXGBE_DEV_ID_X550EM_A_1G_T:
2785 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2786 rc = hw->phy.ops.check_overtemp(hw);
2787 if (rc != IXGBE_ERR_OVERTEMP)
2788 return;
2789 break;
2790 default:
2791 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2792 return;
2793 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2794 return;
2795 break;
2796 }
2797 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2798
2799 adapter->interrupt_event = 0;
2800}
2801
2802static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2803{
2804 struct ixgbe_hw *hw = &adapter->hw;
2805
2806 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2807 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2808 e_crit(probe, "Fan has stopped, replace the adapter\n");
2809
2810 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2811 }
2812}
2813
2814static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2815{
2816 struct ixgbe_hw *hw = &adapter->hw;
2817
2818 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2819 return;
2820
2821 switch (adapter->hw.mac.type) {
2822 case ixgbe_mac_82599EB:
2823
2824
2825
2826
2827 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2828 (eicr & IXGBE_EICR_LSC)) &&
2829 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2830 adapter->interrupt_event = eicr;
2831 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2832 ixgbe_service_event_schedule(adapter);
2833 return;
2834 }
2835 return;
2836 case ixgbe_mac_x550em_a:
2837 if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
2838 adapter->interrupt_event = eicr;
2839 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2840 ixgbe_service_event_schedule(adapter);
2841 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
2842 IXGBE_EICR_GPI_SDP0_X550EM_a);
2843 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
2844 IXGBE_EICR_GPI_SDP0_X550EM_a);
2845 }
2846 return;
2847 case ixgbe_mac_X550:
2848 case ixgbe_mac_X540:
2849 if (!(eicr & IXGBE_EICR_TS))
2850 return;
2851 break;
2852 default:
2853 return;
2854 }
2855
2856 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2857}
2858
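/**
 * ixgbe_is_sfp - report whether the current media is a pluggable SFP module
 * @hw: pointer to hardware structure
 **/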
2859static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2860{
2861 switch (hw->mac.type) {
2862 case ixgbe_mac_82598EB:
2863 if (hw->phy.type == ixgbe_phy_nl)
2864 return true;
2865 return false;
2866 case ixgbe_mac_82599EB:
2867 case ixgbe_mac_X550EM_x:
2868 case ixgbe_mac_x550em_a:
2869 switch (hw->mac.ops.get_media_type(hw)) {
2870 case ixgbe_media_type_fiber:
2871 case ixgbe_media_type_fiber_qsfp:
2872 return true;
2873 default:
2874 return false;
2875 }
2876 default:
2877 return false;
2878 }
2879}
2880
2881static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2882{
2883 struct ixgbe_hw *hw = &adapter->hw;
2884 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2885
2886 if (!ixgbe_is_sfp(hw))
2887 return;
2888
2889
2890 if (hw->mac.type >= ixgbe_mac_X540)
2891 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2892
2893 if (eicr & eicr_mask) {
2894
2895 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2896 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2897 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2898 adapter->sfp_poll_time = 0;
2899 ixgbe_service_event_schedule(adapter);
2900 }
2901 }
2902
2903 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2904 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2905
2906 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2907 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2908 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2909 ixgbe_service_event_schedule(adapter);
2910 }
2911 }
2912}
2913
2914static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2915{
2916 struct ixgbe_hw *hw = &adapter->hw;
2917
2918 adapter->lsc_int++;
2919 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2920 adapter->link_check_timeout = jiffies;
2921 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2922 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2923 IXGBE_WRITE_FLUSH(hw);
2924 ixgbe_service_event_schedule(adapter);
2925 }
2926}
2927
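/**
 * ixgbe_irq_enable_queues - enable interrupts for the queue vectors in @qmask
 * @adapter: board private structure
 * @qmask: 64-bit mask of queue vectors to enable via EIMS/EIMS_EX
 **/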
2928static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2929 u64 qmask)
2930{
2931 u32 mask;
2932 struct ixgbe_hw *hw = &adapter->hw;
2933
2934 switch (hw->mac.type) {
2935 case ixgbe_mac_82598EB:
2936 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2937 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2938 break;
2939 case ixgbe_mac_82599EB:
2940 case ixgbe_mac_X540:
2941 case ixgbe_mac_X550:
2942 case ixgbe_mac_X550EM_x:
2943 case ixgbe_mac_x550em_a:
2944 mask = (qmask & 0xFFFFFFFF);
2945 if (mask)
2946 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2947 mask = (qmask >> 32);
2948 if (mask)
2949 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2950 break;
2951 default:
2952 break;
2953 }
2954
2955}
2956
2957static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2958 u64 qmask)
2959{
2960 u32 mask;
2961 struct ixgbe_hw *hw = &adapter->hw;
2962
2963 switch (hw->mac.type) {
2964 case ixgbe_mac_82598EB:
2965 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2966 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2967 break;
2968 case ixgbe_mac_82599EB:
2969 case ixgbe_mac_X540:
2970 case ixgbe_mac_X550:
2971 case ixgbe_mac_X550EM_x:
2972 case ixgbe_mac_x550em_a:
2973 mask = (qmask & 0xFFFFFFFF);
2974 if (mask)
2975 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2976 mask = (qmask >> 32);
2977 if (mask)
2978 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2979 break;
2980 default:
2981 break;
2982 }
2983
2984}
2985
/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @queues: enable irqs for queues
 * @flush: flush register write
 **/
2992static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2993 bool flush)
2994{
2995 struct ixgbe_hw *hw = &adapter->hw;
2996 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2997
2998
2999 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
3000 mask &= ~IXGBE_EIMS_LSC;
3001
3002 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
3003 switch (adapter->hw.mac.type) {
3004 case ixgbe_mac_82599EB:
3005 mask |= IXGBE_EIMS_GPI_SDP0(hw);
3006 break;
3007 case ixgbe_mac_X540:
3008 case ixgbe_mac_X550:
3009 case ixgbe_mac_X550EM_x:
3010 case ixgbe_mac_x550em_a:
3011 mask |= IXGBE_EIMS_TS;
3012 break;
3013 default:
3014 break;
3015 }
3016 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3017 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3018 switch (adapter->hw.mac.type) {
3019 case ixgbe_mac_82599EB:
3020 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3021 mask |= IXGBE_EIMS_GPI_SDP2(hw);
 /* fall through */
3023 case ixgbe_mac_X540:
3024 case ixgbe_mac_X550:
3025 case ixgbe_mac_X550EM_x:
3026 case ixgbe_mac_x550em_a:
3027 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3028 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3029 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
3030 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
3031 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
3032 mask |= IXGBE_EICR_GPI_SDP0_X540;
3033 mask |= IXGBE_EIMS_ECC;
3034 mask |= IXGBE_EIMS_MAILBOX;
3035 break;
3036 default:
3037 break;
3038 }
3039
3040 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
3041 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
3042 mask |= IXGBE_EIMS_FLOW_DIR;
3043
3044 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
3045 if (queues)
3046 ixgbe_irq_enable_queues(adapter, ~0);
3047 if (flush)
3048 IXGBE_WRITE_FLUSH(&adapter->hw);
3049}
3050
3051static irqreturn_t ixgbe_msix_other(int irq, void *data)
3052{
3053 struct ixgbe_adapter *adapter = data;
3054 struct ixgbe_hw *hw = &adapter->hw;
3055 u32 eicr;
3056
3057
 /* Workaround for silicon errata: use clear-by-write (EICS read)
  * instead of clear-by-read.  Reading EICS returns the interrupt
  * causes without clearing them; they are cleared explicitly by
  * the write to EICR below.
  */
3063 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3064
 /* The lower 16 bits of the EICR register are for the queue
  * interrupts which should be masked here in order to not
  * accidentally clear them if the bits are high when
  * ixgbe_msix_other is called.  There is a race condition
  * otherwise which results in possible performance loss,
  * especially if this handler is triggering consistently.
  */
3072 eicr &= 0xFFFF0000;
3073
3074 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3075
3076 if (eicr & IXGBE_EICR_LSC)
3077 ixgbe_check_lsc(adapter);
3078
3079 if (eicr & IXGBE_EICR_MAILBOX)
3080 ixgbe_msg_task(adapter);
3081
3082 switch (hw->mac.type) {
3083 case ixgbe_mac_82599EB:
3084 case ixgbe_mac_X540:
3085 case ixgbe_mac_X550:
3086 case ixgbe_mac_X550EM_x:
3087 case ixgbe_mac_x550em_a:
3088 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
3089 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3090 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
3091 ixgbe_service_event_schedule(adapter);
3092 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3093 IXGBE_EICR_GPI_SDP0_X540);
3094 }
3095 if (eicr & IXGBE_EICR_ECC) {
3096 e_info(link, "Received ECC Err, initiating reset\n");
3097 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3098 ixgbe_service_event_schedule(adapter);
3099 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3100 }
3101
3102 if (eicr & IXGBE_EICR_FLOW_DIR) {
3103 int reinit_count = 0;
3104 int i;
3105 for (i = 0; i < adapter->num_tx_queues; i++) {
3106 struct ixgbe_ring *ring = adapter->tx_ring[i];
3107 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
3108 &ring->state))
3109 reinit_count++;
3110 }
3111 if (reinit_count) {
3112
3113 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3114 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
3115 ixgbe_service_event_schedule(adapter);
3116 }
3117 }
3118 ixgbe_check_sfp_event(adapter, eicr);
3119 ixgbe_check_overtemp_event(adapter, eicr);
3120 break;
3121 default:
3122 break;
3123 }
3124
3125 ixgbe_check_fan_failure(adapter, eicr);
3126
3127 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3128 ixgbe_ptp_check_pps_event(adapter);
3129
3130
3131 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3132 ixgbe_irq_enable(adapter, false, false);
3133
3134 return IRQ_HANDLED;
3135}
3136
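/**
 * ixgbe_msix_clean_rings - MSI-X queue vector handler, schedules NAPI
 * @irq: interrupt number (unused)
 * @data: pointer to the ixgbe_q_vector for this interrupt vector
 *
 * EIAM has already auto-masked this vector, so all that is left to do
 * here is schedule the NAPI poll routine.
 **/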
3137static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
3138{
3139 struct ixgbe_q_vector *q_vector = data;
3140
3141
3142
3143 if (q_vector->rx.ring || q_vector->tx.ring)
3144 napi_schedule_irqoff(&q_vector->napi);
3145
3146 return IRQ_HANDLED;
3147}
3148
/**
 * ixgbe_poll - NAPI polling RX/TX cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 **/
3156int ixgbe_poll(struct napi_struct *napi, int budget)
3157{
3158 struct ixgbe_q_vector *q_vector =
3159 container_of(napi, struct ixgbe_q_vector, napi);
3160 struct ixgbe_adapter *adapter = q_vector->adapter;
3161 struct ixgbe_ring *ring;
3162 int per_ring_budget, work_done = 0;
3163 bool clean_complete = true;
3164
3165#ifdef CONFIG_IXGBE_DCA
3166 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3167 ixgbe_update_dca(q_vector);
3168#endif
3169
3170 ixgbe_for_each_ring(ring, q_vector->tx) {
3171 bool wd = ring->xsk_umem ?
3172 ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
3173 ixgbe_clean_tx_irq(q_vector, ring, budget);
3174
3175 if (!wd)
3176 clean_complete = false;
3177 }
3178
3179
3180 if (budget <= 0)
3181 return budget;
3182
3183
3184
3185 if (q_vector->rx.count > 1)
3186 per_ring_budget = max(budget/q_vector->rx.count, 1);
3187 else
3188 per_ring_budget = budget;
3189
3190 ixgbe_for_each_ring(ring, q_vector->rx) {
3191 int cleaned = ring->xsk_umem ?
3192 ixgbe_clean_rx_irq_zc(q_vector, ring,
3193 per_ring_budget) :
3194 ixgbe_clean_rx_irq(q_vector, ring,
3195 per_ring_budget);
3196
3197 work_done += cleaned;
3198 if (cleaned >= per_ring_budget)
3199 clean_complete = false;
3200 }
3201
3202
3203 if (!clean_complete)
3204 return budget;
3205
3206
3207 if (likely(napi_complete_done(napi, work_done))) {
3208 if (adapter->rx_itr_setting & 1)
3209 ixgbe_set_itr(q_vector);
3210 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3211 ixgbe_irq_enable_queues(adapter,
3212 BIT_ULL(q_vector->v_idx));
3213 }
3214
3215 return min(work_done, budget - 1);
3216}
3217
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
3225static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
3226{
3227 struct net_device *netdev = adapter->netdev;
3228 unsigned int ri = 0, ti = 0;
3229 int vector, err;
3230
3231 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3232 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3233 struct msix_entry *entry = &adapter->msix_entries[vector];
3234
3235 if (q_vector->tx.ring && q_vector->rx.ring) {
3236 snprintf(q_vector->name, sizeof(q_vector->name),
3237 "%s-TxRx-%u", netdev->name, ri++);
3238 ti++;
3239 } else if (q_vector->rx.ring) {
3240 snprintf(q_vector->name, sizeof(q_vector->name),
3241 "%s-rx-%u", netdev->name, ri++);
3242 } else if (q_vector->tx.ring) {
3243 snprintf(q_vector->name, sizeof(q_vector->name),
3244 "%s-tx-%u", netdev->name, ti++);
3245 } else {
3246
3247 continue;
3248 }
3249 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
3250 q_vector->name, q_vector);
3251 if (err) {
3252 e_err(probe, "request_irq failed for MSIX interrupt "
3253 "Error: %d\n", err);
3254 goto free_queue_irqs;
3255 }
3256
3257 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3258
3259 irq_set_affinity_hint(entry->vector,
3260 &q_vector->affinity_mask);
3261 }
3262 }
3263
3264 err = request_irq(adapter->msix_entries[vector].vector,
3265 ixgbe_msix_other, 0, netdev->name, adapter);
3266 if (err) {
3267 e_err(probe, "request_irq for msix_other failed: %d\n", err);
3268 goto free_queue_irqs;
3269 }
3270
3271 return 0;
3272
3273free_queue_irqs:
3274 while (vector) {
3275 vector--;
3276 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
3277 NULL);
3278 free_irq(adapter->msix_entries[vector].vector,
3279 adapter->q_vector[vector]);
3280 }
3281 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3282 pci_disable_msix(adapter->pdev);
3283 kfree(adapter->msix_entries);
3284 adapter->msix_entries = NULL;
3285 return err;
3286}
3287
/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
3293static irqreturn_t ixgbe_intr(int irq, void *data)
3294{
3295 struct ixgbe_adapter *adapter = data;
3296 struct ixgbe_hw *hw = &adapter->hw;
3297 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3298 u32 eicr;
3299
3300
3301
3302
3303
3304 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3305
3306
3307
3308 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3309 if (!eicr) {
3310
3311
3312
3313
3314
3315
3316
3317 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3318 ixgbe_irq_enable(adapter, true, true);
3319 return IRQ_NONE;
3320 }
3321
3322 if (eicr & IXGBE_EICR_LSC)
3323 ixgbe_check_lsc(adapter);
3324
3325 switch (hw->mac.type) {
3326 case ixgbe_mac_82599EB:
3327 ixgbe_check_sfp_event(adapter, eicr);
 /* fall through */
3329 case ixgbe_mac_X540:
3330 case ixgbe_mac_X550:
3331 case ixgbe_mac_X550EM_x:
3332 case ixgbe_mac_x550em_a:
3333 if (eicr & IXGBE_EICR_ECC) {
3334 e_info(link, "Received ECC Err, initiating reset\n");
3335 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3336 ixgbe_service_event_schedule(adapter);
3337 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3338 }
3339 ixgbe_check_overtemp_event(adapter, eicr);
3340 break;
3341 default:
3342 break;
3343 }
3344
3345 ixgbe_check_fan_failure(adapter, eicr);
3346 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3347 ixgbe_ptp_check_pps_event(adapter);
3348
3349
3350 napi_schedule_irqoff(&q_vector->napi);
3351
3352
3353
3354
3355
3356 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3357 ixgbe_irq_enable(adapter, false, false);
3358
3359 return IRQ_HANDLED;
3360}
3361
/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
3369static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
3370{
3371 struct net_device *netdev = adapter->netdev;
3372 int err;
3373
3374 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3375 err = ixgbe_request_msix_irqs(adapter);
3376 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
3377 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
3378 netdev->name, adapter);
3379 else
3380 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
3381 netdev->name, adapter);
3382
3383 if (err)
3384 e_err(probe, "request_irq failed, Error %d\n", err);
3385
3386 return err;
3387}
3388
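/**
 * ixgbe_free_irq - free IRQs previously requested by ixgbe_request_irq()
 * @adapter: board private structure
 **/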
3389static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
3390{
3391 int vector;
3392
3393 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3394 free_irq(adapter->pdev->irq, adapter);
3395 return;
3396 }
3397
3398 if (!adapter->msix_entries)
3399 return;
3400
3401 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3402 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3403 struct msix_entry *entry = &adapter->msix_entries[vector];
3404
3405
3406 if (!q_vector->rx.ring && !q_vector->tx.ring)
3407 continue;
3408
3409
3410 irq_set_affinity_hint(entry->vector, NULL);
3411
3412 free_irq(entry->vector, q_vector);
3413 }
3414
3415 free_irq(adapter->msix_entries[vector].vector, adapter);
3416}
3417
/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
3422static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3423{
3424 switch (adapter->hw.mac.type) {
3425 case ixgbe_mac_82598EB:
3426 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3427 break;
3428 case ixgbe_mac_82599EB:
3429 case ixgbe_mac_X540:
3430 case ixgbe_mac_X550:
3431 case ixgbe_mac_X550EM_x:
3432 case ixgbe_mac_x550em_a:
3433 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3434 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3435 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3436 break;
3437 default:
3438 break;
3439 }
3440 IXGBE_WRITE_FLUSH(&adapter->hw);
3441 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3442 int vector;
3443
3444 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3445 synchronize_irq(adapter->msix_entries[vector].vector);
3446
3447 synchronize_irq(adapter->msix_entries[vector++].vector);
3448 } else {
3449 synchronize_irq(adapter->pdev->irq);
3450 }
3451}
3452
3453
3454
3455
3456
3457
3458static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3459{
3460 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3461
3462 ixgbe_write_eitr(q_vector);
3463
3464 ixgbe_set_ivar(adapter, 0, 0, 0);
3465 ixgbe_set_ivar(adapter, 1, 0, 0);
3466
3467 e_info(hw, "Legacy interrupt IVAR setup done\n");
3468}
3469
/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
3477void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3478 struct ixgbe_ring *ring)
3479{
3480 struct ixgbe_hw *hw = &adapter->hw;
3481 u64 tdba = ring->dma;
3482 int wait_loop = 10;
3483 u32 txdctl = IXGBE_TXDCTL_ENABLE;
3484 u8 reg_idx = ring->reg_idx;
3485
3486 ring->xsk_umem = NULL;
3487 if (ring_is_xdp(ring))
3488 ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
3489
 /* disable queue to avoid issues while updating state */
3491 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3492 IXGBE_WRITE_FLUSH(hw);
3493
3494 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3495 (tdba & DMA_BIT_MASK(32)));
3496 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3497 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3498 ring->count * sizeof(union ixgbe_adv_tx_desc));
3499 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3500 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3501 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3502
 /*
  * set WTHRESH to encourage burst writeback, it should not be set
  * higher than 1 when:
  * - ITR is 0 as it could cause false TX hangs
  * - ITR is set to > 100k int/sec and BQL is enabled
  *
  * In order to avoid issues WTHRESH + PTHRESH should always be equal
  * to or less than the number of on chip descriptors, which is
  * currently 40.
  */
3513 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3514 txdctl |= 1u << 16;
3515 else
3516 txdctl |= 8u << 16;
3517
 /*
  * Setting PTHRESH to 32 both improves performance
  * and avoids a TX hang with DCB enabled
  */
 txdctl |= (1u << 8) | /* HTHRESH = 1 */
 32; /* PTHRESH = 32 */
3524
3525
3526 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3527 ring->atr_sample_rate = adapter->atr_sample_rate;
3528 ring->atr_count = 0;
3529 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3530 } else {
3531 ring->atr_sample_rate = 0;
3532 }
3533
3534
3535 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3536 struct ixgbe_q_vector *q_vector = ring->q_vector;
3537
3538 if (q_vector)
3539 netif_set_xps_queue(ring->netdev,
3540 &q_vector->affinity_mask,
3541 ring->queue_index);
3542 }
3543
3544 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3545
3546
3547 memset(ring->tx_buffer_info, 0,
3548 sizeof(struct ixgbe_tx_buffer) * ring->count);
3549
3550
3551 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3552
3553
3554 if (hw->mac.type == ixgbe_mac_82598EB &&
3555 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3556 return;
3557
3558
3559 do {
3560 usleep_range(1000, 2000);
3561 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3562 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3563 if (!wait_loop)
3564 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
3565}
3566
3567static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3568{
3569 struct ixgbe_hw *hw = &adapter->hw;
3570 u32 rttdcs, mtqc;
3571 u8 tcs = adapter->hw_tcs;
3572
3573 if (hw->mac.type == ixgbe_mac_82598EB)
3574 return;
3575
3576
3577 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3578 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3579 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3580
3581
3582 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3583 mtqc = IXGBE_MTQC_VT_ENA;
3584 if (tcs > 4)
3585 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3586 else if (tcs > 1)
3587 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3588 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3589 IXGBE_82599_VMDQ_4Q_MASK)
3590 mtqc |= IXGBE_MTQC_32VF;
3591 else
3592 mtqc |= IXGBE_MTQC_64VF;
3593 } else {
3594 if (tcs > 4) {
3595 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3596 } else if (tcs > 1) {
3597 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3598 } else {
3599 u8 max_txq = adapter->num_tx_queues +
3600 adapter->num_xdp_queues;
3601 if (max_txq > 63)
3602 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3603 else
3604 mtqc = IXGBE_MTQC_64Q_1PB;
3605 }
3606 }
3607
3608 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3609
3610
3611 if (tcs) {
3612 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3613 sectx |= IXGBE_SECTX_DCB;
3614 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3615 }
3616
3617
3618 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3619 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3620}
3621
/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
3628static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3629{
3630 struct ixgbe_hw *hw = &adapter->hw;
3631 u32 dmatxctl;
3632 u32 i;
3633
3634 ixgbe_setup_mtqc(adapter);
3635
3636 if (hw->mac.type != ixgbe_mac_82598EB) {
3637
3638 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3639 dmatxctl |= IXGBE_DMATXCTL_TE;
3640 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3641 }
3642
3643
3644 for (i = 0; i < adapter->num_tx_queues; i++)
3645 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3646 for (i = 0; i < adapter->num_xdp_queues; i++)
3647 ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
3648}
3649
3650static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3651 struct ixgbe_ring *ring)
3652{
3653 struct ixgbe_hw *hw = &adapter->hw;
3654 u8 reg_idx = ring->reg_idx;
3655 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3656
3657 srrctl |= IXGBE_SRRCTL_DROP_EN;
3658
3659 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3660}
3661
3662static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3663 struct ixgbe_ring *ring)
3664{
3665 struct ixgbe_hw *hw = &adapter->hw;
3666 u8 reg_idx = ring->reg_idx;
3667 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3668
3669 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3670
3671 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3672}
3673
3674#ifdef CONFIG_IXGBE_DCB
3675void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3676#else
3677static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3678#endif
3679{
3680 int i;
3681 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3682
3683 if (adapter->ixgbe_ieee_pfc)
3684 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3685
 /*
  * We should set the drop enable bit if:
  *  SR-IOV is enabled
  *   or
  *  Number of Rx queues > 1 and flow control is disabled
  *
  * This allows us to avoid head of line blocking for security
  * and performance reasons.
  */
3695 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3696 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3697 for (i = 0; i < adapter->num_rx_queues; i++)
3698 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3699 } else {
3700 for (i = 0; i < adapter->num_rx_queues; i++)
3701 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3702 }
3703}
3704
3705#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3706
3707static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3708 struct ixgbe_ring *rx_ring)
3709{
3710 struct ixgbe_hw *hw = &adapter->hw;
3711 u32 srrctl;
3712 u8 reg_idx = rx_ring->reg_idx;
3713
3714 if (hw->mac.type == ixgbe_mac_82598EB) {
3715 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3716
3717
3718
3719
3720
3721 reg_idx &= mask;
3722 }
3723
3724
3725 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3726
3727
3728 if (rx_ring->xsk_umem) {
3729 u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
3730 XDP_PACKET_HEADROOM;
3731
 /* If the MAC supports setting RXDCTL.RLPML, then
  * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
  * RXDCTL.RLPML is set to the actual UMEM buffer
  * size. If not, then we are stuck with a 1k buffer
  * size resolution. In this case frames larger than
  * the UMEM buffer size viewed in a 1k resolution will
  * be dropped.
  */
3740 if (hw->mac.type != ixgbe_mac_82599EB)
3741 srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3742 else
3743 srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3744 } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
3745 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3746 } else {
3747 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3748 }
3749
3750
3751 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3752
3753 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3754}
3755
/**
 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
 * @adapter: device handle
 *
 *  - 82598/82599/X540:     128
 *  - X550(non-SRIOV mode): 512
 *  - X550(SRIOV mode):     64
 **/
3764u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3765{
3766 if (adapter->hw.mac.type < ixgbe_mac_X550)
3767 return 128;
3768 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3769 return 64;
3770 else
3771 return 512;
3772}
3773
3774
3775
3776
3777
3778
3779
3780void ixgbe_store_key(struct ixgbe_adapter *adapter)
3781{
3782 struct ixgbe_hw *hw = &adapter->hw;
3783 int i;
3784
3785 for (i = 0; i < 10; i++)
3786 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3787}
3788
3789
3790
3791
3792
3793
3794
3795static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
3796{
3797 u32 *rss_key;
3798
3799 if (!adapter->rss_key) {
3800 rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
3801 if (unlikely(!rss_key))
3802 return -ENOMEM;
3803
3804 netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
3805 adapter->rss_key = rss_key;
3806 }
3807
3808 return 0;
3809}
3810
3811
3812
3813
3814
3815
3816
3817void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3818{
3819 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3820 struct ixgbe_hw *hw = &adapter->hw;
3821 u32 reta = 0;
3822 u32 indices_multi;
3823 u8 *indir_tbl = adapter->rss_indir_tbl;
3824
3825
3826
3827
3828
3829
3830
3831 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3832 indices_multi = 0x11;
3833 else
3834 indices_multi = 0x1;
3835
3836
3837 for (i = 0; i < reta_entries; i++) {
3838 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3839 if ((i & 3) == 3) {
3840 if (i < 128)
3841 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3842 else
3843 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3844 reta);
3845 reta = 0;
3846 }
3847 }
3848}
3849
3850
3851
3852
3853
3854
3855
3856static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3857{
3858 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3859 struct ixgbe_hw *hw = &adapter->hw;
3860 u32 vfreta = 0;
3861
3862
3863 for (i = 0; i < reta_entries; i++) {
3864 u16 pool = adapter->num_rx_pools;
3865
3866 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3867 if ((i & 3) != 3)
3868 continue;
3869
3870 while (pool--)
3871 IXGBE_WRITE_REG(hw,
3872 IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
3873 vfreta);
3874 vfreta = 0;
3875 }
3876}
3877
3878static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3879{
3880 u32 i, j;
3881 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3882 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3883
3884
3885
3886
3887
3888 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
3889 rss_i = 4;
3890
3891
3892 ixgbe_store_key(adapter);
3893
3894
3895 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3896
3897 for (i = 0, j = 0; i < reta_entries; i++, j++) {
3898 if (j == rss_i)
3899 j = 0;
3900
3901 adapter->rss_indir_tbl[i] = j;
3902 }
3903
3904 ixgbe_store_reta(adapter);
3905}
3906
3907static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3908{
3909 struct ixgbe_hw *hw = &adapter->hw;
3910 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3911 int i, j;
3912
3913
3914 for (i = 0; i < 10; i++) {
3915 u16 pool = adapter->num_rx_pools;
3916
3917 while (pool--)
3918 IXGBE_WRITE_REG(hw,
3919 IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
3920 *(adapter->rss_key + i));
3921 }
3922
3923
3924 for (i = 0, j = 0; i < 64; i++, j++) {
3925 if (j == rss_i)
3926 j = 0;
3927
3928 adapter->rss_indir_tbl[i] = j;
3929 }
3930
3931 ixgbe_store_vfreta(adapter);
3932}
3933
3934static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3935{
3936 struct ixgbe_hw *hw = &adapter->hw;
3937 u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3938 u32 rxcsum;
3939
3940
3941 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3942 rxcsum |= IXGBE_RXCSUM_PCSD;
3943 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3944
3945 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3946 if (adapter->ring_feature[RING_F_RSS].mask)
3947 mrqc = IXGBE_MRQC_RSSEN;
3948 } else {
3949 u8 tcs = adapter->hw_tcs;
3950
3951 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3952 if (tcs > 4)
3953 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3954 else if (tcs > 1)
3955 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3956 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3957 IXGBE_82599_VMDQ_4Q_MASK)
3958 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3959 else
3960 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3961
3962
3963
3964
3965 if (hw->mac.type >= ixgbe_mac_X550)
3966 mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3967 } else {
3968 if (tcs > 4)
3969 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3970 else if (tcs > 1)
3971 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3972 else
3973 mrqc = IXGBE_MRQC_RSSEN;
3974 }
3975 }
3976
3977
3978 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3979 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3980 IXGBE_MRQC_RSS_FIELD_IPV6 |
3981 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3982
3983 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3984 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3985 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3986 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3987
3988 if ((hw->mac.type >= ixgbe_mac_X550) &&
3989 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3990 u16 pool = adapter->num_rx_pools;
3991
3992
3993 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
3994 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3995
3996
3997 ixgbe_setup_vfreta(adapter);
3998 vfmrqc = IXGBE_MRQC_RSSEN;
3999 vfmrqc |= rss_field;
4000
4001 while (pool--)
4002 IXGBE_WRITE_REG(hw,
4003 IXGBE_PFVFMRQC(VMDQ_P(pool)),
4004 vfmrqc);
4005 } else {
4006 ixgbe_setup_reta(adapter);
4007 mrqc |= rss_field;
4008 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4009 }
4010}
4011
4012
4013
4014
4015
4016
4017static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
4018 struct ixgbe_ring *ring)
4019{
4020 struct ixgbe_hw *hw = &adapter->hw;
4021 u32 rscctrl;
4022 u8 reg_idx = ring->reg_idx;
4023
4024 if (!ring_is_rsc_enabled(ring))
4025 return;
4026
4027 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
4028 rscctrl |= IXGBE_RSCCTL_RSCEN;
4029
4030
4031
4032
4033
4034 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
4035 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
4036}
4037
4038#define IXGBE_MAX_RX_DESC_POLL 10
4039static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
4040 struct ixgbe_ring *ring)
4041{
4042 struct ixgbe_hw *hw = &adapter->hw;
4043 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
4044 u32 rxdctl;
4045 u8 reg_idx = ring->reg_idx;
4046
4047 if (ixgbe_removed(hw->hw_addr))
4048 return;
4049
4050 if (hw->mac.type == ixgbe_mac_82598EB &&
4051 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
4052 return;
4053
4054 do {
4055 usleep_range(1000, 2000);
4056 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4057 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4058
4059 if (!wait_loop) {
4060 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
4061 "the polling period\n", reg_idx);
4062 }
4063}
4064
4065void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
4066 struct ixgbe_ring *ring)
4067{
4068 struct ixgbe_hw *hw = &adapter->hw;
4069 union ixgbe_adv_rx_desc *rx_desc;
4070 u64 rdba = ring->dma;
4071 u32 rxdctl;
4072 u8 reg_idx = ring->reg_idx;
4073
4074 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
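 /* re-register the XDP memory model for this ring: zero-copy when an
  * AF_XDP umem is attached, the default page-shared model otherwise
  */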
4075 ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
4076 if (ring->xsk_umem) {
4077 ring->zca.free = ixgbe_zca_free;
4078 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4079 MEM_TYPE_ZERO_COPY,
4080 &ring->zca));
4081
4082 } else {
4083 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4084 MEM_TYPE_PAGE_SHARED, NULL));
4085 }
4086
4087
4088 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4089 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4090
4091
4092 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4093 IXGBE_WRITE_FLUSH(hw);
4094
4095 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
4096 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
4097 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
4098 ring->count * sizeof(union ixgbe_adv_rx_desc));
4099
4100 IXGBE_WRITE_FLUSH(hw);
4101
4102 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
4103 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
4104 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
4105
4106 ixgbe_configure_srrctl(adapter, ring);
4107 ixgbe_configure_rscctl(adapter, ring);
4108
4109 if (hw->mac.type == ixgbe_mac_82598EB) {
 /*
  * enable cache line friendly hardware writes:
  * PTHRESH=32 descriptors (half the internal cache),
  * this also removes ugly rx_no_buffer_count increment
  * HTHRESH=4 descriptors (to minimize latency on fetch)
  * WTHRESH=8 descriptors (to encourage burst writeback)
  */
4117 rxdctl &= ~0x3FFFFF;
4118 rxdctl |= 0x080420;
4119#if (PAGE_SIZE < 8192)
4120
4121 } else if (hw->mac.type != ixgbe_mac_82599EB) {
4122 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4123 IXGBE_RXDCTL_RLPML_EN);
4124
 /* Limit the maximum frame size so we don't overrun the skb.
  * This can happen in SRIOV mode when the MTU of the VF is
  * higher than the MTU of the PF.
  */
4129 if (ring_uses_build_skb(ring) &&
4130 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
4131 rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
4132 IXGBE_RXDCTL_RLPML_EN;
4133#endif
4134 }
4135
4136 if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
4137 u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
4138 XDP_PACKET_HEADROOM;
4139
4140 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4141 IXGBE_RXDCTL_RLPML_EN);
4142 rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
4143
4144 ring->rx_buf_len = xsk_buf_len;
4145 }
4146
4147
4148 memset(ring->rx_buffer_info, 0,
4149 sizeof(struct ixgbe_rx_buffer) * ring->count);
4150
4151
4152 rx_desc = IXGBE_RX_DESC(ring, 0);
4153 rx_desc->wb.upper.length = 0;
4154
4155
4156 rxdctl |= IXGBE_RXDCTL_ENABLE;
4157 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4158
4159 ixgbe_rx_desc_queue_enable(adapter, ring);
4160 if (ring->xsk_umem)
4161 ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
4162 else
4163 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4164}
4165
4166static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
4167{
4168 struct ixgbe_hw *hw = &adapter->hw;
4169 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
4170 u16 pool = adapter->num_rx_pools;
4171
4172
4173 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4174 IXGBE_PSRTYPE_UDPHDR |
4175 IXGBE_PSRTYPE_IPV4HDR |
4176 IXGBE_PSRTYPE_L2HDR |
4177 IXGBE_PSRTYPE_IPV6HDR;
4178
4179 if (hw->mac.type == ixgbe_mac_82598EB)
4180 return;
4181
4182 if (rss_i > 3)
4183 psrtype |= 2u << 29;
4184 else if (rss_i > 1)
4185 psrtype |= 1u << 29;
4186
4187 while (pool--)
4188 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4189}
4190
4191static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
4192{
4193 struct ixgbe_hw *hw = &adapter->hw;
4194 u16 pool = adapter->num_rx_pools;
4195 u32 reg_offset, vf_shift, vmolr;
4196 u32 gcr_ext, vmdctl;
4197 int i;
4198
4199 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
4200 return;
4201
4202 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4203 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
4204 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
4205 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
4206 vmdctl |= IXGBE_VT_CTL_REPLEN;
4207 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
4208
4209
4210
4211
4212 vmolr = IXGBE_VMOLR_AUPE;
4213 while (pool--)
4214 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
4215
4216 vf_shift = VMDQ_P(0) % 32;
4217 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
4218
4219
4220 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
4221 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
4222 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
4223 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
4224 if (adapter->bridge_mode == BRIDGE_MODE_VEB)
4225 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4226
4227
4228 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
4229
4230
4231 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4232
4233
4234
4235
4236
4237 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4238 case IXGBE_82599_VMDQ_8Q_MASK:
4239 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
4240 break;
4241 case IXGBE_82599_VMDQ_4Q_MASK:
4242 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
4243 break;
4244 default:
4245 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
4246 break;
4247 }
4248
4249 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4250
4251 for (i = 0; i < adapter->num_vfs; i++) {
4252
4253 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
4254 adapter->vfinfo[i].spoofchk_enabled);
4255
4256
4257 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
4258 adapter->vfinfo[i].rss_query_enabled);
4259 }
4260}
4261
4262static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
4263{
4264 struct ixgbe_hw *hw = &adapter->hw;
4265 struct net_device *netdev = adapter->netdev;
4266 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4267 struct ixgbe_ring *rx_ring;
4268 int i;
4269 u32 mhadd, hlreg0;
4270
4271#ifdef IXGBE_FCOE
4272
4273 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
4274 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
4275 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4276
4277#endif
4278
4279
4280 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4281 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
4282
4283 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4284 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
4285 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4286 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
4287
4288 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4289 }
4290
4291 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4292
4293 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4294 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4295
4296
4297
4298
4299
4300 for (i = 0; i < adapter->num_rx_queues; i++) {
4301 rx_ring = adapter->rx_ring[i];
4302
4303 clear_ring_rsc_enabled(rx_ring);
4304 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4305 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4306
4307 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4308 set_ring_rsc_enabled(rx_ring);
4309
4310 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
4311 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4312
4313 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
4314 continue;
4315
4316 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4317
4318#if (PAGE_SIZE < 8192)
4319 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4320 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4321
4322 if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
4323 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
4324 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4325#endif
4326 }
4327}
4328
4329static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
4330{
4331 struct ixgbe_hw *hw = &adapter->hw;
4332 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4333
4334 switch (hw->mac.type) {
4335 case ixgbe_mac_82598EB:
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
4347 break;
4348 case ixgbe_mac_X550:
4349 case ixgbe_mac_X550EM_x:
4350 case ixgbe_mac_x550em_a:
4351 if (adapter->num_vfs)
4352 rdrxctl |= IXGBE_RDRXCTL_PSP;
 /* fall through */
4354 case ixgbe_mac_82599EB:
4355 case ixgbe_mac_X540:
4356
4357 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
4358 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
4359 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4360
4361 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
4362 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4363 break;
4364 default:
4365
4366 return;
4367 }
4368
4369 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4370}
4371
/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
4378static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
4379{
4380 struct ixgbe_hw *hw = &adapter->hw;
4381 int i;
4382 u32 rxctrl, rfctl;
4383
4384
4385 hw->mac.ops.disable_rx(hw);
4386
4387 ixgbe_setup_psrtype(adapter);
4388 ixgbe_setup_rdrxctl(adapter);
4389
4390
4391 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4392 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
4393 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
4394 rfctl |= IXGBE_RFCTL_RSC_DIS;
4395
4396
4397 rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
4398 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4399
4400
4401 ixgbe_setup_mrqc(adapter);
4402
4403
4404 ixgbe_set_rx_buffer_len(adapter);
4405
4406
4407
4408
4409
4410 for (i = 0; i < adapter->num_rx_queues; i++)
4411 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
4412
4413 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4414
4415 if (hw->mac.type == ixgbe_mac_82598EB)
4416 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4417
4418
4419 rxctrl |= IXGBE_RXCTRL_RXEN;
4420 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4421}
4422
4423static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
4424 __be16 proto, u16 vid)
4425{
4426 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4427 struct ixgbe_hw *hw = &adapter->hw;
4428
4429
4430 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4431 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
4432
4433 set_bit(vid, adapter->active_vlans);
4434
4435 return 0;
4436}
4437
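/**
 * ixgbe_find_vlvf_entry - locate the VLVF register entry for a VLAN ID
 * @hw: pointer to hardware structure
 * @vlan: VLAN ID to search for
 *
 * Returns the index of the matching VLVF entry, or 0 if the VLAN was not
 * found.  The search deliberately skips entry 0, which callers treat as
 * "no match".
 **/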
4438static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
4439{
4440 u32 vlvf;
4441 int idx;
4442
4443
4444 if (vlan == 0)
4445 return 0;
4446
4447
4448 for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
4449 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
4450 if ((vlvf & VLAN_VID_MASK) == vlan)
4451 break;
4452 }
4453
4454 return idx;
4455}
4456
4457void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
4458{
4459 struct ixgbe_hw *hw = &adapter->hw;
4460 u32 bits, word;
4461 int idx;
4462
4463 idx = ixgbe_find_vlvf_entry(hw, vid);
4464 if (!idx)
4465 return;
4466
4467
4468
4469
4470 word = idx * 2 + (VMDQ_P(0) / 32);
4471 bits = ~BIT(VMDQ_P(0) % 32);
4472 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4473
4474
4475 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
4476 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4477 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
4478 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
4479 }
4480}
4481
4482static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
4483 __be16 proto, u16 vid)
4484{
4485 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4486 struct ixgbe_hw *hw = &adapter->hw;
4487
4488
4489 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4490 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
4491
4492 clear_bit(vid, adapter->active_vlans);
4493
4494 return 0;
4495}
4496
4497
4498
4499
4500
4501static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
4502{
4503 struct ixgbe_hw *hw = &adapter->hw;
4504 u32 vlnctrl;
4505 int i, j;
4506
4507 switch (hw->mac.type) {
4508 case ixgbe_mac_82598EB:
4509 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4510 vlnctrl &= ~IXGBE_VLNCTRL_VME;
4511 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4512 break;
4513 case ixgbe_mac_82599EB:
4514 case ixgbe_mac_X540:
4515 case ixgbe_mac_X550:
4516 case ixgbe_mac_X550EM_x:
4517 case ixgbe_mac_x550em_a:
4518 for (i = 0; i < adapter->num_rx_queues; i++) {
4519 struct ixgbe_ring *ring = adapter->rx_ring[i];
4520
4521 if (!netif_is_ixgbe(ring->netdev))
4522 continue;
4523
4524 j = ring->reg_idx;
4525 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4526 vlnctrl &= ~IXGBE_RXDCTL_VME;
4527 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4528 }
4529 break;
4530 default:
4531 break;
4532 }
4533}
4534
4535
4536
4537
4538
4539static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4540{
4541 struct ixgbe_hw *hw = &adapter->hw;
4542 u32 vlnctrl;
4543 int i, j;
4544
4545 switch (hw->mac.type) {
4546 case ixgbe_mac_82598EB:
4547 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4548 vlnctrl |= IXGBE_VLNCTRL_VME;
4549 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4550 break;
4551 case ixgbe_mac_82599EB:
4552 case ixgbe_mac_X540:
4553 case ixgbe_mac_X550:
4554 case ixgbe_mac_X550EM_x:
4555 case ixgbe_mac_x550em_a:
4556 for (i = 0; i < adapter->num_rx_queues; i++) {
4557 struct ixgbe_ring *ring = adapter->rx_ring[i];
4558
4559 if (!netif_is_ixgbe(ring->netdev))
4560 continue;
4561
4562 j = ring->reg_idx;
4563 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4564 vlnctrl |= IXGBE_RXDCTL_VME;
4565 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4566 }
4567 break;
4568 default:
4569 break;
4570 }
4571}
4572
4573static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4574{
4575 struct ixgbe_hw *hw = &adapter->hw;
4576 u32 vlnctrl, i;
4577
4578 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4579
4580 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
4581
4582 vlnctrl |= IXGBE_VLNCTRL_VFE;
4583 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4584 } else {
4585 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4586 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4587 return;
4588 }
4589
4590
4591 if (hw->mac.type == ixgbe_mac_82598EB)
4592 return;
4593
4594
4595 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
4596 return;
4597
4598
4599 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4600
4601
4602 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4603 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4604 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4605
4606 vlvfb |= BIT(VMDQ_P(0) % 32);
4607 IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4608 }
4609
4610
4611 for (i = hw->mac.vft_size; i--;)
4612 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4613}
4614
4615#define VFTA_BLOCK_SIZE 8
4616static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4617{
4618 struct ixgbe_hw *hw = &adapter->hw;
4619 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4620 u32 vid_start = vfta_offset * 32;
4621 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4622 u32 i, vid, word, bits;
4623
4624 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4625 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
4626
4627
4628 vid = vlvf & VLAN_VID_MASK;
4629
4630
4631 if (vid < vid_start || vid >= vid_end)
4632 continue;
4633
4634 if (vlvf) {
4635
4636 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4637
4638
4639 if (test_bit(vid, adapter->active_vlans))
4640 continue;
4641 }
4642
4643
4644 word = i * 2 + VMDQ_P(0) / 32;
4645 bits = ~BIT(VMDQ_P(0) % 32);
4646 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4647 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4648 }
4649
4650
4651 for (i = VFTA_BLOCK_SIZE; i--;) {
4652 vid = (vfta_offset + i) * 32;
4653 word = vid / BITS_PER_LONG;
4654 bits = vid % BITS_PER_LONG;
4655
4656 vfta[i] |= adapter->active_vlans[word] >> bits;
4657
4658 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4659 }
4660}
4661
4662static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4663{
4664 struct ixgbe_hw *hw = &adapter->hw;
4665 u32 vlnctrl, i;
4666
4667
4668 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4669 vlnctrl |= IXGBE_VLNCTRL_VFE;
4670 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4671
4672 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
4673 hw->mac.type == ixgbe_mac_82598EB)
4674 return;
4675
4676
4677 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4678 return;
4679
4680
4681 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4682
4683 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
4684 ixgbe_scrub_vfta(adapter, i);
4685}
4686
4687static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4688{
4689 u16 vid = 1;
4690
4691 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4692
4693 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
4694 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4695}
4696
4697
4698
4699
4700
4701
4702
4703
4704
4705
4706static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4707{
4708 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4709 struct ixgbe_hw *hw = &adapter->hw;
4710
4711 if (!netif_running(netdev))
4712 return 0;
4713
4714 if (hw->mac.ops.update_mc_addr_list)
4715 hw->mac.ops.update_mc_addr_list(hw, netdev);
4716 else
4717 return -ENOMEM;
4718
4719#ifdef CONFIG_PCI_IOV
4720 ixgbe_restore_vf_multicasts(adapter);
4721#endif
4722
4723 return netdev_mc_count(netdev);
4724}
4725
4726#ifdef CONFIG_PCI_IOV
4727void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4728{
4729 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4730 struct ixgbe_hw *hw = &adapter->hw;
4731 int i;
4732
4733 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4734 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4735
4736 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4737 hw->mac.ops.set_rar(hw, i,
4738 mac_table->addr,
4739 mac_table->pool,
4740 IXGBE_RAH_AV);
4741 else
4742 hw->mac.ops.clear_rar(hw, i);
4743 }
4744}
4745
4746#endif
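/* push only the entries marked MODIFIED in the software MAC table out to
 * the hardware RAR registers
 */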
4747static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4748{
4749 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4750 struct ixgbe_hw *hw = &adapter->hw;
4751 int i;
4752
4753 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4754 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
4755 continue;
4756
4757 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4758
4759 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4760 hw->mac.ops.set_rar(hw, i,
4761 mac_table->addr,
4762 mac_table->pool,
4763 IXGBE_RAH_AV);
4764 else
4765 hw->mac.ops.clear_rar(hw, i);
4766 }
4767}
4768
4769static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4770{
4771 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4772 struct ixgbe_hw *hw = &adapter->hw;
4773 int i;
4774
4775 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4776 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4777 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4778 }
4779
4780 ixgbe_sync_mac_table(adapter);
4781}
4782
4783static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
4784{
4785 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4786 struct ixgbe_hw *hw = &adapter->hw;
4787 int i, count = 0;
4788
4789 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4790
4791 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
4792 continue;
4793
4794
4795 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
4796 if (mac_table->pool != pool)
4797 continue;
4798 }
4799
4800 count++;
4801 }
4802
4803 return count;
4804}
4805
4806
4807static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
4808{
4809 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4810 struct ixgbe_hw *hw = &adapter->hw;
4811
4812 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
4813 mac_table->pool = VMDQ_P(0);
4814
4815 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
4816
4817 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
4818 IXGBE_RAH_AV);
4819}
4820
4821int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4822 const u8 *addr, u16 pool)
4823{
4824 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4825 struct ixgbe_hw *hw = &adapter->hw;
4826 int i;
4827
4828 if (is_zero_ether_addr(addr))
4829 return -EINVAL;
4830
4831 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4832 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4833 continue;
4834
4835 ether_addr_copy(mac_table->addr, addr);
4836 mac_table->pool = pool;
4837
4838 mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
4839 IXGBE_MAC_STATE_IN_USE;
4840
4841 ixgbe_sync_mac_table(adapter);
4842
4843 return i;
4844 }
4845
4846 return -ENOMEM;
4847}
4848
4849int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
4850 const u8 *addr, u16 pool)
4851{
4852 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4853 struct ixgbe_hw *hw = &adapter->hw;
4854 int i;
4855
4856 if (is_zero_ether_addr(addr))
4857 return -EINVAL;
4858
4859
4860 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4861
4862 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
4863 continue;
4864
4865 if (mac_table->pool != pool)
4866 continue;
4867
4868 if (!ether_addr_equal(addr, mac_table->addr))
4869 continue;
4870
4871 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4872 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4873
4874 ixgbe_sync_mac_table(adapter);
4875
4876 return 0;
4877 }
4878
4879 return -ENOMEM;
4880}
4881
4882static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
4883{
4884 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4885 int ret;
4886
4887 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
4888
4889 return min_t(int, ret, 0);
4890}
4891
4892static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4893{
4894 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4895
4896 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
4897
4898 return 0;
4899}
4909
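/**
 * ixgbe_set_rx_mode - configure unicast/multicast/promiscuous receive mode
 * @netdev: network interface device structure
 *
 * Called whenever the unicast or multicast address lists or the interface
 * flags change.  Programs the unicast and multicast filter tables (falling
 * back to promiscuous modes on overflow), and updates VLAN stripping and
 * VLAN filtering/promiscuous state to match the netdev features.
 **/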
4910void ixgbe_set_rx_mode(struct net_device *netdev)
4911{
4912 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4913 struct ixgbe_hw *hw = &adapter->hw;
4914 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4915 netdev_features_t features = netdev->features;
4916 int count;
4917
4918
4919 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4920
4921
4922 fctrl &= ~IXGBE_FCTRL_SBP;
4923 fctrl |= IXGBE_FCTRL_BAM;
4924 fctrl |= IXGBE_FCTRL_DPF;
4925 fctrl |= IXGBE_FCTRL_PMCF;
4926
4927
4928 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4929 if (netdev->flags & IFF_PROMISC) {
4930 hw->addr_ctrl.user_set_promisc = true;
4931 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4932 vmolr |= IXGBE_VMOLR_MPE;
4933 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4934 } else {
4935 if (netdev->flags & IFF_ALLMULTI) {
4936 fctrl |= IXGBE_FCTRL_MPE;
4937 vmolr |= IXGBE_VMOLR_MPE;
4938 }
4939 hw->addr_ctrl.user_set_promisc = false;
4940 }
4941
4942
4943
4944
4945
4946
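	/* Write unicast addresses to the RAR table; if there is not enough
	 * space for all of them, fall back to unicast promiscuous mode.
	 */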
4947 if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4948 fctrl |= IXGBE_FCTRL_UPE;
4949 vmolr |= IXGBE_VMOLR_ROPE;
4950 }
4951
4952
4953
4954
4955
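	/* Write the multicast list to the MTA; if that fails, enable
	 * multicast promiscuous mode so multicast traffic still flows.
	 */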
4956 count = ixgbe_write_mc_addr_list(netdev);
4957 if (count < 0) {
4958 fctrl |= IXGBE_FCTRL_MPE;
4959 vmolr |= IXGBE_VMOLR_MPE;
4960 } else if (count) {
4961 vmolr |= IXGBE_VMOLR_ROMPE;
4962 }
4963
4964 if (hw->mac.type != ixgbe_mac_82598EB) {
4965 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4966 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
4967 IXGBE_VMOLR_ROPE);
4968 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
4969 }
4970
4971
4972 if (features & NETIF_F_RXALL) {
4973
4974
4975 fctrl |= (IXGBE_FCTRL_SBP |
4976 IXGBE_FCTRL_BAM |
4977 IXGBE_FCTRL_PMCF);
4978
4979 fctrl &= ~(IXGBE_FCTRL_DPF);
4980
4981 }
4982
4983 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4984
4985 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4986 ixgbe_vlan_strip_enable(adapter);
4987 else
4988 ixgbe_vlan_strip_disable(adapter);
4989
4990 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
4991 ixgbe_vlan_promisc_disable(adapter);
4992 else
4993 ixgbe_vlan_promisc_enable(adapter);
4994}
4995
4996static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4997{
4998 int q_idx;
4999
5000 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
5001 napi_enable(&adapter->q_vector[q_idx]->napi);
5002}
5003
5004static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
5005{
5006 int q_idx;
5007
5008 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
5009 napi_disable(&adapter->q_vector[q_idx]->napi);
5010}
5011
5012static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
5013{
5014 struct ixgbe_hw *hw = &adapter->hw;
5015 u32 vxlanctrl;
5016
5017 if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
5018 IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
5019 return;
5020
5021 vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
5022 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
5023
5024 if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
5025 adapter->vxlan_port = 0;
5026
5027 if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
5028 adapter->geneve_port = 0;
5029}
5030
5031#ifdef CONFIG_IXGBE_DCB
5032
5033
5034
5035
5036
5037
5038
5039
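/*
 * ixgbe_configure_dcb - configure Data Center Bridging for the device
 * @adapter: board private structure
 *
 * When DCB is disabled this only adjusts the GSO size on 82598 parts.
 * Otherwise traffic-class credits are programmed from either the CEE
 * configuration or the IEEE ETS/PFC configuration, and on non-82598
 * parts the RSS queue count is replicated per traffic class into RQTC.
 */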
5040static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
5041{
5042 struct ixgbe_hw *hw = &adapter->hw;
5043 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
5044
5045 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
5046 if (hw->mac.type == ixgbe_mac_82598EB)
5047 netif_set_gso_max_size(adapter->netdev, 65536);
5048 return;
5049 }
5050
5051 if (hw->mac.type == ixgbe_mac_82598EB)
5052 netif_set_gso_max_size(adapter->netdev, 32768);
5053
5054#ifdef IXGBE_FCOE
5055 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
5056 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
5057#endif
5058
5059
5060 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
5061 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5062 DCB_TX_CONFIG);
5063 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5064 DCB_RX_CONFIG);
5065 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
5066 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
5067 ixgbe_dcb_hw_ets(&adapter->hw,
5068 adapter->ixgbe_ieee_ets,
5069 max_frame);
5070 ixgbe_dcb_hw_pfc_config(&adapter->hw,
5071 adapter->ixgbe_ieee_pfc->pfc_en,
5072 adapter->ixgbe_ieee_ets->prio_tc);
5073 }
5074
5075
5076 if (hw->mac.type != ixgbe_mac_82598EB) {
5077 u32 msb = 0;
5078 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
5079
5080 while (rss_i) {
5081 msb++;
5082 rss_i >>= 1;
5083 }
5084
5085
5086 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
5087 }
5088}
5089#endif
5090
5091
5092#define IXGBE_ETH_FRAMING 20
5093
5094
5095
5096
5097
5098
5099
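/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 *
 * The delay value for the current maximum frame size (including FCoE jumbo
 * frames where applicable) is converted to kilobytes and subtracted from
 * the packet buffer size to obtain the XOFF threshold.
 */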
5100static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
5101{
5102 struct ixgbe_hw *hw = &adapter->hw;
5103 struct net_device *dev = adapter->netdev;
5104 int link, tc, kb, marker;
5105 u32 dv_id, rx_pba;
5106
5107
5108 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
5109
5110#ifdef IXGBE_FCOE
5111
5112 if ((dev->features & NETIF_F_FCOE_MTU) &&
5113 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5114 (pb == ixgbe_fcoe_get_tc(adapter)))
5115 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5116#endif
5117
5118
5119 switch (hw->mac.type) {
5120 case ixgbe_mac_X540:
5121 case ixgbe_mac_X550:
5122 case ixgbe_mac_X550EM_x:
5123 case ixgbe_mac_x550em_a:
5124 dv_id = IXGBE_DV_X540(link, tc);
5125 break;
5126 default:
5127 dv_id = IXGBE_DV(link, tc);
5128 break;
5129 }
5130
5131
5132 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5133 dv_id += IXGBE_B2BT(tc);
5134
5135
5136 kb = IXGBE_BT2KB(dv_id);
5137 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
5138
5139 marker = rx_pba - kb;
5140
5141
5142
5143
5144
5145 if (marker < 0) {
		e_warn(drv, "Packet Buffer(%i) can not provide enough headroom to support flow control. Decrease MTU or number of traffic classes\n",
		       pb);
5149 marker = tc + 1;
5150 }
5151
5152 return marker;
5153}
5154
5155
5156
5157
5158
5159
5160
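/**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 *
 * Converts the delay value for the current maximum frame size to
 * kilobytes to obtain the XON threshold.
 */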
5161static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
5162{
5163 struct ixgbe_hw *hw = &adapter->hw;
5164 struct net_device *dev = adapter->netdev;
5165 int tc;
5166 u32 dv_id;
5167
5168
5169 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
5170
5171#ifdef IXGBE_FCOE
5172
5173 if ((dev->features & NETIF_F_FCOE_MTU) &&
5174 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5175 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
5176 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5177#endif
5178
5179
5180 switch (hw->mac.type) {
5181 case ixgbe_mac_X540:
5182 case ixgbe_mac_X550:
5183 case ixgbe_mac_X550EM_x:
5184 case ixgbe_mac_x550em_a:
5185 dv_id = IXGBE_LOW_DV_X540(tc);
5186 break;
5187 default:
5188 dv_id = IXGBE_LOW_DV(tc);
5189 break;
5190 }
5191
5192
5193 return IXGBE_BT2KB(dv_id);
5194}
5195
5196
5197
5198
5199static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
5200{
5201 struct ixgbe_hw *hw = &adapter->hw;
5202 int num_tc = adapter->hw_tcs;
5203 int i;
5204
5205 if (!num_tc)
5206 num_tc = 1;
5207
5208 for (i = 0; i < num_tc; i++) {
5209 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
5210 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
5211
5212
5213 if (hw->fc.low_water[i] > hw->fc.high_water[i])
5214 hw->fc.low_water[i] = 0;
5215 }
5216
5217 for (; i < MAX_TRAFFIC_CLASS; i++)
5218 hw->fc.high_water[i] = 0;
5219}
5220
5221static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
5222{
5223 struct ixgbe_hw *hw = &adapter->hw;
5224 int hdrm;
5225 u8 tc = adapter->hw_tcs;
5226
5227 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5228 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5229 hdrm = 32 << adapter->fdir_pballoc;
5230 else
5231 hdrm = 0;
5232
5233 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
5234 ixgbe_pbthresh_setup(adapter);
5235}
5236
5237static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
5238{
5239 struct ixgbe_hw *hw = &adapter->hw;
5240 struct hlist_node *node2;
5241 struct ixgbe_fdir_filter *filter;
5242 u8 queue;
5243
5244 spin_lock(&adapter->fdir_perfect_lock);
5245
5246 if (!hlist_empty(&adapter->fdir_filter_list))
5247 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
5248
5249 hlist_for_each_entry_safe(filter, node2,
5250 &adapter->fdir_filter_list, fdir_node) {
5251 if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
5252 queue = IXGBE_FDIR_DROP_QUEUE;
5253 } else {
5254 u32 ring = ethtool_get_flow_spec_ring(filter->action);
5255 u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
5256
5257 if (!vf && (ring >= adapter->num_rx_queues)) {
5258 e_err(drv, "FDIR restore failed without VF, ring: %u\n",
5259 ring);
5260 continue;
5261 } else if (vf &&
5262 ((vf > adapter->num_vfs) ||
5263 ring >= adapter->num_rx_queues_per_pool)) {
5264 e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
5265 vf, ring);
5266 continue;
5267 }
5268
5269
5270 if (!vf)
5271 queue = adapter->rx_ring[ring]->reg_idx;
5272 else
5273 queue = ((vf - 1) *
5274 adapter->num_rx_queues_per_pool) + ring;
5275 }
5276
5277 ixgbe_fdir_write_perfect_filter_82599(hw,
5278 &filter->filter, filter->sw_idx, queue);
5279 }
5280
5281 spin_unlock(&adapter->fdir_perfect_lock);
5282}
5283
5284
5285
5286
5287
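/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/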
5288static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
5289{
5290 u16 i = rx_ring->next_to_clean;
5291 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
5292
5293 if (rx_ring->xsk_umem) {
5294 ixgbe_xsk_clean_rx_ring(rx_ring);
5295 goto skip_free;
5296 }
5297
5298
5299 while (i != rx_ring->next_to_alloc) {
5300 if (rx_buffer->skb) {
5301 struct sk_buff *skb = rx_buffer->skb;
5302 if (IXGBE_CB(skb)->page_released)
5303 dma_unmap_page_attrs(rx_ring->dev,
5304 IXGBE_CB(skb)->dma,
5305 ixgbe_rx_pg_size(rx_ring),
5306 DMA_FROM_DEVICE,
5307 IXGBE_RX_DMA_ATTR);
5308 dev_kfree_skb(skb);
5309 }
5310
5311
5312
5313
5314 dma_sync_single_range_for_cpu(rx_ring->dev,
5315 rx_buffer->dma,
5316 rx_buffer->page_offset,
5317 ixgbe_rx_bufsz(rx_ring),
5318 DMA_FROM_DEVICE);
5319
5320
5321 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
5322 ixgbe_rx_pg_size(rx_ring),
5323 DMA_FROM_DEVICE,
5324 IXGBE_RX_DMA_ATTR);
5325 __page_frag_cache_drain(rx_buffer->page,
5326 rx_buffer->pagecnt_bias);
5327
5328 i++;
5329 rx_buffer++;
5330 if (i == rx_ring->count) {
5331 i = 0;
5332 rx_buffer = rx_ring->rx_buffer_info;
5333 }
5334 }
5335
5336skip_free:
5337 rx_ring->next_to_alloc = 0;
5338 rx_ring->next_to_clean = 0;
5339 rx_ring->next_to_use = 0;
5340}
5341
5342static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
5343 struct ixgbe_fwd_adapter *accel)
5344{
5345 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
5346 int num_tc = netdev_get_num_tc(adapter->netdev);
5347 struct net_device *vdev = accel->netdev;
5348 int i, baseq, err;
5349
5350 baseq = accel->pool * adapter->num_rx_queues_per_pool;
5351 netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
5352 accel->pool, adapter->num_rx_pools,
5353 baseq, baseq + adapter->num_rx_queues_per_pool);
5354
5355 accel->rx_base_queue = baseq;
5356 accel->tx_base_queue = baseq;
5357
5358
5359 for (i = 0; i < num_tc; i++)
5360 netdev_bind_sb_channel_queue(adapter->netdev, vdev,
5361 i, rss_i, baseq + (rss_i * i));
5362
5363 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5364 adapter->rx_ring[baseq + i]->netdev = vdev;
5365
5366
5367
5368
5369 wmb();
5370
5371
5372
5373
5374 err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
5375 VMDQ_P(accel->pool));
5376 if (err >= 0)
5377 return 0;
5378
5379
5380 macvlan_release_l2fw_offload(vdev);
5381
5382 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5383 adapter->rx_ring[baseq + i]->netdev = NULL;
5384
5385 netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
5386
5387
5388 netdev_unbind_sb_channel(adapter->netdev, vdev);
5389 netdev_set_sb_channel(vdev, 0);
5390
5391 clear_bit(accel->pool, adapter->fwd_bitmask);
5392 kfree(accel);
5393
5394 return err;
5395}
5396
5397static int ixgbe_macvlan_up(struct net_device *vdev, void *data)
5398{
5399 struct ixgbe_adapter *adapter = data;
5400 struct ixgbe_fwd_adapter *accel;
5401
5402 if (!netif_is_macvlan(vdev))
5403 return 0;
5404
5405 accel = macvlan_accel_priv(vdev);
5406 if (!accel)
5407 return 0;
5408
5409 ixgbe_fwd_ring_up(adapter, accel);
5410
5411 return 0;
5412}
5413
5414static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
5415{
5416 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5417 ixgbe_macvlan_up, adapter);
5418}
5419
5420static void ixgbe_configure(struct ixgbe_adapter *adapter)
5421{
5422 struct ixgbe_hw *hw = &adapter->hw;
5423
5424 ixgbe_configure_pb(adapter);
5425#ifdef CONFIG_IXGBE_DCB
5426 ixgbe_configure_dcb(adapter);
5427#endif
5428
5429
5430
5431
5432 ixgbe_configure_virtualization(adapter);
5433
5434 ixgbe_set_rx_mode(adapter->netdev);
5435 ixgbe_restore_vlan(adapter);
5436 ixgbe_ipsec_restore(adapter);
5437
5438 switch (hw->mac.type) {
5439 case ixgbe_mac_82599EB:
5440 case ixgbe_mac_X540:
5441 hw->mac.ops.disable_rx_buff(hw);
5442 break;
5443 default:
5444 break;
5445 }
5446
5447 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5448 ixgbe_init_fdir_signature_82599(&adapter->hw,
5449 adapter->fdir_pballoc);
5450 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5451 ixgbe_init_fdir_perfect_82599(&adapter->hw,
5452 adapter->fdir_pballoc);
5453 ixgbe_fdir_filter_restore(adapter);
5454 }
5455
5456 switch (hw->mac.type) {
5457 case ixgbe_mac_82599EB:
5458 case ixgbe_mac_X540:
5459 hw->mac.ops.enable_rx_buff(hw);
5460 break;
5461 default:
5462 break;
5463 }
5464
5465#ifdef CONFIG_IXGBE_DCA
5466
5467 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
5468 ixgbe_setup_dca(adapter);
5469#endif
5470
5471#ifdef IXGBE_FCOE
5472
5473 ixgbe_configure_fcoe(adapter);
5474
5475#endif
5476 ixgbe_configure_tx(adapter);
5477 ixgbe_configure_rx(adapter);
5478 ixgbe_configure_dfwd(adapter);
5479}
5480
5481
5482
5483
5484
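/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 *
 * Defers the actual SFP configuration to the service task by flagging
 * that an SFP reset (and, on 82598, an SFP module search) is needed.
 **/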
5485static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
5486{
5487
5488
5489
5490
5491
5492
5493 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5494 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5495
5496 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5497 adapter->sfp_poll_time = 0;
5498}
5499
5500
5501
5502
5503
5504
5505
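/**
 * ixgbe_non_sfp_link_config - set up non-SFP link
 * @hw: pointer to private hardware struct
 *
 * Checks the current link state, selects the advertised speed (or the
 * device link capabilities if none is advertised) and sets up the link.
 * Returns 0 on success, an IXGBE_ERR_* code otherwise.
 **/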
5506static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
5507{
5508 u32 speed;
5509 bool autoneg, link_up = false;
5510 int ret = IXGBE_ERR_LINK_SETUP;
5511
5512 if (hw->mac.ops.check_link)
5513 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
5514
5515 if (ret)
5516 return ret;
5517
5518 speed = hw->phy.autoneg_advertised;
5519 if ((!speed) && (hw->mac.ops.get_link_capabilities))
5520 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5521 &autoneg);
5522 if (ret)
5523 return ret;
5524
5525 if (hw->mac.ops.setup_link)
5526 ret = hw->mac.ops.setup_link(hw, speed, link_up);
5527
5528 return ret;
5529}
5530
5531static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5532{
5533 struct ixgbe_hw *hw = &adapter->hw;
5534 u32 gpie = 0;
5535
5536 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5537 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5538 IXGBE_GPIE_OCD;
5539 gpie |= IXGBE_GPIE_EIAME;
5540
5541
5542
5543
5544 switch (hw->mac.type) {
5545 case ixgbe_mac_82598EB:
5546 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5547 break;
5548 case ixgbe_mac_82599EB:
5549 case ixgbe_mac_X540:
5550 case ixgbe_mac_X550:
5551 case ixgbe_mac_X550EM_x:
5552 case ixgbe_mac_x550em_a:
5553 default:
5554 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5555 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5556 break;
5557 }
5558 } else {
5559
5560
5561 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5562 }
5563
5564
5565
5566
5567 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
5568 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5569
5570 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
5571 case IXGBE_82599_VMDQ_8Q_MASK:
5572 gpie |= IXGBE_GPIE_VTMODE_16;
5573 break;
5574 case IXGBE_82599_VMDQ_4Q_MASK:
5575 gpie |= IXGBE_GPIE_VTMODE_32;
5576 break;
5577 default:
5578 gpie |= IXGBE_GPIE_VTMODE_64;
5579 break;
5580 }
5581 }
5582
5583
5584 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
5585 switch (adapter->hw.mac.type) {
5586 case ixgbe_mac_82599EB:
5587 gpie |= IXGBE_SDP0_GPIEN_8259X;
5588 break;
5589 default:
5590 break;
5591 }
5592 }
5593
5594
5595 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
5596 gpie |= IXGBE_SDP1_GPIEN(hw);
5597
5598 switch (hw->mac.type) {
5599 case ixgbe_mac_82599EB:
5600 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5601 break;
5602 case ixgbe_mac_X550EM_x:
5603 case ixgbe_mac_x550em_a:
5604 gpie |= IXGBE_SDP0_GPIEN_X540;
5605 break;
5606 default:
5607 break;
5608 }
5609
5610 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5611}
5612
5613static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
5614{
5615 struct ixgbe_hw *hw = &adapter->hw;
5616 int err;
5617 u32 ctrl_ext;
5618
5619 ixgbe_get_hw_control(adapter);
5620 ixgbe_setup_gpie(adapter);
5621
5622 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
5623 ixgbe_configure_msix(adapter);
5624 else
5625 ixgbe_configure_msi_and_legacy(adapter);
5626
5627
5628 if (hw->mac.ops.enable_tx_laser)
5629 hw->mac.ops.enable_tx_laser(hw);
5630
5631 if (hw->phy.ops.set_phy_power)
5632 hw->phy.ops.set_phy_power(hw, true);
5633
5634 smp_mb__before_atomic();
5635 clear_bit(__IXGBE_DOWN, &adapter->state);
5636 ixgbe_napi_enable_all(adapter);
5637
5638 if (ixgbe_is_sfp(hw)) {
5639 ixgbe_sfp_link_config(adapter);
5640 } else {
5641 err = ixgbe_non_sfp_link_config(hw);
5642 if (err)
5643 e_err(probe, "link_config FAILED %d\n", err);
5644 }
5645
5646
5647 IXGBE_READ_REG(hw, IXGBE_EICR);
5648 ixgbe_irq_enable(adapter, true, true);
5649
5650
5651
5652
5653
5654 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5655 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5656 if (esdp & IXGBE_ESDP_SDP1)
5657 e_crit(drv, "Fan has stopped, replace the adapter\n");
5658 }
5659
5660
5661
5662 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5663 adapter->link_check_timeout = jiffies;
5664 mod_timer(&adapter->service_timer, jiffies);
5665
5666
5667 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5668 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5669 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5670}
5671
5672void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5673{
5674 WARN_ON(in_interrupt());
5675
5676 netif_trans_update(adapter->netdev);
5677
5678 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5679 usleep_range(1000, 2000);
5680 if (adapter->hw.phy.type == ixgbe_phy_fw)
5681 ixgbe_watchdog_link_is_down(adapter);
5682 ixgbe_down(adapter);
5683
5684
5685
5686
5687
5688
5689 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5690 msleep(2000);
5691 ixgbe_up(adapter);
5692 clear_bit(__IXGBE_RESETTING, &adapter->state);
5693}
5694
5695void ixgbe_up(struct ixgbe_adapter *adapter)
5696{
5697
5698 ixgbe_configure(adapter);
5699
5700 ixgbe_up_complete(adapter);
5701}
5702
5703static unsigned long ixgbe_get_completion_timeout(struct ixgbe_adapter *adapter)
5704{
5705 u16 devctl2;
5706
5707 pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2);
5708
5709 switch (devctl2 & IXGBE_PCIDEVCTRL2_TIMEO_MASK) {
5710 case IXGBE_PCIDEVCTRL2_17_34s:
5711 case IXGBE_PCIDEVCTRL2_4_8s:
5712
5713
5714
5715
5716 case IXGBE_PCIDEVCTRL2_1_2s:
5717 return 2000000ul;
5718 case IXGBE_PCIDEVCTRL2_260_520ms:
5719 return 520000ul;
5720 case IXGBE_PCIDEVCTRL2_65_130ms:
5721 return 130000ul;
5722 case IXGBE_PCIDEVCTRL2_16_32ms:
5723 return 32000ul;
5724 case IXGBE_PCIDEVCTRL2_1_2ms:
5725 return 2000ul;
5726 case IXGBE_PCIDEVCTRL2_50_100us:
5727 return 100ul;
5728 case IXGBE_PCIDEVCTRL2_16_32ms_def:
5729 return 32000ul;
5730 default:
5731 break;
5732 }
5733
5734
5735
5736
5737 return 32000ul;
5738}
5739
5740void ixgbe_disable_rx(struct ixgbe_adapter *adapter)
5741{
5742 unsigned long wait_delay, delay_interval;
5743 struct ixgbe_hw *hw = &adapter->hw;
5744 int i, wait_loop;
5745 u32 rxdctl;
5746
5747
5748 hw->mac.ops.disable_rx(hw);
5749
5750 if (ixgbe_removed(hw->hw_addr))
5751 return;
5752
5753
5754 for (i = 0; i < adapter->num_rx_queues; i++) {
5755 struct ixgbe_ring *ring = adapter->rx_ring[i];
5756 u8 reg_idx = ring->reg_idx;
5757
5758 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5759 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5760 rxdctl |= IXGBE_RXDCTL_SWFLSH;
5761
5762
5763 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
5764 }
5765
5766
5767 if (hw->mac.type == ixgbe_mac_82598EB &&
5768 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5769 return;
5770
5771
5772
5773
5774
5775
5776
5777
5778
5779
5780
5781
5782 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5783
5784 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5785 wait_delay = delay_interval;
5786
5787 while (wait_loop--) {
5788 usleep_range(wait_delay, wait_delay + 10);
5789 wait_delay += delay_interval * 2;
5790 rxdctl = 0;
5791
5792
5793
5794
5795
5796
5797 for (i = 0; i < adapter->num_rx_queues; i++) {
5798 struct ixgbe_ring *ring = adapter->rx_ring[i];
5799 u8 reg_idx = ring->reg_idx;
5800
5801 rxdctl |= IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
5802 }
5803
5804 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
5805 return;
5806 }
5807
5808 e_err(drv,
5809 "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5810}
5811
5812void ixgbe_disable_tx(struct ixgbe_adapter *adapter)
5813{
5814 unsigned long wait_delay, delay_interval;
5815 struct ixgbe_hw *hw = &adapter->hw;
5816 int i, wait_loop;
5817 u32 txdctl;
5818
5819 if (ixgbe_removed(hw->hw_addr))
5820 return;
5821
5822
5823 for (i = 0; i < adapter->num_tx_queues; i++) {
5824 struct ixgbe_ring *ring = adapter->tx_ring[i];
5825 u8 reg_idx = ring->reg_idx;
5826
5827 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5828 }
5829
5830
5831 for (i = 0; i < adapter->num_xdp_queues; i++) {
5832 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5833 u8 reg_idx = ring->reg_idx;
5834
5835 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5836 }
5837
5838
5839
5840
5841
5842
5843 if (!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
5844 goto dma_engine_disable;
5845
5846
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
5858
5859 wait_loop = IXGBE_MAX_RX_DESC_POLL;
5860 wait_delay = delay_interval;
5861
5862 while (wait_loop--) {
5863 usleep_range(wait_delay, wait_delay + 10);
5864 wait_delay += delay_interval * 2;
5865 txdctl = 0;
5866
5867
5868
5869
5870
5871
5872 for (i = 0; i < adapter->num_tx_queues; i++) {
5873 struct ixgbe_ring *ring = adapter->tx_ring[i];
5874 u8 reg_idx = ring->reg_idx;
5875
5876 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5877 }
5878 for (i = 0; i < adapter->num_xdp_queues; i++) {
5879 struct ixgbe_ring *ring = adapter->xdp_ring[i];
5880 u8 reg_idx = ring->reg_idx;
5881
5882 txdctl |= IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
5883 }
5884
5885 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
5886 goto dma_engine_disable;
5887 }
5888
5889 e_err(drv,
5890 "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n");
5891
5892dma_engine_disable:
5893
5894 switch (hw->mac.type) {
5895 case ixgbe_mac_82599EB:
5896 case ixgbe_mac_X540:
5897 case ixgbe_mac_X550:
5898 case ixgbe_mac_X550EM_x:
5899 case ixgbe_mac_x550em_a:
5900 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5901 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5902 ~IXGBE_DMATXCTL_TE));
		/* fall through */
5904 default:
5905 break;
5906 }
5907}
5908
5909void ixgbe_reset(struct ixgbe_adapter *adapter)
5910{
5911 struct ixgbe_hw *hw = &adapter->hw;
5912 struct net_device *netdev = adapter->netdev;
5913 int err;
5914
5915 if (ixgbe_removed(hw->hw_addr))
5916 return;
5917
5918 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5919 usleep_range(1000, 2000);
5920
5921
5922 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5923 IXGBE_FLAG2_SFP_NEEDS_RESET);
5924 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5925
5926 err = hw->mac.ops.init_hw(hw);
5927 switch (err) {
5928 case 0:
5929 case IXGBE_ERR_SFP_NOT_PRESENT:
5930 case IXGBE_ERR_SFP_NOT_SUPPORTED:
5931 break;
5932 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5933 e_dev_err("master disable timed out\n");
5934 break;
5935 case IXGBE_ERR_EEPROM_VERSION:
5936
5937 e_dev_warn("This device is a pre-production adapter/LOM. "
5938 "Please be aware there may be issues associated with "
5939 "your hardware. If you are experiencing problems "
5940 "please contact your Intel or hardware "
5941 "representative who provided you with this "
5942 "hardware.\n");
5943 break;
5944 default:
5945 e_dev_err("Hardware Error: %d\n", err);
5946 }
5947
5948 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5949
5950
5951 ixgbe_flush_sw_mac_table(adapter);
5952 __dev_uc_unsync(netdev, NULL);
5953
5954
5955 ixgbe_mac_set_default_filter(adapter);
5956
5957
5958 if (hw->mac.san_mac_rar_index)
5959 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5960
5961 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5962 ixgbe_ptp_reset(adapter);
5963
5964 if (hw->phy.ops.set_phy_power) {
5965 if (!netif_running(adapter->netdev) && !adapter->wol)
5966 hw->phy.ops.set_phy_power(hw, false);
5967 else
5968 hw->phy.ops.set_phy_power(hw, true);
5969 }
5970}
5971
5972
5973
5974
5975
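/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/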
5976static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5977{
5978 u16 i = tx_ring->next_to_clean;
5979 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
5980
5981 if (tx_ring->xsk_umem) {
5982 ixgbe_xsk_clean_tx_ring(tx_ring);
5983 goto out;
5984 }
5985
5986 while (i != tx_ring->next_to_use) {
5987 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
5988
5989
5990 if (ring_is_xdp(tx_ring))
5991 xdp_return_frame(tx_buffer->xdpf);
5992 else
5993 dev_kfree_skb_any(tx_buffer->skb);
5994
5995
5996 dma_unmap_single(tx_ring->dev,
5997 dma_unmap_addr(tx_buffer, dma),
5998 dma_unmap_len(tx_buffer, len),
5999 DMA_TO_DEVICE);
6000
6001
6002 eop_desc = tx_buffer->next_to_watch;
6003 tx_desc = IXGBE_TX_DESC(tx_ring, i);
6004
6005
6006 while (tx_desc != eop_desc) {
6007 tx_buffer++;
6008 tx_desc++;
6009 i++;
6010 if (unlikely(i == tx_ring->count)) {
6011 i = 0;
6012 tx_buffer = tx_ring->tx_buffer_info;
6013 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6014 }
6015
6016
6017 if (dma_unmap_len(tx_buffer, len))
6018 dma_unmap_page(tx_ring->dev,
6019 dma_unmap_addr(tx_buffer, dma),
6020 dma_unmap_len(tx_buffer, len),
6021 DMA_TO_DEVICE);
6022 }
6023
6024
6025 tx_buffer++;
6026 i++;
6027 if (unlikely(i == tx_ring->count)) {
6028 i = 0;
6029 tx_buffer = tx_ring->tx_buffer_info;
6030 }
6031 }
6032
6033
6034 if (!ring_is_xdp(tx_ring))
6035 netdev_tx_reset_queue(txring_txq(tx_ring));
6036
6037out:
6038
6039 tx_ring->next_to_use = 0;
6040 tx_ring->next_to_clean = 0;
6041}
6042
6043
6044
6045
6046
6047static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
6048{
6049 int i;
6050
6051 for (i = 0; i < adapter->num_rx_queues; i++)
6052 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
6053}
6054
6055
6056
6057
6058
6059static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
6060{
6061 int i;
6062
6063 for (i = 0; i < adapter->num_tx_queues; i++)
6064 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
6065 for (i = 0; i < adapter->num_xdp_queues; i++)
6066 ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
6067}
6068
6069static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
6070{
6071 struct hlist_node *node2;
6072 struct ixgbe_fdir_filter *filter;
6073
6074 spin_lock(&adapter->fdir_perfect_lock);
6075
6076 hlist_for_each_entry_safe(filter, node2,
6077 &adapter->fdir_filter_list, fdir_node) {
6078 hlist_del(&filter->fdir_node);
6079 kfree(filter);
6080 }
6081 adapter->fdir_filter_count = 0;
6082
6083 spin_unlock(&adapter->fdir_perfect_lock);
6084}
6085
6086void ixgbe_down(struct ixgbe_adapter *adapter)
6087{
6088 struct net_device *netdev = adapter->netdev;
6089 struct ixgbe_hw *hw = &adapter->hw;
6090 int i;
6091
6092
6093 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
6094 return;
6095
6096
6097 netif_tx_stop_all_queues(netdev);
6098
6099
6100 netif_carrier_off(netdev);
6101 netif_tx_disable(netdev);
6102
6103
6104 ixgbe_disable_rx(adapter);
6105
6106
6107 if (adapter->xdp_ring[0])
6108 synchronize_rcu();
6109
6110 ixgbe_irq_disable(adapter);
6111
6112 ixgbe_napi_disable_all(adapter);
6113
6114 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
6115 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
6116 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6117
6118 del_timer_sync(&adapter->service_timer);
6119
6120 if (adapter->num_vfs) {
6121
6122 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
6123
6124
6125 for (i = 0 ; i < adapter->num_vfs; i++)
6126 adapter->vfinfo[i].clear_to_send = false;
6127
6128
6129 ixgbe_ping_all_vfs(adapter);
6130
6131
6132 ixgbe_disable_tx_rx(adapter);
6133 }
6134
6135
6136 ixgbe_disable_tx(adapter);
6137
6138 if (!pci_channel_offline(adapter->pdev))
6139 ixgbe_reset(adapter);
6140
6141
6142 if (hw->mac.ops.disable_tx_laser)
6143 hw->mac.ops.disable_tx_laser(hw);
6144
6145 ixgbe_clean_all_tx_rings(adapter);
6146 ixgbe_clean_all_rx_rings(adapter);
6147}
6148
6149
6150
6151
6152
6153static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
6154{
6155 struct ixgbe_hw *hw = &adapter->hw;
6156
6157 switch (hw->device_id) {
6158 case IXGBE_DEV_ID_X550EM_A_1G_T:
6159 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6160 if (!hw->phy.eee_speeds_supported)
6161 break;
6162 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
6163 if (!hw->phy.eee_speeds_advertised)
6164 break;
6165 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
6166 break;
6167 default:
6168 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
6169 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
6170 break;
6171 }
6172}
6173
6174
6175
6176
6177
6178static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6179{
6180 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6181
6182
6183 ixgbe_tx_timeout_reset(adapter);
6184}
6185
6186#ifdef CONFIG_IXGBE_DCB
6187static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
6188{
6189 struct ixgbe_hw *hw = &adapter->hw;
6190 struct tc_configuration *tc;
6191 int j;
6192
6193 switch (hw->mac.type) {
6194 case ixgbe_mac_82598EB:
6195 case ixgbe_mac_82599EB:
6196 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
6197 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
6198 break;
6199 case ixgbe_mac_X540:
6200 case ixgbe_mac_X550:
6201 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
6202 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
6203 break;
6204 case ixgbe_mac_X550EM_x:
6205 case ixgbe_mac_x550em_a:
6206 default:
6207 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
6208 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
6209 break;
6210 }
6211
6212
6213 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
6214 tc = &adapter->dcb_cfg.tc_config[j];
6215 tc->path[DCB_TX_CONFIG].bwg_id = 0;
6216 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
6217 tc->path[DCB_RX_CONFIG].bwg_id = 0;
6218 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
6219 tc->dcb_pfc = pfc_disabled;
6220 }
6221
6222
6223 tc = &adapter->dcb_cfg.tc_config[0];
6224 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
6225 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
6226
6227 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
6228 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
6229 adapter->dcb_cfg.pfc_mode_enable = false;
6230 adapter->dcb_set_bitmap = 0x00;
6231 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
6232 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
6233 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
6234 sizeof(adapter->temp_dcb_cfg));
6235}
6236#endif
6246
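/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 * @ii: pointer to ixgbe_info for device
 *
 * ixgbe_sw_init initializes the adapter private data structure.
 * Fields are initialized based on PCI device information and
 * per-MAC-type capabilities, and default flow control and ring
 * parameters are set up.
 **/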
6247static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
6248 const struct ixgbe_info *ii)
6249{
6250 struct ixgbe_hw *hw = &adapter->hw;
6251 struct pci_dev *pdev = adapter->pdev;
6252 unsigned int rss, fdir;
6253 u32 fwsm;
6254 int i;
6255
6256
6257
6258 hw->vendor_id = pdev->vendor;
6259 hw->device_id = pdev->device;
6260 hw->revision_id = pdev->revision;
6261 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6262 hw->subsystem_device_id = pdev->subsystem_device;
6263
6264
6265 ii->get_invariants(hw);
6266
6267
6268 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
6269 adapter->ring_feature[RING_F_RSS].limit = rss;
6270 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
6271 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
6272 adapter->atr_sample_rate = 20;
6273 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
6274 adapter->ring_feature[RING_F_FDIR].limit = fdir;
6275 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
6276 adapter->ring_feature[RING_F_VMDQ].limit = 1;
6277#ifdef CONFIG_IXGBE_DCA
6278 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
6279#endif
6280#ifdef CONFIG_IXGBE_DCB
6281 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
6282 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6283#endif
6284#ifdef IXGBE_FCOE
6285 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
6286 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6287#ifdef CONFIG_IXGBE_DCB
6288
6289 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
6290#endif
6291#endif
6292
6293
6294 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
6295 GFP_KERNEL);
6296 if (!adapter->jump_tables[0])
6297 return -ENOMEM;
6298 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
6299
6300 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
6301 adapter->jump_tables[i] = NULL;
6302
6303 adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
6304 sizeof(struct ixgbe_mac_addr),
6305 GFP_KERNEL);
6306 if (!adapter->mac_table)
6307 return -ENOMEM;
6308
6309 if (ixgbe_init_rss_key(adapter))
6310 return -ENOMEM;
6311
6312 adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
6313 if (!adapter->af_xdp_zc_qps)
6314 return -ENOMEM;
6315
6316
6317 switch (hw->mac.type) {
6318 case ixgbe_mac_82598EB:
6319 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
6320
6321 if (hw->device_id == IXGBE_DEV_ID_82598AT)
6322 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
6323
6324 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
6325 adapter->ring_feature[RING_F_FDIR].limit = 0;
6326 adapter->atr_sample_rate = 0;
6327 adapter->fdir_pballoc = 0;
6328#ifdef IXGBE_FCOE
6329 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6330 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6331#ifdef CONFIG_IXGBE_DCB
6332 adapter->fcoe.up = 0;
6333#endif
6334#endif
6335 break;
6336 case ixgbe_mac_82599EB:
6337 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
6338 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6339 break;
6340 case ixgbe_mac_X540:
6341 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
6342 if (fwsm & IXGBE_FWSM_TS_ENABLED)
6343 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6344 break;
6345 case ixgbe_mac_x550em_a:
6346 adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
6347 switch (hw->device_id) {
6348 case IXGBE_DEV_ID_X550EM_A_1G_T:
6349 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6350 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6351 break;
6352 default:
6353 break;
6354 }
		/* fall through */
6356 case ixgbe_mac_X550EM_x:
6357#ifdef CONFIG_IXGBE_DCB
6358 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
6359#endif
6360#ifdef IXGBE_FCOE
6361 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6362#ifdef CONFIG_IXGBE_DCB
6363 adapter->fcoe.up = 0;
6364#endif
6365#endif
		/* fall through */
6367 case ixgbe_mac_X550:
6368 if (hw->mac.type == ixgbe_mac_X550)
6369 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6370#ifdef CONFIG_IXGBE_DCA
6371 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
6372#endif
6373 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
6374 break;
6375 default:
6376 break;
6377 }
6378
6379#ifdef IXGBE_FCOE
6380
6381 spin_lock_init(&adapter->fcoe.lock);
6382
6383#endif
6384
6385 spin_lock_init(&adapter->fdir_perfect_lock);
6386
6387#ifdef CONFIG_IXGBE_DCB
6388 ixgbe_init_dcb(adapter);
6389#endif
6390 ixgbe_init_ipsec_offload(adapter);
6391
6392
6393 hw->fc.requested_mode = ixgbe_fc_full;
6394 hw->fc.current_mode = ixgbe_fc_full;
6395 ixgbe_pbthresh_setup(adapter);
6396 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
6397 hw->fc.send_xon = true;
6398 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
6399
6400#ifdef CONFIG_PCI_IOV
6401 if (max_vfs > 0)
6402 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
6403
6404
6405 if (hw->mac.type != ixgbe_mac_82598EB) {
6406 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
6407 max_vfs = 0;
6408 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
6409 }
6410 }
6411#endif
6412
6413
6414 adapter->rx_itr_setting = 1;
6415 adapter->tx_itr_setting = 1;
6416
6417
6418 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
6419 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
6420
6421
6422 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
6423
6424
6425 if (ixgbe_init_eeprom_params_generic(hw)) {
6426 e_dev_err("EEPROM initialization failed\n");
6427 return -EIO;
6428 }
6429
6430
6431 set_bit(0, adapter->fwd_bitmask);
6432 set_bit(__IXGBE_DOWN, &adapter->state);
6433
6434 return 0;
6435}
6436
6437
6438
6439
6440
6441
6442
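/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Allocates the tx_buffer_info array and the DMA-coherent descriptor
 * area for one transmit ring, preferring the ring's NUMA node.
 *
 * Return 0 on success, negative on failure
 **/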
6443int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
6444{
6445 struct device *dev = tx_ring->dev;
6446 int orig_node = dev_to_node(dev);
6447 int ring_node = NUMA_NO_NODE;
6448 int size;
6449
6450 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
6451
6452 if (tx_ring->q_vector)
6453 ring_node = tx_ring->q_vector->numa_node;
6454
6455 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
6456 if (!tx_ring->tx_buffer_info)
6457 tx_ring->tx_buffer_info = vmalloc(size);
6458 if (!tx_ring->tx_buffer_info)
6459 goto err;
6460
6461
6462 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
6463 tx_ring->size = ALIGN(tx_ring->size, 4096);
6464
6465 set_dev_node(dev, ring_node);
6466 tx_ring->desc = dma_alloc_coherent(dev,
6467 tx_ring->size,
6468 &tx_ring->dma,
6469 GFP_KERNEL);
6470 set_dev_node(dev, orig_node);
6471 if (!tx_ring->desc)
6472 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6473 &tx_ring->dma, GFP_KERNEL);
6474 if (!tx_ring->desc)
6475 goto err;
6476
6477 tx_ring->next_to_use = 0;
6478 tx_ring->next_to_clean = 0;
6479 return 0;
6480
6481err:
6482 vfree(tx_ring->tx_buffer_info);
6483 tx_ring->tx_buffer_info = NULL;
6484 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
6485 return -ENOMEM;
6486}
6487
6488
6489
6490
6491
6492
6493
6494
6495
6496
6497
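/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * Allocates Tx resources for all transmit and XDP queues; on failure,
 * frees any rings that were already set up.
 *
 * Return 0 on success, negative on failure
 **/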
6498static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
6499{
6500 int i, j = 0, err = 0;
6501
6502 for (i = 0; i < adapter->num_tx_queues; i++) {
6503 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6504 if (!err)
6505 continue;
6506
6507 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
6508 goto err_setup_tx;
6509 }
6510 for (j = 0; j < adapter->num_xdp_queues; j++) {
6511 err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
6512 if (!err)
6513 continue;
6514
6515 e_err(probe, "Allocation for Tx Queue %u failed\n", j);
6516 goto err_setup_tx;
6517 }
6518
6519 return 0;
6520err_setup_tx:
6521
6522 while (j--)
6523 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
6524 while (i--)
6525 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6526 return err;
6527}
6528
6529
6530
6531
6532
6533
6534
6535
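/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: pointer to ixgbe_adapter
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Allocates the rx_buffer_info array and the descriptor area for one
 * receive ring, registers the ring's XDP rxq info and attaches the
 * currently loaded XDP program.
 *
 * Returns 0 on success, negative on failure
 **/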
6536int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
6537 struct ixgbe_ring *rx_ring)
6538{
6539 struct device *dev = rx_ring->dev;
6540 int orig_node = dev_to_node(dev);
6541 int ring_node = NUMA_NO_NODE;
6542 int size;
6543
6544 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
6545
6546 if (rx_ring->q_vector)
6547 ring_node = rx_ring->q_vector->numa_node;
6548
6549 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
6550 if (!rx_ring->rx_buffer_info)
6551 rx_ring->rx_buffer_info = vmalloc(size);
6552 if (!rx_ring->rx_buffer_info)
6553 goto err;
6554
6555
6556 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
6557 rx_ring->size = ALIGN(rx_ring->size, 4096);
6558
6559 set_dev_node(dev, ring_node);
6560 rx_ring->desc = dma_alloc_coherent(dev,
6561 rx_ring->size,
6562 &rx_ring->dma,
6563 GFP_KERNEL);
6564 set_dev_node(dev, orig_node);
6565 if (!rx_ring->desc)
6566 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
6567 &rx_ring->dma, GFP_KERNEL);
6568 if (!rx_ring->desc)
6569 goto err;
6570
6571 rx_ring->next_to_clean = 0;
6572 rx_ring->next_to_use = 0;
6573
6574
6575 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
6576 rx_ring->queue_index) < 0)
6577 goto err;
6578
6579 rx_ring->xdp_prog = adapter->xdp_prog;
6580
6581 return 0;
6582err:
6583 vfree(rx_ring->rx_buffer_info);
6584 rx_ring->rx_buffer_info = NULL;
6585 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
6586 return -ENOMEM;
6587}
6588
6589
6590
6591
6592
6593
6594
6595
6596
6597
6598
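/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * Allocates Rx resources for all receive queues (and FCoE DDP resources
 * where enabled); on failure, frees any rings that were already set up.
 *
 * Return 0 on success, negative on failure
 **/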
6599static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
6600{
6601 int i, err = 0;
6602
6603 for (i = 0; i < adapter->num_rx_queues; i++) {
6604 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
6605 if (!err)
6606 continue;
6607
6608 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
6609 goto err_setup_rx;
6610 }
6611
6612#ifdef IXGBE_FCOE
6613 err = ixgbe_setup_fcoe_ddp_resources(adapter);
6614 if (!err)
6615#endif
6616 return 0;
6617err_setup_rx:
6618
6619 while (i--)
6620 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6621 return err;
6622}
6623
6624
6625
6626
6627
6628
6629
6630void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
6631{
6632 ixgbe_clean_tx_ring(tx_ring);
6633
6634 vfree(tx_ring->tx_buffer_info);
6635 tx_ring->tx_buffer_info = NULL;
6636
6637
6638 if (!tx_ring->desc)
6639 return;
6640
6641 dma_free_coherent(tx_ring->dev, tx_ring->size,
6642 tx_ring->desc, tx_ring->dma);
6643
6644 tx_ring->desc = NULL;
6645}
6646
6647
6648
6649
6650
6651
6652
6653static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
6654{
6655 int i;
6656
6657 for (i = 0; i < adapter->num_tx_queues; i++)
6658 if (adapter->tx_ring[i]->desc)
6659 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6660 for (i = 0; i < adapter->num_xdp_queues; i++)
6661 if (adapter->xdp_ring[i]->desc)
6662 ixgbe_free_tx_resources(adapter->xdp_ring[i]);
6663}
6664
6665
6666
6667
6668
6669
6670
6671void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
6672{
6673 ixgbe_clean_rx_ring(rx_ring);
6674
6675 rx_ring->xdp_prog = NULL;
6676 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
6677 vfree(rx_ring->rx_buffer_info);
6678 rx_ring->rx_buffer_info = NULL;
6679
6680
6681 if (!rx_ring->desc)
6682 return;
6683
6684 dma_free_coherent(rx_ring->dev, rx_ring->size,
6685 rx_ring->desc, rx_ring->dma);
6686
6687 rx_ring->desc = NULL;
6688}
6689
6690
6691
6692
6693
6694
6695
6696static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
6697{
6698 int i;
6699
6700#ifdef IXGBE_FCOE
6701 ixgbe_free_fcoe_ddp_resources(adapter);
6702
6703#endif
6704 for (i = 0; i < adapter->num_rx_queues; i++)
6705 if (adapter->rx_ring[i]->desc)
6706 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6707}
6708
6709
6710
6711
6712
6713
6714
6715
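/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Rejects MTUs that do not fit the Rx buffers when an XDP program is
 * loaded, applies the new MTU and reinitializes the interface if it is
 * running.  Returns 0 on success, negative on failure.
 **/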
6716static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6717{
6718 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6719
6720 if (adapter->xdp_prog) {
6721 int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
6722 VLAN_HLEN;
6723 int i;
6724
6725 for (i = 0; i < adapter->num_rx_queues; i++) {
6726 struct ixgbe_ring *ring = adapter->rx_ring[i];
6727
6728 if (new_frame_size > ixgbe_rx_bufsz(ring)) {
6729 e_warn(probe, "Requested MTU size is not supported with XDP\n");
6730 return -EINVAL;
6731 }
6732 }
6733 }
6734
6735
6736
6737
6738
6739
6740 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
6741 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
6742 (new_mtu > ETH_DATA_LEN))
6743 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
6744
6745 netdev_dbg(netdev, "changing MTU from %d to %d\n",
6746 netdev->mtu, new_mtu);
6747
6748
6749 netdev->mtu = new_mtu;
6750
6751 if (netif_running(netdev))
6752 ixgbe_reinit_locked(adapter);
6753
6754 return 0;
6755}
6756
6757
6758
6759
6760
6761
6762
6763
6764
6765
6766
6767
6768
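/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, and the stack is notified that
 * the interface is ready.
 **/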
6769int ixgbe_open(struct net_device *netdev)
6770{
6771 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6772 struct ixgbe_hw *hw = &adapter->hw;
6773 int err, queues;
6774
6775
6776 if (test_bit(__IXGBE_TESTING, &adapter->state))
6777 return -EBUSY;
6778
6779 netif_carrier_off(netdev);
6780
6781
6782 err = ixgbe_setup_all_tx_resources(adapter);
6783 if (err)
6784 goto err_setup_tx;
6785
6786
6787 err = ixgbe_setup_all_rx_resources(adapter);
6788 if (err)
6789 goto err_setup_rx;
6790
6791 ixgbe_configure(adapter);
6792
6793 err = ixgbe_request_irq(adapter);
6794 if (err)
6795 goto err_req_irq;
6796
6797
6798 queues = adapter->num_tx_queues;
6799 err = netif_set_real_num_tx_queues(netdev, queues);
6800 if (err)
6801 goto err_set_queues;
6802
6803 queues = adapter->num_rx_queues;
6804 err = netif_set_real_num_rx_queues(netdev, queues);
6805 if (err)
6806 goto err_set_queues;
6807
6808 ixgbe_ptp_init(adapter);
6809
6810 ixgbe_up_complete(adapter);
6811
6812 ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
6813 udp_tunnel_get_rx_info(netdev);
6814
6815 return 0;
6816
6817err_set_queues:
6818 ixgbe_free_irq(adapter);
6819err_req_irq:
6820 ixgbe_free_all_rx_resources(adapter);
6821 if (hw->phy.ops.set_phy_power && !adapter->wol)
6822 hw->phy.ops.set_phy_power(&adapter->hw, false);
6823err_setup_rx:
6824 ixgbe_free_all_tx_resources(adapter);
6825err_setup_tx:
6826 ixgbe_reset(adapter);
6827
6828 return err;
6829}
6830
6831static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6832{
6833 ixgbe_ptp_suspend(adapter);
6834
6835 if (adapter->hw.phy.ops.enter_lplu) {
6836 adapter->hw.phy.reset_disable = true;
6837 ixgbe_down(adapter);
6838 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6839 adapter->hw.phy.reset_disable = false;
6840 } else {
6841 ixgbe_down(adapter);
6842 }
6843
6844 ixgbe_free_irq(adapter);
6845
6846 ixgbe_free_all_tx_resources(adapter);
6847 ixgbe_free_all_rx_resources(adapter);
6848}
6849
6850
6851
6852
6853
6854
6855
6856
6857
6858
6859
6860
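/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled, and all transmit and receive resources are
 * freed.
 **/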
6861int ixgbe_close(struct net_device *netdev)
6862{
6863 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6864
6865 ixgbe_ptp_stop(adapter);
6866
6867 if (netif_device_present(netdev))
6868 ixgbe_close_suspend(adapter);
6869
6870 ixgbe_fdir_filter_exit(adapter);
6871
6872 ixgbe_release_hw_control(adapter);
6873
6874 return 0;
6875}
6876
6877#ifdef CONFIG_PM
6878static int ixgbe_resume(struct pci_dev *pdev)
6879{
6880 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6881 struct net_device *netdev = adapter->netdev;
	int err;
6883
6884 adapter->hw.hw_addr = adapter->io_addr;
6885 pci_set_power_state(pdev, PCI_D0);
6886 pci_restore_state(pdev);
6887
6888
6889
6890
6891 pci_save_state(pdev);
6892
6893 err = pci_enable_device_mem(pdev);
6894 if (err) {
6895 e_dev_err("Cannot enable PCI device from suspend\n");
6896 return err;
6897 }
6898 smp_mb__before_atomic();
6899 clear_bit(__IXGBE_DISABLED, &adapter->state);
6900 pci_set_master(pdev);
6901
6902 pci_wake_from_d3(pdev, false);
6903
6904 ixgbe_reset(adapter);
6905
6906 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6907
6908 rtnl_lock();
6909 err = ixgbe_init_interrupt_scheme(adapter);
6910 if (!err && netif_running(netdev))
6911 err = ixgbe_open(netdev);
6912
6913
6914 if (!err)
6915 netif_device_attach(netdev);
6916 rtnl_unlock();
6917
6918 return err;
6919}
6920#endif
6921
6922static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6923{
6924 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6925 struct net_device *netdev = adapter->netdev;
6926 struct ixgbe_hw *hw = &adapter->hw;
6927 u32 ctrl;
6928 u32 wufc = adapter->wol;
6929#ifdef CONFIG_PM
6930 int retval = 0;
6931#endif
6932
6933 rtnl_lock();
6934 netif_device_detach(netdev);
6935
6936 if (netif_running(netdev))
6937 ixgbe_close_suspend(adapter);
6938
6939 ixgbe_clear_interrupt_scheme(adapter);
6940 rtnl_unlock();
6941
6942#ifdef CONFIG_PM
6943 retval = pci_save_state(pdev);
6944 if (retval)
6945 return retval;
6946
6947#endif
6948 if (hw->mac.ops.stop_link_on_d3)
6949 hw->mac.ops.stop_link_on_d3(hw);
6950
6951 if (wufc) {
6952 u32 fctrl;
6953
6954 ixgbe_set_rx_mode(netdev);
6955
6956
6957 if (hw->mac.ops.enable_tx_laser)
6958 hw->mac.ops.enable_tx_laser(hw);
6959
6960
6961 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6962 fctrl |= IXGBE_FCTRL_MPE;
6963 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6964
6965 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
6966 ctrl |= IXGBE_CTRL_GIO_DIS;
6967 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
6968
6969 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
6970 } else {
6971 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
6972 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
6973 }
6974
6975 switch (hw->mac.type) {
6976 case ixgbe_mac_82598EB:
6977 pci_wake_from_d3(pdev, false);
6978 break;
6979 case ixgbe_mac_82599EB:
6980 case ixgbe_mac_X540:
6981 case ixgbe_mac_X550:
6982 case ixgbe_mac_X550EM_x:
6983 case ixgbe_mac_x550em_a:
6984 pci_wake_from_d3(pdev, !!wufc);
6985 break;
6986 default:
6987 break;
6988 }
6989
6990 *enable_wake = !!wufc;
6991 if (hw->phy.ops.set_phy_power && !*enable_wake)
6992 hw->phy.ops.set_phy_power(hw, false);
6993
6994 ixgbe_release_hw_control(adapter);
6995
6996 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6997 pci_disable_device(pdev);
6998
6999 return 0;
7000}
7001
7002#ifdef CONFIG_PM
7003static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
7004{
7005 int retval;
7006 bool wake;
7007
7008 retval = __ixgbe_shutdown(pdev, &wake);
7009 if (retval)
7010 return retval;
7011
7012 if (wake) {
7013 pci_prepare_to_sleep(pdev);
7014 } else {
7015 pci_wake_from_d3(pdev, false);
7016 pci_set_power_state(pdev, PCI_D3hot);
7017 }
7018
7019 return 0;
7020}
7021#endif
7022
7023static void ixgbe_shutdown(struct pci_dev *pdev)
7024{
7025 bool wake;
7026
7027 __ixgbe_shutdown(pdev, &wake);
7028
7029 if (system_state == SYSTEM_POWER_OFF) {
7030 pci_wake_from_d3(pdev, wake);
7031 pci_set_power_state(pdev, PCI_D3hot);
7032 }
7033}
7034
7035
7036
7037
7038
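/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/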
7039void ixgbe_update_stats(struct ixgbe_adapter *adapter)
7040{
7041 struct net_device *netdev = adapter->netdev;
7042 struct ixgbe_hw *hw = &adapter->hw;
7043 struct ixgbe_hw_stats *hwstats = &adapter->stats;
7044 u64 total_mpc = 0;
7045 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
7046 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
7047 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
7048 u64 alloc_rx_page = 0;
7049 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
7050
7051 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7052 test_bit(__IXGBE_RESETTING, &adapter->state))
7053 return;
7054
7055 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
7056 u64 rsc_count = 0;
7057 u64 rsc_flush = 0;
7058 for (i = 0; i < adapter->num_rx_queues; i++) {
7059 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
7060 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
7061 }
7062 adapter->rsc_total_count = rsc_count;
7063 adapter->rsc_total_flush = rsc_flush;
7064 }
7065
7066 for (i = 0; i < adapter->num_rx_queues; i++) {
7067 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
7068 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
7069 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
7070 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
7071 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
7072 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
7073 bytes += rx_ring->stats.bytes;
7074 packets += rx_ring->stats.packets;
7075 }
7076 adapter->non_eop_descs = non_eop_descs;
7077 adapter->alloc_rx_page = alloc_rx_page;
7078 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
7079 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
7080 adapter->hw_csum_rx_error = hw_csum_rx_error;
7081 netdev->stats.rx_bytes = bytes;
7082 netdev->stats.rx_packets = packets;
7083
7084 bytes = 0;
7085 packets = 0;
7086
7087 for (i = 0; i < adapter->num_tx_queues; i++) {
7088 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7089 restart_queue += tx_ring->tx_stats.restart_queue;
7090 tx_busy += tx_ring->tx_stats.tx_busy;
7091 bytes += tx_ring->stats.bytes;
7092 packets += tx_ring->stats.packets;
7093 }
7094 for (i = 0; i < adapter->num_xdp_queues; i++) {
7095 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
7096
7097 restart_queue += xdp_ring->tx_stats.restart_queue;
7098 tx_busy += xdp_ring->tx_stats.tx_busy;
7099 bytes += xdp_ring->stats.bytes;
7100 packets += xdp_ring->stats.packets;
7101 }
7102 adapter->restart_queue = restart_queue;
7103 adapter->tx_busy = tx_busy;
7104 netdev->stats.tx_bytes = bytes;
7105 netdev->stats.tx_packets = packets;
7106
7107 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
7108
7109
7110 for (i = 0; i < 8; i++) {
7111
7112 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
7113 missed_rx += mpc;
7114 hwstats->mpc[i] += mpc;
7115 total_mpc += hwstats->mpc[i];
7116 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
7117 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
7118 switch (hw->mac.type) {
7119 case ixgbe_mac_82598EB:
7120 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
7121 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
7122 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
7123 hwstats->pxonrxc[i] +=
7124 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
7125 break;
7126 case ixgbe_mac_82599EB:
7127 case ixgbe_mac_X540:
7128 case ixgbe_mac_X550:
7129 case ixgbe_mac_X550EM_x:
7130 case ixgbe_mac_x550em_a:
7131 hwstats->pxonrxc[i] +=
7132 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
7133 break;
7134 default:
7135 break;
7136 }
7137 }
7138
7139
7140 for (i = 0; i < 16; i++) {
7141 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
7142 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
7143 if ((hw->mac.type == ixgbe_mac_82599EB) ||
7144 (hw->mac.type == ixgbe_mac_X540) ||
7145 (hw->mac.type == ixgbe_mac_X550) ||
7146 (hw->mac.type == ixgbe_mac_X550EM_x) ||
7147 (hw->mac.type == ixgbe_mac_x550em_a)) {
7148 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
7149 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
7150 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
7151 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
7152 }
7153 }
7154
7155 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
7156
7157 hwstats->gprc -= missed_rx;
7158
7159 ixgbe_update_xoff_received(adapter);
7160
7161
7162 switch (hw->mac.type) {
7163 case ixgbe_mac_82598EB:
7164 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
7165 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
7166 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
7167 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
7168 break;
7169 case ixgbe_mac_X540:
7170 case ixgbe_mac_X550:
7171 case ixgbe_mac_X550EM_x:
7172 case ixgbe_mac_x550em_a:
7173
7174 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
7175 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
7176 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
7177 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
7178
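 /* fall through */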
7179 case ixgbe_mac_82599EB:
7180 for (i = 0; i < 16; i++)
7181 adapter->hw_rx_no_dma_resources +=
7182 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
7183 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
7184 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
7185 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
7186 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
7187 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
7188 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
7189 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
7190 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
7191 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
7192#ifdef IXGBE_FCOE
7193 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
7194 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
7195 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
7196 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
7197 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
7198 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
7199
7200 if (adapter->fcoe.ddp_pool) {
7201 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
7202 struct ixgbe_fcoe_ddp_pool *ddp_pool;
7203 unsigned int cpu;
7204 u64 noddp = 0, noddp_ext_buff = 0;
7205 for_each_possible_cpu(cpu) {
7206 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
7207 noddp += ddp_pool->noddp;
7208 noddp_ext_buff += ddp_pool->noddp_ext_buff;
7209 }
7210 hwstats->fcoe_noddp = noddp;
7211 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
7212 }
7213#endif
7214 break;
7215 default:
7216 break;
7217 }
7218 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
7219 hwstats->bprc += bprc;
7220 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
7221 if (hw->mac.type == ixgbe_mac_82598EB)
7222 hwstats->mprc -= bprc;
7223 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
7224 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
7225 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
7226 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
7227 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
7228 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
7229 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
7230 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
7231 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
7232 hwstats->lxontxc += lxon;
7233 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
7234 hwstats->lxofftxc += lxoff;
7235 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
7236 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
7237
7238
7239
7240 xon_off_tot = lxon + lxoff;
7241 hwstats->gptc -= xon_off_tot;
7242 hwstats->mptc -= xon_off_tot;
7243 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
7244 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
7245 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
7246 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
7247 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
7248 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
7249 hwstats->ptc64 -= xon_off_tot;
7250 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
7251 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
7252 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
7253 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
7254 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
7255 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
7256
7257
7258 netdev->stats.multicast = hwstats->mprc;
7259
7260
7261 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
7262 netdev->stats.rx_dropped = 0;
7263 netdev->stats.rx_length_errors = hwstats->rlec;
7264 netdev->stats.rx_crc_errors = hwstats->crcerrs;
7265 netdev->stats.rx_missed_errors = total_mpc;
7266}
7267
7268
7269
7270
7271
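/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/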
7272static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
7273{
7274 struct ixgbe_hw *hw = &adapter->hw;
7275 int i;
7276
7277 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
7278 return;
7279
7280 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
7281
7282
7283 if (test_bit(__IXGBE_DOWN, &adapter->state))
7284 return;
7285
7286
7287 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
7288 return;
7289
7290 adapter->fdir_overflow++;
7291
7292 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
7293 for (i = 0; i < adapter->num_tx_queues; i++)
7294 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7295 &(adapter->tx_ring[i]->state));
7296 for (i = 0; i < adapter->num_xdp_queues; i++)
7297 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7298 &adapter->xdp_ring[i]->state);
7299
7300 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
7301 } else {
7302 e_err(probe, "failed to finish FDIR re-initialization, ignored adding FDIR ATR filters\n");
7304 }
7305}
7306
7307
7308
7309
7310
7311
7312
7313
7314
7315
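/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/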
7316static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
7317{
7318 struct ixgbe_hw *hw = &adapter->hw;
7319 u64 eics = 0;
7320 int i;
7321
7322
7323 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7324 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7325 test_bit(__IXGBE_RESETTING, &adapter->state))
7326 return;
7327
7328
7329 if (netif_carrier_ok(adapter->netdev)) {
7330 for (i = 0; i < adapter->num_tx_queues; i++)
7331 set_check_for_tx_hang(adapter->tx_ring[i]);
7332 for (i = 0; i < adapter->num_xdp_queues; i++)
7333 set_check_for_tx_hang(adapter->xdp_ring[i]);
7334 }
7335
7336 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
7337
7338
7339
7340
7341
7342 IXGBE_WRITE_REG(hw, IXGBE_EICS,
7343 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
7344 } else {
7345
7346 for (i = 0; i < adapter->num_q_vectors; i++) {
7347 struct ixgbe_q_vector *qv = adapter->q_vector[i];
7348 if (qv->rx.ring || qv->tx.ring)
7349 eics |= BIT_ULL(i);
7350 }
7351 }
7352
7353
7354 ixgbe_irq_rearm_queues(adapter, eics);
7355}
7356
7357
7358
7359
7360
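/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/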
7361static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
7362{
7363 struct ixgbe_hw *hw = &adapter->hw;
7364 u32 link_speed = adapter->link_speed;
7365 bool link_up = adapter->link_up;
7366 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
7367
7368 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
7369 return;
7370
7371 if (hw->mac.ops.check_link) {
7372 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
7373 } else {
7374
7375 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
7376 link_up = true;
7377 }
7378
7379 if (adapter->ixgbe_ieee_pfc)
7380 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
7381
7382 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
7383 hw->mac.ops.fc_enable(hw);
7384 ixgbe_set_rx_drop_en(adapter);
7385 }
7386
7387 if (link_up ||
7388 time_after(jiffies, (adapter->link_check_timeout +
7389 IXGBE_TRY_LINK_TIMEOUT))) {
7390 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
7391 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
7392 IXGBE_WRITE_FLUSH(hw);
7393 }
7394
7395 adapter->link_up = link_up;
7396 adapter->link_speed = link_speed;
7397}
7398
7399static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
7400{
7401#ifdef CONFIG_IXGBE_DCB
7402 struct net_device *netdev = adapter->netdev;
7403 struct dcb_app app = {
7404 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
7405 .protocol = 0,
7406 };
7407 u8 up = 0;
7408
7409 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
7410 up = dcb_ieee_getapp_mask(netdev, &app);
7411
7412 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
7413#endif
7414}
7415
7416
7417
7418
7419
7420
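/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter: pointer to the device adapter structure
 **/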
7421static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
7422{
7423 struct net_device *netdev = adapter->netdev;
7424 struct ixgbe_hw *hw = &adapter->hw;
7425 u32 link_speed = adapter->link_speed;
7426 const char *speed_str;
7427 bool flow_rx, flow_tx;
7428
7429
7430 if (netif_carrier_ok(netdev))
7431 return;
7432
7433 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
7434
7435 switch (hw->mac.type) {
7436 case ixgbe_mac_82598EB: {
7437 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
7438 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
7439 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
7440 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
7441 }
7442 break;
7443 case ixgbe_mac_X540:
7444 case ixgbe_mac_X550:
7445 case ixgbe_mac_X550EM_x:
7446 case ixgbe_mac_x550em_a:
7447 case ixgbe_mac_82599EB: {
7448 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
7449 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
7450 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
7451 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
7452 }
7453 break;
7454 default:
7455 flow_tx = false;
7456 flow_rx = false;
7457 break;
7458 }
7459
7460 adapter->last_rx_ptp_check = jiffies;
7461
7462 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7463 ixgbe_ptp_start_cyclecounter(adapter);
7464
7465 switch (link_speed) {
7466 case IXGBE_LINK_SPEED_10GB_FULL:
7467 speed_str = "10 Gbps";
7468 break;
7469 case IXGBE_LINK_SPEED_5GB_FULL:
7470 speed_str = "5 Gbps";
7471 break;
7472 case IXGBE_LINK_SPEED_2_5GB_FULL:
7473 speed_str = "2.5 Gbps";
7474 break;
7475 case IXGBE_LINK_SPEED_1GB_FULL:
7476 speed_str = "1 Gbps";
7477 break;
7478 case IXGBE_LINK_SPEED_100_FULL:
7479 speed_str = "100 Mbps";
7480 break;
7481 case IXGBE_LINK_SPEED_10_FULL:
7482 speed_str = "10 Mbps";
7483 break;
7484 default:
7485 speed_str = "unknown speed";
7486 break;
7487 }
7488 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
7489 ((flow_rx && flow_tx) ? "RX/TX" :
7490 (flow_rx ? "RX" :
7491 (flow_tx ? "TX" : "None"))));
7492
7493 netif_carrier_on(netdev);
7494 ixgbe_check_vf_rate_limit(adapter);
7495
7496
7497 netif_tx_wake_all_queues(adapter->netdev);
7498
7499
7500 ixgbe_update_default_up(adapter);
7501
7502
7503 ixgbe_ping_all_vfs(adapter);
7504}
7505
7506
7507
7508
7509
7510
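/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter: pointer to the adapter structure
 **/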
7511static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
7512{
7513 struct net_device *netdev = adapter->netdev;
7514 struct ixgbe_hw *hw = &adapter->hw;
7515
7516 adapter->link_up = false;
7517 adapter->link_speed = 0;
7518
7519
7520 if (!netif_carrier_ok(netdev))
7521 return;
7522
7523
7524 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
7525 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
7526
7527 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7528 ixgbe_ptp_start_cyclecounter(adapter);
7529
7530 e_info(drv, "NIC Link is Down\n");
7531 netif_carrier_off(netdev);
7532
7533
7534 ixgbe_ping_all_vfs(adapter);
7535}
7536
7537static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
7538{
7539 int i;
7540
7541 for (i = 0; i < adapter->num_tx_queues; i++) {
7542 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7543
7544 if (tx_ring->next_to_use != tx_ring->next_to_clean)
7545 return true;
7546 }
7547
7548 for (i = 0; i < adapter->num_xdp_queues; i++) {
7549 struct ixgbe_ring *ring = adapter->xdp_ring[i];
7550
7551 if (ring->next_to_use != ring->next_to_clean)
7552 return true;
7553 }
7554
7555 return false;
7556}
7557
7558static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
7559{
7560 struct ixgbe_hw *hw = &adapter->hw;
7561 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
7562 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
7563
7564 int i, j;
7565
7566 if (!adapter->num_vfs)
7567 return false;
7568
7569
7570 if (hw->mac.type >= ixgbe_mac_X550)
7571 return false;
7572
7573 for (i = 0; i < adapter->num_vfs; i++) {
7574 for (j = 0; j < q_per_pool; j++) {
7575 u32 h, t;
7576
7577 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
7578 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
7579
7580 if (h != t)
7581 return true;
7582 }
7583 }
7584
7585 return false;
7586}
7587
7588
7589
7590
7591
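/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/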
7592static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
7593{
7594 if (!netif_carrier_ok(adapter->netdev)) {
7595 if (ixgbe_ring_tx_pending(adapter) ||
7596 ixgbe_vf_tx_pending(adapter)) {
7597
7598
7599
7600
7601
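 /* We've lost link, so the controller stops DMA, but we've got
  * queued Tx work that's never going to get done, so reset the
  * controller to flush Tx.  (Do the reset outside of interrupt
  * context.)
  */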
7602 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
7603 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
7604 }
7605 }
7606}
7607
7608#ifdef CONFIG_PCI_IOV
7609static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
7610{
7611 struct ixgbe_hw *hw = &adapter->hw;
7612 struct pci_dev *pdev = adapter->pdev;
7613 unsigned int vf;
7614 u32 gpc;
7615
7616 if (!(netif_carrier_ok(adapter->netdev)))
7617 return;
7618
7619 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
7620 if (gpc)
7621 return;
7622
7623
7624
7625
7626
7627
7628 if (!pdev)
7629 return;
7630
7631
7632 for (vf = 0; vf < adapter->num_vfs; ++vf) {
7633 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
7634 u16 status_reg;
7635
7636 if (!vfdev)
7637 continue;
7638 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
7639 if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
7640 status_reg & PCI_STATUS_REC_MASTER_ABORT)
7641 pcie_flr(vfdev);
7642 }
7643}
7644
7645static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
7646{
7647 u32 ssvpc;
7648
7649
7650 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
7651 adapter->num_vfs == 0)
7652 return;
7653
7654 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
7655
7656
7657
7658
7659
7660 if (!ssvpc)
7661 return;
7662
7663 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
7664}
7665#else
7666static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
7667{
7668}
7669
7670static void
7671ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
7672{
7673}
7674#endif
7675
7676
7677
7678
7679
7680
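/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/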
7681static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
7682{
7683
7684 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7685 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7686 test_bit(__IXGBE_RESETTING, &adapter->state))
7687 return;
7688
7689 ixgbe_watchdog_update_link(adapter);
7690
7691 if (adapter->link_up)
7692 ixgbe_watchdog_link_is_up(adapter);
7693 else
7694 ixgbe_watchdog_link_is_down(adapter);
7695
7696 ixgbe_check_for_bad_vf(adapter);
7697 ixgbe_spoof_check(adapter);
7698 ixgbe_update_stats(adapter);
7699
7700 ixgbe_watchdog_flush_tx(adapter);
7701}
7702
7703
7704
7705
7706
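/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/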
7707static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
7708{
7709 struct ixgbe_hw *hw = &adapter->hw;
7710 s32 err;
7711
7712
7713 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
7714 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7715 return;
7716
7717 if (adapter->sfp_poll_time &&
7718 time_after(adapter->sfp_poll_time, jiffies))
7719 return;
7720
7721
7722 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7723 return;
7724
7725 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
7726
7727 err = hw->phy.ops.identify_sfp(hw);
7728 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7729 goto sfp_out;
7730
7731 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
7732
7733
7734 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
7735 }
7736
7737
7738 if (err)
7739 goto sfp_out;
7740
7741
7742 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7743 goto sfp_out;
7744
7745 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
7746
7747
7748
7749
7750
7751
7752 if (hw->mac.type == ixgbe_mac_82598EB)
7753 err = hw->phy.ops.reset(hw);
7754 else
7755 err = hw->mac.ops.setup_sfp(hw);
7756
7757 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7758 goto sfp_out;
7759
7760 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
7761 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
7762
7763sfp_out:
7764 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7765
7766 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
7767 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
7768 e_dev_err("failed to initialize because an unsupported SFP+ module type was detected.\n");
7770 e_dev_err("Reload the driver after installing a supported module.\n");
7772 unregister_netdev(adapter->netdev);
7773 }
7774}
7775
7776
7777
7778
7779
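/**
 * ixgbe_sfp_link_config_subtask - set up link SFP if needed
 * @adapter: the ixgbe adapter structure
 **/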
7780static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
7781{
7782 struct ixgbe_hw *hw = &adapter->hw;
7783 u32 cap_speed;
7784 u32 speed;
7785 bool autoneg = false;
7786
7787 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
7788 return;
7789
7790
7791 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7792 return;
7793
7794 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
7795
7796 hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
7797
7798
7799 if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
7800 speed = IXGBE_LINK_SPEED_10GB_FULL;
7801 else
7802 speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
7803 IXGBE_LINK_SPEED_1GB_FULL);
7804
7805 if (hw->mac.ops.setup_link)
7806 hw->mac.ops.setup_link(hw, speed, true);
7807
7808 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
7809 adapter->link_check_timeout = jiffies;
7810 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7811}
7812
7813
7814
7815
7816
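/**
 * ixgbe_service_timer - Timer Call-back
 * @t: pointer to timer_list structure
 **/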
7817static void ixgbe_service_timer(struct timer_list *t)
7818{
7819 struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
7820 unsigned long next_event_offset;
7821
7822
7823 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
7824 next_event_offset = HZ / 10;
7825 else
7826 next_event_offset = HZ * 2;
7827
7828
7829 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7830
7831 ixgbe_service_event_schedule(adapter);
7832}
7833
7834static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
7835{
7836 struct ixgbe_hw *hw = &adapter->hw;
7837 u32 status;
7838
7839 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
7840 return;
7841
7842 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
7843
7844 if (!hw->phy.ops.handle_lasi)
7845 return;
7846
7847 status = hw->phy.ops.handle_lasi(&adapter->hw);
7848 if (status != IXGBE_ERR_OVERTEMP)
7849 return;
7850
7851 e_crit(drv, "%s\n", ixgbe_overheat_msg);
7852}
7853
7854static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
7855{
7856 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
7857 return;
7858
7859 rtnl_lock();
7860
7861 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7862 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7863 test_bit(__IXGBE_RESETTING, &adapter->state)) {
7864 rtnl_unlock();
7865 return;
7866 }
7867
7868 ixgbe_dump(adapter);
7869 netdev_err(adapter->netdev, "Reset adapter\n");
7870 adapter->tx_timeout_count++;
7871
7872 ixgbe_reinit_locked(adapter);
7873 rtnl_unlock();
7874}
7875
7876
7877
7878
7879
7880
7881
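/**
 * ixgbe_check_fw_error - Check firmware for errors
 * @adapter: the adapter private structure
 *
 * Check firmware errors in register FWSM
 **/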
7882static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
7883{
7884 struct ixgbe_hw *hw = &adapter->hw;
7885 u32 fwsm;
7886
7887
7888 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
7889
7890 if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
7891 !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
7892 e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
7893 fwsm);
7894
7895 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
7896 e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
7897 return true;
7898 }
7899
7900 return false;
7901}
7902
7903
7904
7905
7906
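/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/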
7907static void ixgbe_service_task(struct work_struct *work)
7908{
7909 struct ixgbe_adapter *adapter = container_of(work,
7910 struct ixgbe_adapter,
7911 service_task);
7912 if (ixgbe_removed(adapter->hw.hw_addr)) {
7913 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
7914 rtnl_lock();
7915 ixgbe_down(adapter);
7916 rtnl_unlock();
7917 }
7918 ixgbe_service_event_complete(adapter);
7919 return;
7920 }
7921 if (ixgbe_check_fw_error(adapter)) {
7922 if (!test_bit(__IXGBE_DOWN, &adapter->state))
7923 unregister_netdev(adapter->netdev);
7924 ixgbe_service_event_complete(adapter);
7925 return;
7926 }
7927 if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
7928 rtnl_lock();
7929 adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
7930 udp_tunnel_get_rx_info(adapter->netdev);
7931 rtnl_unlock();
7932 }
7933 ixgbe_reset_subtask(adapter);
7934 ixgbe_phy_interrupt_subtask(adapter);
7935 ixgbe_sfp_detection_subtask(adapter);
7936 ixgbe_sfp_link_config_subtask(adapter);
7937 ixgbe_check_overtemp_subtask(adapter);
7938 ixgbe_watchdog_subtask(adapter);
7939 ixgbe_fdir_reinit_subtask(adapter);
7940 ixgbe_check_hang_subtask(adapter);
7941
7942 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
7943 ixgbe_ptp_overflow_check(adapter);
7944 if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
7945 ixgbe_ptp_rx_hang(adapter);
7946 ixgbe_ptp_tx_hang(adapter);
7947 }
7948
7949 ixgbe_service_event_complete(adapter);
7950}
7951
7952static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7953 struct ixgbe_tx_buffer *first,
7954 u8 *hdr_len,
7955 struct ixgbe_ipsec_tx_data *itd)
7956{
7957 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
7958 struct sk_buff *skb = first->skb;
7959 union {
7960 struct iphdr *v4;
7961 struct ipv6hdr *v6;
7962 unsigned char *hdr;
7963 } ip;
7964 union {
7965 struct tcphdr *tcp;
7966 struct udphdr *udp;
7967 unsigned char *hdr;
7968 } l4;
7969 u32 paylen, l4_offset;
7970 u32 fceof_saidx = 0;
7971 int err;
7972
7973 if (skb->ip_summed != CHECKSUM_PARTIAL)
7974 return 0;
7975
7976 if (!skb_is_gso(skb))
7977 return 0;
7978
7979 err = skb_cow_head(skb, 0);
7980 if (err < 0)
7981 return err;
7982
7983 if (eth_p_mpls(first->protocol))
7984 ip.hdr = skb_inner_network_header(skb);
7985 else
7986 ip.hdr = skb_network_header(skb);
7987 l4.hdr = skb_checksum_start(skb);
7988
7989
7990 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
7991 IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
7992
7993
7994 if (ip.v4->version == 4) {
7995 unsigned char *csum_start = skb_checksum_start(skb);
7996 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
7997 int len = csum_start - trans_start;
7998
7999
8000
8001
8002
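 /* IP header will have to cancel out any data that
  * is not a part of the outer IP header, so set to
  * a reverse csum if needed, else init check to 0.
  */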
8003 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
8004 csum_fold(csum_partial(trans_start,
8005 len, 0)) : 0;
8006 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
8007
8008 ip.v4->tot_len = 0;
8009 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
8010 IXGBE_TX_FLAGS_CSUM |
8011 IXGBE_TX_FLAGS_IPV4;
8012 } else {
8013 ip.v6->payload_len = 0;
8014 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
8015 IXGBE_TX_FLAGS_CSUM;
8016 }
8017
8018
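 /* determine offset of inner transport header */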
8019 l4_offset = l4.hdr - skb->data;
8020
8021
8022 paylen = skb->len - l4_offset;
8023
8024 if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
8025
8026 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
8027 csum_replace_by_diff(&l4.tcp->check,
8028 (__force __wsum)htonl(paylen));
8029 } else {
8030
8031 *hdr_len = sizeof(*l4.udp) + l4_offset;
8032 csum_replace_by_diff(&l4.udp->check,
8033 (__force __wsum)htonl(paylen));
8034 }
8035
8036
8037 first->gso_segs = skb_shinfo(skb)->gso_segs;
8038 first->bytecount += (first->gso_segs - 1) * *hdr_len;
8039
8040
8041 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
8042 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
8043
8044 fceof_saidx |= itd->sa_idx;
8045 type_tucmd |= itd->flags | itd->trailer_len;
8046
8047
8048 vlan_macip_lens = l4.hdr - ip.hdr;
8049 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
8050 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8051
8052 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
8053 mss_l4len_idx);
8054
8055 return 1;
8056}
8057
8058static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
8059{
8060 unsigned int offset = 0;
8061
8062 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
8063
8064 return offset == skb_checksum_start_offset(skb);
8065}
8066
8067static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
8068 struct ixgbe_tx_buffer *first,
8069 struct ixgbe_ipsec_tx_data *itd)
8070{
8071 struct sk_buff *skb = first->skb;
8072 u32 vlan_macip_lens = 0;
8073 u32 fceof_saidx = 0;
8074 u32 type_tucmd = 0;
8075
8076 if (skb->ip_summed != CHECKSUM_PARTIAL) {
8077csum_failed:
8078 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
8079 IXGBE_TX_FLAGS_CC)))
8080 return;
8081 goto no_csum;
8082 }
8083
8084 switch (skb->csum_offset) {
8085 case offsetof(struct tcphdr, check):
8086 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
8087
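 /* fall through */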
8088 case offsetof(struct udphdr, check):
8089 break;
8090 case offsetof(struct sctphdr, checksum):
8091
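 /* validate that this is actually an SCTP request */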
8092 if (((first->protocol == htons(ETH_P_IP)) &&
8093 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
8094 ((first->protocol == htons(ETH_P_IPV6)) &&
8095 ixgbe_ipv6_csum_is_sctp(skb))) {
8096 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
8097 break;
8098 }
8099
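 /* fall through */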
8100 default:
8101 skb_checksum_help(skb);
8102 goto csum_failed;
8103 }
8104
8105
8106 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
8107 vlan_macip_lens = skb_checksum_start_offset(skb) -
8108 skb_network_offset(skb);
8109no_csum:
8110
8111 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
8112 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
8113
8114 fceof_saidx |= itd->sa_idx;
8115 type_tucmd |= itd->flags | itd->trailer_len;
8116
8117 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
8118}
8119
8120#define IXGBE_SET_FLAG(_input, _flag, _result) \
8121 ((_flag <= _result) ? \
8122 ((u32)(_input & _flag) * (_result / _flag)) : \
8123 ((u32)(_input & _flag) / (_flag / _result)))
8124
8125static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
8126{
8127
8128 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8129 IXGBE_ADVTXD_DCMD_DEXT |
8130 IXGBE_ADVTXD_DCMD_IFCS;
8131
8132
8133 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
8134 IXGBE_ADVTXD_DCMD_VLE);
8135
8136
8137 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
8138 IXGBE_ADVTXD_DCMD_TSE);
8139
8140
8141 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
8142 IXGBE_ADVTXD_MAC_TSTAMP);
8143
8144
8145 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
8146
8147 return cmd_type;
8148}
8149
8150static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
8151 u32 tx_flags, unsigned int paylen)
8152{
8153 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
8154
8155
8156 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8157 IXGBE_TX_FLAGS_CSUM,
8158 IXGBE_ADVTXD_POPTS_TXSM);
8159
8160
8161 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8162 IXGBE_TX_FLAGS_IPV4,
8163 IXGBE_ADVTXD_POPTS_IXSM);
8164
8165
8166 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8167 IXGBE_TX_FLAGS_IPSEC,
8168 IXGBE_ADVTXD_POPTS_IPSEC);
8169
8170
8171
8172
8173
8174 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
8175 IXGBE_TX_FLAGS_CC,
8176 IXGBE_ADVTXD_CC);
8177
8178 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
8179}
8180
8181static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8182{
8183 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
8184
8185
8186
8187
8188
8189 smp_mb();
8190
8191
8192
8193
8194 if (likely(ixgbe_desc_unused(tx_ring) < size))
8195 return -EBUSY;
8196
8197
8198 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
8199 ++tx_ring->tx_stats.restart_queue;
8200 return 0;
8201}
8202
8203static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
8204{
8205 if (likely(ixgbe_desc_unused(tx_ring) >= size))
8206 return 0;
8207
8208 return __ixgbe_maybe_stop_tx(tx_ring, size);
8209}
8210
8211static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
8212 struct ixgbe_tx_buffer *first,
8213 const u8 hdr_len)
8214{
8215 struct sk_buff *skb = first->skb;
8216 struct ixgbe_tx_buffer *tx_buffer;
8217 union ixgbe_adv_tx_desc *tx_desc;
8218 skb_frag_t *frag;
8219 dma_addr_t dma;
8220 unsigned int data_len, size;
8221 u32 tx_flags = first->tx_flags;
8222 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
8223 u16 i = tx_ring->next_to_use;
8224
8225 tx_desc = IXGBE_TX_DESC(tx_ring, i);
8226
8227 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
8228
8229 size = skb_headlen(skb);
8230 data_len = skb->data_len;
8231
8232#ifdef IXGBE_FCOE
8233 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
8234 if (data_len < sizeof(struct fcoe_crc_eof)) {
8235 size -= sizeof(struct fcoe_crc_eof) - data_len;
8236 data_len = 0;
8237 } else {
8238 data_len -= sizeof(struct fcoe_crc_eof);
8239 }
8240 }
8241
8242#endif
8243 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
8244
8245 tx_buffer = first;
8246
8247 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
8248 if (dma_mapping_error(tx_ring->dev, dma))
8249 goto dma_error;
8250
8251
8252 dma_unmap_len_set(tx_buffer, len, size);
8253 dma_unmap_addr_set(tx_buffer, dma, dma);
8254
8255 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8256
8257 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
8258 tx_desc->read.cmd_type_len =
8259 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
8260
8261 i++;
8262 tx_desc++;
8263 if (i == tx_ring->count) {
8264 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8265 i = 0;
8266 }
8267 tx_desc->read.olinfo_status = 0;
8268
8269 dma += IXGBE_MAX_DATA_PER_TXD;
8270 size -= IXGBE_MAX_DATA_PER_TXD;
8271
8272 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8273 }
8274
8275 if (likely(!data_len))
8276 break;
8277
8278 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
8279
8280 i++;
8281 tx_desc++;
8282 if (i == tx_ring->count) {
8283 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8284 i = 0;
8285 }
8286 tx_desc->read.olinfo_status = 0;
8287
8288#ifdef IXGBE_FCOE
8289 size = min_t(unsigned int, data_len, skb_frag_size(frag));
8290#else
8291 size = skb_frag_size(frag);
8292#endif
8293 data_len -= size;
8294
8295 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
8296 DMA_TO_DEVICE);
8297
8298 tx_buffer = &tx_ring->tx_buffer_info[i];
8299 }
8300
8301
8302 cmd_type |= size | IXGBE_TXD_CMD;
8303 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8304
8305 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
8306
8307
8308 first->time_stamp = jiffies;
8309
8310 skb_tx_timestamp(skb);
8311
8312
8313
8314
8315
8316
8317
8318
8319
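 /* Force memory writes to complete before letting h/w know there
  * are new descriptors to fetch.  (Only applicable for weak-ordered
  * memory model archs, such as IA-64.)  This also ensures the status
  * bits are written before next_to_watch is set below.
  */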
8320 wmb();
8321
8322
8323 first->next_to_watch = tx_desc;
8324
8325 i++;
8326 if (i == tx_ring->count)
8327 i = 0;
8328
8329 tx_ring->next_to_use = i;
8330
8331 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
8332
8333 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
8334 writel(i, tx_ring->tail);
8335 }
8336
8337 return 0;
8338dma_error:
8339 dev_err(tx_ring->dev, "TX DMA map failed\n");
8340
8341
8342 for (;;) {
8343 tx_buffer = &tx_ring->tx_buffer_info[i];
8344 if (dma_unmap_len(tx_buffer, len))
8345 dma_unmap_page(tx_ring->dev,
8346 dma_unmap_addr(tx_buffer, dma),
8347 dma_unmap_len(tx_buffer, len),
8348 DMA_TO_DEVICE);
8349 dma_unmap_len_set(tx_buffer, len, 0);
8350 if (tx_buffer == first)
8351 break;
8352 if (i == 0)
8353 i += tx_ring->count;
8354 i--;
8355 }
8356
8357 dev_kfree_skb_any(first->skb);
8358 first->skb = NULL;
8359
8360 tx_ring->next_to_use = i;
8361
8362 return -1;
8363}
8364
8365static void ixgbe_atr(struct ixgbe_ring *ring,
8366 struct ixgbe_tx_buffer *first)
8367{
8368 struct ixgbe_q_vector *q_vector = ring->q_vector;
8369 union ixgbe_atr_hash_dword input = { .dword = 0 };
8370 union ixgbe_atr_hash_dword common = { .dword = 0 };
8371 union {
8372 unsigned char *network;
8373 struct iphdr *ipv4;
8374 struct ipv6hdr *ipv6;
8375 } hdr;
8376 struct tcphdr *th;
8377 unsigned int hlen;
8378 struct sk_buff *skb;
8379 __be16 vlan_id;
8380 int l4_proto;
8381
8382
8383 if (!q_vector)
8384 return;
8385
8386
8387 if (!ring->atr_sample_rate)
8388 return;
8389
8390 ring->atr_count++;
8391
8392
8393 if ((first->protocol != htons(ETH_P_IP)) &&
8394 (first->protocol != htons(ETH_P_IPV6)))
8395 return;
8396
8397
8398 skb = first->skb;
8399 hdr.network = skb_network_header(skb);
8400 if (unlikely(hdr.network <= skb->data))
8401 return;
8402 if (skb->encapsulation &&
8403 first->protocol == htons(ETH_P_IP) &&
8404 hdr.ipv4->protocol == IPPROTO_UDP) {
8405 struct ixgbe_adapter *adapter = q_vector->adapter;
8406
8407 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8408 VXLAN_HEADROOM))
8409 return;
8410
8411
8412 if (adapter->vxlan_port &&
8413 udp_hdr(skb)->dest == adapter->vxlan_port)
8414 hdr.network = skb_inner_network_header(skb);
8415
8416 if (adapter->geneve_port &&
8417 udp_hdr(skb)->dest == adapter->geneve_port)
8418 hdr.network = skb_inner_network_header(skb);
8419 }
8420
8421
8422
8423
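 /* Make sure we have at least [minimum IPv4 header + TCP]
  * or [IPv6 header + extension header + TCP] headers
  */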
8424 if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
8425 return;
8426
8427
8428 switch (hdr.ipv4->version) {
8429 case IPVERSION:
8430
8431 hlen = (hdr.network[0] & 0x0F) << 2;
8432 l4_proto = hdr.ipv4->protocol;
8433 break;
8434 case 6:
8435 hlen = hdr.network - skb->data;
8436 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
8437 hlen -= hdr.network - skb->data;
8438 break;
8439 default:
8440 return;
8441 }
8442
8443 if (l4_proto != IPPROTO_TCP)
8444 return;
8445
8446 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8447 hlen + sizeof(struct tcphdr)))
8448 return;
8449
8450 th = (struct tcphdr *)(hdr.network + hlen);
8451
8452
8453 if (th->fin)
8454 return;
8455
8456
8457 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
8458 return;
8459
8460
8461 ring->atr_count = 0;
8462
8463 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
8464
8465
8466
8467
8468
8469
8470
8471
8472 input.formatted.vlan_id = vlan_id;
8473
8474
8475
8476
8477
8478 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
8479 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
8480 else
8481 common.port.src ^= th->dest ^ first->protocol;
8482 common.port.dst ^= th->source;
8483
8484 switch (hdr.ipv4->version) {
8485 case IPVERSION:
8486 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
8487 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
8488 break;
8489 case 6:
8490 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
8491 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
8492 hdr.ipv6->saddr.s6_addr32[1] ^
8493 hdr.ipv6->saddr.s6_addr32[2] ^
8494 hdr.ipv6->saddr.s6_addr32[3] ^
8495 hdr.ipv6->daddr.s6_addr32[0] ^
8496 hdr.ipv6->daddr.s6_addr32[1] ^
8497 hdr.ipv6->daddr.s6_addr32[2] ^
8498 hdr.ipv6->daddr.s6_addr32[3];
8499 break;
8500 default:
8501 break;
8502 }
8503
8504 if (hdr.network != skb_network_header(skb))
8505 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
8506
8507
8508 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
8509 input, common, ring->queue_index);
8510}
8511
8512#ifdef IXGBE_FCOE
8513static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
8514 struct net_device *sb_dev)
8515{
8516 struct ixgbe_adapter *adapter;
8517 struct ixgbe_ring_feature *f;
8518 int txq;
8519
8520 if (sb_dev) {
8521 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
8522 struct net_device *vdev = sb_dev;
8523
8524 txq = vdev->tc_to_txq[tc].offset;
8525 txq += reciprocal_scale(skb_get_hash(skb),
8526 vdev->tc_to_txq[tc].count);
8527
8528 return txq;
8529 }
8530
8531
8532
8533
8534
8535 switch (vlan_get_protocol(skb)) {
8536 case htons(ETH_P_FCOE):
8537 case htons(ETH_P_FIP):
8538 adapter = netdev_priv(dev);
8539
8540 if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
8541 break;
8542
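 /* fall through */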
8543 default:
8544 return netdev_pick_tx(dev, skb, sb_dev);
8545 }
8546
8547 f = &adapter->ring_feature[RING_F_FCOE];
8548
8549 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
8550 smp_processor_id();
8551
8552 while (txq >= f->indices)
8553 txq -= f->indices;
8554
8555 return txq + f->offset;
8556}
8557
8558#endif
8559int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
8560 struct xdp_frame *xdpf)
8561{
8562 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
8563 struct ixgbe_tx_buffer *tx_buffer;
8564 union ixgbe_adv_tx_desc *tx_desc;
8565 u32 len, cmd_type;
8566 dma_addr_t dma;
8567 u16 i;
8568
8569 len = xdpf->len;
8570
8571 if (unlikely(!ixgbe_desc_unused(ring)))
8572 return IXGBE_XDP_CONSUMED;
8573
8574 dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
8575 if (dma_mapping_error(ring->dev, dma))
8576 return IXGBE_XDP_CONSUMED;
8577
8578
8579 tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
8580 tx_buffer->bytecount = len;
8581 tx_buffer->gso_segs = 1;
8582 tx_buffer->protocol = 0;
8583
8584 i = ring->next_to_use;
8585 tx_desc = IXGBE_TX_DESC(ring, i);
8586
8587 dma_unmap_len_set(tx_buffer, len, len);
8588 dma_unmap_addr_set(tx_buffer, dma, dma);
8589 tx_buffer->xdpf = xdpf;
8590
8591 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8592
8593
8594 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8595 IXGBE_ADVTXD_DCMD_DEXT |
8596 IXGBE_ADVTXD_DCMD_IFCS;
8597 cmd_type |= len | IXGBE_TXD_CMD;
8598 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8599 tx_desc->read.olinfo_status =
8600 cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
8601
8602
8603 smp_wmb();
8604
8605
8606 i++;
8607 if (i == ring->count)
8608 i = 0;
8609
8610 tx_buffer->next_to_watch = tx_desc;
8611 ring->next_to_use = i;
8612
8613 return IXGBE_XDP_TX;
8614}
8615
8616netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8617 struct ixgbe_adapter *adapter,
8618 struct ixgbe_ring *tx_ring)
8619{
8620 struct ixgbe_tx_buffer *first;
8621 int tso;
8622 u32 tx_flags = 0;
8623 unsigned short f;
8624 u16 count = TXD_USE_COUNT(skb_headlen(skb));
8625 struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
8626 __be16 protocol = skb->protocol;
8627 u8 hdr_len = 0;
8628
8629
8630
8631
8632
8633
8634
8635
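 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
  *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
  *       + 2 desc gap to keep tail from touching head,
  *       + 1 desc for context descriptor,
  * otherwise try next time
  */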
8636 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
8637 count += TXD_USE_COUNT(skb_frag_size(
8638 &skb_shinfo(skb)->frags[f]));
8639
8640 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8641 tx_ring->tx_stats.tx_busy++;
8642 return NETDEV_TX_BUSY;
8643 }
8644
8645
8646 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
8647 first->skb = skb;
8648 first->bytecount = skb->len;
8649 first->gso_segs = 1;
8650
8651
8652 if (skb_vlan_tag_present(skb)) {
8653 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
8654 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8655
8656 } else if (protocol == htons(ETH_P_8021Q)) {
8657 struct vlan_hdr *vhdr, _vhdr;
8658 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
8659 if (!vhdr)
8660 goto out_drop;
8661
8662 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
8663 IXGBE_TX_FLAGS_VLAN_SHIFT;
8664 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
8665 }
8666 protocol = vlan_get_protocol(skb);
8667
8668 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
8669 adapter->ptp_clock) {
8670 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
8671 !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
8672 &adapter->state)) {
8673 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8674 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
8675
8676
8677 adapter->ptp_tx_skb = skb_get(skb);
8678 adapter->ptp_tx_start = jiffies;
8679 schedule_work(&adapter->ptp_tx_work);
8680 } else {
8681 adapter->tx_hwtstamp_skipped++;
8682 }
8683 }
8684
8685#ifdef CONFIG_PCI_IOV
8686
8687
8688
8689
8690 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8691 tx_flags |= IXGBE_TX_FLAGS_CC;
8692
8693#endif
8694
8695 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8696 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
8697 (skb->priority != TC_PRIO_CONTROL))) {
8698 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
8699 tx_flags |= (skb->priority & 0x7) <<
8700 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
8701 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
8702 struct vlan_ethhdr *vhdr;
8703
8704 if (skb_cow_head(skb, 0))
8705 goto out_drop;
8706 vhdr = (struct vlan_ethhdr *)skb->data;
8707 vhdr->h_vlan_TCI = htons(tx_flags >>
8708 IXGBE_TX_FLAGS_VLAN_SHIFT);
8709 } else {
8710 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8711 }
8712 }
8713
8714
8715 first->tx_flags = tx_flags;
8716 first->protocol = protocol;
8717
8718#ifdef IXGBE_FCOE
8719
8720 if ((protocol == htons(ETH_P_FCOE)) &&
8721 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
8722 tso = ixgbe_fso(tx_ring, first, &hdr_len);
8723 if (tso < 0)
8724 goto out_drop;
8725
8726 goto xmit_fcoe;
8727 }
8728
8729#endif
8730
8731#ifdef CONFIG_IXGBE_IPSEC
8732 if (xfrm_offload(skb) &&
8733 !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8734 goto out_drop;
8735#endif
8736 tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
8737 if (tso < 0)
8738 goto out_drop;
8739 else if (!tso)
8740 ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
8741
8742
8743 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
8744 ixgbe_atr(tx_ring, first);
8745
8746#ifdef IXGBE_FCOE
8747xmit_fcoe:
8748#endif
8749 if (ixgbe_tx_map(tx_ring, first, hdr_len))
8750 goto cleanup_tx_timestamp;
8751
8752 return NETDEV_TX_OK;
8753
8754out_drop:
8755 dev_kfree_skb_any(first->skb);
8756 first->skb = NULL;
8757cleanup_tx_timestamp:
8758 if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
8759 dev_kfree_skb_any(adapter->ptp_tx_skb);
8760 adapter->ptp_tx_skb = NULL;
8761 cancel_work_sync(&adapter->ptp_tx_work);
8762 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
8763 }
8764
8765 return NETDEV_TX_OK;
8766}
8767
8768static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
8769 struct net_device *netdev,
8770 struct ixgbe_ring *ring)
8771{
8772 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8773 struct ixgbe_ring *tx_ring;
8774
8775
8776
8777
8778
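 /* The minimum packet size for olinfo paylen is 17 so pad the skb
  * in order to meet this minimum size requirement.
  */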
8779 if (skb_put_padto(skb, 17))
8780 return NETDEV_TX_OK;
8781
8782 tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
8783 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
8784 return NETDEV_TX_BUSY;
8785
8786 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
8787}
8788
8789static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
8790 struct net_device *netdev)
8791{
8792 return __ixgbe_xmit_frame(skb, netdev, NULL);
8793}
8794
8795
8796
8797
8798
8799
8800
8801
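/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/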
8802static int ixgbe_set_mac(struct net_device *netdev, void *p)
8803{
8804 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8805 struct ixgbe_hw *hw = &adapter->hw;
8806 struct sockaddr *addr = p;
8807
8808 if (!is_valid_ether_addr(addr->sa_data))
8809 return -EADDRNOTAVAIL;
8810
8811 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
8812 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
8813
8814 ixgbe_mac_set_default_filter(adapter);
8815
8816 return 0;
8817}
8818
8819static int
8820ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
8821{
8822 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8823 struct ixgbe_hw *hw = &adapter->hw;
8824 u16 value;
8825 int rc;
8826
8827 if (adapter->mii_bus) {
8828 int regnum = addr;
8829
8830 if (devad != MDIO_DEVAD_NONE)
8831 regnum |= (devad << 16) | MII_ADDR_C45;
8832
8833 return mdiobus_read(adapter->mii_bus, prtad, regnum);
8834 }
8835
8836 if (prtad != hw->phy.mdio.prtad)
8837 return -EINVAL;
8838 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
8839 if (!rc)
8840 rc = value;
8841 return rc;
8842}
8843
8844static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
8845 u16 addr, u16 value)
8846{
8847 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8848 struct ixgbe_hw *hw = &adapter->hw;
8849
8850 if (adapter->mii_bus) {
8851 int regnum = addr;
8852
8853 if (devad != MDIO_DEVAD_NONE)
8854 regnum |= (devad << 16) | MII_ADDR_C45;
8855
8856 return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
8857 }
8858
8859 if (prtad != hw->phy.mdio.prtad)
8860 return -EINVAL;
8861 return hw->phy.ops.write_reg(hw, addr, devad, value);
8862}
8863
8864static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
8865{
8866 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8867
8868 switch (cmd) {
8869 case SIOCSHWTSTAMP:
8870 return ixgbe_ptp_set_ts_config(adapter, req);
8871 case SIOCGHWTSTAMP:
8872 return ixgbe_ptp_get_ts_config(adapter, req);
8873 case SIOCGMIIPHY:
8874 if (!adapter->hw.phy.ops.read_reg)
8875 return -EOPNOTSUPP;
8876
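 /* fall through */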
8877 default:
8878 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8879 }
8880}
8881
8882
8883
8884
8885
8886
8887
8888
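/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/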
8889static int ixgbe_add_sanmac_netdev(struct net_device *dev)
8890{
8891 int err = 0;
8892 struct ixgbe_adapter *adapter = netdev_priv(dev);
8893 struct ixgbe_hw *hw = &adapter->hw;
8894
8895 if (is_valid_ether_addr(hw->mac.san_addr)) {
8896 rtnl_lock();
8897 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
8898 rtnl_unlock();
8899
8900
8901 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
8902 }
8903 return err;
8904}
8905
8906
8907
8908
8909
8910
8911
8912
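/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/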
8913static int ixgbe_del_sanmac_netdev(struct net_device *dev)
8914{
8915 int err = 0;
8916 struct ixgbe_adapter *adapter = netdev_priv(dev);
8917 struct ixgbe_mac_info *mac = &adapter->hw.mac;
8918
8919 if (is_valid_ether_addr(mac->san_addr)) {
8920 rtnl_lock();
8921 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
8922 rtnl_unlock();
8923 }
8924 return err;
8925}
8926
8927static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
8928 struct ixgbe_ring *ring)
8929{
8930 u64 bytes, packets;
8931 unsigned int start;
8932
8933 if (ring) {
8934 do {
8935 start = u64_stats_fetch_begin_irq(&ring->syncp);
8936 packets = ring->stats.packets;
8937 bytes = ring->stats.bytes;
8938 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8939 stats->tx_packets += packets;
8940 stats->tx_bytes += bytes;
8941 }
8942}
8943
8944static void ixgbe_get_stats64(struct net_device *netdev,
8945 struct rtnl_link_stats64 *stats)
8946{
8947 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8948 int i;
8949
8950 rcu_read_lock();
8951 for (i = 0; i < adapter->num_rx_queues; i++) {
8952 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
8953 u64 bytes, packets;
8954 unsigned int start;
8955
8956 if (ring) {
8957 do {
8958 start = u64_stats_fetch_begin_irq(&ring->syncp);
8959 packets = ring->stats.packets;
8960 bytes = ring->stats.bytes;
8961 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8962 stats->rx_packets += packets;
8963 stats->rx_bytes += bytes;
8964 }
8965 }
8966
8967 for (i = 0; i < adapter->num_tx_queues; i++) {
8968 struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
8969
8970 ixgbe_get_ring_stats64(stats, ring);
8971 }
8972 for (i = 0; i < adapter->num_xdp_queues; i++) {
8973 struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
8974
8975 ixgbe_get_ring_stats64(stats, ring);
8976 }
8977 rcu_read_unlock();
8978
8979
8980 stats->multicast = netdev->stats.multicast;
8981 stats->rx_errors = netdev->stats.rx_errors;
8982 stats->rx_length_errors = netdev->stats.rx_length_errors;
8983 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
8984 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
8985}
8986
8987#ifdef CONFIG_IXGBE_DCB
8988
8989
8990
8991
8992
8993
8994
8995
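/**
 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid
 * @adapter: Pointer to adapter struct
 * @tc: number of traffic classes currently enabled
 *
 * Verify the RTRUP2TC user priority to traffic class mapping does not
 * reference a traffic class beyond the number currently enabled.
 **/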
8996static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
8997{
8998 struct ixgbe_hw *hw = &adapter->hw;
8999 u32 reg, rsave;
9000 int i;
9001
9002
9003
9004
9005 if (hw->mac.type == ixgbe_mac_82598EB)
9006 return;
9007
9008 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
9009 rsave = reg;
9010
9011 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
9012 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
9013
9014
9015 if (up2tc > tc)
9016 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
9017 }
9018
9019 if (reg != rsave)
9020 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
9021
9022 return;
9023}
9024
9025
9026
9027
9028
9029
9030
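/**
 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
 * @adapter: Pointer to adapter struct
 *
 * Populate the netdev user priority to tc map
 **/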
9031static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
9032{
9033 struct net_device *dev = adapter->netdev;
9034 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
9035 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
9036 u8 prio;
9037
9038 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
9039 u8 tc = 0;
9040
9041 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
9042 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
9043 else if (ets)
9044 tc = ets->prio_tc[prio];
9045
9046 netdev_set_prio_tc_map(dev, prio, tc);
9047 }
9048}
9049
9050#endif
9051static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
9052{
9053 struct ixgbe_adapter *adapter = data;
9054 struct ixgbe_fwd_adapter *accel;
9055 int pool;
9056
9057
9058 if (!netif_is_macvlan(vdev))
9059 return 0;
9060
9061
9062 accel = macvlan_accel_priv(vdev);
9063 if (!accel)
9064 return 0;
9065
9066
9067 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9068 if (pool < adapter->num_rx_pools) {
9069 set_bit(pool, adapter->fwd_bitmask);
9070 accel->pool = pool;
9071 return 0;
9072 }
9073
9074
9075 netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
9076 macvlan_release_l2fw_offload(vdev);
9077
9078
9079 netdev_unbind_sb_channel(adapter->netdev, vdev);
9080 netdev_set_sb_channel(vdev, 0);
9081
9082 kfree(accel);
9083
9084 return 0;
9085}
9086
9087static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
9088{
9089 struct ixgbe_adapter *adapter = netdev_priv(dev);
9090
9091
9092 bitmap_clear(adapter->fwd_bitmask, 1, 63);
9093
9094
9095 netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool,
9096 adapter);
9097}
9098
9099
9100
9101
9102
9103
9104
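/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 **/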
9105int ixgbe_setup_tc(struct net_device *dev, u8 tc)
9106{
9107 struct ixgbe_adapter *adapter = netdev_priv(dev);
9108 struct ixgbe_hw *hw = &adapter->hw;
9109
9110
9111 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
9112 return -EINVAL;
9113
9114 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
9115 return -EINVAL;
9116
9117
9118
9119
9120
9121 if (netif_running(dev))
9122 ixgbe_close(dev);
9123 else
9124 ixgbe_reset(adapter);
9125
9126 ixgbe_clear_interrupt_scheme(adapter);
9127
9128#ifdef CONFIG_IXGBE_DCB
9129 if (tc) {
9130 if (adapter->xdp_prog) {
9131 e_warn(probe, "DCB is not supported with XDP\n");
9132
9133 ixgbe_init_interrupt_scheme(adapter);
9134 if (netif_running(dev))
9135 ixgbe_open(dev);
9136 return -EINVAL;
9137 }
9138
9139 netdev_set_num_tc(dev, tc);
9140 ixgbe_set_prio_tc_map(adapter);
9141
9142 adapter->hw_tcs = tc;
9143 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
9144
9145 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
9146 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
9147 adapter->hw.fc.requested_mode = ixgbe_fc_none;
9148 }
9149 } else {
9150 netdev_reset_tc(dev);
9151
9152 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
9153 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
9154
9155 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
9156 adapter->hw_tcs = tc;
9157
9158 adapter->temp_dcb_cfg.pfc_mode_enable = false;
9159 adapter->dcb_cfg.pfc_mode_enable = false;
9160 }
9161
9162 ixgbe_validate_rtr(adapter, tc);
9163
9164#endif
9165 ixgbe_init_interrupt_scheme(adapter);
9166
9167 ixgbe_defrag_macvlan_pools(dev);
9168
9169 if (netif_running(dev))
9170 return ixgbe_open(dev);
9171
9172 return 0;
9173}
9174
9175static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
9176 struct tc_cls_u32_offload *cls)
9177{
9178 u32 hdl = cls->knode.handle;
9179 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
9180 u32 loc = cls->knode.handle & 0xfffff;
9181 int err = 0, i, j;
9182 struct ixgbe_jump_table *jump = NULL;
9183
9184 if (loc > IXGBE_MAX_HW_ENTRIES)
9185 return -EINVAL;
9186
9187 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
9188 return -EINVAL;
9189
9190
9191 if (uhtid != 0x800) {
9192 jump = adapter->jump_tables[uhtid];
9193 if (!jump)
9194 return -EINVAL;
9195 if (!test_bit(loc - 1, jump->child_loc_map))
9196 return -EINVAL;
9197 clear_bit(loc - 1, jump->child_loc_map);
9198 }
9199
9200
9201 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9202 jump = adapter->jump_tables[i];
9203 if (jump && jump->link_hdl == hdl) {
9204
9205
9206
9207 for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
9208 if (!test_bit(j, jump->child_loc_map))
9209 continue;
9210 spin_lock(&adapter->fdir_perfect_lock);
9211 err = ixgbe_update_ethtool_fdir_entry(adapter,
9212 NULL,
9213 j + 1);
9214 spin_unlock(&adapter->fdir_perfect_lock);
9215 clear_bit(j, jump->child_loc_map);
9216 }
9217
9218 kfree(jump->input);
9219 kfree(jump->mask);
9220 kfree(jump);
9221 adapter->jump_tables[i] = NULL;
9222 return err;
9223 }
9224 }
9225
9226 spin_lock(&adapter->fdir_perfect_lock);
9227 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
9228 spin_unlock(&adapter->fdir_perfect_lock);
9229 return err;
9230}
9231
9232static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
9233 struct tc_cls_u32_offload *cls)
9234{
9235 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9236
9237 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9238 return -EINVAL;
9239
9240
9241
9242
9243 if (cls->hnode.divisor > 0)
9244 return -EINVAL;
9245
9246 set_bit(uhtid - 1, &adapter->tables);
9247 return 0;
9248}
9249
9250static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
9251 struct tc_cls_u32_offload *cls)
9252{
9253 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
9254
9255 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9256 return -EINVAL;
9257
9258 clear_bit(uhtid - 1, &adapter->tables);
9259 return 0;
9260}
9261
9262#ifdef CONFIG_NET_CLS_ACT
9263struct upper_walk_data {
9264 struct ixgbe_adapter *adapter;
9265 u64 action;
9266 int ifindex;
9267 u8 queue;
9268};
9269
9270static int get_macvlan_queue(struct net_device *upper, void *_data)
9271{
9272 if (netif_is_macvlan(upper)) {
9273 struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper);
9274 struct upper_walk_data *data = _data;
9275 struct ixgbe_adapter *adapter = data->adapter;
9276 int ifindex = data->ifindex;
9277
9278 if (vadapter && upper->ifindex == ifindex) {
9279 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
9280 data->action = data->queue;
9281 return 1;
9282 }
9283 }
9284
9285 return 0;
9286}
9287
9288static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
9289 u8 *queue, u64 *action)
9290{
9291 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
9292 unsigned int num_vfs = adapter->num_vfs, vf;
9293 struct upper_walk_data data;
9294 struct net_device *upper;
9295
9296
9297 for (vf = 0; vf < num_vfs; ++vf) {
9298 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
9299 if (upper->ifindex == ifindex) {
9300 *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
9301 *action = vf + 1;
9302 *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
9303 return 0;
9304 }
9305 }
9306
9307
9308 data.adapter = adapter;
9309 data.ifindex = ifindex;
9310 data.action = 0;
9311 data.queue = 0;
9312 if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
9313 get_macvlan_queue, &data)) {
9314 *action = data.action;
9315 *queue = data.queue;
9316
9317 return 0;
9318 }
9319
9320 return -EINVAL;
9321}
9322
9323static int parse_tc_actions(struct ixgbe_adapter *adapter,
9324 struct tcf_exts *exts, u64 *action, u8 *queue)
9325{
9326 const struct tc_action *a;
9327 int i;
9328
9329 if (!tcf_exts_has_actions(exts))
9330 return -EINVAL;
9331
9332 tcf_exts_for_each_action(i, a, exts) {
9333
9334 if (is_tcf_gact_shot(a)) {
9335 *action = IXGBE_FDIR_DROP_QUEUE;
9336 *queue = IXGBE_FDIR_DROP_QUEUE;
9337 return 0;
9338 }
9339
9340
9341 if (is_tcf_mirred_egress_redirect(a)) {
9342 struct net_device *dev = tcf_mirred_dev(a);
9343
9344 if (!dev)
9345 return -EINVAL;
9346 return handle_redirect_action(adapter, dev->ifindex,
9347 queue, action);
9348 }
9349
9350 return -EINVAL;
9351 }
9352
9353 return -EINVAL;
9354}
9355#else
9356static int parse_tc_actions(struct ixgbe_adapter *adapter,
9357 struct tcf_exts *exts, u64 *action, u8 *queue)
9358{
9359 return -EINVAL;
9360}
9361#endif
9362
9363static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
9364 union ixgbe_atr_input *mask,
9365 struct tc_cls_u32_offload *cls,
9366 struct ixgbe_mat_field *field_ptr,
9367 struct ixgbe_nexthdr *nexthdr)
9368{
9369 int i, j, off;
9370 __be32 val, m;
9371 bool found_entry = false, found_jump_field = false;
9372
9373 for (i = 0; i < cls->knode.sel->nkeys; i++) {
9374 off = cls->knode.sel->keys[i].off;
9375 val = cls->knode.sel->keys[i].val;
9376 m = cls->knode.sel->keys[i].mask;
9377
9378 for (j = 0; field_ptr[j].val; j++) {
9379 if (field_ptr[j].off == off) {
9380 field_ptr[j].val(input, mask, (__force u32)val,
9381 (__force u32)m);
9382 input->filter.formatted.flow_type |=
9383 field_ptr[j].type;
9384 found_entry = true;
9385 break;
9386 }
9387 }
9388 if (nexthdr) {
9389 if (nexthdr->off == cls->knode.sel->keys[i].off &&
9390 nexthdr->val ==
9391 (__force u32)cls->knode.sel->keys[i].val &&
9392 nexthdr->mask ==
9393 (__force u32)cls->knode.sel->keys[i].mask)
9394 found_jump_field = true;
9395 else
9396 continue;
9397 }
9398 }
9399
9400 if (nexthdr && !found_jump_field)
9401 return -EINVAL;
9402
9403 if (!found_entry)
9404 return 0;
9405
9406 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
9407 IXGBE_ATR_L4TYPE_MASK;
9408
9409 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
9410 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
9411
9412 return 0;
9413}
9414
9415static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
9416 struct tc_cls_u32_offload *cls)
9417{
9418 __be16 protocol = cls->common.protocol;
9419 u32 loc = cls->knode.handle & 0xfffff;
9420 struct ixgbe_hw *hw = &adapter->hw;
9421 struct ixgbe_mat_field *field_ptr;
9422 struct ixgbe_fdir_filter *input = NULL;
9423 union ixgbe_atr_input *mask = NULL;
9424 struct ixgbe_jump_table *jump = NULL;
9425 int i, err = -EINVAL;
9426 u8 queue;
9427 u32 uhtid, link_uhtid;
9428
9429 uhtid = TC_U32_USERHTID(cls->knode.handle);
9430 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
9431
9432
9433
9434
9435
9436
9437
9438
9439 if (protocol != htons(ETH_P_IP))
9440 return err;
9441
9442 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
9443 e_err(drv, "Location out of range\n");
9444 return err;
9445 }
9446
9447
9448
9449
9450
9451
9452
9453
9454 if (uhtid == 0x800) {
9455 field_ptr = (adapter->jump_tables[0])->mat;
9456 } else {
9457 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9458 return err;
9459 if (!adapter->jump_tables[uhtid])
9460 return err;
9461 field_ptr = (adapter->jump_tables[uhtid])->mat;
9462 }
9463
9464 if (!field_ptr)
9465 return err;
9466
9467
9468
9469
9470
9471
9472
9473 if (link_uhtid) {
9474 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
9475
9476 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
9477 return err;
9478
9479 if (!test_bit(link_uhtid - 1, &adapter->tables))
9480 return err;
9481
9482
9483
9484
9485
9486
9487 if (adapter->jump_tables[link_uhtid] &&
9488 (adapter->jump_tables[link_uhtid])->link_hdl) {
9489 e_err(drv, "Link filter exists for link: %x\n",
9490 link_uhtid);
9491 return err;
9492 }
9493
9494 for (i = 0; nexthdr[i].jump; i++) {
9495 if (nexthdr[i].o != cls->knode.sel->offoff ||
9496 nexthdr[i].s != cls->knode.sel->offshift ||
9497 nexthdr[i].m !=
9498 (__force u32)cls->knode.sel->offmask)
9499 return err;
9500
9501 jump = kzalloc(sizeof(*jump), GFP_KERNEL);
9502 if (!jump)
9503 return -ENOMEM;
9504 input = kzalloc(sizeof(*input), GFP_KERNEL);
9505 if (!input) {
9506 err = -ENOMEM;
9507 goto free_jump;
9508 }
9509 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9510 if (!mask) {
9511 err = -ENOMEM;
9512 goto free_input;
9513 }
9514 jump->input = input;
9515 jump->mask = mask;
9516 jump->link_hdl = cls->knode.handle;
9517
9518 err = ixgbe_clsu32_build_input(input, mask, cls,
9519 field_ptr, &nexthdr[i]);
9520 if (!err) {
9521 jump->mat = nexthdr[i].jump;
9522 adapter->jump_tables[link_uhtid] = jump;
9523 break;
9524 } else {
9525 kfree(mask);
9526 kfree(input);
9527 kfree(jump);
9528 }
9529 }
9530 return 0;
9531 }
9532
9533 input = kzalloc(sizeof(*input), GFP_KERNEL);
9534 if (!input)
9535 return -ENOMEM;
9536 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9537 if (!mask) {
9538 err = -ENOMEM;
9539 goto free_input;
9540 }
9541
9542 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
9543 if ((adapter->jump_tables[uhtid])->input)
9544 memcpy(input, (adapter->jump_tables[uhtid])->input,
9545 sizeof(*input));
9546 if ((adapter->jump_tables[uhtid])->mask)
9547 memcpy(mask, (adapter->jump_tables[uhtid])->mask,
9548 sizeof(*mask));
9549
9550
9551
9552
9553 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9554 struct ixgbe_jump_table *link = adapter->jump_tables[i];
9555
9556 if (link && (test_bit(loc - 1, link->child_loc_map))) {
9557 e_err(drv, "Filter exists in location: %x\n",
9558 loc);
9559 err = -EINVAL;
9560 goto err_out;
9561 }
9562 }
9563 }
9564 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
9565 if (err)
9566 goto err_out;
9567
9568 err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
9569 &queue);
9570 if (err < 0)
9571 goto err_out;
9572
9573 input->sw_idx = loc;
9574
9575 spin_lock(&adapter->fdir_perfect_lock);
9576
9577 if (hlist_empty(&adapter->fdir_filter_list)) {
9578 memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
9579 err = ixgbe_fdir_set_input_mask_82599(hw, mask);
9580 if (err)
9581 goto err_out_w_lock;
9582 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
9583 err = -EINVAL;
9584 goto err_out_w_lock;
9585 }
9586
9587 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
9588 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
9589 input->sw_idx, queue);
9590 if (!err)
9591 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
9592 spin_unlock(&adapter->fdir_perfect_lock);
9593
9594 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
9595 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
9596
9597 kfree(mask);
9598 return err;
9599err_out_w_lock:
9600 spin_unlock(&adapter->fdir_perfect_lock);
9601err_out:
9602 kfree(mask);
9603free_input:
9604 kfree(input);
9605free_jump:
9606 kfree(jump);
9607 return err;
9608}
9609
9610static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
9611 struct tc_cls_u32_offload *cls_u32)
9612{
9613 switch (cls_u32->command) {
9614 case TC_CLSU32_NEW_KNODE:
9615 case TC_CLSU32_REPLACE_KNODE:
9616 return ixgbe_configure_clsu32(adapter, cls_u32);
9617 case TC_CLSU32_DELETE_KNODE:
9618 return ixgbe_delete_clsu32(adapter, cls_u32);
9619 case TC_CLSU32_NEW_HNODE:
9620 case TC_CLSU32_REPLACE_HNODE:
9621 return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32);
9622 case TC_CLSU32_DELETE_HNODE:
9623 return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32);
9624 default:
9625 return -EOPNOTSUPP;
9626 }
9627}
9628
9629static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9630 void *cb_priv)
9631{
9632 struct ixgbe_adapter *adapter = cb_priv;
9633
9634 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
9635 return -EOPNOTSUPP;
9636
9637 switch (type) {
9638 case TC_SETUP_CLSU32:
9639 return ixgbe_setup_tc_cls_u32(adapter, type_data);
9640 default:
9641 return -EOPNOTSUPP;
9642 }
9643}
9644
9645static int ixgbe_setup_tc_mqprio(struct net_device *dev,
9646 struct tc_mqprio_qopt *mqprio)
9647{
9648 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9649 return ixgbe_setup_tc(dev, mqprio->num_tc);
9650}
9651
9652static LIST_HEAD(ixgbe_block_cb_list);
9653
9654static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
9655 void *type_data)
9656{
9657 struct ixgbe_adapter *adapter = netdev_priv(dev);
9658
9659 switch (type) {
9660 case TC_SETUP_BLOCK:
9661 return flow_block_cb_setup_simple(type_data,
9662 &ixgbe_block_cb_list,
9663 ixgbe_setup_tc_block_cb,
9664 adapter, adapter, true);
9665 case TC_SETUP_QDISC_MQPRIO:
9666 return ixgbe_setup_tc_mqprio(dev, type_data);
9667 default:
9668 return -EOPNOTSUPP;
9669 }
9670}
9671
9672#ifdef CONFIG_PCI_IOV
9673void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
9674{
9675 struct net_device *netdev = adapter->netdev;
9676
9677 rtnl_lock();
9678 ixgbe_setup_tc(netdev, adapter->hw_tcs);
9679 rtnl_unlock();
9680}
9681
9682#endif
9683void ixgbe_do_reset(struct net_device *netdev)
9684{
9685 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9686
9687 if (netif_running(netdev))
9688 ixgbe_reinit_locked(adapter);
9689 else
9690 ixgbe_reset(adapter);
9691}
9692
9693static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
9694 netdev_features_t features)
9695{
9696 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9697
9698
9699 if (!(features & NETIF_F_RXCSUM))
9700 features &= ~NETIF_F_LRO;
9701
9702
9703 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
9704 features &= ~NETIF_F_LRO;
9705
9706 if (adapter->xdp_prog && (features & NETIF_F_LRO)) {
9707 e_dev_err("LRO is not supported with XDP\n");
9708 features &= ~NETIF_F_LRO;
9709 }
9710
9711 return features;
9712}
9713
9714static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
9715{
9716 int rss = min_t(int, ixgbe_max_rss_indices(adapter),
9717 num_online_cpus());
9718
9719
9720 if (!adapter->ring_feature[RING_F_VMDQ].offset)
9721 adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED |
9722 IXGBE_FLAG_SRIOV_ENABLED);
9723
9724 adapter->ring_feature[RING_F_RSS].limit = rss;
9725 adapter->ring_feature[RING_F_VMDQ].limit = 1;
9726
9727 ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs);
9728}
9729
9730static int ixgbe_set_features(struct net_device *netdev,
9731 netdev_features_t features)
9732{
9733 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9734 netdev_features_t changed = netdev->features ^ features;
9735 bool need_reset = false;
9736
9737
9738 if (!(features & NETIF_F_LRO)) {
9739 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9740 need_reset = true;
9741 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
9742 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
9743 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
9744 if (adapter->rx_itr_setting == 1 ||
9745 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
9746 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
9747 need_reset = true;
9748 } else if ((changed ^ features) & NETIF_F_LRO) {
9749 e_info(probe, "rx-usecs set too low, "
9750 "disabling RSC\n");
9751 }
9752 }
9753
9754
9755
9756
9757
9758 if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
9759
9760 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
9761 need_reset = true;
9762
9763 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
9764 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9765 } else {
9766
9767 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
9768 need_reset = true;
9769
9770 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9771
9772
9773 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
9774
9775 (adapter->hw_tcs > 1) ||
9776
9777 (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
9778
9779 (!adapter->atr_sample_rate))
9780 ;
9781 else
9782 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
9783 }
9784
9785 if (changed & NETIF_F_RXALL)
9786 need_reset = true;
9787
9788 netdev->features = features;
9789
9790 if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
9791 if (features & NETIF_F_RXCSUM) {
9792 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9793 } else {
9794 u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9795
9796 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9797 }
9798 }
9799
9800 if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
9801 if (features & NETIF_F_RXCSUM) {
9802 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9803 } else {
9804 u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9805
9806 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9807 }
9808 }
9809
9810 if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
9811 ixgbe_reset_l2fw_offload(adapter);
9812 else if (need_reset)
9813 ixgbe_do_reset(netdev);
9814 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
9815 NETIF_F_HW_VLAN_CTAG_FILTER))
9816 ixgbe_set_rx_mode(netdev);
9817
9818 return 1;
9819}
9820
9821
9822
9823
9824
9825
9826static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
9827 struct udp_tunnel_info *ti)
9828{
9829 struct ixgbe_adapter *adapter = netdev_priv(dev);
9830 struct ixgbe_hw *hw = &adapter->hw;
9831 __be16 port = ti->port;
9832 u32 port_shift = 0;
9833 u32 reg;
9834
9835 if (ti->sa_family != AF_INET)
9836 return;
9837
9838 switch (ti->type) {
9839 case UDP_TUNNEL_TYPE_VXLAN:
9840 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9841 return;
9842
9843 if (adapter->vxlan_port == port)
9844 return;
9845
9846 if (adapter->vxlan_port) {
9847 netdev_info(dev,
9848 "VXLAN port %d set, not adding port %d\n",
9849 ntohs(adapter->vxlan_port),
9850 ntohs(port));
9851 return;
9852 }
9853
9854 adapter->vxlan_port = port;
9855 break;
9856 case UDP_TUNNEL_TYPE_GENEVE:
9857 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9858 return;
9859
9860 if (adapter->geneve_port == port)
9861 return;
9862
9863 if (adapter->geneve_port) {
9864 netdev_info(dev,
9865 "GENEVE port %d set, not adding port %d\n",
9866 ntohs(adapter->geneve_port),
9867 ntohs(port));
9868 return;
9869 }
9870
9871 port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
9872 adapter->geneve_port = port;
9873 break;
9874 default:
9875 return;
9876 }
9877
9878 reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
9879 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
9880}
9881
9882
9883
9884
9885
9886
9887static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
9888 struct udp_tunnel_info *ti)
9889{
9890 struct ixgbe_adapter *adapter = netdev_priv(dev);
9891 u32 port_mask;
9892
9893 if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
9894 ti->type != UDP_TUNNEL_TYPE_GENEVE)
9895 return;
9896
9897 if (ti->sa_family != AF_INET)
9898 return;
9899
9900 switch (ti->type) {
9901 case UDP_TUNNEL_TYPE_VXLAN:
9902 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9903 return;
9904
9905 if (adapter->vxlan_port != ti->port) {
9906 netdev_info(dev, "VXLAN port %d not found\n",
9907 ntohs(ti->port));
9908 return;
9909 }
9910
9911 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9912 break;
9913 case UDP_TUNNEL_TYPE_GENEVE:
9914 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9915 return;
9916
9917 if (adapter->geneve_port != ti->port) {
9918 netdev_info(dev, "GENEVE port %d not found\n",
9919 ntohs(ti->port));
9920 return;
9921 }
9922
9923 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9924 break;
9925 default:
9926 return;
9927 }
9928
9929 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9930 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9931}
9932
9933static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9934 struct net_device *dev,
9935 const unsigned char *addr, u16 vid,
9936 u16 flags,
9937 struct netlink_ext_ack *extack)
9938{
9939
9940 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
9941 struct ixgbe_adapter *adapter = netdev_priv(dev);
9942 u16 pool = VMDQ_P(0);
9943
9944 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
9945 return -ENOMEM;
9946 }
9947
9948 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
9949}
9950
9951
9952
9953
9954
9955
9956
9957
9958static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
9959 __u16 mode)
9960{
9961 struct ixgbe_hw *hw = &adapter->hw;
9962 unsigned int p, num_pools;
9963 u32 vmdctl;
9964
9965 switch (mode) {
9966 case BRIDGE_MODE_VEPA:
9967
9968 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
9969
9970
9971
9972
9973
9974 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9975 vmdctl |= IXGBE_VT_CTL_REPLEN;
9976 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9977
9978
9979
9980
9981 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9982 for (p = 0; p < num_pools; p++) {
9983 if (hw->mac.ops.set_source_address_pruning)
9984 hw->mac.ops.set_source_address_pruning(hw,
9985 true,
9986 p);
9987 }
9988 break;
9989 case BRIDGE_MODE_VEB:
9990
9991 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
9992 IXGBE_PFDTXGSWC_VT_LBEN);
9993
9994
9995
9996
9997 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9998 if (!adapter->num_vfs)
9999 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
10000 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
10001
10002
10003
10004
10005 num_pools = adapter->num_vfs + adapter->num_rx_pools;
10006 for (p = 0; p < num_pools; p++) {
10007 if (hw->mac.ops.set_source_address_pruning)
10008 hw->mac.ops.set_source_address_pruning(hw,
10009 false,
10010 p);
10011 }
10012 break;
10013 default:
10014 return -EINVAL;
10015 }
10016
10017 adapter->bridge_mode = mode;
10018
10019 e_info(drv, "enabling bridge mode: %s\n",
10020 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
10021
10022 return 0;
10023}
10024
10025static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
10026 struct nlmsghdr *nlh, u16 flags,
10027 struct netlink_ext_ack *extack)
10028{
10029 struct ixgbe_adapter *adapter = netdev_priv(dev);
10030 struct nlattr *attr, *br_spec;
10031 int rem;
10032
10033 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
10034 return -EOPNOTSUPP;
10035
10036 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
10037 if (!br_spec)
10038 return -EINVAL;
10039
10040 nla_for_each_nested(attr, br_spec, rem) {
10041 int status;
10042 __u16 mode;
10043
10044 if (nla_type(attr) != IFLA_BRIDGE_MODE)
10045 continue;
10046
10047 if (nla_len(attr) < sizeof(mode))
10048 return -EINVAL;
10049
10050 mode = nla_get_u16(attr);
10051 status = ixgbe_configure_bridge_mode(adapter, mode);
10052 if (status)
10053 return status;
10054
10055 break;
10056 }
10057
10058 return 0;
10059}
10060
10061static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
10062 struct net_device *dev,
10063 u32 filter_mask, int nlflags)
10064{
10065 struct ixgbe_adapter *adapter = netdev_priv(dev);
10066
10067 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
10068 return 0;
10069
10070 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
10071 adapter->bridge_mode, 0, 0, nlflags,
10072 filter_mask, NULL);
10073}
10074
10075static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
10076{
10077 struct ixgbe_adapter *adapter = netdev_priv(pdev);
10078 struct ixgbe_fwd_adapter *accel;
10079 int tcs = adapter->hw_tcs ? : 1;
10080 int pool, err;
10081
10082 if (adapter->xdp_prog) {
10083 e_warn(probe, "L2FW offload is not supported with XDP\n");
10084 return ERR_PTR(-EINVAL);
10085 }
10086
10087
10088
10089
10090
10091 if (!macvlan_supports_dest_filter(vdev))
10092 return ERR_PTR(-EMEDIUMTYPE);
10093
10094
10095
10096
10097
10098 if (netif_is_multiqueue(vdev))
10099 return ERR_PTR(-ERANGE);
10100
10101 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
10102 if (pool == adapter->num_rx_pools) {
10103 u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
10104 u16 reserved_pools;
10105
10106 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
10107 adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
10108 adapter->num_rx_pools > IXGBE_MAX_MACVLANS)
10109 return ERR_PTR(-EBUSY);
10110
10111
10112
10113
10114
10115 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
10116 return ERR_PTR(-EBUSY);
10117
10118
10119 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED |
10120 IXGBE_FLAG_SRIOV_ENABLED;
10121
10122
10123
10124
10125
10126 if (used_pools < 32 && adapter->num_rx_pools < 16)
10127 reserved_pools = min_t(u16,
10128 32 - used_pools,
10129 16 - adapter->num_rx_pools);
10130 else if (adapter->num_rx_pools < 32)
10131 reserved_pools = min_t(u16,
10132 64 - used_pools,
10133 32 - adapter->num_rx_pools);
10134 else
10135 reserved_pools = 64 - used_pools;
10136
10137
10138 if (!reserved_pools)
10139 return ERR_PTR(-EBUSY);
10140
10141 adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools;
10142
10143
10144 err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
10145 if (err)
10146 return ERR_PTR(err);
10147
10148 if (pool >= adapter->num_rx_pools)
10149 return ERR_PTR(-ENOMEM);
10150 }
10151
10152 accel = kzalloc(sizeof(*accel), GFP_KERNEL);
10153 if (!accel)
10154 return ERR_PTR(-ENOMEM);
10155
10156 set_bit(pool, adapter->fwd_bitmask);
10157 netdev_set_sb_channel(vdev, pool);
10158 accel->pool = pool;
10159 accel->netdev = vdev;
10160
10161 if (!netif_running(pdev))
10162 return accel;
10163
10164 err = ixgbe_fwd_ring_up(adapter, accel);
10165 if (err)
10166 return ERR_PTR(err);
10167
10168 return accel;
10169}
10170
10171static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
10172{
10173 struct ixgbe_fwd_adapter *accel = priv;
10174 struct ixgbe_adapter *adapter = netdev_priv(pdev);
10175 unsigned int rxbase = accel->rx_base_queue;
10176 unsigned int i;
10177
10178
10179 ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
10180 VMDQ_P(accel->pool));
10181
10182
10183
10184
10185 usleep_range(10000, 20000);
10186
10187 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
10188 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
10189 struct ixgbe_q_vector *qv = ring->q_vector;
10190
10191
10192
10193
10194 if (netif_running(adapter->netdev))
10195 napi_synchronize(&qv->napi);
10196 ring->netdev = NULL;
10197 }
10198
10199
10200 netdev_unbind_sb_channel(pdev, accel->netdev);
10201 netdev_set_sb_channel(accel->netdev, 0);
10202
10203 clear_bit(accel->pool, adapter->fwd_bitmask);
10204 kfree(accel);
10205}
10206
10207#define IXGBE_MAX_MAC_HDR_LEN 127
10208#define IXGBE_MAX_NETWORK_HDR_LEN 511
10209
10210static netdev_features_t
10211ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
10212 netdev_features_t features)
10213{
10214 unsigned int network_hdr_len, mac_hdr_len;
10215
10216
10217 mac_hdr_len = skb_network_header(skb) - skb->data;
10218 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
10219 return features & ~(NETIF_F_HW_CSUM |
10220 NETIF_F_SCTP_CRC |
10221 NETIF_F_GSO_UDP_L4 |
10222 NETIF_F_HW_VLAN_CTAG_TX |
10223 NETIF_F_TSO |
10224 NETIF_F_TSO6);
10225
10226 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
10227 if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
10228 return features & ~(NETIF_F_HW_CSUM |
10229 NETIF_F_SCTP_CRC |
10230 NETIF_F_GSO_UDP_L4 |
10231 NETIF_F_TSO |
10232 NETIF_F_TSO6);
10233
10234
10235
10236
10237
10238
10239 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
10240#ifdef CONFIG_IXGBE_IPSEC
10241 if (!secpath_exists(skb))
10242#endif
10243 features &= ~NETIF_F_TSO;
10244 }
10245
10246 return features;
10247}
10248
10249static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
10250{
10251 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
10252 struct ixgbe_adapter *adapter = netdev_priv(dev);
10253 struct bpf_prog *old_prog;
10254 bool need_reset;
10255
10256 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
10257 return -EINVAL;
10258
10259 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
10260 return -EINVAL;
10261
10262
10263 for (i = 0; i < adapter->num_rx_queues; i++) {
10264 struct ixgbe_ring *ring = adapter->rx_ring[i];
10265
10266 if (ring_is_rsc_enabled(ring))
10267 return -EINVAL;
10268
10269 if (frame_size > ixgbe_rx_bufsz(ring))
10270 return -EINVAL;
10271 }
10272
10273 if (nr_cpu_ids > MAX_XDP_QUEUES)
10274 return -ENOMEM;
10275
10276 old_prog = xchg(&adapter->xdp_prog, prog);
10277 need_reset = (!!prog != !!old_prog);
10278
10279
10280 if (need_reset) {
10281 int err;
10282
10283 if (!prog)
10284
10285 synchronize_rcu();
10286 err = ixgbe_setup_tc(dev, adapter->hw_tcs);
10287
10288 if (err) {
10289 rcu_assign_pointer(adapter->xdp_prog, old_prog);
10290 return -EINVAL;
10291 }
10292 } else {
10293 for (i = 0; i < adapter->num_rx_queues; i++)
10294 (void)xchg(&adapter->rx_ring[i]->xdp_prog,
10295 adapter->xdp_prog);
10296 }
10297
10298 if (old_prog)
10299 bpf_prog_put(old_prog);
10300
10301
10302
10303
10304 if (need_reset && prog)
10305 for (i = 0; i < adapter->num_rx_queues; i++)
10306 if (adapter->xdp_ring[i]->xsk_umem)
10307 (void)ixgbe_xsk_wakeup(adapter->netdev, i,
10308 XDP_WAKEUP_RX);
10309
10310 return 0;
10311}
10312
10313static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
10314{
10315 struct ixgbe_adapter *adapter = netdev_priv(dev);
10316
10317 switch (xdp->command) {
10318 case XDP_SETUP_PROG:
10319 return ixgbe_xdp_setup(dev, xdp->prog);
10320 case XDP_QUERY_PROG:
10321 xdp->prog_id = adapter->xdp_prog ?
10322 adapter->xdp_prog->aux->id : 0;
10323 return 0;
10324 case XDP_SETUP_XSK_UMEM:
10325 return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
10326 xdp->xsk.queue_id);
10327
10328 default:
10329 return -EINVAL;
10330 }
10331}
10332
10333void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
10334{
10335
10336
10337
10338 wmb();
10339 writel(ring->next_to_use, ring->tail);
10340}
10341
10342static int ixgbe_xdp_xmit(struct net_device *dev, int n,
10343 struct xdp_frame **frames, u32 flags)
10344{
10345 struct ixgbe_adapter *adapter = netdev_priv(dev);
10346 struct ixgbe_ring *ring;
10347 int drops = 0;
10348 int i;
10349
10350 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
10351 return -ENETDOWN;
10352
10353 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
10354 return -EINVAL;
10355
10356
10357
10358
10359 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
10360 if (unlikely(!ring))
10361 return -ENXIO;
10362
10363 if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
10364 return -ENXIO;
10365
10366 for (i = 0; i < n; i++) {
10367 struct xdp_frame *xdpf = frames[i];
10368 int err;
10369
10370 err = ixgbe_xmit_xdp_ring(adapter, xdpf);
10371 if (err != IXGBE_XDP_TX) {
10372 xdp_return_frame_rx_napi(xdpf);
10373 drops++;
10374 }
10375 }
10376
10377 if (unlikely(flags & XDP_XMIT_FLUSH))
10378 ixgbe_xdp_ring_update_tail(ring);
10379
10380 return n - drops;
10381}
10382
10383static const struct net_device_ops ixgbe_netdev_ops = {
10384 .ndo_open = ixgbe_open,
10385 .ndo_stop = ixgbe_close,
10386 .ndo_start_xmit = ixgbe_xmit_frame,
10387 .ndo_set_rx_mode = ixgbe_set_rx_mode,
10388 .ndo_validate_addr = eth_validate_addr,
10389 .ndo_set_mac_address = ixgbe_set_mac,
10390 .ndo_change_mtu = ixgbe_change_mtu,
10391 .ndo_tx_timeout = ixgbe_tx_timeout,
10392 .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
10393 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
10394 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
10395 .ndo_do_ioctl = ixgbe_ioctl,
10396 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
10397 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
10398 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
10399 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
10400 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
10401 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
10402 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
10403 .ndo_get_stats64 = ixgbe_get_stats64,
10404 .ndo_setup_tc = __ixgbe_setup_tc,
10405#ifdef IXGBE_FCOE
10406 .ndo_select_queue = ixgbe_select_queue,
10407 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
10408 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
10409 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
10410 .ndo_fcoe_enable = ixgbe_fcoe_enable,
10411 .ndo_fcoe_disable = ixgbe_fcoe_disable,
10412 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
10413 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
10414#endif
10415 .ndo_set_features = ixgbe_set_features,
10416 .ndo_fix_features = ixgbe_fix_features,
10417 .ndo_fdb_add = ixgbe_ndo_fdb_add,
10418 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
10419 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
10420 .ndo_dfwd_add_station = ixgbe_fwd_add,
10421 .ndo_dfwd_del_station = ixgbe_fwd_del,
10422 .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
10423 .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
10424 .ndo_features_check = ixgbe_features_check,
10425 .ndo_bpf = ixgbe_xdp,
10426 .ndo_xdp_xmit = ixgbe_xdp_xmit,
10427 .ndo_xsk_wakeup = ixgbe_xsk_wakeup,
10428};
10429
10430static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
10431 struct ixgbe_ring *tx_ring)
10432{
10433 unsigned long wait_delay, delay_interval;
10434 struct ixgbe_hw *hw = &adapter->hw;
10435 u8 reg_idx = tx_ring->reg_idx;
10436 int wait_loop;
10437 u32 txdctl;
10438
10439 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
10440
10441
10442 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10443
10444 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10445 wait_delay = delay_interval;
10446
10447 while (wait_loop--) {
10448 usleep_range(wait_delay, wait_delay + 10);
10449 wait_delay += delay_interval * 2;
10450 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
10451
10452 if (!(txdctl & IXGBE_TXDCTL_ENABLE))
10453 return;
10454 }
10455
10456 e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
10457}
10458
10459static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
10460 struct ixgbe_ring *tx_ring)
10461{
10462 set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10463 ixgbe_disable_txr_hw(adapter, tx_ring);
10464}
10465
10466static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
10467 struct ixgbe_ring *rx_ring)
10468{
10469 unsigned long wait_delay, delay_interval;
10470 struct ixgbe_hw *hw = &adapter->hw;
10471 u8 reg_idx = rx_ring->reg_idx;
10472 int wait_loop;
10473 u32 rxdctl;
10474
10475 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10476 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
10477 rxdctl |= IXGBE_RXDCTL_SWFLSH;
10478
10479
10480 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
10481
10482
10483 if (hw->mac.type == ixgbe_mac_82598EB &&
10484 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
10485 return;
10486
10487
10488 delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
10489
10490 wait_loop = IXGBE_MAX_RX_DESC_POLL;
10491 wait_delay = delay_interval;
10492
10493 while (wait_loop--) {
10494 usleep_range(wait_delay, wait_delay + 10);
10495 wait_delay += delay_interval * 2;
10496 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
10497
10498 if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
10499 return;
10500 }
10501
10502 e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
10503}
10504
10505static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
10506{
10507 memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
10508 memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
10509}
10510
10511static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
10512{
10513 memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
10514 memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
10515}
10516
10517
10518
10519
10520
10521
10522
10523
10524
10525void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
10526{
10527 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10528
10529 rx_ring = adapter->rx_ring[ring];
10530 tx_ring = adapter->tx_ring[ring];
10531 xdp_ring = adapter->xdp_ring[ring];
10532
10533 ixgbe_disable_txr(adapter, tx_ring);
10534 if (xdp_ring)
10535 ixgbe_disable_txr(adapter, xdp_ring);
10536 ixgbe_disable_rxr_hw(adapter, rx_ring);
10537
10538 if (xdp_ring)
10539 synchronize_rcu();
10540
10541
10542 napi_disable(&rx_ring->q_vector->napi);
10543
10544 ixgbe_clean_tx_ring(tx_ring);
10545 if (xdp_ring)
10546 ixgbe_clean_tx_ring(xdp_ring);
10547 ixgbe_clean_rx_ring(rx_ring);
10548
10549 ixgbe_reset_txr_stats(tx_ring);
10550 if (xdp_ring)
10551 ixgbe_reset_txr_stats(xdp_ring);
10552 ixgbe_reset_rxr_stats(rx_ring);
10553}
10554
10555
10556
10557
10558
10559
10560
10561
10562
10563void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
10564{
10565 struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
10566
10567 rx_ring = adapter->rx_ring[ring];
10568 tx_ring = adapter->tx_ring[ring];
10569 xdp_ring = adapter->xdp_ring[ring];
10570
10571
10572 napi_enable(&rx_ring->q_vector->napi);
10573
10574 ixgbe_configure_tx_ring(adapter, tx_ring);
10575 if (xdp_ring)
10576 ixgbe_configure_tx_ring(adapter, xdp_ring);
10577 ixgbe_configure_rx_ring(adapter, rx_ring);
10578
10579 clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
10580 if (xdp_ring)
10581 clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
10582}
10583
10584
10585
10586
10587
10588
10589
10590
10591
10592
10593static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
10594{
10595 struct pci_dev *entry, *pdev = adapter->pdev;
10596 int physfns = 0;
10597
10598
10599
10600
10601
10602 if (ixgbe_pcie_from_parent(&adapter->hw))
10603 physfns = 4;
10604
10605 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
10606
10607 if (entry->is_virtfn)
10608 continue;
10609
10610
10611
10612
10613
10614
10615
10616 if ((entry->vendor != pdev->vendor) ||
10617 (entry->device != pdev->device))
10618 return -1;
10619
10620 physfns++;
10621 }
10622
10623 return physfns;
10624}
10625
10626
10627
10628
10629
10630
10631
10632
10633
10634
10635
10636bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
10637 u16 subdevice_id)
10638{
10639 struct ixgbe_hw *hw = &adapter->hw;
10640 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
10641
10642
10643 if (hw->mac.type == ixgbe_mac_82598EB)
10644 return false;
10645
10646
10647 if (hw->mac.type >= ixgbe_mac_X540) {
10648 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
10649 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
10650 (hw->bus.func == 0)))
10651 return true;
10652 }
10653
10654
10655 switch (device_id) {
10656 case IXGBE_DEV_ID_82599_SFP:
10657
10658 switch (subdevice_id) {
10659 case IXGBE_SUBDEV_ID_82599_560FLR:
10660 case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
10661 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
10662 case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
10663
10664 if (hw->bus.func != 0)
10665 break;
10666
10667 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
10668 case IXGBE_SUBDEV_ID_82599_SFP:
10669 case IXGBE_SUBDEV_ID_82599_RNDC:
10670 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
10671 case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
10672 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
10673 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
10674 return true;
10675 }
10676 break;
10677 case IXGBE_DEV_ID_82599EN_SFP:
10678
10679 switch (subdevice_id) {
10680 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
10681 return true;
10682 }
10683 break;
10684 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
10685
10686 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
10687 return true;
10688 break;
10689 case IXGBE_DEV_ID_82599_KX4:
10690 return true;
10691 default:
10692 break;
10693 }
10694
10695 return false;
10696}
10697
10698
10699
10700
10701
10702
10703
10704
10705static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
10706{
10707 struct ixgbe_hw *hw = &adapter->hw;
10708 struct ixgbe_nvm_version nvm_ver;
10709
10710 ixgbe_get_oem_prod_version(hw, &nvm_ver);
10711 if (nvm_ver.oem_valid) {
10712 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10713 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
10714 nvm_ver.oem_release);
10715 return;
10716 }
10717
10718 ixgbe_get_etk_id(hw, &nvm_ver);
10719 ixgbe_get_orom_version(hw, &nvm_ver);
10720
10721 if (nvm_ver.or_valid) {
10722 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10723 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
10724 nvm_ver.or_build, nvm_ver.or_patch);
10725 return;
10726 }
10727
10728
10729 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10730 "0x%08x", nvm_ver.etk_id);
10731}
10732
10733
10734
10735
10736
10737
10738
10739
10740
10741
10742
10743
10744static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10745{
10746 struct net_device *netdev;
10747 struct ixgbe_adapter *adapter = NULL;
10748 struct ixgbe_hw *hw;
10749 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
10750 int i, err, pci_using_dac, expected_gts;
10751 unsigned int indices = MAX_TX_QUEUES;
10752 u8 part_str[IXGBE_PBANUM_LENGTH];
10753 bool disable_dev = false;
10754#ifdef IXGBE_FCOE
10755 u16 device_caps;
10756#endif
10757 u32 eec;
10758
10759
10760
10761
10762 if (pdev->is_virtfn) {
10763 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
10764 pci_name(pdev), pdev->vendor, pdev->device);
10765 return -EINVAL;
10766 }
10767
10768 err = pci_enable_device_mem(pdev);
10769 if (err)
10770 return err;
10771
10772 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
10773 pci_using_dac = 1;
10774 } else {
10775 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10776 if (err) {
10777 dev_err(&pdev->dev,
10778 "No usable DMA configuration, aborting\n");
10779 goto err_dma;
10780 }
10781 pci_using_dac = 0;
10782 }
10783
10784 err = pci_request_mem_regions(pdev, ixgbe_driver_name);
10785 if (err) {
10786 dev_err(&pdev->dev,
10787 "pci_request_selected_regions failed 0x%x\n", err);
10788 goto err_pci_reg;
10789 }
10790
10791 pci_enable_pcie_error_reporting(pdev);
10792
10793 pci_set_master(pdev);
10794 pci_save_state(pdev);
10795
10796 if (ii->mac == ixgbe_mac_82598EB) {
10797#ifdef CONFIG_IXGBE_DCB
10798
10799 indices = 4 * MAX_TRAFFIC_CLASS;
10800#else
10801 indices = IXGBE_MAX_RSS_INDICES;
10802#endif
10803 }
10804
10805 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
10806 if (!netdev) {
10807 err = -ENOMEM;
10808 goto err_alloc_etherdev;
10809 }
10810
10811 SET_NETDEV_DEV(netdev, &pdev->dev);
10812
10813 adapter = netdev_priv(netdev);
10814
10815 adapter->netdev = netdev;
10816 adapter->pdev = pdev;
10817 hw = &adapter->hw;
10818 hw->back = adapter;
10819 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
10820
10821 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
10822 pci_resource_len(pdev, 0));
10823 adapter->io_addr = hw->hw_addr;
10824 if (!hw->hw_addr) {
10825 err = -EIO;
10826 goto err_ioremap;
10827 }
10828
10829 netdev->netdev_ops = &ixgbe_netdev_ops;
10830 ixgbe_set_ethtool_ops(netdev);
10831 netdev->watchdog_timeo = 5 * HZ;
10832 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
10833
10834
10835 hw->mac.ops = *ii->mac_ops;
10836 hw->mac.type = ii->mac;
10837 hw->mvals = ii->mvals;
10838 if (ii->link_ops)
10839 hw->link.ops = *ii->link_ops;
10840
10841
10842 hw->eeprom.ops = *ii->eeprom_ops;
10843 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
10844 if (ixgbe_removed(hw->hw_addr)) {
10845 err = -EIO;
10846 goto err_ioremap;
10847 }
10848
10849 if (!(eec & BIT(8)))
10850 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
10851
10852
10853 hw->phy.ops = *ii->phy_ops;
10854 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
10855
10856 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
10857 hw->phy.mdio.mmds = 0;
10858 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
10859 hw->phy.mdio.dev = netdev;
10860 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
10861 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
10862
10863
10864 err = ixgbe_sw_init(adapter, ii);
10865 if (err)
10866 goto err_sw_init;
10867
10868
10869 if (hw->mac.ops.init_swfw_sync)
10870 hw->mac.ops.init_swfw_sync(hw);
10871
10872
10873 switch (adapter->hw.mac.type) {
10874 case ixgbe_mac_82599EB:
10875 case ixgbe_mac_X540:
10876 case ixgbe_mac_X550:
10877 case ixgbe_mac_X550EM_x:
10878 case ixgbe_mac_x550em_a:
10879 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10880 break;
10881 default:
10882 break;
10883 }
10884
10885
10886
10887
10888
10889 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
10890 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
10891 if (esdp & IXGBE_ESDP_SDP1)
10892 e_crit(probe, "Fan has stopped, replace the adapter\n");
10893 }
10894
10895 if (allow_unsupported_sfp)
10896 hw->allow_unsupported_sfp = allow_unsupported_sfp;
10897
10898
10899 hw->phy.reset_if_overtemp = true;
10900 err = hw->mac.ops.reset_hw(hw);
10901 hw->phy.reset_if_overtemp = false;
10902 ixgbe_set_eee_capable(adapter);
10903 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
10904 err = 0;
10905 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
10906 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
10907 e_dev_err("Reload the driver after installing a supported module.\n");
10908 goto err_sw_init;
10909 } else if (err) {
10910 e_dev_err("HW Init failed: %d\n", err);
10911 goto err_sw_init;
10912 }
10913
10914#ifdef CONFIG_PCI_IOV
10915
10916 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
10917 goto skip_sriov;
10918
10919 ixgbe_init_mbx_params_pf(hw);
10920 hw->mbx.ops = ii->mbx_ops;
10921 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
10922 ixgbe_enable_sriov(adapter, max_vfs);
10923skip_sriov:
10924
10925#endif
10926 netdev->features = NETIF_F_SG |
10927 NETIF_F_TSO |
10928 NETIF_F_TSO6 |
10929 NETIF_F_RXHASH |
10930 NETIF_F_RXCSUM |
10931 NETIF_F_HW_CSUM;
10932
10933#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
10934 NETIF_F_GSO_GRE_CSUM | \
10935 NETIF_F_GSO_IPXIP4 | \
10936 NETIF_F_GSO_IPXIP6 | \
10937 NETIF_F_GSO_UDP_TUNNEL | \
10938 NETIF_F_GSO_UDP_TUNNEL_CSUM)
10939
10940 netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
10941 netdev->features |= NETIF_F_GSO_PARTIAL |
10942 IXGBE_GSO_PARTIAL_FEATURES;
10943
10944 if (hw->mac.type >= ixgbe_mac_82599EB)
10945 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
10946
10947#ifdef CONFIG_IXGBE_IPSEC
10948#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
10949 NETIF_F_HW_ESP_TX_CSUM | \
10950 NETIF_F_GSO_ESP)
10951
10952 if (adapter->ipsec)
10953 netdev->features |= IXGBE_ESP_FEATURES;
10954#endif
10955
10956 netdev->hw_features |= netdev->features |
10957 NETIF_F_HW_VLAN_CTAG_FILTER |
10958 NETIF_F_HW_VLAN_CTAG_RX |
10959 NETIF_F_HW_VLAN_CTAG_TX |
10960 NETIF_F_RXALL |
10961 NETIF_F_HW_L2FW_DOFFLOAD;
10962
10963 if (hw->mac.type >= ixgbe_mac_82599EB)
10964 netdev->hw_features |= NETIF_F_NTUPLE |
10965 NETIF_F_HW_TC;
10966
10967 if (pci_using_dac)
10968 netdev->features |= NETIF_F_HIGHDMA;
10969
10970 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
10971 netdev->hw_enc_features |= netdev->vlan_features;
10972 netdev->mpls_features |= NETIF_F_SG |
10973 NETIF_F_TSO |
10974 NETIF_F_TSO6 |
10975 NETIF_F_HW_CSUM;
10976 netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
10977
10978
10979 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
10980 NETIF_F_HW_VLAN_CTAG_RX |
10981 NETIF_F_HW_VLAN_CTAG_TX;
10982
10983 netdev->priv_flags |= IFF_UNICAST_FLT;
10984 netdev->priv_flags |= IFF_SUPP_NOFCS;
10985
10986
10987 netdev->min_mtu = ETH_MIN_MTU;
10988 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
10989
10990#ifdef CONFIG_IXGBE_DCB
10991 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
10992 netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
10993#endif
10994
10995#ifdef IXGBE_FCOE
10996 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
10997 unsigned int fcoe_l;
10998
10999 if (hw->mac.ops.get_device_caps) {
11000 hw->mac.ops.get_device_caps(hw, &device_caps);
11001 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
11002 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
11003 }
11004
11005
11006 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
11007 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
11008
11009 netdev->features |= NETIF_F_FSO |
11010 NETIF_F_FCOE_CRC;
11011
11012 netdev->vlan_features |= NETIF_F_FSO |
11013 NETIF_F_FCOE_CRC |
11014 NETIF_F_FCOE_MTU;
11015 }
11016#endif
11017 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
11018 netdev->hw_features |= NETIF_F_LRO;
11019 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
11020 netdev->features |= NETIF_F_LRO;
11021
11022 if (ixgbe_check_fw_error(adapter)) {
11023 err = -EIO;
11024 goto err_sw_init;
11025 }
11026
11027
11028 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
11029 e_dev_err("The EEPROM Checksum Is Not Valid\n");
11030 err = -EIO;
11031 goto err_sw_init;
11032 }
11033
11034 eth_platform_get_mac_address(&adapter->pdev->dev,
11035 adapter->hw.mac.perm_addr);
11036
11037 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
11038
11039 if (!is_valid_ether_addr(netdev->dev_addr)) {
11040 e_dev_err("invalid MAC address\n");
11041 err = -EIO;
11042 goto err_sw_init;
11043 }
11044
11045
11046 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
11047 ixgbe_mac_set_default_filter(adapter);
11048
11049 timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
11050
11051 if (ixgbe_removed(hw->hw_addr)) {
11052 err = -EIO;
11053 goto err_sw_init;
11054 }
11055 INIT_WORK(&adapter->service_task, ixgbe_service_task);
11056 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
11057 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
11058
11059 err = ixgbe_init_interrupt_scheme(adapter);
11060 if (err)
11061 goto err_sw_init;
11062
11063 for (i = 0; i < adapter->num_rx_queues; i++)
11064 u64_stats_init(&adapter->rx_ring[i]->syncp);
11065 for (i = 0; i < adapter->num_tx_queues; i++)
11066 u64_stats_init(&adapter->tx_ring[i]->syncp);
11067 for (i = 0; i < adapter->num_xdp_queues; i++)
11068 u64_stats_init(&adapter->xdp_ring[i]->syncp);
11069
11070
11071 adapter->wol = 0;
11072 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
11073 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
11074 pdev->subsystem_device);
11075 if (hw->wol_enabled)
11076 adapter->wol = IXGBE_WUFC_MAG;
11077
11078 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
11079
11080
11081 ixgbe_set_fw_version(adapter);
11082
11083
11084 if (ixgbe_pcie_from_parent(hw))
11085 ixgbe_get_parent_bus_info(adapter);
11086 else
11087 hw->mac.ops.get_bus_info(hw);
11088
11089
11090
11091
11092
11093
11094 switch (hw->mac.type) {
11095 case ixgbe_mac_82598EB:
11096 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
11097 break;
11098 default:
11099 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
11100 break;
11101 }
11102
11103
11104 if (expected_gts > 0)
11105 ixgbe_check_minimum_link(adapter, expected_gts);
11106
11107 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
11108 if (err)
11109 strlcpy(part_str, "Unknown", sizeof(part_str));
11110 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
11111 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
11112 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
11113 part_str);
11114 else
11115 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
11116 hw->mac.type, hw->phy.type, part_str);
11117
11118 e_dev_info("%pM\n", netdev->dev_addr);
11119
11120
11121 err = hw->mac.ops.start_hw(hw);
11122 if (err == IXGBE_ERR_EEPROM_VERSION) {
11123
11124 e_dev_warn("This device is a pre-production adapter/LOM. "
11125 "Please be aware there may be issues associated "
11126 "with your hardware. If you are experiencing "
11127 "problems please contact your Intel or hardware "
11128 "representative who provided you with this "
11129 "hardware.\n");
11130 }
11131 strcpy(netdev->name, "eth%d");
11132 pci_set_drvdata(pdev, adapter);
11133 err = register_netdev(netdev);
11134 if (err)
11135 goto err_register;
11136
11137
11138
11139 if (hw->mac.ops.disable_tx_laser)
11140 hw->mac.ops.disable_tx_laser(hw);
11141
11142
11143 netif_carrier_off(netdev);
11144
11145#ifdef CONFIG_IXGBE_DCA
11146 if (dca_add_requester(&pdev->dev) == 0) {
11147 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
11148 ixgbe_setup_dca(adapter);
11149 }
11150#endif
11151 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
11152 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
11153 for (i = 0; i < adapter->num_vfs; i++)
11154 ixgbe_vf_configuration(pdev, (i | 0x10000000));
11155 }
11156
11157
11158
11159
11160 if (hw->mac.ops.set_fw_drv_ver)
11161 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
11162 sizeof(ixgbe_driver_version) - 1,
11163 ixgbe_driver_version);
11164
11165
11166 ixgbe_add_sanmac_netdev(netdev);
11167
11168 e_dev_info("%s\n", ixgbe_default_device_descr);
11169
11170#ifdef CONFIG_IXGBE_HWMON
11171 if (ixgbe_sysfs_init(adapter))
11172 e_err(probe, "failed to allocate sysfs resources\n");
11173#endif
11174
11175 ixgbe_dbg_adapter_init(adapter);
11176
11177
11178 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
11179 hw->mac.ops.setup_link(hw,
11180 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
11181 true);
11182
11183 ixgbe_mii_bus_init(hw);
11184
11185 return 0;
11186
11187err_register:
11188 ixgbe_release_hw_control(adapter);
11189 ixgbe_clear_interrupt_scheme(adapter);
11190err_sw_init:
11191 ixgbe_disable_sriov(adapter);
11192 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
11193 iounmap(adapter->io_addr);
11194 kfree(adapter->jump_tables[0]);
11195 kfree(adapter->mac_table);
11196 kfree(adapter->rss_key);
11197 bitmap_free(adapter->af_xdp_zc_qps);
11198err_ioremap:
11199 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11200 free_netdev(netdev);
11201err_alloc_etherdev:
11202 pci_release_mem_regions(pdev);
11203err_pci_reg:
11204err_dma:
11205 if (!adapter || disable_dev)
11206 pci_disable_device(pdev);
11207 return err;
11208}
11209
11210
11211
11212
11213
11214
11215
11216
11217
11218
11219static void ixgbe_remove(struct pci_dev *pdev)
11220{
11221 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11222 struct net_device *netdev;
11223 bool disable_dev;
11224 int i;
11225
11226
11227 if (!adapter)
11228 return;
11229
11230 netdev = adapter->netdev;
11231 ixgbe_dbg_adapter_exit(adapter);
11232
11233 set_bit(__IXGBE_REMOVING, &adapter->state);
11234 cancel_work_sync(&adapter->service_task);
11235
11236 if (adapter->mii_bus)
11237 mdiobus_unregister(adapter->mii_bus);
11238
11239#ifdef CONFIG_IXGBE_DCA
11240 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
11241 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
11242 dca_remove_requester(&pdev->dev);
11243 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
11244 IXGBE_DCA_CTRL_DCA_DISABLE);
11245 }
11246
11247#endif
11248#ifdef CONFIG_IXGBE_HWMON
11249 ixgbe_sysfs_exit(adapter);
11250#endif
11251
11252
11253 ixgbe_del_sanmac_netdev(netdev);
11254
11255#ifdef CONFIG_PCI_IOV
11256 ixgbe_disable_sriov(adapter);
11257#endif
11258 if (netdev->reg_state == NETREG_REGISTERED)
11259 unregister_netdev(netdev);
11260
11261 ixgbe_stop_ipsec_offload(adapter);
11262 ixgbe_clear_interrupt_scheme(adapter);
11263
11264 ixgbe_release_hw_control(adapter);
11265
11266#ifdef CONFIG_DCB
11267 kfree(adapter->ixgbe_ieee_pfc);
11268 kfree(adapter->ixgbe_ieee_ets);
11269
11270#endif
11271 iounmap(adapter->io_addr);
11272 pci_release_mem_regions(pdev);
11273
11274 e_dev_info("complete\n");
11275
11276 for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
11277 if (adapter->jump_tables[i]) {
11278 kfree(adapter->jump_tables[i]->input);
11279 kfree(adapter->jump_tables[i]->mask);
11280 }
11281 kfree(adapter->jump_tables[i]);
11282 }
11283
11284 kfree(adapter->mac_table);
11285 kfree(adapter->rss_key);
11286 bitmap_free(adapter->af_xdp_zc_qps);
11287 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
11288 free_netdev(netdev);
11289
11290 pci_disable_pcie_error_reporting(pdev);
11291
11292 if (disable_dev)
11293 pci_disable_device(pdev);
11294}
11295
11296
11297
11298
11299
11300
11301
11302
11303
11304static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
11305 pci_channel_state_t state)
11306{
11307 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11308 struct net_device *netdev = adapter->netdev;
11309
11310#ifdef CONFIG_PCI_IOV
11311 struct ixgbe_hw *hw = &adapter->hw;
11312 struct pci_dev *bdev, *vfdev;
11313 u32 dw0, dw1, dw2, dw3;
11314 int vf, pos;
11315 u16 req_id, pf_func;
11316
11317 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
11318 adapter->num_vfs == 0)
11319 goto skip_bad_vf_detection;
11320
11321 bdev = pdev->bus->self;
11322 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
11323 bdev = bdev->bus->self;
11324
11325 if (!bdev)
11326 goto skip_bad_vf_detection;
11327
11328 pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
11329 if (!pos)
11330 goto skip_bad_vf_detection;
11331
11332 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
11333 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
11334 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
11335 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
11336 if (ixgbe_removed(hw->hw_addr))
11337 goto skip_bad_vf_detection;
11338
11339 req_id = dw1 >> 16;
11340
11341 if (!(req_id & 0x0080))
11342 goto skip_bad_vf_detection;
11343
11344 pf_func = req_id & 0x01;
11345 if ((pf_func & 1) == (pdev->devfn & 1)) {
11346 unsigned int device_id;
11347
11348 vf = (req_id & 0x7F) >> 1;
11349 e_dev_err("VF %d has caused a PCIe error\n", vf);
11350 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
11351 "%8.8x\tdw3: %8.8x\n",
11352 dw0, dw1, dw2, dw3);
11353 switch (adapter->hw.mac.type) {
11354 case ixgbe_mac_82599EB:
11355 device_id = IXGBE_82599_VF_DEVICE_ID;
11356 break;
11357 case ixgbe_mac_X540:
11358 device_id = IXGBE_X540_VF_DEVICE_ID;
11359 break;
11360 case ixgbe_mac_X550:
11361 device_id = IXGBE_DEV_ID_X550_VF;
11362 break;
11363 case ixgbe_mac_X550EM_x:
11364 device_id = IXGBE_DEV_ID_X550EM_X_VF;
11365 break;
11366 case ixgbe_mac_x550em_a:
11367 device_id = IXGBE_DEV_ID_X550EM_A_VF;
11368 break;
11369 default:
11370 device_id = 0;
11371 break;
11372 }
11373
11374
11375 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
11376 while (vfdev) {
11377 if (vfdev->devfn == (req_id & 0xFF))
11378 break;
11379 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
11380 device_id, vfdev);
11381 }
11382
11383
11384
11385
11386
11387 if (vfdev) {
11388 pcie_flr(vfdev);
11389
11390 pci_dev_put(vfdev);
11391 }
11392 }
11393
11394
11395
11396
11397
11398
11399
11400 adapter->vferr_refcount++;
11401
11402 return PCI_ERS_RESULT_RECOVERED;
11403
11404skip_bad_vf_detection:
11405#endif
11406 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
11407 return PCI_ERS_RESULT_DISCONNECT;
11408
11409 if (!netif_device_present(netdev))
11410 return PCI_ERS_RESULT_DISCONNECT;
11411
11412 rtnl_lock();
11413 netif_device_detach(netdev);
11414
11415 if (netif_running(netdev))
11416 ixgbe_close_suspend(adapter);
11417
11418 if (state == pci_channel_io_perm_failure) {
11419 rtnl_unlock();
11420 return PCI_ERS_RESULT_DISCONNECT;
11421 }
11422
11423 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
11424 pci_disable_device(pdev);
11425 rtnl_unlock();
11426
11427
11428 return PCI_ERS_RESULT_NEED_RESET;
11429}
11430
11431
11432
11433
11434
11435
11436
11437static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
11438{
11439 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11440 pci_ers_result_t result;
11441
11442 if (pci_enable_device_mem(pdev)) {
11443 e_err(probe, "Cannot re-enable PCI device after reset.\n");
11444 result = PCI_ERS_RESULT_DISCONNECT;
11445 } else {
11446 smp_mb__before_atomic();
11447 clear_bit(__IXGBE_DISABLED, &adapter->state);
11448 adapter->hw.hw_addr = adapter->io_addr;
11449 pci_set_master(pdev);
11450 pci_restore_state(pdev);
11451 pci_save_state(pdev);
11452
11453 pci_wake_from_d3(pdev, false);
11454
11455 ixgbe_reset(adapter);
11456 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
11457 result = PCI_ERS_RESULT_RECOVERED;
11458 }
11459
11460 return result;
11461}
11462
11463
11464
11465
11466
11467
11468
11469
11470static void ixgbe_io_resume(struct pci_dev *pdev)
11471{
11472 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
11473 struct net_device *netdev = adapter->netdev;
11474
11475#ifdef CONFIG_PCI_IOV
11476 if (adapter->vferr_refcount) {
11477 e_info(drv, "Resuming after VF err\n");
11478 adapter->vferr_refcount--;
11479 return;
11480 }
11481
11482#endif
11483 rtnl_lock();
11484 if (netif_running(netdev))
11485 ixgbe_open(netdev);
11486
11487 netif_device_attach(netdev);
11488 rtnl_unlock();
11489}
11490
11491static const struct pci_error_handlers ixgbe_err_handler = {
11492 .error_detected = ixgbe_io_error_detected,
11493 .slot_reset = ixgbe_io_slot_reset,
11494 .resume = ixgbe_io_resume,
11495};
11496
11497static struct pci_driver ixgbe_driver = {
11498 .name = ixgbe_driver_name,
11499 .id_table = ixgbe_pci_tbl,
11500 .probe = ixgbe_probe,
11501 .remove = ixgbe_remove,
11502#ifdef CONFIG_PM
11503 .suspend = ixgbe_suspend,
11504 .resume = ixgbe_resume,
11505#endif
11506 .shutdown = ixgbe_shutdown,
11507 .sriov_configure = ixgbe_pci_sriov_configure,
11508 .err_handler = &ixgbe_err_handler
11509};
11510
11511
11512
11513
11514
11515
11516
11517static int __init ixgbe_init_module(void)
11518{
11519 int ret;
11520 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
11521 pr_info("%s\n", ixgbe_copyright);
11522
11523 ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
11524 if (!ixgbe_wq) {
11525 pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
11526 return -ENOMEM;
11527 }
11528
11529 ixgbe_dbg_init();
11530
11531 ret = pci_register_driver(&ixgbe_driver);
11532 if (ret) {
11533 destroy_workqueue(ixgbe_wq);
11534 ixgbe_dbg_exit();
11535 return ret;
11536 }
11537
11538#ifdef CONFIG_IXGBE_DCA
11539 dca_register_notify(&dca_notifier);
11540#endif
11541
11542 return 0;
11543}
11544
11545module_init(ixgbe_init_module);
11546
11547
11548
11549
11550
11551
11552
11553static void __exit ixgbe_exit_module(void)
11554{
11555#ifdef CONFIG_IXGBE_DCA
11556 dca_unregister_notify(&dca_notifier);
11557#endif
11558 pci_unregister_driver(&ixgbe_driver);
11559
11560 ixgbe_dbg_exit();
11561 if (ixgbe_wq) {
11562 destroy_workqueue(ixgbe_wq);
11563 ixgbe_wq = NULL;
11564 }
11565}
11566
11567#ifdef CONFIG_IXGBE_DCA
11568static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
11569 void *p)
11570{
11571 int ret_val;
11572
11573 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
11574 __ixgbe_notify_dca);
11575
11576 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
11577}
11578
11579#endif
11580
11581module_exit(ixgbe_exit_module);
11582
11583
11584