// SPDX-License-Identifier: GPL-2.0
/* Intel(R) 10 Gigabit PCI Express Linux Network Driver (ixgbe)
 * Copyright(c) 1999 - 2016 Intel Corporation.
 */
29#include <linux/types.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33#include <linux/vmalloc.h>
34#include <linux/string.h>
35#include <linux/in.h>
36#include <linux/interrupt.h>
37#include <linux/ip.h>
38#include <linux/tcp.h>
39#include <linux/sctp.h>
40#include <linux/pkt_sched.h>
41#include <linux/ipv6.h>
42#include <linux/slab.h>
43#include <net/checksum.h>
44#include <net/ip6_checksum.h>
45#include <linux/etherdevice.h>
46#include <linux/ethtool.h>
47#include <linux/if.h>
48#include <linux/if_vlan.h>
49#include <linux/if_macvlan.h>
50#include <linux/if_bridge.h>
51#include <linux/prefetch.h>
52#include <linux/bpf.h>
53#include <linux/bpf_trace.h>
54#include <linux/atomic.h>
55#include <scsi/fc/fc_fcoe.h>
56#include <net/udp_tunnel.h>
57#include <net/pkt_cls.h>
58#include <net/tc_act/tc_gact.h>
59#include <net/tc_act/tc_mirred.h>
60#include <net/vxlan.h>
61#include <net/mpls.h>
62
63#include "ixgbe.h"
64#include "ixgbe_common.h"
65#include "ixgbe_dcb_82599.h"
66#include "ixgbe_sriov.h"
67#include "ixgbe_model.h"
68
69char ixgbe_driver_name[] = "ixgbe";
70static const char ixgbe_driver_string[] =
71 "Intel(R) 10 Gigabit PCI Express Network Driver";
72#ifdef IXGBE_FCOE
73char ixgbe_default_device_descr[] =
74 "Intel(R) 10 Gigabit Network Connection";
75#else
76static char ixgbe_default_device_descr[] =
77 "Intel(R) 10 Gigabit Network Connection";
78#endif
79#define DRV_VERSION "5.1.0-k"
80const char ixgbe_driver_version[] = DRV_VERSION;
81static const char ixgbe_copyright[] =
82 "Copyright (c) 1999-2016 Intel Corporation.";
83
static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";
85
86static const struct ixgbe_info *ixgbe_info_tbl[] = {
87 [board_82598] = &ixgbe_82598_info,
88 [board_82599] = &ixgbe_82599_info,
89 [board_X540] = &ixgbe_X540_info,
90 [board_X550] = &ixgbe_X550_info,
91 [board_X550EM_x] = &ixgbe_X550EM_x_info,
92 [board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
93 [board_x550em_a] = &ixgbe_x550em_a_info,
94 [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
95};
96
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
105static const struct pci_device_id ixgbe_pci_tbl[] = {
106 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
107 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
108 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
115 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
117 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
118 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
119 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
120 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
121 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
122 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
123 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
124 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
125 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
126 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
127 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
128 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
129 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
130 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
131 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
132 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
133 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
134 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
135 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
136 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
137 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
138 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
139 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
140 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
141 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
142 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
143 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
144 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
145 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
146 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
147 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
148 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
149 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
150 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
151 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
152 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
153
154 {0, }
155};
156MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
157
158#ifdef CONFIG_IXGBE_DCA
159static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
160 void *p);
161static struct notifier_block dca_notifier = {
162 .notifier_call = ixgbe_notify_dca,
163 .next = NULL,
164 .priority = 0
165};
166#endif
167
168#ifdef CONFIG_PCI_IOV
169static unsigned int max_vfs;
170module_param(max_vfs, uint, 0);
171MODULE_PARM_DESC(max_vfs,
172 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
173#endif
174
175static unsigned int allow_unsupported_sfp;
176module_param(allow_unsupported_sfp, uint, 0);
177MODULE_PARM_DESC(allow_unsupported_sfp,
178 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
179
180#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
181static int debug = -1;
182module_param(debug, int, 0);
183MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
184
185MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
186MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
187MODULE_LICENSE("GPL");
188MODULE_VERSION(DRV_VERSION);
189
190static struct workqueue_struct *ixgbe_wq;
191
192static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
193static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
194
195static const struct net_device_ops ixgbe_netdev_ops;
196
197static bool netif_is_ixgbe(struct net_device *dev)
198{
199 return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
200}
201
202static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
203 u32 reg, u16 *value)
204{
205 struct pci_dev *parent_dev;
206 struct pci_bus *parent_bus;
207
208 parent_bus = adapter->pdev->bus->parent;
209 if (!parent_bus)
210 return -1;
211
212 parent_dev = parent_bus->self;
213 if (!parent_dev)
214 return -1;
215
216 if (!pci_is_pcie(parent_dev))
217 return -1;
218
219 pcie_capability_read_word(parent_dev, reg, value);
220 if (*value == IXGBE_FAILED_READ_CFG_WORD &&
221 ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
222 return -1;
223 return 0;
224}
225
226static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
227{
228 struct ixgbe_hw *hw = &adapter->hw;
229 u16 link_status = 0;
230 int err;
231
232 hw->bus.type = ixgbe_bus_type_pci_express;
233
	/* Get the negotiated link width and speed from the PCI config space
	 * of the parent device (PCIe Link Status register, offset 18), as
	 * this device sits behind a PCIe switch.
	 */
237 err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);
238
	/* assume caller will handle error case */
240 if (err)
241 return err;
242
243 hw->bus.width = ixgbe_convert_bus_width(link_status);
244 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
245
246 return 0;
247}
248
/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCIe info
 * (speed, width, etc) should be obtained from the parent bus rather than
 * from the device itself, because this device sits behind an internal switch.
 */
258static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
259{
260 switch (hw->device_id) {
261 case IXGBE_DEV_ID_82599_SFP_SF_QP:
262 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
263 return true;
264 default:
265 return false;
266 }
267}
268
269static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
270 int expected_gts)
271{
272 struct ixgbe_hw *hw = &adapter->hw;
273 int max_gts = 0;
274 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
275 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
276 struct pci_dev *pdev;
277
	/* Some devices are not connected over PCIe and thus do not negotiate
	 * speed.  These devices do not have valid bus info, and thus any
	 * report we generate may not be correct.
	 */
282 if (hw->bus.type == ixgbe_bus_type_internal)
283 return;
284
	/* determine whether to use the parent device */
286 if (ixgbe_pcie_from_parent(&adapter->hw))
287 pdev = adapter->pdev->bus->parent->self;
288 else
289 pdev = adapter->pdev;
290
291 if (pcie_get_minimum_link(pdev, &speed, &width) ||
292 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
293 e_dev_warn("Unable to determine PCI Express bandwidth.\n");
294 return;
295 }
296
297 switch (speed) {
298 case PCIE_SPEED_2_5GT:
		/* 8b/10b encoding reduces max throughput by ~20% */
300 max_gts = 2 * width;
301 break;
302 case PCIE_SPEED_5_0GT:
		/* 8b/10b encoding reduces max throughput by ~20% */
304 max_gts = 4 * width;
305 break;
306 case PCIE_SPEED_8_0GT:
		/* 128b/130b encoding reduces throughput by less than 2% */
308 max_gts = 8 * width;
309 break;
310 default:
311 e_dev_warn("Unable to determine PCI Express bandwidth.\n");
312 return;
313 }
314
315 e_dev_info("PCI Express bandwidth of %dGT/s available\n",
316 max_gts);
317 e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
318 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
319 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
320 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
321 "Unknown"),
322 width,
323 (speed == PCIE_SPEED_2_5GT ? "20%" :
324 speed == PCIE_SPEED_5_0GT ? "20%" :
325 speed == PCIE_SPEED_8_0GT ? "<2%" :
326 "Unknown"));
327
328 if (max_gts < expected_gts) {
329 e_dev_warn("This is not sufficient for optimal performance of this card.\n");
330 e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
331 expected_gts);
332 e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
333 }
334}
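
/* Example of the max_gts math above: a link trained at 5.0GT/s x4 gives
 * max_gts = 4 * 4 = 16 GT/s of usable bandwidth once the ~20% 8b/10b
 * encoding overhead is accounted for, while 2.5GT/s x8 likewise yields
 * 2 * 8 = 16 GT/s.  expected_gts is supplied by the caller (typically
 * scaled by the number of ports sharing the slot), so a dual-port adapter
 * in such a slot would trigger the "not sufficient" warning.
 */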
335
336static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
337{
338 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
339 !test_bit(__IXGBE_REMOVING, &adapter->state) &&
340 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
341 queue_work(ixgbe_wq, &adapter->service_task);
342}
343
344static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
345{
346 struct ixgbe_adapter *adapter = hw->back;
347
348 if (!hw->hw_addr)
349 return;
350 hw->hw_addr = NULL;
351 e_dev_err("Adapter removed\n");
352 if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
353 ixgbe_service_event_schedule(adapter);
354}
355
356static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
357{
358 u32 value;
359
	/* The following check not only optimizes a bit by not performing a
	 * read on the status register when the register just read was a
	 * status register read that returned IXGBE_FAILED_READ_REG.  It
	 * also blocks any potential recursion.
	 */
366 if (reg == IXGBE_STATUS) {
367 ixgbe_remove_adapter(hw);
368 return;
369 }
370 value = ixgbe_read_reg(hw, IXGBE_STATUS);
371 if (value == IXGBE_FAILED_READ_REG)
372 ixgbe_remove_adapter(hw);
373}
374
/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers.  It checks for device
 * removal by confirming any read that returns all ones by checking the
 * status register value for all ones.  This function avoids reading from
 * the hardware if a removal was previously detected, in which case it
 * returns IXGBE_FAILED_READ_REG (all ones).
 */
388u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
389{
390 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
391 u32 value;
392
393 if (ixgbe_removed(reg_addr))
394 return IXGBE_FAILED_READ_REG;
395 if (unlikely(hw->phy.nw_mng_if_sel &
396 IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
397 struct ixgbe_adapter *adapter;
398 int i;
399
400 for (i = 0; i < 200; ++i) {
401 value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
402 if (likely(!value))
403 goto writes_completed;
404 if (value == IXGBE_FAILED_READ_REG) {
405 ixgbe_remove_adapter(hw);
406 return IXGBE_FAILED_READ_REG;
407 }
408 udelay(5);
409 }
410
411 adapter = hw->back;
412 e_warn(hw, "register writes incomplete %08x\n", value);
413 }
414
415writes_completed:
416 value = readl(reg_addr + reg);
417 if (unlikely(value == IXGBE_FAILED_READ_REG))
418 ixgbe_check_remove(hw, reg);
419 return value;
420}
421
422static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
423{
424 u16 value;
425
426 pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
427 if (value == IXGBE_FAILED_READ_CFG_WORD) {
428 ixgbe_remove_adapter(hw);
429 return true;
430 }
431 return false;
432}
433
434u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
435{
436 struct ixgbe_adapter *adapter = hw->back;
437 u16 value;
438
439 if (ixgbe_removed(hw->hw_addr))
440 return IXGBE_FAILED_READ_CFG_WORD;
441 pci_read_config_word(adapter->pdev, reg, &value);
442 if (value == IXGBE_FAILED_READ_CFG_WORD &&
443 ixgbe_check_cfg_remove(hw, adapter->pdev))
444 return IXGBE_FAILED_READ_CFG_WORD;
445 return value;
446}
447
448#ifdef CONFIG_PCI_IOV
449static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
450{
451 struct ixgbe_adapter *adapter = hw->back;
452 u32 value;
453
454 if (ixgbe_removed(hw->hw_addr))
455 return IXGBE_FAILED_READ_CFG_DWORD;
456 pci_read_config_dword(adapter->pdev, reg, &value);
457 if (value == IXGBE_FAILED_READ_CFG_DWORD &&
458 ixgbe_check_cfg_remove(hw, adapter->pdev))
459 return IXGBE_FAILED_READ_CFG_DWORD;
460 return value;
461}
462#endif
463
464void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
465{
466 struct ixgbe_adapter *adapter = hw->back;
467
468 if (ixgbe_removed(hw->hw_addr))
469 return;
470 pci_write_config_word(adapter->pdev, reg, value);
471}
472
473static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
474{
475 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
476
	/* flush memory to make sure state is correct before next watchdog */
478 smp_mb__before_atomic();
479 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
480}
481
482struct ixgbe_reg_info {
483 u32 ofs;
484 char *name;
485};
486
487static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
488
	/* General Registers */
490 {IXGBE_CTRL, "CTRL"},
491 {IXGBE_STATUS, "STATUS"},
492 {IXGBE_CTRL_EXT, "CTRL_EXT"},
493
	/* Interrupt Registers */
495 {IXGBE_EICR, "EICR"},
496
	/* Receive DMA Registers */
498 {IXGBE_SRRCTL(0), "SRRCTL"},
499 {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
500 {IXGBE_RDLEN(0), "RDLEN"},
501 {IXGBE_RDH(0), "RDH"},
502 {IXGBE_RDT(0), "RDT"},
503 {IXGBE_RXDCTL(0), "RXDCTL"},
504 {IXGBE_RDBAL(0), "RDBAL"},
505 {IXGBE_RDBAH(0), "RDBAH"},
506
	/* Transmit DMA Registers */
508 {IXGBE_TDBAL(0), "TDBAL"},
509 {IXGBE_TDBAH(0), "TDBAH"},
510 {IXGBE_TDLEN(0), "TDLEN"},
511 {IXGBE_TDH(0), "TDH"},
512 {IXGBE_TDT(0), "TDT"},
513 {IXGBE_TXDCTL(0), "TXDCTL"},
514
	/* List Terminator */
516 { .name = NULL }
517};
518
/*
 * ixgbe_regdump - register printout routine
 */
523static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
524{
525 int i;
526 char rname[16];
527 u32 regs[64];
528
529 switch (reginfo->ofs) {
530 case IXGBE_SRRCTL(0):
531 for (i = 0; i < 64; i++)
532 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
533 break;
534 case IXGBE_DCA_RXCTRL(0):
535 for (i = 0; i < 64; i++)
536 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
537 break;
538 case IXGBE_RDLEN(0):
539 for (i = 0; i < 64; i++)
540 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
541 break;
542 case IXGBE_RDH(0):
543 for (i = 0; i < 64; i++)
544 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
545 break;
546 case IXGBE_RDT(0):
547 for (i = 0; i < 64; i++)
548 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
549 break;
550 case IXGBE_RXDCTL(0):
551 for (i = 0; i < 64; i++)
552 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
553 break;
554 case IXGBE_RDBAL(0):
555 for (i = 0; i < 64; i++)
556 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
557 break;
558 case IXGBE_RDBAH(0):
559 for (i = 0; i < 64; i++)
560 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
561 break;
562 case IXGBE_TDBAL(0):
563 for (i = 0; i < 64; i++)
564 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
565 break;
566 case IXGBE_TDBAH(0):
567 for (i = 0; i < 64; i++)
568 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
569 break;
570 case IXGBE_TDLEN(0):
571 for (i = 0; i < 64; i++)
572 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
573 break;
574 case IXGBE_TDH(0):
575 for (i = 0; i < 64; i++)
576 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
577 break;
578 case IXGBE_TDT(0):
579 for (i = 0; i < 64; i++)
580 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
581 break;
582 case IXGBE_TXDCTL(0):
583 for (i = 0; i < 64; i++)
584 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
585 break;
586 default:
587 pr_info("%-15s %08x\n",
588 reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
589 return;
590 }
591
592 i = 0;
593 while (i < 64) {
594 int j;
595 char buf[9 * 8 + 1];
596 char *p = buf;
597
598 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
599 for (j = 0; j < 8; j++)
600 p += sprintf(p, " %08x", regs[i++]);
601 pr_err("%-15s%s\n", rname, buf);
602 }
603
604}
605
606static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
607{
608 struct ixgbe_tx_buffer *tx_buffer;
609
610 tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
611 pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
612 n, ring->next_to_use, ring->next_to_clean,
613 (u64)dma_unmap_addr(tx_buffer, dma),
614 dma_unmap_len(tx_buffer, len),
615 tx_buffer->next_to_watch,
616 (u64)tx_buffer->time_stamp);
617}
618
/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
622static void ixgbe_dump(struct ixgbe_adapter *adapter)
623{
624 struct net_device *netdev = adapter->netdev;
625 struct ixgbe_hw *hw = &adapter->hw;
626 struct ixgbe_reg_info *reginfo;
627 int n = 0;
628 struct ixgbe_ring *ring;
629 struct ixgbe_tx_buffer *tx_buffer;
630 union ixgbe_adv_tx_desc *tx_desc;
631 struct my_u0 { u64 a; u64 b; } *u0;
632 struct ixgbe_ring *rx_ring;
633 union ixgbe_adv_rx_desc *rx_desc;
634 struct ixgbe_rx_buffer *rx_buffer_info;
635 int i = 0;
636
637 if (!netif_msg_hw(adapter))
638 return;
639
640
641 if (netdev) {
642 dev_info(&adapter->pdev->dev, "Net device Info\n");
643 pr_info("Device Name state "
644 "trans_start\n");
645 pr_info("%-15s %016lX %016lX\n",
646 netdev->name,
647 netdev->state,
648 dev_trans_start(netdev));
649 }
650
651
652 dev_info(&adapter->pdev->dev, "Register Dump\n");
653 pr_info(" Register Name Value\n");
654 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
655 reginfo->name; reginfo++) {
656 ixgbe_regdump(hw, reginfo);
657 }
658
	/* Print TX Ring Summary */
660 if (!netdev || !netif_running(netdev))
661 return;
662
663 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
664 pr_info(" %s %s %s %s\n",
665 "Queue [NTU] [NTC] [bi(ntc)->dma ]",
666 "leng", "ntw", "timestamp");
667 for (n = 0; n < adapter->num_tx_queues; n++) {
668 ring = adapter->tx_ring[n];
669 ixgbe_print_buffer(ring, n);
670 }
671
672 for (n = 0; n < adapter->num_xdp_queues; n++) {
673 ring = adapter->xdp_ring[n];
674 ixgbe_print_buffer(ring, n);
675 }
676
	/* Print TX Rings */
678 if (!netif_msg_tx_done(adapter))
679 goto rx_ring_summary;
680
681 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
	/* Transmit Descriptor Formats
	 *
	 * In the read format the descriptor carries the DMA buffer address in
	 * the first quad word and the length/command/status fields (PAYLEN,
	 * POPTS, IDX, STA, DCMD, DTYP, DTALEN) in the second.  In the
	 * write-back format the hardware only reports status, most notably
	 * the Descriptor Done (DD) bit that the cleanup path tests.
	 */
718 for (n = 0; n < adapter->num_tx_queues; n++) {
719 ring = adapter->tx_ring[n];
720 pr_info("------------------------------------\n");
721 pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
722 pr_info("------------------------------------\n");
723 pr_info("%s%s %s %s %s %s\n",
724 "T [desc] [address 63:0 ] ",
725 "[PlPOIdStDDt Ln] [bi->dma ] ",
726 "leng", "ntw", "timestamp", "bi->skb");
727
728 for (i = 0; ring->desc && (i < ring->count); i++) {
729 tx_desc = IXGBE_TX_DESC(ring, i);
730 tx_buffer = &ring->tx_buffer_info[i];
731 u0 = (struct my_u0 *)tx_desc;
732 if (dma_unmap_len(tx_buffer, len) > 0) {
733 const char *ring_desc;
734
735 if (i == ring->next_to_use &&
736 i == ring->next_to_clean)
737 ring_desc = " NTC/U";
738 else if (i == ring->next_to_use)
739 ring_desc = " NTU";
740 else if (i == ring->next_to_clean)
741 ring_desc = " NTC";
742 else
743 ring_desc = "";
744 pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
745 i,
746 le64_to_cpu(u0->a),
747 le64_to_cpu(u0->b),
748 (u64)dma_unmap_addr(tx_buffer, dma),
749 dma_unmap_len(tx_buffer, len),
750 tx_buffer->next_to_watch,
751 (u64)tx_buffer->time_stamp,
752 tx_buffer->skb,
753 ring_desc);
754
755 if (netif_msg_pktdata(adapter) &&
756 tx_buffer->skb)
757 print_hex_dump(KERN_INFO, "",
758 DUMP_PREFIX_ADDRESS, 16, 1,
759 tx_buffer->skb->data,
760 dma_unmap_len(tx_buffer, len),
761 true);
762 }
763 }
764 }
765
	/* Print RX Rings Summary */
767rx_ring_summary:
768 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
769 pr_info("Queue [NTU] [NTC]\n");
770 for (n = 0; n < adapter->num_rx_queues; n++) {
771 rx_ring = adapter->rx_ring[n];
772 pr_info("%5d %5X %5X\n",
773 n, rx_ring->next_to_use, rx_ring->next_to_clean);
774 }
775
	/* Print RX Rings */
777 if (!netif_msg_rx_status(adapter))
778 return;
779
780 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
	/* Receive Descriptor Formats
	 *
	 * In the "Advanced Rx Read" format the descriptor supplies the packet
	 * buffer (and optional header buffer) DMA addresses to hardware.  In
	 * the "Advanced Rx Write-Back" format the hardware returns RSS and
	 * packet type information, VLAN tag, error and status bits (including
	 * DD and EOP), and the received length, which the dump below decodes.
	 */
827 for (n = 0; n < adapter->num_rx_queues; n++) {
828 rx_ring = adapter->rx_ring[n];
829 pr_info("------------------------------------\n");
830 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
831 pr_info("------------------------------------\n");
832 pr_info("%s%s%s\n",
833 "R [desc] [ PktBuf A0] ",
834 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
835 "<-- Adv Rx Read format");
836 pr_info("%s%s%s\n",
837 "RWB[desc] [PcsmIpSHl PtRs] ",
838 "[vl er S cks ln] ---------------- [bi->skb ] ",
839 "<-- Adv Rx Write-Back format");
840
841 for (i = 0; i < rx_ring->count; i++) {
842 const char *ring_desc;
843
844 if (i == rx_ring->next_to_use)
845 ring_desc = " NTU";
846 else if (i == rx_ring->next_to_clean)
847 ring_desc = " NTC";
848 else
849 ring_desc = "";
850
851 rx_buffer_info = &rx_ring->rx_buffer_info[i];
852 rx_desc = IXGBE_RX_DESC(rx_ring, i);
853 u0 = (struct my_u0 *)rx_desc;
854 if (rx_desc->wb.upper.length) {
855
856 pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
857 i,
858 le64_to_cpu(u0->a),
859 le64_to_cpu(u0->b),
860 rx_buffer_info->skb,
861 ring_desc);
862 } else {
863 pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
864 i,
865 le64_to_cpu(u0->a),
866 le64_to_cpu(u0->b),
867 (u64)rx_buffer_info->dma,
868 rx_buffer_info->skb,
869 ring_desc);
870
871 if (netif_msg_pktdata(adapter) &&
872 rx_buffer_info->dma) {
873 print_hex_dump(KERN_INFO, "",
874 DUMP_PREFIX_ADDRESS, 16, 1,
875 page_address(rx_buffer_info->page) +
876 rx_buffer_info->page_offset,
877 ixgbe_rx_bufsz(rx_ring), true);
878 }
879 }
880 }
881 }
882}
883
884static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
885{
886 u32 ctrl_ext;
887
	/* Let firmware take over control of h/w */
889 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
890 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
891 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
892}
893
894static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
895{
896 u32 ctrl_ext;
897
	/* Let firmware know the driver has taken over */
899 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
900 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
901 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
902}
903
/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 **/
912static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
913 u8 queue, u8 msix_vector)
914{
915 u32 ivar, index;
916 struct ixgbe_hw *hw = &adapter->hw;
917 switch (hw->mac.type) {
918 case ixgbe_mac_82598EB:
919 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
920 if (direction == -1)
921 direction = 0;
922 index = (((direction * 64) + queue) >> 2) & 0x1F;
923 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
924 ivar &= ~(0xFF << (8 * (queue & 0x3)));
925 ivar |= (msix_vector << (8 * (queue & 0x3)));
926 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
927 break;
928 case ixgbe_mac_82599EB:
929 case ixgbe_mac_X540:
930 case ixgbe_mac_X550:
931 case ixgbe_mac_X550EM_x:
932 case ixgbe_mac_x550em_a:
933 if (direction == -1) {
934
935 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
936 index = ((queue & 1) * 8);
937 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
938 ivar &= ~(0xFF << index);
939 ivar |= (msix_vector << index);
940 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
941 break;
942 } else {
943
944 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
945 index = ((16 * (queue & 1)) + (8 * direction));
946 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
947 ivar &= ~(0xFF << index);
948 ivar |= (msix_vector << index);
949 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
950 break;
951 }
952 default:
953 break;
954 }
955}
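
/* IVAR mapping example for 82599 and later MACs: each IVAR register holds
 * four 8-bit entries covering the Rx/Tx cause pair for two queues.  Mapping
 * Rx queue 5 (direction 0) to MSI-X vector 3 selects IVAR(5 >> 1) = IVAR(2)
 * and bit offset (16 * (5 & 1)) + (8 * 0) = 16, so bits 23:16 of that
 * register are written with (3 | IXGBE_IVAR_ALLOC_VAL).
 */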
956
957static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
958 u64 qmask)
959{
960 u32 mask;
961
962 switch (adapter->hw.mac.type) {
963 case ixgbe_mac_82598EB:
964 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
965 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
966 break;
967 case ixgbe_mac_82599EB:
968 case ixgbe_mac_X540:
969 case ixgbe_mac_X550:
970 case ixgbe_mac_X550EM_x:
971 case ixgbe_mac_x550em_a:
972 mask = (qmask & 0xFFFFFFFF);
973 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
974 mask = (qmask >> 32);
975 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
976 break;
977 default:
978 break;
979 }
980}
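
/* On 82599 and newer the extended interrupt cause set spans two 32-bit
 * registers, so the 64-bit qmask above is split: e.g. a qmask of
 * BIT_ULL(40) | BIT(3) writes 0x8 to EICS_EX(0) and 0x100 to EICS_EX(1),
 * re-triggering the interrupts for those queue vectors.
 */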
981
982static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
983{
984 struct ixgbe_hw *hw = &adapter->hw;
985 struct ixgbe_hw_stats *hwstats = &adapter->stats;
986 int i;
987 u32 data;
988
989 if ((hw->fc.current_mode != ixgbe_fc_full) &&
990 (hw->fc.current_mode != ixgbe_fc_rx_pause))
991 return;
992
993 switch (hw->mac.type) {
994 case ixgbe_mac_82598EB:
995 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
996 break;
997 default:
998 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
999 }
1000 hwstats->lxoffrxc += data;
1001
	/* refill credits (no tx hang) if we received xoff */
1003 if (!data)
1004 return;
1005
1006 for (i = 0; i < adapter->num_tx_queues; i++)
1007 clear_bit(__IXGBE_HANG_CHECK_ARMED,
1008 &adapter->tx_ring[i]->state);
1009
1010 for (i = 0; i < adapter->num_xdp_queues; i++)
1011 clear_bit(__IXGBE_HANG_CHECK_ARMED,
1012 &adapter->xdp_ring[i]->state);
1013}
1014
1015static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
1016{
1017 struct ixgbe_hw *hw = &adapter->hw;
1018 struct ixgbe_hw_stats *hwstats = &adapter->stats;
1019 u32 xoff[8] = {0};
1020 u8 tc;
1021 int i;
1022 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
1023
1024 if (adapter->ixgbe_ieee_pfc)
1025 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
1026
1027 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
1028 ixgbe_update_xoff_rx_lfc(adapter);
1029 return;
1030 }
1031
	/* update stats for each tc, only valid with PFC enabled */
1033 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
1034 u32 pxoffrxc;
1035
1036 switch (hw->mac.type) {
1037 case ixgbe_mac_82598EB:
1038 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1039 break;
1040 default:
1041 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1042 }
1043 hwstats->pxoffrxc[i] += pxoffrxc;
1044
1045 tc = netdev_get_prio_tc_map(adapter->netdev, i);
1046 xoff[tc] += pxoffrxc;
1047 }
1048
	/* disarm tx queues that have received xoff frames */
1050 for (i = 0; i < adapter->num_tx_queues; i++) {
1051 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
1052
1053 tc = tx_ring->dcb_tc;
1054 if (xoff[tc])
1055 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1056 }
1057
1058 for (i = 0; i < adapter->num_xdp_queues; i++) {
1059 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
1060
1061 tc = xdp_ring->dcb_tc;
1062 if (xoff[tc])
1063 clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
1064 }
1065}
1066
1067static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
1068{
1069 return ring->stats.packets;
1070}
1071
1072static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
1073{
1074 unsigned int head, tail;
1075
1076 head = ring->next_to_clean;
1077 tail = ring->next_to_use;
1078
1079 return ((head <= tail) ? tail : tail + ring->count) - head;
1080}
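
/* Example of the pending calculation above with a 512-entry ring: if
 * next_to_clean (head) is 500 and next_to_use (tail) has wrapped to 10,
 * the number of descriptors still owned by hardware is
 * (10 + 512) - 500 = 22; without a wrap, head 100 and tail 110 give 10.
 */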
1081
1082static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1083{
1084 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1085 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1086 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1087
1088 clear_check_for_tx_hang(tx_ring);
1089
	/*
	 * Check for a hung queue, but be thorough.  This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending.  The
	 * ARMED bit is set to indicate a potential hang.  The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames.  By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
1102 if (tx_done_old == tx_done && tx_pending)
		/* make sure it is true for two checks in a row */
1104 return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
1105 &tx_ring->state);
1106
1107 tx_ring->tx_stats.tx_done_old = tx_done;
1108
1109 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1110
1111 return false;
1112}
1113
/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
1118static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
1119{
1120
	/* Do the reset outside of interrupt context */
1122 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1123 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
1124 e_warn(drv, "initiating reset due to tx timeout\n");
1125 ixgbe_service_event_schedule(adapter);
1126 }
1127}
1128
/**
 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
 * @netdev: network interface device structure
 * @queue_index: Tx queue to set
 * @maxrate: desired maximum transmit bitrate
 **/
1135static int ixgbe_tx_maxrate(struct net_device *netdev,
1136 int queue_index, u32 maxrate)
1137{
1138 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1139 struct ixgbe_hw *hw = &adapter->hw;
1140 u32 bcnrc_val = ixgbe_link_mbps(adapter);
1141
1142 if (!maxrate)
1143 return 0;
1144
1145
1146 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1147 bcnrc_val /= maxrate;
1148
1149
1150 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1151 IXGBE_RTTBCNRC_RF_DEC_MASK;
1152
1153
1154 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1155
1156 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
1157 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1158
1159 return 0;
1160}
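
/* The rate limiter above programs a fixed-point "rate factor" of
 * link_speed / maxrate into RTTBCNRC for the selected queue.  For example,
 * assuming ixgbe_link_mbps() reports 10000 on a 10Gb/s link and
 * maxrate = 2500 Mb/s, the integer part of the factor is 4, i.e. the queue
 * is allowed roughly 1/4 of line rate.  A maxrate of 0 returns early and
 * leaves the queue unthrottled.
 */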
1161
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
1168static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1169 struct ixgbe_ring *tx_ring, int napi_budget)
1170{
1171 struct ixgbe_adapter *adapter = q_vector->adapter;
1172 struct ixgbe_tx_buffer *tx_buffer;
1173 union ixgbe_adv_tx_desc *tx_desc;
1174 unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
1175 unsigned int budget = q_vector->tx.work_limit;
1176 unsigned int i = tx_ring->next_to_clean;
1177
1178 if (test_bit(__IXGBE_DOWN, &adapter->state))
1179 return true;
1180
1181 tx_buffer = &tx_ring->tx_buffer_info[i];
1182 tx_desc = IXGBE_TX_DESC(tx_ring, i);
1183 i -= tx_ring->count;
1184
1185 do {
1186 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
1187
		/* if next_to_watch is not set then there is no work pending */
1189 if (!eop_desc)
1190 break;
1191
		/* prevent any other reads prior to eop_desc */
1193 smp_rmb();
1194
		/* if DD is not set pending work has not been completed */
1196 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1197 break;
1198
		/* clear next_to_watch to prevent false hangs */
1200 tx_buffer->next_to_watch = NULL;
1201
1202
1203 total_bytes += tx_buffer->bytecount;
1204 total_packets += tx_buffer->gso_segs;
1205 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
1206 total_ipsec++;
1207
1208
1209 if (ring_is_xdp(tx_ring))
1210 page_frag_free(tx_buffer->data);
1211 else
1212 napi_consume_skb(tx_buffer->skb, napi_budget);
1213
1214
1215 dma_unmap_single(tx_ring->dev,
1216 dma_unmap_addr(tx_buffer, dma),
1217 dma_unmap_len(tx_buffer, len),
1218 DMA_TO_DEVICE);
1219
1220
1221 dma_unmap_len_set(tx_buffer, len, 0);
1222
		/* unmap remaining buffers */
1224 while (tx_desc != eop_desc) {
1225 tx_buffer++;
1226 tx_desc++;
1227 i++;
1228 if (unlikely(!i)) {
1229 i -= tx_ring->count;
1230 tx_buffer = tx_ring->tx_buffer_info;
1231 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1232 }
1233
1234
1235 if (dma_unmap_len(tx_buffer, len)) {
1236 dma_unmap_page(tx_ring->dev,
1237 dma_unmap_addr(tx_buffer, dma),
1238 dma_unmap_len(tx_buffer, len),
1239 DMA_TO_DEVICE);
1240 dma_unmap_len_set(tx_buffer, len, 0);
1241 }
1242 }
1243
1244
1245 tx_buffer++;
1246 tx_desc++;
1247 i++;
1248 if (unlikely(!i)) {
1249 i -= tx_ring->count;
1250 tx_buffer = tx_ring->tx_buffer_info;
1251 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1252 }
1253
1254
1255 prefetch(tx_desc);
1256
1257
1258 budget--;
1259 } while (likely(budget));
1260
1261 i += tx_ring->count;
1262 tx_ring->next_to_clean = i;
1263 u64_stats_update_begin(&tx_ring->syncp);
1264 tx_ring->stats.bytes += total_bytes;
1265 tx_ring->stats.packets += total_packets;
1266 u64_stats_update_end(&tx_ring->syncp);
1267 q_vector->tx.total_bytes += total_bytes;
1268 q_vector->tx.total_packets += total_packets;
1269 adapter->tx_ipsec += total_ipsec;
1270
1271 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1272
1273 struct ixgbe_hw *hw = &adapter->hw;
1274 e_err(drv, "Detected Tx Unit Hang %s\n"
1275 " Tx Queue <%d>\n"
1276 " TDH, TDT <%x>, <%x>\n"
1277 " next_to_use <%x>\n"
1278 " next_to_clean <%x>\n"
1279 "tx_buffer_info[next_to_clean]\n"
1280 " time_stamp <%lx>\n"
1281 " jiffies <%lx>\n",
1282 ring_is_xdp(tx_ring) ? "(XDP)" : "",
1283 tx_ring->queue_index,
1284 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1285 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1286 tx_ring->next_to_use, i,
1287 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1288
1289 if (!ring_is_xdp(tx_ring))
1290 netif_stop_subqueue(tx_ring->netdev,
1291 tx_ring->queue_index);
1292
1293 e_info(probe,
1294 "tx hang %d detected on queue %d, resetting adapter\n",
1295 adapter->tx_timeout_count + 1, tx_ring->queue_index);
1296
		/* schedule immediate reset if we believe we hung */
1298 ixgbe_tx_timeout_reset(adapter);
1299
		/* the adapter is about to reset, no point in enabling stuff */
1301 return true;
1302 }
1303
1304 if (ring_is_xdp(tx_ring))
1305 return !!budget;
1306
1307 netdev_tx_completed_queue(txring_txq(tx_ring),
1308 total_packets, total_bytes);
1309
1310#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
1311 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1312 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
1316 smp_mb();
1317 if (__netif_subqueue_stopped(tx_ring->netdev,
1318 tx_ring->queue_index)
1319 && !test_bit(__IXGBE_DOWN, &adapter->state)) {
1320 netif_wake_subqueue(tx_ring->netdev,
1321 tx_ring->queue_index);
1322 ++tx_ring->tx_stats.restart_queue;
1323 }
1324 }
1325
1326 return !!budget;
1327}
1328
1329#ifdef CONFIG_IXGBE_DCA
1330static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
1331 struct ixgbe_ring *tx_ring,
1332 int cpu)
1333{
1334 struct ixgbe_hw *hw = &adapter->hw;
1335 u32 txctrl = 0;
1336 u16 reg_offset;
1337
1338 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1339 txctrl = dca3_get_tag(tx_ring->dev, cpu);
1340
1341 switch (hw->mac.type) {
1342 case ixgbe_mac_82598EB:
1343 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1344 break;
1345 case ixgbe_mac_82599EB:
1346 case ixgbe_mac_X540:
1347 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
1348 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
1349 break;
1350 default:
		/* for unknown hardware do not do anything */
1352 return;
1353 }
1354
	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
1360 txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1361 IXGBE_DCA_TXCTRL_DATA_RRO_EN |
1362 IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1363
1364 IXGBE_WRITE_REG(hw, reg_offset, txctrl);
1365}
1366
1367static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
1368 struct ixgbe_ring *rx_ring,
1369 int cpu)
1370{
1371 struct ixgbe_hw *hw = &adapter->hw;
1372 u32 rxctrl = 0;
1373 u8 reg_idx = rx_ring->reg_idx;
1374
1375 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1376 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
1377
1378 switch (hw->mac.type) {
1379 case ixgbe_mac_82599EB:
1380 case ixgbe_mac_X540:
1381 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
1382 break;
1383 default:
1384 break;
1385 }
1386
	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
1392 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1393 IXGBE_DCA_RXCTRL_DATA_DCA_EN |
1394 IXGBE_DCA_RXCTRL_DESC_DCA_EN;
1395
1396 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
1397}
1398
1399static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1400{
1401 struct ixgbe_adapter *adapter = q_vector->adapter;
1402 struct ixgbe_ring *ring;
1403 int cpu = get_cpu();
1404
1405 if (q_vector->cpu == cpu)
1406 goto out_no_update;
1407
1408 ixgbe_for_each_ring(ring, q_vector->tx)
1409 ixgbe_update_tx_dca(adapter, ring, cpu);
1410
1411 ixgbe_for_each_ring(ring, q_vector->rx)
1412 ixgbe_update_rx_dca(adapter, ring, cpu);
1413
1414 q_vector->cpu = cpu;
1415out_no_update:
1416 put_cpu();
1417}
1418
1419static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1420{
1421 int i;
1422
1423
1424 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1425 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1426 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1427 else
1428 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1429 IXGBE_DCA_CTRL_DCA_DISABLE);
1430
1431 for (i = 0; i < adapter->num_q_vectors; i++) {
1432 adapter->q_vector[i]->cpu = -1;
1433 ixgbe_update_dca(adapter->q_vector[i]);
1434 }
1435}
1436
1437static int __ixgbe_notify_dca(struct device *dev, void *data)
1438{
1439 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1440 unsigned long event = *(unsigned long *)data;
1441
1442 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1443 return 0;
1444
1445 switch (event) {
1446 case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
1448 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1449 break;
1450 if (dca_add_requester(dev) == 0) {
1451 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1452 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1453 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1454 break;
1455 }
		/* fall through - DCA add failed, so treat it as disabled */
1457 case DCA_PROVIDER_REMOVE:
1458 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1459 dca_remove_requester(dev);
1460 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1461 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1462 IXGBE_DCA_CTRL_DCA_DISABLE);
1463 }
1464 break;
1465 }
1466
1467 return 0;
1468}
1469
1470#endif
1471
1472#define IXGBE_RSS_L4_TYPES_MASK \
1473 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
1474 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
1475 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
1476 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
1477
1478static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1479 union ixgbe_adv_rx_desc *rx_desc,
1480 struct sk_buff *skb)
1481{
1482 u16 rss_type;
1483
1484 if (!(ring->netdev->features & NETIF_F_RXHASH))
1485 return;
1486
1487 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
1488 IXGBE_RXDADV_RSSTYPE_MASK;
1489
1490 if (!rss_type)
1491 return;
1492
1493 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1494 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
1495 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1496}
1497
1498#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
1506static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1507 union ixgbe_adv_rx_desc *rx_desc)
1508{
1509 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1510
1511 return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1512 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1513 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1514 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1515}
1516
1517#endif
1518
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
1524static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1525 union ixgbe_adv_rx_desc *rx_desc,
1526 struct sk_buff *skb)
1527{
1528 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1529 bool encap_pkt = false;
1530
1531 skb_checksum_none_assert(skb);
1532
1533
1534 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1535 return;
1536
1537
1538 if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
1539 encap_pkt = true;
1540 skb->encapsulation = 1;
1541 }
1542
1543
1544 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1545 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1546 ring->rx_stats.csum_err++;
1547 return;
1548 }
1549
1550 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1551 return;
1552
1553 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
1558 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1559 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1560 return;
1561
1562 ring->rx_stats.csum_err++;
1563 return;
1564 }
1565
1566
1567 skb->ip_summed = CHECKSUM_UNNECESSARY;
1568 if (encap_pkt) {
1569 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1570 return;
1571
1572 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1573 skb->ip_summed = CHECKSUM_NONE;
1574 return;
1575 }
1576
1577 skb->csum_level = 1;
1578 }
1579}
1580
1581static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
1582{
1583 return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
1584}
1585
1586static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1587 struct ixgbe_rx_buffer *bi)
1588{
1589 struct page *page = bi->page;
1590 dma_addr_t dma;
1591
1592
1593 if (likely(page))
1594 return true;
1595
1596
1597 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
1598 if (unlikely(!page)) {
1599 rx_ring->rx_stats.alloc_rx_page_failed++;
1600 return false;
1601 }
1602
1603
1604 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1605 ixgbe_rx_pg_size(rx_ring),
1606 DMA_FROM_DEVICE,
1607 IXGBE_RX_DMA_ATTR);
	/*
	 * if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
1613 if (dma_mapping_error(rx_ring->dev, dma)) {
1614 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1615
1616 rx_ring->rx_stats.alloc_rx_page_failed++;
1617 return false;
1618 }
1619
1620 bi->dma = dma;
1621 bi->page = page;
1622 bi->page_offset = ixgbe_rx_offset(rx_ring);
1623 bi->pagecnt_bias = 1;
1624 rx_ring->rx_stats.alloc_rx_page++;
1625
1626 return true;
1627}
1628
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
1634void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1635{
1636 union ixgbe_adv_rx_desc *rx_desc;
1637 struct ixgbe_rx_buffer *bi;
1638 u16 i = rx_ring->next_to_use;
1639 u16 bufsz;
1640
1641
1642 if (!cleaned_count)
1643 return;
1644
1645 rx_desc = IXGBE_RX_DESC(rx_ring, i);
1646 bi = &rx_ring->rx_buffer_info[i];
1647 i -= rx_ring->count;
1648
1649 bufsz = ixgbe_rx_bufsz(rx_ring);
1650
1651 do {
1652 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1653 break;
1654
		/* sync the buffer for use by the device */
1656 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1657 bi->page_offset, bufsz,
1658 DMA_FROM_DEVICE);
1659
		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
1664 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1665
1666 rx_desc++;
1667 bi++;
1668 i++;
1669 if (unlikely(!i)) {
1670 rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1671 bi = rx_ring->rx_buffer_info;
1672 i -= rx_ring->count;
1673 }
1674
		/* clear the length for the next_to_use descriptor */
1676 rx_desc->wb.upper.length = 0;
1677
1678 cleaned_count--;
1679 } while (cleaned_count);
1680
1681 i += rx_ring->count;
1682
1683 if (rx_ring->next_to_use != i) {
1684 rx_ring->next_to_use = i;
1685
		/* update next to alloc since we have filled the ring */
1687 rx_ring->next_to_alloc = i;
1688
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
1694 wmb();
1695 writel(i, rx_ring->tail);
1696 }
1697}
1698
1699static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1700 struct sk_buff *skb)
1701{
1702 u16 hdr_len = skb_headlen(skb);
1703
1704
1705 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1706 IXGBE_CB(skb)->append_cnt);
1707 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1708}
1709
1710static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1711 struct sk_buff *skb)
1712{
1713
1714 if (!IXGBE_CB(skb)->append_cnt)
1715 return;
1716
1717 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1718 rx_ring->rx_stats.rsc_flush++;
1719
1720 ixgbe_set_rsc_gso_size(rx_ring, skb);
1721
1722
1723 IXGBE_CB(skb)->append_cnt = 0;
1724}
1725
/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
1736static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1737 union ixgbe_adv_rx_desc *rx_desc,
1738 struct sk_buff *skb)
1739{
1740 struct net_device *dev = rx_ring->netdev;
1741 u32 flags = rx_ring->q_vector->adapter->flags;
1742
1743 ixgbe_update_rsc_stats(rx_ring, skb);
1744
1745 ixgbe_rx_hash(rx_ring, rx_desc, skb);
1746
1747 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1748
1749 if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
1750 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
1751
1752 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1753 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1754 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1755 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1756 }
1757
1758 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
1759 ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
1760
1761 skb->protocol = eth_type_trans(skb, dev);
1762
1763
1764 if (netif_is_ixgbe(dev))
1765 skb_record_rx_queue(skb, rx_ring->queue_index);
1766 else
1767 macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
1768 (skb->pkt_type == PACKET_BROADCAST) ||
1769 (skb->pkt_type == PACKET_MULTICAST));
1770}
1771
1772static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1773 struct sk_buff *skb)
1774{
1775 napi_gro_receive(&q_vector->napi, skb);
1776}
1777
/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
1789static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1790 union ixgbe_adv_rx_desc *rx_desc,
1791 struct sk_buff *skb)
1792{
1793 u32 ntc = rx_ring->next_to_clean + 1;
1794
1795
1796 ntc = (ntc < rx_ring->count) ? ntc : 0;
1797 rx_ring->next_to_clean = ntc;
1798
1799 prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1800
1801
1802 if (ring_is_rsc_enabled(rx_ring)) {
1803 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1804 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1805
1806 if (unlikely(rsc_enabled)) {
1807 u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1808
1809 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1810 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1811
			/* update ntc based on RSC value */
1813 ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1814 ntc &= IXGBE_RXDADV_NEXTP_MASK;
1815 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1816 }
1817 }
1818
	/* if we are the last buffer then there is nothing else to do */
1820 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1821 return false;
1822
	/* place skb in next buffer to be received */
1824 rx_ring->rx_buffer_info[ntc].skb = skb;
1825 rx_ring->rx_stats.non_eop_descs++;
1826
1827 return true;
1828}
1829
/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
1842static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1843 struct sk_buff *skb)
1844{
1845 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1846 unsigned char *va;
1847 unsigned int pull_len;
1848
1849
1850
1851
1852
1853
1854 va = skb_frag_address(frag);
1855
	/*
	 * we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
1860 pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
1861
1862
1863 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1864
1865
1866 skb_frag_size_sub(frag, pull_len);
1867 frag->page_offset += pull_len;
1868 skb->data_len -= pull_len;
1869 skb->tail += pull_len;
1870}
1871
/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */
1882static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1883 struct sk_buff *skb)
1884{
1885
1886 if (unlikely(IXGBE_CB(skb)->page_released)) {
1887 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
1888 ixgbe_rx_pg_size(rx_ring),
1889 DMA_FROM_DEVICE,
1890 IXGBE_RX_DMA_ATTR);
1891 } else if (ring_uses_build_skb(rx_ring)) {
1892 unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
1893
1894 dma_sync_single_range_for_cpu(rx_ring->dev,
1895 IXGBE_CB(skb)->dma,
1896 offset,
1897 skb_headlen(skb),
1898 DMA_FROM_DEVICE);
1899 } else {
1900 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1901
1902 dma_sync_single_range_for_cpu(rx_ring->dev,
1903 IXGBE_CB(skb)->dma,
1904 frag->page_offset,
1905 skb_frag_size(frag),
1906 DMA_FROM_DEVICE);
1907 }
1908}
1909
/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check if the skb is valid; in the XDP case it will be an error pointer.
 * Return true in this case to abort processing and advance to next
 * descriptor.
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
1932static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1933 union ixgbe_adv_rx_desc *rx_desc,
1934 struct sk_buff *skb)
1935{
1936 struct net_device *netdev = rx_ring->netdev;
1937
1938
1939 if (IS_ERR(skb))
1940 return true;
1941
1942
1943
1944
1945 if (!netdev ||
1946 (unlikely(ixgbe_test_staterr(rx_desc,
1947 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1948 !(netdev->features & NETIF_F_RXALL)))) {
1949 dev_kfree_skb_any(skb);
1950 return true;
1951 }
1952
1953
1954 if (!skb_headlen(skb))
1955 ixgbe_pull_tail(rx_ring, skb);
1956
1957#ifdef IXGBE_FCOE
	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
1959 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1960 return false;
1961
1962#endif
1963
1964 if (eth_skb_pad(skb))
1965 return true;
1966
1967 return false;
1968}
1969
/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
1977static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1978 struct ixgbe_rx_buffer *old_buff)
1979{
1980 struct ixgbe_rx_buffer *new_buff;
1981 u16 nta = rx_ring->next_to_alloc;
1982
1983 new_buff = &rx_ring->rx_buffer_info[nta];
1984
1985
1986 nta++;
1987 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1988
	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
1993 new_buff->dma = old_buff->dma;
1994 new_buff->page = old_buff->page;
1995 new_buff->page_offset = old_buff->page_offset;
1996 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1997}
1998
1999static inline bool ixgbe_page_is_reserved(struct page *page)
2000{
2001 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
2002}
2003
2004static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
2005{
2006 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
2007 struct page *page = rx_buffer->page;
2008
2009
2010 if (unlikely(ixgbe_page_is_reserved(page)))
2011 return false;
2012
2013#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
2015 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
2016 return false;
2017#else
	/* The last offset is a bit aggressive in that we assume the
	 * worst case of FCoE being enabled and using a 3K buffer.
	 * However this should have minimal impact as the 1K extra is
	 * still less than one buffer in size.
	 */
2023#define IXGBE_LAST_OFFSET \
2024 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
2025 if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
2026 return false;
2027#endif
2028
	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
2033 if (unlikely(!pagecnt_bias)) {
2034 page_ref_add(page, USHRT_MAX);
2035 rx_buffer->pagecnt_bias = USHRT_MAX;
2036 }
2037
2038 return true;
2039}
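
/* The pagecnt_bias scheme above makes page recycling cheap: instead of
 * taking a page reference per received buffer, the refcount is bumped in
 * one large batch (USHRT_MAX) and a local bias is decremented each time a
 * buffer is handed up the stack.  With the half-page (PAGE_SIZE < 8192)
 * layout a page may only be reused while the stack holds at most one
 * outstanding reference, i.e. page_ref_count(page) - pagecnt_bias <= 1.
 */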
2040
/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of data in rx_buffer
 *
 * This function adds the data contained in rx_buffer->page to the skb by
 * attaching the page region as a fragment; no copy is performed here.
 *
 * The function then updates the page offset (flipping between the two
 * halves of the page on small-page systems, or advancing by the buffer's
 * truesize on larger pages) so the remainder of the page can be posted
 * back to hardware.
 **/
2056static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
2057 struct ixgbe_rx_buffer *rx_buffer,
2058 struct sk_buff *skb,
2059 unsigned int size)
2060{
2061#if (PAGE_SIZE < 8192)
2062 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2063#else
2064 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2065 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
2066 SKB_DATA_ALIGN(size);
2067#endif
2068 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
2069 rx_buffer->page_offset, size, truesize);
2070#if (PAGE_SIZE < 8192)
2071 rx_buffer->page_offset ^= truesize;
2072#else
2073 rx_buffer->page_offset += truesize;
2074#endif
2075}
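
/* Buffer reuse relies on the page_offset update above: with a 4096-byte
 * PAGE_SIZE and 2K receive buffers the offset simply toggles between the
 * two halves of the page (0 <-> 2048), so the half just given to the stack
 * rests while the other half is posted back to hardware.  On larger pages
 * the offset advances by the buffer's true size instead.
 */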
2076
2077static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
2078 union ixgbe_adv_rx_desc *rx_desc,
2079 struct sk_buff **skb,
2080 const unsigned int size)
2081{
2082 struct ixgbe_rx_buffer *rx_buffer;
2083
2084 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
2085 prefetchw(rx_buffer->page);
2086 *skb = rx_buffer->skb;
2087
	/* Delay unmapping of the first packet.  It carries the header
	 * information and the hardware may still access it after writeback,
	 * so only unmap it once EOP has been reached.
	 */
2092 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
2093 if (!*skb)
2094 goto skip_sync;
2095 } else {
2096 if (*skb)
2097 ixgbe_dma_sync_frag(rx_ring, *skb);
2098 }
2099
2100
2101 dma_sync_single_range_for_cpu(rx_ring->dev,
2102 rx_buffer->dma,
2103 rx_buffer->page_offset,
2104 size,
2105 DMA_FROM_DEVICE);
2106skip_sync:
2107 rx_buffer->pagecnt_bias--;
2108
2109 return rx_buffer;
2110}
2111
2112static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
2113 struct ixgbe_rx_buffer *rx_buffer,
2114 struct sk_buff *skb)
2115{
2116 if (ixgbe_can_reuse_rx_page(rx_buffer)) {
2117
2118 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2119 } else {
2120 if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
2121
2122 IXGBE_CB(skb)->page_released = true;
2123 } else {
2124
2125 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2126 ixgbe_rx_pg_size(rx_ring),
2127 DMA_FROM_DEVICE,
2128 IXGBE_RX_DMA_ATTR);
2129 }
2130 __page_frag_cache_drain(rx_buffer->page,
2131 rx_buffer->pagecnt_bias);
2132 }
2133
2134
2135 rx_buffer->page = NULL;
2136 rx_buffer->skb = NULL;
2137}
2138
2139static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
2140 struct ixgbe_rx_buffer *rx_buffer,
2141 struct xdp_buff *xdp,
2142 union ixgbe_adv_rx_desc *rx_desc)
2143{
2144 unsigned int size = xdp->data_end - xdp->data;
2145#if (PAGE_SIZE < 8192)
2146 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2147#else
2148 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
2149 xdp->data_hard_start);
2150#endif
2151 struct sk_buff *skb;
2152
	/* prefetch first cache line of first page */
2154 prefetch(xdp->data);
2155#if L1_CACHE_BYTES < 128
2156 prefetch(xdp->data + L1_CACHE_BYTES);
2157#endif
2158
	/* Allocate an skb large enough to hold the packet headers.  The
	 * payload stays in the page and is either attached below as a page
	 * fragment or, for small frames, copied into the linear area.
	 */
2175 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
2176 if (unlikely(!skb))
2177 return NULL;
2178
2179 if (size > IXGBE_RX_HDR_SIZE) {
2180 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2181 IXGBE_CB(skb)->dma = rx_buffer->dma;
2182
2183 skb_add_rx_frag(skb, 0, rx_buffer->page,
2184 xdp->data - page_address(rx_buffer->page),
2185 size, truesize);
2186#if (PAGE_SIZE < 8192)
2187 rx_buffer->page_offset ^= truesize;
2188#else
2189 rx_buffer->page_offset += truesize;
2190#endif
2191 } else {
2192 memcpy(__skb_put(skb, size),
2193 xdp->data, ALIGN(size, sizeof(long)));
2194 rx_buffer->pagecnt_bias++;
2195 }
2196
2197 return skb;
2198}
2199
2200static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
2201 struct ixgbe_rx_buffer *rx_buffer,
2202 struct xdp_buff *xdp,
2203 union ixgbe_adv_rx_desc *rx_desc)
2204{
2205 unsigned int metasize = xdp->data - xdp->data_meta;
2206#if (PAGE_SIZE < 8192)
2207 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2208#else
2209 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2210 SKB_DATA_ALIGN(xdp->data_end -
2211 xdp->data_hard_start);
2212#endif
2213 struct sk_buff *skb;
2214
	/* Prefetch first cache line of first page.  If xdp->data_meta
	 * is unused, this points exactly at xdp->data, otherwise we
	 * likely have a consumer accessing the first few bytes of meta
	 * data, and then the actual data.
	 */
2220 prefetch(xdp->data_meta);
2221#if L1_CACHE_BYTES < 128
2222 prefetch(xdp->data_meta + L1_CACHE_BYTES);
2223#endif
2224
2225
2226 skb = build_skb(xdp->data_hard_start, truesize);
2227 if (unlikely(!skb))
2228 return NULL;
2229
2230
2231 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2232 __skb_put(skb, xdp->data_end - xdp->data);
2233 if (metasize)
2234 skb_metadata_set(skb, metasize);
2235
	/* record DMA address if this is the start of a chain of buffers */
2237 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2238 IXGBE_CB(skb)->dma = rx_buffer->dma;
2239
2240
2241#if (PAGE_SIZE < 8192)
2242 rx_buffer->page_offset ^= truesize;
2243#else
2244 rx_buffer->page_offset += truesize;
2245#endif
2246
2247 return skb;
2248}
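
/* ixgbe_build_skb() and ixgbe_construct_skb() are the two ways a received
 * buffer becomes an skb: build_skb() wraps the existing page memory
 * directly (headroom and skb_shared_info live in the same buffer), while
 * construct_skb() allocates a separate skb head and copies or attaches the
 * page data.  Which path runs is decided per ring via ring_uses_build_skb()
 * in the receive cleanup loop below.
 */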
2249
2250#define IXGBE_XDP_PASS 0
2251#define IXGBE_XDP_CONSUMED 1
2252#define IXGBE_XDP_TX 2
2253
2254static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
2255 struct xdp_buff *xdp);
2256
2257static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
2258 struct ixgbe_ring *rx_ring,
2259 struct xdp_buff *xdp)
2260{
2261 int err, result = IXGBE_XDP_PASS;
2262 struct bpf_prog *xdp_prog;
2263 u32 act;
2264
2265 rcu_read_lock();
2266 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2267
2268 if (!xdp_prog)
2269 goto xdp_out;
2270
2271 act = bpf_prog_run_xdp(xdp_prog, xdp);
2272 switch (act) {
2273 case XDP_PASS:
2274 break;
2275 case XDP_TX:
2276 result = ixgbe_xmit_xdp_ring(adapter, xdp);
2277 break;
2278 case XDP_REDIRECT:
2279 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
2280 if (!err)
2281 result = IXGBE_XDP_TX;
2282 else
2283 result = IXGBE_XDP_CONSUMED;
2284 break;
2285 default:
2286 bpf_warn_invalid_xdp_action(act);
		/* fall through */
2288 case XDP_ABORTED:
2289 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fall through -- handle aborts by dropping packet */
2291 case XDP_DROP:
2292 result = IXGBE_XDP_CONSUMED;
2293 break;
2294 }
2295xdp_out:
2296 rcu_read_unlock();
2297 return ERR_PTR(-result);
2298}
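
/* ixgbe_run_xdp() encodes its verdict in the returned pointer: NULL
 * (ERR_PTR(-IXGBE_XDP_PASS), since IXGBE_XDP_PASS is 0) means the frame
 * should continue through the normal skb path, while a true error pointer
 * carries IXGBE_XDP_TX or IXGBE_XDP_CONSUMED so the caller can tell a
 * transmitted/redirected frame from a dropped one.
 */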
2299
2300static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
2301 struct ixgbe_rx_buffer *rx_buffer,
2302 unsigned int size)
2303{
2304#if (PAGE_SIZE < 8192)
2305 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2306
2307 rx_buffer->page_offset ^= truesize;
2308#else
2309 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2310 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
2311 SKB_DATA_ALIGN(size);
2312
2313 rx_buffer->page_offset += truesize;
2314#endif
2315}
2316
/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  Received frames remain in their DMA-mapped pages; headers
 * are either copied into a freshly allocated skb or the page is attached
 * to an skb built around it, and completed skbs are handed to the stack
 * via GRO.
 *
 * Returns amount of work completed
 **/
2330static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2331 struct ixgbe_ring *rx_ring,
2332 const int budget)
2333{
2334 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2335 struct ixgbe_adapter *adapter = q_vector->adapter;
2336#ifdef IXGBE_FCOE
2337 int ddp_bytes;
2338 unsigned int mss = 0;
2339#endif
2340 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2341 bool xdp_xmit = false;
2342 struct xdp_buff xdp;
2343
2344 xdp.rxq = &rx_ring->xdp_rxq;
2345
2346 while (likely(total_rx_packets < budget)) {
2347 union ixgbe_adv_rx_desc *rx_desc;
2348 struct ixgbe_rx_buffer *rx_buffer;
2349 struct sk_buff *skb;
2350 unsigned int size;
2351
		/* return some buffers to hardware, one at a time is too slow */
2353 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2354 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2355 cleaned_count = 0;
2356 }
2357
2358 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2359 size = le16_to_cpu(rx_desc->wb.upper.length);
2360 if (!size)
2361 break;
2362
		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
2367 dma_rmb();
2368
2369 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
2370
2371
2372 if (!skb) {
2373 xdp.data = page_address(rx_buffer->page) +
2374 rx_buffer->page_offset;
2375 xdp.data_meta = xdp.data;
2376 xdp.data_hard_start = xdp.data -
2377 ixgbe_rx_offset(rx_ring);
2378 xdp.data_end = xdp.data + size;
2379
2380 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2381 }
2382
2383 if (IS_ERR(skb)) {
2384 if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
2385 xdp_xmit = true;
2386 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2387 } else {
2388 rx_buffer->pagecnt_bias++;
2389 }
2390 total_rx_packets++;
2391 total_rx_bytes += size;
2392 } else if (skb) {
2393 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
2394 } else if (ring_uses_build_skb(rx_ring)) {
2395 skb = ixgbe_build_skb(rx_ring, rx_buffer,
2396 &xdp, rx_desc);
2397 } else {
2398 skb = ixgbe_construct_skb(rx_ring, rx_buffer,
2399 &xdp, rx_desc);
2400 }
2401
		/* exit if we failed to retrieve a buffer */
2403 if (!skb) {
2404 rx_ring->rx_stats.alloc_rx_buff_failed++;
2405 rx_buffer->pagecnt_bias++;
2406 break;
2407 }
2408
2409 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
2410 cleaned_count++;
2411
		/* place incomplete frames back on ring for completion */
2413 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2414 continue;
2415
		/* verify the packet layout is correct */
2417 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2418 continue;
2419
2420
2421 total_rx_bytes += skb->len;
2422
		/* populate checksum, timestamp, VLAN, and protocol */
2424 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2425
2426#ifdef IXGBE_FCOE
2427
2428 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2429 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			/* include DDPed FCoE data in the byte/packet counts */
2431 if (ddp_bytes > 0) {
2432 if (!mss) {
2433 mss = rx_ring->netdev->mtu -
2434 sizeof(struct fcoe_hdr) -
2435 sizeof(struct fc_frame_header) -
2436 sizeof(struct fcoe_crc_eof);
2437 if (mss > 512)
2438 mss &= ~511;
2439 }
2440 total_rx_bytes += ddp_bytes;
2441 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2442 mss);
2443 }
2444 if (!ddp_bytes) {
2445 dev_kfree_skb_any(skb);
2446 continue;
2447 }
2448 }
2449
2450#endif
2451 ixgbe_rx_skb(q_vector, skb);
2452
		/* update budget accounting */
2454 total_rx_packets++;
2455 }
2456
2457 if (xdp_xmit) {
2458 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
2459
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
2463 wmb();
2464 writel(ring->next_to_use, ring->tail);
2465
2466 xdp_do_flush_map();
2467 }
2468
2469 u64_stats_update_begin(&rx_ring->syncp);
2470 rx_ring->stats.packets += total_rx_packets;
2471 rx_ring->stats.bytes += total_rx_bytes;
2472 u64_stats_update_end(&rx_ring->syncp);
2473 q_vector->rx.total_packets += total_rx_packets;
2474 q_vector->rx.total_bytes += total_rx_bytes;
2475
2476 return total_rx_packets;
2477}
2478
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
2486static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2487{
2488 struct ixgbe_q_vector *q_vector;
2489 int v_idx;
2490 u32 mask;
2491
2492
2493 if (adapter->num_vfs > 32) {
2494 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
2495 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2496 }
2497
	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
2502 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2503 struct ixgbe_ring *ring;
2504 q_vector = adapter->q_vector[v_idx];
2505
2506 ixgbe_for_each_ring(ring, q_vector->rx)
2507 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2508
2509 ixgbe_for_each_ring(ring, q_vector->tx)
2510 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2511
2512 ixgbe_write_eitr(q_vector);
2513 }
2514
2515 switch (adapter->hw.mac.type) {
2516 case ixgbe_mac_82598EB:
2517 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2518 v_idx);
2519 break;
2520 case ixgbe_mac_82599EB:
2521 case ixgbe_mac_X540:
2522 case ixgbe_mac_X550:
2523 case ixgbe_mac_X550EM_x:
2524 case ixgbe_mac_x550em_a:
2525 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2526 break;
2527 default:
2528 break;
2529 }
2530 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2531
	/* set up to autoclear timer, and the vectors */
2533 mask = IXGBE_EIMS_ENABLE_MASK;
2534 mask &= ~(IXGBE_EIMS_OTHER |
2535 IXGBE_EIMS_MAILBOX |
2536 IXGBE_EIMS_LSC);
2537
2538 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2539}
2540
/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were chosen to balance response time against bulk throughput
 * for the observed packet and byte counts.
 **/
2554static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2555 struct ixgbe_ring_container *ring_container)
2556{
2557 unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
2558 IXGBE_ITR_ADAPTIVE_LATENCY;
2559 unsigned int avg_wire_size, packets, bytes;
2560 unsigned long next_update = jiffies;
2561
	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
2565 if (!ring_container->ring)
2566 return;
2567
	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
2573 if (time_after(next_update, ring_container->next_update))
2574 goto clear_counts;
2575
2576 packets = ring_container->total_packets;
2577
	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * When this occurs just tick up our delay by the minimum value
	 * and hope that this extra delay will prevent us from being called
	 * too often.
	 */
2586 if (!packets) {
2587 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2588 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2589 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2590 itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
2591 goto clear_counts;
2592 }
2593
2594 bytes = ring_container->total_bytes;
2595
	/* If packets are less than 4 or bytes are less than 9000 assume
	 * insufficient data to use bulk rate limiting approach. We are
	 * likely latency driven.
	 */
2600 if (packets < 4 && bytes < 9000) {
2601 itr = IXGBE_ITR_ADAPTIVE_LATENCY;
2602 goto adjust_by_size;
2603 }
2604
	/* Between 4 and 48 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
2609 if (packets < 48) {
2610 itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
2611 if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
2612 itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
2613 goto clear_counts;
2614 }
2615
	/* Between 48 and 96 packets per interrupt the current delay is
	 * about right, so just report the current ITR back unchanged.
	 */
2619 if (packets < 96) {
2620 itr = q_vector->itr >> 2;
2621 goto clear_counts;
2622 }
2623
	/* If packet count is 96 or greater we are likely looking at a slight
	 * overrun of the delay we want. Try halving our delay to see if that
	 * will cut the number of packets in half per interrupt.
	 */
2628 if (packets < 256) {
2629 itr = q_vector->itr >> 3;
2630 if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
2631 itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
2632 goto clear_counts;
2633 }
2634
	/* The paths below assume we are dealing with a bulk ITR since the
	 * number of packets is 256 or greater. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
2641 itr = IXGBE_ITR_ADAPTIVE_BULK;
2642
2643adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to
	 * fine tune it just use the formula below to try and dial in an
	 * exact value given the current packet size of the frame.
	 */
2649 avg_wire_size = bytes / packets;
2650
	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_intvl
	 *  (desired_pkt_rate / pkt_intvl) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
2666 if (avg_wire_size <= 60) {
2667
2668 avg_wire_size = 5120;
2669 } else if (avg_wire_size <= 316) {
2670
2671 avg_wire_size *= 40;
2672 avg_wire_size += 2720;
2673 } else if (avg_wire_size <= 1084) {
2674
2675 avg_wire_size *= 15;
2676 avg_wire_size += 11452;
2677 } else if (avg_wire_size <= 1980) {
2678
2679 avg_wire_size *= 5;
2680 avg_wire_size += 22420;
2681 } else {
2682
2683 avg_wire_size = 32256;
2684 }
2685
	/* If we are in low latency mode, halve our delay which doubles the
	 * rate at which interrupts can fire.
	 */
2689 if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
2690 avg_wire_size >>= 1;
2691
	/* The value computed above is scaled up; divide it back down to the
	 * actual ITR increment.  The divisor depends on link speed because
	 * the same byte count represents more time on a slower link.
	 */
2699 switch (q_vector->adapter->link_speed) {
2700 case IXGBE_LINK_SPEED_10GB_FULL:
2701 case IXGBE_LINK_SPEED_100_FULL:
2702 default:
2703 itr += DIV_ROUND_UP(avg_wire_size,
2704 IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
2705 IXGBE_ITR_ADAPTIVE_MIN_INC;
2706 break;
2707 case IXGBE_LINK_SPEED_2_5GB_FULL:
2708 case IXGBE_LINK_SPEED_1GB_FULL:
2709 case IXGBE_LINK_SPEED_10_FULL:
2710 itr += DIV_ROUND_UP(avg_wire_size,
2711 IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
2712 IXGBE_ITR_ADAPTIVE_MIN_INC;
2713 break;
2714 }
2715
2716clear_counts:
2717
2718 ring_container->itr = itr;
2719
2720
2721 ring_container->next_update = next_update + 1;
2722
2723 ring_container->total_bytes = 0;
2724 ring_container->total_packets = 0;
2725}
2726
/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
2735void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2736{
2737 struct ixgbe_adapter *adapter = q_vector->adapter;
2738 struct ixgbe_hw *hw = &adapter->hw;
2739 int v_idx = q_vector->v_idx;
2740 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2741
2742 switch (adapter->hw.mac.type) {
2743 case ixgbe_mac_82598EB:
2744
2745 itr_reg |= (itr_reg << 16);
2746 break;
2747 case ixgbe_mac_82599EB:
2748 case ixgbe_mac_X540:
2749 case ixgbe_mac_X550:
2750 case ixgbe_mac_X550EM_x:
2751 case ixgbe_mac_x550em_a:
		/* set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
2756 itr_reg |= IXGBE_EITR_CNT_WDIS;
2757 break;
2758 default:
2759 break;
2760 }
2761 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2762}
2763
2764static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2765{
2766 u32 new_itr;
2767
2768 ixgbe_update_itr(q_vector, &q_vector->tx);
2769 ixgbe_update_itr(q_vector, &q_vector->rx);
2770
	/* use the smallest value from the Rx and Tx calculations */
2772 new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
2773
	/* clear the latency flag and convert back to EITR register units */
2775 new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
2776 new_itr <<= 2;
2777
2778 if (new_itr != q_vector->itr) {
2779
2780 q_vector->itr = new_itr;
2781
2782 ixgbe_write_eitr(q_vector);
2783 }
2784}
2785
/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/
2790static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2791{
2792 struct ixgbe_hw *hw = &adapter->hw;
2793 u32 eicr = adapter->interrupt_event;
2794 s32 rc;
2795
2796 if (test_bit(__IXGBE_DOWN, &adapter->state))
2797 return;
2798
2799 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2800 return;
2801
2802 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2803
2804 switch (hw->device_id) {
2805 case IXGBE_DEV_ID_82599_T3_LOM:
		/* The warning interrupt is shared by both ports, so we
		 * cannot tell whether it was meant for this port.  We may
		 * also have missed the interrupt, so always check for an
		 * LSC as well.
		 */
2813 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2814 !(eicr & IXGBE_EICR_LSC))
2815 return;
2816
2817 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2818 u32 speed;
2819 bool link_up = false;
2820
2821 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2822
2823 if (link_up)
2824 return;
2825 }
2826
2827
2828 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2829 return;
2830
2831 break;
2832 case IXGBE_DEV_ID_X550EM_A_1G_T:
2833 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2834 rc = hw->phy.ops.check_overtemp(hw);
2835 if (rc != IXGBE_ERR_OVERTEMP)
2836 return;
2837 break;
2838 default:
2839 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2840 return;
2841 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2842 return;
2843 break;
2844 }
2845 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2846
2847 adapter->interrupt_event = 0;
2848}
2849
2850static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2851{
2852 struct ixgbe_hw *hw = &adapter->hw;
2853
2854 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2855 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2856 e_crit(probe, "Fan has stopped, replace the adapter\n");
2857
2858 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2859 }
2860}
2861
2862static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2863{
2864 struct ixgbe_hw *hw = &adapter->hw;
2865
2866 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2867 return;
2868
2869 switch (adapter->hw.mac.type) {
2870 case ixgbe_mac_82599EB:
		/* Need to check link state, so complete the overtemp check
		 * in the service task.
		 */
2875 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2876 (eicr & IXGBE_EICR_LSC)) &&
2877 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2878 adapter->interrupt_event = eicr;
2879 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2880 ixgbe_service_event_schedule(adapter);
2881 return;
2882 }
2883 return;
2884 case ixgbe_mac_x550em_a:
2885 if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
2886 adapter->interrupt_event = eicr;
2887 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2888 ixgbe_service_event_schedule(adapter);
2889 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
2890 IXGBE_EICR_GPI_SDP0_X550EM_a);
2891 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
2892 IXGBE_EICR_GPI_SDP0_X550EM_a);
2893 }
2894 return;
2895 case ixgbe_mac_X550:
2896 case ixgbe_mac_X540:
2897 if (!(eicr & IXGBE_EICR_TS))
2898 return;
2899 break;
2900 default:
2901 return;
2902 }
2903
2904 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2905}
2906
2907static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2908{
2909 switch (hw->mac.type) {
2910 case ixgbe_mac_82598EB:
2911 if (hw->phy.type == ixgbe_phy_nl)
2912 return true;
2913 return false;
2914 case ixgbe_mac_82599EB:
2915 case ixgbe_mac_X550EM_x:
2916 case ixgbe_mac_x550em_a:
2917 switch (hw->mac.ops.get_media_type(hw)) {
2918 case ixgbe_media_type_fiber:
2919 case ixgbe_media_type_fiber_qsfp:
2920 return true;
2921 default:
2922 return false;
2923 }
2924 default:
2925 return false;
2926 }
2927}
2928
2929static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2930{
2931 struct ixgbe_hw *hw = &adapter->hw;
2932 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2933
2934 if (!ixgbe_is_sfp(hw))
2935 return;
2936
	/* newer MACs signal SFP insertion on a different SDP pin */
2938 if (hw->mac.type >= ixgbe_mac_X540)
2939 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2940
2941 if (eicr & eicr_mask) {
2942
2943 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2944 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2945 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2946 adapter->sfp_poll_time = 0;
2947 ixgbe_service_event_schedule(adapter);
2948 }
2949 }
2950
2951 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2952 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2953
2954 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2955 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2956 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2957 ixgbe_service_event_schedule(adapter);
2958 }
2959 }
2960}
2961
2962static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2963{
2964 struct ixgbe_hw *hw = &adapter->hw;
2965
2966 adapter->lsc_int++;
2967 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2968 adapter->link_check_timeout = jiffies;
2969 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2970 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2971 IXGBE_WRITE_FLUSH(hw);
2972 ixgbe_service_event_schedule(adapter);
2973 }
2974}
2975
2976static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2977 u64 qmask)
2978{
2979 u32 mask;
2980 struct ixgbe_hw *hw = &adapter->hw;
2981
2982 switch (hw->mac.type) {
2983 case ixgbe_mac_82598EB:
2984 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2985 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2986 break;
2987 case ixgbe_mac_82599EB:
2988 case ixgbe_mac_X540:
2989 case ixgbe_mac_X550:
2990 case ixgbe_mac_X550EM_x:
2991 case ixgbe_mac_x550em_a:
2992 mask = (qmask & 0xFFFFFFFF);
2993 if (mask)
2994 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2995 mask = (qmask >> 32);
2996 if (mask)
2997 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2998 break;
2999 default:
3000 break;
3001 }
3002
3003}
3004
3005static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
3006 u64 qmask)
3007{
3008 u32 mask;
3009 struct ixgbe_hw *hw = &adapter->hw;
3010
3011 switch (hw->mac.type) {
3012 case ixgbe_mac_82598EB:
3013 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
3014 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
3015 break;
3016 case ixgbe_mac_82599EB:
3017 case ixgbe_mac_X540:
3018 case ixgbe_mac_X550:
3019 case ixgbe_mac_X550EM_x:
3020 case ixgbe_mac_x550em_a:
3021 mask = (qmask & 0xFFFFFFFF);
3022 if (mask)
3023 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
3024 mask = (qmask >> 32);
3025 if (mask)
3026 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
3027 break;
3028 default:
3029 break;
3030 }
3031
3032}
3033
/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @queues: enable irqs for queues
 * @flush: flush register write
 **/
3040static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
3041 bool flush)
3042{
3043 struct ixgbe_hw *hw = &adapter->hw;
3044 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3045
3046
3047 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
3048 mask &= ~IXGBE_EIMS_LSC;
3049
3050 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
3051 switch (adapter->hw.mac.type) {
3052 case ixgbe_mac_82599EB:
3053 mask |= IXGBE_EIMS_GPI_SDP0(hw);
3054 break;
3055 case ixgbe_mac_X540:
3056 case ixgbe_mac_X550:
3057 case ixgbe_mac_X550EM_x:
3058 case ixgbe_mac_x550em_a:
3059 mask |= IXGBE_EIMS_TS;
3060 break;
3061 default:
3062 break;
3063 }
3064 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3065 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3066 switch (adapter->hw.mac.type) {
3067 case ixgbe_mac_82599EB:
3068 mask |= IXGBE_EIMS_GPI_SDP1(hw);
3069 mask |= IXGBE_EIMS_GPI_SDP2(hw);
		/* fall through */
3071 case ixgbe_mac_X540:
3072 case ixgbe_mac_X550:
3073 case ixgbe_mac_X550EM_x:
3074 case ixgbe_mac_x550em_a:
3075 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3076 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3077 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
3078 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
3079 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
3080 mask |= IXGBE_EICR_GPI_SDP0_X540;
3081 mask |= IXGBE_EIMS_ECC;
3082 mask |= IXGBE_EIMS_MAILBOX;
3083 break;
3084 default:
3085 break;
3086 }
3087
3088 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
3089 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
3090 mask |= IXGBE_EIMS_FLOW_DIR;
3091
3092 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
3093 if (queues)
3094 ixgbe_irq_enable_queues(adapter, ~0);
3095 if (flush)
3096 IXGBE_WRITE_FLUSH(&adapter->hw);
3097}
3098
3099static irqreturn_t ixgbe_msix_other(int irq, void *data)
3100{
3101 struct ixgbe_adapter *adapter = data;
3102 struct ixgbe_hw *hw = &adapter->hw;
3103 u32 eicr;
3104
	/* Read the EICS register, which returns the pending interrupt
	 * causes without clearing them; the relevant bits are cleared
	 * explicitly by the EICR write below (clear-by-write instead of
	 * clear-by-read, working around a silicon erratum).
	 */
3111 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3112
	/* The lower 16 bits of EICR are the queue interrupts; mask them off
	 * here so that the write below does not accidentally clear them if
	 * they happen to be set while ixgbe_msix_other() runs.
	 */
3120 eicr &= 0xFFFF0000;
3121
3122 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3123
3124 if (eicr & IXGBE_EICR_LSC)
3125 ixgbe_check_lsc(adapter);
3126
3127 if (eicr & IXGBE_EICR_MAILBOX)
3128 ixgbe_msg_task(adapter);
3129
3130 switch (hw->mac.type) {
3131 case ixgbe_mac_82599EB:
3132 case ixgbe_mac_X540:
3133 case ixgbe_mac_X550:
3134 case ixgbe_mac_X550EM_x:
3135 case ixgbe_mac_x550em_a:
3136 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
3137 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3138 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
3139 ixgbe_service_event_schedule(adapter);
3140 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3141 IXGBE_EICR_GPI_SDP0_X540);
3142 }
3143 if (eicr & IXGBE_EICR_ECC) {
3144 e_info(link, "Received ECC Err, initiating reset\n");
3145 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3146 ixgbe_service_event_schedule(adapter);
3147 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3148 }
3149
3150 if (eicr & IXGBE_EICR_FLOW_DIR) {
3151 int reinit_count = 0;
3152 int i;
3153 for (i = 0; i < adapter->num_tx_queues; i++) {
3154 struct ixgbe_ring *ring = adapter->tx_ring[i];
3155 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
3156 &ring->state))
3157 reinit_count++;
3158 }
3159 if (reinit_count) {
3160
3161 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3162 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
3163 ixgbe_service_event_schedule(adapter);
3164 }
3165 }
3166 ixgbe_check_sfp_event(adapter, eicr);
3167 ixgbe_check_overtemp_event(adapter, eicr);
3168 break;
3169 default:
3170 break;
3171 }
3172
3173 ixgbe_check_fan_failure(adapter, eicr);
3174
3175 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3176 ixgbe_ptp_check_pps_event(adapter);
3177
3178
3179 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3180 ixgbe_irq_enable(adapter, false, false);
3181
3182 return IRQ_HANDLED;
3183}
3184
3185static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
3186{
3187 struct ixgbe_q_vector *q_vector = data;
3188
3189
3190
3191 if (q_vector->rx.ring || q_vector->tx.ring)
3192 napi_schedule_irqoff(&q_vector->napi);
3193
3194 return IRQ_HANDLED;
3195}
3196
/**
 * ixgbe_poll - NAPI polling RX/TX cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 **/
3204int ixgbe_poll(struct napi_struct *napi, int budget)
3205{
3206 struct ixgbe_q_vector *q_vector =
3207 container_of(napi, struct ixgbe_q_vector, napi);
3208 struct ixgbe_adapter *adapter = q_vector->adapter;
3209 struct ixgbe_ring *ring;
3210 int per_ring_budget, work_done = 0;
3211 bool clean_complete = true;
3212
3213#ifdef CONFIG_IXGBE_DCA
3214 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3215 ixgbe_update_dca(q_vector);
3216#endif
3217
3218 ixgbe_for_each_ring(ring, q_vector->tx) {
3219 if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
3220 clean_complete = false;
3221 }
3222
	/* Exit if we are called by netpoll */
3224 if (budget <= 0)
3225 return budget;
3226
	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
3229 if (q_vector->rx.count > 1)
3230 per_ring_budget = max(budget/q_vector->rx.count, 1);
3231 else
3232 per_ring_budget = budget;
3233
3234 ixgbe_for_each_ring(ring, q_vector->rx) {
3235 int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
3236 per_ring_budget);
3237
3238 work_done += cleaned;
3239 if (cleaned >= per_ring_budget)
3240 clean_complete = false;
3241 }
3242
	/* If all work not completed, return budget and keep polling */
3244 if (!clean_complete)
3245 return budget;
3246
	/* all work done, exit the polling mode */
3248 napi_complete_done(napi, work_done);
3249 if (adapter->rx_itr_setting & 1)
3250 ixgbe_set_itr(q_vector);
3251 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3252 ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
3253
3254 return min(work_done, budget - 1);
3255}
3256
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
3264static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
3265{
3266 struct net_device *netdev = adapter->netdev;
3267 unsigned int ri = 0, ti = 0;
3268 int vector, err;
3269
3270 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3271 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3272 struct msix_entry *entry = &adapter->msix_entries[vector];
3273
3274 if (q_vector->tx.ring && q_vector->rx.ring) {
3275 snprintf(q_vector->name, sizeof(q_vector->name),
3276 "%s-TxRx-%u", netdev->name, ri++);
3277 ti++;
3278 } else if (q_vector->rx.ring) {
3279 snprintf(q_vector->name, sizeof(q_vector->name),
3280 "%s-rx-%u", netdev->name, ri++);
3281 } else if (q_vector->tx.ring) {
3282 snprintf(q_vector->name, sizeof(q_vector->name),
3283 "%s-tx-%u", netdev->name, ti++);
3284 } else {
3285
3286 continue;
3287 }
3288 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
3289 q_vector->name, q_vector);
3290 if (err) {
3291 e_err(probe, "request_irq failed for MSIX interrupt "
3292 "Error: %d\n", err);
3293 goto free_queue_irqs;
3294 }
3295
3296 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3297
3298 irq_set_affinity_hint(entry->vector,
3299 &q_vector->affinity_mask);
3300 }
3301 }
3302
3303 err = request_irq(adapter->msix_entries[vector].vector,
3304 ixgbe_msix_other, 0, netdev->name, adapter);
3305 if (err) {
3306 e_err(probe, "request_irq for msix_other failed: %d\n", err);
3307 goto free_queue_irqs;
3308 }
3309
3310 return 0;
3311
3312free_queue_irqs:
3313 while (vector) {
3314 vector--;
3315 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
3316 NULL);
3317 free_irq(adapter->msix_entries[vector].vector,
3318 adapter->q_vector[vector]);
3319 }
3320 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3321 pci_disable_msix(adapter->pdev);
3322 kfree(adapter->msix_entries);
3323 adapter->msix_entries = NULL;
3324 return err;
3325}
3326
/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
3332static irqreturn_t ixgbe_intr(int irq, void *data)
3333{
3334 struct ixgbe_adapter *adapter = data;
3335 struct ixgbe_hw *hw = &adapter->hw;
3336 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3337 u32 eicr;
3338
	/* Workaround for a silicon erratum on 82598: mask the interrupt
	 * before the read of EICR.
	 */
3343 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3344
3345
3346
3347 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3348 if (!eicr) {
		/* shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM; unmask the
		 * interrupt that we masked before the EICR read.
		 */
3356 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3357 ixgbe_irq_enable(adapter, true, true);
3358 return IRQ_NONE;
3359 }
3360
3361 if (eicr & IXGBE_EICR_LSC)
3362 ixgbe_check_lsc(adapter);
3363
3364 switch (hw->mac.type) {
3365 case ixgbe_mac_82599EB:
3366 ixgbe_check_sfp_event(adapter, eicr);
		/* fall through */
3368 case ixgbe_mac_X540:
3369 case ixgbe_mac_X550:
3370 case ixgbe_mac_X550EM_x:
3371 case ixgbe_mac_x550em_a:
3372 if (eicr & IXGBE_EICR_ECC) {
3373 e_info(link, "Received ECC Err, initiating reset\n");
3374 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3375 ixgbe_service_event_schedule(adapter);
3376 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3377 }
3378 ixgbe_check_overtemp_event(adapter, eicr);
3379 break;
3380 default:
3381 break;
3382 }
3383
3384 ixgbe_check_fan_failure(adapter, eicr);
3385 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3386 ixgbe_ptp_check_pps_event(adapter);
3387
3388
3389 napi_schedule_irqoff(&q_vector->napi);
3390
	/* re-enable link(maybe) and non-queue interrupts, no flush.
	 * ixgbe_poll will re-enable the queue interrupts
	 */
3395 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3396 ixgbe_irq_enable(adapter, false, false);
3397
3398 return IRQ_HANDLED;
3399}
3400
/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
3408static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
3409{
3410 struct net_device *netdev = adapter->netdev;
3411 int err;
3412
3413 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3414 err = ixgbe_request_msix_irqs(adapter);
3415 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
3416 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
3417 netdev->name, adapter);
3418 else
3419 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
3420 netdev->name, adapter);
3421
3422 if (err)
3423 e_err(probe, "request_irq failed, Error %d\n", err);
3424
3425 return err;
3426}
3427
3428static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
3429{
3430 int vector;
3431
3432 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3433 free_irq(adapter->pdev->irq, adapter);
3434 return;
3435 }
3436
3437 if (!adapter->msix_entries)
3438 return;
3439
3440 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3441 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3442 struct msix_entry *entry = &adapter->msix_entries[vector];
3443
3444
3445 if (!q_vector->rx.ring && !q_vector->tx.ring)
3446 continue;
3447
3448
3449 irq_set_affinity_hint(entry->vector, NULL);
3450
3451 free_irq(entry->vector, q_vector);
3452 }
3453
3454 free_irq(adapter->msix_entries[vector].vector, adapter);
3455}
3456
/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
3461static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3462{
3463 switch (adapter->hw.mac.type) {
3464 case ixgbe_mac_82598EB:
3465 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3466 break;
3467 case ixgbe_mac_82599EB:
3468 case ixgbe_mac_X540:
3469 case ixgbe_mac_X550:
3470 case ixgbe_mac_X550EM_x:
3471 case ixgbe_mac_x550em_a:
3472 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3473 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3474 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3475 break;
3476 default:
3477 break;
3478 }
3479 IXGBE_WRITE_FLUSH(&adapter->hw);
3480 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3481 int vector;
3482
3483 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3484 synchronize_irq(adapter->msix_entries[vector].vector);
3485
3486 synchronize_irq(adapter->msix_entries[vector++].vector);
3487 } else {
3488 synchronize_irq(adapter->pdev->irq);
3489 }
3490}
3491
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
3497static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3498{
3499 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3500
3501 ixgbe_write_eitr(q_vector);
3502
3503 ixgbe_set_ivar(adapter, 0, 0, 0);
3504 ixgbe_set_ivar(adapter, 1, 0, 0);
3505
3506 e_info(hw, "Legacy interrupt IVAR setup done\n");
3507}
3508
/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
3516void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3517 struct ixgbe_ring *ring)
3518{
3519 struct ixgbe_hw *hw = &adapter->hw;
3520 u64 tdba = ring->dma;
3521 int wait_loop = 10;
3522 u32 txdctl = IXGBE_TXDCTL_ENABLE;
3523 u8 reg_idx = ring->reg_idx;
3524
3525
3526 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3527 IXGBE_WRITE_FLUSH(hw);
3528
3529 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3530 (tdba & DMA_BIT_MASK(32)));
3531 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3532 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3533 ring->count * sizeof(union ixgbe_adv_tx_desc));
3534 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3535 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3536 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3537
	/* set WTHRESH to encourage burst writeback, it should not be set
	 * higher than 1 when ITR is 0 as it could cause false TX hangs
	 *
	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
3548 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3549 txdctl |= 1u << 16;
3550 else
3551 txdctl |= 8u << 16;
3552
	/* Setting PTHRESH to 32 both improves performance
	 * and avoids a TX hang with DFP enabled
	 */
3557 txdctl |= (1u << 8) |
3558 32;
3559
3560
3561 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3562 ring->atr_sample_rate = adapter->atr_sample_rate;
3563 ring->atr_count = 0;
3564 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3565 } else {
3566 ring->atr_sample_rate = 0;
3567 }
3568
3569
3570 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3571 struct ixgbe_q_vector *q_vector = ring->q_vector;
3572
3573 if (q_vector)
3574 netif_set_xps_queue(ring->netdev,
3575 &q_vector->affinity_mask,
3576 ring->queue_index);
3577 }
3578
3579 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3580
3581
3582 memset(ring->tx_buffer_info, 0,
3583 sizeof(struct ixgbe_tx_buffer) * ring->count);
3584
3585
3586 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3587
3588
3589 if (hw->mac.type == ixgbe_mac_82598EB &&
3590 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3591 return;
3592
3593
3594 do {
3595 usleep_range(1000, 2000);
3596 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3597 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3598 if (!wait_loop)
3599 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
3600}
3601
3602static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3603{
3604 struct ixgbe_hw *hw = &adapter->hw;
3605 u32 rttdcs, mtqc;
3606 u8 tcs = adapter->hw_tcs;
3607
3608 if (hw->mac.type == ixgbe_mac_82598EB)
3609 return;
3610
3611
3612 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3613 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3614 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3615
3616
3617 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3618 mtqc = IXGBE_MTQC_VT_ENA;
3619 if (tcs > 4)
3620 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3621 else if (tcs > 1)
3622 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3623 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3624 IXGBE_82599_VMDQ_4Q_MASK)
3625 mtqc |= IXGBE_MTQC_32VF;
3626 else
3627 mtqc |= IXGBE_MTQC_64VF;
3628 } else {
3629 if (tcs > 4)
3630 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3631 else if (tcs > 1)
3632 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3633 else
3634 mtqc = IXGBE_MTQC_64Q_1PB;
3635 }
3636
3637 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3638
3639
3640 if (tcs) {
3641 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3642 sectx |= IXGBE_SECTX_DCB;
3643 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3644 }
3645
3646
3647 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3648 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3649}
3650
/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
3657static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3658{
3659 struct ixgbe_hw *hw = &adapter->hw;
3660 u32 dmatxctl;
3661 u32 i;
3662
3663 ixgbe_setup_mtqc(adapter);
3664
3665 if (hw->mac.type != ixgbe_mac_82598EB) {
3666
3667 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3668 dmatxctl |= IXGBE_DMATXCTL_TE;
3669 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3670 }
3671
3672
3673 for (i = 0; i < adapter->num_tx_queues; i++)
3674 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3675 for (i = 0; i < adapter->num_xdp_queues; i++)
3676 ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
3677}
3678
3679static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3680 struct ixgbe_ring *ring)
3681{
3682 struct ixgbe_hw *hw = &adapter->hw;
3683 u8 reg_idx = ring->reg_idx;
3684 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3685
3686 srrctl |= IXGBE_SRRCTL_DROP_EN;
3687
3688 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3689}
3690
3691static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3692 struct ixgbe_ring *ring)
3693{
3694 struct ixgbe_hw *hw = &adapter->hw;
3695 u8 reg_idx = ring->reg_idx;
3696 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3697
3698 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3699
3700 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3701}
3702
3703#ifdef CONFIG_IXGBE_DCB
3704void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3705#else
3706static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3707#endif
3708{
3709 int i;
3710 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3711
3712 if (adapter->ixgbe_ieee_pfc)
3713 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3725 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3726 for (i = 0; i < adapter->num_rx_queues; i++)
3727 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3728 } else {
3729 for (i = 0; i < adapter->num_rx_queues; i++)
3730 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3731 }
3732}
3733
3734#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3735
3736static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3737 struct ixgbe_ring *rx_ring)
3738{
3739 struct ixgbe_hw *hw = &adapter->hw;
3740 u32 srrctl;
3741 u8 reg_idx = rx_ring->reg_idx;
3742
3743 if (hw->mac.type == ixgbe_mac_82598EB) {
3744 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3745
		/* if VMDq is not active we must program one srrctl register
		 * per RSS queue since we have enabled RDRXCTL.MVMEN
		 */
3750 reg_idx &= mask;
3751 }
3752
3753
3754 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3755
3756
3757 if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
3758 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3759 else
3760 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3761
3762
3763 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3764
3765 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3766}
3767
/**
 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
 * @adapter: device handle
 *
 *  - 82598/82599/X540:     128
 *  - X550(non-SRIOV mode): 512
 *  - X550(SRIOV mode):     64
 **/
3776u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3777{
3778 if (adapter->hw.mac.type < ixgbe_mac_X550)
3779 return 128;
3780 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3781 return 64;
3782 else
3783 return 512;
3784}
3785
/**
 * ixgbe_store_key - Write the RSS key to HW
 * @adapter: device handle
 *
 * Write the RSS key stored in adapter.rss_key to HW.
 **/
3792void ixgbe_store_key(struct ixgbe_adapter *adapter)
3793{
3794 struct ixgbe_hw *hw = &adapter->hw;
3795 int i;
3796
3797 for (i = 0; i < 10; i++)
3798 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3799}
3800
/**
 * ixgbe_init_rss_key - Initialize adapter RSS key
 * @adapter: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 **/
3807static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
3808{
3809 u32 *rss_key;
3810
3811 if (!adapter->rss_key) {
3812 rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
3813 if (unlikely(!rss_key))
3814 return -ENOMEM;
3815
3816 netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
3817 adapter->rss_key = rss_key;
3818 }
3819
3820 return 0;
3821}
3822
/**
 * ixgbe_store_reta - Write the RETA table to HW
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl to HW.
 **/
3829void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3830{
3831 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3832 struct ixgbe_hw *hw = &adapter->hw;
3833 u32 reta = 0;
3834 u32 indices_multi;
3835 u8 *indir_tbl = adapter->rss_indir_tbl;
3836
	/* Fill out the redirection table as follows:
	 *  - 82598:      8 bit wide entries containing pair of 4 bit RSS
	 *    indices.
	 *  - 82599/X540: 8 bit wide entries containing 4 bit RSS index
	 *  - X550:       8 bit wide entries containing 6 bit RSS index
	 */
3843 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3844 indices_multi = 0x11;
3845 else
3846 indices_multi = 0x1;
3847
3848
3849 for (i = 0; i < reta_entries; i++) {
3850 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3851 if ((i & 3) == 3) {
3852 if (i < 128)
3853 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3854 else
3855 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3856 reta);
3857 reta = 0;
3858 }
3859 }
3860}
3861
/**
 * ixgbe_store_vfreta - Write the RETA table to HW in SR-IOV mode
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl to HW.
 **/
3868static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3869{
3870 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3871 struct ixgbe_hw *hw = &adapter->hw;
3872 u32 vfreta = 0;
3873
3874
3875 for (i = 0; i < reta_entries; i++) {
3876 u16 pool = adapter->num_rx_pools;
3877
3878 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3879 if ((i & 3) != 3)
3880 continue;
3881
3882 while (pool--)
3883 IXGBE_WRITE_REG(hw,
3884 IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
3885 vfreta);
3886 vfreta = 0;
3887 }
3888}
3889
3890static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3891{
3892 u32 i, j;
3893 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3894 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3895
	/* Program table for at least 4 queues w/ SR-IOV so that VFs can
	 * make full use of any rings they may have.  We will use the
	 * PSRTYPE register to control how many rings we use within the PF.
	 */
3900 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
3901 rss_i = 4;
3902
3903
3904 ixgbe_store_key(adapter);
3905
3906
3907 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3908
3909 for (i = 0, j = 0; i < reta_entries; i++, j++) {
3910 if (j == rss_i)
3911 j = 0;
3912
3913 adapter->rss_indir_tbl[i] = j;
3914 }
3915
3916 ixgbe_store_reta(adapter);
3917}
3918
3919static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3920{
3921 struct ixgbe_hw *hw = &adapter->hw;
3922 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3923 int i, j;
3924
3925
3926 for (i = 0; i < 10; i++) {
3927 u16 pool = adapter->num_rx_pools;
3928
3929 while (pool--)
3930 IXGBE_WRITE_REG(hw,
3931 IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
3932 *(adapter->rss_key + i));
3933 }
3934
3935
3936 for (i = 0, j = 0; i < 64; i++, j++) {
3937 if (j == rss_i)
3938 j = 0;
3939
3940 adapter->rss_indir_tbl[i] = j;
3941 }
3942
3943 ixgbe_store_vfreta(adapter);
3944}
3945
3946static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3947{
3948 struct ixgbe_hw *hw = &adapter->hw;
3949 u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3950 u32 rxcsum;
3951
3952
3953 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3954 rxcsum |= IXGBE_RXCSUM_PCSD;
3955 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3956
3957 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3958 if (adapter->ring_feature[RING_F_RSS].mask)
3959 mrqc = IXGBE_MRQC_RSSEN;
3960 } else {
3961 u8 tcs = adapter->hw_tcs;
3962
3963 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3964 if (tcs > 4)
3965 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3966 else if (tcs > 1)
3967 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3968 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3969 IXGBE_82599_VMDQ_4Q_MASK)
3970 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3971 else
3972 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3973
3974
3975 mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3976 } else {
3977 if (tcs > 4)
3978 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3979 else if (tcs > 1)
3980 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3981 else
3982 mrqc = IXGBE_MRQC_RSSEN;
3983 }
3984 }
3985
3986
3987 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3988 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3989 IXGBE_MRQC_RSS_FIELD_IPV6 |
3990 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3991
3992 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3993 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3994 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3995 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3996
3997 if ((hw->mac.type >= ixgbe_mac_X550) &&
3998 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3999 u16 pool = adapter->num_rx_pools;
4000
4001
4002 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
4003 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4004
4005
4006 ixgbe_setup_vfreta(adapter);
4007 vfmrqc = IXGBE_MRQC_RSSEN;
4008 vfmrqc |= rss_field;
4009
4010 while (pool--)
4011 IXGBE_WRITE_REG(hw,
4012 IXGBE_PFVFMRQC(VMDQ_P(pool)),
4013 vfmrqc);
4014 } else {
4015 ixgbe_setup_reta(adapter);
4016 mrqc |= rss_field;
4017 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4018 }
4019}
4020
/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 **/
4026static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
4027 struct ixgbe_ring *ring)
4028{
4029 struct ixgbe_hw *hw = &adapter->hw;
4030 u32 rscctrl;
4031 u8 reg_idx = ring->reg_idx;
4032
4033 if (!ring_is_rsc_enabled(ring))
4034 return;
4035
4036 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
4037 rscctrl |= IXGBE_RSCCTL_RSCEN;
4038
	/* we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65536
	 */
4043 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
4044 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
4045}
4046
4047#define IXGBE_MAX_RX_DESC_POLL 10
4048static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
4049 struct ixgbe_ring *ring)
4050{
4051 struct ixgbe_hw *hw = &adapter->hw;
4052 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
4053 u32 rxdctl;
4054 u8 reg_idx = ring->reg_idx;
4055
4056 if (ixgbe_removed(hw->hw_addr))
4057 return;
4058
4059 if (hw->mac.type == ixgbe_mac_82598EB &&
4060 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
4061 return;
4062
4063 do {
4064 usleep_range(1000, 2000);
4065 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4066 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
4067
4068 if (!wait_loop) {
4069 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
4070 "the polling period\n", reg_idx);
4071 }
4072}
4073
4074void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
4075 struct ixgbe_ring *ring)
4076{
4077 struct ixgbe_hw *hw = &adapter->hw;
4078 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
4079 u32 rxdctl;
4080 u8 reg_idx = ring->reg_idx;
4081
4082 if (ixgbe_removed(hw->hw_addr))
4083 return;
4084 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4085 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
4086
4087
4088 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4089
4090 if (hw->mac.type == ixgbe_mac_82598EB &&
4091 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
4092 return;
4093
4094
4095 do {
4096 udelay(10);
4097 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4098 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
4099
4100 if (!wait_loop) {
4101 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
4102 "the polling period\n", reg_idx);
4103 }
4104}
4105
4106void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
4107 struct ixgbe_ring *ring)
4108{
4109 struct ixgbe_hw *hw = &adapter->hw;
4110 union ixgbe_adv_rx_desc *rx_desc;
4111 u64 rdba = ring->dma;
4112 u32 rxdctl;
4113 u8 reg_idx = ring->reg_idx;
4114
	/* disable queue to avoid use of these values while updating state */
4116 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
4117 ixgbe_disable_rx_queue(adapter, ring);
4118
4119 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
4120 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
4121 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
4122 ring->count * sizeof(union ixgbe_adv_rx_desc));
4123
4124 IXGBE_WRITE_FLUSH(hw);
4125
4126 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
4127 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
4128 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
4129
4130 ixgbe_configure_srrctl(adapter, ring);
4131 ixgbe_configure_rscctl(adapter, ring);
4132
4133 if (hw->mac.type == ixgbe_mac_82598EB) {
		/* enable cache line friendly hardware writes:
		 * PTHRESH=32 descriptors (half the internal cache)
		 * HTHRESH=4 descriptors (to minimize latency on fetch)
		 * WTHRESH=8 burst writeback up to two cache lines
		 */
4141 rxdctl &= ~0x3FFFFF;
4142 rxdctl |= 0x080420;
4143#if (PAGE_SIZE < 8192)
4144
4145 } else if (hw->mac.type != ixgbe_mac_82599EB) {
4146 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4147 IXGBE_RXDCTL_RLPML_EN);
4148
		/* Limit the maximum frame size so we don't overrun the skb */
4153 if (ring_uses_build_skb(ring) &&
4154 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
4155 rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
4156 IXGBE_RXDCTL_RLPML_EN;
4157#endif
4158 }
4159
4160
4161 memset(ring->rx_buffer_info, 0,
4162 sizeof(struct ixgbe_rx_buffer) * ring->count);
4163
4164
4165 rx_desc = IXGBE_RX_DESC(ring, 0);
4166 rx_desc->wb.upper.length = 0;
4167
4168
4169 rxdctl |= IXGBE_RXDCTL_ENABLE;
4170 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4171
4172 ixgbe_rx_desc_queue_enable(adapter, ring);
4173 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4174}
4175
4176static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
4177{
4178 struct ixgbe_hw *hw = &adapter->hw;
4179 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
4180 u16 pool = adapter->num_rx_pools;
4181
4182
4183 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4184 IXGBE_PSRTYPE_UDPHDR |
4185 IXGBE_PSRTYPE_IPV4HDR |
4186 IXGBE_PSRTYPE_L2HDR |
4187 IXGBE_PSRTYPE_IPV6HDR;
4188
4189 if (hw->mac.type == ixgbe_mac_82598EB)
4190 return;
4191
4192 if (rss_i > 3)
4193 psrtype |= 2u << 29;
4194 else if (rss_i > 1)
4195 psrtype |= 1u << 29;
4196
4197 while (pool--)
4198 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4199}
4200
4201static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
4202{
4203 struct ixgbe_hw *hw = &adapter->hw;
4204 u32 reg_offset, vf_shift;
4205 u32 gcr_ext, vmdctl;
4206 int i;
4207
4208 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
4209 return;
4210
4211 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4212 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
4213 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
4214 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
4215 vmdctl |= IXGBE_VT_CTL_REPLEN;
4216 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
4217
4218 vf_shift = VMDQ_P(0) % 32;
4219 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
4220
	/* Enable Tx/Rx for the PF pool and every pool above it.  reg_offset
	 * is a u32, so the (reg_offset - 1) written to the other half of the
	 * register pair is all ones when the PF pool sits in the low half
	 * and zero when it sits in the high half.
	 */
4222 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
4223 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
4224 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
4225 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
4226 if (adapter->bridge_mode == BRIDGE_MODE_VEB)
4227 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4228
4229
4230 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
4231
4232
4233 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4234
	/* Set up VF register offsets for selected VT Mode,
	 * i.e. 32 or 64 VFs for SR-IOV
	 */
4239 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4240 case IXGBE_82599_VMDQ_8Q_MASK:
4241 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
4242 break;
4243 case IXGBE_82599_VMDQ_4Q_MASK:
4244 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
4245 break;
4246 default:
4247 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
4248 break;
4249 }
4250
4251 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4252
4253 for (i = 0; i < adapter->num_vfs; i++) {
4254
4255 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
4256 adapter->vfinfo[i].spoofchk_enabled);
4257
4258
4259 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
4260 adapter->vfinfo[i].rss_query_enabled);
4261 }
4262}
4263
4264static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
4265{
4266 struct ixgbe_hw *hw = &adapter->hw;
4267 struct net_device *netdev = adapter->netdev;
4268 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4269 struct ixgbe_ring *rx_ring;
4270 int i;
4271 u32 mhadd, hlreg0;
4272
4273#ifdef IXGBE_FCOE
4274
4275 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
4276 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
4277 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4278
4279#endif
4280
4281
4282 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4283 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
4284
4285 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4286 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
4287 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4288 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
4289
4290 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4291 }
4292
4293 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4294
4295 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4296 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4297
	/* configure per-ring buffer flags (RSC, 3K buffers, build_skb)
	 * based on the current feature settings
	 */
4302 for (i = 0; i < adapter->num_rx_queues; i++) {
4303 rx_ring = adapter->rx_ring[i];
4304
4305 clear_ring_rsc_enabled(rx_ring);
4306 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4307 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4308
4309 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4310 set_ring_rsc_enabled(rx_ring);
4311
4312 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
4313 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4314
4315 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4316 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
4317 continue;
4318
4319 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4320
4321#if (PAGE_SIZE < 8192)
4322 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4323 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4324
4325 if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
4326 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
4327 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4328#endif
4329 }
4330}
4331
4332static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
4333{
4334 struct ixgbe_hw *hw = &adapter->hw;
4335 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4336
4337 switch (hw->mac.type) {
4338 case ixgbe_mac_82598EB:
		/* For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 */
4349 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
4350 break;
4351 case ixgbe_mac_X550:
4352 case ixgbe_mac_X550EM_x:
4353 case ixgbe_mac_x550em_a:
4354 if (adapter->num_vfs)
4355 rdrxctl |= IXGBE_RDRXCTL_PSP;
		/* fall through */
4357 case ixgbe_mac_82599EB:
4358 case ixgbe_mac_X540:
4359
4360 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
4361 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
4362 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4363
4364 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
4365 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4366 break;
4367 default:
4368
4369 return;
4370 }
4371
4372 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4373}
4374
/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
4381static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
4382{
4383 struct ixgbe_hw *hw = &adapter->hw;
4384 int i;
4385 u32 rxctrl, rfctl;
4386
4387
4388 hw->mac.ops.disable_rx(hw);
4389
4390 ixgbe_setup_psrtype(adapter);
4391 ixgbe_setup_rdrxctl(adapter);
4392
4393
4394 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4395 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
4396 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
4397 rfctl |= IXGBE_RFCTL_RSC_DIS;
4398
4399
4400 rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
4401 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4402
4403
4404 ixgbe_setup_mrqc(adapter);
4405
4406
4407 ixgbe_set_rx_buffer_len(adapter);
4408
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
4413 for (i = 0; i < adapter->num_rx_queues; i++)
4414 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
4415
4416 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4417
4418 if (hw->mac.type == ixgbe_mac_82598EB)
4419 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4420
4421
4422 rxctrl |= IXGBE_RXCTRL_RXEN;
4423 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4424}
4425
4426static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
4427 __be16 proto, u16 vid)
4428{
4429 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4430 struct ixgbe_hw *hw = &adapter->hw;
4431
4432
4433 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4434 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
4435
4436 set_bit(vid, adapter->active_vlans);
4437
4438 return 0;
4439}
4440
4441static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
4442{
4443 u32 vlvf;
4444 int idx;
4445
4446
4447 if (vlan == 0)
4448 return 0;
4449
4450
4451 for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
4452 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
4453 if ((vlvf & VLAN_VID_MASK) == vlan)
4454 break;
4455 }
4456
4457 return idx;
4458}
4459
4460void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
4461{
4462 struct ixgbe_hw *hw = &adapter->hw;
4463 u32 bits, word;
4464 int idx;
4465
4466 idx = ixgbe_find_vlvf_entry(hw, vid);
4467 if (!idx)
4468 return;
4469
	/* See if any other pools are set for this VLAN filter
	 * entry other than the PF.
	 */
4473 word = idx * 2 + (VMDQ_P(0) / 32);
4474 bits = ~BIT(VMDQ_P(0) % 32);
4475 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4476
4477
4478 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
4479 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4480 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
4481 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
4482 }
4483}
4484
4485static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
4486 __be16 proto, u16 vid)
4487{
4488 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4489 struct ixgbe_hw *hw = &adapter->hw;
4490
4491
4492 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4493 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
4494
4495 clear_bit(vid, adapter->active_vlans);
4496
4497 return 0;
4498}
4499
/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */
4504static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
4505{
4506 struct ixgbe_hw *hw = &adapter->hw;
4507 u32 vlnctrl;
4508 int i, j;
4509
4510 switch (hw->mac.type) {
4511 case ixgbe_mac_82598EB:
4512 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4513 vlnctrl &= ~IXGBE_VLNCTRL_VME;
4514 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4515 break;
4516 case ixgbe_mac_82599EB:
4517 case ixgbe_mac_X540:
4518 case ixgbe_mac_X550:
4519 case ixgbe_mac_X550EM_x:
4520 case ixgbe_mac_x550em_a:
4521 for (i = 0; i < adapter->num_rx_queues; i++) {
4522 struct ixgbe_ring *ring = adapter->rx_ring[i];
4523
4524 if (!netif_is_ixgbe(ring->netdev))
4525 continue;
4526
4527 j = ring->reg_idx;
4528 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4529 vlnctrl &= ~IXGBE_RXDCTL_VME;
4530 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4531 }
4532 break;
4533 default:
4534 break;
4535 }
4536}
4537
/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */
4542static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4543{
4544 struct ixgbe_hw *hw = &adapter->hw;
4545 u32 vlnctrl;
4546 int i, j;
4547
4548 switch (hw->mac.type) {
4549 case ixgbe_mac_82598EB:
4550 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4551 vlnctrl |= IXGBE_VLNCTRL_VME;
4552 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4553 break;
4554 case ixgbe_mac_82599EB:
4555 case ixgbe_mac_X540:
4556 case ixgbe_mac_X550:
4557 case ixgbe_mac_X550EM_x:
4558 case ixgbe_mac_x550em_a:
4559 for (i = 0; i < adapter->num_rx_queues; i++) {
4560 struct ixgbe_ring *ring = adapter->rx_ring[i];
4561
4562 if (!netif_is_ixgbe(ring->netdev))
4563 continue;
4564
4565 j = ring->reg_idx;
4566 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4567 vlnctrl |= IXGBE_RXDCTL_VME;
4568 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4569 }
4570 break;
4571 default:
4572 break;
4573 }
4574}
4575
4576static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4577{
4578 struct ixgbe_hw *hw = &adapter->hw;
4579 u32 vlnctrl, i;
4580
4581 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4582
4583 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
4584
4585 vlnctrl |= IXGBE_VLNCTRL_VFE;
4586 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4587 } else {
4588 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4589 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4590 return;
4591 }
4592
4593
4594 if (hw->mac.type == ixgbe_mac_82598EB)
4595 return;
4596
4597
4598 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
4599 return;
4600
4601
4602 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4603
4604
4605 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4606 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4607 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4608
4609 vlvfb |= BIT(VMDQ_P(0) % 32);
4610 IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4611 }
4612
	/* Set all bits in the VLAN filter table array */
4614 for (i = hw->mac.vft_size; i--;)
4615 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4616}
4617
4618#define VFTA_BLOCK_SIZE 8
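/* ixgbe_scrub_vfta - drop the PF from VLVF entries whose VLAN is no longer
 * active and rewrite one VFTA_BLOCK_SIZE-register block of the VLAN filter
 * table from the software active_vlans bitmap.
 */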
4619static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4620{
4621 struct ixgbe_hw *hw = &adapter->hw;
4622 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4623 u32 vid_start = vfta_offset * 32;
4624 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4625 u32 i, vid, word, bits;
4626
4627 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4628 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
4629
4630
4631 vid = vlvf & VLAN_VID_MASK;
4632
4633
4634 if (vid < vid_start || vid >= vid_end)
4635 continue;
4636
4637 if (vlvf) {
4638
4639 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4640
4641
4642 if (test_bit(vid, adapter->active_vlans))
4643 continue;
4644 }
4645
4646
4647 word = i * 2 + VMDQ_P(0) / 32;
4648 bits = ~BIT(VMDQ_P(0) % 32);
4649 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4650 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4651 }
4652
4653
4654 for (i = VFTA_BLOCK_SIZE; i--;) {
4655 vid = (vfta_offset + i) * 32;
4656 word = vid / BITS_PER_LONG;
4657 bits = vid % BITS_PER_LONG;
4658
4659 vfta[i] |= adapter->active_vlans[word] >> bits;
4660
4661 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4662 }
4663}
4664
4665static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4666{
4667 struct ixgbe_hw *hw = &adapter->hw;
4668 u32 vlnctrl, i;
4669
4670
4671 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4672 vlnctrl |= IXGBE_VLNCTRL_VFE;
4673 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4674
4675 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
4676 hw->mac.type == ixgbe_mac_82598EB)
4677 return;
4678
4679
4680 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4681 return;
4682
4683
4684 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4685
4686 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
4687 ixgbe_scrub_vfta(adapter, i);
4688}
4689
4690static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4691{
4692 u16 vid = 1;
4693
4694 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4695
4696 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
4697 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4698}
4699
/**
 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *                0 on no addresses written
 *                X on writing X addresses to MTA
 **/
4709static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4710{
4711 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4712 struct ixgbe_hw *hw = &adapter->hw;
4713
4714 if (!netif_running(netdev))
4715 return 0;
4716
4717 if (hw->mac.ops.update_mc_addr_list)
4718 hw->mac.ops.update_mc_addr_list(hw, netdev);
4719 else
4720 return -ENOMEM;
4721
4722#ifdef CONFIG_PCI_IOV
4723 ixgbe_restore_vf_multicasts(adapter);
4724#endif
4725
4726 return netdev_mc_count(netdev);
4727}
4728
4729#ifdef CONFIG_PCI_IOV
4730void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4731{
4732 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4733 struct ixgbe_hw *hw = &adapter->hw;
4734 int i;
4735
4736 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4737 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4738
4739 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4740 hw->mac.ops.set_rar(hw, i,
4741 mac_table->addr,
4742 mac_table->pool,
4743 IXGBE_RAH_AV);
4744 else
4745 hw->mac.ops.clear_rar(hw, i);
4746 }
4747}
4748
4749#endif
4750static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4751{
4752 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4753 struct ixgbe_hw *hw = &adapter->hw;
4754 int i;
4755
4756 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4757 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
4758 continue;
4759
4760 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4761
4762 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4763 hw->mac.ops.set_rar(hw, i,
4764 mac_table->addr,
4765 mac_table->pool,
4766 IXGBE_RAH_AV);
4767 else
4768 hw->mac.ops.clear_rar(hw, i);
4769 }
4770}
4771
4772static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4773{
4774 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4775 struct ixgbe_hw *hw = &adapter->hw;
4776 int i;
4777
4778 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4779 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4780 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4781 }
4782
4783 ixgbe_sync_mac_table(adapter);
4784}
4785
4786static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
4787{
4788 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4789 struct ixgbe_hw *hw = &adapter->hw;
4790 int i, count = 0;
4791
4792 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4793
4794 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
4795 continue;
4796
4797
4798 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
4799 if (mac_table->pool != pool)
4800 continue;
4801 }
4802
4803 count++;
4804 }
4805
4806 return count;
4807}
4808
4809
4810static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
4811{
4812 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4813 struct ixgbe_hw *hw = &adapter->hw;
4814
4815 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
4816 mac_table->pool = VMDQ_P(0);
4817
4818 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
4819
4820 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
4821 IXGBE_RAH_AV);
4822}
4823
4824int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4825 const u8 *addr, u16 pool)
4826{
4827 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4828 struct ixgbe_hw *hw = &adapter->hw;
4829 int i;
4830
4831 if (is_zero_ether_addr(addr))
4832 return -EINVAL;
4833
4834 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4835 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4836 continue;
4837
4838 ether_addr_copy(mac_table->addr, addr);
4839 mac_table->pool = pool;
4840
4841 mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
4842 IXGBE_MAC_STATE_IN_USE;
4843
4844 ixgbe_sync_mac_table(adapter);
4845
4846 return i;
4847 }
4848
4849 return -ENOMEM;
4850}
4851
4852int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
4853 const u8 *addr, u16 pool)
4854{
4855 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4856 struct ixgbe_hw *hw = &adapter->hw;
4857 int i;
4858
4859 if (is_zero_ether_addr(addr))
4860 return -EINVAL;
4861
4862
4863 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4864
4865 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
4866 continue;
4867
4868 if (mac_table->pool != pool)
4869 continue;
4870
4871 if (!ether_addr_equal(addr, mac_table->addr))
4872 continue;
4873
4874 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4875 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4876
4877 ixgbe_sync_mac_table(adapter);
4878
4879 return 0;
4880 }
4881
4882 return -ENOMEM;
4883}
4884
4885
4886
4887
4888
4889
4890
4891
4892
4893
4894
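/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 * @vfn: pool to associate with the unicast addresses
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/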
4895static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
4896{
4897 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4898 int count = 0;
4899
4900
4901 if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
4902 return -ENOMEM;
4903
4904 if (!netdev_uc_empty(netdev)) {
4905 struct netdev_hw_addr *ha;
4906 netdev_for_each_uc_addr(ha, netdev) {
4907 ixgbe_del_mac_filter(adapter, ha->addr, vfn);
4908 ixgbe_add_mac_filter(adapter, ha->addr, vfn);
4909 count++;
4910 }
4911 }
4912 return count;
4913}
4914
4915static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
4916{
4917 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4918 int ret;
4919
4920 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
4921
4922 return min_t(int, ret, 0);
4923}
4924
4925static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4926{
4927 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4928
4929 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
4930
4931 return 0;
4932}
4933
4934
4935
4936
4937
4938
4939
4940
4941
4942
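/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * Called whenever the unicast/multicast address list or the network
 * interface flags are updated.  This routine is responsible for
 * configuring the hardware for proper unicast, multicast, promiscuous
 * mode, and VLAN filtering.
 **/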
4943void ixgbe_set_rx_mode(struct net_device *netdev)
4944{
4945 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4946 struct ixgbe_hw *hw = &adapter->hw;
4947 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4948 netdev_features_t features = netdev->features;
4949 int count;
4950
4951
4952 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4953
4954
4955 fctrl &= ~IXGBE_FCTRL_SBP;
4956 fctrl |= IXGBE_FCTRL_BAM;
4957 fctrl |= IXGBE_FCTRL_DPF;
4958 fctrl |= IXGBE_FCTRL_PMCF;
4959
4960
4961 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4962 if (netdev->flags & IFF_PROMISC) {
4963 hw->addr_ctrl.user_set_promisc = true;
4964 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4965 vmolr |= IXGBE_VMOLR_MPE;
4966 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4967 } else {
4968 if (netdev->flags & IFF_ALLMULTI) {
4969 fctrl |= IXGBE_FCTRL_MPE;
4970 vmolr |= IXGBE_VMOLR_MPE;
4971 }
4972 hw->addr_ctrl.user_set_promisc = false;
4973 }
4974
4975
4976
4977
4978
4979
4980 if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4981 fctrl |= IXGBE_FCTRL_UPE;
4982 vmolr |= IXGBE_VMOLR_ROPE;
4983 }
4984
4985
4986
4987
4988
4989 count = ixgbe_write_mc_addr_list(netdev);
4990 if (count < 0) {
4991 fctrl |= IXGBE_FCTRL_MPE;
4992 vmolr |= IXGBE_VMOLR_MPE;
4993 } else if (count) {
4994 vmolr |= IXGBE_VMOLR_ROMPE;
4995 }
4996
4997 if (hw->mac.type != ixgbe_mac_82598EB) {
4998 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4999 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
5000 IXGBE_VMOLR_ROPE);
5001 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
5002 }
5003
5004
5005 if (features & NETIF_F_RXALL) {
5006
5007
5008 fctrl |= (IXGBE_FCTRL_SBP |
5009 IXGBE_FCTRL_BAM |
5010 IXGBE_FCTRL_PMCF);
5011
5012 fctrl &= ~(IXGBE_FCTRL_DPF);
5013
5014 }
5015
5016 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5017
5018 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5019 ixgbe_vlan_strip_enable(adapter);
5020 else
5021 ixgbe_vlan_strip_disable(adapter);
5022
5023 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
5024 ixgbe_vlan_promisc_disable(adapter);
5025 else
5026 ixgbe_vlan_promisc_enable(adapter);
5027}
5028
5029static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
5030{
5031 int q_idx;
5032
5033 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
5034 napi_enable(&adapter->q_vector[q_idx]->napi);
5035}
5036
5037static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
5038{
5039 int q_idx;
5040
5041 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
5042 napi_disable(&adapter->q_vector[q_idx]->napi);
5043}
5044
5045static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
5046{
5047 struct ixgbe_hw *hw = &adapter->hw;
5048 u32 vxlanctrl;
5049
5050 if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
5051 IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
5052 return;
5053
5054 vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
5055 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
5056
5057 if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
5058 adapter->vxlan_port = 0;
5059
5060 if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
5061 adapter->geneve_port = 0;
5062}
5063
5064#ifdef CONFIG_IXGBE_DCB
5065
5066
5067
5068
5069
5070
5071
5072
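/**
 * ixgbe_configure_dcb - Configure DCB hardware support
 * @adapter: board private structure
 *
 * Programs the traffic class credits (CEE) or ETS/PFC (IEEE)
 * configuration and the RSS queue mapping (RQTC) when multiple
 * traffic classes are enabled.
 **/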
5073static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
5074{
5075 struct ixgbe_hw *hw = &adapter->hw;
5076 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
5077
5078 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
5079 if (hw->mac.type == ixgbe_mac_82598EB)
5080 netif_set_gso_max_size(adapter->netdev, 65536);
5081 return;
5082 }
5083
5084 if (hw->mac.type == ixgbe_mac_82598EB)
5085 netif_set_gso_max_size(adapter->netdev, 32768);
5086
5087#ifdef IXGBE_FCOE
5088 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
5089 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
5090#endif
5091
5092
5093 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
5094 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5095 DCB_TX_CONFIG);
5096 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
5097 DCB_RX_CONFIG);
5098 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
5099 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
5100 ixgbe_dcb_hw_ets(&adapter->hw,
5101 adapter->ixgbe_ieee_ets,
5102 max_frame);
5103 ixgbe_dcb_hw_pfc_config(&adapter->hw,
5104 adapter->ixgbe_ieee_pfc->pfc_en,
5105 adapter->ixgbe_ieee_ets->prio_tc);
5106 }
5107
5108
5109 if (hw->mac.type != ixgbe_mac_82598EB) {
5110 u32 msb = 0;
5111 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
5112
5113 while (rss_i) {
5114 msb++;
5115 rss_i >>= 1;
5116 }
5117
5118
5119 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
5120 }
5121}
5122#endif
5123
5124
5125#define IXGBE_ETH_FRAMING 20
5126
5127
5128
5129
5130
5131
5132
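/**
 * ixgbe_hpbthresh - calculate high water mark
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 *
 * Returns the flow control high water mark, in kilobytes, for the
 * given packet buffer.
 **/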
5133static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
5134{
5135 struct ixgbe_hw *hw = &adapter->hw;
5136 struct net_device *dev = adapter->netdev;
5137 int link, tc, kb, marker;
5138 u32 dv_id, rx_pba;
5139
5140
5141 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
5142
5143#ifdef IXGBE_FCOE
5144
5145 if ((dev->features & NETIF_F_FCOE_MTU) &&
5146 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5147 (pb == ixgbe_fcoe_get_tc(adapter)))
5148 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5149#endif
5150
5151
5152 switch (hw->mac.type) {
5153 case ixgbe_mac_X540:
5154 case ixgbe_mac_X550:
5155 case ixgbe_mac_X550EM_x:
5156 case ixgbe_mac_x550em_a:
5157 dv_id = IXGBE_DV_X540(link, tc);
5158 break;
5159 default:
5160 dv_id = IXGBE_DV(link, tc);
5161 break;
5162 }
5163
5164
5165 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5166 dv_id += IXGBE_B2BT(tc);
5167
5168
5169 kb = IXGBE_BT2KB(dv_id);
5170 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
5171
5172 marker = rx_pba - kb;
5173
5174
5175
5176
5177
5178 if (marker < 0) {
5179 e_warn(drv, "Packet Buffer(%i) cannot provide enough "
5180 "headroom to support flow control. "
5181 "Decrease MTU or number of traffic classes\n", pb);
5182 marker = tc + 1;
5183 }
5184
5185 return marker;
5186}
5187
5188
5189
5190
5191
5192
5193
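/**
 * ixgbe_lpbthresh - calculate low water mark
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 *
 * Returns the flow control low water mark, in kilobytes, for the
 * given packet buffer.
 **/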
5194static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
5195{
5196 struct ixgbe_hw *hw = &adapter->hw;
5197 struct net_device *dev = adapter->netdev;
5198 int tc;
5199 u32 dv_id;
5200
5201
5202 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
5203
5204#ifdef IXGBE_FCOE
5205
5206 if ((dev->features & NETIF_F_FCOE_MTU) &&
5207 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5208 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
5209 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5210#endif
5211
5212
5213 switch (hw->mac.type) {
5214 case ixgbe_mac_X540:
5215 case ixgbe_mac_X550:
5216 case ixgbe_mac_X550EM_x:
5217 case ixgbe_mac_x550em_a:
5218 dv_id = IXGBE_LOW_DV_X540(tc);
5219 break;
5220 default:
5221 dv_id = IXGBE_LOW_DV(tc);
5222 break;
5223 }
5224
5225
5226 return IXGBE_BT2KB(dv_id);
5227}
5228
5229
5230
5231
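/* ixgbe_pbthresh_setup - calculate and set up high and low water marks */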
5232static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
5233{
5234 struct ixgbe_hw *hw = &adapter->hw;
5235 int num_tc = adapter->hw_tcs;
5236 int i;
5237
5238 if (!num_tc)
5239 num_tc = 1;
5240
5241 for (i = 0; i < num_tc; i++) {
5242 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
5243 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
5244
5245
5246 if (hw->fc.low_water[i] > hw->fc.high_water[i])
5247 hw->fc.low_water[i] = 0;
5248 }
5249
5250 for (; i < MAX_TRAFFIC_CLASS; i++)
5251 hw->fc.high_water[i] = 0;
5252}
5253
5254static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
5255{
5256 struct ixgbe_hw *hw = &adapter->hw;
5257 int hdrm;
5258 u8 tc = adapter->hw_tcs;
5259
5260 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5261 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5262 hdrm = 32 << adapter->fdir_pballoc;
5263 else
5264 hdrm = 0;
5265
5266 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
5267 ixgbe_pbthresh_setup(adapter);
5268}
5269
5270static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
5271{
5272 struct ixgbe_hw *hw = &adapter->hw;
5273 struct hlist_node *node2;
5274 struct ixgbe_fdir_filter *filter;
5275
5276 spin_lock(&adapter->fdir_perfect_lock);
5277
5278 if (!hlist_empty(&adapter->fdir_filter_list))
5279 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
5280
5281 hlist_for_each_entry_safe(filter, node2,
5282 &adapter->fdir_filter_list, fdir_node) {
5283 ixgbe_fdir_write_perfect_filter_82599(hw,
5284 &filter->filter,
5285 filter->sw_idx,
5286 (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
5287 IXGBE_FDIR_DROP_QUEUE :
5288 adapter->rx_ring[filter->action]->reg_idx);
5289 }
5290
5291 spin_unlock(&adapter->fdir_perfect_lock);
5292}
5293
5294static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
5295 struct ixgbe_adapter *adapter)
5296{
5297 struct ixgbe_hw *hw = &adapter->hw;
5298 u32 vmolr;
5299
5300
5301 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
5302 vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
5303
5304
5305 vmolr &= ~IXGBE_VMOLR_MPE;
5306
5307 if (dev->flags & IFF_ALLMULTI) {
5308 vmolr |= IXGBE_VMOLR_MPE;
5309 } else {
5310 vmolr |= IXGBE_VMOLR_ROMPE;
5311 hw->mac.ops.update_mc_addr_list(hw, dev);
5312 }
5313 ixgbe_write_uc_addr_list(adapter->netdev, pool);
5314 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
5315}
5316
5317
5318
5319
5320
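/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/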
5321static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
5322{
5323 u16 i = rx_ring->next_to_clean;
5324 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
5325
5326
5327 while (i != rx_ring->next_to_alloc) {
5328 if (rx_buffer->skb) {
5329 struct sk_buff *skb = rx_buffer->skb;
5330 if (IXGBE_CB(skb)->page_released)
5331 dma_unmap_page_attrs(rx_ring->dev,
5332 IXGBE_CB(skb)->dma,
5333 ixgbe_rx_pg_size(rx_ring),
5334 DMA_FROM_DEVICE,
5335 IXGBE_RX_DMA_ATTR);
5336 dev_kfree_skb(skb);
5337 }
5338
5339
5340
5341
5342 dma_sync_single_range_for_cpu(rx_ring->dev,
5343 rx_buffer->dma,
5344 rx_buffer->page_offset,
5345 ixgbe_rx_bufsz(rx_ring),
5346 DMA_FROM_DEVICE);
5347
5348
5349 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
5350 ixgbe_rx_pg_size(rx_ring),
5351 DMA_FROM_DEVICE,
5352 IXGBE_RX_DMA_ATTR);
5353 __page_frag_cache_drain(rx_buffer->page,
5354 rx_buffer->pagecnt_bias);
5355
5356 i++;
5357 rx_buffer++;
5358 if (i == rx_ring->count) {
5359 i = 0;
5360 rx_buffer = rx_ring->rx_buffer_info;
5361 }
5362 }
5363
5364 rx_ring->next_to_alloc = 0;
5365 rx_ring->next_to_clean = 0;
5366 rx_ring->next_to_use = 0;
5367}
5368
5369static int ixgbe_fwd_ring_up(struct net_device *vdev,
5370 struct ixgbe_fwd_adapter *accel)
5371{
5372 struct ixgbe_adapter *adapter = accel->real_adapter;
5373 int i, baseq, err;
5374
5375 if (!test_bit(accel->pool, adapter->fwd_bitmask))
5376 return 0;
5377
5378 baseq = accel->pool * adapter->num_rx_queues_per_pool;
5379 netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
5380 accel->pool, adapter->num_rx_pools,
5381 baseq, baseq + adapter->num_rx_queues_per_pool);
5382
5383 accel->netdev = vdev;
5384 accel->rx_base_queue = baseq;
5385 accel->tx_base_queue = baseq;
5386
5387 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5388 adapter->rx_ring[baseq + i]->netdev = vdev;
5389
5390
5391
5392
5393 wmb();
5394
5395
5396
5397
5398 err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
5399 VMDQ_P(accel->pool));
5400 if (err >= 0) {
5401 ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
5402 return 0;
5403 }
5404
5405 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5406 adapter->rx_ring[baseq + i]->netdev = NULL;
5407
5408 return err;
5409}
5410
5411static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
5412{
5413 if (netif_is_macvlan(upper)) {
5414 struct macvlan_dev *dfwd = netdev_priv(upper);
5415 struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
5416
5417 if (dfwd->fwd_priv)
5418 ixgbe_fwd_ring_up(upper, vadapter);
5419 }
5420
5421 return 0;
5422}
5423
5424static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
5425{
5426 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5427 ixgbe_upper_dev_walk, NULL);
5428}
5429
5430static void ixgbe_configure(struct ixgbe_adapter *adapter)
5431{
5432 struct ixgbe_hw *hw = &adapter->hw;
5433
5434 ixgbe_configure_pb(adapter);
5435#ifdef CONFIG_IXGBE_DCB
5436 ixgbe_configure_dcb(adapter);
5437#endif
5438
5439
5440
5441
5442 ixgbe_configure_virtualization(adapter);
5443
5444 ixgbe_set_rx_mode(adapter->netdev);
5445 ixgbe_restore_vlan(adapter);
5446 ixgbe_ipsec_restore(adapter);
5447
5448 switch (hw->mac.type) {
5449 case ixgbe_mac_82599EB:
5450 case ixgbe_mac_X540:
5451 hw->mac.ops.disable_rx_buff(hw);
5452 break;
5453 default:
5454 break;
5455 }
5456
5457 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5458 ixgbe_init_fdir_signature_82599(&adapter->hw,
5459 adapter->fdir_pballoc);
5460 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5461 ixgbe_init_fdir_perfect_82599(&adapter->hw,
5462 adapter->fdir_pballoc);
5463 ixgbe_fdir_filter_restore(adapter);
5464 }
5465
5466 switch (hw->mac.type) {
5467 case ixgbe_mac_82599EB:
5468 case ixgbe_mac_X540:
5469 hw->mac.ops.enable_rx_buff(hw);
5470 break;
5471 default:
5472 break;
5473 }
5474
5475#ifdef CONFIG_IXGBE_DCA
5476
5477 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
5478 ixgbe_setup_dca(adapter);
5479#endif
5480
5481#ifdef IXGBE_FCOE
5482
5483 ixgbe_configure_fcoe(adapter);
5484
5485#endif
5486 ixgbe_configure_tx(adapter);
5487 ixgbe_configure_rx(adapter);
5488 ixgbe_configure_dfwd(adapter);
5489}
5490
5491
5492
5493
5494
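/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 **/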
5495static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
5496{
5497
5498
5499
5500
5501
5502
5503 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5504 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5505
5506 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5507 adapter->sfp_poll_time = 0;
5508}
5509
5510
5511
5512
5513
5514
5515
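/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/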
5516static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
5517{
5518 u32 speed;
5519 bool autoneg, link_up = false;
5520 int ret = IXGBE_ERR_LINK_SETUP;
5521
5522 if (hw->mac.ops.check_link)
5523 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
5524
5525 if (ret)
5526 return ret;
5527
5528 speed = hw->phy.autoneg_advertised;
5529 if ((!speed) && (hw->mac.ops.get_link_capabilities))
5530 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5531 &autoneg);
5532 if (ret)
5533 return ret;
5534
5535 if (hw->mac.ops.setup_link)
5536 ret = hw->mac.ops.setup_link(hw, speed, link_up);
5537
5538 return ret;
5539}
5540
5541static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5542{
5543 struct ixgbe_hw *hw = &adapter->hw;
5544 u32 gpie = 0;
5545
5546 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5547 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5548 IXGBE_GPIE_OCD;
5549 gpie |= IXGBE_GPIE_EIAME;
5550
5551
5552
5553
5554 switch (hw->mac.type) {
5555 case ixgbe_mac_82598EB:
5556 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5557 break;
5558 case ixgbe_mac_82599EB:
5559 case ixgbe_mac_X540:
5560 case ixgbe_mac_X550:
5561 case ixgbe_mac_X550EM_x:
5562 case ixgbe_mac_x550em_a:
5563 default:
5564 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5565 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5566 break;
5567 }
5568 } else {
5569
5570
5571 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5572 }
5573
5574
5575
5576
5577 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
5578 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5579
5580 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
5581 case IXGBE_82599_VMDQ_8Q_MASK:
5582 gpie |= IXGBE_GPIE_VTMODE_16;
5583 break;
5584 case IXGBE_82599_VMDQ_4Q_MASK:
5585 gpie |= IXGBE_GPIE_VTMODE_32;
5586 break;
5587 default:
5588 gpie |= IXGBE_GPIE_VTMODE_64;
5589 break;
5590 }
5591 }
5592
5593
5594 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
5595 switch (adapter->hw.mac.type) {
5596 case ixgbe_mac_82599EB:
5597 gpie |= IXGBE_SDP0_GPIEN_8259X;
5598 break;
5599 default:
5600 break;
5601 }
5602 }
5603
5604
5605 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
5606 gpie |= IXGBE_SDP1_GPIEN(hw);
5607
5608 switch (hw->mac.type) {
5609 case ixgbe_mac_82599EB:
5610 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5611 break;
5612 case ixgbe_mac_X550EM_x:
5613 case ixgbe_mac_x550em_a:
5614 gpie |= IXGBE_SDP0_GPIEN_X540;
5615 break;
5616 default:
5617 break;
5618 }
5619
5620 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5621}
5622
5623static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
5624{
5625 struct ixgbe_hw *hw = &adapter->hw;
5626 int err;
5627 u32 ctrl_ext;
5628
5629 ixgbe_get_hw_control(adapter);
5630 ixgbe_setup_gpie(adapter);
5631
5632 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
5633 ixgbe_configure_msix(adapter);
5634 else
5635 ixgbe_configure_msi_and_legacy(adapter);
5636
5637
5638 if (hw->mac.ops.enable_tx_laser)
5639 hw->mac.ops.enable_tx_laser(hw);
5640
5641 if (hw->phy.ops.set_phy_power)
5642 hw->phy.ops.set_phy_power(hw, true);
5643
5644 smp_mb__before_atomic();
5645 clear_bit(__IXGBE_DOWN, &adapter->state);
5646 ixgbe_napi_enable_all(adapter);
5647
5648 if (ixgbe_is_sfp(hw)) {
5649 ixgbe_sfp_link_config(adapter);
5650 } else {
5651 err = ixgbe_non_sfp_link_config(hw);
5652 if (err)
5653 e_err(probe, "link_config FAILED %d\n", err);
5654 }
5655
5656
5657 IXGBE_READ_REG(hw, IXGBE_EICR);
5658 ixgbe_irq_enable(adapter, true, true);
5659
5660
5661
5662
5663
5664 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5665 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5666 if (esdp & IXGBE_ESDP_SDP1)
5667 e_crit(drv, "Fan has stopped, replace the adapter\n");
5668 }
5669
5670
5671
5672 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5673 adapter->link_check_timeout = jiffies;
5674 mod_timer(&adapter->service_timer, jiffies);
5675
5676
5677 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5678 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5679 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5680}
5681
5682void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5683{
5684 WARN_ON(in_interrupt());
5685
5686 netif_trans_update(adapter->netdev);
5687
5688 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5689 usleep_range(1000, 2000);
5690 if (adapter->hw.phy.type == ixgbe_phy_fw)
5691 ixgbe_watchdog_link_is_down(adapter);
5692 ixgbe_down(adapter);
5693
5694
5695
5696
5697
5698
5699 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5700 msleep(2000);
5701 ixgbe_up(adapter);
5702 clear_bit(__IXGBE_RESETTING, &adapter->state);
5703}
5704
5705void ixgbe_up(struct ixgbe_adapter *adapter)
5706{
5707
5708 ixgbe_configure(adapter);
5709
5710 ixgbe_up_complete(adapter);
5711}
5712
5713void ixgbe_reset(struct ixgbe_adapter *adapter)
5714{
5715 struct ixgbe_hw *hw = &adapter->hw;
5716 struct net_device *netdev = adapter->netdev;
5717 int err;
5718
5719 if (ixgbe_removed(hw->hw_addr))
5720 return;
5721
5722 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5723 usleep_range(1000, 2000);
5724
5725
5726 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5727 IXGBE_FLAG2_SFP_NEEDS_RESET);
5728 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5729
5730 err = hw->mac.ops.init_hw(hw);
5731 switch (err) {
5732 case 0:
5733 case IXGBE_ERR_SFP_NOT_PRESENT:
5734 case IXGBE_ERR_SFP_NOT_SUPPORTED:
5735 break;
5736 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5737 e_dev_err("master disable timed out\n");
5738 break;
5739 case IXGBE_ERR_EEPROM_VERSION:
5740
5741 e_dev_warn("This device is a pre-production adapter/LOM. "
5742 "Please be aware there may be issues associated with "
5743 "your hardware. If you are experiencing problems "
5744 "please contact your Intel or hardware "
5745 "representative who provided you with this "
5746 "hardware.\n");
5747 break;
5748 default:
5749 e_dev_err("Hardware Error: %d\n", err);
5750 }
5751
5752 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5753
5754
5755 ixgbe_flush_sw_mac_table(adapter);
5756 __dev_uc_unsync(netdev, NULL);
5757
5758
5759 ixgbe_mac_set_default_filter(adapter);
5760
5761
5762 if (hw->mac.san_mac_rar_index)
5763 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5764
5765 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5766 ixgbe_ptp_reset(adapter);
5767
5768 if (hw->phy.ops.set_phy_power) {
5769 if (!netif_running(adapter->netdev) && !adapter->wol)
5770 hw->phy.ops.set_phy_power(hw, false);
5771 else
5772 hw->phy.ops.set_phy_power(hw, true);
5773 }
5774}
5775
5776
5777
5778
5779
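/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/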
5780static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5781{
5782 u16 i = tx_ring->next_to_clean;
5783 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
5784
5785 while (i != tx_ring->next_to_use) {
5786 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
5787
5788
5789 if (ring_is_xdp(tx_ring))
5790 page_frag_free(tx_buffer->data);
5791 else
5792 dev_kfree_skb_any(tx_buffer->skb);
5793
5794
5795 dma_unmap_single(tx_ring->dev,
5796 dma_unmap_addr(tx_buffer, dma),
5797 dma_unmap_len(tx_buffer, len),
5798 DMA_TO_DEVICE);
5799
5800
5801 eop_desc = tx_buffer->next_to_watch;
5802 tx_desc = IXGBE_TX_DESC(tx_ring, i);
5803
5804
5805 while (tx_desc != eop_desc) {
5806 tx_buffer++;
5807 tx_desc++;
5808 i++;
5809 if (unlikely(i == tx_ring->count)) {
5810 i = 0;
5811 tx_buffer = tx_ring->tx_buffer_info;
5812 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
5813 }
5814
5815
5816 if (dma_unmap_len(tx_buffer, len))
5817 dma_unmap_page(tx_ring->dev,
5818 dma_unmap_addr(tx_buffer, dma),
5819 dma_unmap_len(tx_buffer, len),
5820 DMA_TO_DEVICE);
5821 }
5822
5823
5824 tx_buffer++;
5825 i++;
5826 if (unlikely(i == tx_ring->count)) {
5827 i = 0;
5828 tx_buffer = tx_ring->tx_buffer_info;
5829 }
5830 }
5831
5832
5833 if (!ring_is_xdp(tx_ring))
5834 netdev_tx_reset_queue(txring_txq(tx_ring));
5835
5836
5837 tx_ring->next_to_use = 0;
5838 tx_ring->next_to_clean = 0;
5839}
5840
5841
5842
5843
5844
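/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/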
5845static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
5846{
5847 int i;
5848
5849 for (i = 0; i < adapter->num_rx_queues; i++)
5850 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
5851}
5852
5853
5854
5855
5856
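/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/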
5857static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
5858{
5859 int i;
5860
5861 for (i = 0; i < adapter->num_tx_queues; i++)
5862 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
5863 for (i = 0; i < adapter->num_xdp_queues; i++)
5864 ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
5865}
5866
5867static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
5868{
5869 struct hlist_node *node2;
5870 struct ixgbe_fdir_filter *filter;
5871
5872 spin_lock(&adapter->fdir_perfect_lock);
5873
5874 hlist_for_each_entry_safe(filter, node2,
5875 &adapter->fdir_filter_list, fdir_node) {
5876 hlist_del(&filter->fdir_node);
5877 kfree(filter);
5878 }
5879 adapter->fdir_filter_count = 0;
5880
5881 spin_unlock(&adapter->fdir_perfect_lock);
5882}
5883
5884void ixgbe_down(struct ixgbe_adapter *adapter)
5885{
5886 struct net_device *netdev = adapter->netdev;
5887 struct ixgbe_hw *hw = &adapter->hw;
5888 int i;
5889
5890
5891 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
5892 return;
5893
5894
5895 hw->mac.ops.disable_rx(hw);
5896
5897
5898 for (i = 0; i < adapter->num_rx_queues; i++)
5899
5900 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
5901
5902 usleep_range(10000, 20000);
5903
5904
5905 if (adapter->xdp_ring[0])
5906 synchronize_sched();
5907 netif_tx_stop_all_queues(netdev);
5908
5909
5910 netif_carrier_off(netdev);
5911 netif_tx_disable(netdev);
5912
5913 ixgbe_irq_disable(adapter);
5914
5915 ixgbe_napi_disable_all(adapter);
5916
5917 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
5918 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
5919 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5920
5921 del_timer_sync(&adapter->service_timer);
5922
5923 if (adapter->num_vfs) {
5924
5925 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
5926
5927
5928 for (i = 0 ; i < adapter->num_vfs; i++)
5929 adapter->vfinfo[i].clear_to_send = false;
5930
5931
5932 ixgbe_ping_all_vfs(adapter);
5933
5934
5935 ixgbe_disable_tx_rx(adapter);
5936 }
5937
5938
5939 for (i = 0; i < adapter->num_tx_queues; i++) {
5940 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
5941 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5942 }
5943 for (i = 0; i < adapter->num_xdp_queues; i++) {
5944 u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
5945
5946 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5947 }
5948
5949
5950 switch (hw->mac.type) {
5951 case ixgbe_mac_82599EB:
5952 case ixgbe_mac_X540:
5953 case ixgbe_mac_X550:
5954 case ixgbe_mac_X550EM_x:
5955 case ixgbe_mac_x550em_a:
5956 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5957 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5958 ~IXGBE_DMATXCTL_TE));
5959 break;
5960 default:
5961 break;
5962 }
5963
5964 if (!pci_channel_offline(adapter->pdev))
5965 ixgbe_reset(adapter);
5966
5967
5968 if (hw->mac.ops.disable_tx_laser)
5969 hw->mac.ops.disable_tx_laser(hw);
5970
5971 ixgbe_clean_all_tx_rings(adapter);
5972 ixgbe_clean_all_rx_rings(adapter);
5973}
5974
5975
5976
5977
5978
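/**
 * ixgbe_set_eee_capable - helper function to determine EEE support on X550
 * @adapter: board private structure
 **/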
5979static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
5980{
5981 struct ixgbe_hw *hw = &adapter->hw;
5982
5983 switch (hw->device_id) {
5984 case IXGBE_DEV_ID_X550EM_A_1G_T:
5985 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
5986 if (!hw->phy.eee_speeds_supported)
5987 break;
5988 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
5989 if (!hw->phy.eee_speeds_advertised)
5990 break;
5991 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
5992 break;
5993 default:
5994 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
5995 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
5996 break;
5997 }
5998}
5999
6000
6001
6002
6003
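/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/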
6004static void ixgbe_tx_timeout(struct net_device *netdev)
6005{
6006 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6007
6008
6009 ixgbe_tx_timeout_reset(adapter);
6010}
6011
6012#ifdef CONFIG_IXGBE_DCB
6013static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
6014{
6015 struct ixgbe_hw *hw = &adapter->hw;
6016 struct tc_configuration *tc;
6017 int j;
6018
6019 switch (hw->mac.type) {
6020 case ixgbe_mac_82598EB:
6021 case ixgbe_mac_82599EB:
6022 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
6023 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
6024 break;
6025 case ixgbe_mac_X540:
6026 case ixgbe_mac_X550:
6027 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
6028 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
6029 break;
6030 case ixgbe_mac_X550EM_x:
6031 case ixgbe_mac_x550em_a:
6032 default:
6033 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
6034 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
6035 break;
6036 }
6037
6038
6039 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
6040 tc = &adapter->dcb_cfg.tc_config[j];
6041 tc->path[DCB_TX_CONFIG].bwg_id = 0;
6042 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
6043 tc->path[DCB_RX_CONFIG].bwg_id = 0;
6044 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
6045 tc->dcb_pfc = pfc_disabled;
6046 }
6047
6048
6049 tc = &adapter->dcb_cfg.tc_config[0];
6050 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
6051 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
6052
6053 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
6054 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
6055 adapter->dcb_cfg.pfc_mode_enable = false;
6056 adapter->dcb_set_bitmap = 0x00;
6057 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
6058 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
6059 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
6060 sizeof(adapter->temp_dcb_cfg));
6061}
6062#endif
6063
6064
6065
6066
6067
6068
6069
6070
6071
6072
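/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 * @ii: pointer to ixgbe_info for device
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/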
6073static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
6074 const struct ixgbe_info *ii)
6075{
6076 struct ixgbe_hw *hw = &adapter->hw;
6077 struct pci_dev *pdev = adapter->pdev;
6078 unsigned int rss, fdir;
6079 u32 fwsm;
6080 int i;
6081
6082
6083
6084 hw->vendor_id = pdev->vendor;
6085 hw->device_id = pdev->device;
6086 hw->revision_id = pdev->revision;
6087 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6088 hw->subsystem_device_id = pdev->subsystem_device;
6089
6090
6091 ii->get_invariants(hw);
6092
6093
6094 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
6095 adapter->ring_feature[RING_F_RSS].limit = rss;
6096 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
6097 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
6098 adapter->atr_sample_rate = 20;
6099 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
6100 adapter->ring_feature[RING_F_FDIR].limit = fdir;
6101 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
6102 adapter->ring_feature[RING_F_VMDQ].limit = 1;
6103#ifdef CONFIG_IXGBE_DCA
6104 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
6105#endif
6106#ifdef CONFIG_IXGBE_DCB
6107 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
6108 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6109#endif
6110#ifdef IXGBE_FCOE
6111 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
6112 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6113#ifdef CONFIG_IXGBE_DCB
6114
6115 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
6116#endif
6117#endif
6118
6119
6120 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
6121 GFP_KERNEL);
6122 if (!adapter->jump_tables[0])
6123 return -ENOMEM;
6124 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
6125
6126 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
6127 adapter->jump_tables[i] = NULL;
6128
6129 adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
6130 sizeof(struct ixgbe_mac_addr),
6131 GFP_ATOMIC);
6132 if (!adapter->mac_table)
6133 return -ENOMEM;
6134
6135 if (ixgbe_init_rss_key(adapter))
6136 return -ENOMEM;
6137
6138
6139 switch (hw->mac.type) {
6140 case ixgbe_mac_82598EB:
6141 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
6142
6143 if (hw->device_id == IXGBE_DEV_ID_82598AT)
6144 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
6145
6146 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
6147 adapter->ring_feature[RING_F_FDIR].limit = 0;
6148 adapter->atr_sample_rate = 0;
6149 adapter->fdir_pballoc = 0;
6150#ifdef IXGBE_FCOE
6151 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6152 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6153#ifdef CONFIG_IXGBE_DCB
6154 adapter->fcoe.up = 0;
6155#endif
6156#endif
6157 break;
6158 case ixgbe_mac_82599EB:
6159 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
6160 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6161 break;
6162 case ixgbe_mac_X540:
6163 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
6164 if (fwsm & IXGBE_FWSM_TS_ENABLED)
6165 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6166 break;
6167 case ixgbe_mac_x550em_a:
6168 adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
6169 switch (hw->device_id) {
6170 case IXGBE_DEV_ID_X550EM_A_1G_T:
6171 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6172 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6173 break;
6174 default:
6175 break;
6176 }
6177 /* fall through */
6178 case ixgbe_mac_X550EM_x:
6179#ifdef CONFIG_IXGBE_DCB
6180 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
6181#endif
6182#ifdef IXGBE_FCOE
6183 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6184#ifdef CONFIG_IXGBE_DCB
6185 adapter->fcoe.up = 0;
6186#endif
6187#endif
6188 /* fall through */
6189 case ixgbe_mac_X550:
6190 if (hw->mac.type == ixgbe_mac_X550)
6191 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6192#ifdef CONFIG_IXGBE_DCA
6193 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
6194#endif
6195 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
6196 break;
6197 default:
6198 break;
6199 }
6200
6201#ifdef IXGBE_FCOE
6202
6203 spin_lock_init(&adapter->fcoe.lock);
6204
6205#endif
6206
6207 spin_lock_init(&adapter->fdir_perfect_lock);
6208
6209#ifdef CONFIG_IXGBE_DCB
6210 ixgbe_init_dcb(adapter);
6211#endif
6212
6213
6214 hw->fc.requested_mode = ixgbe_fc_full;
6215 hw->fc.current_mode = ixgbe_fc_full;
6216 ixgbe_pbthresh_setup(adapter);
6217 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
6218 hw->fc.send_xon = true;
6219 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
6220
6221#ifdef CONFIG_PCI_IOV
6222 if (max_vfs > 0)
6223 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
6224
6225
6226 if (hw->mac.type != ixgbe_mac_82598EB) {
6227 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
6228 max_vfs = 0;
6229 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
6230 }
6231 }
6232#endif
6233
6234
6235 adapter->rx_itr_setting = 1;
6236 adapter->tx_itr_setting = 1;
6237
6238
6239 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
6240 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
6241
6242
6243 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
6244
6245
6246 if (ixgbe_init_eeprom_params_generic(hw)) {
6247 e_dev_err("EEPROM initialization failed\n");
6248 return -EIO;
6249 }
6250
6251
6252 set_bit(0, adapter->fwd_bitmask);
6253 set_bit(__IXGBE_DOWN, &adapter->state);
6254
6255 return 0;
6256}
6257
6258
6259
6260
6261
6262
6263
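/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/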
6264int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
6265{
6266 struct device *dev = tx_ring->dev;
6267 int orig_node = dev_to_node(dev);
6268 int ring_node = -1;
6269 int size;
6270
6271 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
6272
6273 if (tx_ring->q_vector)
6274 ring_node = tx_ring->q_vector->numa_node;
6275
6276 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
6277 if (!tx_ring->tx_buffer_info)
6278 tx_ring->tx_buffer_info = vmalloc(size);
6279 if (!tx_ring->tx_buffer_info)
6280 goto err;
6281
6282
6283 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
6284 tx_ring->size = ALIGN(tx_ring->size, 4096);
6285
6286 set_dev_node(dev, ring_node);
6287 tx_ring->desc = dma_alloc_coherent(dev,
6288 tx_ring->size,
6289 &tx_ring->dma,
6290 GFP_KERNEL);
6291 set_dev_node(dev, orig_node);
6292 if (!tx_ring->desc)
6293 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6294 &tx_ring->dma, GFP_KERNEL);
6295 if (!tx_ring->desc)
6296 goto err;
6297
6298 tx_ring->next_to_use = 0;
6299 tx_ring->next_to_clean = 0;
6300 return 0;
6301
6302err:
6303 vfree(tx_ring->tx_buffer_info);
6304 tx_ring->tx_buffer_info = NULL;
6305 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
6306 return -ENOMEM;
6307}
6308
6309
6310
6311
6312
6313
6314
6315
6316
6317
6318
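/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * Allocates resources for every Tx and XDP ring; on failure any rings
 * that were already set up are freed again.
 *
 * Return 0 on success, negative on failure
 **/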
6319static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
6320{
6321 int i, j = 0, err = 0;
6322
6323 for (i = 0; i < adapter->num_tx_queues; i++) {
6324 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6325 if (!err)
6326 continue;
6327
6328 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
6329 goto err_setup_tx;
6330 }
6331 for (j = 0; j < adapter->num_xdp_queues; j++) {
6332 err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
6333 if (!err)
6334 continue;
6335
6336 e_err(probe, "Allocation for Tx Queue %u failed\n", j);
6337 goto err_setup_tx;
6338 }
6339
6340 return 0;
6341err_setup_tx:
6342
6343 while (j--)
6344 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
6345 while (i--)
6346 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6347 return err;
6348}
6349
6350
6351
6352
6353
6354
6355
6356
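/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: pointer to ixgbe_adapter
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/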
6357int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
6358 struct ixgbe_ring *rx_ring)
6359{
6360 struct device *dev = rx_ring->dev;
6361 int orig_node = dev_to_node(dev);
6362 int ring_node = -1;
6363 int size;
6364
6365 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
6366
6367 if (rx_ring->q_vector)
6368 ring_node = rx_ring->q_vector->numa_node;
6369
6370 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
6371 if (!rx_ring->rx_buffer_info)
6372 rx_ring->rx_buffer_info = vmalloc(size);
6373 if (!rx_ring->rx_buffer_info)
6374 goto err;
6375
6376
6377 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
6378 rx_ring->size = ALIGN(rx_ring->size, 4096);
6379
6380 set_dev_node(dev, ring_node);
6381 rx_ring->desc = dma_alloc_coherent(dev,
6382 rx_ring->size,
6383 &rx_ring->dma,
6384 GFP_KERNEL);
6385 set_dev_node(dev, orig_node);
6386 if (!rx_ring->desc)
6387 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
6388 &rx_ring->dma, GFP_KERNEL);
6389 if (!rx_ring->desc)
6390 goto err;
6391
6392 rx_ring->next_to_clean = 0;
6393 rx_ring->next_to_use = 0;
6394
6395
6396 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
6397 rx_ring->queue_index) < 0)
6398 goto err;
6399
6400 rx_ring->xdp_prog = adapter->xdp_prog;
6401
6402 return 0;
6403err:
6404 vfree(rx_ring->rx_buffer_info);
6405 rx_ring->rx_buffer_info = NULL;
6406 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
6407 return -ENOMEM;
6408}
6409
6410
6411
6412
6413
6414
6415
6416
6417
6418
6419
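/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * Allocates resources for every Rx ring (and FCoE DDP resources when
 * enabled); on failure any rings already set up are freed again.
 *
 * Return 0 on success, negative on failure
 **/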
6420static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
6421{
6422 int i, err = 0;
6423
6424 for (i = 0; i < adapter->num_rx_queues; i++) {
6425 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
6426 if (!err)
6427 continue;
6428
6429 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
6430 goto err_setup_rx;
6431 }
6432
6433#ifdef IXGBE_FCOE
6434 err = ixgbe_setup_fcoe_ddp_resources(adapter);
6435 if (!err)
6436#endif
6437 return 0;
6438err_setup_rx:
6439
6440 while (i--)
6441 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6442 return err;
6443}
6444
6445
6446
6447
6448
6449
6450
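/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/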
6451void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
6452{
6453 ixgbe_clean_tx_ring(tx_ring);
6454
6455 vfree(tx_ring->tx_buffer_info);
6456 tx_ring->tx_buffer_info = NULL;
6457
6458
6459 if (!tx_ring->desc)
6460 return;
6461
6462 dma_free_coherent(tx_ring->dev, tx_ring->size,
6463 tx_ring->desc, tx_ring->dma);
6464
6465 tx_ring->desc = NULL;
6466}
6467
6468
6469
6470
6471
6472
6473
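/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/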
6474static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
6475{
6476 int i;
6477
6478 for (i = 0; i < adapter->num_tx_queues; i++)
6479 if (adapter->tx_ring[i]->desc)
6480 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6481 for (i = 0; i < adapter->num_xdp_queues; i++)
6482 if (adapter->xdp_ring[i]->desc)
6483 ixgbe_free_tx_resources(adapter->xdp_ring[i]);
6484}
6485
6486
6487
6488
6489
6490
6491
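/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/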
6492void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
6493{
6494 ixgbe_clean_rx_ring(rx_ring);
6495
6496 rx_ring->xdp_prog = NULL;
6497 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
6498 vfree(rx_ring->rx_buffer_info);
6499 rx_ring->rx_buffer_info = NULL;
6500
6501
6502 if (!rx_ring->desc)
6503 return;
6504
6505 dma_free_coherent(rx_ring->dev, rx_ring->size,
6506 rx_ring->desc, rx_ring->dma);
6507
6508 rx_ring->desc = NULL;
6509}
6510
6511
6512
6513
6514
6515
6516
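/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/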
6517static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
6518{
6519 int i;
6520
6521#ifdef IXGBE_FCOE
6522 ixgbe_free_fcoe_ddp_resources(adapter);
6523
6524#endif
6525 for (i = 0; i < adapter->num_rx_queues; i++)
6526 if (adapter->rx_ring[i]->desc)
6527 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6528}
6529
6530
6531
6532
6533
6534
6535
6536
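/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/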
6537static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6538{
6539 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6540
6541
6542
6543
6544
6545
6546 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
6547 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
6548 (new_mtu > ETH_DATA_LEN))
6549 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
6550
6551 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
6552
6553
6554 netdev->mtu = new_mtu;
6555
6556 if (netif_running(netdev))
6557 ixgbe_reinit_locked(adapter);
6558
6559 return 0;
6560}
6561
6562
6563
6564
6565
6566
6567
6568
6569
6570
6571
6572
6573
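/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the service timer is started,
 * and the stack is notified that the interface is ready.
 **/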
6574int ixgbe_open(struct net_device *netdev)
6575{
6576 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6577 struct ixgbe_hw *hw = &adapter->hw;
6578 int err, queues;
6579
6580
6581 if (test_bit(__IXGBE_TESTING, &adapter->state))
6582 return -EBUSY;
6583
6584 netif_carrier_off(netdev);
6585
6586
6587 err = ixgbe_setup_all_tx_resources(adapter);
6588 if (err)
6589 goto err_setup_tx;
6590
6591
6592 err = ixgbe_setup_all_rx_resources(adapter);
6593 if (err)
6594 goto err_setup_rx;
6595
6596 ixgbe_configure(adapter);
6597
6598 err = ixgbe_request_irq(adapter);
6599 if (err)
6600 goto err_req_irq;
6601
6602
6603 queues = adapter->num_tx_queues;
6604 err = netif_set_real_num_tx_queues(netdev, queues);
6605 if (err)
6606 goto err_set_queues;
6607
6608 queues = adapter->num_rx_queues;
6609 err = netif_set_real_num_rx_queues(netdev, queues);
6610 if (err)
6611 goto err_set_queues;
6612
6613 ixgbe_ptp_init(adapter);
6614
6615 ixgbe_up_complete(adapter);
6616
6617 ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
6618 udp_tunnel_get_rx_info(netdev);
6619
6620 return 0;
6621
6622err_set_queues:
6623 ixgbe_free_irq(adapter);
6624err_req_irq:
6625 ixgbe_free_all_rx_resources(adapter);
6626 if (hw->phy.ops.set_phy_power && !adapter->wol)
6627 hw->phy.ops.set_phy_power(&adapter->hw, false);
6628err_setup_rx:
6629 ixgbe_free_all_tx_resources(adapter);
6630err_setup_tx:
6631 ixgbe_reset(adapter);
6632
6633 return err;
6634}
6635
6636static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6637{
6638 ixgbe_ptp_suspend(adapter);
6639
6640 if (adapter->hw.phy.ops.enter_lplu) {
6641 adapter->hw.phy.reset_disable = true;
6642 ixgbe_down(adapter);
6643 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6644 adapter->hw.phy.reset_disable = false;
6645 } else {
6646 ixgbe_down(adapter);
6647 }
6648
6649 ixgbe_free_irq(adapter);
6650
6651 ixgbe_free_all_tx_resources(adapter);
6652 ixgbe_free_all_rx_resources(adapter);
6653}
6654
6655
6656
6657
6658
6659
6660
6661
6662
6663
6664
6665
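/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/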
6666int ixgbe_close(struct net_device *netdev)
6667{
6668 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6669
6670 ixgbe_ptp_stop(adapter);
6671
6672 if (netif_device_present(netdev))
6673 ixgbe_close_suspend(adapter);
6674
6675 ixgbe_fdir_filter_exit(adapter);
6676
6677 ixgbe_release_hw_control(adapter);
6678
6679 return 0;
6680}
6681
6682#ifdef CONFIG_PM
6683static int ixgbe_resume(struct pci_dev *pdev)
6684{
6685 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6686 struct net_device *netdev = adapter->netdev;
6687 int err;
6688
6689 adapter->hw.hw_addr = adapter->io_addr;
6690 pci_set_power_state(pdev, PCI_D0);
6691 pci_restore_state(pdev);
6692
6693
6694
6695
6696 pci_save_state(pdev);
6697
6698 err = pci_enable_device_mem(pdev);
6699 if (err) {
6700 e_dev_err("Cannot enable PCI device from suspend\n");
6701 return err;
6702 }
6703 smp_mb__before_atomic();
6704 clear_bit(__IXGBE_DISABLED, &adapter->state);
6705 pci_set_master(pdev);
6706
6707 pci_wake_from_d3(pdev, false);
6708
6709 ixgbe_reset(adapter);
6710
6711 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6712
6713 rtnl_lock();
6714 err = ixgbe_init_interrupt_scheme(adapter);
6715 if (!err && netif_running(netdev))
6716 err = ixgbe_open(netdev);
6717
6718
6719 if (!err)
6720 netif_device_attach(netdev);
6721 rtnl_unlock();
6722
6723 return err;
6724}
6725#endif
6726
6727static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6728{
6729 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6730 struct net_device *netdev = adapter->netdev;
6731 struct ixgbe_hw *hw = &adapter->hw;
6732 u32 ctrl;
6733 u32 wufc = adapter->wol;
6734#ifdef CONFIG_PM
6735 int retval = 0;
6736#endif
6737
6738 rtnl_lock();
6739 netif_device_detach(netdev);
6740
6741 if (netif_running(netdev))
6742 ixgbe_close_suspend(adapter);
6743
6744 ixgbe_clear_interrupt_scheme(adapter);
6745 rtnl_unlock();
6746
6747#ifdef CONFIG_PM
6748 retval = pci_save_state(pdev);
6749 if (retval)
6750 return retval;
6751
6752#endif
6753 if (hw->mac.ops.stop_link_on_d3)
6754 hw->mac.ops.stop_link_on_d3(hw);
6755
6756 if (wufc) {
6757 u32 fctrl;
6758
6759 ixgbe_set_rx_mode(netdev);
6760
6761
6762 if (hw->mac.ops.enable_tx_laser)
6763 hw->mac.ops.enable_tx_laser(hw);
6764
6765
6766 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6767 fctrl |= IXGBE_FCTRL_MPE;
6768 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6769
6770 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
6771 ctrl |= IXGBE_CTRL_GIO_DIS;
6772 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
6773
6774 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
6775 } else {
6776 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
6777 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
6778 }
6779
6780 switch (hw->mac.type) {
6781 case ixgbe_mac_82598EB:
6782 pci_wake_from_d3(pdev, false);
6783 break;
6784 case ixgbe_mac_82599EB:
6785 case ixgbe_mac_X540:
6786 case ixgbe_mac_X550:
6787 case ixgbe_mac_X550EM_x:
6788 case ixgbe_mac_x550em_a:
6789 pci_wake_from_d3(pdev, !!wufc);
6790 break;
6791 default:
6792 break;
6793 }
6794
6795 *enable_wake = !!wufc;
6796 if (hw->phy.ops.set_phy_power && !*enable_wake)
6797 hw->phy.ops.set_phy_power(hw, false);
6798
6799 ixgbe_release_hw_control(adapter);
6800
6801 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6802 pci_disable_device(pdev);
6803
6804 return 0;
6805}
6806
6807#ifdef CONFIG_PM
6808static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
6809{
6810 int retval;
6811 bool wake;
6812
6813 retval = __ixgbe_shutdown(pdev, &wake);
6814 if (retval)
6815 return retval;
6816
6817 if (wake) {
6818 pci_prepare_to_sleep(pdev);
6819 } else {
6820 pci_wake_from_d3(pdev, false);
6821 pci_set_power_state(pdev, PCI_D3hot);
6822 }
6823
6824 return 0;
6825}
6826#endif
6827
6828static void ixgbe_shutdown(struct pci_dev *pdev)
6829{
6830 bool wake;
6831
6832 __ixgbe_shutdown(pdev, &wake);
6833
6834 if (system_state == SYSTEM_POWER_OFF) {
6835 pci_wake_from_d3(pdev, wake);
6836 pci_set_power_state(pdev, PCI_D3hot);
6837 }
6838}
6839
6840
6841
6842
6843
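/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/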
6844void ixgbe_update_stats(struct ixgbe_adapter *adapter)
6845{
6846 struct net_device *netdev = adapter->netdev;
6847 struct ixgbe_hw *hw = &adapter->hw;
6848 struct ixgbe_hw_stats *hwstats = &adapter->stats;
6849 u64 total_mpc = 0;
6850 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
6851 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
6852 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
6853 u64 alloc_rx_page = 0;
6854 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
6855
6856 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6857 test_bit(__IXGBE_RESETTING, &adapter->state))
6858 return;
6859
6860 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
6861 u64 rsc_count = 0;
6862 u64 rsc_flush = 0;
6863 for (i = 0; i < adapter->num_rx_queues; i++) {
6864 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
6865 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
6866 }
6867 adapter->rsc_total_count = rsc_count;
6868 adapter->rsc_total_flush = rsc_flush;
6869 }
6870
6871 for (i = 0; i < adapter->num_rx_queues; i++) {
6872 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
6873 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
6874 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
6875 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
6876 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
6877 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
6878 bytes += rx_ring->stats.bytes;
6879 packets += rx_ring->stats.packets;
6880 }
6881 adapter->non_eop_descs = non_eop_descs;
6882 adapter->alloc_rx_page = alloc_rx_page;
6883 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
6884 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
6885 adapter->hw_csum_rx_error = hw_csum_rx_error;
6886 netdev->stats.rx_bytes = bytes;
6887 netdev->stats.rx_packets = packets;
6888
6889 bytes = 0;
6890 packets = 0;
6891
6892 for (i = 0; i < adapter->num_tx_queues; i++) {
6893 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6894 restart_queue += tx_ring->tx_stats.restart_queue;
6895 tx_busy += tx_ring->tx_stats.tx_busy;
6896 bytes += tx_ring->stats.bytes;
6897 packets += tx_ring->stats.packets;
6898 }
6899 for (i = 0; i < adapter->num_xdp_queues; i++) {
6900 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
6901
6902 restart_queue += xdp_ring->tx_stats.restart_queue;
6903 tx_busy += xdp_ring->tx_stats.tx_busy;
6904 bytes += xdp_ring->stats.bytes;
6905 packets += xdp_ring->stats.packets;
6906 }
6907 adapter->restart_queue = restart_queue;
6908 adapter->tx_busy = tx_busy;
6909 netdev->stats.tx_bytes = bytes;
6910 netdev->stats.tx_packets = packets;
6911
6912 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
6913
6914
6915 for (i = 0; i < 8; i++) {
6916
6917 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
6918 missed_rx += mpc;
6919 hwstats->mpc[i] += mpc;
6920 total_mpc += hwstats->mpc[i];
6921 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
6922 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
6923 switch (hw->mac.type) {
6924 case ixgbe_mac_82598EB:
6925 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
6926 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
6927 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
6928 hwstats->pxonrxc[i] +=
6929 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
6930 break;
6931 case ixgbe_mac_82599EB:
6932 case ixgbe_mac_X540:
6933 case ixgbe_mac_X550:
6934 case ixgbe_mac_X550EM_x:
6935 case ixgbe_mac_x550em_a:
6936 hwstats->pxonrxc[i] +=
6937 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
6938 break;
6939 default:
6940 break;
6941 }
6942 }
6943
6944
6945 for (i = 0; i < 16; i++) {
6946 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
6947 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
6948 if ((hw->mac.type == ixgbe_mac_82599EB) ||
6949 (hw->mac.type == ixgbe_mac_X540) ||
6950 (hw->mac.type == ixgbe_mac_X550) ||
6951 (hw->mac.type == ixgbe_mac_X550EM_x) ||
6952 (hw->mac.type == ixgbe_mac_x550em_a)) {
6953 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
6954 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
6955 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
6956 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
6957 }
6958 }
6959
6960 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
6961
6962 hwstats->gprc -= missed_rx;
6963
6964 ixgbe_update_xoff_received(adapter);
6965
6966
6967 switch (hw->mac.type) {
6968 case ixgbe_mac_82598EB:
6969 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
6970 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
6971 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
6972 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
6973 break;
6974 case ixgbe_mac_X540:
6975 case ixgbe_mac_X550:
6976 case ixgbe_mac_X550EM_x:
6977 case ixgbe_mac_x550em_a:
6978
6979 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
6980 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
6981 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
6982 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
6983 /* fall through */
6984 case ixgbe_mac_82599EB:
6985 for (i = 0; i < 16; i++)
6986 adapter->hw_rx_no_dma_resources +=
6987 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
6988 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
6989 IXGBE_READ_REG(hw, IXGBE_GORCH);
6990 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
6991 IXGBE_READ_REG(hw, IXGBE_GOTCH);
6992 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
6993 IXGBE_READ_REG(hw, IXGBE_TORH);
6994 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
6995 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
6996 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
6997#ifdef IXGBE_FCOE
6998 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
6999 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
7000 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
7001 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
7002 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
7003 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
7004
7005 if (adapter->fcoe.ddp_pool) {
7006 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
7007 struct ixgbe_fcoe_ddp_pool *ddp_pool;
7008 unsigned int cpu;
7009 u64 noddp = 0, noddp_ext_buff = 0;
7010 for_each_possible_cpu(cpu) {
7011 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
7012 noddp += ddp_pool->noddp;
7013 noddp_ext_buff += ddp_pool->noddp_ext_buff;
7014 }
7015 hwstats->fcoe_noddp = noddp;
7016 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
7017 }
7018#endif
7019 break;
7020 default:
7021 break;
7022 }
7023 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
7024 hwstats->bprc += bprc;
7025 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
7026 if (hw->mac.type == ixgbe_mac_82598EB)
7027 hwstats->mprc -= bprc;
7028 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
7029 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
7030 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
7031 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
7032 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
7033 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
7034 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
7035 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
7036 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
7037 hwstats->lxontxc += lxon;
7038 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
7039 hwstats->lxofftxc += lxoff;
7040 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
7041 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
7042
7043
7044
7045 xon_off_tot = lxon + lxoff;
7046 hwstats->gptc -= xon_off_tot;
7047 hwstats->mptc -= xon_off_tot;
7048 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
7049 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
7050 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
7051 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
7052 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
7053 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
7054 hwstats->ptc64 -= xon_off_tot;
7055 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
7056 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
7057 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
7058 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
7059 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
7060 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
7061
7062
7063 netdev->stats.multicast = hwstats->mprc;
7064
7065
7066 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
7067 netdev->stats.rx_dropped = 0;
7068 netdev->stats.rx_length_errors = hwstats->rlec;
7069 netdev->stats.rx_crc_errors = hwstats->crcerrs;
7070 netdev->stats.rx_missed_errors = total_mpc;
7071}
7072
/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/
7077static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
7078{
7079 struct ixgbe_hw *hw = &adapter->hw;
7080 int i;
7081
7082 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
7083 return;
7084
7085 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
7086
7087
7088 if (test_bit(__IXGBE_DOWN, &adapter->state))
7089 return;
7090
7091
7092 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
7093 return;
7094
7095 adapter->fdir_overflow++;
7096
7097 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
7098 for (i = 0; i < adapter->num_tx_queues; i++)
7099 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7100 &(adapter->tx_ring[i]->state));
7101 for (i = 0; i < adapter->num_xdp_queues; i++)
7102 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7103 &adapter->xdp_ring[i]->state);
7104
7105 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
7106 } else {
7107 e_err(probe, "failed to finish FDIR re-initialization, "
7108 "ignored adding FDIR ATR filters\n");
7109 }
7110}
7111
/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 */
7121static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
7122{
7123 struct ixgbe_hw *hw = &adapter->hw;
7124 u64 eics = 0;
7125 int i;
7126
7127
7128 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7129 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7130 test_bit(__IXGBE_RESETTING, &adapter->state))
7131 return;
7132
7133
7134 if (netif_carrier_ok(adapter->netdev)) {
7135 for (i = 0; i < adapter->num_tx_queues; i++)
7136 set_check_for_tx_hang(adapter->tx_ring[i]);
7137 for (i = 0; i < adapter->num_xdp_queues; i++)
7138 set_check_for_tx_hang(adapter->xdp_ring[i]);
7139 }
7140
7141 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
7147 IXGBE_WRITE_REG(hw, IXGBE_EICS,
7148 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
7149 } else {
7150
7151 for (i = 0; i < adapter->num_q_vectors; i++) {
7152 struct ixgbe_q_vector *qv = adapter->q_vector[i];
7153 if (qv->rx.ring || qv->tx.ring)
7154 eics |= BIT_ULL(i);
7155 }
7156 }
7157
7158
7159 ixgbe_irq_rearm_queues(adapter, eics);
7160}
7161
/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
7166static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
7167{
7168 struct ixgbe_hw *hw = &adapter->hw;
7169 u32 link_speed = adapter->link_speed;
7170 bool link_up = adapter->link_up;
7171 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
7172
7173 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
7174 return;
7175
7176 if (hw->mac.ops.check_link) {
7177 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
7178 } else {
7179
7180 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
7181 link_up = true;
7182 }
7183
7184 if (adapter->ixgbe_ieee_pfc)
7185 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
7186
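	/* with link up, enable MAC link flow control unless DCB PFC is in
	 * use (PFC replaces link-level pause), then refresh the Rx drop
	 * policy which depends on the flow control configuration
	 */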
7187 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
7188 hw->mac.ops.fc_enable(hw);
7189 ixgbe_set_rx_drop_en(adapter);
7190 }
7191
7192 if (link_up ||
7193 time_after(jiffies, (adapter->link_check_timeout +
7194 IXGBE_TRY_LINK_TIMEOUT))) {
7195 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
7196 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
7197 IXGBE_WRITE_FLUSH(hw);
7198 }
7199
7200 adapter->link_up = link_up;
7201 adapter->link_speed = link_speed;
7202}
7203
7204static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
7205{
7206#ifdef CONFIG_IXGBE_DCB
7207 struct net_device *netdev = adapter->netdev;
7208 struct dcb_app app = {
7209 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
7210 .protocol = 0,
7211 };
7212 u8 up = 0;
7213
7214 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
7215 up = dcb_ieee_getapp_mask(netdev, &app);
7216
7217 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
7218#endif
7219}
7220
/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter: pointer to the device adapter structure
 **/
7226static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
7227{
7228 struct net_device *netdev = adapter->netdev;
7229 struct ixgbe_hw *hw = &adapter->hw;
7230 u32 link_speed = adapter->link_speed;
7231 const char *speed_str;
7232 bool flow_rx, flow_tx;
7233
7234
7235 if (netif_carrier_ok(netdev))
7236 return;
7237
7238 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
7239
7240 switch (hw->mac.type) {
7241 case ixgbe_mac_82598EB: {
7242 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
7243 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
7244 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
7245 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
7246 }
7247 break;
7248 case ixgbe_mac_X540:
7249 case ixgbe_mac_X550:
7250 case ixgbe_mac_X550EM_x:
7251 case ixgbe_mac_x550em_a:
7252 case ixgbe_mac_82599EB: {
7253 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
7254 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
7255 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
7256 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
7257 }
7258 break;
7259 default:
7260 flow_tx = false;
7261 flow_rx = false;
7262 break;
7263 }
7264
7265 adapter->last_rx_ptp_check = jiffies;
7266
7267 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7268 ixgbe_ptp_start_cyclecounter(adapter);
7269
7270 switch (link_speed) {
7271 case IXGBE_LINK_SPEED_10GB_FULL:
7272 speed_str = "10 Gbps";
7273 break;
7274 case IXGBE_LINK_SPEED_5GB_FULL:
7275 speed_str = "5 Gbps";
7276 break;
7277 case IXGBE_LINK_SPEED_2_5GB_FULL:
7278 speed_str = "2.5 Gbps";
7279 break;
7280 case IXGBE_LINK_SPEED_1GB_FULL:
7281 speed_str = "1 Gbps";
7282 break;
7283 case IXGBE_LINK_SPEED_100_FULL:
7284 speed_str = "100 Mbps";
7285 break;
7286 case IXGBE_LINK_SPEED_10_FULL:
7287 speed_str = "10 Mbps";
7288 break;
7289 default:
7290 speed_str = "unknown speed";
7291 break;
7292 }
7293 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
7294 ((flow_rx && flow_tx) ? "RX/TX" :
7295 (flow_rx ? "RX" :
7296 (flow_tx ? "TX" : "None"))));
7297
7298 netif_carrier_on(netdev);
7299 ixgbe_check_vf_rate_limit(adapter);
7300
7301
7302 netif_tx_wake_all_queues(adapter->netdev);
7303
7304
7305 ixgbe_update_default_up(adapter);
7306
7307
7308 ixgbe_ping_all_vfs(adapter);
7309}
7310
/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter: pointer to the adapter structure
 **/
7316static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
7317{
7318 struct net_device *netdev = adapter->netdev;
7319 struct ixgbe_hw *hw = &adapter->hw;
7320
7321 adapter->link_up = false;
7322 adapter->link_speed = 0;
7323
7324
7325 if (!netif_carrier_ok(netdev))
7326 return;
7327
7328
7329 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
7330 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
7331
7332 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7333 ixgbe_ptp_start_cyclecounter(adapter);
7334
7335 e_info(drv, "NIC Link is Down\n");
7336 netif_carrier_off(netdev);
7337
7338
7339 ixgbe_ping_all_vfs(adapter);
7340}
7341
7342static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
7343{
7344 int i;
7345
7346 for (i = 0; i < adapter->num_tx_queues; i++) {
7347 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7348
7349 if (tx_ring->next_to_use != tx_ring->next_to_clean)
7350 return true;
7351 }
7352
7353 for (i = 0; i < adapter->num_xdp_queues; i++) {
7354 struct ixgbe_ring *ring = adapter->xdp_ring[i];
7355
7356 if (ring->next_to_use != ring->next_to_clean)
7357 return true;
7358 }
7359
7360 return false;
7361}
7362
7363static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
7364{
7365 struct ixgbe_hw *hw = &adapter->hw;
7366 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
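	/* number of Tx queues assigned to each VMDq pool */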
7367 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
7368
7369 int i, j;
7370
7371 if (!adapter->num_vfs)
7372 return false;
7373
7374
7375 if (hw->mac.type >= ixgbe_mac_X550)
7376 return false;
7377
7378 for (i = 0; i < adapter->num_vfs; i++) {
7379 for (j = 0; j < q_per_pool; j++) {
7380 u32 h, t;
7381
7382 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
7383 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
7384
7385 if (h != t)
7386 return true;
7387 }
7388 }
7389
7390 return false;
7391}
7392
/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/
7397static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
7398{
7399 if (!netif_carrier_ok(adapter->netdev)) {
7400 if (ixgbe_ring_tx_pending(adapter) ||
7401 ixgbe_vf_tx_pending(adapter)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
7407 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
7408 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
7409 }
7410 }
7411}
7412
7413#ifdef CONFIG_PCI_IOV
7414static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
7415{
7416 struct ixgbe_hw *hw = &adapter->hw;
7417 struct pci_dev *pdev = adapter->pdev;
7418 unsigned int vf;
7419 u32 gpc;
7420
7421 if (!(netif_carrier_ok(adapter->netdev)))
7422 return;
7423
7424 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
7425 if (gpc)
7426 return;
7427
	/* A stalled transmit path can mean an errant or malicious VF has
	 * caused a PCIe error.  Check each VF's PCI status register and
	 * issue a function-level reset (FLR) to any VF reporting a received
	 * master abort, so traffic can recover without a full PF reset.
	 */
7433 if (!pdev)
7434 return;
7435
7436
7437 for (vf = 0; vf < adapter->num_vfs; ++vf) {
7438 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
7439 u16 status_reg;
7440
7441 if (!vfdev)
7442 continue;
7443 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
7444 if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
7445 status_reg & PCI_STATUS_REC_MASTER_ABORT)
7446 pcie_flr(vfdev);
7447 }
7448}
7449
7450static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
7451{
7452 u32 ssvpc;

	/* Do not perform spoof check for 82598 or if not in IOV mode */
7455 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
7456 adapter->num_vfs == 0)
7457 return;
7458
7459 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);

	/*
	 * ssvpc register is cleared on read, if zero then no
	 * spoofed packets in the last interval
	 */
7465 if (!ssvpc)
7466 return;
7467
7468 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
7469}
7470#else
7471static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
7472{
7473}
7474
7475static void
7476ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
7477{
7478}
7479#endif
7480
/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/
7486static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
7487{
7488
7489 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7490 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7491 test_bit(__IXGBE_RESETTING, &adapter->state))
7492 return;
7493
7494 ixgbe_watchdog_update_link(adapter);
7495
7496 if (adapter->link_up)
7497 ixgbe_watchdog_link_is_up(adapter);
7498 else
7499 ixgbe_watchdog_link_is_down(adapter);
7500
7501 ixgbe_check_for_bad_vf(adapter);
7502 ixgbe_spoof_check(adapter);
7503 ixgbe_update_stats(adapter);
7504
7505 ixgbe_watchdog_flush_tx(adapter);
7506}
7507
/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: pointer to the device adapter structure
 **/
7512static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
7513{
7514 struct ixgbe_hw *hw = &adapter->hw;
7515 s32 err;
7516
7517
7518 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
7519 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7520 return;
7521
7522 if (adapter->sfp_poll_time &&
7523 time_after(adapter->sfp_poll_time, jiffies))
7524 return;
7525
7526
7527 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7528 return;
7529
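	/* throttle SFP module identification to at most once per poll interval */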
7530 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
7531
7532 err = hw->phy.ops.identify_sfp(hw);
7533 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7534 goto sfp_out;
7535
7536 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* If no cable is present, then we need to reset
		 * the next time we find a good cable. */
7539 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
7540 }
7541
7542
7543 if (err)
7544 goto sfp_out;
7545
7546
7547 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7548 goto sfp_out;
7549
7550 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
7551
	/*
	 * A module may be identified correctly, but the EEPROM may not have
	 * support for that module; setup_sfp() will fail in that case and
	 * we should not attempt to bring the link up
	 */
7557 if (hw->mac.type == ixgbe_mac_82598EB)
7558 err = hw->phy.ops.reset(hw);
7559 else
7560 err = hw->mac.ops.setup_sfp(hw);
7561
7562 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7563 goto sfp_out;
7564
7565 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
7566 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
7567
7568sfp_out:
7569 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7570
7571 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
7572 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
7573 e_dev_err("failed to initialize because an unsupported "
7574 "SFP+ module type was detected.\n");
7575 e_dev_err("Reload the driver after installing a "
7576 "supported module.\n");
7577 unregister_netdev(adapter->netdev);
7578 }
7579}
7580
/**
 * ixgbe_sfp_link_config_subtask - set up link SFP if needed
 * @adapter: pointer to the device adapter structure
 **/
7585static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
7586{
7587 struct ixgbe_hw *hw = &adapter->hw;
7588 u32 cap_speed;
7589 u32 speed;
7590 bool autoneg = false;
7591
7592 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
7593 return;
7594
7595
7596 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7597 return;
7598
7599 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
7600
7601 hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
7602
7603
7604 if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
7605 speed = IXGBE_LINK_SPEED_10GB_FULL;
7606 else
7607 speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
7608 IXGBE_LINK_SPEED_1GB_FULL);
7609
7610 if (hw->mac.ops.setup_link)
7611 hw->mac.ops.setup_link(hw, speed, true);
7612
7613 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
7614 adapter->link_check_timeout = jiffies;
7615 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7616}
7617
/**
 * ixgbe_service_timer - Timer Call-back
 * @t: pointer to the timer_list structure
 **/
7622static void ixgbe_service_timer(struct timer_list *t)
7623{
7624 struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
7625 unsigned long next_event_offset;
7626
7627
7628 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
7629 next_event_offset = HZ / 10;
7630 else
7631 next_event_offset = HZ * 2;
7632
7633
7634 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7635
7636 ixgbe_service_event_schedule(adapter);
7637}
7638
7639static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
7640{
7641 struct ixgbe_hw *hw = &adapter->hw;
7642 u32 status;
7643
7644 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
7645 return;
7646
7647 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
7648
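	/* let the external PHY service its Link Alarm Status Interrupt (LASI);
	 * the only condition surfaced back to the driver here is overtemp
	 */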
7649 if (!hw->phy.ops.handle_lasi)
7650 return;
7651
7652 status = hw->phy.ops.handle_lasi(&adapter->hw);
7653 if (status != IXGBE_ERR_OVERTEMP)
7654 return;
7655
7656 e_crit(drv, "%s\n", ixgbe_overheat_msg);
7657}
7658
7659static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
7660{
7661 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
7662 return;

	/* If we're already down, removing or resetting, just bail */
7665 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7666 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7667 test_bit(__IXGBE_RESETTING, &adapter->state))
7668 return;
7669
7670 ixgbe_dump(adapter);
7671 netdev_err(adapter->netdev, "Reset adapter\n");
7672 adapter->tx_timeout_count++;
7673
7674 rtnl_lock();
7675 ixgbe_reinit_locked(adapter);
7676 rtnl_unlock();
7677}
7678
/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
7683static void ixgbe_service_task(struct work_struct *work)
7684{
7685 struct ixgbe_adapter *adapter = container_of(work,
7686 struct ixgbe_adapter,
7687 service_task);
7688 if (ixgbe_removed(adapter->hw.hw_addr)) {
7689 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
7690 rtnl_lock();
7691 ixgbe_down(adapter);
7692 rtnl_unlock();
7693 }
7694 ixgbe_service_event_complete(adapter);
7695 return;
7696 }
7697 if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
7698 rtnl_lock();
7699 adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
7700 udp_tunnel_get_rx_info(adapter->netdev);
7701 rtnl_unlock();
7702 }
7703 ixgbe_reset_subtask(adapter);
7704 ixgbe_phy_interrupt_subtask(adapter);
7705 ixgbe_sfp_detection_subtask(adapter);
7706 ixgbe_sfp_link_config_subtask(adapter);
7707 ixgbe_check_overtemp_subtask(adapter);
7708 ixgbe_watchdog_subtask(adapter);
7709 ixgbe_fdir_reinit_subtask(adapter);
7710 ixgbe_check_hang_subtask(adapter);
7711
7712 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
7713 ixgbe_ptp_overflow_check(adapter);
7714 ixgbe_ptp_rx_hang(adapter);
7715 ixgbe_ptp_tx_hang(adapter);
7716 }
7717
7718 ixgbe_service_event_complete(adapter);
7719}
7720
7721static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7722 struct ixgbe_tx_buffer *first,
7723 u8 *hdr_len)
7724{
7725 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
7726 struct sk_buff *skb = first->skb;
7727 union {
7728 struct iphdr *v4;
7729 struct ipv6hdr *v6;
7730 unsigned char *hdr;
7731 } ip;
7732 union {
7733 struct tcphdr *tcp;
7734 unsigned char *hdr;
7735 } l4;
7736 u32 paylen, l4_offset;
7737 int err;
7738
7739 if (skb->ip_summed != CHECKSUM_PARTIAL)
7740 return 0;
7741
7742 if (!skb_is_gso(skb))
7743 return 0;
7744
7745 err = skb_cow_head(skb, 0);
7746 if (err < 0)
7747 return err;
7748
7749 if (eth_p_mpls(first->protocol))
7750 ip.hdr = skb_inner_network_header(skb);
7751 else
7752 ip.hdr = skb_network_header(skb);
7753 l4.hdr = skb_checksum_start(skb);
7754
7755
7756 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
7757
7758
7759 if (ip.v4->version == 4) {
7760 unsigned char *csum_start = skb_checksum_start(skb);
7761 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
7762
		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
7766 ip.v4->check = csum_fold(csum_partial(trans_start,
7767 csum_start - trans_start,
7768 0));
7769 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7770
7771 ip.v4->tot_len = 0;
7772 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7773 IXGBE_TX_FLAGS_CSUM |
7774 IXGBE_TX_FLAGS_IPV4;
7775 } else {
7776 ip.v6->payload_len = 0;
7777 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7778 IXGBE_TX_FLAGS_CSUM;
7779 }
7780
7781
7782 l4_offset = l4.hdr - skb->data;
7783
7784
7785 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
7786
7787
7788 paylen = skb->len - l4_offset;
7789 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
7790
7791
7792 first->gso_segs = skb_shinfo(skb)->gso_segs;
7793 first->bytecount += (first->gso_segs - 1) * *hdr_len;
7794
7795
7796 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
7797 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
7798
7799
7800 vlan_macip_lens = l4.hdr - ip.hdr;
7801 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
7802 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7803
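	/* push the assembled offload fields to hardware in a context
	 * descriptor that precedes this packet's data descriptors
	 */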
7804 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
7805 mss_l4len_idx);
7806
7807 return 1;
7808}
7809
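/* return true when the checksum start offset lines up with an SCTP header
 * located by walking the IPv6 extension header chain
 */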
7810static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
7811{
7812 unsigned int offset = 0;
7813
7814 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
7815
7816 return offset == skb_checksum_start_offset(skb);
7817}
7818
7819static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
7820 struct ixgbe_tx_buffer *first,
7821 struct ixgbe_ipsec_tx_data *itd)
7822{
7823 struct sk_buff *skb = first->skb;
7824 u32 vlan_macip_lens = 0;
7825 u32 fceof_saidx = 0;
7826 u32 type_tucmd = 0;
7827
7828 if (skb->ip_summed != CHECKSUM_PARTIAL) {
7829csum_failed:
7830 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
7831 IXGBE_TX_FLAGS_CC)))
7832 return;
7833 goto no_csum;
7834 }
7835
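	/* infer the L4 protocol from where the checksum field sits in the
	 * header; anything the hardware cannot offload is handled by
	 * skb_checksum_help() in the default case
	 */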
7836 switch (skb->csum_offset) {
7837 case offsetof(struct tcphdr, check):
7838 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
		/* fall through */
7840 case offsetof(struct udphdr, check):
7841 break;
7842 case offsetof(struct sctphdr, checksum):
7843
7844 if (((first->protocol == htons(ETH_P_IP)) &&
7845 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
7846 ((first->protocol == htons(ETH_P_IPV6)) &&
7847 ixgbe_ipv6_csum_is_sctp(skb))) {
7848 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
7849 break;
7850 }
		/* fall through */
7852 default:
7853 skb_checksum_help(skb);
7854 goto csum_failed;
7855 }
7856
7857
7858 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
7859 vlan_macip_lens = skb_checksum_start_offset(skb) -
7860 skb_network_offset(skb);
7861no_csum:
7862
7863 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
7864 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7865
7866 if (first->tx_flags & IXGBE_TX_FLAGS_IPSEC) {
7867 fceof_saidx |= itd->sa_idx;
7868 type_tucmd |= itd->flags | itd->trailer_len;
7869 }
7870
7871 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
7872}
7873
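/* IXGBE_SET_FLAG maps a tx_flags bit (_flag) onto a descriptor bit (_result)
 * by scaling with a multiply or divide, avoiding a conditional branch and
 * working whether _flag sits above or below _result in bit position.
 */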
7874#define IXGBE_SET_FLAG(_input, _flag, _result) \
7875 ((_flag <= _result) ? \
7876 ((u32)(_input & _flag) * (_result / _flag)) : \
7877 ((u32)(_input & _flag) / (_flag / _result)))
7878
7879static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
7880{
7881
7882 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
7883 IXGBE_ADVTXD_DCMD_DEXT |
7884 IXGBE_ADVTXD_DCMD_IFCS;
7885
7886
7887 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
7888 IXGBE_ADVTXD_DCMD_VLE);
7889
7890
7891 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
7892 IXGBE_ADVTXD_DCMD_TSE);
7893
7894
7895 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
7896 IXGBE_ADVTXD_MAC_TSTAMP);
7897
7898
7899 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
7900
7901 return cmd_type;
7902}
7903
7904static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
7905 u32 tx_flags, unsigned int paylen)
7906{
7907 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
7908
7909
7910 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7911 IXGBE_TX_FLAGS_CSUM,
7912 IXGBE_ADVTXD_POPTS_TXSM);
7913
7914
7915 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7916 IXGBE_TX_FLAGS_IPV4,
7917 IXGBE_ADVTXD_POPTS_IXSM);
7918
7919
7920 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7921 IXGBE_TX_FLAGS_IPSEC,
7922 IXGBE_ADVTXD_POPTS_IPSEC);
7923
7924
7925
7926
7927
7928 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7929 IXGBE_TX_FLAGS_CC,
7930 IXGBE_ADVTXD_CC);
7931
7932 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
7933}
7934
7935static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7936{
7937 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
7938
	/* memory barrier: make the stopped queue state visible and force the
	 * free descriptor count to be re-read after it, in case the cleanup
	 * path freed descriptors while we were stopping the queue
	 */
7943 smp_mb();
7944
	/* We need to check again in case another CPU has just
	 * made room available.
	 */
7948 if (likely(ixgbe_desc_unused(tx_ring) < size))
7949 return -EBUSY;
7950
7951
7952 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
7953 ++tx_ring->tx_stats.restart_queue;
7954 return 0;
7955}
7956
7957static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7958{
7959 if (likely(ixgbe_desc_unused(tx_ring) >= size))
7960 return 0;
7961
7962 return __ixgbe_maybe_stop_tx(tx_ring, size);
7963}
7964
7965#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
7966 IXGBE_TXD_CMD_RS)
7967
7968static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
7969 struct ixgbe_tx_buffer *first,
7970 const u8 hdr_len)
7971{
7972 struct sk_buff *skb = first->skb;
7973 struct ixgbe_tx_buffer *tx_buffer;
7974 union ixgbe_adv_tx_desc *tx_desc;
7975 struct skb_frag_struct *frag;
7976 dma_addr_t dma;
7977 unsigned int data_len, size;
7978 u32 tx_flags = first->tx_flags;
7979 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
7980 u16 i = tx_ring->next_to_use;
7981
7982 tx_desc = IXGBE_TX_DESC(tx_ring, i);
7983
7984 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
7985
7986 size = skb_headlen(skb);
7987 data_len = skb->data_len;
7988
7989#ifdef IXGBE_FCOE
7990 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
7991 if (data_len < sizeof(struct fcoe_crc_eof)) {
7992 size -= sizeof(struct fcoe_crc_eof) - data_len;
7993 data_len = 0;
7994 } else {
7995 data_len -= sizeof(struct fcoe_crc_eof);
7996 }
7997 }
7998
7999#endif
8000 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
8001
8002 tx_buffer = first;
8003
8004 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
8005 if (dma_mapping_error(tx_ring->dev, dma))
8006 goto dma_error;
8007
8008
8009 dma_unmap_len_set(tx_buffer, len, size);
8010 dma_unmap_addr_set(tx_buffer, dma, dma);
8011
8012 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8013
8014 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
8015 tx_desc->read.cmd_type_len =
8016 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
8017
8018 i++;
8019 tx_desc++;
8020 if (i == tx_ring->count) {
8021 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8022 i = 0;
8023 }
8024 tx_desc->read.olinfo_status = 0;
8025
8026 dma += IXGBE_MAX_DATA_PER_TXD;
8027 size -= IXGBE_MAX_DATA_PER_TXD;
8028
8029 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8030 }
8031
8032 if (likely(!data_len))
8033 break;
8034
8035 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
8036
8037 i++;
8038 tx_desc++;
8039 if (i == tx_ring->count) {
8040 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
8041 i = 0;
8042 }
8043 tx_desc->read.olinfo_status = 0;
8044
8045#ifdef IXGBE_FCOE
8046 size = min_t(unsigned int, data_len, skb_frag_size(frag));
8047#else
8048 size = skb_frag_size(frag);
8049#endif
8050 data_len -= size;
8051
8052 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
8053 DMA_TO_DEVICE);
8054
8055 tx_buffer = &tx_ring->tx_buffer_info[i];
8056 }
8057
8058
8059 cmd_type |= size | IXGBE_TXD_CMD;
8060 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8061
8062 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
8063
8064
8065 first->time_stamp = jiffies;
8066
	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
8075 wmb();
8076
8077
8078 first->next_to_watch = tx_desc;
8079
8080 i++;
8081 if (i == tx_ring->count)
8082 i = 0;
8083
8084 tx_ring->next_to_use = i;
8085
8086 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
8087
8088 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
8089 writel(i, tx_ring->tail);
8090
		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
8094 mmiowb();
8095 }
8096
8097 return 0;
8098dma_error:
8099 dev_err(tx_ring->dev, "TX DMA map failed\n");
8100
8101
8102 for (;;) {
8103 tx_buffer = &tx_ring->tx_buffer_info[i];
8104 if (dma_unmap_len(tx_buffer, len))
8105 dma_unmap_page(tx_ring->dev,
8106 dma_unmap_addr(tx_buffer, dma),
8107 dma_unmap_len(tx_buffer, len),
8108 DMA_TO_DEVICE);
8109 dma_unmap_len_set(tx_buffer, len, 0);
8110 if (tx_buffer == first)
8111 break;
8112 if (i == 0)
8113 i += tx_ring->count;
8114 i--;
8115 }
8116
8117 dev_kfree_skb_any(first->skb);
8118 first->skb = NULL;
8119
8120 tx_ring->next_to_use = i;
8121
8122 return -1;
8123}
8124
8125static void ixgbe_atr(struct ixgbe_ring *ring,
8126 struct ixgbe_tx_buffer *first)
8127{
8128 struct ixgbe_q_vector *q_vector = ring->q_vector;
8129 union ixgbe_atr_hash_dword input = { .dword = 0 };
8130 union ixgbe_atr_hash_dword common = { .dword = 0 };
8131 union {
8132 unsigned char *network;
8133 struct iphdr *ipv4;
8134 struct ipv6hdr *ipv6;
8135 } hdr;
8136 struct tcphdr *th;
8137 unsigned int hlen;
8138 struct sk_buff *skb;
8139 __be16 vlan_id;
8140 int l4_proto;
8141
8142
8143 if (!q_vector)
8144 return;
8145
8146
8147 if (!ring->atr_sample_rate)
8148 return;
8149
8150 ring->atr_count++;
8151
8152
8153 if ((first->protocol != htons(ETH_P_IP)) &&
8154 (first->protocol != htons(ETH_P_IPV6)))
8155 return;
8156
8157
8158 skb = first->skb;
8159 hdr.network = skb_network_header(skb);
8160 if (unlikely(hdr.network <= skb->data))
8161 return;
8162 if (skb->encapsulation &&
8163 first->protocol == htons(ETH_P_IP) &&
8164 hdr.ipv4->protocol == IPPROTO_UDP) {
8165 struct ixgbe_adapter *adapter = q_vector->adapter;
8166
8167 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8168 VXLAN_HEADROOM))
8169 return;
8170
8171
8172 if (adapter->vxlan_port &&
8173 udp_hdr(skb)->dest == adapter->vxlan_port)
8174 hdr.network = skb_inner_network_header(skb);
8175
8176 if (adapter->geneve_port &&
8177 udp_hdr(skb)->dest == adapter->geneve_port)
8178 hdr.network = skb_inner_network_header(skb);
8179 }
8180
	/* Make sure we have at least [minimum IPv4 header + TCP]
	 * or [IPv6 header] bytes
	 */
8184 if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
8185 return;
8186
8187
8188 switch (hdr.ipv4->version) {
8189 case IPVERSION:
8190
8191 hlen = (hdr.network[0] & 0x0F) << 2;
8192 l4_proto = hdr.ipv4->protocol;
8193 break;
8194 case 6:
8195 hlen = hdr.network - skb->data;
8196 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
8197 hlen -= hdr.network - skb->data;
8198 break;
8199 default:
8200 return;
8201 }
8202
8203 if (l4_proto != IPPROTO_TCP)
8204 return;
8205
8206 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8207 hlen + sizeof(struct tcphdr)))
8208 return;
8209
8210 th = (struct tcphdr *)(hdr.network + hlen);
8211
8212
8213 if (th->fin)
8214 return;
8215
8216
8217 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
8218 return;
8219
8220
8221 ring->atr_count = 0;
8222
8223 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
8224
	/*
	 * src and dst are inverted below: the signature filter is meant to
	 * match packets of this flow arriving on the receive side, where the
	 * addresses and ports appear swapped relative to this transmit.
	 */
8232 input.formatted.vlan_id = vlan_id;
8233
	/*
	 * since src port and flex bytes occupy the same word XOR them together
	 * and write the value to source port portion of compressed dword
	 */
8238 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
8239 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
8240 else
8241 common.port.src ^= th->dest ^ first->protocol;
8242 common.port.dst ^= th->source;
8243
8244 switch (hdr.ipv4->version) {
8245 case IPVERSION:
8246 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
8247 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
8248 break;
8249 case 6:
8250 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
8251 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
8252 hdr.ipv6->saddr.s6_addr32[1] ^
8253 hdr.ipv6->saddr.s6_addr32[2] ^
8254 hdr.ipv6->saddr.s6_addr32[3] ^
8255 hdr.ipv6->daddr.s6_addr32[0] ^
8256 hdr.ipv6->daddr.s6_addr32[1] ^
8257 hdr.ipv6->daddr.s6_addr32[2] ^
8258 hdr.ipv6->daddr.s6_addr32[3];
8259 break;
8260 default:
8261 break;
8262 }
8263
8264 if (hdr.network != skb_network_header(skb))
8265 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
8266
8267
8268 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
8269 input, common, ring->queue_index);
8270}
8271
8272static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
8273 void *accel_priv, select_queue_fallback_t fallback)
8274{
8275 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
8276 struct ixgbe_adapter *adapter;
8277 int txq;
8278#ifdef IXGBE_FCOE
8279 struct ixgbe_ring_feature *f;
8280#endif
8281
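	/* transmits for an offloaded macvlan (L2 forwarding accelerator) are
	 * hashed into that accelerator's reserved range of Tx queues
	 */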
8282 if (fwd_adapter) {
8283 adapter = netdev_priv(dev);
8284 txq = reciprocal_scale(skb_get_hash(skb),
8285 adapter->num_rx_queues_per_pool);
8286
8287 return txq + fwd_adapter->tx_base_queue;
8288 }
8289
8290#ifdef IXGBE_FCOE
8291
	/*
	 * only execute the code below if protocol is FCoE
	 * or FIP and we have FCoE enabled on the adapter
	 */
8296 switch (vlan_get_protocol(skb)) {
8297 case htons(ETH_P_FCOE):
8298 case htons(ETH_P_FIP):
8299 adapter = netdev_priv(dev);
8300
8301 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
8302 break;
		/* fall through */
8304 default:
8305 return fallback(dev, skb);
8306 }
8307
8308 f = &adapter->ring_feature[RING_F_FCOE];
8309
8310 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
8311 smp_processor_id();
8312
8313 while (txq >= f->indices)
8314 txq -= f->indices;
8315
8316 return txq + f->offset;
8317#else
8318 return fallback(dev, skb);
8319#endif
8320}
8321
8322static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
8323 struct xdp_buff *xdp)
8324{
8325 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
8326 struct ixgbe_tx_buffer *tx_buffer;
8327 union ixgbe_adv_tx_desc *tx_desc;
8328 u32 len, cmd_type;
8329 dma_addr_t dma;
8330 u16 i;
8331
8332 len = xdp->data_end - xdp->data;
8333
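	/* a full XDP Tx ring means the frame is dropped and reported consumed */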
8334 if (unlikely(!ixgbe_desc_unused(ring)))
8335 return IXGBE_XDP_CONSUMED;
8336
8337 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
8338 if (dma_mapping_error(ring->dev, dma))
8339 return IXGBE_XDP_CONSUMED;
8340
8341
8342 tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
8343 tx_buffer->bytecount = len;
8344 tx_buffer->gso_segs = 1;
8345 tx_buffer->protocol = 0;
8346
8347 i = ring->next_to_use;
8348 tx_desc = IXGBE_TX_DESC(ring, i);
8349
8350 dma_unmap_len_set(tx_buffer, len, len);
8351 dma_unmap_addr_set(tx_buffer, dma, dma);
8352 tx_buffer->data = xdp->data;
8353 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8354
8355
8356 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8357 IXGBE_ADVTXD_DCMD_DEXT |
8358 IXGBE_ADVTXD_DCMD_IFCS;
8359 cmd_type |= len | IXGBE_TXD_CMD;
8360 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8361 tx_desc->read.olinfo_status =
8362 cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* Avoid any potential race with xdp_xmit and cleanup */
8365 smp_wmb();
8366
8367
8368 i++;
8369 if (i == ring->count)
8370 i = 0;
8371
8372 tx_buffer->next_to_watch = tx_desc;
8373 ring->next_to_use = i;
8374
8375 return IXGBE_XDP_TX;
8376}
8377
8378netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8379 struct ixgbe_adapter *adapter,
8380 struct ixgbe_ring *tx_ring)
8381{
8382 struct ixgbe_tx_buffer *first;
8383 int tso;
8384 u32 tx_flags = 0;
8385 unsigned short f;
8386 u16 count = TXD_USE_COUNT(skb_headlen(skb));
8387 struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
8388 __be16 protocol = skb->protocol;
8389 u8 hdr_len = 0;
8390
	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
8398 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
8399 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
8400
8401 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8402 tx_ring->tx_stats.tx_busy++;
8403 return NETDEV_TX_BUSY;
8404 }
8405
8406
8407 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
8408 first->skb = skb;
8409 first->bytecount = skb->len;
8410 first->gso_segs = 1;
8411
8412
8413 if (skb_vlan_tag_present(skb)) {
8414 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
8415 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8416
8417 } else if (protocol == htons(ETH_P_8021Q)) {
8418 struct vlan_hdr *vhdr, _vhdr;
8419 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
8420 if (!vhdr)
8421 goto out_drop;
8422
8423 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
8424 IXGBE_TX_FLAGS_VLAN_SHIFT;
8425 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
8426 }
8427 protocol = vlan_get_protocol(skb);
8428
8429 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
8430 adapter->ptp_clock) {
8431 if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
8432 &adapter->state)) {
8433 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8434 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
8435
8436
8437 adapter->ptp_tx_skb = skb_get(skb);
8438 adapter->ptp_tx_start = jiffies;
8439 schedule_work(&adapter->ptp_tx_work);
8440 } else {
8441 adapter->tx_hwtstamp_skipped++;
8442 }
8443 }
8444
8445 skb_tx_timestamp(skb);
8446
8447#ifdef CONFIG_PCI_IOV
	/*
	 * Use the l2switch_enable flag - would be false if the DMA
	 * Tx switch had been disabled.
	 */
8452 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8453 tx_flags |= IXGBE_TX_FLAGS_CC;
8454
8455#endif
8456
8457 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8458 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
8459 (skb->priority != TC_PRIO_CONTROL))) {
8460 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
8461 tx_flags |= (skb->priority & 0x7) <<
8462 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
8463 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
8464 struct vlan_ethhdr *vhdr;
8465
8466 if (skb_cow_head(skb, 0))
8467 goto out_drop;
8468 vhdr = (struct vlan_ethhdr *)skb->data;
8469 vhdr->h_vlan_TCI = htons(tx_flags >>
8470 IXGBE_TX_FLAGS_VLAN_SHIFT);
8471 } else {
8472 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8473 }
8474 }
8475
8476
8477 first->tx_flags = tx_flags;
8478 first->protocol = protocol;
8479
8480#ifdef IXGBE_FCOE
8481
8482 if ((protocol == htons(ETH_P_FCOE)) &&
8483 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
8484 tso = ixgbe_fso(tx_ring, first, &hdr_len);
8485 if (tso < 0)
8486 goto out_drop;
8487
8488 goto xmit_fcoe;
8489 }
8490
8491#endif
8492
8493#ifdef CONFIG_XFRM_OFFLOAD
8494 if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8495 goto out_drop;
8496#endif
8497 tso = ixgbe_tso(tx_ring, first, &hdr_len);
8498 if (tso < 0)
8499 goto out_drop;
8500 else if (!tso)
8501 ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
8502
8503
8504 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
8505 ixgbe_atr(tx_ring, first);
8506
8507#ifdef IXGBE_FCOE
8508xmit_fcoe:
8509#endif
8510 if (ixgbe_tx_map(tx_ring, first, hdr_len))
8511 goto cleanup_tx_timestamp;
8512
8513 return NETDEV_TX_OK;
8514
8515out_drop:
8516 dev_kfree_skb_any(first->skb);
8517 first->skb = NULL;
8518cleanup_tx_timestamp:
8519 if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
8520 dev_kfree_skb_any(adapter->ptp_tx_skb);
8521 adapter->ptp_tx_skb = NULL;
8522 cancel_work_sync(&adapter->ptp_tx_work);
8523 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
8524 }
8525
8526 return NETDEV_TX_OK;
8527}
8528
8529static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
8530 struct net_device *netdev,
8531 struct ixgbe_ring *ring)
8532{
8533 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8534 struct ixgbe_ring *tx_ring;
8535
	/*
	 * The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
8540 if (skb_put_padto(skb, 17))
8541 return NETDEV_TX_OK;
8542
8543 tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
8544
8545 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
8546}
8547
8548static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
8549 struct net_device *netdev)
8550{
8551 return __ixgbe_xmit_frame(skb, netdev, NULL);
8552}
8553
/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
8561static int ixgbe_set_mac(struct net_device *netdev, void *p)
8562{
8563 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8564 struct ixgbe_hw *hw = &adapter->hw;
8565 struct sockaddr *addr = p;
8566
8567 if (!is_valid_ether_addr(addr->sa_data))
8568 return -EADDRNOTAVAIL;
8569
8570 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
8571 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
8572
8573 ixgbe_mac_set_default_filter(adapter);
8574
8575 return 0;
8576}
8577
8578static int
8579ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
8580{
8581 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8582 struct ixgbe_hw *hw = &adapter->hw;
8583 u16 value;
8584 int rc;
8585
8586 if (prtad != hw->phy.mdio.prtad)
8587 return -EINVAL;
8588 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
8589 if (!rc)
8590 rc = value;
8591 return rc;
8592}
8593
8594static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
8595 u16 addr, u16 value)
8596{
8597 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8598 struct ixgbe_hw *hw = &adapter->hw;
8599
8600 if (prtad != hw->phy.mdio.prtad)
8601 return -EINVAL;
8602 return hw->phy.ops.write_reg(hw, addr, devad, value);
8603}
8604
8605static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
8606{
8607 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8608
8609 switch (cmd) {
8610 case SIOCSHWTSTAMP:
8611 return ixgbe_ptp_set_ts_config(adapter, req);
8612 case SIOCGHWTSTAMP:
8613 return ixgbe_ptp_get_ts_config(adapter, req);
8614 case SIOCGMIIPHY:
8615 if (!adapter->hw.phy.ops.read_reg)
8616 return -EOPNOTSUPP;
		/* fall through */
8618 default:
8619 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8620 }
8621}
8622
/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
8630static int ixgbe_add_sanmac_netdev(struct net_device *dev)
8631{
8632 int err = 0;
8633 struct ixgbe_adapter *adapter = netdev_priv(dev);
8634 struct ixgbe_hw *hw = &adapter->hw;
8635
8636 if (is_valid_ether_addr(hw->mac.san_addr)) {
8637 rtnl_lock();
8638 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
8639 rtnl_unlock();
8640
8641
8642 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
8643 }
8644 return err;
8645}
8646
/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
8654static int ixgbe_del_sanmac_netdev(struct net_device *dev)
8655{
8656 int err = 0;
8657 struct ixgbe_adapter *adapter = netdev_priv(dev);
8658 struct ixgbe_mac_info *mac = &adapter->hw.mac;
8659
8660 if (is_valid_ether_addr(mac->san_addr)) {
8661 rtnl_lock();
8662 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
8663 rtnl_unlock();
8664 }
8665 return err;
8666}
8667
8668#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
8674static void ixgbe_netpoll(struct net_device *netdev)
8675{
8676 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8677 int i;
8678
8679
8680 if (test_bit(__IXGBE_DOWN, &adapter->state))
8681 return;
8682
8683
8684 for (i = 0; i < adapter->num_q_vectors; i++)
8685 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
8686}
8687
8688#endif
8689
8690static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
8691 struct ixgbe_ring *ring)
8692{
8693 u64 bytes, packets;
8694 unsigned int start;
8695
8696 if (ring) {
8697 do {
8698 start = u64_stats_fetch_begin_irq(&ring->syncp);
8699 packets = ring->stats.packets;
8700 bytes = ring->stats.bytes;
8701 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8702 stats->tx_packets += packets;
8703 stats->tx_bytes += bytes;
8704 }
8705}
8706
8707static void ixgbe_get_stats64(struct net_device *netdev,
8708 struct rtnl_link_stats64 *stats)
8709{
8710 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8711 int i;
8712
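	/* per-ring counters are sampled under the u64_stats seqcount so that
	 * 64-bit values read consistently on 32-bit systems
	 */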
8713 rcu_read_lock();
8714 for (i = 0; i < adapter->num_rx_queues; i++) {
8715 struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
8716 u64 bytes, packets;
8717 unsigned int start;
8718
8719 if (ring) {
8720 do {
8721 start = u64_stats_fetch_begin_irq(&ring->syncp);
8722 packets = ring->stats.packets;
8723 bytes = ring->stats.bytes;
8724 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8725 stats->rx_packets += packets;
8726 stats->rx_bytes += bytes;
8727 }
8728 }
8729
8730 for (i = 0; i < adapter->num_tx_queues; i++) {
8731 struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
8732
8733 ixgbe_get_ring_stats64(stats, ring);
8734 }
8735 for (i = 0; i < adapter->num_xdp_queues; i++) {
8736 struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
8737
8738 ixgbe_get_ring_stats64(stats, ring);
8739 }
8740 rcu_read_unlock();
8741
8742
8743 stats->multicast = netdev->stats.multicast;
8744 stats->rx_errors = netdev->stats.rx_errors;
8745 stats->rx_length_errors = netdev->stats.rx_length_errors;
8746 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
8747 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
8748}
8749
8750#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
 * @adapter: Pointer to adapter struct
 * @tc: number of traffic classes currently enabled
 *
 * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
 * 802.1Q priority maps to a packet buffer that exists.
 */
8759static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
8760{
8761 struct ixgbe_hw *hw = &adapter->hw;
8762 u32 reg, rsave;
8763 int i;
8764
	/* 82598 have a static priority to TC mapping that can not
	 * be changed so no validation is needed.
	 */
8768 if (hw->mac.type == ixgbe_mac_82598EB)
8769 return;
8770
8771 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
8772 rsave = reg;
8773
8774 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8775 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
8776
8777
8778 if (up2tc > tc)
8779 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
8780 }
8781
8782 if (reg != rsave)
8783 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
8784
8785 return;
8786}
8787
/**
 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
 * @adapter: Pointer to adapter struct
 *
 * Populate the netdev user priority to tc map
 */
8794static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
8795{
8796 struct net_device *dev = adapter->netdev;
8797 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
8798 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
8799 u8 prio;
8800
8801 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
8802 u8 tc = 0;
8803
8804 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
8805 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
8806 else if (ets)
8807 tc = ets->prio_tc[prio];
8808
8809 netdev_set_prio_tc_map(dev, prio, tc);
8810 }
8811}
8812
8813#endif
8814
/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 **/
8820int ixgbe_setup_tc(struct net_device *dev, u8 tc)
8821{
8822 struct ixgbe_adapter *adapter = netdev_priv(dev);
8823 struct ixgbe_hw *hw = &adapter->hw;
8824
	/* Hardware supports up to 8 traffic classes */
8826 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
8827 return -EINVAL;
8828
8829 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
8830 return -EINVAL;
8831
	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
8836 if (netif_running(dev))
8837 ixgbe_close(dev);
8838 else
8839 ixgbe_reset(adapter);
8840
8841 ixgbe_clear_interrupt_scheme(adapter);
8842
8843#ifdef CONFIG_IXGBE_DCB
8844 if (tc) {
8845 netdev_set_num_tc(dev, tc);
8846 ixgbe_set_prio_tc_map(adapter);
8847
8848 adapter->hw_tcs = tc;
8849 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
8850
8851 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
8852 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
8853 adapter->hw.fc.requested_mode = ixgbe_fc_none;
8854 }
8855 } else {
8856 netdev_reset_tc(dev);
8857
		/* To support macvlan offload we have to use num_tc to
		 * restrict the queues that can be used by the device.
		 * By doing this we can avoid reporting a false number of
		 * queues.
		 */
8863 if (!tc && adapter->num_rx_pools > 1)
8864 netdev_set_num_tc(dev, 1);
8865
8866 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
8867 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
8868
8869 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
8870 adapter->hw_tcs = tc;
8871
8872 adapter->temp_dcb_cfg.pfc_mode_enable = false;
8873 adapter->dcb_cfg.pfc_mode_enable = false;
8874 }
8875
8876 ixgbe_validate_rtr(adapter, tc);
8877
8878#endif
8879 ixgbe_init_interrupt_scheme(adapter);
8880
8881 if (netif_running(dev))
8882 return ixgbe_open(dev);
8883
8884 return 0;
8885}
8886
8887static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
8888 struct tc_cls_u32_offload *cls)
8889{
8890 u32 hdl = cls->knode.handle;
8891 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
8892 u32 loc = cls->knode.handle & 0xfffff;
8893 int err = 0, i, j;
8894 struct ixgbe_jump_table *jump = NULL;
8895
8896 if (loc > IXGBE_MAX_HW_ENTRIES)
8897 return -EINVAL;
8898
8899 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
8900 return -EINVAL;
8901
8902
8903 if (uhtid != 0x800) {
8904 jump = adapter->jump_tables[uhtid];
8905 if (!jump)
8906 return -EINVAL;
8907 if (!test_bit(loc - 1, jump->child_loc_map))
8908 return -EINVAL;
8909 clear_bit(loc - 1, jump->child_loc_map);
8910 }
8911
8912
8913 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
8914 jump = adapter->jump_tables[i];
8915 if (jump && jump->link_hdl == hdl) {
			/* Delete filters in the hardware in the child hash
			 * table associated with this link
			 */
8919 for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
8920 if (!test_bit(j, jump->child_loc_map))
8921 continue;
8922 spin_lock(&adapter->fdir_perfect_lock);
8923 err = ixgbe_update_ethtool_fdir_entry(adapter,
8924 NULL,
8925 j + 1);
8926 spin_unlock(&adapter->fdir_perfect_lock);
8927 clear_bit(j, jump->child_loc_map);
8928 }
8929
8930 kfree(jump->input);
8931 kfree(jump->mask);
8932 kfree(jump);
8933 adapter->jump_tables[i] = NULL;
8934 return err;
8935 }
8936 }
8937
8938 spin_lock(&adapter->fdir_perfect_lock);
8939 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
8940 spin_unlock(&adapter->fdir_perfect_lock);
8941 return err;
8942}
8943
8944static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
8945 struct tc_cls_u32_offload *cls)
8946{
8947 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8948
8949 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8950 return -EINVAL;
8951
	/* hash tables (a non-zero divisor) are not supported by this
	 * hardware offload, so reject such hnodes
	 */
8955 if (cls->hnode.divisor > 0)
8956 return -EINVAL;
8957
8958 set_bit(uhtid - 1, &adapter->tables);
8959 return 0;
8960}
8961
8962static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
8963 struct tc_cls_u32_offload *cls)
8964{
8965 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8966
8967 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8968 return -EINVAL;
8969
8970 clear_bit(uhtid - 1, &adapter->tables);
8971 return 0;
8972}
8973
8974#ifdef CONFIG_NET_CLS_ACT
8975struct upper_walk_data {
8976 struct ixgbe_adapter *adapter;
8977 u64 action;
8978 int ifindex;
8979 u8 queue;
8980};
8981
8982static int get_macvlan_queue(struct net_device *upper, void *_data)
8983{
8984 if (netif_is_macvlan(upper)) {
8985 struct macvlan_dev *dfwd = netdev_priv(upper);
8986 struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
8987 struct upper_walk_data *data = _data;
8988 struct ixgbe_adapter *adapter = data->adapter;
8989 int ifindex = data->ifindex;
8990
8991 if (vadapter && vadapter->netdev->ifindex == ifindex) {
8992 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
8993 data->action = data->queue;
8994 return 1;
8995 }
8996 }
8997
8998 return 0;
8999}
9000
9001static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
9002 u8 *queue, u64 *action)
9003{
9004 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
9005 unsigned int num_vfs = adapter->num_vfs, vf;
9006 struct upper_walk_data data;
9007 struct net_device *upper;
9008
9009
9010 for (vf = 0; vf < num_vfs; ++vf) {
9011 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
9012 if (upper->ifindex == ifindex) {
9013 *queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
9014 *action = vf + 1;
9015 *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
9016 return 0;
9017 }
9018 }
9019
9020
9021 data.adapter = adapter;
9022 data.ifindex = ifindex;
9023 data.action = 0;
9024 data.queue = 0;
9025 if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
9026 get_macvlan_queue, &data)) {
9027 *action = data.action;
9028 *queue = data.queue;
9029
9030 return 0;
9031 }
9032
9033 return -EINVAL;
9034}
9035
9036static int parse_tc_actions(struct ixgbe_adapter *adapter,
9037 struct tcf_exts *exts, u64 *action, u8 *queue)
9038{
9039 const struct tc_action *a;
9040 LIST_HEAD(actions);
9041 int err;
9042
9043 if (!tcf_exts_has_actions(exts))
9044 return -EINVAL;
9045
9046 tcf_exts_to_list(exts, &actions);
9047 list_for_each_entry(a, &actions, list) {
9048
9049
9050 if (is_tcf_gact_shot(a)) {
9051 *action = IXGBE_FDIR_DROP_QUEUE;
9052 *queue = IXGBE_FDIR_DROP_QUEUE;
9053 return 0;
9054 }
9055
9056
9057 if (is_tcf_mirred_egress_redirect(a)) {
9058 struct net_device *dev = tcf_mirred_dev(a);
9059
9060 if (!dev)
9061 return -EINVAL;
9062 err = handle_redirect_action(adapter, dev->ifindex, queue,
9063 action);
9064 if (err == 0)
9065 return err;
9066 }
9067 }
9068
9069 return -EINVAL;
9070}
9071#else
9072static int parse_tc_actions(struct ixgbe_adapter *adapter,
9073 struct tcf_exts *exts, u64 *action, u8 *queue)
9074{
9075 return -EINVAL;
9076}
9077#endif
9078
9079static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
9080 union ixgbe_atr_input *mask,
9081 struct tc_cls_u32_offload *cls,
9082 struct ixgbe_mat_field *field_ptr,
9083 struct ixgbe_nexthdr *nexthdr)
9084{
9085 int i, j, off;
9086 __be32 val, m;
9087 bool found_entry = false, found_jump_field = false;
9088
9089 for (i = 0; i < cls->knode.sel->nkeys; i++) {
9090 off = cls->knode.sel->keys[i].off;
9091 val = cls->knode.sel->keys[i].val;
9092 m = cls->knode.sel->keys[i].mask;
9093
9094 for (j = 0; field_ptr[j].val; j++) {
9095 if (field_ptr[j].off == off) {
9096 field_ptr[j].val(input, mask, val, m);
9097 input->filter.formatted.flow_type |=
9098 field_ptr[j].type;
9099 found_entry = true;
9100 break;
9101 }
9102 }
9103 if (nexthdr) {
9104 if (nexthdr->off == cls->knode.sel->keys[i].off &&
9105 nexthdr->val == cls->knode.sel->keys[i].val &&
9106 nexthdr->mask == cls->knode.sel->keys[i].mask)
9107 found_jump_field = true;
9108 else
9109 continue;
9110 }
9111 }
9112
9113 if (nexthdr && !found_jump_field)
9114 return -EINVAL;
9115
9116 if (!found_entry)
9117 return 0;
9118
9119 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
9120 IXGBE_ATR_L4TYPE_MASK;
9121
9122 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
9123 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
9124
9125 return 0;
9126}
9127
9128static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
9129 struct tc_cls_u32_offload *cls)
9130{
9131 __be16 protocol = cls->common.protocol;
9132 u32 loc = cls->knode.handle & 0xfffff;
9133 struct ixgbe_hw *hw = &adapter->hw;
9134 struct ixgbe_mat_field *field_ptr;
9135 struct ixgbe_fdir_filter *input = NULL;
9136 union ixgbe_atr_input *mask = NULL;
9137 struct ixgbe_jump_table *jump = NULL;
9138 int i, err = -EINVAL;
9139 u8 queue;
9140 u32 uhtid, link_uhtid;
9141
9142 uhtid = TC_U32_USERHTID(cls->knode.handle);
9143 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
9144
	/* The u32 classifier as offloaded here starts matching at the network
	 * header, so only IPv4 filters are supported; L2 matching is not.
	 */
9152 if (protocol != htons(ETH_P_IP))
9153 return err;
9154
9155 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
9156 e_err(drv, "Location out of range\n");
9157 return err;
9158 }
9159
	/* The cls_u32 filter graph is rooted at handle 0x800. The driver
	 * tracks link handles and the fields used to advance across each
	 * link so the u32 graph can be mapped onto the hardware parse graph
	 * described in ixgbe_model.h.
	 */
9167 if (uhtid == 0x800) {
9168 field_ptr = (adapter->jump_tables[0])->mat;
9169 } else {
9170 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9171 return err;
9172 if (!adapter->jump_tables[uhtid])
9173 return err;
9174 field_ptr = (adapter->jump_tables[uhtid])->mat;
9175 }
9176
9177 if (!field_ptr)
9178 return err;
9179
9180
	/* At this point we know the field_ptr is valid and need to either
	 * build cls_u32 link or attach filter. Because adding a link to
	 * a handle that does not exist is invalid and the same for adding
	 * rules to handles that don't exist.
	 */
9186 if (link_uhtid) {
9187 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
9188
9189 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
9190 return err;
9191
9192 if (!test_bit(link_uhtid - 1, &adapter->tables))
9193 return err;
9194
		/* Multiple filters as links to a single hash table are not
		 * supported. To add a new filter with link to key X delete
		 * the old link X then add a new link with key X.  The check
		 * for multiple links is done by testing link_hdl.
		 */
9200 if (adapter->jump_tables[link_uhtid] &&
9201 (adapter->jump_tables[link_uhtid])->link_hdl) {
9202 e_err(drv, "Link filter exists for link: %x\n",
9203 link_uhtid);
9204 return err;
9205 }
9206
9207 for (i = 0; nexthdr[i].jump; i++) {
9208 if (nexthdr[i].o != cls->knode.sel->offoff ||
9209 nexthdr[i].s != cls->knode.sel->offshift ||
9210 nexthdr[i].m != cls->knode.sel->offmask)
9211 return err;
9212
9213 jump = kzalloc(sizeof(*jump), GFP_KERNEL);
9214 if (!jump)
9215 return -ENOMEM;
9216 input = kzalloc(sizeof(*input), GFP_KERNEL);
9217 if (!input) {
9218 err = -ENOMEM;
9219 goto free_jump;
9220 }
9221 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9222 if (!mask) {
9223 err = -ENOMEM;
9224 goto free_input;
9225 }
9226 jump->input = input;
9227 jump->mask = mask;
9228 jump->link_hdl = cls->knode.handle;
9229
9230 err = ixgbe_clsu32_build_input(input, mask, cls,
9231 field_ptr, &nexthdr[i]);
9232 if (!err) {
9233 jump->mat = nexthdr[i].jump;
9234 adapter->jump_tables[link_uhtid] = jump;
9235 break;
9236 }
9237 }
9238 return 0;
9239 }
9240
9241 input = kzalloc(sizeof(*input), GFP_KERNEL);
9242 if (!input)
9243 return -ENOMEM;
9244 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9245 if (!mask) {
9246 err = -ENOMEM;
9247 goto free_input;
9248 }
9249
9250 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
9251 if ((adapter->jump_tables[uhtid])->input)
9252 memcpy(input, (adapter->jump_tables[uhtid])->input,
9253 sizeof(*input));
9254 if ((adapter->jump_tables[uhtid])->mask)
9255 memcpy(mask, (adapter->jump_tables[uhtid])->mask,
9256 sizeof(*mask));
9257
		/* Lookup in all child hash tables if this location is already
		 * filled with a filter
		 */
9261 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9262 struct ixgbe_jump_table *link = adapter->jump_tables[i];
9263
9264 if (link && (test_bit(loc - 1, link->child_loc_map))) {
9265 e_err(drv, "Filter exists in location: %x\n",
9266 loc);
9267 err = -EINVAL;
9268 goto err_out;
9269 }
9270 }
9271 }
9272 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
9273 if (err)
9274 goto err_out;
9275
9276 err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
9277 &queue);
9278 if (err < 0)
9279 goto err_out;
9280
9281 input->sw_idx = loc;
9282
9283 spin_lock(&adapter->fdir_perfect_lock);
9284
9285 if (hlist_empty(&adapter->fdir_filter_list)) {
9286 memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
9287 err = ixgbe_fdir_set_input_mask_82599(hw, mask);
9288 if (err)
9289 goto err_out_w_lock;
9290 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
9291 err = -EINVAL;
9292 goto err_out_w_lock;
9293 }
9294
9295 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
9296 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
9297 input->sw_idx, queue);
9298 if (!err)
9299 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
9300 spin_unlock(&adapter->fdir_perfect_lock);
9301
9302 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
9303 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
9304
9305 kfree(mask);
9306 return err;
9307err_out_w_lock:
9308 spin_unlock(&adapter->fdir_perfect_lock);
9309err_out:
9310 kfree(mask);
9311free_input:
9312 kfree(input);
9313free_jump:
9314 kfree(jump);
9315 return err;
9316}
9317
9318static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
9319 struct tc_cls_u32_offload *cls_u32)
9320{
9321 switch (cls_u32->command) {
9322 case TC_CLSU32_NEW_KNODE:
9323 case TC_CLSU32_REPLACE_KNODE:
9324 return ixgbe_configure_clsu32(adapter, cls_u32);
9325 case TC_CLSU32_DELETE_KNODE:
9326 return ixgbe_delete_clsu32(adapter, cls_u32);
9327 case TC_CLSU32_NEW_HNODE:
9328 case TC_CLSU32_REPLACE_HNODE:
9329 return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32);
9330 case TC_CLSU32_DELETE_HNODE:
9331 return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32);
9332 default:
9333 return -EOPNOTSUPP;
9334 }
9335}
9336
9337static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9338 void *cb_priv)
9339{
9340 struct ixgbe_adapter *adapter = cb_priv;
9341
9342 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
9343 return -EOPNOTSUPP;
9344
9345 switch (type) {
9346 case TC_SETUP_CLSU32:
9347 return ixgbe_setup_tc_cls_u32(adapter, type_data);
9348 default:
9349 return -EOPNOTSUPP;
9350 }
9351}
9352
9353static int ixgbe_setup_tc_block(struct net_device *dev,
9354 struct tc_block_offload *f)
9355{
9356 struct ixgbe_adapter *adapter = netdev_priv(dev);
9357
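	/* only ingress classifier blocks can be offloaded */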
9358 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9359 return -EOPNOTSUPP;
9360
9361 switch (f->command) {
9362 case TC_BLOCK_BIND:
9363 return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
9364 adapter, adapter);
9365 case TC_BLOCK_UNBIND:
9366 tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
9367 adapter);
9368 return 0;
9369 default:
9370 return -EOPNOTSUPP;
9371 }
9372}
9373
9374static int ixgbe_setup_tc_mqprio(struct net_device *dev,
9375 struct tc_mqprio_qopt *mqprio)
9376{
9377 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9378 return ixgbe_setup_tc(dev, mqprio->num_tc);
9379}
9380
9381static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
9382 void *type_data)
9383{
9384 switch (type) {
9385 case TC_SETUP_BLOCK:
9386 return ixgbe_setup_tc_block(dev, type_data);
9387 case TC_SETUP_QDISC_MQPRIO:
9388 return ixgbe_setup_tc_mqprio(dev, type_data);
9389 default:
9390 return -EOPNOTSUPP;
9391 }
9392}
9393
9394#ifdef CONFIG_PCI_IOV
9395void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
9396{
9397 struct net_device *netdev = adapter->netdev;
9398
9399 rtnl_lock();
9400 ixgbe_setup_tc(netdev, adapter->hw_tcs);
9401 rtnl_unlock();
9402}
9403
9404#endif
9405void ixgbe_do_reset(struct net_device *netdev)
9406{
9407 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9408
9409 if (netif_running(netdev))
9410 ixgbe_reinit_locked(adapter);
9411 else
9412 ixgbe_reset(adapter);
9413}
9414
9415static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
9416 netdev_features_t features)
9417{
9418 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9419
	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
9421 if (!(features & NETIF_F_RXCSUM))
9422 features &= ~NETIF_F_LRO;
9423
	/* Turn off LRO if not RSC capable */
9425 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
9426 features &= ~NETIF_F_LRO;
9427
9428 return features;
9429}
9430
9431static int ixgbe_set_features(struct net_device *netdev,
9432 netdev_features_t features)
9433{
9434 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9435 netdev_features_t changed = netdev->features ^ features;
9436 bool need_reset = false;
9437
	/* Make sure RSC matches LRO, reset if change */
9439 if (!(features & NETIF_F_LRO)) {
9440 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9441 need_reset = true;
9442 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
9443 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
9444 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
9445 if (adapter->rx_itr_setting == 1 ||
9446 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
9447 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
9448 need_reset = true;
9449 } else if ((changed ^ features) & NETIF_F_LRO) {
9450 e_info(probe, "rx-usecs set too low, "
9451 "disabling RSC\n");
9452 }
9453 }
9454
	/*
	 * Check if Flow Director n-tuple support or hw_tc support was
	 * enabled or disabled.  If the state changed, we need to reset.
	 */
9459 if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
		/* turn off ATR, enable perfect filters and reset */
9461 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
9462 need_reset = true;
9463
9464 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
9465 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9466 } else {
		/* turn off perfect filters, enable ATR and reset */
9468 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
9469 need_reset = true;
9470
9471 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9472
		/* We cannot enable ATR if SR-IOV is enabled */
9474 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
		/* We cannot enable ATR if we have 2 or more traffic classes */
9476 (adapter->hw_tcs > 1) ||
		/* We cannot enable ATR if RSS is disabled */
9478 (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
		/* A sample rate of 0 indicates ATR disabled */
9480 (!adapter->atr_sample_rate))
9481 ;
9482 else
9483 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
9484 }
9485
9486 if (changed & NETIF_F_RXALL)
9487 need_reset = true;
9488
9489 netdev->features = features;
9490
9491 if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
9492 if (features & NETIF_F_RXCSUM) {
9493 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9494 } else {
9495 u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9496
9497 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9498 }
9499 }
9500
9501 if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
9502 if (features & NETIF_F_RXCSUM) {
9503 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9504 } else {
9505 u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9506
9507 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9508 }
9509 }
9510
9511 if (need_reset)
9512 ixgbe_do_reset(netdev);
9513 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
9514 NETIF_F_HW_VLAN_CTAG_FILTER))
9515 ixgbe_set_rx_mode(netdev);
9516
9517 return 0;
9518}
9519
/**
 * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
 * @dev: The port's netdev
 * @ti: Tunnel endpoint information
 **/
9525static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
9526 struct udp_tunnel_info *ti)
9527{
9528 struct ixgbe_adapter *adapter = netdev_priv(dev);
9529 struct ixgbe_hw *hw = &adapter->hw;
9530 __be16 port = ti->port;
9531 u32 port_shift = 0;
9532 u32 reg;
9533
9534 if (ti->sa_family != AF_INET)
9535 return;
9536
9537 switch (ti->type) {
9538 case UDP_TUNNEL_TYPE_VXLAN:
9539 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9540 return;
9541
9542 if (adapter->vxlan_port == port)
9543 return;
9544
9545 if (adapter->vxlan_port) {
9546 netdev_info(dev,
9547 "VXLAN port %d set, not adding port %d\n",
9548 ntohs(adapter->vxlan_port),
9549 ntohs(port));
9550 return;
9551 }
9552
9553 adapter->vxlan_port = port;
9554 break;
9555 case UDP_TUNNEL_TYPE_GENEVE:
9556 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9557 return;
9558
9559 if (adapter->geneve_port == port)
9560 return;
9561
9562 if (adapter->geneve_port) {
9563 netdev_info(dev,
9564 "GENEVE port %d set, not adding port %d\n",
9565 ntohs(adapter->geneve_port),
9566 ntohs(port));
9567 return;
9568 }
9569
9570 port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
9571 adapter->geneve_port = port;
9572 break;
9573 default:
9574 return;
9575 }
9576
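	/* record the new tunnel port in VXLANCTRL; GENEVE uses the shifted
	 * field selected above, VXLAN uses the low field (port_shift == 0)
	 */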
9577 reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
9578 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
9579}
9580
/**
 * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
 * @dev: The port's netdev
 * @ti: Tunnel endpoint information
 **/
9586static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
9587 struct udp_tunnel_info *ti)
9588{
9589 struct ixgbe_adapter *adapter = netdev_priv(dev);
9590 u32 port_mask;
9591
9592 if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
9593 ti->type != UDP_TUNNEL_TYPE_GENEVE)
9594 return;
9595
9596 if (ti->sa_family != AF_INET)
9597 return;
9598
9599 switch (ti->type) {
9600 case UDP_TUNNEL_TYPE_VXLAN:
9601 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9602 return;
9603
9604 if (adapter->vxlan_port != ti->port) {
9605 netdev_info(dev, "VXLAN port %d not found\n",
9606 ntohs(ti->port));
9607 return;
9608 }
9609
9610 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9611 break;
9612 case UDP_TUNNEL_TYPE_GENEVE:
9613 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9614 return;
9615
9616 if (adapter->geneve_port != ti->port) {
9617 netdev_info(dev, "GENEVE port %d not found\n",
9618 ntohs(ti->port));
9619 return;
9620 }
9621
9622 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9623 break;
9624 default:
9625 return;
9626 }
9627
9628 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9629 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9630}
9631
9632static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9633 struct net_device *dev,
9634 const unsigned char *addr, u16 vid,
9635 u16 flags)
9636{
	/* guarantee we can provide a unique filter for the unicast address */
9638 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
9639 struct ixgbe_adapter *adapter = netdev_priv(dev);
9640 u16 pool = VMDQ_P(0);
9641
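		/* refuse the add if the default pool is out of RAR entries */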
9642 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
9643 return -ENOMEM;
9644 }
9645
9646 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
9647}
9648
/**
 * ixgbe_configure_bridge_mode - set various bridge modes
 * @adapter: the private structure
 * @mode: requested bridge mode
 *
 * Configure some settings required for various bridge modes.
 **/
9656static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
9657 __u16 mode)
9658{
9659 struct ixgbe_hw *hw = &adapter->hw;
9660 unsigned int p, num_pools;
9661 u32 vmdctl;
9662
9663 switch (mode) {
9664 case BRIDGE_MODE_VEPA:
		/* disable Tx loopback, rely on switch hairpin mode */
9666 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
9667
		/* must enable Rx switching replication to allow multicast
		 * packet reception on all VFs, and to enable source address
		 * pruning.
		 */
9672 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9673 vmdctl |= IXGBE_VT_CTL_REPLEN;
9674 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9675
		/* enable Rx source address pruning. Note, this requires
		 * replication to be enabled or else it does nothing.
		 */
9679 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9680 for (p = 0; p < num_pools; p++) {
9681 if (hw->mac.ops.set_source_address_pruning)
9682 hw->mac.ops.set_source_address_pruning(hw,
9683 true,
9684 p);
9685 }
9686 break;
9687 case BRIDGE_MODE_VEB:
		/* enable Tx loopback for internal VF/PF communication */
9689 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
9690 IXGBE_PFDTXGSWC_VT_LBEN);
9691
		/* disable Rx switching replication unless we have SR-IOV
		 * virtual functions
		 */
9695 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9696 if (!adapter->num_vfs)
9697 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
9698 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9699
		/* disable Rx source address pruning, since we don't expect to
		 * be receiving external loopback of our transmitted frames.
		 */
9703 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9704 for (p = 0; p < num_pools; p++) {
9705 if (hw->mac.ops.set_source_address_pruning)
9706 hw->mac.ops.set_source_address_pruning(hw,
9707 false,
9708 p);
9709 }
9710 break;
9711 default:
9712 return -EINVAL;
9713 }
9714
9715 adapter->bridge_mode = mode;
9716
9717 e_info(drv, "enabling bridge mode: %s\n",
9718 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9719
9720 return 0;
9721}
9722
9723static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
9724 struct nlmsghdr *nlh, u16 flags)
9725{
9726 struct ixgbe_adapter *adapter = netdev_priv(dev);
9727 struct nlattr *attr, *br_spec;
9728 int rem;
9729
9730 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9731 return -EOPNOTSUPP;
9732
9733 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9734 if (!br_spec)
9735 return -EINVAL;
9736
9737 nla_for_each_nested(attr, br_spec, rem) {
9738 int status;
9739 __u16 mode;
9740
9741 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9742 continue;
9743
9744 if (nla_len(attr) < sizeof(mode))
9745 return -EINVAL;
9746
9747 mode = nla_get_u16(attr);
9748 status = ixgbe_configure_bridge_mode(adapter, mode);
9749 if (status)
9750 return status;
9751
9752 break;
9753 }
9754
9755 return 0;
9756}
9757
9758static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9759 struct net_device *dev,
9760 u32 filter_mask, int nlflags)
9761{
9762 struct ixgbe_adapter *adapter = netdev_priv(dev);
9763
9764 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9765 return 0;
9766
9767 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
9768 adapter->bridge_mode, 0, 0, nlflags,
9769 filter_mask, NULL);
9770}
9771
9772static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
9773{
9774 struct ixgbe_fwd_adapter *fwd_adapter = NULL;
9775 struct ixgbe_adapter *adapter = netdev_priv(pdev);
9776 int used_pools = adapter->num_vfs + adapter->num_rx_pools;
9777 int tcs = adapter->hw_tcs ? : 1;
9778 unsigned int limit;
9779 int pool, err;
9780
	/* Hardware has a limited number of available pools. Each VF, and the
	 * PF require a pool. Check to ensure we don't attempt to use more
	 * than the available number of pools.
	 */
9785 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
9786 return ERR_PTR(-EINVAL);
9787
9788 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
9789 adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
9790 (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
9791 return ERR_PTR(-EBUSY);
9792
9793 fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
9794 if (!fwd_adapter)
9795 return ERR_PTR(-ENOMEM);
9796
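	/* claim the first free pool in the forwarding bitmask and grow the
	 * VMDq limit to cover the highest pool now in use
	 */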
9797 pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9798 set_bit(pool, adapter->fwd_bitmask);
9799 limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
9800
	/* Enable VMDq flag so device will be set in VM mode */
9802 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
9803 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
9804
9805 fwd_adapter->pool = pool;
9806 fwd_adapter->real_adapter = adapter;
9807
9808
9809 err = ixgbe_setup_tc(pdev, adapter->hw_tcs);
9810
9811 if (!err && netif_running(pdev))
9812 err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
9813
9814 if (!err)
9815 return fwd_adapter;
9816
9817
9818 netdev_info(pdev,
9819 "%s: dfwd hardware acceleration failed\n", vdev->name);
9820 clear_bit(pool, adapter->fwd_bitmask);
9821 kfree(fwd_adapter);
9822 return ERR_PTR(err);
9823}
9824
9825static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
9826{
9827 struct ixgbe_fwd_adapter *accel = priv;
9828 struct ixgbe_adapter *adapter = accel->real_adapter;
9829 unsigned int rxbase = accel->rx_base_queue;
9830 unsigned int limit, i;
9831
	/* delete the unicast filter associated with the offloaded interface */
9833 ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
9834 VMDQ_P(accel->pool));
9835
	/* disable the ability to receive packets for this pool */
9837 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
9838
	/* Allow remaining Rx packets to get flushed out of the
	 * Rx FIFO before we drop the netdev for the ring.
	 */
9842 usleep_range(10000, 20000);
9843
9844 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
9845 struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
9846 struct ixgbe_q_vector *qv = ring->q_vector;
9847
		/* Make sure we aren't processing any packets and clear
		 * netdev to shut down the ring.
		 */
9851 if (netif_running(adapter->netdev))
9852 napi_synchronize(&qv->napi);
9853 ring->netdev = NULL;
9854 }
9855
9856 clear_bit(accel->pool, adapter->fwd_bitmask);
9857 limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
9858 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
9859
	/* go back to full RSS if we're done with our VMQs */
9861 if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
9862 int rss = min_t(int, ixgbe_max_rss_indices(adapter),
9863 num_online_cpus());
9864
9865 adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
9866 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
9867 adapter->ring_feature[RING_F_RSS].limit = rss;
9868 }
9869
9870 ixgbe_setup_tc(pdev, adapter->hw_tcs);
9871 netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
9872 accel->pool, adapter->num_rx_pools,
9873 accel->rx_base_queue,
9874 accel->rx_base_queue +
9875 adapter->num_rx_queues_per_pool);
9876 kfree(accel);
9877}
9878
9879#define IXGBE_MAX_MAC_HDR_LEN 127
9880#define IXGBE_MAX_NETWORK_HDR_LEN 511
9881
9882static netdev_features_t
9883ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
9884 netdev_features_t features)
9885{
9886 unsigned int network_hdr_len, mac_hdr_len;
9887
	/* Make certain the headers can be described by a context descriptor */
9889 mac_hdr_len = skb_network_header(skb) - skb->data;
9890 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
9891 return features & ~(NETIF_F_HW_CSUM |
9892 NETIF_F_SCTP_CRC |
9893 NETIF_F_HW_VLAN_CTAG_TX |
9894 NETIF_F_TSO |
9895 NETIF_F_TSO6);
9896
9897 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
9898 if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
9899 return features & ~(NETIF_F_HW_CSUM |
9900 NETIF_F_SCTP_CRC |
9901 NETIF_F_TSO |
9902 NETIF_F_TSO6);
9903
	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
9907 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
9908 features &= ~NETIF_F_TSO;
9909
9910#ifdef CONFIG_XFRM_OFFLOAD
	/* IPsec offload doesn't get along well with others *yet* */
9912 if (skb->sp)
9913 features &= ~(NETIF_F_TSO | NETIF_F_HW_CSUM);
9914#endif
9915
9916 return features;
9917}
9918
9919static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
9920{
9921 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
9922 struct ixgbe_adapter *adapter = netdev_priv(dev);
9923 struct bpf_prog *old_prog;
9924
9925 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
9926 return -EINVAL;
9927
9928 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
9929 return -EINVAL;
9930
	/* verify ixgbe ring attributes are sufficient for XDP */
9932 for (i = 0; i < adapter->num_rx_queues; i++) {
9933 struct ixgbe_ring *ring = adapter->rx_ring[i];
9934
9935 if (ring_is_rsc_enabled(ring))
9936 return -EINVAL;
9937
9938 if (frame_size > ixgbe_rx_bufsz(ring))
9939 return -EINVAL;
9940 }
9941
9942 if (nr_cpu_ids > MAX_XDP_QUEUES)
9943 return -ENOMEM;
9944
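	/* atomically install the new program; the previous program (if any)
	 * is released once the rings have been updated below
	 */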
9945 old_prog = xchg(&adapter->xdp_prog, prog);
9946
	/* If transitioning XDP modes reconfigure rings */
9948 if (!!prog != !!old_prog) {
9949 int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
9950
9951 if (err) {
9952 rcu_assign_pointer(adapter->xdp_prog, old_prog);
9953 return -EINVAL;
9954 }
9955 } else {
9956 for (i = 0; i < adapter->num_rx_queues; i++)
9957 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
9958 }
9959
9960 if (old_prog)
9961 bpf_prog_put(old_prog);
9962
9963 return 0;
9964}
9965
9966static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
9967{
9968 struct ixgbe_adapter *adapter = netdev_priv(dev);
9969
9970 switch (xdp->command) {
9971 case XDP_SETUP_PROG:
9972 return ixgbe_xdp_setup(dev, xdp->prog);
9973 case XDP_QUERY_PROG:
9974 xdp->prog_attached = !!(adapter->xdp_prog);
9975 xdp->prog_id = adapter->xdp_prog ?
9976 adapter->xdp_prog->aux->id : 0;
9977 return 0;
9978 default:
9979 return -EINVAL;
9980 }
9981}
9982
9983static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
9984{
9985 struct ixgbe_adapter *adapter = netdev_priv(dev);
9986 struct ixgbe_ring *ring;
9987 int err;
9988
9989 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
9990 return -ENETDOWN;
9991
	/* During program transitions it's possible adapter->xdp_prog is
	 * assigned but the ring has not been configured yet. In this case
	 * simply abort the transmit.
	 */
9995 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
9996 if (unlikely(!ring))
9997 return -ENXIO;
9998
9999 err = ixgbe_xmit_xdp_ring(adapter, xdp);
10000 if (err != IXGBE_XDP_TX)
10001 return -ENOSPC;
10002
10003 return 0;
10004}
10005
10006static void ixgbe_xdp_flush(struct net_device *dev)
10007{
10008 struct ixgbe_adapter *adapter = netdev_priv(dev);
10009 struct ixgbe_ring *ring;
10010
	/* During program transitions it's possible adapter->xdp_prog is
	 * assigned but the ring has not been configured yet. In this case
	 * simply abort the flush.
	 */
10014 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
10015 return;
10016
10017 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
10018 if (unlikely(!ring))
10019 return;
10020
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 */
10024 wmb();
10025 writel(ring->next_to_use, ring->tail);
10026
10027 return;
10028}
10029
10030static const struct net_device_ops ixgbe_netdev_ops = {
10031 .ndo_open = ixgbe_open,
10032 .ndo_stop = ixgbe_close,
10033 .ndo_start_xmit = ixgbe_xmit_frame,
10034 .ndo_select_queue = ixgbe_select_queue,
10035 .ndo_set_rx_mode = ixgbe_set_rx_mode,
10036 .ndo_validate_addr = eth_validate_addr,
10037 .ndo_set_mac_address = ixgbe_set_mac,
10038 .ndo_change_mtu = ixgbe_change_mtu,
10039 .ndo_tx_timeout = ixgbe_tx_timeout,
10040 .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
10041 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
10042 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
10043 .ndo_do_ioctl = ixgbe_ioctl,
10044 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
10045 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
10046 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
10047 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
10048 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
10049 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
10050 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
10051 .ndo_get_stats64 = ixgbe_get_stats64,
10052 .ndo_setup_tc = __ixgbe_setup_tc,
10053#ifdef CONFIG_NET_POLL_CONTROLLER
10054 .ndo_poll_controller = ixgbe_netpoll,
10055#endif
10056#ifdef IXGBE_FCOE
10057 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
10058 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
10059 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
10060 .ndo_fcoe_enable = ixgbe_fcoe_enable,
10061 .ndo_fcoe_disable = ixgbe_fcoe_disable,
10062 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
10063 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
10064#endif
10065 .ndo_set_features = ixgbe_set_features,
10066 .ndo_fix_features = ixgbe_fix_features,
10067 .ndo_fdb_add = ixgbe_ndo_fdb_add,
10068 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
10069 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
10070 .ndo_dfwd_add_station = ixgbe_fwd_add,
10071 .ndo_dfwd_del_station = ixgbe_fwd_del,
10072 .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
10073 .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
10074 .ndo_features_check = ixgbe_features_check,
10075 .ndo_bpf = ixgbe_xdp,
10076 .ndo_xdp_xmit = ixgbe_xdp_xmit,
10077 .ndo_xdp_flush = ixgbe_xdp_flush,
10078};
10079
/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * This function enumerates the physical functions co-located on a single
 * slot, in order to determine how many ports a device has.  This is most
 * useful in determining the required GT/s of PCIe bandwidth necessary for
 * optimal performance.
 **/
10089static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
10090{
10091 struct pci_dev *entry, *pdev = adapter->pdev;
10092 int physfns = 0;
10093
	/* Some cards can not use the generic count PCIe functions method,
	 * because they are behind a parent switch, so we hardcode these with
	 * the known amount of functions for this switch.
	 */
10098 if (ixgbe_pcie_from_parent(&adapter->hw))
10099 physfns = 4;
10100
10101 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
		/* don't count virtual functions */
10103 if (entry->is_virtfn)
10104 continue;
10105
		/* When the devices on the bus don't all match our device ID,
		 * driver may not be loaded on them
		 * (hotplug of a device that changed the ID of an adjacent port).
		 * In that case we cannot count ports reliably, so report the
		 * failure to the caller.
		 */
10112 if ((entry->vendor != pdev->vendor) ||
10113 (entry->device != pdev->device))
10114 return -1;
10115
10116 physfns++;
10117 }
10118
10119 return physfns;
10120}
10121
/**
 * ixgbe_wol_supported - Check whether device supports WoL
 * @adapter: the adapter private structure
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * This function is used by probe and ethtool to determine
 * which devices have WoL support
 **/
10132bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
10133 u16 subdevice_id)
10134{
10135 struct ixgbe_hw *hw = &adapter->hw;
10136 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
10137
	/* WOL not supported on 82598 */
10139 if (hw->mac.type == ixgbe_mac_82598EB)
10140 return false;
10141
	/* check eeprom to see if WOL is enabled for X540 and newer */
10143 if (hw->mac.type >= ixgbe_mac_X540) {
10144 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
10145 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
10146 (hw->bus.func == 0)))
10147 return true;
10148 }
10149
	/* For 82599, WOL support depends on the device and subdevice IDs */
10151 switch (device_id) {
10152 case IXGBE_DEV_ID_82599_SFP:
		/* only these subdevices could support WOL */
10154 switch (subdevice_id) {
10155 case IXGBE_SUBDEV_ID_82599_560FLR:
10156 case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
10157 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
10158 case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
			/* only support first port */
10160 if (hw->bus.func != 0)
10161 break;
			/* fall through */
10163 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
10164 case IXGBE_SUBDEV_ID_82599_SFP:
10165 case IXGBE_SUBDEV_ID_82599_RNDC:
10166 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
10167 case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
10168 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
10169 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
10170 return true;
10171 }
10172 break;
10173 case IXGBE_DEV_ID_82599EN_SFP:
		/* only this subdevice supports WOL */
10175 switch (subdevice_id) {
10176 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
10177 return true;
10178 }
10179 break;
10180 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* all except this subdevice support WOL */
10182 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
10183 return true;
10184 break;
10185 case IXGBE_DEV_ID_82599_KX4:
10186 return true;
10187 default:
10188 break;
10189 }
10190
10191 return false;
10192}
10193
/**
 * ixgbe_set_fw_version - Set FW version
 * @adapter: the adapter private structure
 *
 * This function is used by probe and ethtool to determine the FW version to
 * format to display.  The FW version is taken from the EEPROM/NVM.
 **/
10201static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
10202{
10203 struct ixgbe_hw *hw = &adapter->hw;
10204 struct ixgbe_nvm_version nvm_ver;
10205
10206 ixgbe_get_oem_prod_version(hw, &nvm_ver);
10207 if (nvm_ver.oem_valid) {
10208 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10209 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
10210 nvm_ver.oem_release);
10211 return;
10212 }
10213
10214 ixgbe_get_etk_id(hw, &nvm_ver);
10215 ixgbe_get_orom_version(hw, &nvm_ver);
10216
10217 if (nvm_ver.or_valid) {
10218 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10219 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
10220 nvm_ver.or_build, nvm_ver.or_patch);
10221 return;
10222 }
10223
10224
10225 snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
10226 "0x%08x", nvm_ver.etk_id);
10227}
10228
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
10240static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10241{
10242 struct net_device *netdev;
10243 struct ixgbe_adapter *adapter = NULL;
10244 struct ixgbe_hw *hw;
10245 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
10246 int i, err, pci_using_dac, expected_gts;
10247 unsigned int indices = MAX_TX_QUEUES;
10248 u8 part_str[IXGBE_PBANUM_LENGTH];
10249 bool disable_dev = false;
10250#ifdef IXGBE_FCOE
10251 u16 device_caps;
10252#endif
10253 u32 eec;
10254
	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
10258 if (pdev->is_virtfn) {
10259 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
10260 pci_name(pdev), pdev->vendor, pdev->device);
10261 return -EINVAL;
10262 }
10263
10264 err = pci_enable_device_mem(pdev);
10265 if (err)
10266 return err;
10267
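	/* prefer 64-bit DMA, fall back to a 32-bit mask if it is unavailable */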
10268 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
10269 pci_using_dac = 1;
10270 } else {
10271 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10272 if (err) {
10273 dev_err(&pdev->dev,
10274 "No usable DMA configuration, aborting\n");
10275 goto err_dma;
10276 }
10277 pci_using_dac = 0;
10278 }
10279
10280 err = pci_request_mem_regions(pdev, ixgbe_driver_name);
10281 if (err) {
10282 dev_err(&pdev->dev,
10283 "pci_request_selected_regions failed 0x%x\n", err);
10284 goto err_pci_reg;
10285 }
10286
10287 pci_enable_pcie_error_reporting(pdev);
10288
10289 pci_set_master(pdev);
10290 pci_save_state(pdev);
10291
10292 if (ii->mac == ixgbe_mac_82598EB) {
10293#ifdef CONFIG_IXGBE_DCB
		/* 8 TC w/ 4 queues per TC */
10295 indices = 4 * MAX_TRAFFIC_CLASS;
10296#else
10297 indices = IXGBE_MAX_RSS_INDICES;
10298#endif
10299 }
10300
10301 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
10302 if (!netdev) {
10303 err = -ENOMEM;
10304 goto err_alloc_etherdev;
10305 }
10306
10307 SET_NETDEV_DEV(netdev, &pdev->dev);
10308
10309 adapter = netdev_priv(netdev);
10310
10311 adapter->netdev = netdev;
10312 adapter->pdev = pdev;
10313 hw = &adapter->hw;
10314 hw->back = adapter;
10315 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
10316
10317 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
10318 pci_resource_len(pdev, 0));
10319 adapter->io_addr = hw->hw_addr;
10320 if (!hw->hw_addr) {
10321 err = -EIO;
10322 goto err_ioremap;
10323 }
10324
10325 netdev->netdev_ops = &ixgbe_netdev_ops;
10326 ixgbe_set_ethtool_ops(netdev);
10327 netdev->watchdog_timeo = 5 * HZ;
10328 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
10329
10330
10331 hw->mac.ops = *ii->mac_ops;
10332 hw->mac.type = ii->mac;
10333 hw->mvals = ii->mvals;
10334 if (ii->link_ops)
10335 hw->link.ops = *ii->link_ops;
10336
10337
10338 hw->eeprom.ops = *ii->eeprom_ops;
10339 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
10340 if (ixgbe_removed(hw->hw_addr)) {
10341 err = -EIO;
10342 goto err_ioremap;
10343 }
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
10345 if (!(eec & BIT(8)))
10346 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
10347
10348
10349 hw->phy.ops = *ii->phy_ops;
10350 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
10351
10352 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
10353 hw->phy.mdio.mmds = 0;
10354 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
10355 hw->phy.mdio.dev = netdev;
10356 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
10357 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
10358
10359
10360 err = ixgbe_sw_init(adapter, ii);
10361 if (err)
10362 goto err_sw_init;
10363
	/* Make sure the SWFW semaphore is in a valid state */
10365 if (hw->mac.ops.init_swfw_sync)
10366 hw->mac.ops.init_swfw_sync(hw);
10367
	/* Make it possible for the adapter to be woken up via WOL */
10369 switch (adapter->hw.mac.type) {
10370 case ixgbe_mac_82599EB:
10371 case ixgbe_mac_X540:
10372 case ixgbe_mac_X550:
10373 case ixgbe_mac_X550EM_x:
10374 case ixgbe_mac_x550em_a:
10375 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10376 break;
10377 default:
10378 break;
10379 }
10380
	/*
	 * If there is a fan on this device and it has failed, log
	 * the failure.
	 */
10385 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
10386 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
10387 if (esdp & IXGBE_ESDP_SDP1)
10388 e_crit(probe, "Fan has stopped, replace the adapter\n");
10389 }
10390
10391 if (allow_unsupported_sfp)
10392 hw->allow_unsupported_sfp = allow_unsupported_sfp;
10393
10394
10395 hw->phy.reset_if_overtemp = true;
10396 err = hw->mac.ops.reset_hw(hw);
10397 hw->phy.reset_if_overtemp = false;
10398 ixgbe_set_eee_capable(adapter);
10399 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
10400 err = 0;
10401 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
10402 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
10403 e_dev_err("Reload the driver after installing a supported module.\n");
10404 goto err_sw_init;
10405 } else if (err) {
10406 e_dev_err("HW Init failed: %d\n", err);
10407 goto err_sw_init;
10408 }
10409
10410#ifdef CONFIG_PCI_IOV
	/* SR-IOV is not supported on the 82598 */
10412 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
10413 goto skip_sriov;
10414
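	/* set up the PF-VF mailbox and cap the number of VFs the PF will
	 * expose before enabling SR-IOV
	 */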
10415 ixgbe_init_mbx_params_pf(hw);
10416 hw->mbx.ops = ii->mbx_ops;
10417 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
10418 ixgbe_enable_sriov(adapter, max_vfs);
10419skip_sriov:
10420
10421#endif
10422 netdev->features = NETIF_F_SG |
10423 NETIF_F_TSO |
10424 NETIF_F_TSO6 |
10425 NETIF_F_RXHASH |
10426 NETIF_F_RXCSUM |
10427 NETIF_F_HW_CSUM;
10428
10429#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
10430 NETIF_F_GSO_GRE_CSUM | \
10431 NETIF_F_GSO_IPXIP4 | \
10432 NETIF_F_GSO_IPXIP6 | \
10433 NETIF_F_GSO_UDP_TUNNEL | \
10434 NETIF_F_GSO_UDP_TUNNEL_CSUM)
10435
10436 netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
10437 netdev->features |= NETIF_F_GSO_PARTIAL |
10438 IXGBE_GSO_PARTIAL_FEATURES;
10439
10440 if (hw->mac.type >= ixgbe_mac_82599EB)
10441 netdev->features |= NETIF_F_SCTP_CRC;
10442
	/* copy netdev features into list of user selectable features */
10444 netdev->hw_features |= netdev->features |
10445 NETIF_F_HW_VLAN_CTAG_FILTER |
10446 NETIF_F_HW_VLAN_CTAG_RX |
10447 NETIF_F_HW_VLAN_CTAG_TX |
10448 NETIF_F_RXALL |
10449 NETIF_F_HW_L2FW_DOFFLOAD;
10450
10451 if (hw->mac.type >= ixgbe_mac_82599EB)
10452 netdev->hw_features |= NETIF_F_NTUPLE |
10453 NETIF_F_HW_TC;
10454
10455 if (pci_using_dac)
10456 netdev->features |= NETIF_F_HIGHDMA;
10457
10458 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
10459 netdev->hw_enc_features |= netdev->vlan_features;
10460 netdev->mpls_features |= NETIF_F_SG |
10461 NETIF_F_TSO |
10462 NETIF_F_TSO6 |
10463 NETIF_F_HW_CSUM;
10464 netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
10465
10466
10467 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
10468 NETIF_F_HW_VLAN_CTAG_RX |
10469 NETIF_F_HW_VLAN_CTAG_TX;
10470
10471 netdev->priv_flags |= IFF_UNICAST_FLT;
10472 netdev->priv_flags |= IFF_SUPP_NOFCS;
10473
	/* MTU range: 68 - 9710 */
10475 netdev->min_mtu = ETH_MIN_MTU;
10476 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
10477
10478#ifdef CONFIG_IXGBE_DCB
10479 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
10480 netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
10481#endif
10482
10483#ifdef IXGBE_FCOE
10484 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
10485 unsigned int fcoe_l;
10486
10487 if (hw->mac.ops.get_device_caps) {
10488 hw->mac.ops.get_device_caps(hw, &device_caps);
10489 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
10490 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
10491 }
10492
10493
10494 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
10495 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
10496
10497 netdev->features |= NETIF_F_FSO |
10498 NETIF_F_FCOE_CRC;
10499
10500 netdev->vlan_features |= NETIF_F_FSO |
10501 NETIF_F_FCOE_CRC |
10502 NETIF_F_FCOE_MTU;
10503 }
10504#endif
10505 ixgbe_init_ipsec_offload(adapter);
10506
10507 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
10508 netdev->hw_features |= NETIF_F_LRO;
10509 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
10510 netdev->features |= NETIF_F_LRO;
10511
	/* make sure the EEPROM is good */
10513 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
10514 e_dev_err("The EEPROM Checksum Is Not Valid\n");
10515 err = -EIO;
10516 goto err_sw_init;
10517 }
10518
10519 eth_platform_get_mac_address(&adapter->pdev->dev,
10520 adapter->hw.mac.perm_addr);
10521
10522 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
10523
10524 if (!is_valid_ether_addr(netdev->dev_addr)) {
10525 e_dev_err("invalid MAC address\n");
10526 err = -EIO;
10527 goto err_sw_init;
10528 }
10529
10530
10531 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
10532 ixgbe_mac_set_default_filter(adapter);
10533
10534 timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
10535
10536 if (ixgbe_removed(hw->hw_addr)) {
10537 err = -EIO;
10538 goto err_sw_init;
10539 }
10540 INIT_WORK(&adapter->service_task, ixgbe_service_task);
10541 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
10542 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
10543
10544 err = ixgbe_init_interrupt_scheme(adapter);
10545 if (err)
10546 goto err_sw_init;
10547
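	/* seed the statistics sequence counters for every Rx, Tx and XDP ring */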
10548 for (i = 0; i < adapter->num_rx_queues; i++)
10549 u64_stats_init(&adapter->rx_ring[i]->syncp);
10550 for (i = 0; i < adapter->num_tx_queues; i++)
10551 u64_stats_init(&adapter->tx_ring[i]->syncp);
10552 for (i = 0; i < adapter->num_xdp_queues; i++)
10553 u64_stats_init(&adapter->xdp_ring[i]->syncp);
10554
	/* WOL not supported for all devices */
10556 adapter->wol = 0;
10557 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
10558 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
10559 pdev->subsystem_device);
10560 if (hw->wol_enabled)
10561 adapter->wol = IXGBE_WUFC_MAG;
10562
10563 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
10564
10565
10566 ixgbe_set_fw_version(adapter);
10567
10568
10569 if (ixgbe_pcie_from_parent(hw))
10570 ixgbe_get_parent_bus_info(adapter);
10571 else
10572 hw->mac.ops.get_bus_info(hw);
10573
	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure no warning is displayed if it can't be fixed.
	 */
10579 switch (hw->mac.type) {
10580 case ixgbe_mac_82598EB:
10581 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
10582 break;
10583 default:
10584 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
10585 break;
10586 }
10587
	/* don't check link if we failed to enumerate functions */
10589 if (expected_gts > 0)
10590 ixgbe_check_minimum_link(adapter, expected_gts);
10591
10592 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
10593 if (err)
10594 strlcpy(part_str, "Unknown", sizeof(part_str));
10595 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
10596 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
10597 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
10598 part_str);
10599 else
10600 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
10601 hw->mac.type, hw->phy.type, part_str);
10602
10603 e_dev_info("%pM\n", netdev->dev_addr);
10604
	/* reset the hardware with the new settings */
10606 err = hw->mac.ops.start_hw(hw);
10607 if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
10609 e_dev_warn("This device is a pre-production adapter/LOM. "
10610 "Please be aware there may be issues associated "
10611 "with your hardware. If you are experiencing "
10612 "problems please contact your Intel or hardware "
10613 "representative who provided you with this "
10614 "hardware.\n");
10615 }
10616 strcpy(netdev->name, "eth%d");
10617 pci_set_drvdata(pdev, adapter);
10618 err = register_netdev(netdev);
10619 if (err)
10620 goto err_register;
10621
10622
	/* power down the optics for 82599 SFP+ fiber */
10624 if (hw->mac.ops.disable_tx_laser)
10625 hw->mac.ops.disable_tx_laser(hw);
10626
	/* carrier off reporting is important to ethtool even BEFORE open */
10628 netif_carrier_off(netdev);
10629
10630#ifdef CONFIG_IXGBE_DCA
10631 if (dca_add_requester(&pdev->dev) == 0) {
10632 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
10633 ixgbe_setup_dca(adapter);
10634 }
10635#endif
10636 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
10637 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
10638 for (i = 0; i < adapter->num_vfs; i++)
10639 ixgbe_vf_configuration(pdev, (i | 0x10000000));
10640 }
10641
	/* firmware requires the driver version to be 0xFFFFFFFF
	 * since the OS does not support the feature
	 */
10645 if (hw->mac.ops.set_fw_drv_ver)
10646 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
10647 sizeof(ixgbe_driver_version) - 1,
10648 ixgbe_driver_version);
10649
10650
10651 ixgbe_add_sanmac_netdev(netdev);
10652
10653 e_dev_info("%s\n", ixgbe_default_device_descr);
10654
10655#ifdef CONFIG_IXGBE_HWMON
10656 if (ixgbe_sysfs_init(adapter))
10657 e_err(probe, "failed to allocate sysfs resources\n");
10658#endif
10659
10660 ixgbe_dbg_adapter_init(adapter);
10661
	/* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
10663 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
10664 hw->mac.ops.setup_link(hw,
10665 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
10666 true);
10667
10668 return 0;
10669
10670err_register:
10671 ixgbe_release_hw_control(adapter);
10672 ixgbe_clear_interrupt_scheme(adapter);
10673err_sw_init:
10674 ixgbe_disable_sriov(adapter);
10675 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
10676 iounmap(adapter->io_addr);
10677 kfree(adapter->jump_tables[0]);
10678 kfree(adapter->mac_table);
10679 kfree(adapter->rss_key);
10680err_ioremap:
10681 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
10682 free_netdev(netdev);
10683err_alloc_etherdev:
10684 pci_release_mem_regions(pdev);
10685err_pci_reg:
10686err_dma:
10687 if (!adapter || disable_dev)
10688 pci_disable_device(pdev);
10689 return err;
10690}
10691
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
10701static void ixgbe_remove(struct pci_dev *pdev)
10702{
10703 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10704 struct net_device *netdev;
10705 bool disable_dev;
10706 int i;
10707
	/* if !adapter then we already cleaned up in probe */
10709 if (!adapter)
10710 return;
10711
10712 netdev = adapter->netdev;
10713 ixgbe_dbg_adapter_exit(adapter);
10714
10715 set_bit(__IXGBE_REMOVING, &adapter->state);
10716 cancel_work_sync(&adapter->service_task);
10717
10718
10719#ifdef CONFIG_IXGBE_DCA
10720 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
10721 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
10722 dca_remove_requester(&pdev->dev);
10723 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
10724 IXGBE_DCA_CTRL_DCA_DISABLE);
10725 }
10726
10727#endif
10728#ifdef CONFIG_IXGBE_HWMON
10729 ixgbe_sysfs_exit(adapter);
10730#endif
10731
	/* remove the added san mac */
10733 ixgbe_del_sanmac_netdev(netdev);
10734
10735#ifdef CONFIG_PCI_IOV
10736 ixgbe_disable_sriov(adapter);
10737#endif
10738 if (netdev->reg_state == NETREG_REGISTERED)
10739 unregister_netdev(netdev);
10740
10741 ixgbe_stop_ipsec_offload(adapter);
10742 ixgbe_clear_interrupt_scheme(adapter);
10743
10744 ixgbe_release_hw_control(adapter);
10745
10746#ifdef CONFIG_DCB
10747 kfree(adapter->ixgbe_ieee_pfc);
10748 kfree(adapter->ixgbe_ieee_ets);
10749
10750#endif
10751 iounmap(adapter->io_addr);
10752 pci_release_mem_regions(pdev);
10753
10754 e_dev_info("complete\n");
10755
10756 for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
10757 if (adapter->jump_tables[i]) {
10758 kfree(adapter->jump_tables[i]->input);
10759 kfree(adapter->jump_tables[i]->mask);
10760 }
10761 kfree(adapter->jump_tables[i]);
10762 }
10763
10764 kfree(adapter->mac_table);
10765 kfree(adapter->rss_key);
10766 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
10767 free_netdev(netdev);
10768
10769 pci_disable_pcie_error_reporting(pdev);
10770
10771 if (disable_dev)
10772 pci_disable_device(pdev);
10773}
10774
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
10783static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
10784 pci_channel_state_t state)
10785{
10786 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10787 struct net_device *netdev = adapter->netdev;
10788
10789#ifdef CONFIG_PCI_IOV
10790 struct ixgbe_hw *hw = &adapter->hw;
10791 struct pci_dev *bdev, *vfdev;
10792 u32 dw0, dw1, dw2, dw3;
10793 int vf, pos;
10794 u16 req_id, pf_func;
10795
10796 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
10797 adapter->num_vfs == 0)
10798 goto skip_bad_vf_detection;
10799
10800 bdev = pdev->bus->self;
10801 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
10802 bdev = bdev->bus->self;
10803
10804 if (!bdev)
10805 goto skip_bad_vf_detection;
10806
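	/* read the AER TLP header log from the root port to identify the
	 * requester of the failed transaction
	 */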
10807 pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
10808 if (!pos)
10809 goto skip_bad_vf_detection;
10810
10811 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
10812 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
10813 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
10814 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
10815 if (ixgbe_removed(hw->hw_addr))
10816 goto skip_bad_vf_detection;
10817
10818 req_id = dw1 >> 16;
	/* On the 82599, if bit 7 of the requester ID is set then it's a VF */
10820 if (!(req_id & 0x0080))
10821 goto skip_bad_vf_detection;
10822
10823 pf_func = req_id & 0x01;
10824 if ((pf_func & 1) == (pdev->devfn & 1)) {
10825 unsigned int device_id;
10826
10827 vf = (req_id & 0x7F) >> 1;
10828 e_dev_err("VF %d has caused a PCIe error\n", vf);
10829 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
10830 "%8.8x\tdw3: %8.8x\n",
10831 dw0, dw1, dw2, dw3);
10832 switch (adapter->hw.mac.type) {
10833 case ixgbe_mac_82599EB:
10834 device_id = IXGBE_82599_VF_DEVICE_ID;
10835 break;
10836 case ixgbe_mac_X540:
10837 device_id = IXGBE_X540_VF_DEVICE_ID;
10838 break;
10839 case ixgbe_mac_X550:
10840 device_id = IXGBE_DEV_ID_X550_VF;
10841 break;
10842 case ixgbe_mac_X550EM_x:
10843 device_id = IXGBE_DEV_ID_X550EM_X_VF;
10844 break;
10845 case ixgbe_mac_x550em_a:
10846 device_id = IXGBE_DEV_ID_X550EM_A_VF;
10847 break;
10848 default:
10849 device_id = 0;
10850 break;
10851 }
10852
		/* Find the pci device of the offending VF */
10854 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
10855 while (vfdev) {
10856 if (vfdev->devfn == (req_id & 0xFF))
10857 break;
10858 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
10859 device_id, vfdev);
10860 }
10861
		/* There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
10866 if (vfdev) {
10867 pcie_flr(vfdev);
			/* Free device reference count */
10869 pci_dev_put(vfdev);
10870 }
10871
10872 pci_cleanup_aer_uncorrect_error_status(pdev);
10873 }
10874
	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
10881 adapter->vferr_refcount++;
10882
10883 return PCI_ERS_RESULT_RECOVERED;
10884
10885skip_bad_vf_detection:
10886#endif
10887 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
10888 return PCI_ERS_RESULT_DISCONNECT;
10889
10890 if (!netif_device_present(netdev))
10891 return PCI_ERS_RESULT_DISCONNECT;
10892
10893 rtnl_lock();
10894 netif_device_detach(netdev);
10895
10896 if (state == pci_channel_io_perm_failure) {
10897 rtnl_unlock();
10898 return PCI_ERS_RESULT_DISCONNECT;
10899 }
10900
10901 if (netif_running(netdev))
10902 ixgbe_close_suspend(adapter);
10903
10904 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
10905 pci_disable_device(pdev);
10906 rtnl_unlock();
10907
	/* Request a slot reset. */
10909 return PCI_ERS_RESULT_NEED_RESET;
10910}
10911
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 **/
10918static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
10919{
10920 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10921 pci_ers_result_t result;
10922 int err;
10923
10924 if (pci_enable_device_mem(pdev)) {
10925 e_err(probe, "Cannot re-enable PCI device after reset.\n");
10926 result = PCI_ERS_RESULT_DISCONNECT;
10927 } else {
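		/* bring the device back up: restore config space, reset the
		 * adapter and clear any pending wake-up status
		 */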
10928 smp_mb__before_atomic();
10929 clear_bit(__IXGBE_DISABLED, &adapter->state);
10930 adapter->hw.hw_addr = adapter->io_addr;
10931 pci_set_master(pdev);
10932 pci_restore_state(pdev);
10933 pci_save_state(pdev);
10934
10935 pci_wake_from_d3(pdev, false);
10936
10937 ixgbe_reset(adapter);
10938 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10939 result = PCI_ERS_RESULT_RECOVERED;
10940 }
10941
10942 err = pci_cleanup_aer_uncorrect_error_status(pdev);
10943 if (err) {
10944 e_dev_err("pci_cleanup_aer_uncorrect_error_status "
10945 "failed 0x%0x\n", err);
		/* non-fatal, continue */
10947 }
10948
10949 return result;
10950}
10951
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 **/
10959static void ixgbe_io_resume(struct pci_dev *pdev)
10960{
10961 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10962 struct net_device *netdev = adapter->netdev;
10963
10964#ifdef CONFIG_PCI_IOV
10965 if (adapter->vferr_refcount) {
10966 e_info(drv, "Resuming after VF err\n");
10967 adapter->vferr_refcount--;
10968 return;
10969 }
10970
10971#endif
10972 rtnl_lock();
10973 if (netif_running(netdev))
10974 ixgbe_open(netdev);
10975
10976 netif_device_attach(netdev);
10977 rtnl_unlock();
10978}
10979
10980static const struct pci_error_handlers ixgbe_err_handler = {
10981 .error_detected = ixgbe_io_error_detected,
10982 .slot_reset = ixgbe_io_slot_reset,
10983 .resume = ixgbe_io_resume,
10984};
10985
10986static struct pci_driver ixgbe_driver = {
10987 .name = ixgbe_driver_name,
10988 .id_table = ixgbe_pci_tbl,
10989 .probe = ixgbe_probe,
10990 .remove = ixgbe_remove,
10991#ifdef CONFIG_PM
10992 .suspend = ixgbe_suspend,
10993 .resume = ixgbe_resume,
10994#endif
10995 .shutdown = ixgbe_shutdown,
10996 .sriov_configure = ixgbe_pci_sriov_configure,
10997 .err_handler = &ixgbe_err_handler
10998};
10999
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
11006static int __init ixgbe_init_module(void)
11007{
11008 int ret;
11009 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
11010 pr_info("%s\n", ixgbe_copyright);
11011
11012 ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
11013 if (!ixgbe_wq) {
11014 pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
11015 return -ENOMEM;
11016 }
11017
11018 ixgbe_dbg_init();
11019
11020 ret = pci_register_driver(&ixgbe_driver);
11021 if (ret) {
11022 destroy_workqueue(ixgbe_wq);
11023 ixgbe_dbg_exit();
11024 return ret;
11025 }
11026
11027#ifdef CONFIG_IXGBE_DCA
11028 dca_register_notify(&dca_notifier);
11029#endif
11030
11031 return 0;
11032}
11033
11034module_init(ixgbe_init_module);
11035
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
11042static void __exit ixgbe_exit_module(void)
11043{
11044#ifdef CONFIG_IXGBE_DCA
11045 dca_unregister_notify(&dca_notifier);
11046#endif
11047 pci_unregister_driver(&ixgbe_driver);
11048
11049 ixgbe_dbg_exit();
11050 if (ixgbe_wq) {
11051 destroy_workqueue(ixgbe_wq);
11052 ixgbe_wq = NULL;
11053 }
11054}
11055
11056#ifdef CONFIG_IXGBE_DCA
11057static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
11058 void *p)
11059{
11060 int ret_val;
11061
11062 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
11063 __ixgbe_notify_dca);
11064
11065 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
11066}
11067
11068#endif
11069
11070module_exit(ixgbe_exit_module);
11071