/* Intel(R) 10 Gigabit PCI Express Linux driver (ixgbe) */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "5.1.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
				"Copyright (c) 1999-2016 Intel Corporation.";

static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598]		= &ixgbe_82598_info,
	[board_82599]		= &ixgbe_82599_info,
	[board_X540]		= &ixgbe_X540_info,
	[board_X550]		= &ixgbe_X550_info,
	[board_X550EM_x]	= &ixgbe_X550EM_x_info,
	[board_x550em_x_fw]	= &ixgbe_x550em_x_fw_info,
	[board_x550em_a]	= &ixgbe_x550em_a_info,
	[board_x550em_a_fw]	= &ixgbe_x550em_a_fw_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), board_x550em_x_fw},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },

	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *ixgbe_wq;

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);

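/* Read a PCIe capability word from the config space of the device's parent
 * (the bridge or switch port directly above the adapter). Used when the
 * MAC sits behind an on-board PCIe switch and the parent's link registers
 * describe the usable bandwidth.
 */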
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					  u32 reg, u16 *value)
{
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	if (!pci_is_pcie(parent_dev))
		return -1;

	pcie_capability_read_word(parent_dev, reg, value);
	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
		return -1;
	return 0;
}

static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 link_status = 0;
	int err;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space of the
	 * parent, as this device is behind a switch
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	return 0;
}

/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from
 * the device. Used to ensure that various locations all have the correct
 * device ID checks.
 */
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		return true;
	default:
		return false;
	}
}

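/* Warn at probe time if the PCIe slot the adapter sits in cannot supply the
 * bandwidth (in GT/s) the device needs for full line rate.
 */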
262static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
263 int expected_gts)
264{
265 struct ixgbe_hw *hw = &adapter->hw;
266 int max_gts = 0;
267 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
268 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
269 struct pci_dev *pdev;

	/* Some devices are not connected over PCIe and thus do not negotiate
	 * speed. These devices do not have valid bus info, and thus any report
	 * we generate may not be correct.
	 */
	if (hw->bus.type == ixgbe_bus_type_internal)
		return;

	/* determine whether to use the parent device */
279 if (ixgbe_pcie_from_parent(&adapter->hw))
280 pdev = adapter->pdev->bus->parent->self;
281 else
282 pdev = adapter->pdev;
283
284 if (pcie_get_minimum_link(pdev, &speed, &width) ||
285 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
286 e_dev_warn("Unable to determine PCI Express bandwidth.\n");
287 return;
288 }
289
290 switch (speed) {
291 case PCIE_SPEED_2_5GT:
292
293 max_gts = 2 * width;
294 break;
295 case PCIE_SPEED_5_0GT:
296
297 max_gts = 4 * width;
298 break;
299 case PCIE_SPEED_8_0GT:
300
301 max_gts = 8 * width;
302 break;
303 default:
304 e_dev_warn("Unable to determine PCI Express bandwidth.\n");
305 return;
306 }
307
308 e_dev_info("PCI Express bandwidth of %dGT/s available\n",
309 max_gts);
310 e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
311 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
312 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
313 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
314 "Unknown"),
315 width,
316 (speed == PCIE_SPEED_2_5GT ? "20%" :
317 speed == PCIE_SPEED_5_0GT ? "20%" :
318 speed == PCIE_SPEED_8_0GT ? "<2%" :
319 "Unknown"));
320
321 if (max_gts < expected_gts) {
322 e_dev_warn("This is not sufficient for optimal performance of this card.\n");
323 e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
324 expected_gts);
325 e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
326 }
327}
328
329static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
330{
331 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
332 !test_bit(__IXGBE_REMOVING, &adapter->state) &&
333 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
334 queue_work(ixgbe_wq, &adapter->service_task);
335}
336
337static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
338{
339 struct ixgbe_adapter *adapter = hw->back;
340
341 if (!hw->hw_addr)
342 return;
343 hw->hw_addr = NULL;
344 e_dev_err("Adapter removed\n");
345 if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
346 ixgbe_service_event_schedule(adapter);
347}
348
349static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
350{
351 u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
359 if (reg == IXGBE_STATUS) {
360 ixgbe_remove_adapter(hw);
361 return;
362 }
363 value = ixgbe_read_reg(hw, IXGBE_STATUS);
364 if (value == IXGBE_FAILED_READ_REG)
365 ixgbe_remove_adapter(hw);
366}
367

/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers. It checks for device
 * removal by confirming any read that returns all ones by checking the
 * status register value for all ones. This function avoids reading from
 * the hardware if a removal was previously detected in which case it
 * returns IXGBE_FAILED_READ_REG.
 */
381u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
382{
383 u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
384 u32 value;
385
386 if (ixgbe_removed(reg_addr))
387 return IXGBE_FAILED_READ_REG;
388 if (unlikely(hw->phy.nw_mng_if_sel &
389 IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) {
390 struct ixgbe_adapter *adapter;
391 int i;
392
393 for (i = 0; i < 200; ++i) {
394 value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
395 if (likely(!value))
396 goto writes_completed;
397 if (value == IXGBE_FAILED_READ_REG) {
398 ixgbe_remove_adapter(hw);
399 return IXGBE_FAILED_READ_REG;
400 }
401 udelay(5);
402 }
403
404 adapter = hw->back;
405 e_warn(hw, "register writes incomplete %08x\n", value);
406 }
407
408writes_completed:
409 value = readl(reg_addr + reg);
410 if (unlikely(value == IXGBE_FAILED_READ_REG))
411 ixgbe_check_remove(hw, reg);
412 return value;
413}
414
415static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
416{
417 u16 value;
418
419 pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
420 if (value == IXGBE_FAILED_READ_CFG_WORD) {
421 ixgbe_remove_adapter(hw);
422 return true;
423 }
424 return false;
425}
426
427u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
428{
429 struct ixgbe_adapter *adapter = hw->back;
430 u16 value;
431
432 if (ixgbe_removed(hw->hw_addr))
433 return IXGBE_FAILED_READ_CFG_WORD;
434 pci_read_config_word(adapter->pdev, reg, &value);
435 if (value == IXGBE_FAILED_READ_CFG_WORD &&
436 ixgbe_check_cfg_remove(hw, adapter->pdev))
437 return IXGBE_FAILED_READ_CFG_WORD;
438 return value;
439}
440
441#ifdef CONFIG_PCI_IOV
442static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
443{
444 struct ixgbe_adapter *adapter = hw->back;
445 u32 value;
446
447 if (ixgbe_removed(hw->hw_addr))
448 return IXGBE_FAILED_READ_CFG_DWORD;
449 pci_read_config_dword(adapter->pdev, reg, &value);
450 if (value == IXGBE_FAILED_READ_CFG_DWORD &&
451 ixgbe_check_cfg_remove(hw, adapter->pdev))
452 return IXGBE_FAILED_READ_CFG_DWORD;
453 return value;
454}
455#endif
456
457void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
458{
459 struct ixgbe_adapter *adapter = hw->back;
460
461 if (ixgbe_removed(hw->hw_addr))
462 return;
463 pci_write_config_word(adapter->pdev, reg, value);
464}
465
466static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
467{
468 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
471 smp_mb__before_atomic();
472 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
473}
474
475struct ixgbe_reg_info {
476 u32 ofs;
477 char *name;
478};
479
480static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
481
482
483 {IXGBE_CTRL, "CTRL"},
484 {IXGBE_STATUS, "STATUS"},
485 {IXGBE_CTRL_EXT, "CTRL_EXT"},
486
487
488 {IXGBE_EICR, "EICR"},
489
490
491 {IXGBE_SRRCTL(0), "SRRCTL"},
492 {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
493 {IXGBE_RDLEN(0), "RDLEN"},
494 {IXGBE_RDH(0), "RDH"},
495 {IXGBE_RDT(0), "RDT"},
496 {IXGBE_RXDCTL(0), "RXDCTL"},
497 {IXGBE_RDBAL(0), "RDBAL"},
498 {IXGBE_RDBAH(0), "RDBAH"},
499
500
501 {IXGBE_TDBAL(0), "TDBAL"},
502 {IXGBE_TDBAH(0), "TDBAH"},
503 {IXGBE_TDLEN(0), "TDLEN"},
504 {IXGBE_TDH(0), "TDH"},
505 {IXGBE_TDT(0), "TDT"},
506 {IXGBE_TXDCTL(0), "TXDCTL"},
507
508
509 { .name = NULL }
510};
511
/*
 * ixgbe_regdump - register printout routine
 */
516static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
517{
518 int i;
519 char rname[16];
520 u32 regs[64];
521
522 switch (reginfo->ofs) {
523 case IXGBE_SRRCTL(0):
524 for (i = 0; i < 64; i++)
525 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
526 break;
527 case IXGBE_DCA_RXCTRL(0):
528 for (i = 0; i < 64; i++)
529 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
530 break;
531 case IXGBE_RDLEN(0):
532 for (i = 0; i < 64; i++)
533 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
534 break;
535 case IXGBE_RDH(0):
536 for (i = 0; i < 64; i++)
537 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
538 break;
539 case IXGBE_RDT(0):
540 for (i = 0; i < 64; i++)
541 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
542 break;
543 case IXGBE_RXDCTL(0):
544 for (i = 0; i < 64; i++)
545 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
546 break;
547 case IXGBE_RDBAL(0):
548 for (i = 0; i < 64; i++)
549 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
550 break;
551 case IXGBE_RDBAH(0):
552 for (i = 0; i < 64; i++)
553 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
554 break;
555 case IXGBE_TDBAL(0):
556 for (i = 0; i < 64; i++)
557 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
558 break;
559 case IXGBE_TDBAH(0):
560 for (i = 0; i < 64; i++)
561 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
562 break;
563 case IXGBE_TDLEN(0):
564 for (i = 0; i < 64; i++)
565 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
566 break;
567 case IXGBE_TDH(0):
568 for (i = 0; i < 64; i++)
569 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
570 break;
571 case IXGBE_TDT(0):
572 for (i = 0; i < 64; i++)
573 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
574 break;
575 case IXGBE_TXDCTL(0):
576 for (i = 0; i < 64; i++)
577 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
578 break;
579 default:
580 pr_info("%-15s %08x\n",
581 reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs));
582 return;
583 }
584
585 i = 0;
586 while (i < 64) {
587 int j;
588 char buf[9 * 8 + 1];
589 char *p = buf;
590
591 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i, i + 7);
592 for (j = 0; j < 8; j++)
593 p += sprintf(p, " %08x", regs[i++]);
594 pr_err("%-15s%s\n", rname, buf);
595 }
596
597}
598
599static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
600{
601 struct ixgbe_tx_buffer *tx_buffer;
602
603 tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
604 pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
605 n, ring->next_to_use, ring->next_to_clean,
606 (u64)dma_unmap_addr(tx_buffer, dma),
607 dma_unmap_len(tx_buffer, len),
608 tx_buffer->next_to_watch,
609 (u64)tx_buffer->time_stamp);
610}
611
/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
615static void ixgbe_dump(struct ixgbe_adapter *adapter)
616{
617 struct net_device *netdev = adapter->netdev;
618 struct ixgbe_hw *hw = &adapter->hw;
619 struct ixgbe_reg_info *reginfo;
620 int n = 0;
621 struct ixgbe_ring *ring;
622 struct ixgbe_tx_buffer *tx_buffer;
623 union ixgbe_adv_tx_desc *tx_desc;
624 struct my_u0 { u64 a; u64 b; } *u0;
625 struct ixgbe_ring *rx_ring;
626 union ixgbe_adv_rx_desc *rx_desc;
627 struct ixgbe_rx_buffer *rx_buffer_info;
628 int i = 0;
629
630 if (!netif_msg_hw(adapter))
631 return;
632
633
634 if (netdev) {
635 dev_info(&adapter->pdev->dev, "Net device Info\n");
636 pr_info("Device Name state "
637 "trans_start\n");
638 pr_info("%-15s %016lX %016lX\n",
639 netdev->name,
640 netdev->state,
641 dev_trans_start(netdev));
642 }
643
644
645 dev_info(&adapter->pdev->dev, "Register Dump\n");
646 pr_info(" Register Name Value\n");
647 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
648 reginfo->name; reginfo++) {
649 ixgbe_regdump(hw, reginfo);
650 }
651
652
653 if (!netdev || !netif_running(netdev))
654 return;
655
656 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
657 pr_info(" %s %s %s %s\n",
658 "Queue [NTU] [NTC] [bi(ntc)->dma ]",
659 "leng", "ntw", "timestamp");
660 for (n = 0; n < adapter->num_tx_queues; n++) {
661 ring = adapter->tx_ring[n];
662 ixgbe_print_buffer(ring, n);
663 }
664
665 for (n = 0; n < adapter->num_xdp_queues; n++) {
666 ring = adapter->xdp_ring[n];
667 ixgbe_print_buffer(ring, n);
668 }
669
670
671 if (!netif_msg_tx_done(adapter))
672 goto rx_ring_summary;
673
674 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * The loop below dumps each Tx ring in both the Advanced Transmit
	 * Descriptor read format (buffer address plus command/type/length
	 * fields) and the write-back format reported by hardware once the
	 * descriptor has been processed.
	 */
711 for (n = 0; n < adapter->num_tx_queues; n++) {
712 ring = adapter->tx_ring[n];
713 pr_info("------------------------------------\n");
714 pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
715 pr_info("------------------------------------\n");
716 pr_info("%s%s %s %s %s %s\n",
717 "T [desc] [address 63:0 ] ",
718 "[PlPOIdStDDt Ln] [bi->dma ] ",
719 "leng", "ntw", "timestamp", "bi->skb");
720
721 for (i = 0; ring->desc && (i < ring->count); i++) {
722 tx_desc = IXGBE_TX_DESC(ring, i);
723 tx_buffer = &ring->tx_buffer_info[i];
724 u0 = (struct my_u0 *)tx_desc;
725 if (dma_unmap_len(tx_buffer, len) > 0) {
726 const char *ring_desc;
727
728 if (i == ring->next_to_use &&
729 i == ring->next_to_clean)
730 ring_desc = " NTC/U";
731 else if (i == ring->next_to_use)
732 ring_desc = " NTU";
733 else if (i == ring->next_to_clean)
734 ring_desc = " NTC";
735 else
736 ring_desc = "";
737 pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
738 i,
739 le64_to_cpu(u0->a),
740 le64_to_cpu(u0->b),
741 (u64)dma_unmap_addr(tx_buffer, dma),
742 dma_unmap_len(tx_buffer, len),
743 tx_buffer->next_to_watch,
744 (u64)tx_buffer->time_stamp,
745 tx_buffer->skb,
746 ring_desc);
747
748 if (netif_msg_pktdata(adapter) &&
749 tx_buffer->skb)
750 print_hex_dump(KERN_INFO, "",
751 DUMP_PREFIX_ADDRESS, 16, 1,
752 tx_buffer->skb->data,
753 dma_unmap_len(tx_buffer, len),
754 true);
755 }
756 }
757 }
758
759
760rx_ring_summary:
761 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
762 pr_info("Queue [NTU] [NTC]\n");
763 for (n = 0; n < adapter->num_rx_queues; n++) {
764 rx_ring = adapter->rx_ring[n];
765 pr_info("%5d %5X %5X\n",
766 n, rx_ring->next_to_use, rx_ring->next_to_clean);
767 }
768
769
770 if (!netif_msg_rx_status(adapter))
771 return;
772
773 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Receive Descriptor Formats
	 *
	 * The loop below dumps each Rx ring in both the Advanced Receive
	 * Descriptor read format (packet buffer and header buffer addresses)
	 * and the write-back format (RSS hash, status, error and length
	 * fields) reported by hardware.
	 */
820 for (n = 0; n < adapter->num_rx_queues; n++) {
821 rx_ring = adapter->rx_ring[n];
822 pr_info("------------------------------------\n");
823 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
824 pr_info("------------------------------------\n");
825 pr_info("%s%s%s\n",
826 "R [desc] [ PktBuf A0] ",
827 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
828 "<-- Adv Rx Read format");
829 pr_info("%s%s%s\n",
830 "RWB[desc] [PcsmIpSHl PtRs] ",
831 "[vl er S cks ln] ---------------- [bi->skb ] ",
832 "<-- Adv Rx Write-Back format");
833
834 for (i = 0; i < rx_ring->count; i++) {
835 const char *ring_desc;
836
837 if (i == rx_ring->next_to_use)
838 ring_desc = " NTU";
839 else if (i == rx_ring->next_to_clean)
840 ring_desc = " NTC";
841 else
842 ring_desc = "";
843
844 rx_buffer_info = &rx_ring->rx_buffer_info[i];
845 rx_desc = IXGBE_RX_DESC(rx_ring, i);
846 u0 = (struct my_u0 *)rx_desc;
847 if (rx_desc->wb.upper.length) {
848
849 pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
850 i,
851 le64_to_cpu(u0->a),
852 le64_to_cpu(u0->b),
853 rx_buffer_info->skb,
854 ring_desc);
855 } else {
856 pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n",
857 i,
858 le64_to_cpu(u0->a),
859 le64_to_cpu(u0->b),
860 (u64)rx_buffer_info->dma,
861 rx_buffer_info->skb,
862 ring_desc);
863
864 if (netif_msg_pktdata(adapter) &&
865 rx_buffer_info->dma) {
866 print_hex_dump(KERN_INFO, "",
867 DUMP_PREFIX_ADDRESS, 16, 1,
868 page_address(rx_buffer_info->page) +
869 rx_buffer_info->page_offset,
870 ixgbe_rx_bufsz(rx_ring), true);
871 }
872 }
873 }
874 }
875}
876
877static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
878{
879 u32 ctrl_ext;
880
881
882 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
883 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
884 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
885}
886
887static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
888{
889 u32 ctrl_ext;
890
891
892 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
893 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
894 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
895}
896
/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
905static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
906 u8 queue, u8 msix_vector)
907{
908 u32 ivar, index;
909 struct ixgbe_hw *hw = &adapter->hw;
910 switch (hw->mac.type) {
911 case ixgbe_mac_82598EB:
912 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
913 if (direction == -1)
914 direction = 0;
915 index = (((direction * 64) + queue) >> 2) & 0x1F;
916 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
917 ivar &= ~(0xFF << (8 * (queue & 0x3)));
918 ivar |= (msix_vector << (8 * (queue & 0x3)));
919 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
920 break;
921 case ixgbe_mac_82599EB:
922 case ixgbe_mac_X540:
923 case ixgbe_mac_X550:
924 case ixgbe_mac_X550EM_x:
925 case ixgbe_mac_x550em_a:
926 if (direction == -1) {
927
928 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
929 index = ((queue & 1) * 8);
930 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
931 ivar &= ~(0xFF << index);
932 ivar |= (msix_vector << index);
933 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
934 break;
935 } else {
936
937 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
938 index = ((16 * (queue & 1)) + (8 * direction));
939 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
940 ivar &= ~(0xFF << index);
941 ivar |= (msix_vector << index);
942 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
943 break;
944 }
945 default:
946 break;
947 }
948}
949
950static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
951 u64 qmask)
952{
953 u32 mask;
954
955 switch (adapter->hw.mac.type) {
956 case ixgbe_mac_82598EB:
957 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
958 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
959 break;
960 case ixgbe_mac_82599EB:
961 case ixgbe_mac_X540:
962 case ixgbe_mac_X550:
963 case ixgbe_mac_X550EM_x:
964 case ixgbe_mac_x550em_a:
965 mask = (qmask & 0xFFFFFFFF);
966 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
967 mask = (qmask >> 32);
968 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
969 break;
970 default:
971 break;
972 }
973}
974
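/* Account link-level (802.3x) XOFF pause frames received and re-arm Tx hang
 * detection on all Tx/XDP queues, since pause can legitimately stall them.
 */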
975static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
976{
977 struct ixgbe_hw *hw = &adapter->hw;
978 struct ixgbe_hw_stats *hwstats = &adapter->stats;
979 int i;
980 u32 data;
981
982 if ((hw->fc.current_mode != ixgbe_fc_full) &&
983 (hw->fc.current_mode != ixgbe_fc_rx_pause))
984 return;
985
986 switch (hw->mac.type) {
987 case ixgbe_mac_82598EB:
988 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
989 break;
990 default:
991 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
992 }
993 hwstats->lxoffrxc += data;
994
995
996 if (!data)
997 return;
998
999 for (i = 0; i < adapter->num_tx_queues; i++)
1000 clear_bit(__IXGBE_HANG_CHECK_ARMED,
1001 &adapter->tx_ring[i]->state);
1002
1003 for (i = 0; i < adapter->num_xdp_queues; i++)
1004 clear_bit(__IXGBE_HANG_CHECK_ARMED,
1005 &adapter->xdp_ring[i]->state);
1006}
1007
1008static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
1009{
1010 struct ixgbe_hw *hw = &adapter->hw;
1011 struct ixgbe_hw_stats *hwstats = &adapter->stats;
1012 u32 xoff[8] = {0};
1013 u8 tc;
1014 int i;
1015 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
1016
1017 if (adapter->ixgbe_ieee_pfc)
1018 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
1019
1020 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
1021 ixgbe_update_xoff_rx_lfc(adapter);
1022 return;
1023 }
1024
1025
1026 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
1027 u32 pxoffrxc;
1028
1029 switch (hw->mac.type) {
1030 case ixgbe_mac_82598EB:
1031 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1032 break;
1033 default:
1034 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1035 }
1036 hwstats->pxoffrxc[i] += pxoffrxc;
1037
1038 tc = netdev_get_prio_tc_map(adapter->netdev, i);
1039 xoff[tc] += pxoffrxc;
1040 }
1041
1042
1043 for (i = 0; i < adapter->num_tx_queues; i++) {
1044 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
1045
1046 tc = tx_ring->dcb_tc;
1047 if (xoff[tc])
1048 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1049 }
1050
1051 for (i = 0; i < adapter->num_xdp_queues; i++) {
1052 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
1053
1054 tc = xdp_ring->dcb_tc;
1055 if (xoff[tc])
1056 clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);
1057 }
1058}
1059
1060static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
1061{
1062 return ring->stats.packets;
1063}
1064
1065static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
1066{
1067 struct ixgbe_adapter *adapter;
1068 struct ixgbe_hw *hw;
1069 u32 head, tail;
1070
1071 if (ring->l2_accel_priv)
1072 adapter = ring->l2_accel_priv->real_adapter;
1073 else
1074 adapter = netdev_priv(ring->netdev);
1075
1076 hw = &adapter->hw;
1077 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
1078 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
1079
1080 if (head != tail)
1081 return (head < tail) ?
1082 tail - head : (tail + ring->count - head);
1083
1084 return 0;
1085}
1086
1087static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1088{
1089 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1090 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1091 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1092
1093 clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
1107 if (tx_done_old == tx_done && tx_pending)
		/* make sure it is true for two checks in a row */
1109 return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
1110 &tx_ring->state);
1111
1112 tx_ring->tx_stats.tx_done_old = tx_done;
1113
1114 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1115
1116 return false;
1117}
1118
/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
1123static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
1124{

	/* Do the reset outside of interrupt context */
1127 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1128 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
1129 e_warn(drv, "initiating reset due to tx timeout\n");
1130 ixgbe_service_event_schedule(adapter);
1131 }
1132}
1133
/**
 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
 **/
1137static int ixgbe_tx_maxrate(struct net_device *netdev,
1138 int queue_index, u32 maxrate)
1139{
1140 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1141 struct ixgbe_hw *hw = &adapter->hw;
1142 u32 bcnrc_val = ixgbe_link_mbps(adapter);
1143
1144 if (!maxrate)
1145 return 0;
1146
1147
1148 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1149 bcnrc_val /= maxrate;
1150
1151
1152 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1153 IXGBE_RTTBCNRC_RF_DEC_MASK;
1154
1155
1156 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1157
1158 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
1159 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1160
1161 return 0;
1162}
1163
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
1170static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1171 struct ixgbe_ring *tx_ring, int napi_budget)
1172{
1173 struct ixgbe_adapter *adapter = q_vector->adapter;
1174 struct ixgbe_tx_buffer *tx_buffer;
1175 union ixgbe_adv_tx_desc *tx_desc;
1176 unsigned int total_bytes = 0, total_packets = 0;
1177 unsigned int budget = q_vector->tx.work_limit;
1178 unsigned int i = tx_ring->next_to_clean;
1179
1180 if (test_bit(__IXGBE_DOWN, &adapter->state))
1181 return true;
1182
1183 tx_buffer = &tx_ring->tx_buffer_info[i];
1184 tx_desc = IXGBE_TX_DESC(tx_ring, i);
1185 i -= tx_ring->count;
1186
1187 do {
1188 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
1189
1190
1191 if (!eop_desc)
1192 break;
1193
1194
1195 read_barrier_depends();
1196
1197
1198 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1199 break;
1200
1201
1202 tx_buffer->next_to_watch = NULL;
1203
1204
1205 total_bytes += tx_buffer->bytecount;
1206 total_packets += tx_buffer->gso_segs;
1207
1208
1209 if (ring_is_xdp(tx_ring))
1210 page_frag_free(tx_buffer->data);
1211 else
1212 napi_consume_skb(tx_buffer->skb, napi_budget);
1213
1214
1215 dma_unmap_single(tx_ring->dev,
1216 dma_unmap_addr(tx_buffer, dma),
1217 dma_unmap_len(tx_buffer, len),
1218 DMA_TO_DEVICE);
1219
1220
1221 dma_unmap_len_set(tx_buffer, len, 0);
1222
1223
1224 while (tx_desc != eop_desc) {
1225 tx_buffer++;
1226 tx_desc++;
1227 i++;
1228 if (unlikely(!i)) {
1229 i -= tx_ring->count;
1230 tx_buffer = tx_ring->tx_buffer_info;
1231 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1232 }
1233
1234
1235 if (dma_unmap_len(tx_buffer, len)) {
1236 dma_unmap_page(tx_ring->dev,
1237 dma_unmap_addr(tx_buffer, dma),
1238 dma_unmap_len(tx_buffer, len),
1239 DMA_TO_DEVICE);
1240 dma_unmap_len_set(tx_buffer, len, 0);
1241 }
1242 }
1243
1244
1245 tx_buffer++;
1246 tx_desc++;
1247 i++;
1248 if (unlikely(!i)) {
1249 i -= tx_ring->count;
1250 tx_buffer = tx_ring->tx_buffer_info;
1251 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1252 }
1253
1254
1255 prefetch(tx_desc);
1256
1257
1258 budget--;
1259 } while (likely(budget));
1260
1261 i += tx_ring->count;
1262 tx_ring->next_to_clean = i;
1263 u64_stats_update_begin(&tx_ring->syncp);
1264 tx_ring->stats.bytes += total_bytes;
1265 tx_ring->stats.packets += total_packets;
1266 u64_stats_update_end(&tx_ring->syncp);
1267 q_vector->tx.total_bytes += total_bytes;
1268 q_vector->tx.total_packets += total_packets;
1269
1270 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1271
1272 struct ixgbe_hw *hw = &adapter->hw;
1273 e_err(drv, "Detected Tx Unit Hang %s\n"
1274 " Tx Queue <%d>\n"
1275 " TDH, TDT <%x>, <%x>\n"
1276 " next_to_use <%x>\n"
1277 " next_to_clean <%x>\n"
1278 "tx_buffer_info[next_to_clean]\n"
1279 " time_stamp <%lx>\n"
1280 " jiffies <%lx>\n",
1281 ring_is_xdp(tx_ring) ? "(XDP)" : "",
1282 tx_ring->queue_index,
1283 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1284 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1285 tx_ring->next_to_use, i,
1286 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1287
1288 if (!ring_is_xdp(tx_ring))
1289 netif_stop_subqueue(tx_ring->netdev,
1290 tx_ring->queue_index);
1291
1292 e_info(probe,
1293 "tx hang %d detected on queue %d, resetting adapter\n",
1294 adapter->tx_timeout_count + 1, tx_ring->queue_index);
1295
1296
1297 ixgbe_tx_timeout_reset(adapter);
1298
1299
1300 return true;
1301 }
1302
1303 if (ring_is_xdp(tx_ring))
1304 return !!budget;
1305
1306 netdev_tx_completed_queue(txring_txq(tx_ring),
1307 total_packets, total_bytes);
1308
1309#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
1310 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1311 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1312
1313
1314
1315 smp_mb();
1316 if (__netif_subqueue_stopped(tx_ring->netdev,
1317 tx_ring->queue_index)
1318 && !test_bit(__IXGBE_DOWN, &adapter->state)) {
1319 netif_wake_subqueue(tx_ring->netdev,
1320 tx_ring->queue_index);
1321 ++tx_ring->tx_stats.restart_queue;
1322 }
1323 }
1324
1325 return !!budget;
1326}
1327
1328#ifdef CONFIG_IXGBE_DCA
1329static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
1330 struct ixgbe_ring *tx_ring,
1331 int cpu)
1332{
1333 struct ixgbe_hw *hw = &adapter->hw;
1334 u32 txctrl = 0;
1335 u16 reg_offset;
1336
1337 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1338 txctrl = dca3_get_tag(tx_ring->dev, cpu);
1339
1340 switch (hw->mac.type) {
1341 case ixgbe_mac_82598EB:
1342 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1343 break;
1344 case ixgbe_mac_82599EB:
1345 case ixgbe_mac_X540:
1346 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
1347 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
1348 break;
1349 default:
1350
1351 return;
1352 }
1353
1354
1355
1356
1357
1358
1359 txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1360 IXGBE_DCA_TXCTRL_DATA_RRO_EN |
1361 IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1362
1363 IXGBE_WRITE_REG(hw, reg_offset, txctrl);
1364}
1365
1366static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
1367 struct ixgbe_ring *rx_ring,
1368 int cpu)
1369{
1370 struct ixgbe_hw *hw = &adapter->hw;
1371 u32 rxctrl = 0;
1372 u8 reg_idx = rx_ring->reg_idx;
1373
1374 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1375 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
1376
1377 switch (hw->mac.type) {
1378 case ixgbe_mac_82599EB:
1379 case ixgbe_mac_X540:
1380 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
1381 break;
1382 default:
1383 break;
1384 }
1385
1386
1387
1388
1389
1390
1391 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1392 IXGBE_DCA_RXCTRL_DATA_DCA_EN |
1393 IXGBE_DCA_RXCTRL_DESC_DCA_EN;
1394
1395 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
1396}
1397
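/* Retarget DCA (direct cache access) hints for this vector's Tx and Rx rings
 * at the CPU currently servicing the interrupt, if it has changed.
 */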
1398static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1399{
1400 struct ixgbe_adapter *adapter = q_vector->adapter;
1401 struct ixgbe_ring *ring;
1402 int cpu = get_cpu();
1403
1404 if (q_vector->cpu == cpu)
1405 goto out_no_update;
1406
1407 ixgbe_for_each_ring(ring, q_vector->tx)
1408 ixgbe_update_tx_dca(adapter, ring, cpu);
1409
1410 ixgbe_for_each_ring(ring, q_vector->rx)
1411 ixgbe_update_rx_dca(adapter, ring, cpu);
1412
1413 q_vector->cpu = cpu;
1414out_no_update:
1415 put_cpu();
1416}
1417
1418static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1419{
1420 int i;
1421
1422
1423 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1424 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1425 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1426 else
1427 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1428 IXGBE_DCA_CTRL_DCA_DISABLE);
1429
1430 for (i = 0; i < adapter->num_q_vectors; i++) {
1431 adapter->q_vector[i]->cpu = -1;
1432 ixgbe_update_dca(adapter->q_vector[i]);
1433 }
1434}
1435
1436static int __ixgbe_notify_dca(struct device *dev, void *data)
1437{
1438 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1439 unsigned long event = *(unsigned long *)data;
1440
1441 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1442 return 0;
1443
1444 switch (event) {
1445 case DCA_PROVIDER_ADD:
1446
1447 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1448 break;
1449 if (dca_add_requester(dev) == 0) {
1450 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1451 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1452 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1453 break;
1454 }
1455
1456 case DCA_PROVIDER_REMOVE:
1457 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1458 dca_remove_requester(dev);
1459 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1460 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1461 IXGBE_DCA_CTRL_DCA_DISABLE);
1462 }
1463 break;
1464 }
1465
1466 return 0;
1467}
1468
1469#endif
1470
1471#define IXGBE_RSS_L4_TYPES_MASK \
1472 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
1473 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
1474 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
1475 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
1476
1477static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1478 union ixgbe_adv_rx_desc *rx_desc,
1479 struct sk_buff *skb)
1480{
1481 u16 rss_type;
1482
1483 if (!(ring->netdev->features & NETIF_F_RXHASH))
1484 return;
1485
1486 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
1487 IXGBE_RXDADV_RSSTYPE_MASK;
1488
1489 if (!rss_type)
1490 return;
1491
1492 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1493 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
1494 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1495}
1496
1497#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE, false otherwise
 */
1505static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1506 union ixgbe_adv_rx_desc *rx_desc)
1507{
1508 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1509
1510 return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1511 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1512 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1513 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1514}
1515
1516#endif
1517
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
1523static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1524 union ixgbe_adv_rx_desc *rx_desc,
1525 struct sk_buff *skb)
1526{
1527 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1528 bool encap_pkt = false;
1529
1530 skb_checksum_none_assert(skb);
1531
1532
1533 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1534 return;
1535
1536
1537 if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
1538 encap_pkt = true;
1539 skb->encapsulation = 1;
1540 }
1541
1542
1543 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1544 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1545 ring->rx_stats.csum_err++;
1546 return;
1547 }
1548
1549 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1550 return;
1551
1552 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1553
1554
1555
1556
1557 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1558 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1559 return;
1560
1561 ring->rx_stats.csum_err++;
1562 return;
1563 }
1564
1565
1566 skb->ip_summed = CHECKSUM_UNNECESSARY;
1567 if (encap_pkt) {
1568 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1569 return;
1570
1571 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1572 skb->ip_summed = CHECKSUM_NONE;
1573 return;
1574 }
1575
1576 skb->csum_level = 1;
1577 }
1578}
1579
1580static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
1581{
1582 return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
1583}
1584
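/* Allocate and DMA-map a fresh receive page for @bi if it does not already
 * hold one; returns false (and counts the failure) on allocation or mapping
 * errors so the caller can stop refilling the ring.
 */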
1585static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1586 struct ixgbe_rx_buffer *bi)
1587{
1588 struct page *page = bi->page;
1589 dma_addr_t dma;
1590
1591
1592 if (likely(page))
1593 return true;
1594
1595
1596 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
1597 if (unlikely(!page)) {
1598 rx_ring->rx_stats.alloc_rx_page_failed++;
1599 return false;
1600 }
1601
1602
1603 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1604 ixgbe_rx_pg_size(rx_ring),
1605 DMA_FROM_DEVICE,
1606 IXGBE_RX_DMA_ATTR);
1607
1608
1609
1610
1611
1612 if (dma_mapping_error(rx_ring->dev, dma)) {
1613 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1614
1615 rx_ring->rx_stats.alloc_rx_page_failed++;
1616 return false;
1617 }
1618
1619 bi->dma = dma;
1620 bi->page = page;
1621 bi->page_offset = ixgbe_rx_offset(rx_ring);
1622 bi->pagecnt_bias = 1;
1623
1624 return true;
1625}
1626
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
1632void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1633{
1634 union ixgbe_adv_rx_desc *rx_desc;
1635 struct ixgbe_rx_buffer *bi;
1636 u16 i = rx_ring->next_to_use;
1637 u16 bufsz;
1638
1639
1640 if (!cleaned_count)
1641 return;
1642
1643 rx_desc = IXGBE_RX_DESC(rx_ring, i);
1644 bi = &rx_ring->rx_buffer_info[i];
1645 i -= rx_ring->count;
1646
1647 bufsz = ixgbe_rx_bufsz(rx_ring);
1648
1649 do {
1650 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1651 break;
1652
1653
1654 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1655 bi->page_offset, bufsz,
1656 DMA_FROM_DEVICE);
1657
1658
1659
1660
1661
1662 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1663
1664 rx_desc++;
1665 bi++;
1666 i++;
1667 if (unlikely(!i)) {
1668 rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1669 bi = rx_ring->rx_buffer_info;
1670 i -= rx_ring->count;
1671 }
1672
1673
1674 rx_desc->wb.upper.length = 0;
1675
1676 cleaned_count--;
1677 } while (cleaned_count);
1678
1679 i += rx_ring->count;
1680
1681 if (rx_ring->next_to_use != i) {
1682 rx_ring->next_to_use = i;
1683
1684
1685 rx_ring->next_to_alloc = i;
1686
1687
1688
1689
1690
1691
1692 wmb();
1693 writel(i, rx_ring->tail);
1694 }
1695}
1696
1697static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1698 struct sk_buff *skb)
1699{
1700 u16 hdr_len = skb_headlen(skb);
1701
1702
1703 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1704 IXGBE_CB(skb)->append_cnt);
1705 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1706}
1707
1708static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1709 struct sk_buff *skb)
1710{
1711
1712 if (!IXGBE_CB(skb)->append_cnt)
1713 return;
1714
1715 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1716 rx_ring->rx_stats.rsc_flush++;
1717
1718 ixgbe_set_rsc_gso_size(rx_ring, skb);
1719
1720
1721 IXGBE_CB(skb)->append_cnt = 0;
1722}
1723
/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
1734static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1735 union ixgbe_adv_rx_desc *rx_desc,
1736 struct sk_buff *skb)
1737{
1738 struct net_device *dev = rx_ring->netdev;
1739 u32 flags = rx_ring->q_vector->adapter->flags;
1740
1741 ixgbe_update_rsc_stats(rx_ring, skb);
1742
1743 ixgbe_rx_hash(rx_ring, rx_desc, skb);
1744
1745 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1746
1747 if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
1748 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
1749
1750 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1751 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1752 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1753 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1754 }
1755
1756 skb_record_rx_queue(skb, rx_ring->queue_index);
1757
1758 skb->protocol = eth_type_trans(skb, dev);
1759}
1760
1761static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1762 struct sk_buff *skb)
1763{
1764 napi_gro_receive(&q_vector->napi, skb);
1765}
1766
/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
1778static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1779 union ixgbe_adv_rx_desc *rx_desc,
1780 struct sk_buff *skb)
1781{
1782 u32 ntc = rx_ring->next_to_clean + 1;
1783
1784
1785 ntc = (ntc < rx_ring->count) ? ntc : 0;
1786 rx_ring->next_to_clean = ntc;
1787
1788 prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1789
1790
1791 if (ring_is_rsc_enabled(rx_ring)) {
1792 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1793 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1794
1795 if (unlikely(rsc_enabled)) {
1796 u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1797
1798 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1799 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1800
1801
1802 ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1803 ntc &= IXGBE_RXDADV_NEXTP_MASK;
1804 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1805 }
1806 }
1807
1808
1809 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1810 return false;
1811
1812
1813 rx_ring->rx_buffer_info[ntc].skb = skb;
1814 rx_ring->rx_stats.non_eop_descs++;
1815
1816 return true;
1817}
1818
/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
1831static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1832 struct sk_buff *skb)
1833{
1834 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1835 unsigned char *va;
1836 unsigned int pull_len;
1837
1838
1839
1840
1841
1842
1843 va = skb_frag_address(frag);
1844
1845
1846
1847
1848
1849 pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
1850
1851
1852 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1853
1854
1855 skb_frag_size_sub(frag, pull_len);
1856 frag->page_offset += pull_len;
1857 skb->data_len -= pull_len;
1858 skb->tail += pull_len;
1859}
1860
/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */
1871static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1872 struct sk_buff *skb)
1873{
1874
1875 if (unlikely(IXGBE_CB(skb)->page_released)) {
1876 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
1877 ixgbe_rx_pg_size(rx_ring),
1878 DMA_FROM_DEVICE,
1879 IXGBE_RX_DMA_ATTR);
1880 } else {
1881 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1882
1883 dma_sync_single_range_for_cpu(rx_ring->dev,
1884 IXGBE_CB(skb)->dma,
1885 frag->page_offset,
1886 skb_frag_size(frag),
1887 DMA_FROM_DEVICE);
1888 }
1889}
1890
/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check if the skb is valid; in the XDP case it will be an error pointer.
 * Return true in this case to abort processing and advance to next
 * descriptor.
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
1913static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1914 union ixgbe_adv_rx_desc *rx_desc,
1915 struct sk_buff *skb)
1916{
1917 struct net_device *netdev = rx_ring->netdev;
1918
1919
1920 if (IS_ERR(skb))
1921 return true;
1922
1923
1924 if (unlikely(ixgbe_test_staterr(rx_desc,
1925 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1926 !(netdev->features & NETIF_F_RXALL))) {
1927 dev_kfree_skb_any(skb);
1928 return true;
1929 }
1930
1931
1932 if (!skb_headlen(skb))
1933 ixgbe_pull_tail(rx_ring, skb);
1934
1935#ifdef IXGBE_FCOE
1936
1937 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1938 return false;
1939
1940#endif
1941
1942 if (eth_skb_pad(skb))
1943 return true;
1944
1945 return false;
1946}
1947
/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
1955static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1956 struct ixgbe_rx_buffer *old_buff)
1957{
1958 struct ixgbe_rx_buffer *new_buff;
1959 u16 nta = rx_ring->next_to_alloc;
1960
1961 new_buff = &rx_ring->rx_buffer_info[nta];
1962
1963
1964 nta++;
1965 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1966
1967
1968
1969
1970
1971 new_buff->dma = old_buff->dma;
1972 new_buff->page = old_buff->page;
1973 new_buff->page_offset = old_buff->page_offset;
1974 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1975}
1976
1977static inline bool ixgbe_page_is_reserved(struct page *page)
1978{
1979 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1980}
1981
1982static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
1983{
1984 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1985 struct page *page = rx_buffer->page;
1986
1987
1988 if (unlikely(ixgbe_page_is_reserved(page)))
1989 return false;
1990
1991#if (PAGE_SIZE < 8192)
1992
1993 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
1994 return false;
1995#else
1996
1997
1998
1999
2000
2001#define IXGBE_LAST_OFFSET \
2002 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
2003 if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
2004 return false;
2005#endif
2006
2007
2008
2009
2010
2011 if (unlikely(!pagecnt_bias)) {
2012 page_ref_add(page, USHRT_MAX);
2013 rx_buffer->pagecnt_bias = USHRT_MAX;
2014 }
2015
2016 return true;
2017}
2018
/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of data in rx_buffer
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done by attaching the page as a frag to the skb.
 *
 * The function will then update the page offset so the other half of the
 * page (or the next region of a larger page) can be handed back to the
 * hardware.
 **/
2034static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
2035 struct ixgbe_rx_buffer *rx_buffer,
2036 struct sk_buff *skb,
2037 unsigned int size)
2038{
2039#if (PAGE_SIZE < 8192)
2040 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2041#else
2042 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2043 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
2044 SKB_DATA_ALIGN(size);
2045#endif
2046 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
2047 rx_buffer->page_offset, size, truesize);
2048#if (PAGE_SIZE < 8192)
2049 rx_buffer->page_offset ^= truesize;
2050#else
2051 rx_buffer->page_offset += truesize;
2052#endif
2053}
2054
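/* Fetch the Rx buffer at next_to_clean and DMA-sync the region written by
 * hardware so the CPU can safely inspect the packet data; also takes a
 * reference on the page via the pagecnt_bias bookkeeping.
 */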
2055static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
2056 union ixgbe_adv_rx_desc *rx_desc,
2057 struct sk_buff **skb,
2058 const unsigned int size)
2059{
2060 struct ixgbe_rx_buffer *rx_buffer;
2061
2062 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
2063 prefetchw(rx_buffer->page);
2064 *skb = rx_buffer->skb;
2065
2066
2067
2068
2069
2070 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
2071 if (!*skb)
2072 goto skip_sync;
2073 } else {
2074 if (*skb)
2075 ixgbe_dma_sync_frag(rx_ring, *skb);
2076 }
2077
2078
2079 dma_sync_single_range_for_cpu(rx_ring->dev,
2080 rx_buffer->dma,
2081 rx_buffer->page_offset,
2082 size,
2083 DMA_FROM_DEVICE);
2084skip_sync:
2085 rx_buffer->pagecnt_bias--;
2086
2087 return rx_buffer;
2088}
2089
2090static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
2091 struct ixgbe_rx_buffer *rx_buffer,
2092 struct sk_buff *skb)
2093{
2094 if (ixgbe_can_reuse_rx_page(rx_buffer)) {
2095
2096 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2097 } else {
2098 if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
2099
2100 IXGBE_CB(skb)->page_released = true;
2101 } else {
2102
2103 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2104 ixgbe_rx_pg_size(rx_ring),
2105 DMA_FROM_DEVICE,
2106 IXGBE_RX_DMA_ATTR);
2107 }
2108 __page_frag_cache_drain(rx_buffer->page,
2109 rx_buffer->pagecnt_bias);
2110 }
2111
2112
2113 rx_buffer->page = NULL;
2114 rx_buffer->skb = NULL;
2115}
2116
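/* Allocate a new skb and either copy a small packet into its linear area or
 * attach the receive page as a fragment for larger packets.
 */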
2117static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
2118 struct ixgbe_rx_buffer *rx_buffer,
2119 struct xdp_buff *xdp,
2120 union ixgbe_adv_rx_desc *rx_desc)
2121{
2122 unsigned int size = xdp->data_end - xdp->data;
2123#if (PAGE_SIZE < 8192)
2124 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2125#else
2126 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
2127 xdp->data_hard_start);
2128#endif
2129 struct sk_buff *skb;
2130
2131
2132 prefetch(xdp->data);
2133#if L1_CACHE_BYTES < 128
2134 prefetch(xdp->data + L1_CACHE_BYTES);
2135#endif
2136
2137
2138 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
2139 if (unlikely(!skb))
2140 return NULL;
2141
2142 if (size > IXGBE_RX_HDR_SIZE) {
2143 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2144 IXGBE_CB(skb)->dma = rx_buffer->dma;
2145
2146 skb_add_rx_frag(skb, 0, rx_buffer->page,
2147 xdp->data - page_address(rx_buffer->page),
2148 size, truesize);
2149#if (PAGE_SIZE < 8192)
2150 rx_buffer->page_offset ^= truesize;
2151#else
2152 rx_buffer->page_offset += truesize;
2153#endif
2154 } else {
2155 memcpy(__skb_put(skb, size),
2156 xdp->data, ALIGN(size, sizeof(long)));
2157 rx_buffer->pagecnt_bias++;
2158 }
2159
2160 return skb;
2161}
2162
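/* Build an skb directly around the page fragment that already holds the
 * received data (no copy), reserving headroom and the shared info area.
 */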
2163static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
2164 struct ixgbe_rx_buffer *rx_buffer,
2165 struct xdp_buff *xdp,
2166 union ixgbe_adv_rx_desc *rx_desc)
2167{
2168#if (PAGE_SIZE < 8192)
2169 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2170#else
2171 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2172 SKB_DATA_ALIGN(xdp->data_end -
2173 xdp->data_hard_start);
2174#endif
2175 struct sk_buff *skb;
2176
2177
2178 prefetch(xdp->data);
2179#if L1_CACHE_BYTES < 128
2180 prefetch(xdp->data + L1_CACHE_BYTES);
2181#endif
2182
2183
2184 skb = build_skb(xdp->data_hard_start, truesize);
2185 if (unlikely(!skb))
2186 return NULL;
2187
2188
2189 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2190 __skb_put(skb, xdp->data_end - xdp->data);
2191
2192
2193 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2194 IXGBE_CB(skb)->dma = rx_buffer->dma;
2195
2196
2197#if (PAGE_SIZE < 8192)
2198 rx_buffer->page_offset ^= truesize;
2199#else
2200 rx_buffer->page_offset += truesize;
2201#endif
2202
2203 return skb;
2204}
2205
2206#define IXGBE_XDP_PASS 0
2207#define IXGBE_XDP_CONSUMED 1
2208#define IXGBE_XDP_TX 2
2209
2210static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
2211 struct xdp_buff *xdp);
2212
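/* Run the attached XDP program (if any) on the received buffer and map its
 * verdict to the driver's IXGBE_XDP_* result codes, returned as an ERR_PTR.
 */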
2213static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
2214 struct ixgbe_ring *rx_ring,
2215 struct xdp_buff *xdp)
2216{
2217 int err, result = IXGBE_XDP_PASS;
2218 struct bpf_prog *xdp_prog;
2219 u32 act;
2220
2221 rcu_read_lock();
2222 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2223
2224 if (!xdp_prog)
2225 goto xdp_out;
2226
2227 act = bpf_prog_run_xdp(xdp_prog, xdp);
2228 switch (act) {
2229 case XDP_PASS:
2230 break;
2231 case XDP_TX:
2232 result = ixgbe_xmit_xdp_ring(adapter, xdp);
2233 break;
2234 case XDP_REDIRECT:
2235 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
2236 if (!err)
2237 result = IXGBE_XDP_TX;
2238 else
2239 result = IXGBE_XDP_CONSUMED;
2240 break;
2241 default:
2242 bpf_warn_invalid_xdp_action(act);
2243
2244 case XDP_ABORTED:
2245 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2246
2247 case XDP_DROP:
2248 result = IXGBE_XDP_CONSUMED;
2249 break;
2250 }
2251xdp_out:
2252 rcu_read_unlock();
2253 return ERR_PTR(-result);
2254}
2255
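/* Advance (or, for half-page buffers, flip) the page offset so the remaining
 * part of the page can be handed back to hardware after XDP consumed this
 * buffer (e.g. XDP_TX).
 */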
2256static void ixgbe_rx_buffer_flip(struct ixgbe_ring *rx_ring,
2257 struct ixgbe_rx_buffer *rx_buffer,
2258 unsigned int size)
2259{
2260#if (PAGE_SIZE < 8192)
2261 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2262
2263 rx_buffer->page_offset ^= truesize;
2264#else
2265 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
2266 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
2267 SKB_DATA_ALIGN(size);
2268
2269 rx_buffer->page_offset += truesize;
2270#endif
2271}
2272
/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
2286static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2287 struct ixgbe_ring *rx_ring,
2288 const int budget)
2289{
2290 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2291 struct ixgbe_adapter *adapter = q_vector->adapter;
2292#ifdef IXGBE_FCOE
2293 int ddp_bytes;
2294 unsigned int mss = 0;
2295#endif
2296 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2297 bool xdp_xmit = false;
2298
2299 while (likely(total_rx_packets < budget)) {
2300 union ixgbe_adv_rx_desc *rx_desc;
2301 struct ixgbe_rx_buffer *rx_buffer;
2302 struct sk_buff *skb;
2303 struct xdp_buff xdp;
2304 unsigned int size;
2305
2306
2307 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2308 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2309 cleaned_count = 0;
2310 }
2311
2312 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2313 size = le16_to_cpu(rx_desc->wb.upper.length);
2314 if (!size)
2315 break;
2316
2317
2318
2319
2320
2321 dma_rmb();
2322
2323 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
2324
2325
2326 if (!skb) {
2327 xdp.data = page_address(rx_buffer->page) +
2328 rx_buffer->page_offset;
2329 xdp.data_hard_start = xdp.data -
2330 ixgbe_rx_offset(rx_ring);
2331 xdp.data_end = xdp.data + size;
2332
2333 skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
2334 }
2335
2336 if (IS_ERR(skb)) {
2337 if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
2338 xdp_xmit = true;
2339 ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
2340 } else {
2341 rx_buffer->pagecnt_bias++;
2342 }
2343 total_rx_packets++;
2344 total_rx_bytes += size;
2345 } else if (skb) {
2346 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
2347 } else if (ring_uses_build_skb(rx_ring)) {
2348 skb = ixgbe_build_skb(rx_ring, rx_buffer,
2349 &xdp, rx_desc);
2350 } else {
2351 skb = ixgbe_construct_skb(rx_ring, rx_buffer,
2352 &xdp, rx_desc);
2353 }
2354
2355
2356 if (!skb) {
2357 rx_ring->rx_stats.alloc_rx_buff_failed++;
2358 rx_buffer->pagecnt_bias++;
2359 break;
2360 }
2361
2362 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
2363 cleaned_count++;
2364
2365
2366 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2367 continue;
2368
2369
2370 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2371 continue;
2372
2373
2374 total_rx_bytes += skb->len;
2375
2376
2377 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2378
2379#ifdef IXGBE_FCOE
2380
2381 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2382 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2383
2384 if (ddp_bytes > 0) {
2385 if (!mss) {
2386 mss = rx_ring->netdev->mtu -
2387 sizeof(struct fcoe_hdr) -
2388 sizeof(struct fc_frame_header) -
2389 sizeof(struct fcoe_crc_eof);
2390 if (mss > 512)
2391 mss &= ~511;
2392 }
2393 total_rx_bytes += ddp_bytes;
2394 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2395 mss);
2396 }
2397 if (!ddp_bytes) {
2398 dev_kfree_skb_any(skb);
2399 continue;
2400 }
2401 }
2402
2403#endif
2404 ixgbe_rx_skb(q_vector, skb);
2405
2406
2407 total_rx_packets++;
2408 }
2409
2410 if (xdp_xmit) {
2411 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
2412
		/* Force memory writes to complete before letting h/w know
		 * there are new descriptors to fetch.
		 */
2416 wmb();
2417 writel(ring->next_to_use, ring->tail);
2418
2419 xdp_do_flush_map();
2420 }
2421
2422 u64_stats_update_begin(&rx_ring->syncp);
2423 rx_ring->stats.packets += total_rx_packets;
2424 rx_ring->stats.bytes += total_rx_bytes;
2425 u64_stats_update_end(&rx_ring->syncp);
2426 q_vector->rx.total_packets += total_rx_packets;
2427 q_vector->rx.total_bytes += total_rx_bytes;
2428
2429 return total_rx_packets;
2430}
2438
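/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/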
2439static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2440{
2441 struct ixgbe_q_vector *q_vector;
2442 int v_idx;
2443 u32 mask;
2444
2445
2446 if (adapter->num_vfs > 32) {
2447 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
2448 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2449 }
2450
	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
2455 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2456 struct ixgbe_ring *ring;
2457 q_vector = adapter->q_vector[v_idx];
2458
2459 ixgbe_for_each_ring(ring, q_vector->rx)
2460 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2461
2462 ixgbe_for_each_ring(ring, q_vector->tx)
2463 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2464
2465 ixgbe_write_eitr(q_vector);
2466 }
2467
2468 switch (adapter->hw.mac.type) {
2469 case ixgbe_mac_82598EB:
2470 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2471 v_idx);
2472 break;
2473 case ixgbe_mac_82599EB:
2474 case ixgbe_mac_X540:
2475 case ixgbe_mac_X550:
2476 case ixgbe_mac_X550EM_x:
2477 case ixgbe_mac_x550em_a:
2478 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2479 break;
2480 default:
2481 break;
2482 }
2483 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2484
2485
2486 mask = IXGBE_EIMS_ENABLE_MASK;
2487 mask &= ~(IXGBE_EIMS_OTHER |
2488 IXGBE_EIMS_MAILBOX |
2489 IXGBE_EIMS_LSC);
2490
2491 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2492}
2493
2494enum latency_range {
2495 lowest_latency = 0,
2496 low_latency = 1,
2497 bulk_latency = 2,
2498 latency_invalid = 255
2499};
2515
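/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packet and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and a more accurate ITR for the current traffic pattern.  The thresholds
 * below map observed throughput to one of the latency_range buckets that
 * ixgbe_set_itr() later converts into an EITR value.
 **/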
2516static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2517 struct ixgbe_ring_container *ring_container)
2518{
2519 int bytes = ring_container->total_bytes;
2520 int packets = ring_container->total_packets;
2521 u32 timepassed_us;
2522 u64 bytes_perint;
2523 u8 itr_setting = ring_container->itr;
2524
2525 if (packets == 0)
2526 return;
2527
	/* simple throttle rate management
	 *    0-10MB/s   lowest (100000 ints/s)
	 *   10-20MB/s   low    (20000 ints/s)
	 *   20-1249MB/s bulk   (12000 ints/s)
	 */
	/* what was last interrupt timeslice? */
2534 timepassed_us = q_vector->itr >> 2;
2535 if (timepassed_us == 0)
2536 return;
2537
2538 bytes_perint = bytes / timepassed_us;
2539
2540 switch (itr_setting) {
2541 case lowest_latency:
2542 if (bytes_perint > 10)
2543 itr_setting = low_latency;
2544 break;
2545 case low_latency:
2546 if (bytes_perint > 20)
2547 itr_setting = bulk_latency;
2548 else if (bytes_perint <= 10)
2549 itr_setting = lowest_latency;
2550 break;
2551 case bulk_latency:
2552 if (bytes_perint <= 20)
2553 itr_setting = low_latency;
2554 break;
2555 }
2556
2557
2558 ring_container->total_bytes = 0;
2559 ring_container->total_packets = 0;
2560
2561
2562 ring_container->itr = itr_setting;
2563}
2572
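/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called whenever the driver needs to update
 * an EITR register at runtime.  Hardware specific quirks/differences are
 * taken care of here; callers are expected to have stored the desired
 * interval in q_vector->itr beforehand.
 **/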
2573void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2574{
2575 struct ixgbe_adapter *adapter = q_vector->adapter;
2576 struct ixgbe_hw *hw = &adapter->hw;
2577 int v_idx = q_vector->v_idx;
2578 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2579
2580 switch (adapter->hw.mac.type) {
2581 case ixgbe_mac_82598EB:
2582
2583 itr_reg |= (itr_reg << 16);
2584 break;
2585 case ixgbe_mac_82599EB:
2586 case ixgbe_mac_X540:
2587 case ixgbe_mac_X550:
2588 case ixgbe_mac_X550EM_x:
2589 case ixgbe_mac_x550em_a:
		/* set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
2594 itr_reg |= IXGBE_EITR_CNT_WDIS;
2595 break;
2596 default:
2597 break;
2598 }
2599 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2600}
2601
2602static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2603{
2604 u32 new_itr = q_vector->itr;
2605 u8 current_itr;
2606
2607 ixgbe_update_itr(q_vector, &q_vector->tx);
2608 ixgbe_update_itr(q_vector, &q_vector->rx);
2609
2610 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
2611
2612 switch (current_itr) {
2613
2614 case lowest_latency:
2615 new_itr = IXGBE_100K_ITR;
2616 break;
2617 case low_latency:
2618 new_itr = IXGBE_20K_ITR;
2619 break;
2620 case bulk_latency:
2621 new_itr = IXGBE_12K_ITR;
2622 break;
2623 default:
2624 break;
2625 }
2626
2627 if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
2629 new_itr = (10 * new_itr * q_vector->itr) /
2630 ((9 * new_itr) + q_vector->itr);
2631
2632
2633 q_vector->itr = new_itr;
2634
2635 ixgbe_write_eitr(q_vector);
2636 }
2637}
2638
2639
2640
2641
2642
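/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/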
2643static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2644{
2645 struct ixgbe_hw *hw = &adapter->hw;
2646 u32 eicr = adapter->interrupt_event;
2647 s32 rc;
2648
2649 if (test_bit(__IXGBE_DOWN, &adapter->state))
2650 return;
2651
2652 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2653 return;
2654
2655 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2656
2657 switch (hw->device_id) {
2658 case IXGBE_DEV_ID_82599_T3_LOM:
		/* Since the warning interrupt is for both ports
		 * we don't have to check if:
		 *  - This interrupt wasn't for our port.
		 *  - We may have missed the interrupt, so always have to
		 *    check if we got a LSC
		 */
2666 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2667 !(eicr & IXGBE_EICR_LSC))
2668 return;
2669
2670 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2671 u32 speed;
2672 bool link_up = false;
2673
2674 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2675
2676 if (link_up)
2677 return;
2678 }
2679
2680
2681 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2682 return;
2683
2684 break;
2685 case IXGBE_DEV_ID_X550EM_A_1G_T:
2686 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2687 rc = hw->phy.ops.check_overtemp(hw);
2688 if (rc != IXGBE_ERR_OVERTEMP)
2689 return;
2690 break;
2691 default:
2692 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2693 return;
2694 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2695 return;
2696 break;
2697 }
2698 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2699
2700 adapter->interrupt_event = 0;
2701}
2702
2703static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2704{
2705 struct ixgbe_hw *hw = &adapter->hw;
2706
2707 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2708 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2709 e_crit(probe, "Fan has stopped, replace the adapter\n");
2710
2711 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2712 }
2713}
2714
2715static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2716{
2717 struct ixgbe_hw *hw = &adapter->hw;
2718
2719 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2720 return;
2721
2722 switch (adapter->hw.mac.type) {
2723 case ixgbe_mac_82599EB:
		/* Need to check link state so complete overtemp check
		 * on service task
		 */
2728 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2729 (eicr & IXGBE_EICR_LSC)) &&
2730 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2731 adapter->interrupt_event = eicr;
2732 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2733 ixgbe_service_event_schedule(adapter);
2734 return;
2735 }
2736 return;
2737 case ixgbe_mac_x550em_a:
2738 if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
2739 adapter->interrupt_event = eicr;
2740 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2741 ixgbe_service_event_schedule(adapter);
2742 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
2743 IXGBE_EICR_GPI_SDP0_X550EM_a);
2744 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
2745 IXGBE_EICR_GPI_SDP0_X550EM_a);
2746 }
2747 return;
2748 case ixgbe_mac_X550:
2749 case ixgbe_mac_X540:
2750 if (!(eicr & IXGBE_EICR_TS))
2751 return;
2752 break;
2753 default:
2754 return;
2755 }
2756
2757 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2758}
2759
2760static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2761{
2762 switch (hw->mac.type) {
2763 case ixgbe_mac_82598EB:
2764 if (hw->phy.type == ixgbe_phy_nl)
2765 return true;
2766 return false;
2767 case ixgbe_mac_82599EB:
2768 case ixgbe_mac_X550EM_x:
2769 case ixgbe_mac_x550em_a:
2770 switch (hw->mac.ops.get_media_type(hw)) {
2771 case ixgbe_media_type_fiber:
2772 case ixgbe_media_type_fiber_qsfp:
2773 return true;
2774 default:
2775 return false;
2776 }
2777 default:
2778 return false;
2779 }
2780}
2781
2782static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2783{
2784 struct ixgbe_hw *hw = &adapter->hw;
2785 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2786
2787 if (!ixgbe_is_sfp(hw))
2788 return;
2789
2790
2791 if (hw->mac.type >= ixgbe_mac_X540)
2792 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2793
2794 if (eicr & eicr_mask) {
2795
2796 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2797 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2798 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2799 adapter->sfp_poll_time = 0;
2800 ixgbe_service_event_schedule(adapter);
2801 }
2802 }
2803
2804 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2805 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2806
2807 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2808 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2809 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2810 ixgbe_service_event_schedule(adapter);
2811 }
2812 }
2813}
2814
2815static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2816{
2817 struct ixgbe_hw *hw = &adapter->hw;
2818
2819 adapter->lsc_int++;
2820 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2821 adapter->link_check_timeout = jiffies;
2822 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2823 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2824 IXGBE_WRITE_FLUSH(hw);
2825 ixgbe_service_event_schedule(adapter);
2826 }
2827}
2828
2829static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2830 u64 qmask)
2831{
2832 u32 mask;
2833 struct ixgbe_hw *hw = &adapter->hw;
2834
2835 switch (hw->mac.type) {
2836 case ixgbe_mac_82598EB:
2837 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2838 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2839 break;
2840 case ixgbe_mac_82599EB:
2841 case ixgbe_mac_X540:
2842 case ixgbe_mac_X550:
2843 case ixgbe_mac_X550EM_x:
2844 case ixgbe_mac_x550em_a:
2845 mask = (qmask & 0xFFFFFFFF);
2846 if (mask)
2847 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2848 mask = (qmask >> 32);
2849 if (mask)
2850 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2851 break;
2852 default:
2853 break;
2854 }
2855
2856}
2857
2858static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2859 u64 qmask)
2860{
2861 u32 mask;
2862 struct ixgbe_hw *hw = &adapter->hw;
2863
2864 switch (hw->mac.type) {
2865 case ixgbe_mac_82598EB:
2866 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2867 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2868 break;
2869 case ixgbe_mac_82599EB:
2870 case ixgbe_mac_X540:
2871 case ixgbe_mac_X550:
2872 case ixgbe_mac_X550EM_x:
2873 case ixgbe_mac_x550em_a:
2874 mask = (qmask & 0xFFFFFFFF);
2875 if (mask)
2876 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2877 mask = (qmask >> 32);
2878 if (mask)
2879 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2880 break;
2881 default:
2882 break;
2883 }
2884
2885}
2886
2887
2888
2889
2890
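/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @queues: enable irqs for queues
 * @flush: flush register write
 **/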
2891static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2892 bool flush)
2893{
2894 struct ixgbe_hw *hw = &adapter->hw;
2895 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2896
2897
2898 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2899 mask &= ~IXGBE_EIMS_LSC;
2900
2901 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2902 switch (adapter->hw.mac.type) {
2903 case ixgbe_mac_82599EB:
2904 mask |= IXGBE_EIMS_GPI_SDP0(hw);
2905 break;
2906 case ixgbe_mac_X540:
2907 case ixgbe_mac_X550:
2908 case ixgbe_mac_X550EM_x:
2909 case ixgbe_mac_x550em_a:
2910 mask |= IXGBE_EIMS_TS;
2911 break;
2912 default:
2913 break;
2914 }
2915 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2916 mask |= IXGBE_EIMS_GPI_SDP1(hw);
2917 switch (adapter->hw.mac.type) {
2918 case ixgbe_mac_82599EB:
2919 mask |= IXGBE_EIMS_GPI_SDP1(hw);
2920 mask |= IXGBE_EIMS_GPI_SDP2(hw);
		/* fall through */
2922 case ixgbe_mac_X540:
2923 case ixgbe_mac_X550:
2924 case ixgbe_mac_X550EM_x:
2925 case ixgbe_mac_x550em_a:
2926 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
2927 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
2928 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
2929 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
2930 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
2931 mask |= IXGBE_EICR_GPI_SDP0_X540;
2932 mask |= IXGBE_EIMS_ECC;
2933 mask |= IXGBE_EIMS_MAILBOX;
2934 break;
2935 default:
2936 break;
2937 }
2938
2939 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2940 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
2941 mask |= IXGBE_EIMS_FLOW_DIR;
2942
2943 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2944 if (queues)
2945 ixgbe_irq_enable_queues(adapter, ~0);
2946 if (flush)
2947 IXGBE_WRITE_FLUSH(&adapter->hw);
2948}
2949
2950static irqreturn_t ixgbe_msix_other(int irq, void *data)
2951{
2952 struct ixgbe_adapter *adapter = data;
2953 struct ixgbe_hw *hw = &adapter->hw;
2954 u32 eicr;
2955
	/* Workaround for Silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which is later done
	 * with the write to EICR.
	 */
2962 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2963
	/* The lower 16 bits of the EICR register are for the queue interrupts
	 * which should be masked here in order to not accidentally clear them if
	 * the bits are high when ixgbe_msix_other is called.  There is a race
	 * condition otherwise which results in possible performance loss
	 * especially if the ixgbe_msix_other interrupt is triggered
	 * frequently.
	 */
2971 eicr &= 0xFFFF0000;
2972
2973 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2974
2975 if (eicr & IXGBE_EICR_LSC)
2976 ixgbe_check_lsc(adapter);
2977
2978 if (eicr & IXGBE_EICR_MAILBOX)
2979 ixgbe_msg_task(adapter);
2980
2981 switch (hw->mac.type) {
2982 case ixgbe_mac_82599EB:
2983 case ixgbe_mac_X540:
2984 case ixgbe_mac_X550:
2985 case ixgbe_mac_X550EM_x:
2986 case ixgbe_mac_x550em_a:
2987 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
2988 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2989 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
2990 ixgbe_service_event_schedule(adapter);
2991 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2992 IXGBE_EICR_GPI_SDP0_X540);
2993 }
2994 if (eicr & IXGBE_EICR_ECC) {
2995 e_info(link, "Received ECC Err, initiating reset\n");
2996 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
2997 ixgbe_service_event_schedule(adapter);
2998 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2999 }
3000
3001 if (eicr & IXGBE_EICR_FLOW_DIR) {
3002 int reinit_count = 0;
3003 int i;
3004 for (i = 0; i < adapter->num_tx_queues; i++) {
3005 struct ixgbe_ring *ring = adapter->tx_ring[i];
3006 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
3007 &ring->state))
3008 reinit_count++;
3009 }
3010 if (reinit_count) {
3011
3012 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3013 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
3014 ixgbe_service_event_schedule(adapter);
3015 }
3016 }
3017 ixgbe_check_sfp_event(adapter, eicr);
3018 ixgbe_check_overtemp_event(adapter, eicr);
3019 break;
3020 default:
3021 break;
3022 }
3023
3024 ixgbe_check_fan_failure(adapter, eicr);
3025
3026 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3027 ixgbe_ptp_check_pps_event(adapter);
3028
3029
3030 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3031 ixgbe_irq_enable(adapter, false, false);
3032
3033 return IRQ_HANDLED;
3034}
3035
3036static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
3037{
3038 struct ixgbe_q_vector *q_vector = data;
3039
3040
3041
3042 if (q_vector->rx.ring || q_vector->tx.ring)
3043 napi_schedule_irqoff(&q_vector->napi);
3044
3045 return IRQ_HANDLED;
3046}
3054
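/**
 * ixgbe_poll - NAPI polling RX/TX cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 **/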
3055int ixgbe_poll(struct napi_struct *napi, int budget)
3056{
3057 struct ixgbe_q_vector *q_vector =
3058 container_of(napi, struct ixgbe_q_vector, napi);
3059 struct ixgbe_adapter *adapter = q_vector->adapter;
3060 struct ixgbe_ring *ring;
3061 int per_ring_budget, work_done = 0;
3062 bool clean_complete = true;
3063
3064#ifdef CONFIG_IXGBE_DCA
3065 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3066 ixgbe_update_dca(q_vector);
3067#endif
3068
3069 ixgbe_for_each_ring(ring, q_vector->tx) {
3070 if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
3071 clean_complete = false;
3072 }
3073
3074
3075 if (budget <= 0)
3076 return budget;
3077
	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
3080 if (q_vector->rx.count > 1)
3081 per_ring_budget = max(budget/q_vector->rx.count, 1);
3082 else
3083 per_ring_budget = budget;
3084
3085 ixgbe_for_each_ring(ring, q_vector->rx) {
3086 int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
3087 per_ring_budget);
3088
3089 work_done += cleaned;
3090 if (cleaned >= per_ring_budget)
3091 clean_complete = false;
3092 }
3093
3094
3095 if (!clean_complete)
3096 return budget;
3097
3098
3099 napi_complete_done(napi, work_done);
3100 if (adapter->rx_itr_setting & 1)
3101 ixgbe_set_itr(q_vector);
3102 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3103 ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
3104
3105 return min(work_done, budget - 1);
3106}
3114
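/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/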
3115static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
3116{
3117 struct net_device *netdev = adapter->netdev;
3118 unsigned int ri = 0, ti = 0;
3119 int vector, err;
3120
3121 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3122 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3123 struct msix_entry *entry = &adapter->msix_entries[vector];
3124
3125 if (q_vector->tx.ring && q_vector->rx.ring) {
3126 snprintf(q_vector->name, sizeof(q_vector->name),
3127 "%s-TxRx-%u", netdev->name, ri++);
3128 ti++;
3129 } else if (q_vector->rx.ring) {
3130 snprintf(q_vector->name, sizeof(q_vector->name),
3131 "%s-rx-%u", netdev->name, ri++);
3132 } else if (q_vector->tx.ring) {
3133 snprintf(q_vector->name, sizeof(q_vector->name),
3134 "%s-tx-%u", netdev->name, ti++);
3135 } else {
3136
3137 continue;
3138 }
3139 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
3140 q_vector->name, q_vector);
3141 if (err) {
3142 e_err(probe, "request_irq failed for MSIX interrupt "
3143 "Error: %d\n", err);
3144 goto free_queue_irqs;
3145 }
3146
3147 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3148
3149 irq_set_affinity_hint(entry->vector,
3150 &q_vector->affinity_mask);
3151 }
3152 }
3153
3154 err = request_irq(adapter->msix_entries[vector].vector,
3155 ixgbe_msix_other, 0, netdev->name, adapter);
3156 if (err) {
3157 e_err(probe, "request_irq for msix_other failed: %d\n", err);
3158 goto free_queue_irqs;
3159 }
3160
3161 return 0;
3162
3163free_queue_irqs:
3164 while (vector) {
3165 vector--;
3166 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
3167 NULL);
3168 free_irq(adapter->msix_entries[vector].vector,
3169 adapter->q_vector[vector]);
3170 }
3171 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3172 pci_disable_msix(adapter->pdev);
3173 kfree(adapter->msix_entries);
3174 adapter->msix_entries = NULL;
3175 return err;
3176}
3177
3178
3179
3180
3181
3182
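/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/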
3183static irqreturn_t ixgbe_intr(int irq, void *data)
3184{
3185 struct ixgbe_adapter *adapter = data;
3186 struct ixgbe_hw *hw = &adapter->hw;
3187 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3188 u32 eicr;
3189
	/* Workaround for silicon errata #26 on 82598.  Mask the interrupt
	 * before the read of EICR.
	 */
3194 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3195
	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read,
	 * therefore no explicit interrupt disable is necessary
	 */
3198 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3199 if (!eicr) {
		/* shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM.
		 * finish the workaround of silicon errata on 82598.  Unmask
		 * the interrupt that we masked before the EICR read.
		 */
3207 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3208 ixgbe_irq_enable(adapter, true, true);
3209 return IRQ_NONE;
3210 }
3211
3212 if (eicr & IXGBE_EICR_LSC)
3213 ixgbe_check_lsc(adapter);
3214
3215 switch (hw->mac.type) {
3216 case ixgbe_mac_82599EB:
3217 ixgbe_check_sfp_event(adapter, eicr);
		/* Fall through */
3219 case ixgbe_mac_X540:
3220 case ixgbe_mac_X550:
3221 case ixgbe_mac_X550EM_x:
3222 case ixgbe_mac_x550em_a:
3223 if (eicr & IXGBE_EICR_ECC) {
3224 e_info(link, "Received ECC Err, initiating reset\n");
3225 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3226 ixgbe_service_event_schedule(adapter);
3227 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3228 }
3229 ixgbe_check_overtemp_event(adapter, eicr);
3230 break;
3231 default:
3232 break;
3233 }
3234
3235 ixgbe_check_fan_failure(adapter, eicr);
3236 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3237 ixgbe_ptp_check_pps_event(adapter);
3238
3239
3240 napi_schedule_irqoff(&q_vector->napi);
3241
	/* re-enable link(maybe) and non-queue interrupts, no flush.
	 * ixgbe_poll will re-enable the queue interrupts
	 */
3246 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3247 ixgbe_irq_enable(adapter, false, false);
3248
3249 return IRQ_HANDLED;
3250}
3258
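/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/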
3259static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
3260{
3261 struct net_device *netdev = adapter->netdev;
3262 int err;
3263
3264 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3265 err = ixgbe_request_msix_irqs(adapter);
3266 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
3267 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
3268 netdev->name, adapter);
3269 else
3270 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
3271 netdev->name, adapter);
3272
3273 if (err)
3274 e_err(probe, "request_irq failed, Error %d\n", err);
3275
3276 return err;
3277}
3278
3279static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
3280{
3281 int vector;
3282
3283 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3284 free_irq(adapter->pdev->irq, adapter);
3285 return;
3286 }
3287
3288 if (!adapter->msix_entries)
3289 return;
3290
3291 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3292 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3293 struct msix_entry *entry = &adapter->msix_entries[vector];
3294
3295
3296 if (!q_vector->rx.ring && !q_vector->tx.ring)
3297 continue;
3298
3299
3300 irq_set_affinity_hint(entry->vector, NULL);
3301
3302 free_irq(entry->vector, q_vector);
3303 }
3304
3305 free_irq(adapter->msix_entries[vector].vector, adapter);
3306}
3307
3308
3309
3310
3311
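/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/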
3312static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3313{
3314 switch (adapter->hw.mac.type) {
3315 case ixgbe_mac_82598EB:
3316 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3317 break;
3318 case ixgbe_mac_82599EB:
3319 case ixgbe_mac_X540:
3320 case ixgbe_mac_X550:
3321 case ixgbe_mac_X550EM_x:
3322 case ixgbe_mac_x550em_a:
3323 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3324 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3325 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3326 break;
3327 default:
3328 break;
3329 }
3330 IXGBE_WRITE_FLUSH(&adapter->hw);
3331 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3332 int vector;
3333
3334 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3335 synchronize_irq(adapter->msix_entries[vector].vector);
3336
3337 synchronize_irq(adapter->msix_entries[vector++].vector);
3338 } else {
3339 synchronize_irq(adapter->pdev->irq);
3340 }
3341}
3342
3343
3344
3345
3346
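/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/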
3347static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3348{
3349 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3350
3351 ixgbe_write_eitr(q_vector);
3352
3353 ixgbe_set_ivar(adapter, 0, 0, 0);
3354 ixgbe_set_ivar(adapter, 1, 0, 0);
3355
3356 e_info(hw, "Legacy interrupt IVAR setup done\n");
3357}
3365
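/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/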
3366void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3367 struct ixgbe_ring *ring)
3368{
3369 struct ixgbe_hw *hw = &adapter->hw;
3370 u64 tdba = ring->dma;
3371 int wait_loop = 10;
3372 u32 txdctl = IXGBE_TXDCTL_ENABLE;
3373 u8 reg_idx = ring->reg_idx;
3374
3375
3376 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3377 IXGBE_WRITE_FLUSH(hw);
3378
3379 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3380 (tdba & DMA_BIT_MASK(32)));
3381 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3382 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3383 ring->count * sizeof(union ixgbe_adv_tx_desc));
3384 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3385 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3386 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3387
	/*
	 * set WTHRESH to encourage burst writeback, it should not be set
	 * higher than 1 when:
	 * - ITR is 0 as it could cause false TX hangs
	 * - ITR is set to > 100k int/sec and BQL is enabled
	 *
	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or greater than the sum of the work limit and the number of
	 * descriptors we can queue for a single transmit.
	 */
3398 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3399 txdctl |= 1u << 16;
3400 else
3401 txdctl |= 8u << 16;
3402
	/* Setting PTHRESH to 32 both improves performance
	 * and avoids a TX hang with DCB enabled
	 */
	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */
3409
3410
3411 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3412 ring->atr_sample_rate = adapter->atr_sample_rate;
3413 ring->atr_count = 0;
3414 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3415 } else {
3416 ring->atr_sample_rate = 0;
3417 }
3418
3419
3420 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3421 struct ixgbe_q_vector *q_vector = ring->q_vector;
3422
3423 if (q_vector)
3424 netif_set_xps_queue(ring->netdev,
3425 &q_vector->affinity_mask,
3426 ring->queue_index);
3427 }
3428
3429 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3430
3431
3432 memset(ring->tx_buffer_info, 0,
3433 sizeof(struct ixgbe_tx_buffer) * ring->count);
3434
3435
3436 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3437
3438
3439 if (hw->mac.type == ixgbe_mac_82598EB &&
3440 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3441 return;
3442
3443
3444 do {
3445 usleep_range(1000, 2000);
3446 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3447 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3448 if (!wait_loop)
3449 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
3450}
3451
3452static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3453{
3454 struct ixgbe_hw *hw = &adapter->hw;
3455 u32 rttdcs, mtqc;
3456 u8 tcs = netdev_get_num_tc(adapter->netdev);
3457
3458 if (hw->mac.type == ixgbe_mac_82598EB)
3459 return;
3460
3461
3462 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3463 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3464 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3465
3466
3467 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3468 mtqc = IXGBE_MTQC_VT_ENA;
3469 if (tcs > 4)
3470 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3471 else if (tcs > 1)
3472 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3473 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3474 IXGBE_82599_VMDQ_4Q_MASK)
3475 mtqc |= IXGBE_MTQC_32VF;
3476 else
3477 mtqc |= IXGBE_MTQC_64VF;
3478 } else {
3479 if (tcs > 4)
3480 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3481 else if (tcs > 1)
3482 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3483 else
3484 mtqc = IXGBE_MTQC_64Q_1PB;
3485 }
3486
3487 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3488
3489
3490 if (tcs) {
3491 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3492 sectx |= IXGBE_SECTX_DCB;
3493 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3494 }
3495
3496
3497 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3498 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3499}
3506
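/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/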
3507static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3508{
3509 struct ixgbe_hw *hw = &adapter->hw;
3510 u32 dmatxctl;
3511 u32 i;
3512
3513 ixgbe_setup_mtqc(adapter);
3514
3515 if (hw->mac.type != ixgbe_mac_82598EB) {
3516
3517 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3518 dmatxctl |= IXGBE_DMATXCTL_TE;
3519 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3520 }
3521
3522
3523 for (i = 0; i < adapter->num_tx_queues; i++)
3524 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3525 for (i = 0; i < adapter->num_xdp_queues; i++)
3526 ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);
3527}
3528
3529static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3530 struct ixgbe_ring *ring)
3531{
3532 struct ixgbe_hw *hw = &adapter->hw;
3533 u8 reg_idx = ring->reg_idx;
3534 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3535
3536 srrctl |= IXGBE_SRRCTL_DROP_EN;
3537
3538 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3539}
3540
3541static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3542 struct ixgbe_ring *ring)
3543{
3544 struct ixgbe_hw *hw = &adapter->hw;
3545 u8 reg_idx = ring->reg_idx;
3546 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3547
3548 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3549
3550 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3551}
3552
3553#ifdef CONFIG_IXGBE_DCB
3554void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3555#else
3556static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3557#endif
3558{
3559 int i;
3560 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3561
3562 if (adapter->ixgbe_ieee_pfc)
3563 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3564
	/*
	 * We should set the drop enable bit if:
	 *  SR-IOV is enabled
	 *   or
	 *  Number of Rx queues > 1 and flow control is disabled
	 *
	 *  This allows us to avoid head of line blocking for security
	 *  and performance reasons.
	 */
3574 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3575 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3576 for (i = 0; i < adapter->num_rx_queues; i++)
3577 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3578 } else {
3579 for (i = 0; i < adapter->num_rx_queues; i++)
3580 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3581 }
3582}
3583
3584#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3585
3586static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3587 struct ixgbe_ring *rx_ring)
3588{
3589 struct ixgbe_hw *hw = &adapter->hw;
3590 u32 srrctl;
3591 u8 reg_idx = rx_ring->reg_idx;
3592
3593 if (hw->mac.type == ixgbe_mac_82598EB) {
3594 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3595
		/* if VMDq is not active we must program one srrctl register
		 * per RSS queue since we have enabled RDRXCTL.MVMEN
		 */
3600 reg_idx &= mask;
3601 }
3602
3603
3604 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3605
3606
3607 if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
3608 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3609 else
3610 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3611
3612
3613 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3614
3615 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3616}
3625
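/**
 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
 * @adapter: device handle
 *
 *  - 82598/82599/X540:     128
 *  - X550(non-SRIOV mode): 512
 *  - X550(SRIOV mode):     64
 **/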
3626u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3627{
3628 if (adapter->hw.mac.type < ixgbe_mac_X550)
3629 return 128;
3630 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3631 return 64;
3632 else
3633 return 512;
3634}
3635
3636
3637
3638
3639
3640
3641
3642void ixgbe_store_key(struct ixgbe_adapter *adapter)
3643{
3644 struct ixgbe_hw *hw = &adapter->hw;
3645 int i;
3646
3647 for (i = 0; i < 10; i++)
3648 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3649}
3656
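/**
 * ixgbe_init_rss_key - Initialize adapter RSS key
 * @adapter: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 **/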
3657static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter)
3658{
3659 u32 *rss_key;
3660
3661 if (!adapter->rss_key) {
3662 rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL);
3663 if (unlikely(!rss_key))
3664 return -ENOMEM;
3665
3666 netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE);
3667 adapter->rss_key = rss_key;
3668 }
3669
3670 return 0;
3671}
3678
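/**
 * ixgbe_store_reta - Write the RETA table to HW
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
 **/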
3679void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3680{
3681 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3682 struct ixgbe_hw *hw = &adapter->hw;
3683 u32 reta = 0;
3684 u32 indices_multi;
3685 u8 *indir_tbl = adapter->rss_indir_tbl;
3686
	/* Fill out the redirection table as follows:
	 *  - 82598:      8 bit wide entries containing a pair of 4 bit RSS
	 *    indices.
	 *  - 82599/X540: 8 bit wide entries containing 4 bit RSS index
	 *  - X550:       8 bit wide entries containing 6 bit RSS index
	 */
3693 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3694 indices_multi = 0x11;
3695 else
3696 indices_multi = 0x1;
3697
3698
3699 for (i = 0; i < reta_entries; i++) {
3700 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3701 if ((i & 3) == 3) {
3702 if (i < 128)
3703 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3704 else
3705 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3706 reta);
3707 reta = 0;
3708 }
3709 }
3710}
3711
3712
3713
3714
3715
3716
3717
3718static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3719{
3720 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3721 struct ixgbe_hw *hw = &adapter->hw;
3722 u32 vfreta = 0;
3723 unsigned int pf_pool = adapter->num_vfs;
3724
3725
3726 for (i = 0; i < reta_entries; i++) {
3727 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3728 if ((i & 3) == 3) {
3729 IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
3730 vfreta);
3731 vfreta = 0;
3732 }
3733 }
3734}
3735
3736static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3737{
3738 u32 i, j;
3739 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3740 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3741
	/* Program table for at least 4 queues w/ SR-IOV so that VFs can
	 * make full use of any rings they may have.  We will use the
	 * PSRTYPE register to control how many rings we use within the PF.
	 */
3746 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
3747 rss_i = 4;
3748
3749
3750 ixgbe_store_key(adapter);
3751
3752
3753 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3754
3755 for (i = 0, j = 0; i < reta_entries; i++, j++) {
3756 if (j == rss_i)
3757 j = 0;
3758
3759 adapter->rss_indir_tbl[i] = j;
3760 }
3761
3762 ixgbe_store_reta(adapter);
3763}
3764
3765static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3766{
3767 struct ixgbe_hw *hw = &adapter->hw;
3768 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3769 unsigned int pf_pool = adapter->num_vfs;
3770 int i, j;
3771
3772
3773 for (i = 0; i < 10; i++)
3774 IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
3775 *(adapter->rss_key + i));
3776
3777
3778 for (i = 0, j = 0; i < 64; i++, j++) {
3779 if (j == rss_i)
3780 j = 0;
3781
3782 adapter->rss_indir_tbl[i] = j;
3783 }
3784
3785 ixgbe_store_vfreta(adapter);
3786}
3787
3788static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3789{
3790 struct ixgbe_hw *hw = &adapter->hw;
3791 u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3792 u32 rxcsum;
3793
3794
3795 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3796 rxcsum |= IXGBE_RXCSUM_PCSD;
3797 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3798
3799 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3800 if (adapter->ring_feature[RING_F_RSS].mask)
3801 mrqc = IXGBE_MRQC_RSSEN;
3802 } else {
3803 u8 tcs = netdev_get_num_tc(adapter->netdev);
3804
3805 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3806 if (tcs > 4)
3807 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3808 else if (tcs > 1)
3809 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3810 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3811 IXGBE_82599_VMDQ_4Q_MASK)
3812 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3813 else
3814 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3815
3816
3817 mrqc |= IXGBE_MRQC_L3L4TXSWEN;
3818 } else {
3819 if (tcs > 4)
3820 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3821 else if (tcs > 1)
3822 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3823 else
3824 mrqc = IXGBE_MRQC_RSSEN;
3825 }
3826 }
3827
3828
3829 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3830 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3831 IXGBE_MRQC_RSS_FIELD_IPV6 |
3832 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3833
3834 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3835 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3836 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3837 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3838
3839 if ((hw->mac.type >= ixgbe_mac_X550) &&
3840 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3841 unsigned int pf_pool = adapter->num_vfs;
3842
3843
3844 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
3845 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3846
3847
3848 ixgbe_setup_vfreta(adapter);
3849 vfmrqc = IXGBE_MRQC_RSSEN;
3850 vfmrqc |= rss_field;
3851 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
3852 } else {
3853 ixgbe_setup_reta(adapter);
3854 mrqc |= rss_field;
3855 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3856 }
3857}
3858
3859
3860
3861
3862
3863
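/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 **/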
3864static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
3865 struct ixgbe_ring *ring)
3866{
3867 struct ixgbe_hw *hw = &adapter->hw;
3868 u32 rscctrl;
3869 u8 reg_idx = ring->reg_idx;
3870
3871 if (!ring_is_rsc_enabled(ring))
3872 return;
3873
3874 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
3875 rscctrl |= IXGBE_RSCCTL_RSCEN;
3876
	/* we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65536
	 */
3881 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3882 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
3883}
3884
3885#define IXGBE_MAX_RX_DESC_POLL 10
3886static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3887 struct ixgbe_ring *ring)
3888{
3889 struct ixgbe_hw *hw = &adapter->hw;
3890 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3891 u32 rxdctl;
3892 u8 reg_idx = ring->reg_idx;
3893
3894 if (ixgbe_removed(hw->hw_addr))
3895 return;
3896
3897 if (hw->mac.type == ixgbe_mac_82598EB &&
3898 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3899 return;
3900
3901 do {
3902 usleep_range(1000, 2000);
3903 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3904 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3905
3906 if (!wait_loop) {
3907 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3908 "the polling period\n", reg_idx);
3909 }
3910}
3911
3912void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3913 struct ixgbe_ring *ring)
3914{
3915 struct ixgbe_hw *hw = &adapter->hw;
3916 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3917 u32 rxdctl;
3918 u8 reg_idx = ring->reg_idx;
3919
3920 if (ixgbe_removed(hw->hw_addr))
3921 return;
3922 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3923 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3924
3925
3926 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3927
3928 if (hw->mac.type == ixgbe_mac_82598EB &&
3929 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3930 return;
3931
3932
3933 do {
3934 udelay(10);
3935 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3936 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3937
3938 if (!wait_loop) {
3939 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3940 "the polling period\n", reg_idx);
3941 }
3942}
3943
3944void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3945 struct ixgbe_ring *ring)
3946{
3947 struct ixgbe_hw *hw = &adapter->hw;
3948 union ixgbe_adv_rx_desc *rx_desc;
3949 u64 rdba = ring->dma;
3950 u32 rxdctl;
3951 u8 reg_idx = ring->reg_idx;
3952
3953
3954 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3955 ixgbe_disable_rx_queue(adapter, ring);
3956
3957 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3958 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3959 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3960 ring->count * sizeof(union ixgbe_adv_rx_desc));
3961
3962 IXGBE_WRITE_FLUSH(hw);
3963
3964 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3965 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
3966 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
3967
3968 ixgbe_configure_srrctl(adapter, ring);
3969 ixgbe_configure_rscctl(adapter, ring);
3970
3971 if (hw->mac.type == ixgbe_mac_82598EB) {
		/* enable cache line friendly hardware writes:
		 * PTHRESH=32 descriptors (half the internal cache),
		 * this also removes ugly rx_no_buffer_count increment
		 * HTHRESH=4 descriptors (to minimize latency on fetch)
		 * WTHRESH=8 descriptors (to encourage burst writeback)
		 */
3979 rxdctl &= ~0x3FFFFF;
3980 rxdctl |= 0x080420;
3981#if (PAGE_SIZE < 8192)
3982 } else {
3983 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
3984 IXGBE_RXDCTL_RLPML_EN);
3985
3986
3987 if (ring_uses_build_skb(ring) &&
3988 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
3989 rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
3990 IXGBE_RXDCTL_RLPML_EN;
3991#endif
3992 }
3993
3994
3995 memset(ring->rx_buffer_info, 0,
3996 sizeof(struct ixgbe_rx_buffer) * ring->count);
3997
3998
3999 rx_desc = IXGBE_RX_DESC(ring, 0);
4000 rx_desc->wb.upper.length = 0;
4001
4002
4003 rxdctl |= IXGBE_RXDCTL_ENABLE;
4004 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
4005
4006 ixgbe_rx_desc_queue_enable(adapter, ring);
4007 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
4008}
4009
4010static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
4011{
4012 struct ixgbe_hw *hw = &adapter->hw;
4013 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
4014 u16 pool;
4015
4016
4017 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4018 IXGBE_PSRTYPE_UDPHDR |
4019 IXGBE_PSRTYPE_IPV4HDR |
4020 IXGBE_PSRTYPE_L2HDR |
4021 IXGBE_PSRTYPE_IPV6HDR;
4022
4023 if (hw->mac.type == ixgbe_mac_82598EB)
4024 return;
4025
4026 if (rss_i > 3)
4027 psrtype |= 2u << 29;
4028 else if (rss_i > 1)
4029 psrtype |= 1u << 29;
4030
4031 for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
4032 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4033}
4034
4035static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
4036{
4037 struct ixgbe_hw *hw = &adapter->hw;
4038 u32 reg_offset, vf_shift;
4039 u32 gcr_ext, vmdctl;
4040 int i;
4041
4042 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
4043 return;
4044
4045 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
4046 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
4047 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
4048 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
4049 vmdctl |= IXGBE_VT_CTL_REPLEN;
4050 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
4051
4052 vf_shift = VMDQ_P(0) % 32;
4053 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
4054
4055
4056 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
4057 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
4058 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
4059 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
4060 if (adapter->bridge_mode == BRIDGE_MODE_VEB)
4061 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4062
4063
4064 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
4065
4066
4067 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4068
4069
4070
4071
4072
4073 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4074 case IXGBE_82599_VMDQ_8Q_MASK:
4075 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
4076 break;
4077 case IXGBE_82599_VMDQ_4Q_MASK:
4078 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
4079 break;
4080 default:
4081 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
4082 break;
4083 }
4084
4085 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4086
4087 for (i = 0; i < adapter->num_vfs; i++) {
4088
4089 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
4090 adapter->vfinfo[i].spoofchk_enabled);
4091
4092
4093 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
4094 adapter->vfinfo[i].rss_query_enabled);
4095 }
4096}
4097
4098static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
4099{
4100 struct ixgbe_hw *hw = &adapter->hw;
4101 struct net_device *netdev = adapter->netdev;
4102 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4103 struct ixgbe_ring *rx_ring;
4104 int i;
4105 u32 mhadd, hlreg0;
4106
4107#ifdef IXGBE_FCOE
4108
4109 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
4110 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
4111 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4112
4113#endif
4114
4115
4116 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4117 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
4118
4119 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4120 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
4121 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4122 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
4123
4124 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4125 }
4126
4127 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4128
4129 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4130 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4131
4132
4133
4134
4135
4136 for (i = 0; i < adapter->num_rx_queues; i++) {
4137 rx_ring = adapter->rx_ring[i];
4138
4139 clear_ring_rsc_enabled(rx_ring);
4140 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4141 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4142
4143 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4144 set_ring_rsc_enabled(rx_ring);
4145
4146 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
4147 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4148
4149 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4150 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
4151 continue;
4152
4153 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
4154
4155#if (PAGE_SIZE < 8192)
4156 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
4157 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4158
4159 if (IXGBE_2K_TOO_SMALL_WITH_PADDING ||
4160 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
4161 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
4162#endif
4163 }
4164}
4165
4166static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
4167{
4168 struct ixgbe_hw *hw = &adapter->hw;
4169 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4170
4171 switch (hw->mac.type) {
4172 case ixgbe_mac_82598EB:
		/*
		 * For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 *
		 * also, the manual doesn't mention it clearly but DCA hints
		 * will only use queue 0's tags unless this bit is set.  Side
		 * effects of setting this bit are only that SRRCTL must be
		 * fully programmed [0..15]
		 */
4183 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
4184 break;
4185 case ixgbe_mac_X550:
4186 case ixgbe_mac_X550EM_x:
4187 case ixgbe_mac_x550em_a:
4188 if (adapter->num_vfs)
4189 rdrxctl |= IXGBE_RDRXCTL_PSP;
		/* fall through */
4191 case ixgbe_mac_82599EB:
4192 case ixgbe_mac_X540:
4193
4194 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
4195 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
4196 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4197
4198 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
4199 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4200 break;
4201 default:
4202
4203 return;
4204 }
4205
4206 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4207}
4214
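/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/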
4215static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
4216{
4217 struct ixgbe_hw *hw = &adapter->hw;
4218 int i;
4219 u32 rxctrl, rfctl;
4220
4221
4222 hw->mac.ops.disable_rx(hw);
4223
4224 ixgbe_setup_psrtype(adapter);
4225 ixgbe_setup_rdrxctl(adapter);
4226
4227
4228 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4229 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
4230 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
4231 rfctl |= IXGBE_RFCTL_RSC_DIS;
4232
4233
4234 rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
4235 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4236
4237
4238 ixgbe_setup_mrqc(adapter);
4239
4240
4241 ixgbe_set_rx_buffer_len(adapter);
4242
4243
4244
4245
4246
4247 for (i = 0; i < adapter->num_rx_queues; i++)
4248 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
4249
4250 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4251
4252 if (hw->mac.type == ixgbe_mac_82598EB)
4253 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4254
4255
4256 rxctrl |= IXGBE_RXCTRL_RXEN;
4257 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4258}
4259
4260static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
4261 __be16 proto, u16 vid)
4262{
4263 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4264 struct ixgbe_hw *hw = &adapter->hw;
4265
4266
4267 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4268 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
4269
4270 set_bit(vid, adapter->active_vlans);
4271
4272 return 0;
4273}
4274
4275static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
4276{
4277 u32 vlvf;
4278 int idx;
4279
4280
4281 if (vlan == 0)
4282 return 0;
4283
4284
4285 for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
4286 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
4287 if ((vlvf & VLAN_VID_MASK) == vlan)
4288 break;
4289 }
4290
4291 return idx;
4292}
4293
4294void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
4295{
4296 struct ixgbe_hw *hw = &adapter->hw;
4297 u32 bits, word;
4298 int idx;
4299
4300 idx = ixgbe_find_vlvf_entry(hw, vid);
4301 if (!idx)
4302 return;
4303
	/* See if any other pools are set for this VLAN filter
	 * entry other than the PF.
	 */
4307 word = idx * 2 + (VMDQ_P(0) / 32);
4308 bits = ~BIT(VMDQ_P(0) % 32);
4309 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4310
4311
4312 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
4313 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4314 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
4315 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
4316 }
4317}
4318
4319static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
4320 __be16 proto, u16 vid)
4321{
4322 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4323 struct ixgbe_hw *hw = &adapter->hw;
4324
4325
4326 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4327 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
4328
4329 clear_bit(vid, adapter->active_vlans);
4330
4331 return 0;
4332}
4333
4334
4335
4336
4337
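/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */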
4338static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
4339{
4340 struct ixgbe_hw *hw = &adapter->hw;
4341 u32 vlnctrl;
4342 int i, j;
4343
4344 switch (hw->mac.type) {
4345 case ixgbe_mac_82598EB:
4346 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4347 vlnctrl &= ~IXGBE_VLNCTRL_VME;
4348 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4349 break;
4350 case ixgbe_mac_82599EB:
4351 case ixgbe_mac_X540:
4352 case ixgbe_mac_X550:
4353 case ixgbe_mac_X550EM_x:
4354 case ixgbe_mac_x550em_a:
4355 for (i = 0; i < adapter->num_rx_queues; i++) {
4356 struct ixgbe_ring *ring = adapter->rx_ring[i];
4357
4358 if (ring->l2_accel_priv)
4359 continue;
4360 j = ring->reg_idx;
4361 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4362 vlnctrl &= ~IXGBE_RXDCTL_VME;
4363 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4364 }
4365 break;
4366 default:
4367 break;
4368 }
4369}
4370
4371
4372
4373
4374
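/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */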
4375static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4376{
4377 struct ixgbe_hw *hw = &adapter->hw;
4378 u32 vlnctrl;
4379 int i, j;
4380
4381 switch (hw->mac.type) {
4382 case ixgbe_mac_82598EB:
4383 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4384 vlnctrl |= IXGBE_VLNCTRL_VME;
4385 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4386 break;
4387 case ixgbe_mac_82599EB:
4388 case ixgbe_mac_X540:
4389 case ixgbe_mac_X550:
4390 case ixgbe_mac_X550EM_x:
4391 case ixgbe_mac_x550em_a:
4392 for (i = 0; i < adapter->num_rx_queues; i++) {
4393 struct ixgbe_ring *ring = adapter->rx_ring[i];
4394
4395 if (ring->l2_accel_priv)
4396 continue;
4397 j = ring->reg_idx;
4398 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4399 vlnctrl |= IXGBE_RXDCTL_VME;
4400 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4401 }
4402 break;
4403 default:
4404 break;
4405 }
4406}
4407
4408static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4409{
4410 struct ixgbe_hw *hw = &adapter->hw;
4411 u32 vlnctrl, i;
4412
4413 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4414
4415 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
4416
4417 vlnctrl |= IXGBE_VLNCTRL_VFE;
4418 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4419 } else {
4420 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4421 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4422 return;
4423 }
4424
4425
4426 if (hw->mac.type == ixgbe_mac_82598EB)
4427 return;
4428
4429
4430 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
4431 return;
4432
4433
4434 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4435
4436
4437 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4438 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4439 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4440
4441 vlvfb |= BIT(VMDQ_P(0) % 32);
4442 IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4443 }
4444
4445
4446 for (i = hw->mac.vft_size; i--;)
4447 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4448}
4449
4450#define VFTA_BLOCK_SIZE 8
4451static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4452{
4453 struct ixgbe_hw *hw = &adapter->hw;
4454 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4455 u32 vid_start = vfta_offset * 32;
4456 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4457 u32 i, vid, word, bits;
4458
4459 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4460 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
4461
4462
4463 vid = vlvf & VLAN_VID_MASK;
4464
4465
4466 if (vid < vid_start || vid >= vid_end)
4467 continue;
4468
4469 if (vlvf) {
4470
4471 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4472
4473
4474 if (test_bit(vid, adapter->active_vlans))
4475 continue;
4476 }
4477
4478
4479 word = i * 2 + VMDQ_P(0) / 32;
4480 bits = ~BIT(VMDQ_P(0) % 32);
4481 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4482 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4483 }
4484
4485
4486 for (i = VFTA_BLOCK_SIZE; i--;) {
4487 vid = (vfta_offset + i) * 32;
4488 word = vid / BITS_PER_LONG;
4489 bits = vid % BITS_PER_LONG;
4490
4491 vfta[i] |= adapter->active_vlans[word] >> bits;
4492
4493 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4494 }
4495}
4496
4497static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4498{
4499 struct ixgbe_hw *hw = &adapter->hw;
4500 u32 vlnctrl, i;
4501
4502
4503 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4504 vlnctrl |= IXGBE_VLNCTRL_VFE;
4505 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4506
4507 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
4508 hw->mac.type == ixgbe_mac_82598EB)
4509 return;
4510
4511
4512 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4513 return;
4514
4515
4516 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4517
4518 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
4519 ixgbe_scrub_vfta(adapter, i);
4520}
4521
4522static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4523{
4524 u16 vid = 1;
4525
4526 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4527
4528 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
4529 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4530}
4540
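/**
 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *                0 on no addresses written
 *                X on writing X addresses to MTA
 **/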
4541static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4542{
4543 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4544 struct ixgbe_hw *hw = &adapter->hw;
4545
4546 if (!netif_running(netdev))
4547 return 0;
4548
4549 if (hw->mac.ops.update_mc_addr_list)
4550 hw->mac.ops.update_mc_addr_list(hw, netdev);
4551 else
4552 return -ENOMEM;
4553
4554#ifdef CONFIG_PCI_IOV
4555 ixgbe_restore_vf_multicasts(adapter);
4556#endif
4557
4558 return netdev_mc_count(netdev);
4559}
4560
4561#ifdef CONFIG_PCI_IOV
4562void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4563{
4564 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4565 struct ixgbe_hw *hw = &adapter->hw;
4566 int i;
4567
4568 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4569 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4570
4571 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4572 hw->mac.ops.set_rar(hw, i,
4573 mac_table->addr,
4574 mac_table->pool,
4575 IXGBE_RAH_AV);
4576 else
4577 hw->mac.ops.clear_rar(hw, i);
4578 }
4579}
4580
4581#endif
4582static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4583{
4584 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4585 struct ixgbe_hw *hw = &adapter->hw;
4586 int i;
4587
4588 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4589 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
4590 continue;
4591
4592 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4593
4594 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4595 hw->mac.ops.set_rar(hw, i,
4596 mac_table->addr,
4597 mac_table->pool,
4598 IXGBE_RAH_AV);
4599 else
4600 hw->mac.ops.clear_rar(hw, i);
4601 }
4602}
4603
4604static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4605{
4606 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4607 struct ixgbe_hw *hw = &adapter->hw;
4608 int i;
4609
4610 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4611 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4612 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4613 }
4614
4615 ixgbe_sync_mac_table(adapter);
4616}
4617
4618static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
4619{
4620 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4621 struct ixgbe_hw *hw = &adapter->hw;
4622 int i, count = 0;
4623
4624 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4625
4626 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
4627 continue;
4628
4629
4630 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
4631 if (mac_table->pool != pool)
4632 continue;
4633 }
4634
4635 count++;
4636 }
4637
4638 return count;
4639}
4640
4641
4642static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
4643{
4644 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4645 struct ixgbe_hw *hw = &adapter->hw;
4646
4647 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
4648 mac_table->pool = VMDQ_P(0);
4649
4650 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
4651
4652 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
4653 IXGBE_RAH_AV);
4654}
4655
4656int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4657 const u8 *addr, u16 pool)
4658{
4659 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4660 struct ixgbe_hw *hw = &adapter->hw;
4661 int i;
4662
4663 if (is_zero_ether_addr(addr))
4664 return -EINVAL;
4665
4666 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4667 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4668 continue;
4669
4670 ether_addr_copy(mac_table->addr, addr);
4671 mac_table->pool = pool;
4672
4673 mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
4674 IXGBE_MAC_STATE_IN_USE;
4675
4676 ixgbe_sync_mac_table(adapter);
4677
4678 return i;
4679 }
4680
4681 return -ENOMEM;
4682}
4683
4684int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
4685 const u8 *addr, u16 pool)
4686{
4687 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4688 struct ixgbe_hw *hw = &adapter->hw;
4689 int i;
4690
4691 if (is_zero_ether_addr(addr))
4692 return -EINVAL;
4693
4694
4695 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4696
4697 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
4698 continue;
4699
4700 if (mac_table->pool != pool)
4701 continue;
4702
4703 if (!ether_addr_equal(addr, mac_table->addr))
4704 continue;
4705
4706 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4707 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4708
4709 ixgbe_sync_mac_table(adapter);
4710
4711 return 0;
4712 }
4713
4714 return -ENOMEM;
4715}
4716
/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 * @vfn: pool to associate with unicast addresses
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
4725static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
4726{
4727 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4728 int count = 0;
4729
4730
4731 if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
4732 return -ENOMEM;
4733
4734 if (!netdev_uc_empty(netdev)) {
4735 struct netdev_hw_addr *ha;
4736 netdev_for_each_uc_addr(ha, netdev) {
4737 ixgbe_del_mac_filter(adapter, ha->addr, vfn);
4738 ixgbe_add_mac_filter(adapter, ha->addr, vfn);
4739 count++;
4740 }
4741 }
4742 return count;
4743}
4744
4745static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
4746{
4747 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4748 int ret;
4749
4750 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
4751
4752 return min_t(int, ret, 0);
4753}
4754
4755static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4756{
4757 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4758
4759 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
4760
4761 return 0;
4762}
4763
/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
4773void ixgbe_set_rx_mode(struct net_device *netdev)
4774{
4775 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4776 struct ixgbe_hw *hw = &adapter->hw;
4777 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4778 netdev_features_t features = netdev->features;
4779 int count;

	/* Check for Promiscuous and All Multicast modes */
4782 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4783
4784
4785 fctrl &= ~IXGBE_FCTRL_SBP;
4786 fctrl |= IXGBE_FCTRL_BAM;
4787 fctrl |= IXGBE_FCTRL_DPF;
4788 fctrl |= IXGBE_FCTRL_PMCF;
4789
4790
4791 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4792 if (netdev->flags & IFF_PROMISC) {
4793 hw->addr_ctrl.user_set_promisc = true;
4794 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4795 vmolr |= IXGBE_VMOLR_MPE;
4796 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4797 } else {
4798 if (netdev->flags & IFF_ALLMULTI) {
4799 fctrl |= IXGBE_FCTRL_MPE;
4800 vmolr |= IXGBE_VMOLR_MPE;
4801 }
4802 hw->addr_ctrl.user_set_promisc = false;
4803 }
4804
4805
4806
4807
4808
4809
4810 if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4811 fctrl |= IXGBE_FCTRL_UPE;
4812 vmolr |= IXGBE_VMOLR_ROPE;
4813 }
4814
4815
4816
4817
4818
4819 count = ixgbe_write_mc_addr_list(netdev);
4820 if (count < 0) {
4821 fctrl |= IXGBE_FCTRL_MPE;
4822 vmolr |= IXGBE_VMOLR_MPE;
4823 } else if (count) {
4824 vmolr |= IXGBE_VMOLR_ROMPE;
4825 }
4826
4827 if (hw->mac.type != ixgbe_mac_82598EB) {
4828 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4829 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
4830 IXGBE_VMOLR_ROPE);
4831 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
4832 }
4833
4834
4835 if (features & NETIF_F_RXALL) {
4836
4837
4838 fctrl |= (IXGBE_FCTRL_SBP |
4839 IXGBE_FCTRL_BAM |
4840 IXGBE_FCTRL_PMCF);
4841
4842 fctrl &= ~(IXGBE_FCTRL_DPF);
4843
4844 }
4845
4846 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4847
4848 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4849 ixgbe_vlan_strip_enable(adapter);
4850 else
4851 ixgbe_vlan_strip_disable(adapter);
4852
4853 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
4854 ixgbe_vlan_promisc_disable(adapter);
4855 else
4856 ixgbe_vlan_promisc_enable(adapter);
4857}
4858
4859static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4860{
4861 int q_idx;
4862
4863 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
4864 napi_enable(&adapter->q_vector[q_idx]->napi);
4865}
4866
4867static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
4868{
4869 int q_idx;
4870
4871 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
4872 napi_disable(&adapter->q_vector[q_idx]->napi);
4873}
4874
4875static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
4876{
4877 struct ixgbe_hw *hw = &adapter->hw;
4878 u32 vxlanctrl;
4879
4880 if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
4881 IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
4882 return;
4883
4884 vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
4885 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
4886
4887 if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
4888 adapter->vxlan_port = 0;
4889
4890 if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
4891 adapter->geneve_port = 0;
4892}
4893
4894#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * Called by the driver on open to configure the DCB hardware, and also by
 * the gennetlink interface when the DCB state is being reconfigured.
 */
4903static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
4904{
4905 struct ixgbe_hw *hw = &adapter->hw;
4906 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4907
4908 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
4909 if (hw->mac.type == ixgbe_mac_82598EB)
4910 netif_set_gso_max_size(adapter->netdev, 65536);
4911 return;
4912 }
4913
4914 if (hw->mac.type == ixgbe_mac_82598EB)
4915 netif_set_gso_max_size(adapter->netdev, 32768);
4916
4917#ifdef IXGBE_FCOE
4918 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
4919 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
4920#endif
4921
4922
4923 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
4924 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
4925 DCB_TX_CONFIG);
4926 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
4927 DCB_RX_CONFIG);
4928 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
4929 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
4930 ixgbe_dcb_hw_ets(&adapter->hw,
4931 adapter->ixgbe_ieee_ets,
4932 max_frame);
4933 ixgbe_dcb_hw_pfc_config(&adapter->hw,
4934 adapter->ixgbe_ieee_pfc->pfc_en,
4935 adapter->ixgbe_ieee_ets->prio_tc);
4936 }

	/* Enable RSS hash per traffic class (not supported on 82598) */
4939 if (hw->mac.type != ixgbe_mac_82598EB) {
4940 u32 msb = 0;
4941 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
4942
4943 while (rss_i) {
4944 msb++;
4945 rss_i >>= 1;
4946 }
4947
4948
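		/* RQTC holds one 4-bit field per traffic class giving the
		 * number of RSS queue-index bits for that TC.  Multiplying
		 * the computed bit width by 0x11111111 replicates it into
		 * all eight nibbles so every TC uses the same RSS width.
		 * (Explanatory note; e.g. 16 RSS indices give msb = 4 and a
		 * register value of 0x44444444 - illustrative only.)
		 */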
4949 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
4950 }
4951}
4952#endif
4953
4954
4955#define IXGBE_ETH_FRAMING 20

/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
4963static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
4964{
4965 struct ixgbe_hw *hw = &adapter->hw;
4966 struct net_device *dev = adapter->netdev;
4967 int link, tc, kb, marker;
4968 u32 dv_id, rx_pba;
4969
4970
4971 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
4972
4973#ifdef IXGBE_FCOE
4974
4975 if ((dev->features & NETIF_F_FCOE_MTU) &&
4976 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4977 (pb == ixgbe_fcoe_get_tc(adapter)))
4978 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4979#endif
4980
4981
4982 switch (hw->mac.type) {
4983 case ixgbe_mac_X540:
4984 case ixgbe_mac_X550:
4985 case ixgbe_mac_X550EM_x:
4986 case ixgbe_mac_x550em_a:
4987 dv_id = IXGBE_DV_X540(link, tc);
4988 break;
4989 default:
4990 dv_id = IXGBE_DV(link, tc);
4991 break;
4992 }
4993
4994
4995 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4996 dv_id += IXGBE_B2BT(tc);
4997
4998
4999 kb = IXGBE_BT2KB(dv_id);
5000 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
5001
5002 marker = rx_pba - kb;

	/* It is possible that the packet buffer is not large enough to
	 * provide the required headroom.  In this case warn the user and
	 * fall back to the best value we can use.
	 */
5008 if (marker < 0) {
		e_warn(drv, "Packet Buffer(%i) can not provide enough "
		       "headroom to support flow control. "
		       "Decrease MTU or number of traffic classes\n", pb);
5012 marker = tc + 1;
5013 }
5014
5015 return marker;
5016}

/**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
5024static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
5025{
5026 struct ixgbe_hw *hw = &adapter->hw;
5027 struct net_device *dev = adapter->netdev;
5028 int tc;
5029 u32 dv_id;
5030
5031
5032 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
5033
5034#ifdef IXGBE_FCOE
5035
5036 if ((dev->features & NETIF_F_FCOE_MTU) &&
5037 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
5038 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
5039 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
5040#endif
5041
5042
5043 switch (hw->mac.type) {
5044 case ixgbe_mac_X540:
5045 case ixgbe_mac_X550:
5046 case ixgbe_mac_X550EM_x:
5047 case ixgbe_mac_x550em_a:
5048 dv_id = IXGBE_LOW_DV_X540(tc);
5049 break;
5050 default:
5051 dv_id = IXGBE_LOW_DV(tc);
5052 break;
5053 }
5054
5055
5056 return IXGBE_BT2KB(dv_id);
5057}
5058
/*
 * ixgbe_pbthresh_setup - calculate and setup high low water marks
 */
5062static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
5063{
5064 struct ixgbe_hw *hw = &adapter->hw;
5065 int num_tc = netdev_get_num_tc(adapter->netdev);
5066 int i;
5067
5068 if (!num_tc)
5069 num_tc = 1;
5070
5071 for (i = 0; i < num_tc; i++) {
5072 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
5073 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);

		/* Low water marks must not be larger than high water marks */
5076 if (hw->fc.low_water[i] > hw->fc.high_water[i])
5077 hw->fc.low_water[i] = 0;
5078 }
5079
5080 for (; i < MAX_TRAFFIC_CLASS; i++)
5081 hw->fc.high_water[i] = 0;
5082}
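
/* Note (explanatory): ixgbe_hpbthresh()/ixgbe_lpbthresh() return thresholds
 * in kilobytes.  The high water mark is the packet buffer size minus the
 * worst-case delay headroom for the traffic class, and the low water mark is
 * the minimum delay value; both are stored per TC in hw->fc and programmed
 * into the MAC when flow control (or PFC) is configured.
 */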
5083
5084static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
5085{
5086 struct ixgbe_hw *hw = &adapter->hw;
5087 int hdrm;
5088 u8 tc = netdev_get_num_tc(adapter->netdev);
5089
5090 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5091 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5092 hdrm = 32 << adapter->fdir_pballoc;
5093 else
5094 hdrm = 0;
5095
5096 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
5097 ixgbe_pbthresh_setup(adapter);
5098}
5099
5100static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
5101{
5102 struct ixgbe_hw *hw = &adapter->hw;
5103 struct hlist_node *node2;
5104 struct ixgbe_fdir_filter *filter;
5105
5106 spin_lock(&adapter->fdir_perfect_lock);
5107
5108 if (!hlist_empty(&adapter->fdir_filter_list))
5109 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
5110
5111 hlist_for_each_entry_safe(filter, node2,
5112 &adapter->fdir_filter_list, fdir_node) {
5113 ixgbe_fdir_write_perfect_filter_82599(hw,
5114 &filter->filter,
5115 filter->sw_idx,
5116 (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
5117 IXGBE_FDIR_DROP_QUEUE :
5118 adapter->rx_ring[filter->action]->reg_idx);
5119 }
5120
5121 spin_unlock(&adapter->fdir_perfect_lock);
5122}
5123
5124static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
5125 struct ixgbe_adapter *adapter)
5126{
5127 struct ixgbe_hw *hw = &adapter->hw;
5128 u32 vmolr;
5129
5130
5131 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
5132 vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
5133
5134
5135 vmolr &= ~IXGBE_VMOLR_MPE;
5136
5137 if (dev->flags & IFF_ALLMULTI) {
5138 vmolr |= IXGBE_VMOLR_MPE;
5139 } else {
5140 vmolr |= IXGBE_VMOLR_ROMPE;
5141 hw->mac.ops.update_mc_addr_list(hw, dev);
5142 }
5143 ixgbe_write_uc_addr_list(adapter->netdev, pool);
5144 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
5145}
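
/* Note (explanatory): VMOLR controls per-pool Rx filtering.  BAM accepts
 * broadcast, AUPE accepts untagged packets, ROMPE filters multicast through
 * the MTA, and MPE puts the pool in multicast promiscuous mode, mirroring
 * the macvlan device's IFF_ALLMULTI setting above.
 */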
5146
5147static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
5148{
5149 struct ixgbe_adapter *adapter = vadapter->real_adapter;
5150 int rss_i = adapter->num_rx_queues_per_pool;
5151 struct ixgbe_hw *hw = &adapter->hw;
5152 u16 pool = vadapter->pool;
5153 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
5154 IXGBE_PSRTYPE_UDPHDR |
5155 IXGBE_PSRTYPE_IPV4HDR |
5156 IXGBE_PSRTYPE_L2HDR |
5157 IXGBE_PSRTYPE_IPV6HDR;
5158
5159 if (hw->mac.type == ixgbe_mac_82598EB)
5160 return;
5161
5162 if (rss_i > 3)
5163 psrtype |= 2u << 29;
5164 else if (rss_i > 1)
5165 psrtype |= 1u << 29;
5166
5167 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
5168}
5169

/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
5174static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
5175{
5176 u16 i = rx_ring->next_to_clean;
5177 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
5178
5179
5180 while (i != rx_ring->next_to_alloc) {
5181 if (rx_buffer->skb) {
5182 struct sk_buff *skb = rx_buffer->skb;
5183 if (IXGBE_CB(skb)->page_released)
5184 dma_unmap_page_attrs(rx_ring->dev,
5185 IXGBE_CB(skb)->dma,
5186 ixgbe_rx_pg_size(rx_ring),
5187 DMA_FROM_DEVICE,
5188 IXGBE_RX_DMA_ATTR);
5189 dev_kfree_skb(skb);
5190 }

		/* Invalidate cache lines that may have been written to by
		 * the device so that we avoid corrupting memory.
		 */
5195 dma_sync_single_range_for_cpu(rx_ring->dev,
5196 rx_buffer->dma,
5197 rx_buffer->page_offset,
5198 ixgbe_rx_bufsz(rx_ring),
5199 DMA_FROM_DEVICE);
5200
5201
5202 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
5203 ixgbe_rx_pg_size(rx_ring),
5204 DMA_FROM_DEVICE,
5205 IXGBE_RX_DMA_ATTR);
5206 __page_frag_cache_drain(rx_buffer->page,
5207 rx_buffer->pagecnt_bias);
5208
5209 i++;
5210 rx_buffer++;
5211 if (i == rx_ring->count) {
5212 i = 0;
5213 rx_buffer = rx_ring->rx_buffer_info;
5214 }
5215 }
5216
5217 rx_ring->next_to_alloc = 0;
5218 rx_ring->next_to_clean = 0;
5219 rx_ring->next_to_use = 0;
5220}
5221
5222static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
5223 struct ixgbe_ring *rx_ring)
5224{
5225 struct ixgbe_adapter *adapter = vadapter->real_adapter;
5226 int index = rx_ring->queue_index + vadapter->rx_base_queue;
5227
5228
5229 ixgbe_disable_rx_queue(adapter, rx_ring);
5230 usleep_range(10000, 20000);
5231 ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
5232 ixgbe_clean_rx_ring(rx_ring);
5233 rx_ring->l2_accel_priv = NULL;
5234}
5235
5236static int ixgbe_fwd_ring_down(struct net_device *vdev,
5237 struct ixgbe_fwd_adapter *accel)
5238{
5239 struct ixgbe_adapter *adapter = accel->real_adapter;
5240 unsigned int rxbase = accel->rx_base_queue;
5241 unsigned int txbase = accel->tx_base_queue;
5242 int i;
5243
5244 netif_tx_stop_all_queues(vdev);
5245
5246 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
5247 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
5248 adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
5249 }
5250
5251 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
5252 adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
5253 adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
5254 }
5255
5256
5257 return 0;
5258}
5259
5260static int ixgbe_fwd_ring_up(struct net_device *vdev,
5261 struct ixgbe_fwd_adapter *accel)
5262{
5263 struct ixgbe_adapter *adapter = accel->real_adapter;
5264 unsigned int rxbase, txbase, queues;
5265 int i, baseq, err = 0;
5266
5267 if (!test_bit(accel->pool, &adapter->fwd_bitmask))
5268 return 0;
5269
5270 baseq = accel->pool * adapter->num_rx_queues_per_pool;
5271 netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
5272 accel->pool, adapter->num_rx_pools,
5273 baseq, baseq + adapter->num_rx_queues_per_pool,
5274 adapter->fwd_bitmask);
5275
5276 accel->netdev = vdev;
5277 accel->rx_base_queue = rxbase = baseq;
5278 accel->tx_base_queue = txbase = baseq;
5279
5280 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5281 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
5282
5283 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
5284 adapter->rx_ring[rxbase + i]->netdev = vdev;
5285 adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
5286 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
5287 }
5288
5289 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
5290 adapter->tx_ring[txbase + i]->netdev = vdev;
5291 adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
5292 }
5293
5294 queues = min_t(unsigned int,
5295 adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
5296 err = netif_set_real_num_tx_queues(vdev, queues);
5297 if (err)
5298 goto fwd_queue_err;
5299
5300 err = netif_set_real_num_rx_queues(vdev, queues);
5301 if (err)
5302 goto fwd_queue_err;
5303
5304 if (is_valid_ether_addr(vdev->dev_addr))
5305 ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
5306
5307 ixgbe_fwd_psrtype(accel);
5308 ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
5309 return err;
5310fwd_queue_err:
5311 ixgbe_fwd_ring_down(vdev, accel);
5312 return err;
5313}
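
/* Note (explanatory): for L2 forwarding offload (macvlan acceleration) each
 * offloaded macvlan is backed by one VMDq pool.  Its first hardware queue is
 * pool * num_rx_queues_per_pool, and ixgbe_fwd_ring_up() points that block of
 * Rx/Tx rings at the macvlan netdev before adding its MAC filter to the pool.
 */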
5314
5315static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
5316{
5317 if (netif_is_macvlan(upper)) {
5318 struct macvlan_dev *dfwd = netdev_priv(upper);
5319 struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
5320
5321 if (dfwd->fwd_priv)
5322 ixgbe_fwd_ring_up(upper, vadapter);
5323 }
5324
5325 return 0;
5326}
5327
5328static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
5329{
5330 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5331 ixgbe_upper_dev_walk, NULL);
5332}
5333
5334static void ixgbe_configure(struct ixgbe_adapter *adapter)
5335{
5336 struct ixgbe_hw *hw = &adapter->hw;
5337
5338 ixgbe_configure_pb(adapter);
5339#ifdef CONFIG_IXGBE_DCB
5340 ixgbe_configure_dcb(adapter);
5341#endif

	/*
	 * We must restore virtualization before VLANs or else
	 * the VLVF registers will not be populated
	 */
5346 ixgbe_configure_virtualization(adapter);
5347
5348 ixgbe_set_rx_mode(adapter->netdev);
5349 ixgbe_restore_vlan(adapter);
5350
5351 switch (hw->mac.type) {
5352 case ixgbe_mac_82599EB:
5353 case ixgbe_mac_X540:
5354 hw->mac.ops.disable_rx_buff(hw);
5355 break;
5356 default:
5357 break;
5358 }
5359
5360 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5361 ixgbe_init_fdir_signature_82599(&adapter->hw,
5362 adapter->fdir_pballoc);
5363 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5364 ixgbe_init_fdir_perfect_82599(&adapter->hw,
5365 adapter->fdir_pballoc);
5366 ixgbe_fdir_filter_restore(adapter);
5367 }
5368
5369 switch (hw->mac.type) {
5370 case ixgbe_mac_82599EB:
5371 case ixgbe_mac_X540:
5372 hw->mac.ops.enable_rx_buff(hw);
5373 break;
5374 default:
5375 break;
5376 }
5377
5378#ifdef CONFIG_IXGBE_DCA
5379
5380 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
5381 ixgbe_setup_dca(adapter);
5382#endif
5383
5384#ifdef IXGBE_FCOE
5385
5386 ixgbe_configure_fcoe(adapter);
5387
5388#endif
5389 ixgbe_configure_tx(adapter);
5390 ixgbe_configure_rx(adapter);
5391 ixgbe_configure_dfwd(adapter);
5392}
5393
/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 **/
5398static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
5399{
	/*
	 * We are assuming the worst case scenario here, and that
	 * is that an SFP was inserted/removed after the reset
	 * but before SFP detection was enabled.  As such the best
	 * solution is to just start searching as soon as we start
	 */
5406 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5407 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5408
5409 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5410 adapter->sfp_poll_time = 0;
5411}
5412
/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
5419static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
5420{
5421 u32 speed;
5422 bool autoneg, link_up = false;
5423 int ret = IXGBE_ERR_LINK_SETUP;
5424
5425 if (hw->mac.ops.check_link)
5426 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
5427
5428 if (ret)
5429 return ret;
5430
5431 speed = hw->phy.autoneg_advertised;
5432 if ((!speed) && (hw->mac.ops.get_link_capabilities))
5433 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5434 &autoneg);
5435 if (ret)
5436 return ret;
5437
5438 if (hw->mac.ops.setup_link)
5439 ret = hw->mac.ops.setup_link(hw, speed, link_up);
5440
5441 return ret;
5442}
5443
5444static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5445{
5446 struct ixgbe_hw *hw = &adapter->hw;
5447 u32 gpie = 0;
5448
5449 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5450 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5451 IXGBE_GPIE_OCD;
5452 gpie |= IXGBE_GPIE_EIAME;

		/*
		 * use EIAM to auto-mask when MSI-X interrupt is asserted
		 * this saves a register write for every interrupt
		 */
5457 switch (hw->mac.type) {
5458 case ixgbe_mac_82598EB:
5459 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5460 break;
5461 case ixgbe_mac_82599EB:
5462 case ixgbe_mac_X540:
5463 case ixgbe_mac_X550:
5464 case ixgbe_mac_X550EM_x:
5465 case ixgbe_mac_x550em_a:
5466 default:
5467 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5468 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5469 break;
5470 }
5471 } else {
5472
5473
5474 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5475 }
5476
5477
5478
5479
5480 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
5481 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5482
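		/* The VT mode must match the VMDq pool layout chosen
		 * elsewhere: 8 queues per pool means 16 pools, 4 queues
		 * per pool means 32 pools, otherwise 64 pools are used.
		 * (Explanatory note based on the ring_feature mask values.)
		 */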
5483 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
5484 case IXGBE_82599_VMDQ_8Q_MASK:
5485 gpie |= IXGBE_GPIE_VTMODE_16;
5486 break;
5487 case IXGBE_82599_VMDQ_4Q_MASK:
5488 gpie |= IXGBE_GPIE_VTMODE_32;
5489 break;
5490 default:
5491 gpie |= IXGBE_GPIE_VTMODE_64;
5492 break;
5493 }
5494 }
5495
5496
5497 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
5498 switch (adapter->hw.mac.type) {
5499 case ixgbe_mac_82599EB:
5500 gpie |= IXGBE_SDP0_GPIEN_8259X;
5501 break;
5502 default:
5503 break;
5504 }
5505 }
5506
5507
5508 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
5509 gpie |= IXGBE_SDP1_GPIEN(hw);
5510
5511 switch (hw->mac.type) {
5512 case ixgbe_mac_82599EB:
5513 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5514 break;
5515 case ixgbe_mac_X550EM_x:
5516 case ixgbe_mac_x550em_a:
5517 gpie |= IXGBE_SDP0_GPIEN_X540;
5518 break;
5519 default:
5520 break;
5521 }
5522
5523 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5524}
5525
5526static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
5527{
5528 struct ixgbe_hw *hw = &adapter->hw;
5529 int err;
5530 u32 ctrl_ext;
5531
5532 ixgbe_get_hw_control(adapter);
5533 ixgbe_setup_gpie(adapter);
5534
5535 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
5536 ixgbe_configure_msix(adapter);
5537 else
5538 ixgbe_configure_msi_and_legacy(adapter);
5539
5540
5541 if (hw->mac.ops.enable_tx_laser)
5542 hw->mac.ops.enable_tx_laser(hw);
5543
5544 if (hw->phy.ops.set_phy_power)
5545 hw->phy.ops.set_phy_power(hw, true);
5546
5547 smp_mb__before_atomic();
5548 clear_bit(__IXGBE_DOWN, &adapter->state);
5549 ixgbe_napi_enable_all(adapter);
5550
5551 if (ixgbe_is_sfp(hw)) {
5552 ixgbe_sfp_link_config(adapter);
5553 } else {
5554 err = ixgbe_non_sfp_link_config(hw);
5555 if (err)
5556 e_err(probe, "link_config FAILED %d\n", err);
5557 }
5558
5559
5560 IXGBE_READ_REG(hw, IXGBE_EICR);
5561 ixgbe_irq_enable(adapter, true, true);

	/*
	 * If this adapter has a fan, check to see if we had a failure
	 * before we enabled the interrupt.
	 */
5567 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5568 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5569 if (esdp & IXGBE_ESDP_SDP1)
5570 e_crit(drv, "Fan has stopped, replace the adapter\n");
5571 }
5572
5573
5574
5575 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5576 adapter->link_check_timeout = jiffies;
5577 mod_timer(&adapter->service_timer, jiffies);
5578
5579
5580 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5581 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5582 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5583}
5584
5585void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5586{
5587 WARN_ON(in_interrupt());
5588
5589 netif_trans_update(adapter->netdev);
5590
5591 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5592 usleep_range(1000, 2000);
5593 if (adapter->hw.phy.type == ixgbe_phy_fw)
5594 ixgbe_watchdog_link_is_down(adapter);
5595 ixgbe_down(adapter);

	/*
	 * If SR-IOV is enabled, wait before bringing the interface back
	 * up so the VFs have time to notice and respond to the reset.
	 */
5602 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5603 msleep(2000);
5604 ixgbe_up(adapter);
5605 clear_bit(__IXGBE_RESETTING, &adapter->state);
5606}
5607
5608void ixgbe_up(struct ixgbe_adapter *adapter)
5609{
	/* hardware has been reset, we need to reload some things */
5611 ixgbe_configure(adapter);
5612
5613 ixgbe_up_complete(adapter);
5614}
5615
5616void ixgbe_reset(struct ixgbe_adapter *adapter)
5617{
5618 struct ixgbe_hw *hw = &adapter->hw;
5619 struct net_device *netdev = adapter->netdev;
5620 int err;
5621
5622 if (ixgbe_removed(hw->hw_addr))
5623 return;
5624
5625 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5626 usleep_range(1000, 2000);
5627
5628
5629 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5630 IXGBE_FLAG2_SFP_NEEDS_RESET);
5631 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5632
5633 err = hw->mac.ops.init_hw(hw);
5634 switch (err) {
5635 case 0:
5636 case IXGBE_ERR_SFP_NOT_PRESENT:
5637 case IXGBE_ERR_SFP_NOT_SUPPORTED:
5638 break;
5639 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5640 e_dev_err("master disable timed out\n");
5641 break;
5642 case IXGBE_ERR_EEPROM_VERSION:
5643
5644 e_dev_warn("This device is a pre-production adapter/LOM. "
5645 "Please be aware there may be issues associated with "
5646 "your hardware. If you are experiencing problems "
5647 "please contact your Intel or hardware "
5648 "representative who provided you with this "
5649 "hardware.\n");
5650 break;
5651 default:
5652 e_dev_err("Hardware Error: %d\n", err);
5653 }
5654
5655 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5656
5657
5658 ixgbe_flush_sw_mac_table(adapter);
5659 __dev_uc_unsync(netdev, NULL);
5660
5661
5662 ixgbe_mac_set_default_filter(adapter);
5663
5664
5665 if (hw->mac.san_mac_rar_index)
5666 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5667
5668 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5669 ixgbe_ptp_reset(adapter);
5670
5671 if (hw->phy.ops.set_phy_power) {
5672 if (!netif_running(adapter->netdev) && !adapter->wol)
5673 hw->phy.ops.set_phy_power(hw, false);
5674 else
5675 hw->phy.ops.set_phy_power(hw, true);
5676 }
5677}
5678
/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
5683static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5684{
5685 u16 i = tx_ring->next_to_clean;
5686 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
5687
5688 while (i != tx_ring->next_to_use) {
5689 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
5690
5691
5692 if (ring_is_xdp(tx_ring))
5693 page_frag_free(tx_buffer->data);
5694 else
5695 dev_kfree_skb_any(tx_buffer->skb);
5696
5697
5698 dma_unmap_single(tx_ring->dev,
5699 dma_unmap_addr(tx_buffer, dma),
5700 dma_unmap_len(tx_buffer, len),
5701 DMA_TO_DEVICE);
5702
5703
5704 eop_desc = tx_buffer->next_to_watch;
5705 tx_desc = IXGBE_TX_DESC(tx_ring, i);
5706
5707
5708 while (tx_desc != eop_desc) {
5709 tx_buffer++;
5710 tx_desc++;
5711 i++;
5712 if (unlikely(i == tx_ring->count)) {
5713 i = 0;
5714 tx_buffer = tx_ring->tx_buffer_info;
5715 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
5716 }
5717
5718
5719 if (dma_unmap_len(tx_buffer, len))
5720 dma_unmap_page(tx_ring->dev,
5721 dma_unmap_addr(tx_buffer, dma),
5722 dma_unmap_len(tx_buffer, len),
5723 DMA_TO_DEVICE);
5724 }
5725
5726
5727 tx_buffer++;
5728 i++;
5729 if (unlikely(i == tx_ring->count)) {
5730 i = 0;
5731 tx_buffer = tx_ring->tx_buffer_info;
5732 }
5733 }
5734
5735
5736 if (!ring_is_xdp(tx_ring))
5737 netdev_tx_reset_queue(txring_txq(tx_ring));
5738
5739
5740 tx_ring->next_to_use = 0;
5741 tx_ring->next_to_clean = 0;
5742}
5743
/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
5748static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
5749{
5750 int i;
5751
5752 for (i = 0; i < adapter->num_rx_queues; i++)
5753 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
5754}
5755
/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
5760static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
5761{
5762 int i;
5763
5764 for (i = 0; i < adapter->num_tx_queues; i++)
5765 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
5766 for (i = 0; i < adapter->num_xdp_queues; i++)
5767 ixgbe_clean_tx_ring(adapter->xdp_ring[i]);
5768}
5769
5770static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
5771{
5772 struct hlist_node *node2;
5773 struct ixgbe_fdir_filter *filter;
5774
5775 spin_lock(&adapter->fdir_perfect_lock);
5776
5777 hlist_for_each_entry_safe(filter, node2,
5778 &adapter->fdir_filter_list, fdir_node) {
5779 hlist_del(&filter->fdir_node);
5780 kfree(filter);
5781 }
5782 adapter->fdir_filter_count = 0;
5783
5784 spin_unlock(&adapter->fdir_perfect_lock);
5785}
5786
5787static int ixgbe_disable_macvlan(struct net_device *upper, void *data)
5788{
5789 if (netif_is_macvlan(upper)) {
5790 struct macvlan_dev *vlan = netdev_priv(upper);
5791
5792 if (vlan->fwd_priv) {
5793 netif_tx_stop_all_queues(upper);
5794 netif_carrier_off(upper);
5795 netif_tx_disable(upper);
5796 }
5797 }
5798
5799 return 0;
5800}
5801
5802void ixgbe_down(struct ixgbe_adapter *adapter)
5803{
5804 struct net_device *netdev = adapter->netdev;
5805 struct ixgbe_hw *hw = &adapter->hw;
5806 int i;
5807
5808
5809 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
5810 return;

	/* disable receives */
5813 hw->mac.ops.disable_rx(hw);
5814
5815
5816 for (i = 0; i < adapter->num_rx_queues; i++)
5817
5818 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
5819
5820 usleep_range(10000, 20000);

	/* synchronize_sched() needed for pending XDP buffers to drain */
5823 if (adapter->xdp_ring[0])
5824 synchronize_sched();
5825 netif_tx_stop_all_queues(netdev);
5826
5827
5828 netif_carrier_off(netdev);
5829 netif_tx_disable(netdev);
5830
5831
5832 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5833 ixgbe_disable_macvlan, NULL);
5834
5835 ixgbe_irq_disable(adapter);
5836
5837 ixgbe_napi_disable_all(adapter);
5838
5839 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
5840 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
5841 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5842
5843 del_timer_sync(&adapter->service_timer);
5844
5845 if (adapter->num_vfs) {
5846
5847 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
5848
5849
5850 for (i = 0 ; i < adapter->num_vfs; i++)
5851 adapter->vfinfo[i].clear_to_send = false;
5852
5853
5854 ixgbe_ping_all_vfs(adapter);
5855
5856
5857 ixgbe_disable_tx_rx(adapter);
5858 }
5859
5860
5861 for (i = 0; i < adapter->num_tx_queues; i++) {
5862 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
5863 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5864 }
5865 for (i = 0; i < adapter->num_xdp_queues; i++) {
5866 u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
5867
5868 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5869 }
5870
5871
5872 switch (hw->mac.type) {
5873 case ixgbe_mac_82599EB:
5874 case ixgbe_mac_X540:
5875 case ixgbe_mac_X550:
5876 case ixgbe_mac_X550EM_x:
5877 case ixgbe_mac_x550em_a:
5878 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5879 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5880 ~IXGBE_DMATXCTL_TE));
5881 break;
5882 default:
5883 break;
5884 }
5885
5886 if (!pci_channel_offline(adapter->pdev))
5887 ixgbe_reset(adapter);
5888
5889
5890 if (hw->mac.ops.disable_tx_laser)
5891 hw->mac.ops.disable_tx_laser(hw);
5892
5893 ixgbe_clean_all_tx_rings(adapter);
5894 ixgbe_clean_all_rx_rings(adapter);
5895}
5896
/**
 * ixgbe_set_eee_capable - helper function to determine EEE support on X550
 * @adapter: board private structure
 */
5901static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
5902{
5903 struct ixgbe_hw *hw = &adapter->hw;
5904
5905 switch (hw->device_id) {
5906 case IXGBE_DEV_ID_X550EM_A_1G_T:
5907 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
5908 if (!hw->phy.eee_speeds_supported)
5909 break;
5910 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
5911 if (!hw->phy.eee_speeds_advertised)
5912 break;
5913 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
5914 break;
5915 default:
5916 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
5917 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
5918 break;
5919 }
5920}
5921
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
5926static void ixgbe_tx_timeout(struct net_device *netdev)
5927{
5928 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5929
5930
5931 ixgbe_tx_timeout_reset(adapter);
5932}
5933
5934#ifdef CONFIG_IXGBE_DCB
5935static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
5936{
5937 struct ixgbe_hw *hw = &adapter->hw;
5938 struct tc_configuration *tc;
5939 int j;
5940
5941 switch (hw->mac.type) {
5942 case ixgbe_mac_82598EB:
5943 case ixgbe_mac_82599EB:
5944 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
5945 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
5946 break;
5947 case ixgbe_mac_X540:
5948 case ixgbe_mac_X550:
5949 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
5950 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
5951 break;
5952 case ixgbe_mac_X550EM_x:
5953 case ixgbe_mac_x550em_a:
5954 default:
5955 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
5956 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
5957 break;
5958 }
5959
5960
5961 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
5962 tc = &adapter->dcb_cfg.tc_config[j];
5963 tc->path[DCB_TX_CONFIG].bwg_id = 0;
5964 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
5965 tc->path[DCB_RX_CONFIG].bwg_id = 0;
5966 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
5967 tc->dcb_pfc = pfc_disabled;
5968 }
5969
5970
5971 tc = &adapter->dcb_cfg.tc_config[0];
5972 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
5973 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
5974
5975 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
5976 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
5977 adapter->dcb_cfg.pfc_mode_enable = false;
5978 adapter->dcb_set_bitmap = 0x00;
5979 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
5980 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
5981 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
5982 sizeof(adapter->temp_dcb_cfg));
5983}
5984#endif
5985
/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 * @ii: pointer to ixgbe_info for device
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
5994static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
5995 const struct ixgbe_info *ii)
5996{
5997 struct ixgbe_hw *hw = &adapter->hw;
5998 struct pci_dev *pdev = adapter->pdev;
5999 unsigned int rss, fdir;
6000 u32 fwsm;
6001 int i;
6002
6003
6004
6005 hw->vendor_id = pdev->vendor;
6006 hw->device_id = pdev->device;
6007 hw->revision_id = pdev->revision;
6008 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6009 hw->subsystem_device_id = pdev->subsystem_device;
6010
6011
6012 ii->get_invariants(hw);
6013
6014
6015 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
6016 adapter->ring_feature[RING_F_RSS].limit = rss;
6017 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
6018 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
6019 adapter->atr_sample_rate = 20;
6020 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
6021 adapter->ring_feature[RING_F_FDIR].limit = fdir;
6022 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
6023#ifdef CONFIG_IXGBE_DCA
6024 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
6025#endif
6026#ifdef CONFIG_IXGBE_DCB
6027 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
6028 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6029#endif
6030#ifdef IXGBE_FCOE
6031 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
6032 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6033#ifdef CONFIG_IXGBE_DCB
6034
6035 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
6036#endif
6037#endif
6038
6039
6040 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
6041 GFP_KERNEL);
6042 if (!adapter->jump_tables[0])
6043 return -ENOMEM;
6044 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
6045
6046 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
6047 adapter->jump_tables[i] = NULL;
6048
6049 adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
6050 hw->mac.num_rar_entries,
6051 GFP_ATOMIC);
6052 if (!adapter->mac_table)
6053 return -ENOMEM;
6054
6055 if (ixgbe_init_rss_key(adapter))
6056 return -ENOMEM;
6057
6058
6059 switch (hw->mac.type) {
6060 case ixgbe_mac_82598EB:
6061 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
6062
6063 if (hw->device_id == IXGBE_DEV_ID_82598AT)
6064 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
6065
6066 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
6067 adapter->ring_feature[RING_F_FDIR].limit = 0;
6068 adapter->atr_sample_rate = 0;
6069 adapter->fdir_pballoc = 0;
6070#ifdef IXGBE_FCOE
6071 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6072 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
6073#ifdef CONFIG_IXGBE_DCB
6074 adapter->fcoe.up = 0;
6075#endif
6076#endif
6077 break;
6078 case ixgbe_mac_82599EB:
6079 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
6080 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6081 break;
6082 case ixgbe_mac_X540:
6083 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
6084 if (fwsm & IXGBE_FWSM_TS_ENABLED)
6085 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6086 break;
6087 case ixgbe_mac_x550em_a:
6088 adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
6089 switch (hw->device_id) {
6090 case IXGBE_DEV_ID_X550EM_A_1G_T:
6091 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
6092 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6093 break;
6094 default:
6095 break;
6096 }
		/* fall through */
6098 case ixgbe_mac_X550EM_x:
6099#ifdef CONFIG_IXGBE_DCB
6100 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
6101#endif
6102#ifdef IXGBE_FCOE
6103 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6104#ifdef CONFIG_IXGBE_DCB
6105 adapter->fcoe.up = 0;
6106#endif
6107#endif
		/* fall through */
6109 case ixgbe_mac_X550:
6110 if (hw->mac.type == ixgbe_mac_X550)
6111 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
6112#ifdef CONFIG_IXGBE_DCA
6113 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
6114#endif
6115 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
6116 break;
6117 default:
6118 break;
6119 }
6120
6121#ifdef IXGBE_FCOE
6122
6123 spin_lock_init(&adapter->fcoe.lock);
6124
6125#endif
6126
6127 spin_lock_init(&adapter->fdir_perfect_lock);
6128
6129#ifdef CONFIG_IXGBE_DCB
6130 ixgbe_init_dcb(adapter);
6131#endif
6132
6133
6134 hw->fc.requested_mode = ixgbe_fc_full;
6135 hw->fc.current_mode = ixgbe_fc_full;
6136 ixgbe_pbthresh_setup(adapter);
6137 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
6138 hw->fc.send_xon = true;
6139 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
6140
6141#ifdef CONFIG_PCI_IOV
6142 if (max_vfs > 0)
6143 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
6144
6145
6146 if (hw->mac.type != ixgbe_mac_82598EB) {
6147 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
6148 max_vfs = 0;
6149 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
6150 }
6151 }
6152#endif
6153
6154
6155 adapter->rx_itr_setting = 1;
6156 adapter->tx_itr_setting = 1;
6157
6158
6159 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
6160 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
6161
6162
6163 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
6164
6165
6166 if (ixgbe_init_eeprom_params_generic(hw)) {
6167 e_dev_err("EEPROM initialization failed\n");
6168 return -EIO;
6169 }
6170
6171
6172 set_bit(0, &adapter->fwd_bitmask);
6173 set_bit(__IXGBE_DOWN, &adapter->state);
6174
6175 return 0;
6176}
6177
/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
6184int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
6185{
6186 struct device *dev = tx_ring->dev;
6187 int orig_node = dev_to_node(dev);
6188 int ring_node = -1;
6189 int size;
6190
6191 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
6192
6193 if (tx_ring->q_vector)
6194 ring_node = tx_ring->q_vector->numa_node;
6195
6196 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
6197 if (!tx_ring->tx_buffer_info)
6198 tx_ring->tx_buffer_info = vmalloc(size);
6199 if (!tx_ring->tx_buffer_info)
6200 goto err;
6201
6202
6203 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
6204 tx_ring->size = ALIGN(tx_ring->size, 4096);
6205
6206 set_dev_node(dev, ring_node);
6207 tx_ring->desc = dma_alloc_coherent(dev,
6208 tx_ring->size,
6209 &tx_ring->dma,
6210 GFP_KERNEL);
6211 set_dev_node(dev, orig_node);
6212 if (!tx_ring->desc)
6213 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6214 &tx_ring->dma, GFP_KERNEL);
6215 if (!tx_ring->desc)
6216 goto err;
6217
6218 tx_ring->next_to_use = 0;
6219 tx_ring->next_to_clean = 0;
6220 return 0;
6221
6222err:
6223 vfree(tx_ring->tx_buffer_info);
6224 tx_ring->tx_buffer_info = NULL;
6225 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
6226 return -ENOMEM;
6227}
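
/* Note (explanatory): both the Tx and Rx ring setup routines first try to
 * allocate the buffer_info array and descriptor memory on the queue vector's
 * NUMA node (vmalloc_node() plus a temporary set_dev_node() around
 * dma_alloc_coherent()) and silently fall back to any node on failure.
 */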
6228
/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
6239static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
6240{
6241 int i, j = 0, err = 0;
6242
6243 for (i = 0; i < adapter->num_tx_queues; i++) {
6244 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6245 if (!err)
6246 continue;
6247
6248 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
6249 goto err_setup_tx;
6250 }
6251 for (j = 0; j < adapter->num_xdp_queues; j++) {
6252 err = ixgbe_setup_tx_resources(adapter->xdp_ring[j]);
6253 if (!err)
6254 continue;
6255
6256 e_err(probe, "Allocation for Tx Queue %u failed\n", j);
6257 goto err_setup_tx;
6258 }
6259
6260 return 0;
6261err_setup_tx:
6262
6263 while (j--)
6264 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
6265 while (i--)
6266 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6267 return err;
6268}
6269
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: pointer to ixgbe_adapter
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
6276int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
6277 struct ixgbe_ring *rx_ring)
6278{
6279 struct device *dev = rx_ring->dev;
6280 int orig_node = dev_to_node(dev);
6281 int ring_node = -1;
6282 int size;
6283
6284 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
6285
6286 if (rx_ring->q_vector)
6287 ring_node = rx_ring->q_vector->numa_node;
6288
6289 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
6290 if (!rx_ring->rx_buffer_info)
6291 rx_ring->rx_buffer_info = vmalloc(size);
6292 if (!rx_ring->rx_buffer_info)
6293 goto err;
6294
6295
6296 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
6297 rx_ring->size = ALIGN(rx_ring->size, 4096);
6298
6299 set_dev_node(dev, ring_node);
6300 rx_ring->desc = dma_alloc_coherent(dev,
6301 rx_ring->size,
6302 &rx_ring->dma,
6303 GFP_KERNEL);
6304 set_dev_node(dev, orig_node);
6305 if (!rx_ring->desc)
6306 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
6307 &rx_ring->dma, GFP_KERNEL);
6308 if (!rx_ring->desc)
6309 goto err;
6310
6311 rx_ring->next_to_clean = 0;
6312 rx_ring->next_to_use = 0;
6313
6314 rx_ring->xdp_prog = adapter->xdp_prog;
6315
6316 return 0;
6317err:
6318 vfree(rx_ring->rx_buffer_info);
6319 rx_ring->rx_buffer_info = NULL;
6320 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
6321 return -ENOMEM;
6322}
6323
/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
6334static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
6335{
6336 int i, err = 0;
6337
6338 for (i = 0; i < adapter->num_rx_queues; i++) {
6339 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
6340 if (!err)
6341 continue;
6342
6343 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
6344 goto err_setup_rx;
6345 }
6346
6347#ifdef IXGBE_FCOE
6348 err = ixgbe_setup_fcoe_ddp_resources(adapter);
6349 if (!err)
6350#endif
6351 return 0;
6352err_setup_rx:
6353
6354 while (i--)
6355 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6356 return err;
6357}
6358
/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
6365void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
6366{
6367 ixgbe_clean_tx_ring(tx_ring);
6368
6369 vfree(tx_ring->tx_buffer_info);
6370 tx_ring->tx_buffer_info = NULL;
6371
6372
6373 if (!tx_ring->desc)
6374 return;
6375
6376 dma_free_coherent(tx_ring->dev, tx_ring->size,
6377 tx_ring->desc, tx_ring->dma);
6378
6379 tx_ring->desc = NULL;
6380}
6381
/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
6388static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
6389{
6390 int i;
6391
6392 for (i = 0; i < adapter->num_tx_queues; i++)
6393 if (adapter->tx_ring[i]->desc)
6394 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6395 for (i = 0; i < adapter->num_xdp_queues; i++)
6396 if (adapter->xdp_ring[i]->desc)
6397 ixgbe_free_tx_resources(adapter->xdp_ring[i]);
6398}
6399
/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
6406void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
6407{
6408 ixgbe_clean_rx_ring(rx_ring);
6409
6410 rx_ring->xdp_prog = NULL;
6411 vfree(rx_ring->rx_buffer_info);
6412 rx_ring->rx_buffer_info = NULL;
6413
6414
6415 if (!rx_ring->desc)
6416 return;
6417
6418 dma_free_coherent(rx_ring->dev, rx_ring->size,
6419 rx_ring->desc, rx_ring->dma);
6420
6421 rx_ring->desc = NULL;
6422}
6423
/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
6430static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
6431{
6432 int i;
6433
6434#ifdef IXGBE_FCOE
6435 ixgbe_free_fcoe_ddp_resources(adapter);
6436
6437#endif
6438 for (i = 0; i < adapter->num_rx_queues; i++)
6439 if (adapter->rx_ring[i]->desc)
6440 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6441}
6442
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
6450static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6451{
6452 struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/*
	 * For 82599EB we cannot allow legacy VFs to enable their receive
	 * paths when MTU greater than 1500 is configured.  So display a
	 * warning that legacy VFs will be disabled.
	 */
6459 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
6460 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
6461 (new_mtu > ETH_DATA_LEN))
6462 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
6463
6464 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
6465
6466
6467 netdev->mtu = new_mtu;
6468
6469 if (netif_running(netdev))
6470 ixgbe_reinit_locked(adapter);
6471
6472 return 0;
6473}
6474
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
6487int ixgbe_open(struct net_device *netdev)
6488{
6489 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6490 struct ixgbe_hw *hw = &adapter->hw;
6491 int err, queues;
6492
6493
6494 if (test_bit(__IXGBE_TESTING, &adapter->state))
6495 return -EBUSY;
6496
6497 netif_carrier_off(netdev);
6498
6499
6500 err = ixgbe_setup_all_tx_resources(adapter);
6501 if (err)
6502 goto err_setup_tx;
6503
6504
6505 err = ixgbe_setup_all_rx_resources(adapter);
6506 if (err)
6507 goto err_setup_rx;
6508
6509 ixgbe_configure(adapter);
6510
6511 err = ixgbe_request_irq(adapter);
6512 if (err)
6513 goto err_req_irq;
6514
6515
6516 if (adapter->num_rx_pools > 1)
6517 queues = adapter->num_rx_queues_per_pool;
6518 else
6519 queues = adapter->num_tx_queues;
6520
6521 err = netif_set_real_num_tx_queues(netdev, queues);
6522 if (err)
6523 goto err_set_queues;
6524
6525 if (adapter->num_rx_pools > 1 &&
6526 adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
6527 queues = IXGBE_MAX_L2A_QUEUES;
6528 else
6529 queues = adapter->num_rx_queues;
6530 err = netif_set_real_num_rx_queues(netdev, queues);
6531 if (err)
6532 goto err_set_queues;
6533
6534 ixgbe_ptp_init(adapter);
6535
6536 ixgbe_up_complete(adapter);
6537
6538 ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
6539 udp_tunnel_get_rx_info(netdev);
6540
6541 return 0;
6542
6543err_set_queues:
6544 ixgbe_free_irq(adapter);
6545err_req_irq:
6546 ixgbe_free_all_rx_resources(adapter);
6547 if (hw->phy.ops.set_phy_power && !adapter->wol)
6548 hw->phy.ops.set_phy_power(&adapter->hw, false);
6549err_setup_rx:
6550 ixgbe_free_all_tx_resources(adapter);
6551err_setup_tx:
6552 ixgbe_reset(adapter);
6553
6554 return err;
6555}
6556
6557static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6558{
6559 ixgbe_ptp_suspend(adapter);
6560
6561 if (adapter->hw.phy.ops.enter_lplu) {
6562 adapter->hw.phy.reset_disable = true;
6563 ixgbe_down(adapter);
6564 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6565 adapter->hw.phy.reset_disable = false;
6566 } else {
6567 ixgbe_down(adapter);
6568 }
6569
6570 ixgbe_free_irq(adapter);
6571
6572 ixgbe_free_all_tx_resources(adapter);
6573 ixgbe_free_all_rx_resources(adapter);
6574}
6575
/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the control of the driver,
 * but needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
6587int ixgbe_close(struct net_device *netdev)
6588{
6589 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6590
6591 ixgbe_ptp_stop(adapter);
6592
6593 if (netif_device_present(netdev))
6594 ixgbe_close_suspend(adapter);
6595
6596 ixgbe_fdir_filter_exit(adapter);
6597
6598 ixgbe_release_hw_control(adapter);
6599
6600 return 0;
6601}
6602
6603#ifdef CONFIG_PM
6604static int ixgbe_resume(struct pci_dev *pdev)
6605{
6606 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6607 struct net_device *netdev = adapter->netdev;
6608 u32 err;
6609
6610 adapter->hw.hw_addr = adapter->io_addr;
6611 pci_set_power_state(pdev, PCI_D0);
6612 pci_restore_state(pdev);

	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
6617 pci_save_state(pdev);
6618
6619 err = pci_enable_device_mem(pdev);
6620 if (err) {
6621 e_dev_err("Cannot enable PCI device from suspend\n");
6622 return err;
6623 }
6624 smp_mb__before_atomic();
6625 clear_bit(__IXGBE_DISABLED, &adapter->state);
6626 pci_set_master(pdev);
6627
6628 pci_wake_from_d3(pdev, false);
6629
6630 ixgbe_reset(adapter);
6631
6632 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6633
6634 rtnl_lock();
6635 err = ixgbe_init_interrupt_scheme(adapter);
6636 if (!err && netif_running(netdev))
6637 err = ixgbe_open(netdev);
6638
6639
6640 if (!err)
6641 netif_device_attach(netdev);
6642 rtnl_unlock();
6643
6644 return err;
6645}
6646#endif
6647
6648static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6649{
6650 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6651 struct net_device *netdev = adapter->netdev;
6652 struct ixgbe_hw *hw = &adapter->hw;
6653 u32 ctrl, fctrl;
6654 u32 wufc = adapter->wol;
6655#ifdef CONFIG_PM
6656 int retval = 0;
6657#endif
6658
6659 rtnl_lock();
6660 netif_device_detach(netdev);
6661
6662 if (netif_running(netdev))
6663 ixgbe_close_suspend(adapter);
6664
6665 ixgbe_clear_interrupt_scheme(adapter);
6666 rtnl_unlock();
6667
6668#ifdef CONFIG_PM
6669 retval = pci_save_state(pdev);
6670 if (retval)
6671 return retval;
6672
6673#endif
6674 if (hw->mac.ops.stop_link_on_d3)
6675 hw->mac.ops.stop_link_on_d3(hw);
6676
6677 if (wufc) {
6678 ixgbe_set_rx_mode(netdev);
6679
6680
6681 if (hw->mac.ops.enable_tx_laser)
6682 hw->mac.ops.enable_tx_laser(hw);
6683
6684
6685 if (wufc & IXGBE_WUFC_MC) {
6686 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6687 fctrl |= IXGBE_FCTRL_MPE;
6688 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6689 }
6690
6691 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
6692 ctrl |= IXGBE_CTRL_GIO_DIS;
6693 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
6694
6695 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
6696 } else {
6697 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
6698 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
6699 }
6700
6701 switch (hw->mac.type) {
6702 case ixgbe_mac_82598EB:
6703 pci_wake_from_d3(pdev, false);
6704 break;
6705 case ixgbe_mac_82599EB:
6706 case ixgbe_mac_X540:
6707 case ixgbe_mac_X550:
6708 case ixgbe_mac_X550EM_x:
6709 case ixgbe_mac_x550em_a:
6710 pci_wake_from_d3(pdev, !!wufc);
6711 break;
6712 default:
6713 break;
6714 }
6715
6716 *enable_wake = !!wufc;
6717 if (hw->phy.ops.set_phy_power && !*enable_wake)
6718 hw->phy.ops.set_phy_power(hw, false);
6719
6720 ixgbe_release_hw_control(adapter);
6721
6722 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6723 pci_disable_device(pdev);
6724
6725 return 0;
6726}
6727
6728#ifdef CONFIG_PM
6729static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
6730{
6731 int retval;
6732 bool wake;
6733
6734 retval = __ixgbe_shutdown(pdev, &wake);
6735 if (retval)
6736 return retval;
6737
6738 if (wake) {
6739 pci_prepare_to_sleep(pdev);
6740 } else {
6741 pci_wake_from_d3(pdev, false);
6742 pci_set_power_state(pdev, PCI_D3hot);
6743 }
6744
6745 return 0;
6746}
6747#endif
6748
6749static void ixgbe_shutdown(struct pci_dev *pdev)
6750{
6751 bool wake;
6752
6753 __ixgbe_shutdown(pdev, &wake);
6754
6755 if (system_state == SYSTEM_POWER_OFF) {
6756 pci_wake_from_d3(pdev, wake);
6757 pci_set_power_state(pdev, PCI_D3hot);
6758 }
6759}
6760
/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
6765void ixgbe_update_stats(struct ixgbe_adapter *adapter)
6766{
6767 struct net_device *netdev = adapter->netdev;
6768 struct ixgbe_hw *hw = &adapter->hw;
6769 struct ixgbe_hw_stats *hwstats = &adapter->stats;
6770 u64 total_mpc = 0;
6771 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
6772 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
6773 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
6774 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
6775
6776 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6777 test_bit(__IXGBE_RESETTING, &adapter->state))
6778 return;
6779
6780 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
6781 u64 rsc_count = 0;
6782 u64 rsc_flush = 0;
6783 for (i = 0; i < adapter->num_rx_queues; i++) {
6784 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
6785 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
6786 }
6787 adapter->rsc_total_count = rsc_count;
6788 adapter->rsc_total_flush = rsc_flush;
6789 }
6790
6791 for (i = 0; i < adapter->num_rx_queues; i++) {
6792 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
6793 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
6794 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
6795 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
6796 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
6797 bytes += rx_ring->stats.bytes;
6798 packets += rx_ring->stats.packets;
6799 }
6800 adapter->non_eop_descs = non_eop_descs;
6801 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
6802 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
6803 adapter->hw_csum_rx_error = hw_csum_rx_error;
6804 netdev->stats.rx_bytes = bytes;
6805 netdev->stats.rx_packets = packets;
6806
6807 bytes = 0;
6808 packets = 0;
6809
6810 for (i = 0; i < adapter->num_tx_queues; i++) {
6811 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6812 restart_queue += tx_ring->tx_stats.restart_queue;
6813 tx_busy += tx_ring->tx_stats.tx_busy;
6814 bytes += tx_ring->stats.bytes;
6815 packets += tx_ring->stats.packets;
6816 }
6817 for (i = 0; i < adapter->num_xdp_queues; i++) {
6818 struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
6819
6820 restart_queue += xdp_ring->tx_stats.restart_queue;
6821 tx_busy += xdp_ring->tx_stats.tx_busy;
6822 bytes += xdp_ring->stats.bytes;
6823 packets += xdp_ring->stats.packets;
6824 }
6825 adapter->restart_queue = restart_queue;
6826 adapter->tx_busy = tx_busy;
6827 netdev->stats.tx_bytes = bytes;
6828 netdev->stats.tx_packets = packets;
6829
6830 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
6831
6832
6833 for (i = 0; i < 8; i++) {
6834
6835 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
6836 missed_rx += mpc;
6837 hwstats->mpc[i] += mpc;
6838 total_mpc += hwstats->mpc[i];
6839 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
6840 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
6841 switch (hw->mac.type) {
6842 case ixgbe_mac_82598EB:
6843 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
6844 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
6845 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
6846 hwstats->pxonrxc[i] +=
6847 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
6848 break;
6849 case ixgbe_mac_82599EB:
6850 case ixgbe_mac_X540:
6851 case ixgbe_mac_X550:
6852 case ixgbe_mac_X550EM_x:
6853 case ixgbe_mac_x550em_a:
6854 hwstats->pxonrxc[i] +=
6855 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
6856 break;
6857 default:
6858 break;
6859 }
6860 }
6861
6862
6863 for (i = 0; i < 16; i++) {
6864 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
6865 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
6866 if ((hw->mac.type == ixgbe_mac_82599EB) ||
6867 (hw->mac.type == ixgbe_mac_X540) ||
6868 (hw->mac.type == ixgbe_mac_X550) ||
6869 (hw->mac.type == ixgbe_mac_X550EM_x) ||
6870 (hw->mac.type == ixgbe_mac_x550em_a)) {
6871 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
6872 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
6873 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
6874 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
6875 }
6876 }
6877
6878 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
6879
6880 hwstats->gprc -= missed_rx;
6881
6882 ixgbe_update_xoff_received(adapter);
6883
6884
6885 switch (hw->mac.type) {
6886 case ixgbe_mac_82598EB:
6887 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
6888 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
6889 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
6890 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
6891 break;
6892 case ixgbe_mac_X540:
6893 case ixgbe_mac_X550:
6894 case ixgbe_mac_X550EM_x:
6895 case ixgbe_mac_x550em_a:
6896
6897 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
6898 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
6899 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
6900 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
		/* fall through */
6902 case ixgbe_mac_82599EB:
6903 for (i = 0; i < 16; i++)
6904 adapter->hw_rx_no_dma_resources +=
6905 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
6906 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
6907 IXGBE_READ_REG(hw, IXGBE_GORCH);
6908 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
6909 IXGBE_READ_REG(hw, IXGBE_GOTCH);
6910 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
6911 IXGBE_READ_REG(hw, IXGBE_TORH);
6912 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
6913 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
6914 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
6915#ifdef IXGBE_FCOE
6916 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
6917 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
6918 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
6919 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
6920 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
6921 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
6922
6923 if (adapter->fcoe.ddp_pool) {
6924 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
6925 struct ixgbe_fcoe_ddp_pool *ddp_pool;
6926 unsigned int cpu;
6927 u64 noddp = 0, noddp_ext_buff = 0;
6928 for_each_possible_cpu(cpu) {
6929 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
6930 noddp += ddp_pool->noddp;
6931 noddp_ext_buff += ddp_pool->noddp_ext_buff;
6932 }
6933 hwstats->fcoe_noddp = noddp;
6934 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
6935 }
6936#endif
6937 break;
6938 default:
6939 break;
6940 }
6941 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
6942 hwstats->bprc += bprc;
6943 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
6944 if (hw->mac.type == ixgbe_mac_82598EB)
6945 hwstats->mprc -= bprc;
6946 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
6947 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
6948 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
6949 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
6950 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
6951 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
6952 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
6953 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
6954 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
6955 hwstats->lxontxc += lxon;
6956 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
6957 hwstats->lxofftxc += lxoff;
6958 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
6959 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
6960
6961
6962
6963 xon_off_tot = lxon + lxoff;
6964 hwstats->gptc -= xon_off_tot;
6965 hwstats->mptc -= xon_off_tot;
6966 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
6967 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
6968 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
6969 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
6970 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
6971 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
6972 hwstats->ptc64 -= xon_off_tot;
6973 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
6974 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
6975 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
6976 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
6977 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
6978 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
6979
6980
6981 netdev->stats.multicast = hwstats->mprc;
6982
6983
6984 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
6985 netdev->stats.rx_dropped = 0;
6986 netdev->stats.rx_length_errors = hwstats->rlec;
6987 netdev->stats.rx_crc_errors = hwstats->crcerrs;
6988 netdev->stats.rx_missed_errors = total_mpc;
6989}
6990
6991
6992
6993
6994
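/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/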
6995static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
6996{
6997 struct ixgbe_hw *hw = &adapter->hw;
6998 int i;
6999
7000 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
7001 return;
7002
7003 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
7004
7005
7006 if (test_bit(__IXGBE_DOWN, &adapter->state))
7007 return;
7008
7009
7010 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
7011 return;
7012
7013 adapter->fdir_overflow++;
7014
7015 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
7016 for (i = 0; i < adapter->num_tx_queues; i++)
7017 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7018 &(adapter->tx_ring[i]->state));
7019 for (i = 0; i < adapter->num_xdp_queues; i++)
7020 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
7021 &adapter->xdp_ring[i]->state);
7022
7023 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
7024 } else {
		e_err(probe, "failed to finish FDIR re-initialization, ignored adding FDIR ATR filters\n");
7027 }
7028}
7029
7030
7031
7032
7033
7034
7035
7036
7037
7038
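/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * to make certain interrupts are occurring.  Second it sets the bits needed
 * to check for Tx hangs, so a hang can be detected on the next clean cycle.
 **/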
7039static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
7040{
7041 struct ixgbe_hw *hw = &adapter->hw;
7042 u64 eics = 0;
7043 int i;
7044
7045
7046 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7047 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7048 test_bit(__IXGBE_RESETTING, &adapter->state))
7049 return;
7050
7051
7052 if (netif_carrier_ok(adapter->netdev)) {
7053 for (i = 0; i < adapter->num_tx_queues; i++)
7054 set_check_for_tx_hang(adapter->tx_ring[i]);
7055 for (i = 0; i < adapter->num_xdp_queues; i++)
7056 set_check_for_tx_hang(adapter->xdp_ring[i]);
7057 }
7058
7059 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
7060
7061
7062
7063
7064
7065 IXGBE_WRITE_REG(hw, IXGBE_EICS,
7066 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
7067 } else {
7068
7069 for (i = 0; i < adapter->num_q_vectors; i++) {
7070 struct ixgbe_q_vector *qv = adapter->q_vector[i];
7071 if (qv->rx.ring || qv->tx.ring)
7072 eics |= BIT_ULL(i);
7073 }
7074 }
7075
7076
7077 ixgbe_irq_rearm_queues(adapter, eics);
7078}
7079
7080
7081
7082
7083
7084
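/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/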
7085static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
7086{
7087 struct ixgbe_hw *hw = &adapter->hw;
7088 u32 link_speed = adapter->link_speed;
7089 bool link_up = adapter->link_up;
7090 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
7091
7092 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
7093 return;
7094
7095 if (hw->mac.ops.check_link) {
7096 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
7097 } else {
7098
7099 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
7100 link_up = true;
7101 }
7102
7103 if (adapter->ixgbe_ieee_pfc)
7104 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
7105
7106 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
7107 hw->mac.ops.fc_enable(hw);
7108 ixgbe_set_rx_drop_en(adapter);
7109 }
7110
7111 if (link_up ||
7112 time_after(jiffies, (adapter->link_check_timeout +
7113 IXGBE_TRY_LINK_TIMEOUT))) {
7114 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
7115 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
7116 IXGBE_WRITE_FLUSH(hw);
7117 }
7118
7119 adapter->link_up = link_up;
7120 adapter->link_speed = link_speed;
7121}
7122
7123static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
7124{
7125#ifdef CONFIG_IXGBE_DCB
7126 struct net_device *netdev = adapter->netdev;
7127 struct dcb_app app = {
7128 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
7129 .protocol = 0,
7130 };
7131 u8 up = 0;
7132
7133 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
7134 up = dcb_ieee_getapp_mask(netdev, &app);
7135
7136 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
7137#endif
7138}
7139
7140static int ixgbe_enable_macvlan(struct net_device *upper, void *data)
7141{
7142 if (netif_is_macvlan(upper)) {
7143 struct macvlan_dev *vlan = netdev_priv(upper);
7144
7145 if (vlan->fwd_priv)
7146 netif_tx_wake_all_queues(upper);
7147 }
7148
7149 return 0;
7150}
7151
7152
7153
7154
7155
7156
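/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print the link up message
 * @adapter: pointer to the device adapter structure
 **/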
7157static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
7158{
7159 struct net_device *netdev = adapter->netdev;
7160 struct ixgbe_hw *hw = &adapter->hw;
7161 u32 link_speed = adapter->link_speed;
7162 const char *speed_str;
7163 bool flow_rx, flow_tx;
7164
7165
7166 if (netif_carrier_ok(netdev))
7167 return;
7168
7169 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
7170
7171 switch (hw->mac.type) {
7172 case ixgbe_mac_82598EB: {
7173 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
7174 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
7175 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
7176 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
7177 }
7178 break;
7179 case ixgbe_mac_X540:
7180 case ixgbe_mac_X550:
7181 case ixgbe_mac_X550EM_x:
7182 case ixgbe_mac_x550em_a:
7183 case ixgbe_mac_82599EB: {
7184 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
7185 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
7186 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
7187 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
7188 }
7189 break;
7190 default:
7191 flow_tx = false;
7192 flow_rx = false;
7193 break;
7194 }
7195
7196 adapter->last_rx_ptp_check = jiffies;
7197
7198 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7199 ixgbe_ptp_start_cyclecounter(adapter);
7200
7201 switch (link_speed) {
7202 case IXGBE_LINK_SPEED_10GB_FULL:
7203 speed_str = "10 Gbps";
7204 break;
7205 case IXGBE_LINK_SPEED_2_5GB_FULL:
7206 speed_str = "2.5 Gbps";
7207 break;
7208 case IXGBE_LINK_SPEED_1GB_FULL:
7209 speed_str = "1 Gbps";
7210 break;
7211 case IXGBE_LINK_SPEED_100_FULL:
7212 speed_str = "100 Mbps";
7213 break;
7214 case IXGBE_LINK_SPEED_10_FULL:
7215 speed_str = "10 Mbps";
7216 break;
7217 default:
7218 speed_str = "unknown speed";
7219 break;
7220 }
7221 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
7222 ((flow_rx && flow_tx) ? "RX/TX" :
7223 (flow_rx ? "RX" :
7224 (flow_tx ? "TX" : "None"))));
7225
7226 netif_carrier_on(netdev);
7227 ixgbe_check_vf_rate_limit(adapter);
7228
7229
7230 netif_tx_wake_all_queues(adapter->netdev);
7231
7232
7233 rtnl_lock();
7234 netdev_walk_all_upper_dev_rcu(adapter->netdev,
7235 ixgbe_enable_macvlan, NULL);
7236 rtnl_unlock();
7237
7238
7239 ixgbe_update_default_up(adapter);
7240
7241
7242 ixgbe_ping_all_vfs(adapter);
7243}
7244
7245
7246
7247
7248
7249
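/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print the link down message
 * @adapter: pointer to the adapter structure
 **/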
7250static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
7251{
7252 struct net_device *netdev = adapter->netdev;
7253 struct ixgbe_hw *hw = &adapter->hw;
7254
7255 adapter->link_up = false;
7256 adapter->link_speed = 0;
7257
7258
7259 if (!netif_carrier_ok(netdev))
7260 return;
7261
7262
7263 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
7264 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
7265
7266 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7267 ixgbe_ptp_start_cyclecounter(adapter);
7268
7269 e_info(drv, "NIC Link is Down\n");
7270 netif_carrier_off(netdev);
7271
7272
7273 ixgbe_ping_all_vfs(adapter);
7274}
7275
7276static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
7277{
7278 int i;
7279
7280 for (i = 0; i < adapter->num_tx_queues; i++) {
7281 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7282
7283 if (tx_ring->next_to_use != tx_ring->next_to_clean)
7284 return true;
7285 }
7286
7287 for (i = 0; i < adapter->num_xdp_queues; i++) {
7288 struct ixgbe_ring *ring = adapter->xdp_ring[i];
7289
7290 if (ring->next_to_use != ring->next_to_clean)
7291 return true;
7292 }
7293
7294 return false;
7295}
7296
7297static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
7298{
7299 struct ixgbe_hw *hw = &adapter->hw;
7300 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
7301 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
7302
7303 int i, j;
7304
7305 if (!adapter->num_vfs)
7306 return false;
7307
7308
7309 if (hw->mac.type >= ixgbe_mac_X550)
7310 return false;
7311
7312 for (i = 0; i < adapter->num_vfs; i++) {
7313 for (j = 0; j < q_per_pool; j++) {
7314 u32 h, t;
7315
7316 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
7317 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
7318
7319 if (h != t)
7320 return true;
7321 }
7322 }
7323
7324 return false;
7325}
7326
7327
7328
7329
7330
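/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/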
7331static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
7332{
7333 if (!netif_carrier_ok(adapter->netdev)) {
7334 if (ixgbe_ring_tx_pending(adapter) ||
7335 ixgbe_vf_tx_pending(adapter)) {
7336
7337
7338
7339
7340
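			/* We've lost link, so the controller stops DMA, but
			 * we've got queued Tx work that's never going to get
			 * done, so request a reset to flush Tx (outside of
			 * interrupt context).
			 */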
7341 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
7342 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
7343 }
7344 }
7345}
7346
7347#ifdef CONFIG_PCI_IOV
7348static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
7349{
7350 struct ixgbe_hw *hw = &adapter->hw;
7351 struct pci_dev *pdev = adapter->pdev;
7352 unsigned int vf;
7353 u32 gpc;
7354
7355 if (!(netif_carrier_ok(adapter->netdev)))
7356 return;
7357
7358 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
7359 if (gpc)
7360 return;
7361
7362
7363
7364
7365
7366
7367 if (!pdev)
7368 return;
7369
7370
7371 for (vf = 0; vf < adapter->num_vfs; ++vf) {
7372 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
7373 u16 status_reg;
7374
7375 if (!vfdev)
7376 continue;
7377 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
7378 if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
7379 status_reg & PCI_STATUS_REC_MASTER_ABORT)
7380 pcie_flr(vfdev);
7381 }
7382}
7383
7384static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
7385{
7386 u32 ssvpc;
7387
7388
7389 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
7390 adapter->num_vfs == 0)
7391 return;
7392
7393 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
7394
7395
7396
7397
7398
7399 if (!ssvpc)
7400 return;
7401
7402 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
7403}
7404#else
7405static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
7406{
7407}
7408
7409static void
7410ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
7411{
7412}
7413#endif
7414
7415
7416
7417
7418
7419
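/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/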
7420static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
7421{
7422
7423 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7424 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7425 test_bit(__IXGBE_RESETTING, &adapter->state))
7426 return;
7427
7428 ixgbe_watchdog_update_link(adapter);
7429
7430 if (adapter->link_up)
7431 ixgbe_watchdog_link_is_up(adapter);
7432 else
7433 ixgbe_watchdog_link_is_down(adapter);
7434
7435 ixgbe_check_for_bad_vf(adapter);
7436 ixgbe_spoof_check(adapter);
7437 ixgbe_update_stats(adapter);
7438
7439 ixgbe_watchdog_flush_tx(adapter);
7440}
7441
7442
7443
7444
7445
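/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/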
7446static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
7447{
7448 struct ixgbe_hw *hw = &adapter->hw;
7449 s32 err;
7450
7451
7452 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
7453 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7454 return;
7455
7456 if (adapter->sfp_poll_time &&
7457 time_after(adapter->sfp_poll_time, jiffies))
7458 return;
7459
7460
7461 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7462 return;
7463
7464 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
7465
7466 err = hw->phy.ops.identify_sfp(hw);
7467 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7468 goto sfp_out;
7469
7470 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
7471
7472
7473 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
7474 }
7475
7476
7477 if (err)
7478 goto sfp_out;
7479
7480
7481 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7482 goto sfp_out;
7483
7484 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
7485
7486
7487
7488
7489
7490
7491 if (hw->mac.type == ixgbe_mac_82598EB)
7492 err = hw->phy.ops.reset(hw);
7493 else
7494 err = hw->mac.ops.setup_sfp(hw);
7495
7496 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7497 goto sfp_out;
7498
7499 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
7500 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
7501
7502sfp_out:
7503 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7504
7505 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
7506 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
		e_dev_err("failed to initialize because an unsupported SFP+ module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported module.\n");
7511 unregister_netdev(adapter->netdev);
7512 }
7513}
7514
7515
7516
7517
7518
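/**
 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
 * @adapter: the ixgbe adapter structure
 **/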
7519static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
7520{
7521 struct ixgbe_hw *hw = &adapter->hw;
7522 u32 speed;
7523 bool autoneg = false;
7524
7525 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
7526 return;
7527
7528
7529 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7530 return;
7531
7532 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
7533
7534 speed = hw->phy.autoneg_advertised;
7535 if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
7536 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
7537
7538
7539 if (!autoneg) {
7540 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
7541 speed = IXGBE_LINK_SPEED_10GB_FULL;
7542 }
7543 }
7544
7545 if (hw->mac.ops.setup_link)
7546 hw->mac.ops.setup_link(hw, speed, true);
7547
7548 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
7549 adapter->link_check_timeout = jiffies;
7550 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7551}
7552
7553
7554
7555
7556
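/**
 * ixgbe_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/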
7557static void ixgbe_service_timer(unsigned long data)
7558{
7559 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
7560 unsigned long next_event_offset;
7561
7562
7563 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
7564 next_event_offset = HZ / 10;
7565 else
7566 next_event_offset = HZ * 2;
7567
7568
7569 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7570
7571 ixgbe_service_event_schedule(adapter);
7572}
7573
7574static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
7575{
7576 struct ixgbe_hw *hw = &adapter->hw;
7577 u32 status;
7578
7579 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
7580 return;
7581
7582 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
7583
7584 if (!hw->phy.ops.handle_lasi)
7585 return;
7586
7587 status = hw->phy.ops.handle_lasi(&adapter->hw);
7588 if (status != IXGBE_ERR_OVERTEMP)
7589 return;
7590
7591 e_crit(drv, "%s\n", ixgbe_overheat_msg);
7592}
7593
7594static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
7595{
7596 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
7597 return;
7598
7599
7600 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7601 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7602 test_bit(__IXGBE_RESETTING, &adapter->state))
7603 return;
7604
7605 ixgbe_dump(adapter);
7606 netdev_err(adapter->netdev, "Reset adapter\n");
7607 adapter->tx_timeout_count++;
7608
7609 rtnl_lock();
7610 ixgbe_reinit_locked(adapter);
7611 rtnl_unlock();
7612}
7613
7614
7615
7616
7617
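/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/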
7618static void ixgbe_service_task(struct work_struct *work)
7619{
7620 struct ixgbe_adapter *adapter = container_of(work,
7621 struct ixgbe_adapter,
7622 service_task);
7623 if (ixgbe_removed(adapter->hw.hw_addr)) {
7624 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
7625 rtnl_lock();
7626 ixgbe_down(adapter);
7627 rtnl_unlock();
7628 }
7629 ixgbe_service_event_complete(adapter);
7630 return;
7631 }
7632 if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
7633 rtnl_lock();
7634 adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
7635 udp_tunnel_get_rx_info(adapter->netdev);
7636 rtnl_unlock();
7637 }
7638 ixgbe_reset_subtask(adapter);
7639 ixgbe_phy_interrupt_subtask(adapter);
7640 ixgbe_sfp_detection_subtask(adapter);
7641 ixgbe_sfp_link_config_subtask(adapter);
7642 ixgbe_check_overtemp_subtask(adapter);
7643 ixgbe_watchdog_subtask(adapter);
7644 ixgbe_fdir_reinit_subtask(adapter);
7645 ixgbe_check_hang_subtask(adapter);
7646
7647 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
7648 ixgbe_ptp_overflow_check(adapter);
7649 ixgbe_ptp_rx_hang(adapter);
7650 ixgbe_ptp_tx_hang(adapter);
7651 }
7652
7653 ixgbe_service_event_complete(adapter);
7654}
7655
7656static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7657 struct ixgbe_tx_buffer *first,
7658 u8 *hdr_len)
7659{
7660 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
7661 struct sk_buff *skb = first->skb;
7662 union {
7663 struct iphdr *v4;
7664 struct ipv6hdr *v6;
7665 unsigned char *hdr;
7666 } ip;
7667 union {
7668 struct tcphdr *tcp;
7669 unsigned char *hdr;
7670 } l4;
7671 u32 paylen, l4_offset;
7672 int err;
7673
7674 if (skb->ip_summed != CHECKSUM_PARTIAL)
7675 return 0;
7676
7677 if (!skb_is_gso(skb))
7678 return 0;
7679
7680 err = skb_cow_head(skb, 0);
7681 if (err < 0)
7682 return err;
7683
7684 if (eth_p_mpls(first->protocol))
7685 ip.hdr = skb_inner_network_header(skb);
7686 else
7687 ip.hdr = skb_network_header(skb);
7688 l4.hdr = skb_checksum_start(skb);
7689
7690
7691 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
7692
7693
7694 if (ip.v4->version == 4) {
7695 unsigned char *csum_start = skb_checksum_start(skb);
7696 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
7697
7698
7699
7700
7701 ip.v4->check = csum_fold(csum_partial(trans_start,
7702 csum_start - trans_start,
7703 0));
7704 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7705
7706 ip.v4->tot_len = 0;
7707 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7708 IXGBE_TX_FLAGS_CSUM |
7709 IXGBE_TX_FLAGS_IPV4;
7710 } else {
7711 ip.v6->payload_len = 0;
7712 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7713 IXGBE_TX_FLAGS_CSUM;
7714 }
7715
7716
7717 l4_offset = l4.hdr - skb->data;
7718
7719
7720 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
7721
7722
7723 paylen = skb->len - l4_offset;
7724 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
7725
7726
7727 first->gso_segs = skb_shinfo(skb)->gso_segs;
7728 first->bytecount += (first->gso_segs - 1) * *hdr_len;
7729
7730
7731 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
7732 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
7733
7734
7735 vlan_macip_lens = l4.hdr - ip.hdr;
7736 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
7737 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7738
7739 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
7740 mss_l4len_idx);
7741
7742 return 1;
7743}
7744
7745static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
7746{
7747 unsigned int offset = 0;
7748
7749 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
7750
7751 return offset == skb_checksum_start_offset(skb);
7752}
7753
7754static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
7755 struct ixgbe_tx_buffer *first)
7756{
7757 struct sk_buff *skb = first->skb;
7758 u32 vlan_macip_lens = 0;
7759 u32 type_tucmd = 0;
7760
7761 if (skb->ip_summed != CHECKSUM_PARTIAL) {
7762csum_failed:
7763 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
7764 IXGBE_TX_FLAGS_CC)))
7765 return;
7766 goto no_csum;
7767 }
7768
7769 switch (skb->csum_offset) {
7770 case offsetof(struct tcphdr, check):
7771 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
7772
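		/* fall through */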
7773 case offsetof(struct udphdr, check):
7774 break;
7775 case offsetof(struct sctphdr, checksum):
7776
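		/* validate that this is actually an SCTP request */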
7777 if (((first->protocol == htons(ETH_P_IP)) &&
7778 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
7779 ((first->protocol == htons(ETH_P_IPV6)) &&
7780 ixgbe_ipv6_csum_is_sctp(skb))) {
7781 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
7782 break;
7783 }
7784
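		/* fall through */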
7785 default:
7786 skb_checksum_help(skb);
7787 goto csum_failed;
7788 }
7789
7790
7791 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
7792 vlan_macip_lens = skb_checksum_start_offset(skb) -
7793 skb_network_offset(skb);
7794no_csum:
7795
7796 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
7797 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7798
7799 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
7800}
7801
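/*
 * IXGBE_SET_FLAG copies a single-bit flag from _input into the bit position
 * used by _result.  With the single-bit flag/result constants used in this
 * file the translation reduces to one constant multiply or divide, e.g.
 * IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO, IXGBE_ADVTXD_DCMD_TSE)
 * evaluates to IXGBE_ADVTXD_DCMD_TSE when the TSO flag is set and to 0
 * otherwise.
 */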
7802#define IXGBE_SET_FLAG(_input, _flag, _result) \
7803 ((_flag <= _result) ? \
7804 ((u32)(_input & _flag) * (_result / _flag)) : \
7805 ((u32)(_input & _flag) / (_flag / _result)))
7806
7807static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
7808{
7809
7810 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
7811 IXGBE_ADVTXD_DCMD_DEXT |
7812 IXGBE_ADVTXD_DCMD_IFCS;
7813
7814
7815 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
7816 IXGBE_ADVTXD_DCMD_VLE);
7817
7818
7819 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
7820 IXGBE_ADVTXD_DCMD_TSE);
7821
7822
7823 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
7824 IXGBE_ADVTXD_MAC_TSTAMP);
7825
7826
7827 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
7828
7829 return cmd_type;
7830}
7831
7832static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
7833 u32 tx_flags, unsigned int paylen)
7834{
7835 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
7836
7837
7838 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7839 IXGBE_TX_FLAGS_CSUM,
7840 IXGBE_ADVTXD_POPTS_TXSM);
7841
7842
7843 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7844 IXGBE_TX_FLAGS_IPV4,
7845 IXGBE_ADVTXD_POPTS_IXSM);
7846
7847
7848
7849
7850
7851 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7852 IXGBE_TX_FLAGS_CC,
7853 IXGBE_ADVTXD_CC);
7854
7855 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
7856}
7857
7858static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7859{
7860 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
7861
7862
7863
7864
7865
7866 smp_mb();
7867
7868
7869
7870
7871 if (likely(ixgbe_desc_unused(tx_ring) < size))
7872 return -EBUSY;
7873
7874
7875 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
7876 ++tx_ring->tx_stats.restart_queue;
7877 return 0;
7878}
7879
7880static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7881{
7882 if (likely(ixgbe_desc_unused(tx_ring) >= size))
7883 return 0;
7884
7885 return __ixgbe_maybe_stop_tx(tx_ring, size);
7886}
7887
7888#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
7889 IXGBE_TXD_CMD_RS)
7890
7891static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
7892 struct ixgbe_tx_buffer *first,
7893 const u8 hdr_len)
7894{
7895 struct sk_buff *skb = first->skb;
7896 struct ixgbe_tx_buffer *tx_buffer;
7897 union ixgbe_adv_tx_desc *tx_desc;
7898 struct skb_frag_struct *frag;
7899 dma_addr_t dma;
7900 unsigned int data_len, size;
7901 u32 tx_flags = first->tx_flags;
7902 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
7903 u16 i = tx_ring->next_to_use;
7904
7905 tx_desc = IXGBE_TX_DESC(tx_ring, i);
7906
7907 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
7908
7909 size = skb_headlen(skb);
7910 data_len = skb->data_len;
7911
7912#ifdef IXGBE_FCOE
7913 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
7914 if (data_len < sizeof(struct fcoe_crc_eof)) {
7915 size -= sizeof(struct fcoe_crc_eof) - data_len;
7916 data_len = 0;
7917 } else {
7918 data_len -= sizeof(struct fcoe_crc_eof);
7919 }
7920 }
7921
7922#endif
7923 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
7924
7925 tx_buffer = first;
7926
7927 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
7928 if (dma_mapping_error(tx_ring->dev, dma))
7929 goto dma_error;
7930
7931
7932 dma_unmap_len_set(tx_buffer, len, size);
7933 dma_unmap_addr_set(tx_buffer, dma, dma);
7934
7935 tx_desc->read.buffer_addr = cpu_to_le64(dma);
7936
7937 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
7938 tx_desc->read.cmd_type_len =
7939 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
7940
7941 i++;
7942 tx_desc++;
7943 if (i == tx_ring->count) {
7944 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7945 i = 0;
7946 }
7947 tx_desc->read.olinfo_status = 0;
7948
7949 dma += IXGBE_MAX_DATA_PER_TXD;
7950 size -= IXGBE_MAX_DATA_PER_TXD;
7951
7952 tx_desc->read.buffer_addr = cpu_to_le64(dma);
7953 }
7954
7955 if (likely(!data_len))
7956 break;
7957
7958 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
7959
7960 i++;
7961 tx_desc++;
7962 if (i == tx_ring->count) {
7963 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7964 i = 0;
7965 }
7966 tx_desc->read.olinfo_status = 0;
7967
7968#ifdef IXGBE_FCOE
7969 size = min_t(unsigned int, data_len, skb_frag_size(frag));
7970#else
7971 size = skb_frag_size(frag);
7972#endif
7973 data_len -= size;
7974
7975 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
7976 DMA_TO_DEVICE);
7977
7978 tx_buffer = &tx_ring->tx_buffer_info[i];
7979 }
7980
7981
7982 cmd_type |= size | IXGBE_TXD_CMD;
7983 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
7984
7985 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
7986
7987
7988 first->time_stamp = jiffies;
7989
7990
7991
7992
7993
7994
7995
7996
7997
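	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch (only relevant on weakly-ordered
	 * memory model archs).  This also ensures the descriptor writes
	 * are visible before next_to_watch is set below.
	 */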
7998 wmb();
7999
8000
8001 first->next_to_watch = tx_desc;
8002
8003 i++;
8004 if (i == tx_ring->count)
8005 i = 0;
8006
8007 tx_ring->next_to_use = i;
8008
8009 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
8010
8011 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
8012 writel(i, tx_ring->tail);
8013
8014
8015
8016
8017 mmiowb();
8018 }
8019
8020 return 0;
8021dma_error:
8022 dev_err(tx_ring->dev, "TX DMA map failed\n");
8023
8024
8025 for (;;) {
8026 tx_buffer = &tx_ring->tx_buffer_info[i];
8027 if (dma_unmap_len(tx_buffer, len))
8028 dma_unmap_page(tx_ring->dev,
8029 dma_unmap_addr(tx_buffer, dma),
8030 dma_unmap_len(tx_buffer, len),
8031 DMA_TO_DEVICE);
8032 dma_unmap_len_set(tx_buffer, len, 0);
8033 if (tx_buffer == first)
8034 break;
8035 if (i == 0)
8036 i += tx_ring->count;
8037 i--;
8038 }
8039
8040 dev_kfree_skb_any(first->skb);
8041 first->skb = NULL;
8042
8043 tx_ring->next_to_use = i;
8044
8045 return -1;
8046}
8047
8048static void ixgbe_atr(struct ixgbe_ring *ring,
8049 struct ixgbe_tx_buffer *first)
8050{
8051 struct ixgbe_q_vector *q_vector = ring->q_vector;
8052 union ixgbe_atr_hash_dword input = { .dword = 0 };
8053 union ixgbe_atr_hash_dword common = { .dword = 0 };
8054 union {
8055 unsigned char *network;
8056 struct iphdr *ipv4;
8057 struct ipv6hdr *ipv6;
8058 } hdr;
8059 struct tcphdr *th;
8060 unsigned int hlen;
8061 struct sk_buff *skb;
8062 __be16 vlan_id;
8063 int l4_proto;
8064
8065
8066 if (!q_vector)
8067 return;
8068
8069
8070 if (!ring->atr_sample_rate)
8071 return;
8072
8073 ring->atr_count++;
8074
8075
8076 if ((first->protocol != htons(ETH_P_IP)) &&
8077 (first->protocol != htons(ETH_P_IPV6)))
8078 return;
8079
8080
8081 skb = first->skb;
8082 hdr.network = skb_network_header(skb);
8083 if (unlikely(hdr.network <= skb->data))
8084 return;
8085 if (skb->encapsulation &&
8086 first->protocol == htons(ETH_P_IP) &&
8087 hdr.ipv4->protocol == IPPROTO_UDP) {
8088 struct ixgbe_adapter *adapter = q_vector->adapter;
8089
8090 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8091 VXLAN_HEADROOM))
8092 return;
8093
8094
8095 if (adapter->vxlan_port &&
8096 udp_hdr(skb)->dest == adapter->vxlan_port)
8097 hdr.network = skb_inner_network_header(skb);
8098
8099 if (adapter->geneve_port &&
8100 udp_hdr(skb)->dest == adapter->geneve_port)
8101 hdr.network = skb_inner_network_header(skb);
8102 }
8103
8104
8105
8106
8107 if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
8108 return;
8109
8110
8111 switch (hdr.ipv4->version) {
8112 case IPVERSION:
8113
8114 hlen = (hdr.network[0] & 0x0F) << 2;
8115 l4_proto = hdr.ipv4->protocol;
8116 break;
8117 case 6:
8118 hlen = hdr.network - skb->data;
8119 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
8120 hlen -= hdr.network - skb->data;
8121 break;
8122 default:
8123 return;
8124 }
8125
8126 if (l4_proto != IPPROTO_TCP)
8127 return;
8128
8129 if (unlikely(skb_tail_pointer(skb) < hdr.network +
8130 hlen + sizeof(struct tcphdr)))
8131 return;
8132
8133 th = (struct tcphdr *)(hdr.network + hlen);
8134
8135
8136 if (th->fin)
8137 return;
8138
8139
8140 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
8141 return;
8142
8143
8144 ring->atr_count = 0;
8145
8146 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
8147
8148
8149
8150
8151
8152
8153
8154
8155 input.formatted.vlan_id = vlan_id;
8156
8157
8158
8159
8160
8161 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
8162 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
8163 else
8164 common.port.src ^= th->dest ^ first->protocol;
8165 common.port.dst ^= th->source;
8166
8167 switch (hdr.ipv4->version) {
8168 case IPVERSION:
8169 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
8170 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
8171 break;
8172 case 6:
8173 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
8174 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
8175 hdr.ipv6->saddr.s6_addr32[1] ^
8176 hdr.ipv6->saddr.s6_addr32[2] ^
8177 hdr.ipv6->saddr.s6_addr32[3] ^
8178 hdr.ipv6->daddr.s6_addr32[0] ^
8179 hdr.ipv6->daddr.s6_addr32[1] ^
8180 hdr.ipv6->daddr.s6_addr32[2] ^
8181 hdr.ipv6->daddr.s6_addr32[3];
8182 break;
8183 default:
8184 break;
8185 }
8186
8187 if (hdr.network != skb_network_header(skb))
8188 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
8189
8190
8191 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
8192 input, common, ring->queue_index);
8193}
8194
8195static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
8196 void *accel_priv, select_queue_fallback_t fallback)
8197{
8198 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
8199#ifdef IXGBE_FCOE
8200 struct ixgbe_adapter *adapter;
8201 struct ixgbe_ring_feature *f;
8202 int txq;
8203#endif
8204
8205 if (fwd_adapter)
8206 return skb->queue_mapping + fwd_adapter->tx_base_queue;
8207
8208#ifdef IXGBE_FCOE
8209
8210
8211
8212
8213
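	/* only execute the code below if protocol is FCoE or FIP and we
	 * have FCoE enabled on the adapter
	 */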
8214 switch (vlan_get_protocol(skb)) {
8215 case htons(ETH_P_FCOE):
8216 case htons(ETH_P_FIP):
8217 adapter = netdev_priv(dev);
8218
8219 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
8220 break;
8221
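		/* fall through */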
8222 default:
8223 return fallback(dev, skb);
8224 }
8225
8226 f = &adapter->ring_feature[RING_F_FCOE];
8227
8228 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
8229 smp_processor_id();
8230
8231 while (txq >= f->indices)
8232 txq -= f->indices;
8233
8234 return txq + f->offset;
8235#else
8236 return fallback(dev, skb);
8237#endif
8238}
8239
8240static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
8241 struct xdp_buff *xdp)
8242{
8243 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
8244 struct ixgbe_tx_buffer *tx_buffer;
8245 union ixgbe_adv_tx_desc *tx_desc;
8246 u32 len, cmd_type;
8247 dma_addr_t dma;
8248 u16 i;
8249
8250 len = xdp->data_end - xdp->data;
8251
8252 if (unlikely(!ixgbe_desc_unused(ring)))
8253 return IXGBE_XDP_CONSUMED;
8254
8255 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
8256 if (dma_mapping_error(ring->dev, dma))
8257 return IXGBE_XDP_CONSUMED;
8258
8259
8260 tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
8261 tx_buffer->bytecount = len;
8262 tx_buffer->gso_segs = 1;
8263 tx_buffer->protocol = 0;
8264
8265 i = ring->next_to_use;
8266 tx_desc = IXGBE_TX_DESC(ring, i);
8267
8268 dma_unmap_len_set(tx_buffer, len, len);
8269 dma_unmap_addr_set(tx_buffer, dma, dma);
8270 tx_buffer->data = xdp->data;
8271 tx_desc->read.buffer_addr = cpu_to_le64(dma);
8272
8273
8274 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
8275 IXGBE_ADVTXD_DCMD_DEXT |
8276 IXGBE_ADVTXD_DCMD_IFCS;
8277 cmd_type |= len | IXGBE_TXD_CMD;
8278 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
8279 tx_desc->read.olinfo_status =
8280 cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
8281
8282
8283 smp_wmb();
8284
8285
8286 i++;
8287 if (i == ring->count)
8288 i = 0;
8289
8290 tx_buffer->next_to_watch = tx_desc;
8291 ring->next_to_use = i;
8292
8293 return IXGBE_XDP_TX;
8294}
8295
8296netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8297 struct ixgbe_adapter *adapter,
8298 struct ixgbe_ring *tx_ring)
8299{
8300 struct ixgbe_tx_buffer *first;
8301 int tso;
8302 u32 tx_flags = 0;
8303 unsigned short f;
8304 u16 count = TXD_USE_COUNT(skb_headlen(skb));
8305 __be16 protocol = skb->protocol;
8306 u8 hdr_len = 0;
8307
8308
8309
8310
8311
8312
8313
8314
8315 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
8316 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
8317
8318 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8319 tx_ring->tx_stats.tx_busy++;
8320 return NETDEV_TX_BUSY;
8321 }
8322
8323
8324 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
8325 first->skb = skb;
8326 first->bytecount = skb->len;
8327 first->gso_segs = 1;
8328
8329
8330 if (skb_vlan_tag_present(skb)) {
8331 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
8332 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8333
8334 } else if (protocol == htons(ETH_P_8021Q)) {
8335 struct vlan_hdr *vhdr, _vhdr;
8336 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
8337 if (!vhdr)
8338 goto out_drop;
8339
8340 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
8341 IXGBE_TX_FLAGS_VLAN_SHIFT;
8342 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
8343 }
8344 protocol = vlan_get_protocol(skb);
8345
8346 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
8347 adapter->ptp_clock) {
8348 if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
8349 &adapter->state)) {
8350 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8351 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
8352
8353
8354 adapter->ptp_tx_skb = skb_get(skb);
8355 adapter->ptp_tx_start = jiffies;
8356 schedule_work(&adapter->ptp_tx_work);
8357 } else {
8358 adapter->tx_hwtstamp_skipped++;
8359 }
8360 }
8361
8362 skb_tx_timestamp(skb);
8363
8364#ifdef CONFIG_PCI_IOV
8365
8366
8367
8368
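	/* With SR-IOV enabled, set IXGBE_TX_FLAGS_CC so that a context
	 * descriptor is emitted for every frame (see ixgbe_tx_csum()),
	 * which the internal Tx switch uses when checking transmitted
	 * packets.
	 */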
8369 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8370 tx_flags |= IXGBE_TX_FLAGS_CC;
8371
8372#endif
8373
8374 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8375 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
8376 (skb->priority != TC_PRIO_CONTROL))) {
8377 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
8378 tx_flags |= (skb->priority & 0x7) <<
8379 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
8380 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
8381 struct vlan_ethhdr *vhdr;
8382
8383 if (skb_cow_head(skb, 0))
8384 goto out_drop;
8385 vhdr = (struct vlan_ethhdr *)skb->data;
8386 vhdr->h_vlan_TCI = htons(tx_flags >>
8387 IXGBE_TX_FLAGS_VLAN_SHIFT);
8388 } else {
8389 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8390 }
8391 }
8392
8393
8394 first->tx_flags = tx_flags;
8395 first->protocol = protocol;
8396
8397#ifdef IXGBE_FCOE
8398
8399 if ((protocol == htons(ETH_P_FCOE)) &&
8400 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
8401 tso = ixgbe_fso(tx_ring, first, &hdr_len);
8402 if (tso < 0)
8403 goto out_drop;
8404
8405 goto xmit_fcoe;
8406 }
8407
8408#endif
8409 tso = ixgbe_tso(tx_ring, first, &hdr_len);
8410 if (tso < 0)
8411 goto out_drop;
8412 else if (!tso)
8413 ixgbe_tx_csum(tx_ring, first);
8414
8415
8416 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
8417 ixgbe_atr(tx_ring, first);
8418
8419#ifdef IXGBE_FCOE
8420xmit_fcoe:
8421#endif
8422 if (ixgbe_tx_map(tx_ring, first, hdr_len))
8423 goto cleanup_tx_timestamp;
8424
8425 return NETDEV_TX_OK;
8426
8427out_drop:
8428 dev_kfree_skb_any(first->skb);
8429 first->skb = NULL;
8430cleanup_tx_timestamp:
8431 if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
8432 dev_kfree_skb_any(adapter->ptp_tx_skb);
8433 adapter->ptp_tx_skb = NULL;
8434 cancel_work_sync(&adapter->ptp_tx_work);
8435 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
8436 }
8437
8438 return NETDEV_TX_OK;
8439}
8440
8441static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
8442 struct net_device *netdev,
8443 struct ixgbe_ring *ring)
8444{
8445 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8446 struct ixgbe_ring *tx_ring;
8447
8448
8449
8450
8451
8452 if (skb_put_padto(skb, 17))
8453 return NETDEV_TX_OK;
8454
8455 tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
8456
8457 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
8458}
8459
8460static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
8461 struct net_device *netdev)
8462{
8463 return __ixgbe_xmit_frame(skb, netdev, NULL);
8464}
8465
8466
8467
8468
8469
8470
8471
8472
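/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/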
8473static int ixgbe_set_mac(struct net_device *netdev, void *p)
8474{
8475 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8476 struct ixgbe_hw *hw = &adapter->hw;
8477 struct sockaddr *addr = p;
8478
8479 if (!is_valid_ether_addr(addr->sa_data))
8480 return -EADDRNOTAVAIL;
8481
8482 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
8483 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
8484
8485 ixgbe_mac_set_default_filter(adapter);
8486
8487 return 0;
8488}
8489
8490static int
8491ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
8492{
8493 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8494 struct ixgbe_hw *hw = &adapter->hw;
8495 u16 value;
8496 int rc;
8497
8498 if (prtad != hw->phy.mdio.prtad)
8499 return -EINVAL;
8500 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
8501 if (!rc)
8502 rc = value;
8503 return rc;
8504}
8505
8506static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
8507 u16 addr, u16 value)
8508{
8509 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8510 struct ixgbe_hw *hw = &adapter->hw;
8511
8512 if (prtad != hw->phy.mdio.prtad)
8513 return -EINVAL;
8514 return hw->phy.ops.write_reg(hw, addr, devad, value);
8515}
8516
8517static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
8518{
8519 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8520
8521 switch (cmd) {
8522 case SIOCSHWTSTAMP:
8523 return ixgbe_ptp_set_ts_config(adapter, req);
8524 case SIOCGHWTSTAMP:
8525 return ixgbe_ptp_get_ts_config(adapter, req);
8526 case SIOCGMIIPHY:
8527 if (!adapter->hw.phy.ops.read_reg)
8528 return -EOPNOTSUPP;
8529
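		/* fall through */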
8530 default:
8531 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8532 }
8533}
8534
8535
8536
8537
8538
8539
8540
8541
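/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/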
8542static int ixgbe_add_sanmac_netdev(struct net_device *dev)
8543{
8544 int err = 0;
8545 struct ixgbe_adapter *adapter = netdev_priv(dev);
8546 struct ixgbe_hw *hw = &adapter->hw;
8547
8548 if (is_valid_ether_addr(hw->mac.san_addr)) {
8549 rtnl_lock();
8550 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
8551 rtnl_unlock();
8552
8553
8554 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
8555 }
8556 return err;
8557}
8558
8559
8560
8561
8562
8563
8564
8565
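/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the
 * corresponding netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/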
8566static int ixgbe_del_sanmac_netdev(struct net_device *dev)
8567{
8568 int err = 0;
8569 struct ixgbe_adapter *adapter = netdev_priv(dev);
8570 struct ixgbe_mac_info *mac = &adapter->hw.mac;
8571
8572 if (is_valid_ether_addr(mac->san_addr)) {
8573 rtnl_lock();
8574 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
8575 rtnl_unlock();
8576 }
8577 return err;
8578}
8579
8580#ifdef CONFIG_NET_POLL_CONTROLLER
8581
8582
8583
8584
8585
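/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */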
8586static void ixgbe_netpoll(struct net_device *netdev)
8587{
8588 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8589 int i;
8590
8591
8592 if (test_bit(__IXGBE_DOWN, &adapter->state))
8593 return;
8594
8595
8596 for (i = 0; i < adapter->num_q_vectors; i++)
8597 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
8598}
8599
8600#endif
8601
8602static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
8603 struct ixgbe_ring *ring)
8604{
8605 u64 bytes, packets;
8606 unsigned int start;
8607
8608 if (ring) {
8609 do {
8610 start = u64_stats_fetch_begin_irq(&ring->syncp);
8611 packets = ring->stats.packets;
8612 bytes = ring->stats.bytes;
8613 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8614 stats->tx_packets += packets;
8615 stats->tx_bytes += bytes;
8616 }
8617}
8618
8619static void ixgbe_get_stats64(struct net_device *netdev,
8620 struct rtnl_link_stats64 *stats)
8621{
8622 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8623 int i;
8624
8625 rcu_read_lock();
8626 for (i = 0; i < adapter->num_rx_queues; i++) {
8627 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
8628 u64 bytes, packets;
8629 unsigned int start;
8630
8631 if (ring) {
8632 do {
8633 start = u64_stats_fetch_begin_irq(&ring->syncp);
8634 packets = ring->stats.packets;
8635 bytes = ring->stats.bytes;
8636 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8637 stats->rx_packets += packets;
8638 stats->rx_bytes += bytes;
8639 }
8640 }
8641
8642 for (i = 0; i < adapter->num_tx_queues; i++) {
8643 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
8644
8645 ixgbe_get_ring_stats64(stats, ring);
8646 }
8647 for (i = 0; i < adapter->num_xdp_queues; i++) {
8648 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]);
8649
8650 ixgbe_get_ring_stats64(stats, ring);
8651 }
8652 rcu_read_unlock();
8653
8654
8655 stats->multicast = netdev->stats.multicast;
8656 stats->rx_errors = netdev->stats.rx_errors;
8657 stats->rx_length_errors = netdev->stats.rx_length_errors;
8658 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
8659 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
8660}
8661
8662#ifdef CONFIG_IXGBE_DCB
8663
8664
8665
8666
8667
8668
8669
8670
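/**
 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid
 * @adapter: Pointer to adapter struct
 * @tc: number of traffic classes currently enabled
 *
 * Confirm that each 802.1Qp user priority maps to a packet buffer that
 * actually exists; clear any mapping that points past the enabled TCs.
 **/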
8671static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
8672{
8673 struct ixgbe_hw *hw = &adapter->hw;
8674 u32 reg, rsave;
8675 int i;
8676
8677
8678
8679
8680 if (hw->mac.type == ixgbe_mac_82598EB)
8681 return;
8682
8683 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
8684 rsave = reg;
8685
8686 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8687 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
8688
8689
8690 if (up2tc > tc)
8691 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
8692 }
8693
8694 if (reg != rsave)
8695 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
8696
8697 return;
8698}
8699
8700
8701
8702
8703
8704
8705
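/**
 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
 * @adapter: Pointer to adapter struct
 *
 * Populate the netdev user priority to tc map
 **/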
8706static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
8707{
8708 struct net_device *dev = adapter->netdev;
8709 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
8710 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
8711 u8 prio;
8712
8713 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
8714 u8 tc = 0;
8715
8716 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
8717 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
8718 else if (ets)
8719 tc = ets->prio_tc[prio];
8720
8721 netdev_set_prio_tc_map(dev, prio, tc);
8722 }
8723}
8724
8725#endif
8726
8727
8728
8729
8730
8731
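/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 **/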
8732int ixgbe_setup_tc(struct net_device *dev, u8 tc)
8733{
8734 struct ixgbe_adapter *adapter = netdev_priv(dev);
8735 struct ixgbe_hw *hw = &adapter->hw;
8736 bool pools;
8737
8738
8739 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
8740 return -EINVAL;
8741
8742 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
8743 return -EINVAL;
8744
8745 pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
8746 if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
8747 return -EBUSY;
8748
8749
8750
8751
8752
8753 if (netif_running(dev))
8754 ixgbe_close(dev);
8755 else
8756 ixgbe_reset(adapter);
8757
8758 ixgbe_clear_interrupt_scheme(adapter);
8759
8760#ifdef CONFIG_IXGBE_DCB
8761 if (tc) {
8762 netdev_set_num_tc(dev, tc);
8763 ixgbe_set_prio_tc_map(adapter);
8764
8765 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
8766
8767 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
8768 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
8769 adapter->hw.fc.requested_mode = ixgbe_fc_none;
8770 }
8771 } else {
8772 netdev_reset_tc(dev);
8773
8774 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
8775 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
8776
8777 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
8778
8779 adapter->temp_dcb_cfg.pfc_mode_enable = false;
8780 adapter->dcb_cfg.pfc_mode_enable = false;
8781 }
8782
8783 ixgbe_validate_rtr(adapter, tc);
8784
8785#endif
8786 ixgbe_init_interrupt_scheme(adapter);
8787
8788 if (netif_running(dev))
8789 return ixgbe_open(dev);
8790
8791 return 0;
8792}
8793
8794static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
8795 struct tc_cls_u32_offload *cls)
8796{
8797 u32 hdl = cls->knode.handle;
8798 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
8799 u32 loc = cls->knode.handle & 0xfffff;
8800 int err = 0, i, j;
8801 struct ixgbe_jump_table *jump = NULL;
8802
8803 if (loc > IXGBE_MAX_HW_ENTRIES)
8804 return -EINVAL;
8805
8806 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
8807 return -EINVAL;
8808
8809
8810 if (uhtid != 0x800) {
8811 jump = adapter->jump_tables[uhtid];
8812 if (!jump)
8813 return -EINVAL;
8814 if (!test_bit(loc - 1, jump->child_loc_map))
8815 return -EINVAL;
8816 clear_bit(loc - 1, jump->child_loc_map);
8817 }
8818
8819
8820 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
8821 jump = adapter->jump_tables[i];
8822 if (jump && jump->link_hdl == hdl) {
8823
8824
8825
8826 for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
8827 if (!test_bit(j, jump->child_loc_map))
8828 continue;
8829 spin_lock(&adapter->fdir_perfect_lock);
8830 err = ixgbe_update_ethtool_fdir_entry(adapter,
8831 NULL,
8832 j + 1);
8833 spin_unlock(&adapter->fdir_perfect_lock);
8834 clear_bit(j, jump->child_loc_map);
8835 }
8836
8837 kfree(jump->input);
8838 kfree(jump->mask);
8839 kfree(jump);
8840 adapter->jump_tables[i] = NULL;
8841 return err;
8842 }
8843 }
8844
8845 spin_lock(&adapter->fdir_perfect_lock);
8846 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
8847 spin_unlock(&adapter->fdir_perfect_lock);
8848 return err;
8849}
8850
8851static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
8852 struct tc_cls_u32_offload *cls)
8853{
8854 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8855
8856 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8857 return -EINVAL;
8858
8859
8860
8861
8862 if (cls->hnode.divisor > 0)
8863 return -EINVAL;
8864
8865 set_bit(uhtid - 1, &adapter->tables);
8866 return 0;
8867}
8868
8869static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
8870 struct tc_cls_u32_offload *cls)
8871{
8872 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8873
8874 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8875 return -EINVAL;
8876
8877 clear_bit(uhtid - 1, &adapter->tables);
8878 return 0;
8879}
8880
8881#ifdef CONFIG_NET_CLS_ACT
8882struct upper_walk_data {
8883 struct ixgbe_adapter *adapter;
8884 u64 action;
8885 int ifindex;
8886 u8 queue;
8887};
8888
8889static int get_macvlan_queue(struct net_device *upper, void *_data)
8890{
8891 if (netif_is_macvlan(upper)) {
8892 struct macvlan_dev *dfwd = netdev_priv(upper);
8893 struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
8894 struct upper_walk_data *data = _data;
8895 struct ixgbe_adapter *adapter = data->adapter;
8896 int ifindex = data->ifindex;
8897
8898 if (vadapter && vadapter->netdev->ifindex == ifindex) {
8899 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
8900 data->action = data->queue;
8901 return 1;
8902 }
8903 }
8904
8905 return 0;
8906}
8907
8908static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
8909 u8 *queue, u64 *action)
8910{
8911 unsigned int num_vfs = adapter->num_vfs, vf;
8912 struct upper_walk_data data;
8913 struct net_device *upper;
8914
8915
8916 for (vf = 0; vf < num_vfs; ++vf) {
8917 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
8918 if (upper->ifindex == ifindex) {
8919 if (adapter->num_rx_pools > 1)
8920 *queue = vf * 2;
8921 else
8922 *queue = vf * adapter->num_rx_queues_per_pool;
8923
8924 *action = vf + 1;
8925 *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
8926 return 0;
8927 }
8928 }
8929
8930
8931 data.adapter = adapter;
8932 data.ifindex = ifindex;
8933 data.action = 0;
8934 data.queue = 0;
8935 if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
8936 get_macvlan_queue, &data)) {
8937 *action = data.action;
8938 *queue = data.queue;
8939
8940 return 0;
8941 }
8942
8943 return -EINVAL;
8944}
8945
8946static int parse_tc_actions(struct ixgbe_adapter *adapter,
8947 struct tcf_exts *exts, u64 *action, u8 *queue)
8948{
8949 const struct tc_action *a;
8950 LIST_HEAD(actions);
8951 int err;
8952
8953 if (!tcf_exts_has_actions(exts))
8954 return -EINVAL;
8955
8956 tcf_exts_to_list(exts, &actions);
8957 list_for_each_entry(a, &actions, list) {
8958
8959
8960 if (is_tcf_gact_shot(a)) {
8961 *action = IXGBE_FDIR_DROP_QUEUE;
8962 *queue = IXGBE_FDIR_DROP_QUEUE;
8963 return 0;
8964 }
8965
8966
8967 if (is_tcf_mirred_egress_redirect(a)) {
8968 int ifindex = tcf_mirred_ifindex(a);
8969
8970 err = handle_redirect_action(adapter, ifindex, queue,
8971 action);
8972 if (err == 0)
8973 return err;
8974 }
8975 }
8976
8977 return -EINVAL;
8978}
8979#else
8980static int parse_tc_actions(struct ixgbe_adapter *adapter,
8981 struct tcf_exts *exts, u64 *action, u8 *queue)
8982{
8983 return -EINVAL;
8984}
8985#endif
8986
8987static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
8988 union ixgbe_atr_input *mask,
8989 struct tc_cls_u32_offload *cls,
8990 struct ixgbe_mat_field *field_ptr,
8991 struct ixgbe_nexthdr *nexthdr)
8992{
8993 int i, j, off;
8994 __be32 val, m;
8995 bool found_entry = false, found_jump_field = false;
8996
8997 for (i = 0; i < cls->knode.sel->nkeys; i++) {
8998 off = cls->knode.sel->keys[i].off;
8999 val = cls->knode.sel->keys[i].val;
9000 m = cls->knode.sel->keys[i].mask;
9001
9002 for (j = 0; field_ptr[j].val; j++) {
9003 if (field_ptr[j].off == off) {
9004 field_ptr[j].val(input, mask, val, m);
9005 input->filter.formatted.flow_type |=
9006 field_ptr[j].type;
9007 found_entry = true;
9008 break;
9009 }
9010 }
9011 if (nexthdr) {
9012 if (nexthdr->off == cls->knode.sel->keys[i].off &&
9013 nexthdr->val == cls->knode.sel->keys[i].val &&
9014 nexthdr->mask == cls->knode.sel->keys[i].mask)
9015 found_jump_field = true;
9016 else
9017 continue;
9018 }
9019 }
9020
9021 if (nexthdr && !found_jump_field)
9022 return -EINVAL;
9023
9024 if (!found_entry)
9025 return 0;
9026
9027 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
9028 IXGBE_ATR_L4TYPE_MASK;
9029
9030 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
9031 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
9032
9033 return 0;
9034}
9035
9036static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
9037 struct tc_cls_u32_offload *cls)
9038{
9039 __be16 protocol = cls->common.protocol;
9040 u32 loc = cls->knode.handle & 0xfffff;
9041 struct ixgbe_hw *hw = &adapter->hw;
9042 struct ixgbe_mat_field *field_ptr;
9043 struct ixgbe_fdir_filter *input = NULL;
9044 union ixgbe_atr_input *mask = NULL;
9045 struct ixgbe_jump_table *jump = NULL;
9046 int i, err = -EINVAL;
9047 u8 queue;
9048 u32 uhtid, link_uhtid;
9049
9050 uhtid = TC_U32_USERHTID(cls->knode.handle);
9051 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
9052
9053
9054
9055
9056
9057
9058
9059
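	/* The u32 offload path below only understands plain IPv4 frames;
	 * reject any other protocol before touching the jump tables.
	 */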
9060 if (protocol != htons(ETH_P_IP))
9061 return err;
9062
9063 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
9064 e_err(drv, "Location out of range\n");
9065 return err;
9066 }
9067
9068
9069
9070
9071
9072
9073
9074
9075 if (uhtid == 0x800) {
9076 field_ptr = (adapter->jump_tables[0])->mat;
9077 } else {
9078 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
9079 return err;
9080 if (!adapter->jump_tables[uhtid])
9081 return err;
9082 field_ptr = (adapter->jump_tables[uhtid])->mat;
9083 }
9084
9085 if (!field_ptr)
9086 return err;
9087
9088
9089
9090
9091
9092
9093
9094 if (link_uhtid) {
9095 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
9096
9097 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
9098 return err;
9099
9100 if (!test_bit(link_uhtid - 1, &adapter->tables))
9101 return err;
9102
9103
9104
9105
9106
9107
9108 if (adapter->jump_tables[link_uhtid] &&
9109 (adapter->jump_tables[link_uhtid])->link_hdl) {
9110 e_err(drv, "Link filter exists for link: %x\n",
9111 link_uhtid);
9112 return err;
9113 }
9114
9115 for (i = 0; nexthdr[i].jump; i++) {
9116 if (nexthdr[i].o != cls->knode.sel->offoff ||
9117 nexthdr[i].s != cls->knode.sel->offshift ||
9118 nexthdr[i].m != cls->knode.sel->offmask)
9119 return err;
9120
9121 jump = kzalloc(sizeof(*jump), GFP_KERNEL);
9122 if (!jump)
9123 return -ENOMEM;
9124 input = kzalloc(sizeof(*input), GFP_KERNEL);
9125 if (!input) {
9126 err = -ENOMEM;
9127 goto free_jump;
9128 }
9129 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9130 if (!mask) {
9131 err = -ENOMEM;
9132 goto free_input;
9133 }
9134 jump->input = input;
9135 jump->mask = mask;
9136 jump->link_hdl = cls->knode.handle;
9137
9138 err = ixgbe_clsu32_build_input(input, mask, cls,
9139 field_ptr, &nexthdr[i]);
9140 if (!err) {
9141 jump->mat = nexthdr[i].jump;
9142 adapter->jump_tables[link_uhtid] = jump;
9143 break;
9144 }
9145 }
9146 return 0;
9147 }
9148
9149 input = kzalloc(sizeof(*input), GFP_KERNEL);
9150 if (!input)
9151 return -ENOMEM;
9152 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
9153 if (!mask) {
9154 err = -ENOMEM;
9155 goto free_input;
9156 }
9157
9158 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
9159 if ((adapter->jump_tables[uhtid])->input)
9160 memcpy(input, (adapter->jump_tables[uhtid])->input,
9161 sizeof(*input));
9162 if ((adapter->jump_tables[uhtid])->mask)
9163 memcpy(mask, (adapter->jump_tables[uhtid])->mask,
9164 sizeof(*mask));
9165
9166
9167
9168
9169 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
9170 struct ixgbe_jump_table *link = adapter->jump_tables[i];
9171
9172 if (link && (test_bit(loc - 1, link->child_loc_map))) {
9173 e_err(drv, "Filter exists in location: %x\n",
9174 loc);
9175 err = -EINVAL;
9176 goto err_out;
9177 }
9178 }
9179 }
9180 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
9181 if (err)
9182 goto err_out;
9183
9184 err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
9185 &queue);
9186 if (err < 0)
9187 goto err_out;
9188
9189 input->sw_idx = loc;
9190
9191 spin_lock(&adapter->fdir_perfect_lock);
9192
9193 if (hlist_empty(&adapter->fdir_filter_list)) {
9194 memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
9195 err = ixgbe_fdir_set_input_mask_82599(hw, mask);
9196 if (err)
9197 goto err_out_w_lock;
9198 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
9199 err = -EINVAL;
9200 goto err_out_w_lock;
9201 }
9202
9203 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
9204 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
9205 input->sw_idx, queue);
9206 if (!err)
9207 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
9208 spin_unlock(&adapter->fdir_perfect_lock);
9209
9210 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
9211 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
9212
9213 kfree(mask);
9214 return err;
9215err_out_w_lock:
9216 spin_unlock(&adapter->fdir_perfect_lock);
9217err_out:
9218 kfree(mask);
9219free_input:
9220 kfree(input);
9221free_jump:
9222 kfree(jump);
9223 return err;
9224}
9225
9226static int ixgbe_setup_tc_cls_u32(struct net_device *dev,
9227 struct tc_cls_u32_offload *cls_u32)
9228{
9229 struct ixgbe_adapter *adapter = netdev_priv(dev);
9230
9231 if (!is_classid_clsact_ingress(cls_u32->common.classid) ||
9232 cls_u32->common.chain_index)
9233 return -EOPNOTSUPP;
9234
9235 switch (cls_u32->command) {
9236 case TC_CLSU32_NEW_KNODE:
9237 case TC_CLSU32_REPLACE_KNODE:
9238 return ixgbe_configure_clsu32(adapter, cls_u32);
9239 case TC_CLSU32_DELETE_KNODE:
9240 return ixgbe_delete_clsu32(adapter, cls_u32);
9241 case TC_CLSU32_NEW_HNODE:
9242 case TC_CLSU32_REPLACE_HNODE:
9243 return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32);
9244 case TC_CLSU32_DELETE_HNODE:
9245 return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32);
9246 default:
9247 return -EOPNOTSUPP;
9248 }
9249}
9250
9251static int ixgbe_setup_tc_mqprio(struct net_device *dev,
9252 struct tc_mqprio_qopt *mqprio)
9253{
9254 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9255 return ixgbe_setup_tc(dev, mqprio->num_tc);
9256}
9257
9258static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
9259 void *type_data)
9260{
9261 switch (type) {
9262 case TC_SETUP_CLSU32:
9263 return ixgbe_setup_tc_cls_u32(dev, type_data);
9264 case TC_SETUP_MQPRIO:
9265 return ixgbe_setup_tc_mqprio(dev, type_data);
9266 default:
9267 return -EOPNOTSUPP;
9268 }
9269}
9270
9271#ifdef CONFIG_PCI_IOV
9272void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
9273{
9274 struct net_device *netdev = adapter->netdev;
9275
9276 rtnl_lock();
9277 ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
9278 rtnl_unlock();
9279}
9280
9281#endif
9282void ixgbe_do_reset(struct net_device *netdev)
9283{
9284 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9285
9286 if (netif_running(netdev))
9287 ixgbe_reinit_locked(adapter);
9288 else
9289 ixgbe_reset(adapter);
9290}
9291
9292static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
9293 netdev_features_t features)
9294{
9295 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9296
9297 /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
9298 if (!(features & NETIF_F_RXCSUM))
9299 features &= ~NETIF_F_LRO;
9300
9301 /* Turn off LRO if not RSC capable */
9302 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
9303 features &= ~NETIF_F_LRO;
9304
9305 return features;
9306}
9307
9308static int ixgbe_set_features(struct net_device *netdev,
9309 netdev_features_t features)
9310{
9311 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9312 netdev_features_t changed = netdev->features ^ features;
9313 bool need_reset = false;
9314
9315 /* Make sure RSC matches LRO, reset if change */
9316 if (!(features & NETIF_F_LRO)) {
9317 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9318 need_reset = true;
9319 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
9320 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
9321 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
9322 if (adapter->rx_itr_setting == 1 ||
9323 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
9324 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
9325 need_reset = true;
9326 } else if ((changed ^ features) & NETIF_F_LRO) {
9327 e_info(probe, "rx-usecs set too low, disabling RSC\n");
9329 }
9330 }
9331
9332 /*
9333  * Check if Flow Director n-tuple support or hw_tc support was
9334  * enabled or disabled.  If the state changed, we need to reset.
9335  */
9336 if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
9337 /* turn off ATR, enable perfect filters and reset */
9338 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
9339 need_reset = true;
9340
9341 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
9342 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9343 } else {
9344 /* turn off perfect filters, enable ATR and reset */
9345 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
9346 need_reset = true;
9347
9348 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9349
9350 /* We cannot enable ATR if SR-IOV is enabled */
9351 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
9352 /* We cannot enable ATR if we have 2 or more traffic classes */
9353 (netdev_get_num_tc(netdev) > 1) ||
9354 /* We cannot enable ATR if RSS is disabled */
9355 (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
9356 /* A sample rate of 0 indicates ATR disabled */
9357 (!adapter->atr_sample_rate))
9358 ; /* do nothing, not supported */
9359 else /* otherwise supported, so set the flag */
9360 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
9361 }
9362
9363 if (changed & NETIF_F_RXALL)
9364 need_reset = true;
9365
9366 netdev->features = features;
9367
9368 if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
9369 if (features & NETIF_F_RXCSUM) {
9370 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9371 } else {
9372 u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9373
9374 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9375 }
9376 }
9377
9378 if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
9379 if (features & NETIF_F_RXCSUM) {
9380 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9381 } else {
9382 u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9383
9384 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9385 }
9386 }
9387
9388 if (need_reset)
9389 ixgbe_do_reset(netdev);
9390 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
9391 NETIF_F_HW_VLAN_CTAG_FILTER))
9392 ixgbe_set_rx_mode(netdev);
9393
9394 return 0;
9395}
9396
9397 /**
9398  * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
9399  * @dev: The port's netdev
9400  * @ti: Tunnel endpoint information
9401  **/
9402static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
9403 struct udp_tunnel_info *ti)
9404{
9405 struct ixgbe_adapter *adapter = netdev_priv(dev);
9406 struct ixgbe_hw *hw = &adapter->hw;
9407 __be16 port = ti->port;
9408 u32 port_shift = 0;
9409 u32 reg;
9410
9411 if (ti->sa_family != AF_INET)
9412 return;
9413
9414 switch (ti->type) {
9415 case UDP_TUNNEL_TYPE_VXLAN:
9416 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9417 return;
9418
9419 if (adapter->vxlan_port == port)
9420 return;
9421
9422 if (adapter->vxlan_port) {
9423 netdev_info(dev,
9424 "VXLAN port %d set, not adding port %d\n",
9425 ntohs(adapter->vxlan_port),
9426 ntohs(port));
9427 return;
9428 }
9429
9430 adapter->vxlan_port = port;
9431 break;
9432 case UDP_TUNNEL_TYPE_GENEVE:
9433 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9434 return;
9435
9436 if (adapter->geneve_port == port)
9437 return;
9438
9439 if (adapter->geneve_port) {
9440 netdev_info(dev,
9441 "GENEVE port %d set, not adding port %d\n",
9442 ntohs(adapter->geneve_port),
9443 ntohs(port));
9444 return;
9445 }
9446
9447 port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
9448 adapter->geneve_port = port;
9449 break;
9450 default:
9451 return;
9452 }
9453
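 /* VXLAN and GENEVE port numbers share the VXLANCTRL register: the VXLAN
  * port lives in the low 16 bits and the GENEVE port is written at the
  * shifted position selected above
  */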
9454 reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
9455 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
9456}
9457
9458 /**
9459  * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
9460  * @dev: The port's netdev
9461  * @ti: Tunnel endpoint information
9462  **/
9463static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
9464 struct udp_tunnel_info *ti)
9465{
9466 struct ixgbe_adapter *adapter = netdev_priv(dev);
9467 u32 port_mask;
9468
9469 if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
9470 ti->type != UDP_TUNNEL_TYPE_GENEVE)
9471 return;
9472
9473 if (ti->sa_family != AF_INET)
9474 return;
9475
9476 switch (ti->type) {
9477 case UDP_TUNNEL_TYPE_VXLAN:
9478 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9479 return;
9480
9481 if (adapter->vxlan_port != ti->port) {
9482 netdev_info(dev, "VXLAN port %d not found\n",
9483 ntohs(ti->port));
9484 return;
9485 }
9486
9487 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9488 break;
9489 case UDP_TUNNEL_TYPE_GENEVE:
9490 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9491 return;
9492
9493 if (adapter->geneve_port != ti->port) {
9494 netdev_info(dev, "GENEVE port %d not found\n",
9495 ntohs(ti->port));
9496 return;
9497 }
9498
9499 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9500 break;
9501 default:
9502 return;
9503 }
9504
9505 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9506 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9507}
9508
9509static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9510 struct net_device *dev,
9511 const unsigned char *addr, u16 vid,
9512 u16 flags)
9513{
9514 /* guarantee we can provide a unique filter for the unicast address */
9515 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
9516 struct ixgbe_adapter *adapter = netdev_priv(dev);
9517 u16 pool = VMDQ_P(0);
9518
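 /* only add the entry if a hardware RAR (receive address register) slot
  * is still available for the default pool
  */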
9519 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
9520 return -ENOMEM;
9521 }
9522
9523 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
9524}
9525
9526 /**
9527  * ixgbe_configure_bridge_mode - set various bridge modes
9528  * @adapter: the private structure
9529  * @mode: requested bridge mode
9530  *
9531  * Configure the settings required for the requested bridge mode.
9532  **/
9533static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
9534 __u16 mode)
9535{
9536 struct ixgbe_hw *hw = &adapter->hw;
9537 unsigned int p, num_pools;
9538 u32 vmdctl;
9539
9540 switch (mode) {
9541 case BRIDGE_MODE_VEPA:
9542 /* disable Tx loopback, rely on switch hairpin mode */
9543 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
9544
9545 /* must enable Rx switching replication to allow multicast
9546  * packet reception on all VFs, and to enable source address
9547  * pruning.
9548  */
9549 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9550 vmdctl |= IXGBE_VT_CTL_REPLEN;
9551 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9552
9553 /* enable Rx source address pruning. Note, this requires
9554  * replication to be enabled or else it does nothing.
9555  */
9556 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9557 for (p = 0; p < num_pools; p++) {
9558 if (hw->mac.ops.set_source_address_pruning)
9559 hw->mac.ops.set_source_address_pruning(hw,
9560 true,
9561 p);
9562 }
9563 break;
9564 case BRIDGE_MODE_VEB:
9565 /* enable Tx loopback for internal VF/PF communication */
9566 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
9567 IXGBE_PFDTXGSWC_VT_LBEN);
9568
9569 /* disable Rx switching replication unless we have SR-IOV
9570  * virtual functions
9571  */
9572 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9573 if (!adapter->num_vfs)
9574 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
9575 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9576
9577 /* disable Rx source address pruning, since we don't expect
9578  * to be receiving external loopback of our transmitted frames
9579  */
9580 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9581 for (p = 0; p < num_pools; p++) {
9582 if (hw->mac.ops.set_source_address_pruning)
9583 hw->mac.ops.set_source_address_pruning(hw,
9584 false,
9585 p);
9586 }
9587 break;
9588 default:
9589 return -EINVAL;
9590 }
9591
9592 adapter->bridge_mode = mode;
9593
9594 e_info(drv, "enabling bridge mode: %s\n",
9595 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9596
9597 return 0;
9598}
9599
9600static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
9601 struct nlmsghdr *nlh, u16 flags)
9602{
9603 struct ixgbe_adapter *adapter = netdev_priv(dev);
9604 struct nlattr *attr, *br_spec;
9605 int rem;
9606
9607 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9608 return -EOPNOTSUPP;
9609
9610 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9611 if (!br_spec)
9612 return -EINVAL;
9613
9614 nla_for_each_nested(attr, br_spec, rem) {
9615 int status;
9616 __u16 mode;
9617
9618 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9619 continue;
9620
9621 if (nla_len(attr) < sizeof(mode))
9622 return -EINVAL;
9623
9624 mode = nla_get_u16(attr);
9625 status = ixgbe_configure_bridge_mode(adapter, mode);
9626 if (status)
9627 return status;
9628
9629 break;
9630 }
9631
9632 return 0;
9633}
9634
9635static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9636 struct net_device *dev,
9637 u32 filter_mask, int nlflags)
9638{
9639 struct ixgbe_adapter *adapter = netdev_priv(dev);
9640
9641 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9642 return 0;
9643
9644 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
9645 adapter->bridge_mode, 0, 0, nlflags,
9646 filter_mask, NULL);
9647}
9648
9649static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
9650{
9651 struct ixgbe_fwd_adapter *fwd_adapter = NULL;
9652 struct ixgbe_adapter *adapter = netdev_priv(pdev);
9653 int used_pools = adapter->num_vfs + adapter->num_rx_pools;
9654 unsigned int limit;
9655 int pool, err;
9656
9657 /* The hardware has a limited number of pools; each VF and the
9658  * PF requires one. Make sure we do not try to use more pools
9659  * than are available.
9660  */
9661 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
9662 return ERR_PTR(-EINVAL);
9663
9664#ifdef CONFIG_RPS
9665 if (vdev->num_rx_queues != vdev->num_tx_queues) {
9666 netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
9667 vdev->name);
9668 return ERR_PTR(-EINVAL);
9669 }
9670#endif
9671
9672 if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
9673 vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
9674 netdev_info(pdev,
9675 "%s: Supports RX/TX Queue counts 1,2, and 4\n",
9676 pdev->name);
9677 return ERR_PTR(-EINVAL);
9678 }
9679
9680 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
9681 adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
9682 (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
9683 return ERR_PTR(-EBUSY);
9684
9685 fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
9686 if (!fwd_adapter)
9687 return ERR_PTR(-ENOMEM);
9688
9689 pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
9690 adapter->num_rx_pools++;
9691 set_bit(pool, &adapter->fwd_bitmask);
9692 limit = find_last_bit(&adapter->fwd_bitmask, 32);
9693
9694 /* Enable VMDq flag so the device will be put into VM mode */
9695 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
9696 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
9697 adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
9698
9699 /* Force reinit of the ring allocation with VMDq enabled */
9700 err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
9701 if (err)
9702 goto fwd_add_err;
9703 fwd_adapter->pool = pool;
9704 fwd_adapter->real_adapter = adapter;
9705
9706 if (netif_running(pdev)) {
9707 err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
9708 if (err)
9709 goto fwd_add_err;
9710 netif_tx_start_all_queues(vdev);
9711 }
9712
9713 return fwd_adapter;
9714fwd_add_err:
9715 /* unwind counters and free the adapter struct */
9716 netdev_info(pdev,
9717 "%s: dfwd hardware acceleration failed\n", vdev->name);
9718 clear_bit(pool, &adapter->fwd_bitmask);
9719 adapter->num_rx_pools--;
9720 kfree(fwd_adapter);
9721 return ERR_PTR(err);
9722}
9723
9724static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
9725{
9726 struct ixgbe_fwd_adapter *fwd_adapter = priv;
9727 struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
9728 unsigned int limit;
9729
9730 clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
9731 adapter->num_rx_pools--;
9732
9733 limit = find_last_bit(&adapter->fwd_bitmask, 32);
9734 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
9735 ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
9736 ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
9737 netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
9738 fwd_adapter->pool, adapter->num_rx_pools,
9739 fwd_adapter->rx_base_queue,
9740 fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
9741 adapter->fwd_bitmask);
9742 kfree(fwd_adapter);
9743}
9744
9745#define IXGBE_MAX_MAC_HDR_LEN 127
9746#define IXGBE_MAX_NETWORK_HDR_LEN 511
9747
9748static netdev_features_t
9749ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
9750 netdev_features_t features)
9751{
9752 unsigned int network_hdr_len, mac_hdr_len;
9753
9754 /* Make certain the headers can be described by a context descriptor */
9755 mac_hdr_len = skb_network_header(skb) - skb->data;
9756 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
9757 return features & ~(NETIF_F_HW_CSUM |
9758 NETIF_F_SCTP_CRC |
9759 NETIF_F_HW_VLAN_CTAG_TX |
9760 NETIF_F_TSO |
9761 NETIF_F_TSO6);
9762
9763 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
9764 if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
9765 return features & ~(NETIF_F_HW_CSUM |
9766 NETIF_F_SCTP_CRC |
9767 NETIF_F_TSO |
9768 NETIF_F_TSO6);
9769
9770
9771 /* We can only support IPv4 TSO in tunnels if we can mangle the
9772  * inner IP ID field, so strip TSO if MANGLEID is not supported. */
9773 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
9774 features &= ~NETIF_F_TSO;
9775
9776 return features;
9777}
9778
9779static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
9780{
9781 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
9782 struct ixgbe_adapter *adapter = netdev_priv(dev);
9783 struct bpf_prog *old_prog;
9784
9785 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
9786 return -EINVAL;
9787
9788 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
9789 return -EINVAL;
9790
9791 /* verify ixgbe ring attributes are sufficient for XDP */
9792 for (i = 0; i < adapter->num_rx_queues; i++) {
9793 struct ixgbe_ring *ring = adapter->rx_ring[i];
9794
9795 if (ring_is_rsc_enabled(ring))
9796 return -EINVAL;
9797
9798 if (frame_size > ixgbe_rx_bufsz(ring))
9799 return -EINVAL;
9800 }
9801
9802 if (nr_cpu_ids > MAX_XDP_QUEUES)
9803 return -ENOMEM;
9804
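 /* swap the program pointer atomically so the datapath only ever sees
  * the old or the new program, never a partial state
  */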
9805 old_prog = xchg(&adapter->xdp_prog, prog);
9806
9807 /* If transitioning XDP modes reconfigure rings */
9808 if (!!prog != !!old_prog) {
9809 int err = ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
9810
9811 if (err) {
9812 rcu_assign_pointer(adapter->xdp_prog, old_prog);
9813 return -EINVAL;
9814 }
9815 } else {
9816 for (i = 0; i < adapter->num_rx_queues; i++)
9817 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
9818 }
9819
9820 if (old_prog)
9821 bpf_prog_put(old_prog);
9822
9823 return 0;
9824}
9825
9826static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp)
9827{
9828 struct ixgbe_adapter *adapter = netdev_priv(dev);
9829
9830 switch (xdp->command) {
9831 case XDP_SETUP_PROG:
9832 return ixgbe_xdp_setup(dev, xdp->prog);
9833 case XDP_QUERY_PROG:
9834 xdp->prog_attached = !!(adapter->xdp_prog);
9835 xdp->prog_id = adapter->xdp_prog ?
9836 adapter->xdp_prog->aux->id : 0;
9837 return 0;
9838 default:
9839 return -EINVAL;
9840 }
9841}
9842
9843static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
9844{
9845 struct ixgbe_adapter *adapter = netdev_priv(dev);
9846 struct ixgbe_ring *ring;
9847 int err;
9848
9849 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
9850 return -ENETDOWN;
9851
9852
9853 /* During program transitions it's possible adapter->xdp_prog is set
9854  * but the ring has not been configured yet; abort the xmit if so. */
9855 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
9856 if (unlikely(!ring))
9857 return -ENXIO;
9858
9859 err = ixgbe_xmit_xdp_ring(adapter, xdp);
9860 if (err != IXGBE_XDP_TX)
9861 return -ENOSPC;
9862
9863 return 0;
9864}
9865
9866static void ixgbe_xdp_flush(struct net_device *dev)
9867{
9868 struct ixgbe_adapter *adapter = netdev_priv(dev);
9869 struct ixgbe_ring *ring;
9870
9871 /* It's possible the device went down between the XDP xmit and this
9872  * flush, so make sure the device is still up.
9873  */
9874 if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
9875 return;
9876
9877 ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
9878 if (unlikely(!ring))
9879 return;
9880
9881
9882 /* Force memory writes to complete before letting h/w know there
9883  * are new descriptors to fetch. */
9884 wmb();
9885 writel(ring->next_to_use, ring->tail);
9886
9887 return;
9888}
9889
9890static const struct net_device_ops ixgbe_netdev_ops = {
9891 .ndo_open = ixgbe_open,
9892 .ndo_stop = ixgbe_close,
9893 .ndo_start_xmit = ixgbe_xmit_frame,
9894 .ndo_select_queue = ixgbe_select_queue,
9895 .ndo_set_rx_mode = ixgbe_set_rx_mode,
9896 .ndo_validate_addr = eth_validate_addr,
9897 .ndo_set_mac_address = ixgbe_set_mac,
9898 .ndo_change_mtu = ixgbe_change_mtu,
9899 .ndo_tx_timeout = ixgbe_tx_timeout,
9900 .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
9901 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
9902 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
9903 .ndo_do_ioctl = ixgbe_ioctl,
9904 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
9905 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
9906 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
9907 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
9908 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
9909 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
9910 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
9911 .ndo_get_stats64 = ixgbe_get_stats64,
9912 .ndo_setup_tc = __ixgbe_setup_tc,
9913#ifdef CONFIG_NET_POLL_CONTROLLER
9914 .ndo_poll_controller = ixgbe_netpoll,
9915#endif
9916#ifdef IXGBE_FCOE
9917 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
9918 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
9919 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
9920 .ndo_fcoe_enable = ixgbe_fcoe_enable,
9921 .ndo_fcoe_disable = ixgbe_fcoe_disable,
9922 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
9923 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
9924#endif
9925 .ndo_set_features = ixgbe_set_features,
9926 .ndo_fix_features = ixgbe_fix_features,
9927 .ndo_fdb_add = ixgbe_ndo_fdb_add,
9928 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
9929 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
9930 .ndo_dfwd_add_station = ixgbe_fwd_add,
9931 .ndo_dfwd_del_station = ixgbe_fwd_del,
9932 .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
9933 .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
9934 .ndo_features_check = ixgbe_features_check,
9935 .ndo_xdp = ixgbe_xdp,
9936 .ndo_xdp_xmit = ixgbe_xdp_xmit,
9937 .ndo_xdp_flush = ixgbe_xdp_flush,
9938};
9939
9940 /**
9941  * ixgbe_enumerate_functions - Get the number of ports this device has
9942  * @adapter: adapter structure
9943  *
9944  * This function enumerates the physical functions co-located on a single
9945  * slot, in order to determine how many ports a device has. This is most
9946  * useful in determining the required GT/s of PCIe bandwidth necessary
9947  * for optimal performance.
9948  **/
9949static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
9950{
9951 struct pci_dev *entry, *pdev = adapter->pdev;
9952 int physfns = 0;
9953
9954 /* Some cards can not use the generic count PCIe functions method,
9955  * because they are behind a parent switch, so we hardcode these to
9956  * the correct number of functions.
9957  */
9958 if (ixgbe_pcie_from_parent(&adapter->hw))
9959 physfns = 4;
9960
9961 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
9962 /* don't count virtual functions */
9963 if (entry->is_virtfn)
9964 continue;
9965
9966 /* When the devices on the bus don't all match our device ID,
9967  * we can't reliably determine the correct number of
9968  * functions. This can occur if a function has been direct
9969  * attached to a virtual machine using VT-d, for example. In
9970  * this case, simply return -1 to indicate this.
9971  */
9972 if ((entry->vendor != pdev->vendor) ||
9973 (entry->device != pdev->device))
9974 return -1;
9975
9976 physfns++;
9977 }
9978
9979 return physfns;
9980}
9981
9982 /**
9983  * ixgbe_wol_supported - Check whether device supports WoL
9984  * @adapter: the adapter private structure
9985  * @device_id: the device ID
9986  * @subdevice_id: the subsystem device ID
9987  *
9988  * This function is used by probe and ethtool to determine
9989  * which devices have WoL support
9990  *
9991  **/
9992bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
9993 u16 subdevice_id)
9994{
9995 struct ixgbe_hw *hw = &adapter->hw;
9996 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
9997
9998 /* WOL not supported on 82598 */
9999 if (hw->mac.type == ixgbe_mac_82598EB)
10000 return false;
10001
10002
10003 if (hw->mac.type >= ixgbe_mac_X540) {
10004 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
10005 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
10006 (hw->bus.func == 0)))
10007 return true;
10008 }
10009
10010 /* WOL is determined based on device IDs for 82599 MACs */
10011 switch (device_id) {
10012 case IXGBE_DEV_ID_82599_SFP:
10013
10014 switch (subdevice_id) {
10015 case IXGBE_SUBDEV_ID_82599_560FLR:
10016 case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
10017 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
10018 case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
10019 /* only support first port */
10020 if (hw->bus.func != 0)
10021 break;
10022 /* fall through */
10023 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
10024 case IXGBE_SUBDEV_ID_82599_SFP:
10025 case IXGBE_SUBDEV_ID_82599_RNDC:
10026 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
10027 case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
10028 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
10029 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
10030 return true;
10031 }
10032 break;
10033 case IXGBE_DEV_ID_82599EN_SFP:
10034
10035 switch (subdevice_id) {
10036 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
10037 return true;
10038 }
10039 break;
10040 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
10041
10042 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
10043 return true;
10044 break;
10045 case IXGBE_DEV_ID_82599_KX4:
10046 return true;
10047 default:
10048 break;
10049 }
10050
10051 return false;
10052}
10053
10054 /**
10055  * ixgbe_probe - Device Initialization Routine
10056  * @pdev: PCI device information struct
10057  * @ent: entry in ixgbe_pci_tbl
10058  *
10059  * Returns 0 on success, negative on failure
10060  *
10061  * ixgbe_probe initializes an adapter identified by a pci_dev structure.
10062  * The OS initialization, configuring of the adapter private structure,
10063  * and a hardware reset occur.
10064  **/
10065static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10066{
10067 struct net_device *netdev;
10068 struct ixgbe_adapter *adapter = NULL;
10069 struct ixgbe_hw *hw;
10070 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
10071 int i, err, pci_using_dac, expected_gts;
10072 unsigned int indices = MAX_TX_QUEUES;
10073 u8 part_str[IXGBE_PBANUM_LENGTH];
10074 bool disable_dev = false;
10075#ifdef IXGBE_FCOE
10076 u16 device_caps;
10077#endif
10078 u32 eec;
10079
10080 /* Catch broken hardware that put the wrong VF device ID in
10081  * the PCIe SR-IOV capability.
10082  */
10083 if (pdev->is_virtfn) {
10084 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
10085 pci_name(pdev), pdev->vendor, pdev->device);
10086 return -EINVAL;
10087 }
10088
10089 err = pci_enable_device_mem(pdev);
10090 if (err)
10091 return err;
10092
10093 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
10094 pci_using_dac = 1;
10095 } else {
10096 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10097 if (err) {
10098 dev_err(&pdev->dev,
10099 "No usable DMA configuration, aborting\n");
10100 goto err_dma;
10101 }
10102 pci_using_dac = 0;
10103 }
10104
10105 err = pci_request_mem_regions(pdev, ixgbe_driver_name);
10106 if (err) {
10107 dev_err(&pdev->dev,
10108 "pci_request_selected_regions failed 0x%x\n", err);
10109 goto err_pci_reg;
10110 }
10111
10112 pci_enable_pcie_error_reporting(pdev);
10113
10114 pci_set_master(pdev);
10115 pci_save_state(pdev);
10116
10117 if (ii->mac == ixgbe_mac_82598EB) {
10118#ifdef CONFIG_IXGBE_DCB
10119 /* 8 TCs w/ 4 queues per TC */
10120 indices = 4 * MAX_TRAFFIC_CLASS;
10121#else
10122 indices = IXGBE_MAX_RSS_INDICES;
10123#endif
10124 }
10125
10126 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
10127 if (!netdev) {
10128 err = -ENOMEM;
10129 goto err_alloc_etherdev;
10130 }
10131
10132 SET_NETDEV_DEV(netdev, &pdev->dev);
10133
10134 adapter = netdev_priv(netdev);
10135
10136 adapter->netdev = netdev;
10137 adapter->pdev = pdev;
10138 hw = &adapter->hw;
10139 hw->back = adapter;
10140 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
10141
10142 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
10143 pci_resource_len(pdev, 0));
10144 adapter->io_addr = hw->hw_addr;
10145 if (!hw->hw_addr) {
10146 err = -EIO;
10147 goto err_ioremap;
10148 }
10149
10150 netdev->netdev_ops = &ixgbe_netdev_ops;
10151 ixgbe_set_ethtool_ops(netdev);
10152 netdev->watchdog_timeo = 5 * HZ;
10153 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
10154
10155
10156 hw->mac.ops = *ii->mac_ops;
10157 hw->mac.type = ii->mac;
10158 hw->mvals = ii->mvals;
10159 if (ii->link_ops)
10160 hw->link.ops = *ii->link_ops;
10161
10162
10163 hw->eeprom.ops = *ii->eeprom_ops;
10164 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
10165 if (ixgbe_removed(hw->hw_addr)) {
10166 err = -EIO;
10167 goto err_ioremap;
10168 }
10169 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
10170 if (!(eec & BIT(8)))
10171 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
10172
10173
10174 hw->phy.ops = *ii->phy_ops;
10175 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
10176
10177 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
10178 hw->phy.mdio.mmds = 0;
10179 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
10180 hw->phy.mdio.dev = netdev;
10181 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
10182 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
10183
10184
10185 err = ixgbe_sw_init(adapter, ii);
10186 if (err)
10187 goto err_sw_init;
10188
10189 /* Make sure the SWFW semaphore is in a valid state */
10190 if (hw->mac.ops.init_swfw_sync)
10191 hw->mac.ops.init_swfw_sync(hw);
10192
10193 /* Clear the wake-up status so the adapter can be woken up via WOL */
10194 switch (adapter->hw.mac.type) {
10195 case ixgbe_mac_82599EB:
10196 case ixgbe_mac_X540:
10197 case ixgbe_mac_X550:
10198 case ixgbe_mac_X550EM_x:
10199 case ixgbe_mac_x550em_a:
10200 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10201 break;
10202 default:
10203 break;
10204 }
10205
10206 /*
10207  * If there is a fan on this device and it has failed, log
10208  * the failure.
10209  */
10210 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
10211 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
10212 if (esdp & IXGBE_ESDP_SDP1)
10213 e_crit(probe, "Fan has stopped, replace the adapter\n");
10214 }
10215
10216 if (allow_unsupported_sfp)
10217 hw->allow_unsupported_sfp = allow_unsupported_sfp;
10218
10219 /* reset_hw fills in the perm_addr as well */
10220 hw->phy.reset_if_overtemp = true;
10221 err = hw->mac.ops.reset_hw(hw);
10222 hw->phy.reset_if_overtemp = false;
10223 ixgbe_set_eee_capable(adapter);
10224 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
10225 err = 0;
10226 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
10227 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
10228 e_dev_err("Reload the driver after installing a supported module.\n");
10229 goto err_sw_init;
10230 } else if (err) {
10231 e_dev_err("HW Init failed: %d\n", err);
10232 goto err_sw_init;
10233 }
10234
10235#ifdef CONFIG_PCI_IOV
10236 /* SR-IOV is not supported on the 82598 */
10237 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
10238 goto skip_sriov;
10239
10240 ixgbe_init_mbx_params_pf(hw);
10241 hw->mbx.ops = ii->mbx_ops;
10242 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
10243 ixgbe_enable_sriov(adapter, max_vfs);
10244skip_sriov:
10245
10246#endif
10247 netdev->features = NETIF_F_SG |
10248 NETIF_F_TSO |
10249 NETIF_F_TSO6 |
10250 NETIF_F_RXHASH |
10251 NETIF_F_RXCSUM |
10252 NETIF_F_HW_CSUM;
10253
10254#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
10255 NETIF_F_GSO_GRE_CSUM | \
10256 NETIF_F_GSO_IPXIP4 | \
10257 NETIF_F_GSO_IPXIP6 | \
10258 NETIF_F_GSO_UDP_TUNNEL | \
10259 NETIF_F_GSO_UDP_TUNNEL_CSUM)
10260
10261 netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
10262 netdev->features |= NETIF_F_GSO_PARTIAL |
10263 IXGBE_GSO_PARTIAL_FEATURES;
10264
10265 if (hw->mac.type >= ixgbe_mac_82599EB)
10266 netdev->features |= NETIF_F_SCTP_CRC;
10267
10268 /* copy netdev features into the list of user selectable features */
10269 netdev->hw_features |= netdev->features |
10270 NETIF_F_HW_VLAN_CTAG_FILTER |
10271 NETIF_F_HW_VLAN_CTAG_RX |
10272 NETIF_F_HW_VLAN_CTAG_TX |
10273 NETIF_F_RXALL |
10274 NETIF_F_HW_L2FW_DOFFLOAD;
10275
10276 if (hw->mac.type >= ixgbe_mac_82599EB)
10277 netdev->hw_features |= NETIF_F_NTUPLE |
10278 NETIF_F_HW_TC;
10279
10280 if (pci_using_dac)
10281 netdev->features |= NETIF_F_HIGHDMA;
10282
10283 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
10284 netdev->hw_enc_features |= netdev->vlan_features;
10285 netdev->mpls_features |= NETIF_F_SG |
10286 NETIF_F_TSO |
10287 NETIF_F_TSO6 |
10288 NETIF_F_HW_CSUM;
10289 netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
10290
10291 /* set these bits last since they cannot be part of vlan_features */
10292 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
10293 NETIF_F_HW_VLAN_CTAG_RX |
10294 NETIF_F_HW_VLAN_CTAG_TX;
10295
10296 netdev->priv_flags |= IFF_UNICAST_FLT;
10297 netdev->priv_flags |= IFF_SUPP_NOFCS;
10298
10299 /* MTU range: 68 - 9710 */
10300 netdev->min_mtu = ETH_MIN_MTU;
10301 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
10302
10303#ifdef CONFIG_IXGBE_DCB
10304 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
10305 netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
10306#endif
10307
10308#ifdef IXGBE_FCOE
10309 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
10310 unsigned int fcoe_l;
10311
10312 if (hw->mac.ops.get_device_caps) {
10313 hw->mac.ops.get_device_caps(hw, &device_caps);
10314 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
10315 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
10316 }
10317
10318
10319 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
10320 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
10321
10322 netdev->features |= NETIF_F_FSO |
10323 NETIF_F_FCOE_CRC;
10324
10325 netdev->vlan_features |= NETIF_F_FSO |
10326 NETIF_F_FCOE_CRC |
10327 NETIF_F_FCOE_MTU;
10328 }
10329#endif
10330
10331 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
10332 netdev->hw_features |= NETIF_F_LRO;
10333 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
10334 netdev->features |= NETIF_F_LRO;
10335
10336 /* make sure the EEPROM is good */
10337 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
10338 e_dev_err("The EEPROM Checksum Is Not Valid\n");
10339 err = -EIO;
10340 goto err_sw_init;
10341 }
10342
10343 eth_platform_get_mac_address(&adapter->pdev->dev,
10344 adapter->hw.mac.perm_addr);
10345
10346 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
10347
10348 if (!is_valid_ether_addr(netdev->dev_addr)) {
10349 e_dev_err("invalid MAC address\n");
10350 err = -EIO;
10351 goto err_sw_init;
10352 }
10353
10354
10355 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
10356 ixgbe_mac_set_default_filter(adapter);
10357
10358 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
10359 (unsigned long) adapter);
10360
10361 if (ixgbe_removed(hw->hw_addr)) {
10362 err = -EIO;
10363 goto err_sw_init;
10364 }
10365 INIT_WORK(&adapter->service_task, ixgbe_service_task);
10366 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
10367 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
10368
10369 err = ixgbe_init_interrupt_scheme(adapter);
10370 if (err)
10371 goto err_sw_init;
10372
10373 for (i = 0; i < adapter->num_rx_queues; i++)
10374 u64_stats_init(&adapter->rx_ring[i]->syncp);
10375 for (i = 0; i < adapter->num_tx_queues; i++)
10376 u64_stats_init(&adapter->tx_ring[i]->syncp);
10377 for (i = 0; i < adapter->num_xdp_queues; i++)
10378 u64_stats_init(&adapter->xdp_ring[i]->syncp);
10379
10380
10381 adapter->wol = 0;
10382 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
10383 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
10384 pdev->subsystem_device);
10385 if (hw->wol_enabled)
10386 adapter->wol = IXGBE_WUFC_MAG;
10387
10388 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
10389
10390
10391 hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
10392 hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
10393
10394
10395 if (ixgbe_pcie_from_parent(hw))
10396 ixgbe_get_parent_bus_info(adapter);
10397 else
10398 hw->mac.ops.get_bus_info(hw);
10399
10400 /* calculate the expected PCIe bandwidth required for optimal
10401  * performance. Note that some older parts will never have enough
10402  * bandwidth due to being older generation PCIe parts. We clamp these
10403  * parts to ensure no warning is displayed if it can't be fixed.
10404  */
10405 switch (hw->mac.type) {
10406 case ixgbe_mac_82598EB:
10407 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
10408 break;
10409 default:
10410 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
10411 break;
10412 }
10413
10414 /* don't check link if we failed to enumerate functions */
10415 if (expected_gts > 0)
10416 ixgbe_check_minimum_link(adapter, expected_gts);
10417
10418 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
10419 if (err)
10420 strlcpy(part_str, "Unknown", sizeof(part_str));
10421 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
10422 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
10423 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
10424 part_str);
10425 else
10426 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
10427 hw->mac.type, hw->phy.type, part_str);
10428
10429 e_dev_info("%pM\n", netdev->dev_addr);
10430
10431
10432 err = hw->mac.ops.start_hw(hw);
10433 if (err == IXGBE_ERR_EEPROM_VERSION) {
10434 /* We are running on a pre-production device, log a warning */
10435 e_dev_warn("This device is a pre-production adapter/LOM. "
10436 "Please be aware there may be issues associated "
10437 "with your hardware. If you are experiencing "
10438 "problems please contact your Intel or hardware "
10439 "representative who provided you with this "
10440 "hardware.\n");
10441 }
10442 strcpy(netdev->name, "eth%d");
10443 pci_set_drvdata(pdev, adapter);
10444 err = register_netdev(netdev);
10445 if (err)
10446 goto err_register;
10447
10448
10449 /* power down the optics for 82599 SFP+ fiber */
10450 if (hw->mac.ops.disable_tx_laser)
10451 hw->mac.ops.disable_tx_laser(hw);
10452
10453 /* carrier off reporting is important to ethtool even BEFORE open */
10454 netif_carrier_off(netdev);
10455
10456#ifdef CONFIG_IXGBE_DCA
10457 if (dca_add_requester(&pdev->dev) == 0) {
10458 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
10459 ixgbe_setup_dca(adapter);
10460 }
10461#endif
10462 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
10463 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
10464 for (i = 0; i < adapter->num_vfs; i++)
10465 ixgbe_vf_configuration(pdev, (i | 0x10000000));
10466 }
10467
10468 /* firmware requires the driver version to be set to 0xFFFFFFFF
10469  * since the OS does not support the feature
10470  */
10471 if (hw->mac.ops.set_fw_drv_ver)
10472 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
10473 sizeof(ixgbe_driver_version) - 1,
10474 ixgbe_driver_version);
10475
10476
10477 ixgbe_add_sanmac_netdev(netdev);
10478
10479 e_dev_info("%s\n", ixgbe_default_device_descr);
10480
10481#ifdef CONFIG_IXGBE_HWMON
10482 if (ixgbe_sysfs_init(adapter))
10483 e_err(probe, "failed to allocate sysfs resources\n");
10484#endif
10485
10486 ixgbe_dbg_adapter_init(adapter);
10487
10488 /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
10489 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
10490 hw->mac.ops.setup_link(hw,
10491 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
10492 true);
10493
10494 return 0;
10495
10496err_register:
10497 ixgbe_release_hw_control(adapter);
10498 ixgbe_clear_interrupt_scheme(adapter);
10499err_sw_init:
10500 ixgbe_disable_sriov(adapter);
10501 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
10502 iounmap(adapter->io_addr);
10503 kfree(adapter->jump_tables[0]);
10504 kfree(adapter->mac_table);
10505 kfree(adapter->rss_key);
10506err_ioremap:
10507 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
10508 free_netdev(netdev);
10509err_alloc_etherdev:
10510 pci_release_mem_regions(pdev);
10511err_pci_reg:
10512err_dma:
10513 if (!adapter || disable_dev)
10514 pci_disable_device(pdev);
10515 return err;
10516}
10517
10518 /**
10519  * ixgbe_remove - Device Removal Routine
10520  * @pdev: PCI device information struct
10521  *
10522  * ixgbe_remove is called by the PCI subsystem to alert the driver
10523  * that it should release a PCI device.  This could be caused by a
10524  * Hot-Plug event, or because the driver is going to be removed from
10525  * memory.
10526  **/
10527static void ixgbe_remove(struct pci_dev *pdev)
10528{
10529 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10530 struct net_device *netdev;
10531 bool disable_dev;
10532 int i;
10533
10534 /* if !adapter then we already cleaned up in probe */
10535 if (!adapter)
10536 return;
10537
10538 netdev = adapter->netdev;
10539 ixgbe_dbg_adapter_exit(adapter);
10540
10541 set_bit(__IXGBE_REMOVING, &adapter->state);
10542 cancel_work_sync(&adapter->service_task);
10543
10544
10545#ifdef CONFIG_IXGBE_DCA
10546 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
10547 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
10548 dca_remove_requester(&pdev->dev);
10549 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
10550 IXGBE_DCA_CTRL_DCA_DISABLE);
10551 }
10552
10553#endif
10554#ifdef CONFIG_IXGBE_HWMON
10555 ixgbe_sysfs_exit(adapter);
10556#endif
10557
10558
10559 ixgbe_del_sanmac_netdev(netdev);
10560
10561#ifdef CONFIG_PCI_IOV
10562 ixgbe_disable_sriov(adapter);
10563#endif
10564 if (netdev->reg_state == NETREG_REGISTERED)
10565 unregister_netdev(netdev);
10566
10567 ixgbe_clear_interrupt_scheme(adapter);
10568
10569 ixgbe_release_hw_control(adapter);
10570
10571#ifdef CONFIG_DCB
10572 kfree(adapter->ixgbe_ieee_pfc);
10573 kfree(adapter->ixgbe_ieee_ets);
10574
10575#endif
10576 iounmap(adapter->io_addr);
10577 pci_release_mem_regions(pdev);
10578
10579 e_dev_info("complete\n");
10580
10581 for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
10582 if (adapter->jump_tables[i]) {
10583 kfree(adapter->jump_tables[i]->input);
10584 kfree(adapter->jump_tables[i]->mask);
10585 }
10586 kfree(adapter->jump_tables[i]);
10587 }
10588
10589 kfree(adapter->mac_table);
10590 kfree(adapter->rss_key);
10591 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
10592 free_netdev(netdev);
10593
10594 pci_disable_pcie_error_reporting(pdev);
10595
10596 if (disable_dev)
10597 pci_disable_device(pdev);
10598}
10599
10600 /**
10601  * ixgbe_io_error_detected - called when PCI error is detected
10602  * @pdev: Pointer to PCI device
10603  * @state: The current pci connection state
10604  *
10605  * This function is called after a PCI bus error affecting
10606  * this device has been detected.
10607  */
10608static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
10609 pci_channel_state_t state)
10610{
10611 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10612 struct net_device *netdev = adapter->netdev;
10613
10614#ifdef CONFIG_PCI_IOV
10615 struct ixgbe_hw *hw = &adapter->hw;
10616 struct pci_dev *bdev, *vfdev;
10617 u32 dw0, dw1, dw2, dw3;
10618 int vf, pos;
10619 u16 req_id, pf_func;
10620
10621 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
10622 adapter->num_vfs == 0)
10623 goto skip_bad_vf_detection;
10624
10625 bdev = pdev->bus->self;
10626 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
10627 bdev = bdev->bus->self;
10628
10629 if (!bdev)
10630 goto skip_bad_vf_detection;
10631
10632 pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
10633 if (!pos)
10634 goto skip_bad_vf_detection;
10635
10636 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
10637 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
10638 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
10639 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
10640 if (ixgbe_removed(hw->hw_addr))
10641 goto skip_bad_vf_detection;
10642
10643 req_id = dw1 >> 16;
10644 /* On the 82599, if bit 7 of the requester ID is set then it's a VF */
10645 if (!(req_id & 0x0080))
10646 goto skip_bad_vf_detection;
10647
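 /* the low bit of the requester ID identifies the owning PF; only handle
  * VFs that belong to this function
  */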
10648 pf_func = req_id & 0x01;
10649 if ((pf_func & 1) == (pdev->devfn & 1)) {
10650 unsigned int device_id;
10651
10652 vf = (req_id & 0x7F) >> 1;
10653 e_dev_err("VF %d has caused a PCIe error\n", vf);
10654 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
10655 "%8.8x\tdw3: %8.8x\n",
10656 dw0, dw1, dw2, dw3);
10657 switch (adapter->hw.mac.type) {
10658 case ixgbe_mac_82599EB:
10659 device_id = IXGBE_82599_VF_DEVICE_ID;
10660 break;
10661 case ixgbe_mac_X540:
10662 device_id = IXGBE_X540_VF_DEVICE_ID;
10663 break;
10664 case ixgbe_mac_X550:
10665 device_id = IXGBE_DEV_ID_X550_VF;
10666 break;
10667 case ixgbe_mac_X550EM_x:
10668 device_id = IXGBE_DEV_ID_X550EM_X_VF;
10669 break;
10670 case ixgbe_mac_x550em_a:
10671 device_id = IXGBE_DEV_ID_X550EM_A_VF;
10672 break;
10673 default:
10674 device_id = 0;
10675 break;
10676 }
10677
10678 /* Find the pci device of the offending VF */
10679 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
10680 while (vfdev) {
10681 if (vfdev->devfn == (req_id & 0xFF))
10682 break;
10683 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
10684 device_id, vfdev);
10685 }
10686 /*
10687  * There's a slim chance the VF could have been hot plugged,
10688  * so if it is no longer present we don't need to issue the
10689  * VFLR.  Just clean up the AER in that case.
10690  */
10691 if (vfdev) {
10692 pcie_flr(vfdev);
10693 /* Free device reference count */
10694 pci_dev_put(vfdev);
10695 }
10696
10697 pci_cleanup_aer_uncorrect_error_status(pdev);
10698 }
10699
10700 /*
10701  * Even though the error may have occurred on the other port
10702  * we still need to increment the vf error reference count for
10703  * both ports because the I/O resume function will be called
10704  * for both of them.
10705  */
10706 adapter->vferr_refcount++;
10707
10708 return PCI_ERS_RESULT_RECOVERED;
10709
10710skip_bad_vf_detection:
10711#endif
10712 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
10713 return PCI_ERS_RESULT_DISCONNECT;
10714
10715 rtnl_lock();
10716 netif_device_detach(netdev);
10717
10718 if (state == pci_channel_io_perm_failure) {
10719 rtnl_unlock();
10720 return PCI_ERS_RESULT_DISCONNECT;
10721 }
10722
10723 if (netif_running(netdev))
10724 ixgbe_close_suspend(adapter);
10725
10726 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
10727 pci_disable_device(pdev);
10728 rtnl_unlock();
10729
10730 /* Request a slot reset. */
10731 return PCI_ERS_RESULT_NEED_RESET;
10732}
10733
10734 /**
10735  * ixgbe_io_slot_reset - called after the pci bus has been reset.
10736  * @pdev: Pointer to PCI device
10737  *
10738  * Restart the card from scratch, as if from a cold-boot.
10739  */
10740static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
10741{
10742 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10743 pci_ers_result_t result;
10744 int err;
10745
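 /* re-enable the device and restore its saved PCI state before
  * resetting the MAC
  */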
10746 if (pci_enable_device_mem(pdev)) {
10747 e_err(probe, "Cannot re-enable PCI device after reset.\n");
10748 result = PCI_ERS_RESULT_DISCONNECT;
10749 } else {
10750 smp_mb__before_atomic();
10751 clear_bit(__IXGBE_DISABLED, &adapter->state);
10752 adapter->hw.hw_addr = adapter->io_addr;
10753 pci_set_master(pdev);
10754 pci_restore_state(pdev);
10755 pci_save_state(pdev);
10756
10757 pci_wake_from_d3(pdev, false);
10758
10759 ixgbe_reset(adapter);
10760 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10761 result = PCI_ERS_RESULT_RECOVERED;
10762 }
10763
10764 err = pci_cleanup_aer_uncorrect_error_status(pdev);
10765 if (err) {
10766 e_dev_err("pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err);
10768 /* non-fatal, continue */
10769 }
10770
10771 return result;
10772}
10773
10774 /**
10775  * ixgbe_io_resume - called when traffic can start flowing again.
10776  * @pdev: Pointer to PCI device
10777  *
10778  * This callback is called when the error recovery driver tells us that
10779  * it's OK to resume normal operation.
10780  */
10781static void ixgbe_io_resume(struct pci_dev *pdev)
10782{
10783 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10784 struct net_device *netdev = adapter->netdev;
10785
10786#ifdef CONFIG_PCI_IOV
10787 if (adapter->vferr_refcount) {
10788 e_info(drv, "Resuming after VF err\n");
10789 adapter->vferr_refcount--;
10790 return;
10791 }
10792
10793#endif
10794 rtnl_lock();
10795 if (netif_running(netdev))
10796 ixgbe_open(netdev);
10797
10798 netif_device_attach(netdev);
10799 rtnl_unlock();
10800}
10801
10802static const struct pci_error_handlers ixgbe_err_handler = {
10803 .error_detected = ixgbe_io_error_detected,
10804 .slot_reset = ixgbe_io_slot_reset,
10805 .resume = ixgbe_io_resume,
10806};
10807
10808static struct pci_driver ixgbe_driver = {
10809 .name = ixgbe_driver_name,
10810 .id_table = ixgbe_pci_tbl,
10811 .probe = ixgbe_probe,
10812 .remove = ixgbe_remove,
10813#ifdef CONFIG_PM
10814 .suspend = ixgbe_suspend,
10815 .resume = ixgbe_resume,
10816#endif
10817 .shutdown = ixgbe_shutdown,
10818 .sriov_configure = ixgbe_pci_sriov_configure,
10819 .err_handler = &ixgbe_err_handler
10820};
10821
10822 /**
10823  * ixgbe_init_module - Driver Registration Routine
10824  *
10825  * ixgbe_init_module is the first routine called when the driver is
10826  * loaded. All it does is register with the PCI subsystem.
10827  **/
10828static int __init ixgbe_init_module(void)
10829{
10830 int ret;
10831 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
10832 pr_info("%s\n", ixgbe_copyright);
10833
10834 ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
10835 if (!ixgbe_wq) {
10836 pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
10837 return -ENOMEM;
10838 }
10839
10840 ixgbe_dbg_init();
10841
10842 ret = pci_register_driver(&ixgbe_driver);
10843 if (ret) {
10844 destroy_workqueue(ixgbe_wq);
10845 ixgbe_dbg_exit();
10846 return ret;
10847 }
10848
10849#ifdef CONFIG_IXGBE_DCA
10850 dca_register_notify(&dca_notifier);
10851#endif
10852
10853 return 0;
10854}
10855
10856module_init(ixgbe_init_module);
10857
10858 /**
10859  * ixgbe_exit_module - Driver Exit Cleanup Routine
10860  *
10861  * ixgbe_exit_module is called just before the driver is removed
10862  * from memory.
10863  **/
10864static void __exit ixgbe_exit_module(void)
10865{
10866#ifdef CONFIG_IXGBE_DCA
10867 dca_unregister_notify(&dca_notifier);
10868#endif
10869 pci_unregister_driver(&ixgbe_driver);
10870
10871 ixgbe_dbg_exit();
10872 if (ixgbe_wq) {
10873 destroy_workqueue(ixgbe_wq);
10874 ixgbe_wq = NULL;
10875 }
10876}
10877
10878#ifdef CONFIG_IXGBE_DCA
10879static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
10880 void *p)
10881{
10882 int ret_val;
10883
10884 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
10885 __ixgbe_notify_dca);
10886
10887 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
10888}
10889
10890#endif
10891
10892module_exit(ixgbe_exit_module);
10893
10894
10895