/* Intel(R) 10 Gigabit PCI Express Linux Network Driver (ixgbe)
 * Copyright (c) 1999-2016 Intel Corporation.
 */
29#include <linux/types.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/netdevice.h>
33#include <linux/vmalloc.h>
34#include <linux/string.h>
35#include <linux/in.h>
36#include <linux/interrupt.h>
37#include <linux/ip.h>
38#include <linux/tcp.h>
39#include <linux/sctp.h>
40#include <linux/pkt_sched.h>
41#include <linux/ipv6.h>
42#include <linux/slab.h>
43#include <net/checksum.h>
44#include <net/ip6_checksum.h>
45#include <linux/etherdevice.h>
46#include <linux/ethtool.h>
47#include <linux/if.h>
48#include <linux/if_vlan.h>
49#include <linux/if_macvlan.h>
50#include <linux/if_bridge.h>
51#include <linux/prefetch.h>
52#include <scsi/fc/fc_fcoe.h>
53#include <net/udp_tunnel.h>
54#include <net/pkt_cls.h>
55#include <net/tc_act/tc_gact.h>
56#include <net/tc_act/tc_mirred.h>
57#include <net/vxlan.h>
58
59#include "ixgbe.h"
60#include "ixgbe_common.h"
61#include "ixgbe_dcb_82599.h"
62#include "ixgbe_sriov.h"
63#include "ixgbe_model.h"
64
65char ixgbe_driver_name[] = "ixgbe";
66static const char ixgbe_driver_string[] =
67 "Intel(R) 10 Gigabit PCI Express Network Driver";
68#ifdef IXGBE_FCOE
69char ixgbe_default_device_descr[] =
70 "Intel(R) 10 Gigabit Network Connection";
71#else
72static char ixgbe_default_device_descr[] =
73 "Intel(R) 10 Gigabit Network Connection";
74#endif
75#define DRV_VERSION "5.0.0-k"
76const char ixgbe_driver_version[] = DRV_VERSION;
77static const char ixgbe_copyright[] =
78 "Copyright (c) 1999-2016 Intel Corporation.";
79
static const char ixgbe_overheat_msg[] =
	"Network adapter has been stopped because it has overheated. "
	"Restart the computer. If the problem persists, "
	"power off the system and replace the adapter";
81
82static const struct ixgbe_info *ixgbe_info_tbl[] = {
83 [board_82598] = &ixgbe_82598_info,
84 [board_82599] = &ixgbe_82599_info,
85 [board_X540] = &ixgbe_X540_info,
86 [board_X550] = &ixgbe_X550_info,
87 [board_X550EM_x] = &ixgbe_X550EM_x_info,
88 [board_x550em_a] = &ixgbe_x550em_a_info,
89 [board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
90};
91
/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
100static const struct pci_device_id ixgbe_pci_tbl[] = {
101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
102 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
104 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
105 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
106 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
107 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
108 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
115 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
117 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
118 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
119 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
120 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
121 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
122 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
123 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
124 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
125 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
126 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
127 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
128 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
129 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
130 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
131 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
132 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
133 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
134 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
135 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
136 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
137 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
138 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
139 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
140 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
141 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
142 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
143 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
144 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
145 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
	/* required last entry */
147 {0, }
148};
149MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
150
151#ifdef CONFIG_IXGBE_DCA
152static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
153 void *p);
154static struct notifier_block dca_notifier = {
155 .notifier_call = ixgbe_notify_dca,
156 .next = NULL,
157 .priority = 0
158};
159#endif
160
161#ifdef CONFIG_PCI_IOV
162static unsigned int max_vfs;
163module_param(max_vfs, uint, 0);
164MODULE_PARM_DESC(max_vfs,
165 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
166#endif
167
168static unsigned int allow_unsupported_sfp;
169module_param(allow_unsupported_sfp, uint, 0);
170MODULE_PARM_DESC(allow_unsupported_sfp,
171 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
172
173#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
174static int debug = -1;
175module_param(debug, int, 0);
176MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
177
178MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
179MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
180MODULE_LICENSE("GPL");
181MODULE_VERSION(DRV_VERSION);
182
183static struct workqueue_struct *ixgbe_wq;
184
185static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
186static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
187
188static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
189 u32 reg, u16 *value)
190{
191 struct pci_dev *parent_dev;
192 struct pci_bus *parent_bus;
193
194 parent_bus = adapter->pdev->bus->parent;
195 if (!parent_bus)
196 return -1;
197
198 parent_dev = parent_bus->self;
199 if (!parent_dev)
200 return -1;
201
202 if (!pci_is_pcie(parent_dev))
203 return -1;
204
205 pcie_capability_read_word(parent_dev, reg, value);
206 if (*value == IXGBE_FAILED_READ_CFG_WORD &&
207 ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
208 return -1;
209 return 0;
210}
211
212static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
213{
214 struct ixgbe_hw *hw = &adapter->hw;
215 u16 link_status = 0;
216 int err;
217
218 hw->bus.type = ixgbe_bus_type_pci_express;
219
	/* Get the negotiated link width and speed from PCI config space
	 * of the parent, as this device is behind a switch
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);

	/* assume caller will handle error case */
226 if (err)
227 return err;
228
229 hw->bus.width = ixgbe_convert_bus_width(link_status);
230 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
231
232 return 0;
233}
234
/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from the
 * device. Used to ensure that various locations all have the correct device
 * ID checks.
 */
244static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
245{
246 switch (hw->device_id) {
247 case IXGBE_DEV_ID_82599_SFP_SF_QP:
248 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
249 return true;
250 default:
251 return false;
252 }
253}
254
255static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
256 int expected_gts)
257{
258 struct ixgbe_hw *hw = &adapter->hw;
259 int max_gts = 0;
260 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
261 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
262 struct pci_dev *pdev;
263
	/* Some devices are not connected over PCIe and thus do not negotiate
	 * speed.  These devices do not have valid bus info, and thus any
	 * report we generate may not be correct.
	 */
268 if (hw->bus.type == ixgbe_bus_type_internal)
269 return;
270
271
272 if (ixgbe_pcie_from_parent(&adapter->hw))
273 pdev = adapter->pdev->bus->parent->self;
274 else
275 pdev = adapter->pdev;
276
277 if (pcie_get_minimum_link(pdev, &speed, &width) ||
278 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
279 e_dev_warn("Unable to determine PCI Express bandwidth.\n");
280 return;
281 }
282
	switch (speed) {
	case PCIE_SPEED_2_5GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 2 * width;
		break;
	case PCIE_SPEED_5_0GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 4 * width;
		break;
	case PCIE_SPEED_8_0GT:
		/* 128b/130b encoding reduces throughput by less than 2% */
		max_gts = 8 * width;
		break;
296 default:
297 e_dev_warn("Unable to determine PCI Express bandwidth.\n");
298 return;
299 }
300
301 e_dev_info("PCI Express bandwidth of %dGT/s available\n",
302 max_gts);
303 e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
304 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
305 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
306 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
307 "Unknown"),
308 width,
309 (speed == PCIE_SPEED_2_5GT ? "20%" :
310 speed == PCIE_SPEED_5_0GT ? "20%" :
311 speed == PCIE_SPEED_8_0GT ? "<2%" :
312 "Unknown"));
313
314 if (max_gts < expected_gts) {
315 e_dev_warn("This is not sufficient for optimal performance of this card.\n");
316 e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
317 expected_gts);
318 e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
319 }
320}
321
322static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
323{
324 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
325 !test_bit(__IXGBE_REMOVING, &adapter->state) &&
326 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
327 queue_work(ixgbe_wq, &adapter->service_task);
328}
329
330static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
331{
332 struct ixgbe_adapter *adapter = hw->back;
333
334 if (!hw->hw_addr)
335 return;
336 hw->hw_addr = NULL;
337 e_dev_err("Adapter removed\n");
338 if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
339 ixgbe_service_event_schedule(adapter);
340}
341
342static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
343{
344 u32 value;
345
	/* The following check not only optimizes a bit by not performing a
	 * read of the status register when the register just read was the
	 * status register itself (and returned IXGBE_FAILED_READ_REG), it
	 * also blocks any potential recursion.
	 */
352 if (reg == IXGBE_STATUS) {
353 ixgbe_remove_adapter(hw);
354 return;
355 }
356 value = ixgbe_read_reg(hw, IXGBE_STATUS);
357 if (value == IXGBE_FAILED_READ_REG)
358 ixgbe_remove_adapter(hw);
359}
360
/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers. It checks for device
 * removal by confirming any read that returns all ones by checking the
 * status register value for all ones. This function avoids reading from
 * the hardware if a removal was previously detected, in which case it
 * returns IXGBE_FAILED_READ_REG.
 */
374u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
375{
376 u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
377 u32 value;
378
379 if (ixgbe_removed(reg_addr))
380 return IXGBE_FAILED_READ_REG;
381 if (unlikely(hw->phy.nw_mng_if_sel &
382 IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) {
383 struct ixgbe_adapter *adapter;
384 int i;
385
386 for (i = 0; i < 200; ++i) {
387 value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
388 if (likely(!value))
389 goto writes_completed;
390 if (value == IXGBE_FAILED_READ_REG) {
391 ixgbe_remove_adapter(hw);
392 return IXGBE_FAILED_READ_REG;
393 }
394 udelay(5);
395 }
396
397 adapter = hw->back;
398 e_warn(hw, "register writes incomplete %08x\n", value);
399 }
400
401writes_completed:
402 value = readl(reg_addr + reg);
403 if (unlikely(value == IXGBE_FAILED_READ_REG))
404 ixgbe_check_remove(hw, reg);
405 return value;
406}
407
408static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
409{
410 u16 value;
411
412 pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
413 if (value == IXGBE_FAILED_READ_CFG_WORD) {
414 ixgbe_remove_adapter(hw);
415 return true;
416 }
417 return false;
418}
419
420u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
421{
422 struct ixgbe_adapter *adapter = hw->back;
423 u16 value;
424
425 if (ixgbe_removed(hw->hw_addr))
426 return IXGBE_FAILED_READ_CFG_WORD;
427 pci_read_config_word(adapter->pdev, reg, &value);
428 if (value == IXGBE_FAILED_READ_CFG_WORD &&
429 ixgbe_check_cfg_remove(hw, adapter->pdev))
430 return IXGBE_FAILED_READ_CFG_WORD;
431 return value;
432}
433
434#ifdef CONFIG_PCI_IOV
435static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
436{
437 struct ixgbe_adapter *adapter = hw->back;
438 u32 value;
439
440 if (ixgbe_removed(hw->hw_addr))
441 return IXGBE_FAILED_READ_CFG_DWORD;
442 pci_read_config_dword(adapter->pdev, reg, &value);
443 if (value == IXGBE_FAILED_READ_CFG_DWORD &&
444 ixgbe_check_cfg_remove(hw, adapter->pdev))
445 return IXGBE_FAILED_READ_CFG_DWORD;
446 return value;
447}
448#endif
449
450void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
451{
452 struct ixgbe_adapter *adapter = hw->back;
453
454 if (ixgbe_removed(hw->hw_addr))
455 return;
456 pci_write_config_word(adapter->pdev, reg, value);
457}
458
459static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
460{
461 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
464 smp_mb__before_atomic();
465 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
466}
467
468struct ixgbe_reg_info {
469 u32 ofs;
470 char *name;
471};
472
static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* Rx Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* Tx Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{ .name = NULL }
};
504
/*
 * ixgbe_regdump - register printout routine
 */
509static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
510{
511 int i = 0, j = 0;
512 char rname[16];
513 u32 regs[64];
514
515 switch (reginfo->ofs) {
516 case IXGBE_SRRCTL(0):
517 for (i = 0; i < 64; i++)
518 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
519 break;
520 case IXGBE_DCA_RXCTRL(0):
521 for (i = 0; i < 64; i++)
522 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
523 break;
524 case IXGBE_RDLEN(0):
525 for (i = 0; i < 64; i++)
526 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
527 break;
528 case IXGBE_RDH(0):
529 for (i = 0; i < 64; i++)
530 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
531 break;
532 case IXGBE_RDT(0):
533 for (i = 0; i < 64; i++)
534 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
535 break;
536 case IXGBE_RXDCTL(0):
537 for (i = 0; i < 64; i++)
538 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
539 break;
540 case IXGBE_RDBAL(0):
541 for (i = 0; i < 64; i++)
542 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
543 break;
544 case IXGBE_RDBAH(0):
545 for (i = 0; i < 64; i++)
546 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
547 break;
548 case IXGBE_TDBAL(0):
549 for (i = 0; i < 64; i++)
550 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
551 break;
552 case IXGBE_TDBAH(0):
553 for (i = 0; i < 64; i++)
554 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
555 break;
556 case IXGBE_TDLEN(0):
557 for (i = 0; i < 64; i++)
558 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
559 break;
560 case IXGBE_TDH(0):
561 for (i = 0; i < 64; i++)
562 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
563 break;
564 case IXGBE_TDT(0):
565 for (i = 0; i < 64; i++)
566 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
567 break;
568 case IXGBE_TXDCTL(0):
569 for (i = 0; i < 64; i++)
570 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
571 break;
572 default:
573 pr_info("%-15s %08x\n", reginfo->name,
574 IXGBE_READ_REG(hw, reginfo->ofs));
575 return;
576 }
577
578 for (i = 0; i < 8; i++) {
579 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
580 pr_err("%-15s", rname);
581 for (j = 0; j < 8; j++)
582 pr_cont(" %08x", regs[i*8+j]);
583 pr_cont("\n");
584 }
585
586}
587
/*
 * ixgbe_dump - Print registers, Tx rings and Rx rings
 */
591static void ixgbe_dump(struct ixgbe_adapter *adapter)
592{
593 struct net_device *netdev = adapter->netdev;
594 struct ixgbe_hw *hw = &adapter->hw;
595 struct ixgbe_reg_info *reginfo;
596 int n = 0;
597 struct ixgbe_ring *tx_ring;
598 struct ixgbe_tx_buffer *tx_buffer;
599 union ixgbe_adv_tx_desc *tx_desc;
600 struct my_u0 { u64 a; u64 b; } *u0;
601 struct ixgbe_ring *rx_ring;
602 union ixgbe_adv_rx_desc *rx_desc;
603 struct ixgbe_rx_buffer *rx_buffer_info;
604 u32 staterr;
605 int i = 0;
606
607 if (!netif_msg_hw(adapter))
608 return;
609
610
611 if (netdev) {
612 dev_info(&adapter->pdev->dev, "Net device Info\n");
613 pr_info("Device Name state "
614 "trans_start\n");
615 pr_info("%-15s %016lX %016lX\n",
616 netdev->name,
617 netdev->state,
618 dev_trans_start(netdev));
619 }
620
621
622 dev_info(&adapter->pdev->dev, "Register Dump\n");
623 pr_info(" Register Name Value\n");
624 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
625 reginfo->name; reginfo++) {
626 ixgbe_regdump(hw, reginfo);
627 }
628
629
630 if (!netdev || !netif_running(netdev))
631 return;
632
633 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
634 pr_info(" %s %s %s %s\n",
635 "Queue [NTU] [NTC] [bi(ntc)->dma ]",
636 "leng", "ntw", "timestamp");
637 for (n = 0; n < adapter->num_tx_queues; n++) {
638 tx_ring = adapter->tx_ring[n];
639 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
640 pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
641 n, tx_ring->next_to_use, tx_ring->next_to_clean,
642 (u64)dma_unmap_addr(tx_buffer, dma),
643 dma_unmap_len(tx_buffer, len),
644 tx_buffer->next_to_watch,
645 (u64)tx_buffer->time_stamp);
646 }
647
648
649 if (!netif_msg_tx_done(adapter))
650 goto rx_ring_summary;
651
652 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
	/* Transmit Descriptor Formats
	 *
	 * The legacy and advanced transmit descriptor layout diagrams that
	 * normally appear here have been omitted; refer to the 82598/82599
	 * datasheets for the exact field layout.
	 */
689 for (n = 0; n < adapter->num_tx_queues; n++) {
690 tx_ring = adapter->tx_ring[n];
691 pr_info("------------------------------------\n");
692 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
693 pr_info("------------------------------------\n");
694 pr_info("%s%s %s %s %s %s\n",
695 "T [desc] [address 63:0 ] ",
696 "[PlPOIdStDDt Ln] [bi->dma ] ",
697 "leng", "ntw", "timestamp", "bi->skb");
698
699 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
700 tx_desc = IXGBE_TX_DESC(tx_ring, i);
701 tx_buffer = &tx_ring->tx_buffer_info[i];
702 u0 = (struct my_u0 *)tx_desc;
703 if (dma_unmap_len(tx_buffer, len) > 0) {
704 pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
705 i,
706 le64_to_cpu(u0->a),
707 le64_to_cpu(u0->b),
708 (u64)dma_unmap_addr(tx_buffer, dma),
709 dma_unmap_len(tx_buffer, len),
710 tx_buffer->next_to_watch,
711 (u64)tx_buffer->time_stamp,
712 tx_buffer->skb);
713 if (i == tx_ring->next_to_use &&
714 i == tx_ring->next_to_clean)
715 pr_cont(" NTC/U\n");
716 else if (i == tx_ring->next_to_use)
717 pr_cont(" NTU\n");
718 else if (i == tx_ring->next_to_clean)
719 pr_cont(" NTC\n");
720 else
721 pr_cont("\n");
722
723 if (netif_msg_pktdata(adapter) &&
724 tx_buffer->skb)
725 print_hex_dump(KERN_INFO, "",
726 DUMP_PREFIX_ADDRESS, 16, 1,
727 tx_buffer->skb->data,
728 dma_unmap_len(tx_buffer, len),
729 true);
730 }
731 }
732 }
733
734
735rx_ring_summary:
736 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
737 pr_info("Queue [NTU] [NTC]\n");
738 for (n = 0; n < adapter->num_rx_queues; n++) {
739 rx_ring = adapter->rx_ring[n];
740 pr_info("%5d %5X %5X\n",
741 n, rx_ring->next_to_use, rx_ring->next_to_clean);
742 }
743
744
745 if (!netif_msg_rx_status(adapter))
746 return;
747
748 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
	/* Receive Descriptor Formats
	 *
	 * The advanced receive descriptor read and write-back layout
	 * diagrams that normally appear here have been omitted; refer to
	 * the 82598/82599 datasheets for the exact field layout.
	 */
795 for (n = 0; n < adapter->num_rx_queues; n++) {
796 rx_ring = adapter->rx_ring[n];
797 pr_info("------------------------------------\n");
798 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
799 pr_info("------------------------------------\n");
800 pr_info("%s%s%s",
801 "R [desc] [ PktBuf A0] ",
802 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
803 "<-- Adv Rx Read format\n");
804 pr_info("%s%s%s",
805 "RWB[desc] [PcsmIpSHl PtRs] ",
806 "[vl er S cks ln] ---------------- [bi->skb ] ",
807 "<-- Adv Rx Write-Back format\n");
808
809 for (i = 0; i < rx_ring->count; i++) {
810 rx_buffer_info = &rx_ring->rx_buffer_info[i];
811 rx_desc = IXGBE_RX_DESC(rx_ring, i);
812 u0 = (struct my_u0 *)rx_desc;
813 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
814 if (staterr & IXGBE_RXD_STAT_DD) {
815
816 pr_info("RWB[0x%03X] %016llX "
817 "%016llX ---------------- %p", i,
818 le64_to_cpu(u0->a),
819 le64_to_cpu(u0->b),
820 rx_buffer_info->skb);
821 } else {
822 pr_info("R [0x%03X] %016llX "
823 "%016llX %016llX %p", i,
824 le64_to_cpu(u0->a),
825 le64_to_cpu(u0->b),
826 (u64)rx_buffer_info->dma,
827 rx_buffer_info->skb);
828
829 if (netif_msg_pktdata(adapter) &&
830 rx_buffer_info->dma) {
831 print_hex_dump(KERN_INFO, "",
832 DUMP_PREFIX_ADDRESS, 16, 1,
833 page_address(rx_buffer_info->page) +
834 rx_buffer_info->page_offset,
835 ixgbe_rx_bufsz(rx_ring), true);
836 }
837 }
838
839 if (i == rx_ring->next_to_use)
840 pr_cont(" NTU\n");
841 else if (i == rx_ring->next_to_clean)
842 pr_cont(" NTC\n");
843 else
844 pr_cont("\n");
845
846 }
847 }
848}
849
850static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
851{
852 u32 ctrl_ext;

	/* Let firmware take over control of h/w */
855 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
856 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
857 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
858}
859
860static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
861{
862 u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
865 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
866 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
867 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
868}
869
/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
878static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
879 u8 queue, u8 msix_vector)
880{
881 u32 ivar, index;
882 struct ixgbe_hw *hw = &adapter->hw;
883 switch (hw->mac.type) {
884 case ixgbe_mac_82598EB:
885 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
886 if (direction == -1)
887 direction = 0;
888 index = (((direction * 64) + queue) >> 2) & 0x1F;
889 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
890 ivar &= ~(0xFF << (8 * (queue & 0x3)));
891 ivar |= (msix_vector << (8 * (queue & 0x3)));
892 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
893 break;
894 case ixgbe_mac_82599EB:
895 case ixgbe_mac_X540:
896 case ixgbe_mac_X550:
897 case ixgbe_mac_X550EM_x:
898 case ixgbe_mac_x550em_a:
899 if (direction == -1) {
			/* other causes */
901 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
902 index = ((queue & 1) * 8);
903 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
904 ivar &= ~(0xFF << index);
905 ivar |= (msix_vector << index);
906 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
907 break;
908 } else {
			/* tx or rx causes */
910 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
911 index = ((16 * (queue & 1)) + (8 * direction));
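			/* On 82599 and newer MACs each IVAR register covers a
			 * queue pair: byte 0 = Rx(even queue), byte 1 =
			 * Tx(even queue), byte 2 = Rx(odd queue), byte 3 =
			 * Tx(odd queue), which is exactly the byte the index
			 * computed above selects.
			 */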
912 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
913 ivar &= ~(0xFF << index);
914 ivar |= (msix_vector << index);
915 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
916 break;
917 }
918 default:
919 break;
920 }
921}
922
923static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
924 u64 qmask)
925{
926 u32 mask;
927
928 switch (adapter->hw.mac.type) {
929 case ixgbe_mac_82598EB:
930 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
931 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
932 break;
933 case ixgbe_mac_82599EB:
934 case ixgbe_mac_X540:
935 case ixgbe_mac_X550:
936 case ixgbe_mac_X550EM_x:
937 case ixgbe_mac_x550em_a:
938 mask = (qmask & 0xFFFFFFFF);
939 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
940 mask = (qmask >> 32);
941 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
942 break;
943 default:
944 break;
945 }
946}
947
948static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
949{
950 struct ixgbe_hw *hw = &adapter->hw;
951 struct ixgbe_hw_stats *hwstats = &adapter->stats;
952 int i;
953 u32 data;
954
955 if ((hw->fc.current_mode != ixgbe_fc_full) &&
956 (hw->fc.current_mode != ixgbe_fc_rx_pause))
957 return;
958
959 switch (hw->mac.type) {
960 case ixgbe_mac_82598EB:
961 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
962 break;
963 default:
964 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
965 }
966 hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
969 if (!data)
970 return;
971
972 for (i = 0; i < adapter->num_tx_queues; i++)
973 clear_bit(__IXGBE_HANG_CHECK_ARMED,
974 &adapter->tx_ring[i]->state);
975}
976
977static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
978{
979 struct ixgbe_hw *hw = &adapter->hw;
980 struct ixgbe_hw_stats *hwstats = &adapter->stats;
981 u32 xoff[8] = {0};
982 u8 tc;
983 int i;
984 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
985
986 if (adapter->ixgbe_ieee_pfc)
987 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
988
989 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
990 ixgbe_update_xoff_rx_lfc(adapter);
991 return;
992 }
993
	/* update stats for each tc, only valid with PFC enabled */
995 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
996 u32 pxoffrxc;
997
998 switch (hw->mac.type) {
999 case ixgbe_mac_82598EB:
1000 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1001 break;
1002 default:
1003 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1004 }
1005 hwstats->pxoffrxc[i] += pxoffrxc;
1006
1007 tc = netdev_get_prio_tc_map(adapter->netdev, i);
1008 xoff[tc] += pxoffrxc;
1009 }
1010
	/* disarm tx queues that have received xoff frames */
1012 for (i = 0; i < adapter->num_tx_queues; i++) {
1013 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
1014
1015 tc = tx_ring->dcb_tc;
1016 if (xoff[tc])
1017 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1018 }
1019}
1020
1021static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
1022{
1023 return ring->stats.packets;
1024}
1025
1026static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
1027{
1028 struct ixgbe_adapter *adapter;
1029 struct ixgbe_hw *hw;
1030 u32 head, tail;
1031
1032 if (ring->l2_accel_priv)
1033 adapter = ring->l2_accel_priv->real_adapter;
1034 else
1035 adapter = netdev_priv(ring->netdev);
1036
1037 hw = &adapter->hw;
1038 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
1039 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
1040
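	/* The pending count below accounts for the case where the tail
	 * pointer has wrapped past the end of the ring while the head
	 * pointer has not.
	 */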
1041 if (head != tail)
1042 return (head < tail) ?
1043 tail - head : (tail + ring->count - head);
1044
1045 return 0;
1046}
1047
1048static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1049{
1050 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1051 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1052 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1053
1054 clear_check_for_tx_hang(tx_ring);
	/* Check for a hung queue, but be thorough.  This verifies that a
	 * transmit has been completed since the previous check AND there
	 * is at least one packet pending.  The ARMED bit is set to indicate
	 * a potential hang.  The bit is cleared if a pause frame is
	 * received to remove false hang detection due to PFC or 802.3x
	 * frames.  By requiring this to fail twice we avoid races with
	 * PFC clearing the ARMED bit and conditions where we run the
	 * check_tx_hang logic with a transmit completion pending but
	 * without time to complete it yet.
	 */
	if (tx_done_old == tx_done && tx_pending)
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
					&tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;
	/* reset the countdown */
	clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);

	return false;
1078}
1079
/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
1084static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
1085{
1086
1087
1088 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1089 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
1090 e_warn(drv, "initiating reset due to tx timeout\n");
1091 ixgbe_service_event_schedule(adapter);
1092 }
1093}
1094
/**
 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
 * @netdev: network interface device structure
 * @queue_index: Tx queue to set
 * @maxrate: desired maximum transmit bitrate in Mb/s
 **/
1098static int ixgbe_tx_maxrate(struct net_device *netdev,
1099 int queue_index, u32 maxrate)
1100{
1101 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1102 struct ixgbe_hw *hw = &adapter->hw;
1103 u32 bcnrc_val = ixgbe_link_mbps(adapter);
1104
1105 if (!maxrate)
1106 return 0;
1107
1108
1109 bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1110 bcnrc_val /= maxrate;
1111
1112
1113 bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1114 IXGBE_RTTBCNRC_RF_DEC_MASK;
1115
1116
1117 bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1118
1119 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
1120 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1121
1122 return 0;
1123}
1124
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
1131static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1132 struct ixgbe_ring *tx_ring, int napi_budget)
1133{
1134 struct ixgbe_adapter *adapter = q_vector->adapter;
1135 struct ixgbe_tx_buffer *tx_buffer;
1136 union ixgbe_adv_tx_desc *tx_desc;
1137 unsigned int total_bytes = 0, total_packets = 0;
1138 unsigned int budget = q_vector->tx.work_limit;
1139 unsigned int i = tx_ring->next_to_clean;
1140
1141 if (test_bit(__IXGBE_DOWN, &adapter->state))
1142 return true;
1143
1144 tx_buffer = &tx_ring->tx_buffer_info[i];
1145 tx_desc = IXGBE_TX_DESC(tx_ring, i);
1146 i -= tx_ring->count;
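	/* i is biased by -count (using unsigned wraparound) so the
	 * end-of-ring check in the loop below reduces to testing for
	 * i reaching zero instead of comparing against tx_ring->count
	 * on every iteration.
	 */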
1147
1148 do {
1149 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
1150
1151
1152 if (!eop_desc)
1153 break;
1154
1155
1156 read_barrier_depends();
1157
1158
1159 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1160 break;
1161
1162
1163 tx_buffer->next_to_watch = NULL;
1164
1165
1166 total_bytes += tx_buffer->bytecount;
1167 total_packets += tx_buffer->gso_segs;
1168
1169
1170 napi_consume_skb(tx_buffer->skb, napi_budget);
1171
1172
1173 dma_unmap_single(tx_ring->dev,
1174 dma_unmap_addr(tx_buffer, dma),
1175 dma_unmap_len(tx_buffer, len),
1176 DMA_TO_DEVICE);
1177
1178
1179 dma_unmap_len_set(tx_buffer, len, 0);
1180
1181
1182 while (tx_desc != eop_desc) {
1183 tx_buffer++;
1184 tx_desc++;
1185 i++;
1186 if (unlikely(!i)) {
1187 i -= tx_ring->count;
1188 tx_buffer = tx_ring->tx_buffer_info;
1189 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1190 }
1191
1192
1193 if (dma_unmap_len(tx_buffer, len)) {
1194 dma_unmap_page(tx_ring->dev,
1195 dma_unmap_addr(tx_buffer, dma),
1196 dma_unmap_len(tx_buffer, len),
1197 DMA_TO_DEVICE);
1198 dma_unmap_len_set(tx_buffer, len, 0);
1199 }
1200 }
1201
1202
1203 tx_buffer++;
1204 tx_desc++;
1205 i++;
1206 if (unlikely(!i)) {
1207 i -= tx_ring->count;
1208 tx_buffer = tx_ring->tx_buffer_info;
1209 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1210 }
1211
1212
1213 prefetch(tx_desc);
1214
1215
1216 budget--;
1217 } while (likely(budget));
1218
1219 i += tx_ring->count;
1220 tx_ring->next_to_clean = i;
1221 u64_stats_update_begin(&tx_ring->syncp);
1222 tx_ring->stats.bytes += total_bytes;
1223 tx_ring->stats.packets += total_packets;
1224 u64_stats_update_end(&tx_ring->syncp);
1225 q_vector->tx.total_bytes += total_bytes;
1226 q_vector->tx.total_packets += total_packets;
1227
1228 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1229
1230 struct ixgbe_hw *hw = &adapter->hw;
1231 e_err(drv, "Detected Tx Unit Hang\n"
1232 " Tx Queue <%d>\n"
1233 " TDH, TDT <%x>, <%x>\n"
1234 " next_to_use <%x>\n"
1235 " next_to_clean <%x>\n"
1236 "tx_buffer_info[next_to_clean]\n"
1237 " time_stamp <%lx>\n"
1238 " jiffies <%lx>\n",
1239 tx_ring->queue_index,
1240 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1241 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1242 tx_ring->next_to_use, i,
1243 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1244
1245 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
1246
1247 e_info(probe,
1248 "tx hang %d detected on queue %d, resetting adapter\n",
1249 adapter->tx_timeout_count + 1, tx_ring->queue_index);
1250
1251
1252 ixgbe_tx_timeout_reset(adapter);
1253
1254
1255 return true;
1256 }
1257
1258 netdev_tx_completed_queue(txring_txq(tx_ring),
1259 total_packets, total_bytes);
1260
1261#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
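	/* Wake the queue only once at least TX_WAKE_THRESHOLD descriptors
	 * are free; requiring room for roughly two worst-case fragmented
	 * frames avoids rapid stop/wake cycling under load.
	 */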
1262 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1263 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
1267 smp_mb();
1268 if (__netif_subqueue_stopped(tx_ring->netdev,
1269 tx_ring->queue_index)
1270 && !test_bit(__IXGBE_DOWN, &adapter->state)) {
1271 netif_wake_subqueue(tx_ring->netdev,
1272 tx_ring->queue_index);
1273 ++tx_ring->tx_stats.restart_queue;
1274 }
1275 }
1276
1277 return !!budget;
1278}
1279
1280#ifdef CONFIG_IXGBE_DCA
1281static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
1282 struct ixgbe_ring *tx_ring,
1283 int cpu)
1284{
1285 struct ixgbe_hw *hw = &adapter->hw;
1286 u32 txctrl = 0;
1287 u16 reg_offset;
1288
1289 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1290 txctrl = dca3_get_tag(tx_ring->dev, cpu);
1291
1292 switch (hw->mac.type) {
1293 case ixgbe_mac_82598EB:
1294 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1295 break;
1296 case ixgbe_mac_82599EB:
1297 case ixgbe_mac_X540:
1298 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
1299 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
1300 break;
1301 default:
1302
1303 return;
1304 }
1305
	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
1311 txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1312 IXGBE_DCA_TXCTRL_DATA_RRO_EN |
1313 IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1314
1315 IXGBE_WRITE_REG(hw, reg_offset, txctrl);
1316}
1317
1318static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
1319 struct ixgbe_ring *rx_ring,
1320 int cpu)
1321{
1322 struct ixgbe_hw *hw = &adapter->hw;
1323 u32 rxctrl = 0;
1324 u8 reg_idx = rx_ring->reg_idx;
1325
1326 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1327 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
1328
1329 switch (hw->mac.type) {
1330 case ixgbe_mac_82599EB:
1331 case ixgbe_mac_X540:
1332 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
1333 break;
1334 default:
1335 break;
1336 }
1337
	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
1343 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1344 IXGBE_DCA_RXCTRL_DATA_DCA_EN |
1345 IXGBE_DCA_RXCTRL_DESC_DCA_EN;
1346
1347 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
1348}
1349
1350static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1351{
1352 struct ixgbe_adapter *adapter = q_vector->adapter;
1353 struct ixgbe_ring *ring;
1354 int cpu = get_cpu();
1355
1356 if (q_vector->cpu == cpu)
1357 goto out_no_update;
1358
1359 ixgbe_for_each_ring(ring, q_vector->tx)
1360 ixgbe_update_tx_dca(adapter, ring, cpu);
1361
1362 ixgbe_for_each_ring(ring, q_vector->rx)
1363 ixgbe_update_rx_dca(adapter, ring, cpu);
1364
1365 q_vector->cpu = cpu;
1366out_no_update:
1367 put_cpu();
1368}
1369
1370static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1371{
1372 int i;
1373
1374
1375 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1376 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1377 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1378 else
1379 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1380 IXGBE_DCA_CTRL_DCA_DISABLE);
1381
1382 for (i = 0; i < adapter->num_q_vectors; i++) {
1383 adapter->q_vector[i]->cpu = -1;
1384 ixgbe_update_dca(adapter->q_vector[i]);
1385 }
1386}
1387
1388static int __ixgbe_notify_dca(struct device *dev, void *data)
1389{
1390 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1391 unsigned long event = *(unsigned long *)data;
1392
1393 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1394 return 0;
1395
1396 switch (event) {
1397 case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
1399 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1400 break;
1401 if (dca_add_requester(dev) == 0) {
1402 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1403 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1404 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1405 break;
1406 }
		/* fall through - DCA is disabled if the add failed */
1408 case DCA_PROVIDER_REMOVE:
1409 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1410 dca_remove_requester(dev);
1411 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1412 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1413 IXGBE_DCA_CTRL_DCA_DISABLE);
1414 }
1415 break;
1416 }
1417
1418 return 0;
1419}
1420
1421#endif
1422
1423#define IXGBE_RSS_L4_TYPES_MASK \
1424 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
1425 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
1426 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
1427 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
1428
1429static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1430 union ixgbe_adv_rx_desc *rx_desc,
1431 struct sk_buff *skb)
1432{
1433 u16 rss_type;
1434
1435 if (!(ring->netdev->features & NETIF_F_RXHASH))
1436 return;
1437
1438 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
1439 IXGBE_RXDADV_RSSTYPE_MASK;
1440
1441 if (!rss_type)
1442 return;
1443
1444 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1445 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
1446 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1447}
1448
1449#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
1457static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1458 union ixgbe_adv_rx_desc *rx_desc)
1459{
1460 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1461
1462 return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1463 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1464 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1465 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1466}
1467
1468#endif
1469
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
1475static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1476 union ixgbe_adv_rx_desc *rx_desc,
1477 struct sk_buff *skb)
1478{
1479 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1480 bool encap_pkt = false;
1481
1482 skb_checksum_none_assert(skb);
1483
1484
1485 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1486 return;
1487
1488
1489 if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
1490 encap_pkt = true;
1491 skb->encapsulation = 1;
1492 }
1493
1494
1495 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1496 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1497 ring->rx_stats.csum_err++;
1498 return;
1499 }
1500
1501 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1502 return;
1503
1504 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked
		 * as checksum errors.
		 */
1509 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1510 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1511 return;
1512
1513 ring->rx_stats.csum_err++;
1514 return;
1515 }
1516
1517
1518 skb->ip_summed = CHECKSUM_UNNECESSARY;
1519 if (encap_pkt) {
1520 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1521 return;
1522
1523 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1524 skb->ip_summed = CHECKSUM_NONE;
1525 return;
1526 }
1527
1528 skb->csum_level = 1;
1529 }
1530}
1531
1532static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
1533{
1534 return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
1535}
1536
1537static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1538 struct ixgbe_rx_buffer *bi)
1539{
1540 struct page *page = bi->page;
1541 dma_addr_t dma;
1542
1543
1544 if (likely(page))
1545 return true;
1546
1547
1548 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
1549 if (unlikely(!page)) {
1550 rx_ring->rx_stats.alloc_rx_page_failed++;
1551 return false;
1552 }
1553
1554
1555 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1556 ixgbe_rx_pg_size(rx_ring),
1557 DMA_FROM_DEVICE,
1558 IXGBE_RX_DMA_ATTR);
1559
	/*
	 * if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
1564 if (dma_mapping_error(rx_ring->dev, dma)) {
1565 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1566
1567 rx_ring->rx_stats.alloc_rx_page_failed++;
1568 return false;
1569 }
1570
1571 bi->dma = dma;
1572 bi->page = page;
1573 bi->page_offset = ixgbe_rx_offset(rx_ring);
1574 bi->pagecnt_bias = 1;
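	/* pagecnt_bias tracks the share of the page reference count that
	 * the driver still owns; ixgbe_can_reuse_rx_page() compares it
	 * against page_ref_count() to decide whether the page can be
	 * flipped and reused instead of being unmapped and freed.
	 */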
1575
1576 return true;
1577}
1578
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
1584void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1585{
1586 union ixgbe_adv_rx_desc *rx_desc;
1587 struct ixgbe_rx_buffer *bi;
1588 u16 i = rx_ring->next_to_use;
1589 u16 bufsz;
1590
1591
1592 if (!cleaned_count)
1593 return;
1594
1595 rx_desc = IXGBE_RX_DESC(rx_ring, i);
1596 bi = &rx_ring->rx_buffer_info[i];
1597 i -= rx_ring->count;
1598
1599 bufsz = ixgbe_rx_bufsz(rx_ring);
1600
1601 do {
1602 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1603 break;
1604
1605
1606 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1607 bi->page_offset, bufsz,
1608 DMA_FROM_DEVICE);
1609
		/*
		 * Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
1614 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1615
1616 rx_desc++;
1617 bi++;
1618 i++;
1619 if (unlikely(!i)) {
1620 rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1621 bi = rx_ring->rx_buffer_info;
1622 i -= rx_ring->count;
1623 }
1624
1625
1626 rx_desc->wb.upper.length = 0;
1627
1628 cleaned_count--;
1629 } while (cleaned_count);
1630
1631 i += rx_ring->count;
1632
1633 if (rx_ring->next_to_use != i) {
1634 rx_ring->next_to_use = i;
1635
1636
1637 rx_ring->next_to_alloc = i;
1638
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
1644 wmb();
1645 writel(i, rx_ring->tail);
1646 }
1647}
1648
1649static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1650 struct sk_buff *skb)
1651{
1652 u16 hdr_len = skb_headlen(skb);
1653
1654
1655 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1656 IXGBE_CB(skb)->append_cnt);
1657 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1658}
1659
1660static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1661 struct sk_buff *skb)
1662{
1663
1664 if (!IXGBE_CB(skb)->append_cnt)
1665 return;
1666
1667 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1668 rx_ring->rx_stats.rsc_flush++;
1669
1670 ixgbe_set_rsc_gso_size(rx_ring, skb);
1671
1672
1673 IXGBE_CB(skb)->append_cnt = 0;
1674}
1675
/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
1686static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1687 union ixgbe_adv_rx_desc *rx_desc,
1688 struct sk_buff *skb)
1689{
1690 struct net_device *dev = rx_ring->netdev;
1691 u32 flags = rx_ring->q_vector->adapter->flags;
1692
1693 ixgbe_update_rsc_stats(rx_ring, skb);
1694
1695 ixgbe_rx_hash(rx_ring, rx_desc, skb);
1696
1697 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1698
1699 if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
1700 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
1701
1702 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1703 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1704 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1705 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1706 }
1707
1708 skb_record_rx_queue(skb, rx_ring->queue_index);
1709
1710 skb->protocol = eth_type_trans(skb, dev);
1711}
1712
1713static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1714 struct sk_buff *skb)
1715{
1716 napi_gro_receive(&q_vector->napi, skb);
1717}
1718
/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
1730static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1731 union ixgbe_adv_rx_desc *rx_desc,
1732 struct sk_buff *skb)
1733{
1734 u32 ntc = rx_ring->next_to_clean + 1;
1735
1736
1737 ntc = (ntc < rx_ring->count) ? ntc : 0;
1738 rx_ring->next_to_clean = ntc;
1739
1740 prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1741
1742
1743 if (ring_is_rsc_enabled(rx_ring)) {
1744 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1745 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1746
1747 if (unlikely(rsc_enabled)) {
1748 u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1749
1750 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1751 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1752
1753
1754 ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1755 ntc &= IXGBE_RXDADV_NEXTP_MASK;
1756 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1757 }
1758 }
1759
1760
1761 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1762 return false;
1763
1764
1765 rx_ring->rx_buffer_info[ntc].skb = skb;
1766 rx_ring->rx_stats.non_eop_descs++;
1767
1768 return true;
1769}
1770
/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
1783static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1784 struct sk_buff *skb)
1785{
1786 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1787 unsigned char *va;
1788 unsigned int pull_len;
1789
1790
1791
1792
1793
1794
1795 va = skb_frag_address(frag);
1796
1797
1798
1799
1800
1801 pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
1802
1803
1804 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1805
1806
1807 skb_frag_size_sub(frag, pull_len);
1808 frag->page_offset += pull_len;
1809 skb->data_len -= pull_len;
1810 skb->tail += pull_len;
1811}
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1824 struct sk_buff *skb)
1825{
1826
1827 if (unlikely(IXGBE_CB(skb)->page_released)) {
1828 dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
1829 ixgbe_rx_pg_size(rx_ring),
1830 DMA_FROM_DEVICE,
1831 IXGBE_RX_DMA_ATTR);
1832 } else {
1833 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1834
1835 dma_sync_single_range_for_cpu(rx_ring->dev,
1836 IXGBE_CB(skb)->dma,
1837 frag->page_offset,
1838 skb_frag_size(frag),
1839 DMA_FROM_DEVICE);
1840 }
1841}
1842
/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check if the header portion of the skb is valid.
 *
 * Frames flagged with hardware error bits are dropped here (unless
 * NETIF_F_RXALL is set).
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition, if the skb is shorter than the minimum Ethernet frame
 * it is padded so that it qualifies as a valid frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
1861static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1862 union ixgbe_adv_rx_desc *rx_desc,
1863 struct sk_buff *skb)
1864{
1865 struct net_device *netdev = rx_ring->netdev;
1866
1867
1868 if (unlikely(ixgbe_test_staterr(rx_desc,
1869 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1870 !(netdev->features & NETIF_F_RXALL))) {
1871 dev_kfree_skb_any(skb);
1872 return true;
1873 }
1874
1875
1876 if (!skb_headlen(skb))
1877 ixgbe_pull_tail(rx_ring, skb);
1878
1879#ifdef IXGBE_FCOE
1880
1881 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1882 return false;
1883
1884#endif
1885
1886 if (eth_skb_pad(skb))
1887 return true;
1888
1889 return false;
1890}
1891
1892
1893
1894
1895
1896
1897
1898
1899static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1900 struct ixgbe_rx_buffer *old_buff)
1901{
1902 struct ixgbe_rx_buffer *new_buff;
1903 u16 nta = rx_ring->next_to_alloc;
1904
1905 new_buff = &rx_ring->rx_buffer_info[nta];
1906
1907
1908 nta++;
1909 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1910
1911
1912
1913
1914
1915 new_buff->dma = old_buff->dma;
1916 new_buff->page = old_buff->page;
1917 new_buff->page_offset = old_buff->page_offset;
1918 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1919}
1920
1921static inline bool ixgbe_page_is_reserved(struct page *page)
1922{
1923 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1924}
1925
1926static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
1927{
1928 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1929 struct page *page = rx_buffer->page;
1930
1931
1932 if (unlikely(ixgbe_page_is_reserved(page)))
1933 return false;
1934
1935#if (PAGE_SIZE < 8192)
1936
1937 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
1938 return false;
1939#else
	/* The last offset is a bit aggressive in that we assume the
	 * worst case of FCoE being enabled and using a 3K buffer.
	 * However this should have minimal impact as the 1K extra is
	 * still less than one buffer in size.
	 */
1945#define IXGBE_LAST_OFFSET \
1946 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
1947 if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
1948 return false;
1949#endif
1950
	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
1955 if (unlikely(!pagecnt_bias)) {
1956 page_ref_add(page, USHRT_MAX);
1957 rx_buffer->pagecnt_bias = USHRT_MAX;
1958 }
1959
1960 return true;
1961}
1962
/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of data in rx_buffer
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * The page is attached to the skb as a fragment, and the buffer's
 * page_offset is then advanced (or flipped to the other half of the page)
 * so the next receive does not reuse the region just handed to the stack.
 **/
1978static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
1979 struct ixgbe_rx_buffer *rx_buffer,
1980 struct sk_buff *skb,
1981 unsigned int size)
1982{
1983#if (PAGE_SIZE < 8192)
1984 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
1985#else
1986 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
1987 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
1988 SKB_DATA_ALIGN(size);
1989#endif
1990 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1991 rx_buffer->page_offset, size, truesize);
1992#if (PAGE_SIZE < 8192)
1993 rx_buffer->page_offset ^= truesize;
1994#else
1995 rx_buffer->page_offset += truesize;
1996#endif
1997}
1998
1999static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
2000 union ixgbe_adv_rx_desc *rx_desc,
2001 struct sk_buff **skb,
2002 const unsigned int size)
2003{
2004 struct ixgbe_rx_buffer *rx_buffer;
2005
2006 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
2007 prefetchw(rx_buffer->page);
2008 *skb = rx_buffer->skb;
2009
	/* Delay unmapping of the first packet. It carries the header
	 * information, and HW may still access the header after the
	 * writeback.  Only unmap it when EOP is reached.
	 */
2014 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
2015 if (!*skb)
2016 goto skip_sync;
2017 } else {
2018 if (*skb)
2019 ixgbe_dma_sync_frag(rx_ring, *skb);
2020 }
2021
2022
2023 dma_sync_single_range_for_cpu(rx_ring->dev,
2024 rx_buffer->dma,
2025 rx_buffer->page_offset,
2026 size,
2027 DMA_FROM_DEVICE);
2028skip_sync:
2029 rx_buffer->pagecnt_bias--;
2030
2031 return rx_buffer;
2032}
2033
2034static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
2035 struct ixgbe_rx_buffer *rx_buffer,
2036 struct sk_buff *skb)
2037{
2038 if (ixgbe_can_reuse_rx_page(rx_buffer)) {
2039
2040 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2041 } else {
2042 if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
2043
2044 IXGBE_CB(skb)->page_released = true;
2045 } else {
2046
2047 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2048 ixgbe_rx_pg_size(rx_ring),
2049 DMA_FROM_DEVICE,
2050 IXGBE_RX_DMA_ATTR);
2051 }
2052 __page_frag_cache_drain(rx_buffer->page,
2053 rx_buffer->pagecnt_bias);
2054 }
2055
2056
2057 rx_buffer->page = NULL;
2058 rx_buffer->skb = NULL;
2059}
2060
2061static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
2062 struct ixgbe_rx_buffer *rx_buffer,
2063 union ixgbe_adv_rx_desc *rx_desc,
2064 unsigned int size)
2065{
2066 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
2067#if (PAGE_SIZE < 8192)
2068 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2069#else
2070 unsigned int truesize = SKB_DATA_ALIGN(size);
2071#endif
2072 struct sk_buff *skb;
2073
2074
2075 prefetch(va);
2076#if L1_CACHE_BYTES < 128
2077 prefetch(va + L1_CACHE_BYTES);
2078#endif
2079
2080
2081 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
2082 if (unlikely(!skb))
2083 return NULL;
2084
2085 if (size > IXGBE_RX_HDR_SIZE) {
2086 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2087 IXGBE_CB(skb)->dma = rx_buffer->dma;
2088
2089 skb_add_rx_frag(skb, 0, rx_buffer->page,
2090 rx_buffer->page_offset,
2091 size, truesize);
2092#if (PAGE_SIZE < 8192)
2093 rx_buffer->page_offset ^= truesize;
2094#else
2095 rx_buffer->page_offset += truesize;
2096#endif
2097 } else {
2098 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
2099 rx_buffer->pagecnt_bias++;
2100 }
2101
2102 return skb;
2103}
2104
2105static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
2106 struct ixgbe_rx_buffer *rx_buffer,
2107 union ixgbe_adv_rx_desc *rx_desc,
2108 unsigned int size)
2109{
2110 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
2111#if (PAGE_SIZE < 8192)
2112 unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
2113#else
2114 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2115 SKB_DATA_ALIGN(IXGBE_SKB_PAD + size);
2116#endif
2117 struct sk_buff *skb;
2118
2119
2120 prefetch(va);
2121#if L1_CACHE_BYTES < 128
2122 prefetch(va + L1_CACHE_BYTES);
2123#endif
2124
2125
2126 skb = build_skb(va - IXGBE_SKB_PAD, truesize);
2127 if (unlikely(!skb))
2128 return NULL;
2129
2130
2131 skb_reserve(skb, IXGBE_SKB_PAD);
2132 __skb_put(skb, size);
2133
2134
2135 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
2136 IXGBE_CB(skb)->dma = rx_buffer->dma;
2137
2138
2139#if (PAGE_SIZE < 8192)
2140 rx_buffer->page_offset ^= truesize;
2141#else
2142 rx_buffer->page_offset += truesize;
2143#endif
2144
2145 return skb;
2146}
2147
/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
2161static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2162 struct ixgbe_ring *rx_ring,
2163 const int budget)
2164{
2165 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2166#ifdef IXGBE_FCOE
2167 struct ixgbe_adapter *adapter = q_vector->adapter;
2168 int ddp_bytes;
2169 unsigned int mss = 0;
2170#endif
2171 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2172
2173 while (likely(total_rx_packets < budget)) {
2174 union ixgbe_adv_rx_desc *rx_desc;
2175 struct ixgbe_rx_buffer *rx_buffer;
2176 struct sk_buff *skb;
2177 unsigned int size;
2178
2179
2180 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2181 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2182 cleaned_count = 0;
2183 }
2184
2185 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2186 size = le16_to_cpu(rx_desc->wb.upper.length);
2187 if (!size)
2188 break;
2189
		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back.
		 */
2194 dma_rmb();
2195
2196 rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
2197
2198
2199 if (skb)
2200 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
2201 else if (ring_uses_build_skb(rx_ring))
2202 skb = ixgbe_build_skb(rx_ring, rx_buffer,
2203 rx_desc, size);
2204 else
2205 skb = ixgbe_construct_skb(rx_ring, rx_buffer,
2206 rx_desc, size);
2207
2208
2209 if (!skb) {
2210 rx_ring->rx_stats.alloc_rx_buff_failed++;
2211 rx_buffer->pagecnt_bias++;
2212 break;
2213 }
2214
2215 ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
2216 cleaned_count++;
2217
2218
2219 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2220 continue;
2221
2222
2223 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2224 continue;
2225
2226
2227 total_rx_bytes += skb->len;
2228
2229
2230 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2231
2232#ifdef IXGBE_FCOE
2233
2234 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2235 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2236
2237 if (ddp_bytes > 0) {
2238 if (!mss) {
2239 mss = rx_ring->netdev->mtu -
2240 sizeof(struct fcoe_hdr) -
2241 sizeof(struct fc_frame_header) -
2242 sizeof(struct fcoe_crc_eof);
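					/* mss &= ~511 rounds the estimated
					 * FCoE segment size down to a multiple
					 * of 512 bytes before it is used to
					 * split ddp_bytes into a packet count
					 * below.
					 */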
2243 if (mss > 512)
2244 mss &= ~511;
2245 }
2246 total_rx_bytes += ddp_bytes;
2247 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2248 mss);
2249 }
2250 if (!ddp_bytes) {
2251 dev_kfree_skb_any(skb);
2252 continue;
2253 }
2254 }
2255
2256#endif
2257 ixgbe_rx_skb(q_vector, skb);
2258
2259
2260 total_rx_packets++;
2261 }
2262
2263 u64_stats_update_begin(&rx_ring->syncp);
2264 rx_ring->stats.packets += total_rx_packets;
2265 rx_ring->stats.bytes += total_rx_bytes;
2266 u64_stats_update_end(&rx_ring->syncp);
2267 q_vector->rx.total_packets += total_rx_packets;
2268 q_vector->rx.total_bytes += total_rx_bytes;
2269
2270 return total_rx_packets;
2271}
2272
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
2280static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2281{
2282 struct ixgbe_q_vector *q_vector;
2283 int v_idx;
2284 u32 mask;
2285
2286
2287 if (adapter->num_vfs > 32) {
2288 u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
2289 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2290 }
2291
	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
2296 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2297 struct ixgbe_ring *ring;
2298 q_vector = adapter->q_vector[v_idx];
2299
2300 ixgbe_for_each_ring(ring, q_vector->rx)
2301 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2302
2303 ixgbe_for_each_ring(ring, q_vector->tx)
2304 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2305
2306 ixgbe_write_eitr(q_vector);
2307 }
2308
2309 switch (adapter->hw.mac.type) {
2310 case ixgbe_mac_82598EB:
2311 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2312 v_idx);
2313 break;
2314 case ixgbe_mac_82599EB:
2315 case ixgbe_mac_X540:
2316 case ixgbe_mac_X550:
2317 case ixgbe_mac_X550EM_x:
2318 case ixgbe_mac_x550em_a:
2319 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2320 break;
2321 default:
2322 break;
2323 }
2324 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2325
2326
2327 mask = IXGBE_EIMS_ENABLE_MASK;
2328 mask &= ~(IXGBE_EIMS_OTHER |
2329 IXGBE_EIMS_MAILBOX |
2330 IXGBE_EIMS_LSC);
2331
2332 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2333}
2334
2335enum latency_range {
2336 lowest_latency = 0,
2337 low_latency = 1,
2338 bulk_latency = 2,
2339 latency_invalid = 255
2340};
2341
/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 **/
2357static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2358 struct ixgbe_ring_container *ring_container)
2359{
2360 int bytes = ring_container->total_bytes;
2361 int packets = ring_container->total_packets;
2362 u32 timepassed_us;
2363 u64 bytes_perint;
2364 u8 itr_setting = ring_container->itr;
2365
2366 if (packets == 0)
2367 return;
2368
	/* simple throttle rate management
	 *    0-10MB/s   lowest (100000 ints/s)
	 *   10-20MB/s   low    (20000 ints/s)
	 *   20-1249MB/s bulk   (12000 ints/s)
	 */
	/* what was last interrupt timeslice? */
2375 timepassed_us = q_vector->itr >> 2;
2376 if (timepassed_us == 0)
2377 return;
2378
2379 bytes_perint = bytes / timepassed_us;
2380
2381 switch (itr_setting) {
2382 case lowest_latency:
2383 if (bytes_perint > 10)
2384 itr_setting = low_latency;
2385 break;
2386 case low_latency:
2387 if (bytes_perint > 20)
2388 itr_setting = bulk_latency;
2389 else if (bytes_perint <= 10)
2390 itr_setting = lowest_latency;
2391 break;
2392 case bulk_latency:
2393 if (bytes_perint <= 20)
2394 itr_setting = low_latency;
2395 break;
2396 }
2397
2398
2399 ring_container->total_bytes = 0;
2400 ring_container->total_packets = 0;
2401
2402
2403 ring_container->itr = itr_setting;
2404}
2405
/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
2414void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2415{
2416 struct ixgbe_adapter *adapter = q_vector->adapter;
2417 struct ixgbe_hw *hw = &adapter->hw;
2418 int v_idx = q_vector->v_idx;
2419 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2420
2421 switch (adapter->hw.mac.type) {
2422 case ixgbe_mac_82598EB:
2423
2424 itr_reg |= (itr_reg << 16);
2425 break;
2426 case ixgbe_mac_82599EB:
2427 case ixgbe_mac_X540:
2428 case ixgbe_mac_X550:
2429 case ixgbe_mac_X550EM_x:
2430 case ixgbe_mac_x550em_a:
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
2435 itr_reg |= IXGBE_EITR_CNT_WDIS;
2436 break;
2437 default:
2438 break;
2439 }
2440 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2441}
2442
2443static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2444{
2445 u32 new_itr = q_vector->itr;
2446 u8 current_itr;
2447
2448 ixgbe_update_itr(q_vector, &q_vector->tx);
2449 ixgbe_update_itr(q_vector, &q_vector->rx);
2450
2451 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
2452
2453 switch (current_itr) {
2454
2455 case lowest_latency:
2456 new_itr = IXGBE_100K_ITR;
2457 break;
2458 case low_latency:
2459 new_itr = IXGBE_20K_ITR;
2460 break;
2461 case bulk_latency:
2462 new_itr = IXGBE_12K_ITR;
2463 break;
2464 default:
2465 break;
2466 }
2467
2468 if (new_itr != q_vector->itr) {
2469
2470 new_itr = (10 * new_itr * q_vector->itr) /
2471 ((9 * new_itr) + q_vector->itr);
2472
2473
2474 q_vector->itr = new_itr;
2475
2476 ixgbe_write_eitr(q_vector);
2477 }
2478}
2479
/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/
2484static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2485{
2486 struct ixgbe_hw *hw = &adapter->hw;
2487 u32 eicr = adapter->interrupt_event;
2488 s32 rc;
2489
2490 if (test_bit(__IXGBE_DOWN, &adapter->state))
2491 return;
2492
2493 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2494 !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2495 return;
2496
2497 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2498
2499 switch (hw->device_id) {
2500 case IXGBE_DEV_ID_82599_T3_LOM:
		/*
		 * Since the warning interrupt is for both ports
		 * we don't have to check if:
		 *  - This interrupt wasn't for our port.
		 *  - We may have missed the interrupt, so always have to
		 *    check if we got a LSC
		 */
2508 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2509 !(eicr & IXGBE_EICR_LSC))
2510 return;
2511
2512 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2513 u32 speed;
2514 bool link_up = false;
2515
2516 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2517
2518 if (link_up)
2519 return;
2520 }
2521
2522
2523 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2524 return;
2525
2526 break;
2527 case IXGBE_DEV_ID_X550EM_A_1G_T:
2528 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
2529 rc = hw->phy.ops.check_overtemp(hw);
2530 if (rc != IXGBE_ERR_OVERTEMP)
2531 return;
2532 break;
2533 default:
2534 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2535 return;
2536 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2537 return;
2538 break;
2539 }
2540 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2541
2542 adapter->interrupt_event = 0;
2543}
2544
2545static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2546{
2547 struct ixgbe_hw *hw = &adapter->hw;
2548
2549 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2550 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2551 e_crit(probe, "Fan has stopped, replace the adapter\n");
2552
2553 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2554 }
2555}
2556
2557static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2558{
2559 struct ixgbe_hw *hw = &adapter->hw;
2560
2561 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2562 return;
2563
2564 switch (adapter->hw.mac.type) {
2565 case ixgbe_mac_82599EB:
2566
2567
2568
2569
2570 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2571 (eicr & IXGBE_EICR_LSC)) &&
2572 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2573 adapter->interrupt_event = eicr;
2574 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2575 ixgbe_service_event_schedule(adapter);
2576 return;
2577 }
2578 return;
2579 case ixgbe_mac_x550em_a:
2580 if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
2581 adapter->interrupt_event = eicr;
2582 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2583 ixgbe_service_event_schedule(adapter);
2584 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
2585 IXGBE_EICR_GPI_SDP0_X550EM_a);
2586 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
2587 IXGBE_EICR_GPI_SDP0_X550EM_a);
2588 }
2589 return;
2590 case ixgbe_mac_X550:
2591 case ixgbe_mac_X540:
2592 if (!(eicr & IXGBE_EICR_TS))
2593 return;
2594 break;
2595 default:
2596 return;
2597 }
2598
2599 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2600}
2601
2602static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2603{
2604 switch (hw->mac.type) {
2605 case ixgbe_mac_82598EB:
2606 if (hw->phy.type == ixgbe_phy_nl)
2607 return true;
2608 return false;
2609 case ixgbe_mac_82599EB:
2610 case ixgbe_mac_X550EM_x:
2611 case ixgbe_mac_x550em_a:
2612 switch (hw->mac.ops.get_media_type(hw)) {
2613 case ixgbe_media_type_fiber:
2614 case ixgbe_media_type_fiber_qsfp:
2615 return true;
2616 default:
2617 return false;
2618 }
2619 default:
2620 return false;
2621 }
2622}
2623
2624static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2625{
2626 struct ixgbe_hw *hw = &adapter->hw;
2627 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2628
2629 if (!ixgbe_is_sfp(hw))
2630 return;
2631
2632
2633 if (hw->mac.type >= ixgbe_mac_X540)
2634 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2635
2636 if (eicr & eicr_mask) {
2637
2638 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2639 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2640 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2641 adapter->sfp_poll_time = 0;
2642 ixgbe_service_event_schedule(adapter);
2643 }
2644 }
2645
2646 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2647 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2648
2649 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2650 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2651 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2652 ixgbe_service_event_schedule(adapter);
2653 }
2654 }
2655}
2656
2657static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2658{
2659 struct ixgbe_hw *hw = &adapter->hw;
2660
2661 adapter->lsc_int++;
2662 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2663 adapter->link_check_timeout = jiffies;
2664 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2665 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2666 IXGBE_WRITE_FLUSH(hw);
2667 ixgbe_service_event_schedule(adapter);
2668 }
2669}
2670
2671static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2672 u64 qmask)
2673{
2674 u32 mask;
2675 struct ixgbe_hw *hw = &adapter->hw;
2676
2677 switch (hw->mac.type) {
2678 case ixgbe_mac_82598EB:
2679 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2680 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2681 break;
2682 case ixgbe_mac_82599EB:
2683 case ixgbe_mac_X540:
2684 case ixgbe_mac_X550:
2685 case ixgbe_mac_X550EM_x:
2686 case ixgbe_mac_x550em_a:
2687 mask = (qmask & 0xFFFFFFFF);
2688 if (mask)
2689 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2690 mask = (qmask >> 32);
2691 if (mask)
2692 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2693 break;
2694 default:
2695 break;
2696 }
2697
2698}
2699
2700static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2701 u64 qmask)
2702{
2703 u32 mask;
2704 struct ixgbe_hw *hw = &adapter->hw;
2705
2706 switch (hw->mac.type) {
2707 case ixgbe_mac_82598EB:
2708 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2709 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2710 break;
2711 case ixgbe_mac_82599EB:
2712 case ixgbe_mac_X540:
2713 case ixgbe_mac_X550:
2714 case ixgbe_mac_X550EM_x:
2715 case ixgbe_mac_x550em_a:
2716 mask = (qmask & 0xFFFFFFFF);
2717 if (mask)
2718 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2719 mask = (qmask >> 32);
2720 if (mask)
2721 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2722 break;
2723 default:
2724 break;
2725 }
2726
2727}
2728
/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @queues: also enable all queue interrupts
 * @flush: flush the register write
 **/
2733static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2734 bool flush)
2735{
2736 struct ixgbe_hw *hw = &adapter->hw;
2737 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2738
2739
2740 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2741 mask &= ~IXGBE_EIMS_LSC;
2742
2743 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2744 switch (adapter->hw.mac.type) {
2745 case ixgbe_mac_82599EB:
2746 mask |= IXGBE_EIMS_GPI_SDP0(hw);
2747 break;
2748 case ixgbe_mac_X540:
2749 case ixgbe_mac_X550:
2750 case ixgbe_mac_X550EM_x:
2751 case ixgbe_mac_x550em_a:
2752 mask |= IXGBE_EIMS_TS;
2753 break;
2754 default:
2755 break;
2756 }
2757 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2758 mask |= IXGBE_EIMS_GPI_SDP1(hw);
2759 switch (adapter->hw.mac.type) {
2760 case ixgbe_mac_82599EB:
2761 mask |= IXGBE_EIMS_GPI_SDP1(hw);
2762 mask |= IXGBE_EIMS_GPI_SDP2(hw);
		/* fall through */
2764 case ixgbe_mac_X540:
2765 case ixgbe_mac_X550:
2766 case ixgbe_mac_X550EM_x:
2767 case ixgbe_mac_x550em_a:
2768 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
2769 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
2770 adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
2771 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
2772 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
2773 mask |= IXGBE_EICR_GPI_SDP0_X540;
2774 mask |= IXGBE_EIMS_ECC;
2775 mask |= IXGBE_EIMS_MAILBOX;
2776 break;
2777 default:
2778 break;
2779 }
2780
2781 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2782 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
2783 mask |= IXGBE_EIMS_FLOW_DIR;
2784
2785 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2786 if (queues)
2787 ixgbe_irq_enable_queues(adapter, ~0);
2788 if (flush)
2789 IXGBE_WRITE_FLUSH(&adapter->hw);
2790}
2791
2792static irqreturn_t ixgbe_msix_other(int irq, void *data)
2793{
2794 struct ixgbe_adapter *adapter = data;
2795 struct ixgbe_hw *hw = &adapter->hw;
2796 u32 eicr;
2797
	/*
	 * Read EICS (instead of EICR) so the interrupt causes are returned
	 * without being cleared by the read; the causes handled here are
	 * cleared explicitly by the write to EICR below.
	 */
2804 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2805
	/* The lower 16 bits of EICR represent the queue interrupts, which are
	 * handled by the per-queue vectors.  Mask them off here so the
	 * write-to-clear below does not accidentally acknowledge queue
	 * interrupts that happen to be set when ixgbe_msix_other() runs.
	 */
2813 eicr &= 0xFFFF0000;
2814
2815 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2816
2817 if (eicr & IXGBE_EICR_LSC)
2818 ixgbe_check_lsc(adapter);
2819
2820 if (eicr & IXGBE_EICR_MAILBOX)
2821 ixgbe_msg_task(adapter);
2822
2823 switch (hw->mac.type) {
2824 case ixgbe_mac_82599EB:
2825 case ixgbe_mac_X540:
2826 case ixgbe_mac_X550:
2827 case ixgbe_mac_X550EM_x:
2828 case ixgbe_mac_x550em_a:
2829 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
2830 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2831 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
2832 ixgbe_service_event_schedule(adapter);
2833 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2834 IXGBE_EICR_GPI_SDP0_X540);
2835 }
2836 if (eicr & IXGBE_EICR_ECC) {
2837 e_info(link, "Received ECC Err, initiating reset\n");
2838 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
2839 ixgbe_service_event_schedule(adapter);
2840 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2841 }
2842
2843 if (eicr & IXGBE_EICR_FLOW_DIR) {
2844 int reinit_count = 0;
2845 int i;
2846 for (i = 0; i < adapter->num_tx_queues; i++) {
2847 struct ixgbe_ring *ring = adapter->tx_ring[i];
2848 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
2849 &ring->state))
2850 reinit_count++;
2851 }
2852 if (reinit_count) {
2853
2854 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2855 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
2856 ixgbe_service_event_schedule(adapter);
2857 }
2858 }
2859 ixgbe_check_sfp_event(adapter, eicr);
2860 ixgbe_check_overtemp_event(adapter, eicr);
2861 break;
2862 default:
2863 break;
2864 }
2865
2866 ixgbe_check_fan_failure(adapter, eicr);
2867
2868 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2869 ixgbe_ptp_check_pps_event(adapter);
2870
2871
2872 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2873 ixgbe_irq_enable(adapter, false, false);
2874
2875 return IRQ_HANDLED;
2876}
2877
2878static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
2879{
2880 struct ixgbe_q_vector *q_vector = data;
2881
2882
2883
2884 if (q_vector->rx.ring || q_vector->tx.ring)
2885 napi_schedule_irqoff(&q_vector->napi);
2886
2887 return IRQ_HANDLED;
2888}
2889
/**
 * ixgbe_poll - NAPI polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * Cleans the Tx and Rx rings associated with the q_vector that owns
 * this NAPI context.
 **/
2897int ixgbe_poll(struct napi_struct *napi, int budget)
2898{
2899 struct ixgbe_q_vector *q_vector =
2900 container_of(napi, struct ixgbe_q_vector, napi);
2901 struct ixgbe_adapter *adapter = q_vector->adapter;
2902 struct ixgbe_ring *ring;
2903 int per_ring_budget, work_done = 0;
2904 bool clean_complete = true;
2905
2906#ifdef CONFIG_IXGBE_DCA
2907 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2908 ixgbe_update_dca(q_vector);
2909#endif
2910
2911 ixgbe_for_each_ring(ring, q_vector->tx) {
2912 if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
2913 clean_complete = false;
2914 }
2915
2916
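	/* Exit if we are called by netpoll */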
2917 if (budget <= 0)
2918 return budget;
2919
2920
2921
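	/* attempt to distribute budget to each queue fairly, but don't
	 * allow the budget to go below 1 because we'll exit polling
	 */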
2922 if (q_vector->rx.count > 1)
2923 per_ring_budget = max(budget/q_vector->rx.count, 1);
2924 else
2925 per_ring_budget = budget;
2926
2927 ixgbe_for_each_ring(ring, q_vector->rx) {
2928 int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
2929 per_ring_budget);
2930
2931 work_done += cleaned;
2932 if (cleaned >= per_ring_budget)
2933 clean_complete = false;
2934 }
2935
2936
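	/* If all work not completed, return budget and keep polling */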
2937 if (!clean_complete)
2938 return budget;
2939
2940
2941 napi_complete_done(napi, work_done);
2942 if (adapter->rx_itr_setting & 1)
2943 ixgbe_set_itr(q_vector);
2944 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2945 ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
2946
2947 return min(work_done, budget - 1);
2948}
2949
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Request the MSI-X vectors from the kernel and hook up the queue and
 * other-cause interrupt handlers.
 **/
2957static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2958{
2959 struct net_device *netdev = adapter->netdev;
2960 int vector, err;
2961 int ri = 0, ti = 0;
2962
2963 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2964 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2965 struct msix_entry *entry = &adapter->msix_entries[vector];
2966
2967 if (q_vector->tx.ring && q_vector->rx.ring) {
2968 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2969 "%s-%s-%d", netdev->name, "TxRx", ri++);
2970 ti++;
2971 } else if (q_vector->rx.ring) {
2972 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2973 "%s-%s-%d", netdev->name, "rx", ri++);
2974 } else if (q_vector->tx.ring) {
2975 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2976 "%s-%s-%d", netdev->name, "tx", ti++);
2977 } else {
2978
2979 continue;
2980 }
2981 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
2982 q_vector->name, q_vector);
2983 if (err) {
2984 e_err(probe, "request_irq failed for MSIX interrupt "
2985 "Error: %d\n", err);
2986 goto free_queue_irqs;
2987 }
2988
2989 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2990
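			/* assign the affinity mask for this irq */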
2991 irq_set_affinity_hint(entry->vector,
2992 &q_vector->affinity_mask);
2993 }
2994 }
2995
2996 err = request_irq(adapter->msix_entries[vector].vector,
2997 ixgbe_msix_other, 0, netdev->name, adapter);
2998 if (err) {
2999 e_err(probe, "request_irq for msix_other failed: %d\n", err);
3000 goto free_queue_irqs;
3001 }
3002
3003 return 0;
3004
3005free_queue_irqs:
3006 while (vector) {
3007 vector--;
3008 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
3009 NULL);
3010 free_irq(adapter->msix_entries[vector].vector,
3011 adapter->q_vector[vector]);
3012 }
3013 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3014 pci_disable_msix(adapter->pdev);
3015 kfree(adapter->msix_entries);
3016 adapter->msix_entries = NULL;
3017 return err;
3018}
3019
/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
3025static irqreturn_t ixgbe_intr(int irq, void *data)
3026{
3027 struct ixgbe_adapter *adapter = data;
3028 struct ixgbe_hw *hw = &adapter->hw;
3029 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3030 u32 eicr;
3031
	/*
	 * mask all interrupts before reading EICR; they are re-enabled
	 * below (or by ixgbe_poll) once the cause has been handled
	 */
3036 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3037
3038
3039
3040 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3041 if (!eicr) {
		/*
		 * Shared interrupt alert!  The read above returned no causes,
		 * so this interrupt was probably not ours.  Make sure
		 * interrupts are re-enabled (the EICR read auto-masked them
		 * via EIAM) before returning IRQ_NONE.
		 */
3049 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3050 ixgbe_irq_enable(adapter, true, true);
3051 return IRQ_NONE;
3052 }
3053
3054 if (eicr & IXGBE_EICR_LSC)
3055 ixgbe_check_lsc(adapter);
3056
3057 switch (hw->mac.type) {
3058 case ixgbe_mac_82599EB:
3059 ixgbe_check_sfp_event(adapter, eicr);
		/* fall through */
3061 case ixgbe_mac_X540:
3062 case ixgbe_mac_X550:
3063 case ixgbe_mac_X550EM_x:
3064 case ixgbe_mac_x550em_a:
3065 if (eicr & IXGBE_EICR_ECC) {
3066 e_info(link, "Received ECC Err, initiating reset\n");
3067 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
3068 ixgbe_service_event_schedule(adapter);
3069 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3070 }
3071 ixgbe_check_overtemp_event(adapter, eicr);
3072 break;
3073 default:
3074 break;
3075 }
3076
3077 ixgbe_check_fan_failure(adapter, eicr);
3078 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
3079 ixgbe_ptp_check_pps_event(adapter);
3080
3081
3082 napi_schedule_irqoff(&q_vector->napi);
3083
	/*
	 * re-enable link (maybe) and non-queue interrupts, no flush.
	 * ixgbe_poll will re-enable the queue interrupts
	 */
3088 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3089 ixgbe_irq_enable(adapter, false, false);
3090
3091 return IRQ_HANDLED;
3092}
3093
/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
3101static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
3102{
3103 struct net_device *netdev = adapter->netdev;
3104 int err;
3105
3106 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3107 err = ixgbe_request_msix_irqs(adapter);
3108 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
3109 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
3110 netdev->name, adapter);
3111 else
3112 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
3113 netdev->name, adapter);
3114
3115 if (err)
3116 e_err(probe, "request_irq failed, Error %d\n", err);
3117
3118 return err;
3119}
3120
3121static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
3122{
3123 int vector;
3124
3125 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3126 free_irq(adapter->pdev->irq, adapter);
3127 return;
3128 }
3129
3130 if (!adapter->msix_entries)
3131 return;
3132
3133 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3134 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3135 struct msix_entry *entry = &adapter->msix_entries[vector];
3136
3137
3138 if (!q_vector->rx.ring && !q_vector->tx.ring)
3139 continue;
3140
3141
3142 irq_set_affinity_hint(entry->vector, NULL);
3143
3144 free_irq(entry->vector, q_vector);
3145 }
3146
3147 free_irq(adapter->msix_entries[vector].vector, adapter);
3148}
3149
/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
3154static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3155{
3156 switch (adapter->hw.mac.type) {
3157 case ixgbe_mac_82598EB:
3158 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3159 break;
3160 case ixgbe_mac_82599EB:
3161 case ixgbe_mac_X540:
3162 case ixgbe_mac_X550:
3163 case ixgbe_mac_X550EM_x:
3164 case ixgbe_mac_x550em_a:
3165 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3166 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3167 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3168 break;
3169 default:
3170 break;
3171 }
3172 IXGBE_WRITE_FLUSH(&adapter->hw);
3173 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3174 int vector;
3175
3176 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3177 synchronize_irq(adapter->msix_entries[vector].vector);
3178
3179 synchronize_irq(adapter->msix_entries[vector++].vector);
3180 } else {
3181 synchronize_irq(adapter->pdev->irq);
3182 }
3183}
3184
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
3189static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3190{
3191 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3192
3193 ixgbe_write_eitr(q_vector);
3194
3195 ixgbe_set_ivar(adapter, 0, 0, 0);
3196 ixgbe_set_ivar(adapter, 1, 0, 0);
3197
3198 e_info(hw, "Legacy interrupt IVAR setup done\n");
3199}
3200
/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
3208void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3209 struct ixgbe_ring *ring)
3210{
3211 struct ixgbe_hw *hw = &adapter->hw;
3212 u64 tdba = ring->dma;
3213 int wait_loop = 10;
3214 u32 txdctl = IXGBE_TXDCTL_ENABLE;
3215 u8 reg_idx = ring->reg_idx;
3216
3217
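	/* disable queue to avoid issues while updating state */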
3218 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3219 IXGBE_WRITE_FLUSH(hw);
3220
3221 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3222 (tdba & DMA_BIT_MASK(32)));
3223 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3224 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3225 ring->count * sizeof(union ixgbe_adv_tx_desc));
3226 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3227 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3228 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3229
	/*
	 * set WTHRESH to encourage burst writeback, it should not be set
	 * higher than 1 when ITR is 0 as it could cause false TX hangs
	 *
	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
3240 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3241 txdctl |= 1u << 16;
3242 else
3243 txdctl |= 8u << 16;
3244
3245
3246
3247
3248
3249 txdctl |= (1u << 8) |
3250 32;
3251
3252
3253 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3254 ring->atr_sample_rate = adapter->atr_sample_rate;
3255 ring->atr_count = 0;
3256 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3257 } else {
3258 ring->atr_sample_rate = 0;
3259 }
3260
3261
3262 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3263 struct ixgbe_q_vector *q_vector = ring->q_vector;
3264
3265 if (q_vector)
3266 netif_set_xps_queue(ring->netdev,
3267 &q_vector->affinity_mask,
3268 ring->queue_index);
3269 }
3270
3271 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3272
3273
3274 memset(ring->tx_buffer_info, 0,
3275 sizeof(struct ixgbe_tx_buffer) * ring->count);
3276
3277
3278 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3279
3280
3281 if (hw->mac.type == ixgbe_mac_82598EB &&
3282 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3283 return;
3284
3285
3286 do {
3287 usleep_range(1000, 2000);
3288 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3289 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3290 if (!wait_loop)
3291 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
3292}
3293
3294static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3295{
3296 struct ixgbe_hw *hw = &adapter->hw;
3297 u32 rttdcs, mtqc;
3298 u8 tcs = netdev_get_num_tc(adapter->netdev);
3299
3300 if (hw->mac.type == ixgbe_mac_82598EB)
3301 return;
3302
3303
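	/* disable the arbiter while setting MTQC */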
3304 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3305 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3306 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3307
3308
3309 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3310 mtqc = IXGBE_MTQC_VT_ENA;
3311 if (tcs > 4)
3312 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3313 else if (tcs > 1)
3314 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3315 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3316 IXGBE_82599_VMDQ_4Q_MASK)
3317 mtqc |= IXGBE_MTQC_32VF;
3318 else
3319 mtqc |= IXGBE_MTQC_64VF;
3320 } else {
3321 if (tcs > 4)
3322 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3323 else if (tcs > 1)
3324 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3325 else
3326 mtqc = IXGBE_MTQC_64Q_1PB;
3327 }
3328
3329 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3330
3331
3332 if (tcs) {
3333 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3334 sectx |= IXGBE_SECTX_DCB;
3335 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3336 }
3337
3338
3339 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3340 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3341}
3342
/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
3349static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3350{
3351 struct ixgbe_hw *hw = &adapter->hw;
3352 u32 dmatxctl;
3353 u32 i;
3354
3355 ixgbe_setup_mtqc(adapter);
3356
3357 if (hw->mac.type != ixgbe_mac_82598EB) {
3358
3359 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3360 dmatxctl |= IXGBE_DMATXCTL_TE;
3361 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3362 }
3363
3364
3365 for (i = 0; i < adapter->num_tx_queues; i++)
3366 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3367}
3368
3369static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3370 struct ixgbe_ring *ring)
3371{
3372 struct ixgbe_hw *hw = &adapter->hw;
3373 u8 reg_idx = ring->reg_idx;
3374 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3375
3376 srrctl |= IXGBE_SRRCTL_DROP_EN;
3377
3378 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3379}
3380
3381static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3382 struct ixgbe_ring *ring)
3383{
3384 struct ixgbe_hw *hw = &adapter->hw;
3385 u8 reg_idx = ring->reg_idx;
3386 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3387
3388 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3389
3390 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3391}
3392
3393#ifdef CONFIG_IXGBE_DCB
3394void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3395#else
3396static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3397#endif
3398{
3399 int i;
3400 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3401
3402 if (adapter->ixgbe_ieee_pfc)
3403 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3415 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3416 for (i = 0; i < adapter->num_rx_queues; i++)
3417 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3418 } else {
3419 for (i = 0; i < adapter->num_rx_queues; i++)
3420 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3421 }
3422}
3423
3424#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3425
3426static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3427 struct ixgbe_ring *rx_ring)
3428{
3429 struct ixgbe_hw *hw = &adapter->hw;
3430 u32 srrctl;
3431 u8 reg_idx = rx_ring->reg_idx;
3432
3433 if (hw->mac.type == ixgbe_mac_82598EB) {
3434 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3435
3436
3437
3438
3439
3440 reg_idx &= mask;
3441 }
3442
3443
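	/* configure header buffer length, needed for RSC */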
3444 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3445
3446
3447 if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
3448 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3449 else
3450 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3451
3452
3453 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3454
3455 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3456}
3457
/**
 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
 * @adapter: device handle
 *
 *  - 82598/82599/X540:     128
 *  - X550(non-SRIOV mode): 512
 *  - X550(SRIOV mode):     64
 */
3466u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3467{
3468 if (adapter->hw.mac.type < ixgbe_mac_X550)
3469 return 128;
3470 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3471 return 64;
3472 else
3473 return 512;
3474}
3475
/**
 * ixgbe_store_key - Write the RSS key to HW
 * @adapter: device handle
 *
 * Write the RSS key stored in adapter.rss_key to HW.
 */
3482void ixgbe_store_key(struct ixgbe_adapter *adapter)
3483{
3484 struct ixgbe_hw *hw = &adapter->hw;
3485 int i;
3486
3487 for (i = 0; i < 10; i++)
3488 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3489}
3490
/**
 * ixgbe_store_reta - Write the RETA table to HW
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
 */
3497void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3498{
3499 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3500 struct ixgbe_hw *hw = &adapter->hw;
3501 u32 reta = 0;
3502 u32 indices_multi;
3503 u8 *indir_tbl = adapter->rss_indir_tbl;
3504
3505
3506
3507
3508
3509
3510
3511 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3512 indices_multi = 0x11;
3513 else
3514 indices_multi = 0x1;
3515
3516
3517 for (i = 0; i < reta_entries; i++) {
3518 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3519 if ((i & 3) == 3) {
3520 if (i < 128)
3521 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3522 else
3523 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3524 reta);
3525 reta = 0;
3526 }
3527 }
3528}
3529
3530
3531
3532
3533
3534
3535
3536static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3537{
3538 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3539 struct ixgbe_hw *hw = &adapter->hw;
3540 u32 vfreta = 0;
3541 unsigned int pf_pool = adapter->num_vfs;
3542
3543
3544 for (i = 0; i < reta_entries; i++) {
3545 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3546 if ((i & 3) == 3) {
3547 IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
3548 vfreta);
3549 vfreta = 0;
3550 }
3551 }
3552}
3553
3554static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3555{
3556 u32 i, j;
3557 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3558 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3559
3560
3561
3562
3563
3564 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
3565 rss_i = 4;
3566
3567
3568 ixgbe_store_key(adapter);
3569
3570
3571 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3572
3573 for (i = 0, j = 0; i < reta_entries; i++, j++) {
3574 if (j == rss_i)
3575 j = 0;
3576
3577 adapter->rss_indir_tbl[i] = j;
3578 }
3579
3580 ixgbe_store_reta(adapter);
3581}
3582
3583static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3584{
3585 struct ixgbe_hw *hw = &adapter->hw;
3586 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3587 unsigned int pf_pool = adapter->num_vfs;
3588 int i, j;
3589
3590
3591 for (i = 0; i < 10; i++)
3592 IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
3593 adapter->rss_key[i]);
3594
3595
3596 for (i = 0, j = 0; i < 64; i++, j++) {
3597 if (j == rss_i)
3598 j = 0;
3599
3600 adapter->rss_indir_tbl[i] = j;
3601 }
3602
3603 ixgbe_store_vfreta(adapter);
3604}
3605
3606static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3607{
3608 struct ixgbe_hw *hw = &adapter->hw;
3609 u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3610 u32 rxcsum;
3611
3612
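	/* Disable indicating checksum in descriptor, enables RSS hash */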
3613 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3614 rxcsum |= IXGBE_RXCSUM_PCSD;
3615 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3616
3617 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3618 if (adapter->ring_feature[RING_F_RSS].mask)
3619 mrqc = IXGBE_MRQC_RSSEN;
3620 } else {
3621 u8 tcs = netdev_get_num_tc(adapter->netdev);
3622
3623 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3624 if (tcs > 4)
3625 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3626 else if (tcs > 1)
3627 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3628 else if (adapter->ring_feature[RING_F_VMDQ].mask ==
3629 IXGBE_82599_VMDQ_4Q_MASK)
3630 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3631 else
3632 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3633 } else {
3634 if (tcs > 4)
3635 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3636 else if (tcs > 1)
3637 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3638 else
3639 mrqc = IXGBE_MRQC_RSSEN;
3640 }
3641 }
3642
3643
3644 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3645 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3646 IXGBE_MRQC_RSS_FIELD_IPV6 |
3647 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3648
3649 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3650 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3651 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3652 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3653
3654 netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
3655 if ((hw->mac.type >= ixgbe_mac_X550) &&
3656 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3657 unsigned int pf_pool = adapter->num_vfs;
3658
3659
3660 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
3661 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3662
3663
3664 ixgbe_setup_vfreta(adapter);
3665 vfmrqc = IXGBE_MRQC_RSSEN;
3666 vfmrqc |= rss_field;
3667 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
3668 } else {
3669 ixgbe_setup_reta(adapter);
3670 mrqc |= rss_field;
3671 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3672 }
3673}
3674
3675
3676
3677
3678
3679
3680static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
3681 struct ixgbe_ring *ring)
3682{
3683 struct ixgbe_hw *hw = &adapter->hw;
3684 u32 rscctrl;
3685 u8 reg_idx = ring->reg_idx;
3686
3687 if (!ring_is_rsc_enabled(ring))
3688 return;
3689
3690 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
3691 rscctrl |= IXGBE_RSCCTL_RSCEN;
3692
3693
3694
3695
3696
3697 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3698 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
3699}
3700
3701#define IXGBE_MAX_RX_DESC_POLL 10
3702static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3703 struct ixgbe_ring *ring)
3704{
3705 struct ixgbe_hw *hw = &adapter->hw;
3706 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3707 u32 rxdctl;
3708 u8 reg_idx = ring->reg_idx;
3709
3710 if (ixgbe_removed(hw->hw_addr))
3711 return;
3712
3713 if (hw->mac.type == ixgbe_mac_82598EB &&
3714 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3715 return;
3716
3717 do {
3718 usleep_range(1000, 2000);
3719 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3720 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3721
	if (!wait_loop)
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n",
		      reg_idx);
3726}
3727
3728void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3729 struct ixgbe_ring *ring)
3730{
3731 struct ixgbe_hw *hw = &adapter->hw;
3732 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3733 u32 rxdctl;
3734 u8 reg_idx = ring->reg_idx;
3735
3736 if (ixgbe_removed(hw->hw_addr))
3737 return;
3738 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3739 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3740
3741
3742 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3743
3744 if (hw->mac.type == ixgbe_mac_82598EB &&
3745 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3746 return;
3747
3748
3749 do {
3750 udelay(10);
3751 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3752 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3753
3754 if (!wait_loop) {
3755 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3756 "the polling period\n", reg_idx);
3757 }
3758}
3759
3760void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3761 struct ixgbe_ring *ring)
3762{
3763 struct ixgbe_hw *hw = &adapter->hw;
3764 union ixgbe_adv_rx_desc *rx_desc;
3765 u64 rdba = ring->dma;
3766 u32 rxdctl;
3767 u8 reg_idx = ring->reg_idx;
3768
3769
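	/* disable queue to avoid use of these values while updating state */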
3770 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3771 ixgbe_disable_rx_queue(adapter, ring);
3772
3773 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3774 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3775 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3776 ring->count * sizeof(union ixgbe_adv_rx_desc));
3777
3778 IXGBE_WRITE_FLUSH(hw);
3779
3780 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3781 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
3782 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
3783
3784 ixgbe_configure_srrctl(adapter, ring);
3785 ixgbe_configure_rscctl(adapter, ring);
3786
3787 if (hw->mac.type == ixgbe_mac_82598EB) {
3788
3789
3790
3791
3792
3793
3794
3795 rxdctl &= ~0x3FFFFF;
3796 rxdctl |= 0x080420;
3797#if (PAGE_SIZE < 8192)
3798 } else {
3799 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
3800 IXGBE_RXDCTL_RLPML_EN);
3801
3802
3803 if (ring_uses_build_skb(ring) &&
3804 !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
3805 rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB |
3806 IXGBE_RXDCTL_RLPML_EN;
3807#endif
3808 }
3809
3810
3811 memset(ring->rx_buffer_info, 0,
3812 sizeof(struct ixgbe_rx_buffer) * ring->count);
3813
3814
3815 rx_desc = IXGBE_RX_DESC(ring, 0);
3816 rx_desc->wb.upper.length = 0;
3817
3818
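	/* enable receive descriptor ring */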
3819 rxdctl |= IXGBE_RXDCTL_ENABLE;
3820 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3821
3822 ixgbe_rx_desc_queue_enable(adapter, ring);
3823 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
3824}
3825
3826static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3827{
3828 struct ixgbe_hw *hw = &adapter->hw;
3829 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
3830 u16 pool;
3831
3832
3833 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3834 IXGBE_PSRTYPE_UDPHDR |
3835 IXGBE_PSRTYPE_IPV4HDR |
3836 IXGBE_PSRTYPE_L2HDR |
3837 IXGBE_PSRTYPE_IPV6HDR;
3838
3839 if (hw->mac.type == ixgbe_mac_82598EB)
3840 return;
3841
3842 if (rss_i > 3)
3843 psrtype |= 2u << 29;
3844 else if (rss_i > 1)
3845 psrtype |= 1u << 29;
3846
3847 for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
3848 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
3849}
3850
3851static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3852{
3853 struct ixgbe_hw *hw = &adapter->hw;
3854 u32 reg_offset, vf_shift;
3855 u32 gcr_ext, vmdctl;
3856 int i;
3857
3858 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3859 return;
3860
3861 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3862 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
3863 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
3864 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
3865 vmdctl |= IXGBE_VT_CTL_REPLEN;
3866 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
3867
3868 vf_shift = VMDQ_P(0) % 32;
3869 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
3870
3871
3872 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
3873 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
3874 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
3875 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
3876 if (adapter->bridge_mode == BRIDGE_MODE_VEB)
3877 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3878
3879
3880 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
3881
3882
3883 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
3884
3885
3886
3887
3888
3889 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3890 case IXGBE_82599_VMDQ_8Q_MASK:
3891 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
3892 break;
3893 case IXGBE_82599_VMDQ_4Q_MASK:
3894 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
3895 break;
3896 default:
3897 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
3898 break;
3899 }
3900
3901 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3902
3903 for (i = 0; i < adapter->num_vfs; i++) {
3904
3905 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
3906 adapter->vfinfo[i].spoofchk_enabled);
3907
3908
3909 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
3910 adapter->vfinfo[i].rss_query_enabled);
3911 }
3912}
3913
3914static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3915{
3916 struct ixgbe_hw *hw = &adapter->hw;
3917 struct net_device *netdev = adapter->netdev;
3918 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3919 struct ixgbe_ring *rx_ring;
3920 int i;
3921 u32 mhadd, hlreg0;
3922
3923#ifdef IXGBE_FCOE
3924
3925 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3926 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3927 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3928
3929#endif
3930
3931
3932 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3933 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
3934
3935 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3936 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3937 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3938 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3939
3940 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3941 }
3942
3943 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3944
3945 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3946 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3947
3948
3949
3950
3951
3952 for (i = 0; i < adapter->num_rx_queues; i++) {
3953 rx_ring = adapter->rx_ring[i];
3954
3955 clear_ring_rsc_enabled(rx_ring);
3956 clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
3957 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
3958
3959 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3960 set_ring_rsc_enabled(rx_ring);
3961
3962 if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
3963 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
3964
3965 clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
3966 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
3967 continue;
3968
3969 set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
3970
3971#if (PAGE_SIZE < 8192)
3972 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3973 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
3974
3975 if ((max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3976 (max_frame > IXGBE_MAX_FRAME_BUILD_SKB))
3977 set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
3978#endif
3979 }
3980}
3981
3982static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3983{
3984 struct ixgbe_hw *hw = &adapter->hw;
3985 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3986
3987 switch (hw->mac.type) {
3988 case ixgbe_mac_82598EB:
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998
3999 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
4000 break;
4001 case ixgbe_mac_X550:
4002 case ixgbe_mac_X550EM_x:
4003 case ixgbe_mac_x550em_a:
4004 if (adapter->num_vfs)
4005 rdrxctl |= IXGBE_RDRXCTL_PSP;
		/* fall through */
4007 case ixgbe_mac_82599EB:
4008 case ixgbe_mac_X540:
4009
4010 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
4011 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
4012 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
4013
4014 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
4015 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
4016 break;
4017 default:
4018
4019 return;
4020 }
4021
4022 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4023}
4024
/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
4031static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
4032{
4033 struct ixgbe_hw *hw = &adapter->hw;
4034 int i;
4035 u32 rxctrl, rfctl;
4036
4037
4038 hw->mac.ops.disable_rx(hw);
4039
4040 ixgbe_setup_psrtype(adapter);
4041 ixgbe_setup_rdrxctl(adapter);
4042
4043
4044 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4045 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
4046 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
4047 rfctl |= IXGBE_RFCTL_RSC_DIS;
4048
4049
4050 rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
4051 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
4052
4053
4054 ixgbe_setup_mrqc(adapter);
4055
4056
4057 ixgbe_set_rx_buffer_len(adapter);
4058
4059
4060
4061
4062
4063 for (i = 0; i < adapter->num_rx_queues; i++)
4064 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
4065
4066 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4067
4068 if (hw->mac.type == ixgbe_mac_82598EB)
4069 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4070
4071
4072 rxctrl |= IXGBE_RXCTRL_RXEN;
4073 hw->mac.ops.enable_rx_dma(hw, rxctrl);
4074}
4075
4076static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
4077 __be16 proto, u16 vid)
4078{
4079 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4080 struct ixgbe_hw *hw = &adapter->hw;
4081
4082
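	/* add VID to filter table */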
4083 if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4084 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
4085
4086 set_bit(vid, adapter->active_vlans);
4087
4088 return 0;
4089}
4090
4091static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
4092{
4093 u32 vlvf;
4094 int idx;
4095
4096
4097 if (vlan == 0)
4098 return 0;
4099
4100
4101 for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
4102 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
4103 if ((vlvf & VLAN_VID_MASK) == vlan)
4104 break;
4105 }
4106
4107 return idx;
4108}
4109
4110void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
4111{
4112 struct ixgbe_hw *hw = &adapter->hw;
4113 u32 bits, word;
4114 int idx;
4115
4116 idx = ixgbe_find_vlvf_entry(hw, vid);
4117 if (!idx)
4118 return;
4119
4120
4121
4122
4123 word = idx * 2 + (VMDQ_P(0) / 32);
4124 bits = ~BIT(VMDQ_P(0) % 32);
4125 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4126
4127
4128 if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
4129 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4130 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
4131 IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
4132 }
4133}
4134
4135static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
4136 __be16 proto, u16 vid)
4137{
4138 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4139 struct ixgbe_hw *hw = &adapter->hw;
4140
4141
4142 if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4143 hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
4144
4145 clear_bit(vid, adapter->active_vlans);
4146
4147 return 0;
4148}
4149
/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */
4154static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
4155{
4156 struct ixgbe_hw *hw = &adapter->hw;
4157 u32 vlnctrl;
4158 int i, j;
4159
4160 switch (hw->mac.type) {
4161 case ixgbe_mac_82598EB:
4162 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4163 vlnctrl &= ~IXGBE_VLNCTRL_VME;
4164 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4165 break;
4166 case ixgbe_mac_82599EB:
4167 case ixgbe_mac_X540:
4168 case ixgbe_mac_X550:
4169 case ixgbe_mac_X550EM_x:
4170 case ixgbe_mac_x550em_a:
4171 for (i = 0; i < adapter->num_rx_queues; i++) {
4172 struct ixgbe_ring *ring = adapter->rx_ring[i];
4173
4174 if (ring->l2_accel_priv)
4175 continue;
4176 j = ring->reg_idx;
4177 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4178 vlnctrl &= ~IXGBE_RXDCTL_VME;
4179 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4180 }
4181 break;
4182 default:
4183 break;
4184 }
4185}
4186
/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */
4191static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
4192{
4193 struct ixgbe_hw *hw = &adapter->hw;
4194 u32 vlnctrl;
4195 int i, j;
4196
4197 switch (hw->mac.type) {
4198 case ixgbe_mac_82598EB:
4199 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4200 vlnctrl |= IXGBE_VLNCTRL_VME;
4201 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4202 break;
4203 case ixgbe_mac_82599EB:
4204 case ixgbe_mac_X540:
4205 case ixgbe_mac_X550:
4206 case ixgbe_mac_X550EM_x:
4207 case ixgbe_mac_x550em_a:
4208 for (i = 0; i < adapter->num_rx_queues; i++) {
4209 struct ixgbe_ring *ring = adapter->rx_ring[i];
4210
4211 if (ring->l2_accel_priv)
4212 continue;
4213 j = ring->reg_idx;
4214 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
4215 vlnctrl |= IXGBE_RXDCTL_VME;
4216 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
4217 }
4218 break;
4219 default:
4220 break;
4221 }
4222}
4223
4224static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
4225{
4226 struct ixgbe_hw *hw = &adapter->hw;
4227 u32 vlnctrl, i;
4228
4229 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4230
4231 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
4232
4233 vlnctrl |= IXGBE_VLNCTRL_VFE;
4234 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4235 } else {
4236 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
4237 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4238 return;
4239 }
4240
4241
4242 if (hw->mac.type == ixgbe_mac_82598EB)
4243 return;
4244
4245
4246 if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
4247 return;
4248
4249
4250 adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
4251
4252
4253 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4254 u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
4255 u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
4256
4257 vlvfb |= BIT(VMDQ_P(0) % 32);
4258 IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
4259 }
4260
4261
4262 for (i = hw->mac.vft_size; i--;)
4263 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
4264}
4265
4266#define VFTA_BLOCK_SIZE 8
4267static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
4268{
4269 struct ixgbe_hw *hw = &adapter->hw;
4270 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4271 u32 vid_start = vfta_offset * 32;
4272 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4273 u32 i, vid, word, bits;
4274
4275 for (i = IXGBE_VLVF_ENTRIES; --i;) {
4276 u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
4277
4278
4279 vid = vlvf & VLAN_VID_MASK;
4280
4281
4282 if (vid < vid_start || vid >= vid_end)
4283 continue;
4284
4285 if (vlvf) {
4286
4287 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4288
4289
4290 if (test_bit(vid, adapter->active_vlans))
4291 continue;
4292 }
4293
4294
4295 word = i * 2 + VMDQ_P(0) / 32;
4296 bits = ~BIT(VMDQ_P(0) % 32);
4297 bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
4298 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
4299 }
4300
4301
4302 for (i = VFTA_BLOCK_SIZE; i--;) {
4303 vid = (vfta_offset + i) * 32;
4304 word = vid / BITS_PER_LONG;
4305 bits = vid % BITS_PER_LONG;
4306
4307 vfta[i] |= adapter->active_vlans[word] >> bits;
4308
4309 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
4310 }
4311}
4312
4313static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
4314{
4315 struct ixgbe_hw *hw = &adapter->hw;
4316 u32 vlnctrl, i;
4317
4318
4319 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4320 vlnctrl |= IXGBE_VLNCTRL_VFE;
4321 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4322
4323 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
4324 hw->mac.type == ixgbe_mac_82598EB)
4325 return;
4326
4327
4328 if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
4329 return;
4330
4331
4332 adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
4333
4334 for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
4335 ixgbe_scrub_vfta(adapter, i);
4336}
4337
4338static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
4339{
4340 u16 vid = 1;
4341
4342 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4343
4344 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
4345 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4346}
4347
/**
 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *                0 on no addresses written
 *                X on writing X addresses to MTA
 **/
4357static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4358{
4359 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4360 struct ixgbe_hw *hw = &adapter->hw;
4361
4362 if (!netif_running(netdev))
4363 return 0;
4364
4365 if (hw->mac.ops.update_mc_addr_list)
4366 hw->mac.ops.update_mc_addr_list(hw, netdev);
4367 else
4368 return -ENOMEM;
4369
4370#ifdef CONFIG_PCI_IOV
4371 ixgbe_restore_vf_multicasts(adapter);
4372#endif
4373
4374 return netdev_mc_count(netdev);
4375}
4376
4377#ifdef CONFIG_PCI_IOV
4378void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4379{
4380 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4381 struct ixgbe_hw *hw = &adapter->hw;
4382 int i;
4383
4384 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4385 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4386
4387 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4388 hw->mac.ops.set_rar(hw, i,
4389 mac_table->addr,
4390 mac_table->pool,
4391 IXGBE_RAH_AV);
4392 else
4393 hw->mac.ops.clear_rar(hw, i);
4394 }
4395}
4396
4397#endif
4398static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4399{
4400 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4401 struct ixgbe_hw *hw = &adapter->hw;
4402 int i;
4403
4404 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4405 if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
4406 continue;
4407
4408 mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
4409
4410 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4411 hw->mac.ops.set_rar(hw, i,
4412 mac_table->addr,
4413 mac_table->pool,
4414 IXGBE_RAH_AV);
4415 else
4416 hw->mac.ops.clear_rar(hw, i);
4417 }
4418}
4419
4420static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4421{
4422 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4423 struct ixgbe_hw *hw = &adapter->hw;
4424 int i;
4425
4426 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4427 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4428 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4429 }
4430
4431 ixgbe_sync_mac_table(adapter);
4432}
4433
4434static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
4435{
4436 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4437 struct ixgbe_hw *hw = &adapter->hw;
4438 int i, count = 0;
4439
4440 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4441
4442 if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
4443 continue;
4444
4445
4446 if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
4447 if (mac_table->pool != pool)
4448 continue;
4449 }
4450
4451 count++;
4452 }
4453
4454 return count;
4455}
4456
4457
4458static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
4459{
4460 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4461 struct ixgbe_hw *hw = &adapter->hw;
4462
4463 memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
4464 mac_table->pool = VMDQ_P(0);
4465
4466 mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
4467
4468 hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
4469 IXGBE_RAH_AV);
4470}
4471
4472int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
4473 const u8 *addr, u16 pool)
4474{
4475 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4476 struct ixgbe_hw *hw = &adapter->hw;
4477 int i;
4478
4479 if (is_zero_ether_addr(addr))
4480 return -EINVAL;
4481
4482 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4483 if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
4484 continue;
4485
4486 ether_addr_copy(mac_table->addr, addr);
4487 mac_table->pool = pool;
4488
4489 mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
4490 IXGBE_MAC_STATE_IN_USE;
4491
4492 ixgbe_sync_mac_table(adapter);
4493
4494 return i;
4495 }
4496
4497 return -ENOMEM;
4498}
4499
4500int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
4501 const u8 *addr, u16 pool)
4502{
4503 struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
4504 struct ixgbe_hw *hw = &adapter->hw;
4505 int i;
4506
4507 if (is_zero_ether_addr(addr))
4508 return -EINVAL;
4509
4510
4511 for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
4512
4513 if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
4514 continue;
4515
4516 if (mac_table->pool != pool)
4517 continue;
4518
4519 if (!ether_addr_equal(addr, mac_table->addr))
4520 continue;
4521
4522 mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
4523 mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
4524
4525 ixgbe_sync_mac_table(adapter);
4526
4527 return 0;
4528 }
4529
4530 return -ENOMEM;
4531}
4532
/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 * @vfn: pool to associate with unicast addresses
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                0 on no addresses written
 *                X on writing X addresses to the RAR table
 **/
4541static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
4542{
4543 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4544 int count = 0;
4545
4546
4547 if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
4548 return -ENOMEM;
4549
4550 if (!netdev_uc_empty(netdev)) {
4551 struct netdev_hw_addr *ha;
4552 netdev_for_each_uc_addr(ha, netdev) {
4553 ixgbe_del_mac_filter(adapter, ha->addr, vfn);
4554 ixgbe_add_mac_filter(adapter, ha->addr, vfn);
4555 count++;
4556 }
4557 }
4558 return count;
4559}
4560
4561static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
4562{
4563 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4564 int ret;
4565
4566 ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
4567
4568 return min_t(int, ret, 0);
4569}
4570
4571static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
4572{
4573 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4574
4575 ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
4576
4577 return 0;
4578}
4579
/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
4589void ixgbe_set_rx_mode(struct net_device *netdev)
4590{
4591 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4592 struct ixgbe_hw *hw = &adapter->hw;
4593 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4594 netdev_features_t features = netdev->features;
4595 int count;
4596
4597
4598 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4599
4600
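	/* set all bits that we expect to always be set */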
4601 fctrl &= ~IXGBE_FCTRL_SBP;
4602 fctrl |= IXGBE_FCTRL_BAM;
4603 fctrl |= IXGBE_FCTRL_DPF;
4604 fctrl |= IXGBE_FCTRL_PMCF;
4605
4606
4607 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4608 if (netdev->flags & IFF_PROMISC) {
4609 hw->addr_ctrl.user_set_promisc = true;
4610 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4611 vmolr |= IXGBE_VMOLR_MPE;
4612 features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4613 } else {
4614 if (netdev->flags & IFF_ALLMULTI) {
4615 fctrl |= IXGBE_FCTRL_MPE;
4616 vmolr |= IXGBE_VMOLR_MPE;
4617 }
4618 hw->addr_ctrl.user_set_promisc = false;
4619 }
4620
4621
4622
4623
4624
4625
4626 if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
4627 fctrl |= IXGBE_FCTRL_UPE;
4628 vmolr |= IXGBE_VMOLR_ROPE;
4629 }
4630
4631
4632
4633
4634
4635 count = ixgbe_write_mc_addr_list(netdev);
4636 if (count < 0) {
4637 fctrl |= IXGBE_FCTRL_MPE;
4638 vmolr |= IXGBE_VMOLR_MPE;
4639 } else if (count) {
4640 vmolr |= IXGBE_VMOLR_ROMPE;
4641 }
4642
4643 if (hw->mac.type != ixgbe_mac_82598EB) {
4644 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4645 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
4646 IXGBE_VMOLR_ROPE);
4647 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
4648 }
4649
4650
4651 if (features & NETIF_F_RXALL) {
4652
4653
4654 fctrl |= (IXGBE_FCTRL_SBP |
4655 IXGBE_FCTRL_BAM |
4656 IXGBE_FCTRL_PMCF);
4657
4658 fctrl &= ~(IXGBE_FCTRL_DPF);
4659
4660 }
4661
4662 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4663
4664 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4665 ixgbe_vlan_strip_enable(adapter);
4666 else
4667 ixgbe_vlan_strip_disable(adapter);
4668
4669 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
4670 ixgbe_vlan_promisc_disable(adapter);
4671 else
4672 ixgbe_vlan_promisc_enable(adapter);
4673}
4674
4675static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4676{
4677 int q_idx;
4678
4679 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
4680 napi_enable(&adapter->q_vector[q_idx]->napi);
4681}
4682
4683static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
4684{
4685 int q_idx;
4686
4687 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
4688 napi_disable(&adapter->q_vector[q_idx]->napi);
4689}
4690
4691static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
4692{
4693 struct ixgbe_hw *hw = &adapter->hw;
4694 u32 vxlanctrl;
4695
4696 if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
4697 IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
4698 return;
4699
	vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
4701 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
4702
4703 if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
4704 adapter->vxlan_port = 0;
4705
4706 if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
4707 adapter->geneve_port = 0;
4708}
4709
4710#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * Called by the driver on open to configure the DCB hardware.  Also called
 * when reconfiguring the DCB state.
 **/
4719static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
4720{
4721 struct ixgbe_hw *hw = &adapter->hw;
4722 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4723
4724 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
4725 if (hw->mac.type == ixgbe_mac_82598EB)
4726 netif_set_gso_max_size(adapter->netdev, 65536);
4727 return;
4728 }
4729
4730 if (hw->mac.type == ixgbe_mac_82598EB)
4731 netif_set_gso_max_size(adapter->netdev, 32768);
4732
4733#ifdef IXGBE_FCOE
4734 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
4735 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
4736#endif
4737
4738
4739 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
4740 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
4741 DCB_TX_CONFIG);
4742 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
4743 DCB_RX_CONFIG);
4744 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
4745 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
4746 ixgbe_dcb_hw_ets(&adapter->hw,
4747 adapter->ixgbe_ieee_ets,
4748 max_frame);
4749 ixgbe_dcb_hw_pfc_config(&adapter->hw,
4750 adapter->ixgbe_ieee_pfc->pfc_en,
4751 adapter->ixgbe_ieee_ets->prio_tc);
4752 }
4753
4754
4755 if (hw->mac.type != ixgbe_mac_82598EB) {
4756 u32 msb = 0;
4757 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
4758
4759 while (rss_i) {
4760 msb++;
4761 rss_i >>= 1;
4762 }
4763
4764
4765 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
4766 }
4767}
4768#endif
4769
4770
4771#define IXGBE_ETH_FRAMING 20
4772
/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 **/
4779static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
4780{
4781 struct ixgbe_hw *hw = &adapter->hw;
4782 struct net_device *dev = adapter->netdev;
4783 int link, tc, kb, marker;
4784 u32 dv_id, rx_pba;
4785
4786
4787 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
4788
4789#ifdef IXGBE_FCOE
4790
4791 if ((dev->features & NETIF_F_FCOE_MTU) &&
4792 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4793 (pb == ixgbe_fcoe_get_tc(adapter)))
4794 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4795#endif
4796
4797
4798 switch (hw->mac.type) {
4799 case ixgbe_mac_X540:
4800 case ixgbe_mac_X550:
4801 case ixgbe_mac_X550EM_x:
4802 case ixgbe_mac_x550em_a:
4803 dv_id = IXGBE_DV_X540(link, tc);
4804 break;
4805 default:
4806 dv_id = IXGBE_DV(link, tc);
4807 break;
4808 }
4809
4810
4811 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4812 dv_id += IXGBE_B2BT(tc);
4813
4814
4815 kb = IXGBE_BT2KB(dv_id);
4816 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
4817
4818 marker = rx_pba - kb;
4819
4820
4821
4822
4823
4824 if (marker < 0) {
4825 e_warn(drv, "Packet Buffer(%i) cannot provide enough "
4826 "headroom to support flow control. "
4827 "Decrease MTU or number of traffic classes\n", pb);
4828 marker = tc + 1;
4829 }
4830
4831 return marker;
4832}
4833
/**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 **/
4840static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
4841{
4842 struct ixgbe_hw *hw = &adapter->hw;
4843 struct net_device *dev = adapter->netdev;
4844 int tc;
4845 u32 dv_id;
4846
4847
4848 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4849
4850#ifdef IXGBE_FCOE
4851
4852 if ((dev->features & NETIF_F_FCOE_MTU) &&
4853 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4854 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
4855 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4856#endif
4857
4858
4859 switch (hw->mac.type) {
4860 case ixgbe_mac_X540:
4861 case ixgbe_mac_X550:
4862 case ixgbe_mac_X550EM_x:
4863 case ixgbe_mac_x550em_a:
4864 dv_id = IXGBE_LOW_DV_X540(tc);
4865 break;
4866 default:
4867 dv_id = IXGBE_LOW_DV(tc);
4868 break;
4869 }
4870
4871
4872 return IXGBE_BT2KB(dv_id);
4873}
4874
/*
 * ixgbe_pbthresh_setup - calculate and set up the high and low water marks
 */
4878static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
4879{
4880 struct ixgbe_hw *hw = &adapter->hw;
4881 int num_tc = netdev_get_num_tc(adapter->netdev);
4882 int i;
4883
4884 if (!num_tc)
4885 num_tc = 1;
4886
4887 for (i = 0; i < num_tc; i++) {
4888 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
4889 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
4890
4891
4892 if (hw->fc.low_water[i] > hw->fc.high_water[i])
4893 hw->fc.low_water[i] = 0;
4894 }
4895
4896 for (; i < MAX_TRAFFIC_CLASS; i++)
4897 hw->fc.high_water[i] = 0;
4898}
4899
4900static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
4901{
4902 struct ixgbe_hw *hw = &adapter->hw;
4903 int hdrm;
4904 u8 tc = netdev_get_num_tc(adapter->netdev);
4905
4906 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4907 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
4908 hdrm = 32 << adapter->fdir_pballoc;
4909 else
4910 hdrm = 0;
4911
4912 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
4913 ixgbe_pbthresh_setup(adapter);
4914}
4915
4916static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
4917{
4918 struct ixgbe_hw *hw = &adapter->hw;
4919 struct hlist_node *node2;
4920 struct ixgbe_fdir_filter *filter;
4921
4922 spin_lock(&adapter->fdir_perfect_lock);
4923
4924 if (!hlist_empty(&adapter->fdir_filter_list))
4925 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
4926
4927 hlist_for_each_entry_safe(filter, node2,
4928 &adapter->fdir_filter_list, fdir_node) {
4929 ixgbe_fdir_write_perfect_filter_82599(hw,
4930 &filter->filter,
4931 filter->sw_idx,
4932 (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
4933 IXGBE_FDIR_DROP_QUEUE :
4934 adapter->rx_ring[filter->action]->reg_idx);
4935 }
4936
4937 spin_unlock(&adapter->fdir_perfect_lock);
4938}
4939
4940static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
4941 struct ixgbe_adapter *adapter)
4942{
4943 struct ixgbe_hw *hw = &adapter->hw;
4944 u32 vmolr;
4945
4946
4947 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
4948 vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
4949
4950
4951 vmolr &= ~IXGBE_VMOLR_MPE;
4952
4953 if (dev->flags & IFF_ALLMULTI) {
4954 vmolr |= IXGBE_VMOLR_MPE;
4955 } else {
4956 vmolr |= IXGBE_VMOLR_ROMPE;
4957 hw->mac.ops.update_mc_addr_list(hw, dev);
4958 }
4959 ixgbe_write_uc_addr_list(adapter->netdev, pool);
4960 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4961}
4962
4963static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
4964{
4965 struct ixgbe_adapter *adapter = vadapter->real_adapter;
4966 int rss_i = adapter->num_rx_queues_per_pool;
4967 struct ixgbe_hw *hw = &adapter->hw;
4968 u16 pool = vadapter->pool;
4969 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4970 IXGBE_PSRTYPE_UDPHDR |
4971 IXGBE_PSRTYPE_IPV4HDR |
4972 IXGBE_PSRTYPE_L2HDR |
4973 IXGBE_PSRTYPE_IPV6HDR;
4974
4975 if (hw->mac.type == ixgbe_mac_82598EB)
4976 return;
4977
4978 if (rss_i > 3)
4979 psrtype |= 2u << 29;
4980 else if (rss_i > 1)
4981 psrtype |= 1u << 29;
4982
4983 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4984}
4985
/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
4990static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4991{
4992 u16 i = rx_ring->next_to_clean;
4993 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
4994
4995
4996 while (i != rx_ring->next_to_alloc) {
4997 if (rx_buffer->skb) {
4998 struct sk_buff *skb = rx_buffer->skb;
4999 if (IXGBE_CB(skb)->page_released)
5000 dma_unmap_page_attrs(rx_ring->dev,
5001 IXGBE_CB(skb)->dma,
5002 ixgbe_rx_pg_size(rx_ring),
5003 DMA_FROM_DEVICE,
5004 IXGBE_RX_DMA_ATTR);
5005 dev_kfree_skb(skb);
5006 }
5007
5008
5009
5010
5011 dma_sync_single_range_for_cpu(rx_ring->dev,
5012 rx_buffer->dma,
5013 rx_buffer->page_offset,
5014 ixgbe_rx_bufsz(rx_ring),
5015 DMA_FROM_DEVICE);
5016
5017
5018 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
5019 ixgbe_rx_pg_size(rx_ring),
5020 DMA_FROM_DEVICE,
5021 IXGBE_RX_DMA_ATTR);
5022 __page_frag_cache_drain(rx_buffer->page,
5023 rx_buffer->pagecnt_bias);
5024
5025 i++;
5026 rx_buffer++;
5027 if (i == rx_ring->count) {
5028 i = 0;
5029 rx_buffer = rx_ring->rx_buffer_info;
5030 }
5031 }
5032
5033 rx_ring->next_to_alloc = 0;
5034 rx_ring->next_to_clean = 0;
5035 rx_ring->next_to_use = 0;
5036}
5037
5038static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
5039 struct ixgbe_ring *rx_ring)
5040{
5041 struct ixgbe_adapter *adapter = vadapter->real_adapter;
5042 int index = rx_ring->queue_index + vadapter->rx_base_queue;
5043
5044
5045 ixgbe_disable_rx_queue(adapter, rx_ring);
5046 usleep_range(10000, 20000);
5047 ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
5048 ixgbe_clean_rx_ring(rx_ring);
5049 rx_ring->l2_accel_priv = NULL;
5050}
5051
5052static int ixgbe_fwd_ring_down(struct net_device *vdev,
5053 struct ixgbe_fwd_adapter *accel)
5054{
5055 struct ixgbe_adapter *adapter = accel->real_adapter;
5056 unsigned int rxbase = accel->rx_base_queue;
5057 unsigned int txbase = accel->tx_base_queue;
5058 int i;
5059
5060 netif_tx_stop_all_queues(vdev);
5061
5062 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
5063 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
5064 adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
5065 }
5066
5067 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
5068 adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
5069 adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
5070 }
5071
5072
5073 return 0;
5074}
5075
5076static int ixgbe_fwd_ring_up(struct net_device *vdev,
5077 struct ixgbe_fwd_adapter *accel)
5078{
5079 struct ixgbe_adapter *adapter = accel->real_adapter;
5080 unsigned int rxbase, txbase, queues;
5081 int i, baseq, err = 0;
5082
5083 if (!test_bit(accel->pool, &adapter->fwd_bitmask))
5084 return 0;
5085
5086 baseq = accel->pool * adapter->num_rx_queues_per_pool;
5087 netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
5088 accel->pool, adapter->num_rx_pools,
5089 baseq, baseq + adapter->num_rx_queues_per_pool,
5090 adapter->fwd_bitmask);
5091
5092 accel->netdev = vdev;
5093 accel->rx_base_queue = rxbase = baseq;
5094 accel->tx_base_queue = txbase = baseq;
5095
5096 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
5097 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
5098
5099 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
5100 adapter->rx_ring[rxbase + i]->netdev = vdev;
5101 adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
5102 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
5103 }
5104
5105 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
5106 adapter->tx_ring[txbase + i]->netdev = vdev;
5107 adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
5108 }
5109
5110 queues = min_t(unsigned int,
5111 adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
5112 err = netif_set_real_num_tx_queues(vdev, queues);
5113 if (err)
5114 goto fwd_queue_err;
5115
5116 err = netif_set_real_num_rx_queues(vdev, queues);
5117 if (err)
5118 goto fwd_queue_err;
5119
5120 if (is_valid_ether_addr(vdev->dev_addr))
5121 ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
5122
5123 ixgbe_fwd_psrtype(accel);
5124 ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
5125 return err;
5126fwd_queue_err:
5127 ixgbe_fwd_ring_down(vdev, accel);
5128 return err;
5129}
5130
5131static int ixgbe_upper_dev_walk(struct net_device *upper, void *data)
5132{
5133 if (netif_is_macvlan(upper)) {
5134 struct macvlan_dev *dfwd = netdev_priv(upper);
5135 struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
5136
5137 if (dfwd->fwd_priv)
5138 ixgbe_fwd_ring_up(upper, vadapter);
5139 }
5140
5141 return 0;
5142}
5143
5144static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
5145{
5146 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5147 ixgbe_upper_dev_walk, NULL);
5148}
5149
5150static void ixgbe_configure(struct ixgbe_adapter *adapter)
5151{
5152 struct ixgbe_hw *hw = &adapter->hw;
5153
5154 ixgbe_configure_pb(adapter);
5155#ifdef CONFIG_IXGBE_DCB
5156 ixgbe_configure_dcb(adapter);
5157#endif
5158
5159
5160
5161
5162 ixgbe_configure_virtualization(adapter);
5163
5164 ixgbe_set_rx_mode(adapter->netdev);
5165 ixgbe_restore_vlan(adapter);
5166
5167 switch (hw->mac.type) {
5168 case ixgbe_mac_82599EB:
5169 case ixgbe_mac_X540:
5170 hw->mac.ops.disable_rx_buff(hw);
5171 break;
5172 default:
5173 break;
5174 }
5175
5176 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5177 ixgbe_init_fdir_signature_82599(&adapter->hw,
5178 adapter->fdir_pballoc);
5179 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
5180 ixgbe_init_fdir_perfect_82599(&adapter->hw,
5181 adapter->fdir_pballoc);
5182 ixgbe_fdir_filter_restore(adapter);
5183 }
5184
5185 switch (hw->mac.type) {
5186 case ixgbe_mac_82599EB:
5187 case ixgbe_mac_X540:
5188 hw->mac.ops.enable_rx_buff(hw);
5189 break;
5190 default:
5191 break;
5192 }
5193
5194#ifdef CONFIG_IXGBE_DCA
5195
5196 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
5197 ixgbe_setup_dca(adapter);
5198#endif
5199
5200#ifdef IXGBE_FCOE
5201
5202 ixgbe_configure_fcoe(adapter);
5203
5204#endif
5205 ixgbe_configure_tx(adapter);
5206 ixgbe_configure_rx(adapter);
5207 ixgbe_configure_dfwd(adapter);
5208}
5209
/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 **/
5214static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
5215{
5216
5217
5218
5219
5220
5221
5222 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5223 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5224
5225 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5226 adapter->sfp_poll_time = 0;
5227}
5228
/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
5235static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
5236{
5237 u32 speed;
5238 bool autoneg, link_up = false;
5239 int ret = IXGBE_ERR_LINK_SETUP;
5240
5241 if (hw->mac.ops.check_link)
5242 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
5243
5244 if (ret)
5245 return ret;
5246
5247 speed = hw->phy.autoneg_advertised;
5248 if ((!speed) && (hw->mac.ops.get_link_capabilities))
5249 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
5250 &autoneg);
5251 if (ret)
5252 return ret;
5253
5254 if (hw->mac.ops.setup_link)
5255 ret = hw->mac.ops.setup_link(hw, speed, link_up);
5256
5257 return ret;
5258}
5259
5260static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
5261{
5262 struct ixgbe_hw *hw = &adapter->hw;
5263 u32 gpie = 0;
5264
5265 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5266 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5267 IXGBE_GPIE_OCD;
5268 gpie |= IXGBE_GPIE_EIAME;
5269
5270
5271
5272
5273 switch (hw->mac.type) {
5274 case ixgbe_mac_82598EB:
5275 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5276 break;
5277 case ixgbe_mac_82599EB:
5278 case ixgbe_mac_X540:
5279 case ixgbe_mac_X550:
5280 case ixgbe_mac_X550EM_x:
5281 case ixgbe_mac_x550em_a:
5282 default:
5283 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5284 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5285 break;
5286 }
5287 } else {
5288
5289
5290 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5291 }
5292
5293
5294
5295
5296 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
5297 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
5298
5299 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
5300 case IXGBE_82599_VMDQ_8Q_MASK:
5301 gpie |= IXGBE_GPIE_VTMODE_16;
5302 break;
5303 case IXGBE_82599_VMDQ_4Q_MASK:
5304 gpie |= IXGBE_GPIE_VTMODE_32;
5305 break;
5306 default:
5307 gpie |= IXGBE_GPIE_VTMODE_64;
5308 break;
5309 }
5310 }
5311
5312
5313 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
5314 switch (adapter->hw.mac.type) {
5315 case ixgbe_mac_82599EB:
5316 gpie |= IXGBE_SDP0_GPIEN_8259X;
5317 break;
5318 default:
5319 break;
5320 }
5321 }
5322
5323
5324 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
5325 gpie |= IXGBE_SDP1_GPIEN(hw);
5326
5327 switch (hw->mac.type) {
5328 case ixgbe_mac_82599EB:
5329 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
5330 break;
5331 case ixgbe_mac_X550EM_x:
5332 case ixgbe_mac_x550em_a:
5333 gpie |= IXGBE_SDP0_GPIEN_X540;
5334 break;
5335 default:
5336 break;
5337 }
5338
5339 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5340}
5341
5342static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
5343{
5344 struct ixgbe_hw *hw = &adapter->hw;
5345 int err;
5346 u32 ctrl_ext;
5347
5348 ixgbe_get_hw_control(adapter);
5349 ixgbe_setup_gpie(adapter);
5350
5351 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
5352 ixgbe_configure_msix(adapter);
5353 else
5354 ixgbe_configure_msi_and_legacy(adapter);
5355
5356
5357 if (hw->mac.ops.enable_tx_laser)
5358 hw->mac.ops.enable_tx_laser(hw);
5359
5360 if (hw->phy.ops.set_phy_power)
5361 hw->phy.ops.set_phy_power(hw, true);
5362
5363 smp_mb__before_atomic();
5364 clear_bit(__IXGBE_DOWN, &adapter->state);
5365 ixgbe_napi_enable_all(adapter);
5366
5367 if (ixgbe_is_sfp(hw)) {
5368 ixgbe_sfp_link_config(adapter);
5369 } else {
5370 err = ixgbe_non_sfp_link_config(hw);
5371 if (err)
5372 e_err(probe, "link_config FAILED %d\n", err);
5373 }
5374
5375
5376 IXGBE_READ_REG(hw, IXGBE_EICR);
5377 ixgbe_irq_enable(adapter, true, true);
5378
5379
5380
5381
5382
5383 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5384 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5385 if (esdp & IXGBE_ESDP_SDP1)
5386 e_crit(drv, "Fan has stopped, replace the adapter\n");
5387 }
5388
5389
5390
5391 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5392 adapter->link_check_timeout = jiffies;
5393 mod_timer(&adapter->service_timer, jiffies);
5394
5395
5396 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5397 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5398 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5399}
5400
5401void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5402{
5403 WARN_ON(in_interrupt());
5404
5405 netif_trans_update(adapter->netdev);
5406
5407 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5408 usleep_range(1000, 2000);
5409 if (adapter->hw.phy.type == ixgbe_phy_fw)
5410 ixgbe_watchdog_link_is_down(adapter);
5411 ixgbe_down(adapter);
5412
	/* If SR-IOV is enabled, wait a bit before bringing the adapter
	 * back up so the VFs have time to respond to the reset.  The
	 * two second wait is based on the watchdog cycle of the VF
	 * driver.
	 */
5418 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5419 msleep(2000);
5420 ixgbe_up(adapter);
5421 clear_bit(__IXGBE_RESETTING, &adapter->state);
5422}
5423
5424void ixgbe_up(struct ixgbe_adapter *adapter)
5425{
5426
5427 ixgbe_configure(adapter);
5428
5429 ixgbe_up_complete(adapter);
5430}
5431
5432void ixgbe_reset(struct ixgbe_adapter *adapter)
5433{
5434 struct ixgbe_hw *hw = &adapter->hw;
5435 struct net_device *netdev = adapter->netdev;
5436 int err;
5437
5438 if (ixgbe_removed(hw->hw_addr))
5439 return;
5440
5441 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5442 usleep_range(1000, 2000);
5443
5444
5445 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5446 IXGBE_FLAG2_SFP_NEEDS_RESET);
5447 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5448
5449 err = hw->mac.ops.init_hw(hw);
5450 switch (err) {
5451 case 0:
5452 case IXGBE_ERR_SFP_NOT_PRESENT:
5453 case IXGBE_ERR_SFP_NOT_SUPPORTED:
5454 break;
5455 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5456 e_dev_err("master disable timed out\n");
5457 break;
5458 case IXGBE_ERR_EEPROM_VERSION:
5459
5460 e_dev_warn("This device is a pre-production adapter/LOM. "
5461 "Please be aware there may be issues associated with "
5462 "your hardware. If you are experiencing problems "
5463 "please contact your Intel or hardware "
5464 "representative who provided you with this "
5465 "hardware.\n");
5466 break;
5467 default:
5468 e_dev_err("Hardware Error: %d\n", err);
5469 }
5470
5471 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5472
5473
5474 ixgbe_flush_sw_mac_table(adapter);
5475 __dev_uc_unsync(netdev, NULL);
5476
5477
5478 ixgbe_mac_set_default_filter(adapter);
5479
5480
5481 if (hw->mac.san_mac_rar_index)
5482 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5483
5484 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5485 ixgbe_ptp_reset(adapter);
5486
5487 if (hw->phy.ops.set_phy_power) {
5488 if (!netif_running(adapter->netdev) && !adapter->wol)
5489 hw->phy.ops.set_phy_power(hw, false);
5490 else
5491 hw->phy.ops.set_phy_power(hw, true);
5492 }
5493}
5494
/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
5499static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5500{
5501 u16 i = tx_ring->next_to_clean;
5502 struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
5503
5504 while (i != tx_ring->next_to_use) {
5505 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
5506
5507
5508 dev_kfree_skb_any(tx_buffer->skb);
5509
5510
5511 dma_unmap_single(tx_ring->dev,
5512 dma_unmap_addr(tx_buffer, dma),
5513 dma_unmap_len(tx_buffer, len),
5514 DMA_TO_DEVICE);
5515
5516
5517 eop_desc = tx_buffer->next_to_watch;
5518 tx_desc = IXGBE_TX_DESC(tx_ring, i);
5519
5520
5521 while (tx_desc != eop_desc) {
5522 tx_buffer++;
5523 tx_desc++;
5524 i++;
5525 if (unlikely(i == tx_ring->count)) {
5526 i = 0;
5527 tx_buffer = tx_ring->tx_buffer_info;
5528 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
5529 }
5530
5531
5532 if (dma_unmap_len(tx_buffer, len))
5533 dma_unmap_page(tx_ring->dev,
5534 dma_unmap_addr(tx_buffer, dma),
5535 dma_unmap_len(tx_buffer, len),
5536 DMA_TO_DEVICE);
5537 }
5538
5539
5540 tx_buffer++;
5541 i++;
5542 if (unlikely(i == tx_ring->count)) {
5543 i = 0;
5544 tx_buffer = tx_ring->tx_buffer_info;
5545 }
5546 }
5547
5548
5549 netdev_tx_reset_queue(txring_txq(tx_ring));
5550
5551
5552 tx_ring->next_to_use = 0;
5553 tx_ring->next_to_clean = 0;
5554}
5555
/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
5560static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
5561{
5562 int i;
5563
5564 for (i = 0; i < adapter->num_rx_queues; i++)
5565 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
5566}
5567
/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
5572static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
5573{
5574 int i;
5575
5576 for (i = 0; i < adapter->num_tx_queues; i++)
5577 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
5578}
5579
5580static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
5581{
5582 struct hlist_node *node2;
5583 struct ixgbe_fdir_filter *filter;
5584
5585 spin_lock(&adapter->fdir_perfect_lock);
5586
5587 hlist_for_each_entry_safe(filter, node2,
5588 &adapter->fdir_filter_list, fdir_node) {
5589 hlist_del(&filter->fdir_node);
5590 kfree(filter);
5591 }
5592 adapter->fdir_filter_count = 0;
5593
5594 spin_unlock(&adapter->fdir_perfect_lock);
5595}
5596
5597static int ixgbe_disable_macvlan(struct net_device *upper, void *data)
5598{
5599 if (netif_is_macvlan(upper)) {
5600 struct macvlan_dev *vlan = netdev_priv(upper);
5601
5602 if (vlan->fwd_priv) {
5603 netif_tx_stop_all_queues(upper);
5604 netif_carrier_off(upper);
5605 netif_tx_disable(upper);
5606 }
5607 }
5608
5609 return 0;
5610}
5611
5612void ixgbe_down(struct ixgbe_adapter *adapter)
5613{
5614 struct net_device *netdev = adapter->netdev;
5615 struct ixgbe_hw *hw = &adapter->hw;
5616 int i;
5617
5618
5619 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
5620 return;
5621
5622
5623 hw->mac.ops.disable_rx(hw);
5624
5625
5626 for (i = 0; i < adapter->num_rx_queues; i++)
5627
5628 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
5629
5630 usleep_range(10000, 20000);
5631
5632 netif_tx_stop_all_queues(netdev);
5633
5634
5635 netif_carrier_off(netdev);
5636 netif_tx_disable(netdev);
5637
5638
5639 netdev_walk_all_upper_dev_rcu(adapter->netdev,
5640 ixgbe_disable_macvlan, NULL);
5641
5642 ixgbe_irq_disable(adapter);
5643
5644 ixgbe_napi_disable_all(adapter);
5645
5646 clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
5647 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
5648 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5649
5650 del_timer_sync(&adapter->service_timer);
5651
5652 if (adapter->num_vfs) {
5653
5654 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
5655
5656
5657 for (i = 0 ; i < adapter->num_vfs; i++)
5658 adapter->vfinfo[i].clear_to_send = false;
5659
5660
5661 ixgbe_ping_all_vfs(adapter);
5662
5663
5664 ixgbe_disable_tx_rx(adapter);
5665 }
5666
5667
5668 for (i = 0; i < adapter->num_tx_queues; i++) {
5669 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
5670 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5671 }
5672
5673
5674 switch (hw->mac.type) {
5675 case ixgbe_mac_82599EB:
5676 case ixgbe_mac_X540:
5677 case ixgbe_mac_X550:
5678 case ixgbe_mac_X550EM_x:
5679 case ixgbe_mac_x550em_a:
5680 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5681 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5682 ~IXGBE_DMATXCTL_TE));
5683 break;
5684 default:
5685 break;
5686 }
5687
5688 if (!pci_channel_offline(adapter->pdev))
5689 ixgbe_reset(adapter);
5690
5691
5692 if (hw->mac.ops.disable_tx_laser)
5693 hw->mac.ops.disable_tx_laser(hw);
5694
5695 ixgbe_clean_all_tx_rings(adapter);
5696 ixgbe_clean_all_rx_rings(adapter);
5697}
5698
/**
 * ixgbe_set_eee_capable - set the EEE capability flags for supported devices
 * @adapter: board private structure
 **/
5703static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
5704{
5705 struct ixgbe_hw *hw = &adapter->hw;
5706
5707 switch (hw->device_id) {
5708 case IXGBE_DEV_ID_X550EM_A_1G_T:
5709 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
5710 if (!hw->phy.eee_speeds_supported)
5711 break;
5712 adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
5713 if (!hw->phy.eee_speeds_advertised)
5714 break;
5715 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
5716 break;
5717 default:
5718 adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
5719 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
5720 break;
5721 }
5722}
5723
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
5728static void ixgbe_tx_timeout(struct net_device *netdev)
5729{
5730 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5731
5732
5733 ixgbe_tx_timeout_reset(adapter);
5734}
5735
5736#ifdef CONFIG_IXGBE_DCB
5737static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
5738{
5739 struct ixgbe_hw *hw = &adapter->hw;
5740 struct tc_configuration *tc;
5741 int j;
5742
5743 switch (hw->mac.type) {
5744 case ixgbe_mac_82598EB:
5745 case ixgbe_mac_82599EB:
5746 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
5747 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
5748 break;
5749 case ixgbe_mac_X540:
5750 case ixgbe_mac_X550:
5751 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
5752 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
5753 break;
5754 case ixgbe_mac_X550EM_x:
5755 case ixgbe_mac_x550em_a:
5756 default:
5757 adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
5758 adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
5759 break;
5760 }
5761
5762
5763 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
5764 tc = &adapter->dcb_cfg.tc_config[j];
5765 tc->path[DCB_TX_CONFIG].bwg_id = 0;
5766 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
5767 tc->path[DCB_RX_CONFIG].bwg_id = 0;
5768 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
5769 tc->dcb_pfc = pfc_disabled;
5770 }
5771
5772
5773 tc = &adapter->dcb_cfg.tc_config[0];
5774 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
5775 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
5776
5777 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
5778 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
5779 adapter->dcb_cfg.pfc_mode_enable = false;
5780 adapter->dcb_set_bitmap = 0x00;
5781 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
5782 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
5783 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
5784 sizeof(adapter->temp_dcb_cfg));
5785}
5786#endif
5787
/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 * @ii: pointer to ixgbe_info for the device
 *
 * ixgbe_sw_init initializes the adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
5796static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
5797 const struct ixgbe_info *ii)
5798{
5799 struct ixgbe_hw *hw = &adapter->hw;
5800 struct pci_dev *pdev = adapter->pdev;
5801 unsigned int rss, fdir;
5802 u32 fwsm;
5803 int i;
5804
5805
5806
5807 hw->vendor_id = pdev->vendor;
5808 hw->device_id = pdev->device;
5809 hw->revision_id = pdev->revision;
5810 hw->subsystem_vendor_id = pdev->subsystem_vendor;
5811 hw->subsystem_device_id = pdev->subsystem_device;
5812
5813
5814 ii->get_invariants(hw);
5815
5816
5817 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
5818 adapter->ring_feature[RING_F_RSS].limit = rss;
5819 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
5820 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
5821 adapter->atr_sample_rate = 20;
5822 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
5823 adapter->ring_feature[RING_F_FDIR].limit = fdir;
5824 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
5825#ifdef CONFIG_IXGBE_DCA
5826 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
5827#endif
5828#ifdef CONFIG_IXGBE_DCB
5829 adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
5830 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
5831#endif
5832#ifdef IXGBE_FCOE
5833 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
5834 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5835#ifdef CONFIG_IXGBE_DCB
5836
5837 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
5838#endif
5839#endif
5840
5841
5842 adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
5843 GFP_KERNEL);
5844 if (!adapter->jump_tables[0])
5845 return -ENOMEM;
5846 adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
5847
5848 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
5849 adapter->jump_tables[i] = NULL;
5850
5851 adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
5852 hw->mac.num_rar_entries,
5853 GFP_ATOMIC);
5854 if (!adapter->mac_table)
5855 return -ENOMEM;
5856
5857
5858 switch (hw->mac.type) {
5859 case ixgbe_mac_82598EB:
5860 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
5861
5862 if (hw->device_id == IXGBE_DEV_ID_82598AT)
5863 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
5864
5865 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
5866 adapter->ring_feature[RING_F_FDIR].limit = 0;
5867 adapter->atr_sample_rate = 0;
5868 adapter->fdir_pballoc = 0;
5869#ifdef IXGBE_FCOE
5870 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
5871 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5872#ifdef CONFIG_IXGBE_DCB
5873 adapter->fcoe.up = 0;
5874#endif
5875#endif
5876 break;
5877 case ixgbe_mac_82599EB:
5878 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
5879 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5880 break;
5881 case ixgbe_mac_X540:
5882 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
5883 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5884 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5885 break;
5886 case ixgbe_mac_x550em_a:
5887 adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
5888 switch (hw->device_id) {
5889 case IXGBE_DEV_ID_X550EM_A_1G_T:
5890 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
5891 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5892 break;
5893 default:
5894 break;
5895 }
5896
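		/* fall through */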
5897 case ixgbe_mac_X550EM_x:
5898#ifdef CONFIG_IXGBE_DCB
5899 adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
5900#endif
5901#ifdef IXGBE_FCOE
5902 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
5903#ifdef CONFIG_IXGBE_DCB
5904 adapter->fcoe.up = 0;
5905#endif
5906#endif
5907
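		/* fall through */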
5908 case ixgbe_mac_X550:
5909 if (hw->mac.type == ixgbe_mac_X550)
5910 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5911#ifdef CONFIG_IXGBE_DCA
5912 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
5913#endif
5914 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
5915 break;
5916 default:
5917 break;
5918 }
5919
5920#ifdef IXGBE_FCOE
5921
5922 spin_lock_init(&adapter->fcoe.lock);
5923
5924#endif
5925
5926 spin_lock_init(&adapter->fdir_perfect_lock);
5927
5928#ifdef CONFIG_IXGBE_DCB
5929 ixgbe_init_dcb(adapter);
5930#endif
5931
5932
5933 hw->fc.requested_mode = ixgbe_fc_full;
5934 hw->fc.current_mode = ixgbe_fc_full;
5935 ixgbe_pbthresh_setup(adapter);
5936 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
5937 hw->fc.send_xon = true;
5938 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
5939
5940#ifdef CONFIG_PCI_IOV
5941 if (max_vfs > 0)
5942 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
5943
5944
5945 if (hw->mac.type != ixgbe_mac_82598EB) {
5946 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
5947 adapter->num_vfs = 0;
5948 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
5949 } else {
5950 adapter->num_vfs = max_vfs;
5951 }
5952 }
5953#endif
5954
5955
5956 adapter->rx_itr_setting = 1;
5957 adapter->tx_itr_setting = 1;
5958
5959
5960 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
5961 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
5962
5963
5964 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
5965
5966
5967 if (ixgbe_init_eeprom_params_generic(hw)) {
5968 e_dev_err("EEPROM initialization failed\n");
5969 return -EIO;
5970 }
5971
5972
5973 set_bit(0, &adapter->fwd_bitmask);
5974 set_bit(__IXGBE_DOWN, &adapter->state);
5975
5976 return 0;
5977}
5978
/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
5985int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
5986{
5987 struct device *dev = tx_ring->dev;
5988 int orig_node = dev_to_node(dev);
5989 int ring_node = -1;
5990 int size;
5991
5992 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
5993
5994 if (tx_ring->q_vector)
5995 ring_node = tx_ring->q_vector->numa_node;
5996
5997 tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
5998 if (!tx_ring->tx_buffer_info)
5999 tx_ring->tx_buffer_info = vmalloc(size);
6000 if (!tx_ring->tx_buffer_info)
6001 goto err;
6002
6003 u64_stats_init(&tx_ring->syncp);
6004
6005
6006 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
6007 tx_ring->size = ALIGN(tx_ring->size, 4096);
6008
6009 set_dev_node(dev, ring_node);
6010 tx_ring->desc = dma_alloc_coherent(dev,
6011 tx_ring->size,
6012 &tx_ring->dma,
6013 GFP_KERNEL);
6014 set_dev_node(dev, orig_node);
6015 if (!tx_ring->desc)
6016 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
6017 &tx_ring->dma, GFP_KERNEL);
6018 if (!tx_ring->desc)
6019 goto err;
6020
6021 tx_ring->next_to_use = 0;
6022 tx_ring->next_to_clean = 0;
6023 return 0;
6024
6025err:
6026 vfree(tx_ring->tx_buffer_info);
6027 tx_ring->tx_buffer_info = NULL;
6028 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
6029 return -ENOMEM;
6030}
6031
/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
6042static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
6043{
6044 int i, err = 0;
6045
6046 for (i = 0; i < adapter->num_tx_queues; i++) {
6047 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
6048 if (!err)
6049 continue;
6050
6051 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
6052 goto err_setup_tx;
6053 }
6054
6055 return 0;
6056err_setup_tx:
6057
6058 while (i--)
6059 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6060 return err;
6061}
6062
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
6069int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
6070{
6071 struct device *dev = rx_ring->dev;
6072 int orig_node = dev_to_node(dev);
6073 int ring_node = -1;
6074 int size;
6075
6076 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
6077
6078 if (rx_ring->q_vector)
6079 ring_node = rx_ring->q_vector->numa_node;
6080
6081 rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
6082 if (!rx_ring->rx_buffer_info)
6083 rx_ring->rx_buffer_info = vmalloc(size);
6084 if (!rx_ring->rx_buffer_info)
6085 goto err;
6086
6087 u64_stats_init(&rx_ring->syncp);
6088
6089
6090 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
6091 rx_ring->size = ALIGN(rx_ring->size, 4096);
6092
6093 set_dev_node(dev, ring_node);
6094 rx_ring->desc = dma_alloc_coherent(dev,
6095 rx_ring->size,
6096 &rx_ring->dma,
6097 GFP_KERNEL);
6098 set_dev_node(dev, orig_node);
6099 if (!rx_ring->desc)
6100 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
6101 &rx_ring->dma, GFP_KERNEL);
6102 if (!rx_ring->desc)
6103 goto err;
6104
6105 rx_ring->next_to_clean = 0;
6106 rx_ring->next_to_use = 0;
6107
6108 return 0;
6109err:
6110 vfree(rx_ring->rx_buffer_info);
6111 rx_ring->rx_buffer_info = NULL;
6112 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
6113 return -ENOMEM;
6114}
6115
/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
6126static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
6127{
6128 int i, err = 0;
6129
6130 for (i = 0; i < adapter->num_rx_queues; i++) {
6131 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
6132 if (!err)
6133 continue;
6134
6135 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
6136 goto err_setup_rx;
6137 }
6138
6139#ifdef IXGBE_FCOE
6140 err = ixgbe_setup_fcoe_ddp_resources(adapter);
6141 if (!err)
6142#endif
6143 return 0;
6144err_setup_rx:
6145
6146 while (i--)
6147 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6148 return err;
6149}
6150
/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
6157void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
6158{
6159 ixgbe_clean_tx_ring(tx_ring);
6160
6161 vfree(tx_ring->tx_buffer_info);
6162 tx_ring->tx_buffer_info = NULL;
6163
6164
6165 if (!tx_ring->desc)
6166 return;
6167
6168 dma_free_coherent(tx_ring->dev, tx_ring->size,
6169 tx_ring->desc, tx_ring->dma);
6170
6171 tx_ring->desc = NULL;
6172}
6173
/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
6180static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
6181{
6182 int i;
6183
6184 for (i = 0; i < adapter->num_tx_queues; i++)
6185 if (adapter->tx_ring[i]->desc)
6186 ixgbe_free_tx_resources(adapter->tx_ring[i]);
6187}
6188
/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
6195void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
6196{
6197 ixgbe_clean_rx_ring(rx_ring);
6198
6199 vfree(rx_ring->rx_buffer_info);
6200 rx_ring->rx_buffer_info = NULL;
6201
6202
6203 if (!rx_ring->desc)
6204 return;
6205
6206 dma_free_coherent(rx_ring->dev, rx_ring->size,
6207 rx_ring->desc, rx_ring->dma);
6208
6209 rx_ring->desc = NULL;
6210}
6211
/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
6218static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
6219{
6220 int i;
6221
6222#ifdef IXGBE_FCOE
6223 ixgbe_free_fcoe_ddp_resources(adapter);
6224
6225#endif
6226 for (i = 0; i < adapter->num_rx_queues; i++)
6227 if (adapter->rx_ring[i]->desc)
6228 ixgbe_free_rx_resources(adapter->rx_ring[i]);
6229}
6230
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
6238static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6239{
6240 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6241
6242
6243
6244
6245
6246
6247 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
6248 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
6249 (new_mtu > ETH_DATA_LEN))
6250 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
6251
6252 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
6253
6254
6255 netdev->mtu = new_mtu;
6256
6257 if (netif_running(netdev))
6258 ixgbe_reinit_locked(adapter);
6259
6260 return 0;
6261}
6262
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
6275int ixgbe_open(struct net_device *netdev)
6276{
6277 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6278 struct ixgbe_hw *hw = &adapter->hw;
6279 int err, queues;
6280
6281
6282 if (test_bit(__IXGBE_TESTING, &adapter->state))
6283 return -EBUSY;
6284
6285 netif_carrier_off(netdev);
6286
6287
6288 err = ixgbe_setup_all_tx_resources(adapter);
6289 if (err)
6290 goto err_setup_tx;
6291
6292
6293 err = ixgbe_setup_all_rx_resources(adapter);
6294 if (err)
6295 goto err_setup_rx;
6296
6297 ixgbe_configure(adapter);
6298
6299 err = ixgbe_request_irq(adapter);
6300 if (err)
6301 goto err_req_irq;
6302
6303
6304 if (adapter->num_rx_pools > 1)
6305 queues = adapter->num_rx_queues_per_pool;
6306 else
6307 queues = adapter->num_tx_queues;
6308
6309 err = netif_set_real_num_tx_queues(netdev, queues);
6310 if (err)
6311 goto err_set_queues;
6312
6313 if (adapter->num_rx_pools > 1 &&
6314 adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
6315 queues = IXGBE_MAX_L2A_QUEUES;
6316 else
6317 queues = adapter->num_rx_queues;
6318 err = netif_set_real_num_rx_queues(netdev, queues);
6319 if (err)
6320 goto err_set_queues;
6321
6322 ixgbe_ptp_init(adapter);
6323
6324 ixgbe_up_complete(adapter);
6325
6326 ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
6327 udp_tunnel_get_rx_info(netdev);
6328
6329 return 0;
6330
6331err_set_queues:
6332 ixgbe_free_irq(adapter);
6333err_req_irq:
6334 ixgbe_free_all_rx_resources(adapter);
6335 if (hw->phy.ops.set_phy_power && !adapter->wol)
6336 hw->phy.ops.set_phy_power(&adapter->hw, false);
6337err_setup_rx:
6338 ixgbe_free_all_tx_resources(adapter);
6339err_setup_tx:
6340 ixgbe_reset(adapter);
6341
6342 return err;
6343}
6344
6345static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
6346{
6347 ixgbe_ptp_suspend(adapter);
6348
6349 if (adapter->hw.phy.ops.enter_lplu) {
6350 adapter->hw.phy.reset_disable = true;
6351 ixgbe_down(adapter);
6352 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
6353 adapter->hw.phy.reset_disable = false;
6354 } else {
6355 ixgbe_down(adapter);
6356 }
6357
6358 ixgbe_free_irq(adapter);
6359
6360 ixgbe_free_all_tx_resources(adapter);
6361 ixgbe_free_all_rx_resources(adapter);
6362}
6363
/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
6375int ixgbe_close(struct net_device *netdev)
6376{
6377 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6378
6379 ixgbe_ptp_stop(adapter);
6380
6381 if (netif_device_present(netdev))
6382 ixgbe_close_suspend(adapter);
6383
6384 ixgbe_fdir_filter_exit(adapter);
6385
6386 ixgbe_release_hw_control(adapter);
6387
6388 return 0;
6389}
6390
6391#ifdef CONFIG_PM
6392static int ixgbe_resume(struct pci_dev *pdev)
6393{
6394 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6395 struct net_device *netdev = adapter->netdev;
6396 u32 err;
6397
6398 adapter->hw.hw_addr = adapter->io_addr;
6399 pci_set_power_state(pdev, PCI_D0);
6400 pci_restore_state(pdev);
6401
6402
6403
6404
6405 pci_save_state(pdev);
6406
6407 err = pci_enable_device_mem(pdev);
6408 if (err) {
6409 e_dev_err("Cannot enable PCI device from suspend\n");
6410 return err;
6411 }
6412 smp_mb__before_atomic();
6413 clear_bit(__IXGBE_DISABLED, &adapter->state);
6414 pci_set_master(pdev);
6415
6416 pci_wake_from_d3(pdev, false);
6417
6418 ixgbe_reset(adapter);
6419
6420 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6421
6422 rtnl_lock();
6423 err = ixgbe_init_interrupt_scheme(adapter);
6424 if (!err && netif_running(netdev))
6425 err = ixgbe_open(netdev);
6426
6427
6428 if (!err)
6429 netif_device_attach(netdev);
6430 rtnl_unlock();
6431
6432 return err;
6433}
6434#endif
6435
6436static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
6437{
6438 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
6439 struct net_device *netdev = adapter->netdev;
6440 struct ixgbe_hw *hw = &adapter->hw;
6441 u32 ctrl, fctrl;
6442 u32 wufc = adapter->wol;
6443#ifdef CONFIG_PM
6444 int retval = 0;
6445#endif
6446
6447 rtnl_lock();
6448 netif_device_detach(netdev);
6449
6450 if (netif_running(netdev))
6451 ixgbe_close_suspend(adapter);
6452
6453 ixgbe_clear_interrupt_scheme(adapter);
6454 rtnl_unlock();
6455
6456#ifdef CONFIG_PM
6457 retval = pci_save_state(pdev);
6458 if (retval)
6459 return retval;
6460
6461#endif
6462 if (hw->mac.ops.stop_link_on_d3)
6463 hw->mac.ops.stop_link_on_d3(hw);
6464
6465 if (wufc) {
6466 ixgbe_set_rx_mode(netdev);
6467
6468
6469 if (hw->mac.ops.enable_tx_laser)
6470 hw->mac.ops.enable_tx_laser(hw);
6471
6472
6473 if (wufc & IXGBE_WUFC_MC) {
6474 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6475 fctrl |= IXGBE_FCTRL_MPE;
6476 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
6477 }
6478
6479 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
6480 ctrl |= IXGBE_CTRL_GIO_DIS;
6481 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
6482
6483 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
6484 } else {
6485 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
6486 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
6487 }
6488
6489 switch (hw->mac.type) {
6490 case ixgbe_mac_82598EB:
6491 pci_wake_from_d3(pdev, false);
6492 break;
6493 case ixgbe_mac_82599EB:
6494 case ixgbe_mac_X540:
6495 case ixgbe_mac_X550:
6496 case ixgbe_mac_X550EM_x:
6497 case ixgbe_mac_x550em_a:
6498 pci_wake_from_d3(pdev, !!wufc);
6499 break;
6500 default:
6501 break;
6502 }
6503
6504 *enable_wake = !!wufc;
6505 if (hw->phy.ops.set_phy_power && !*enable_wake)
6506 hw->phy.ops.set_phy_power(hw, false);
6507
6508 ixgbe_release_hw_control(adapter);
6509
6510 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6511 pci_disable_device(pdev);
6512
6513 return 0;
6514}
6515
6516#ifdef CONFIG_PM
6517static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
6518{
6519 int retval;
6520 bool wake;
6521
6522 retval = __ixgbe_shutdown(pdev, &wake);
6523 if (retval)
6524 return retval;
6525
6526 if (wake) {
6527 pci_prepare_to_sleep(pdev);
6528 } else {
6529 pci_wake_from_d3(pdev, false);
6530 pci_set_power_state(pdev, PCI_D3hot);
6531 }
6532
6533 return 0;
6534}
6535#endif
6536
6537static void ixgbe_shutdown(struct pci_dev *pdev)
6538{
6539 bool wake;
6540
6541 __ixgbe_shutdown(pdev, &wake);
6542
6543 if (system_state == SYSTEM_POWER_OFF) {
6544 pci_wake_from_d3(pdev, wake);
6545 pci_set_power_state(pdev, PCI_D3hot);
6546 }
6547}
6548
/**
 * ixgbe_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
6553void ixgbe_update_stats(struct ixgbe_adapter *adapter)
6554{
6555 struct net_device *netdev = adapter->netdev;
6556 struct ixgbe_hw *hw = &adapter->hw;
6557 struct ixgbe_hw_stats *hwstats = &adapter->stats;
6558 u64 total_mpc = 0;
6559 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
6560 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
6561 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
6562 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
6563
6564 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6565 test_bit(__IXGBE_RESETTING, &adapter->state))
6566 return;
6567
6568 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
6569 u64 rsc_count = 0;
6570 u64 rsc_flush = 0;
6571 for (i = 0; i < adapter->num_rx_queues; i++) {
6572 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
6573 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
6574 }
6575 adapter->rsc_total_count = rsc_count;
6576 adapter->rsc_total_flush = rsc_flush;
6577 }
6578
6579 for (i = 0; i < adapter->num_rx_queues; i++) {
6580 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
6581 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
6582 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
6583 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
6584 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
6585 bytes += rx_ring->stats.bytes;
6586 packets += rx_ring->stats.packets;
6587 }
6588 adapter->non_eop_descs = non_eop_descs;
6589 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
6590 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
6591 adapter->hw_csum_rx_error = hw_csum_rx_error;
6592 netdev->stats.rx_bytes = bytes;
6593 netdev->stats.rx_packets = packets;
6594
6595 bytes = 0;
6596 packets = 0;
6597
6598 for (i = 0; i < adapter->num_tx_queues; i++) {
6599 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6600 restart_queue += tx_ring->tx_stats.restart_queue;
6601 tx_busy += tx_ring->tx_stats.tx_busy;
6602 bytes += tx_ring->stats.bytes;
6603 packets += tx_ring->stats.packets;
6604 }
6605 adapter->restart_queue = restart_queue;
6606 adapter->tx_busy = tx_busy;
6607 netdev->stats.tx_bytes = bytes;
6608 netdev->stats.tx_packets = packets;
6609
6610 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
6611
6612
6613 for (i = 0; i < 8; i++) {
6614
6615 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
6616 missed_rx += mpc;
6617 hwstats->mpc[i] += mpc;
6618 total_mpc += hwstats->mpc[i];
6619 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
6620 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
6621 switch (hw->mac.type) {
6622 case ixgbe_mac_82598EB:
6623 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
6624 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
6625 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
6626 hwstats->pxonrxc[i] +=
6627 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
6628 break;
6629 case ixgbe_mac_82599EB:
6630 case ixgbe_mac_X540:
6631 case ixgbe_mac_X550:
6632 case ixgbe_mac_X550EM_x:
6633 case ixgbe_mac_x550em_a:
6634 hwstats->pxonrxc[i] +=
6635 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
6636 break;
6637 default:
6638 break;
6639 }
6640 }
6641
6642
6643 for (i = 0; i < 16; i++) {
6644 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
6645 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
6646 if ((hw->mac.type == ixgbe_mac_82599EB) ||
6647 (hw->mac.type == ixgbe_mac_X540) ||
6648 (hw->mac.type == ixgbe_mac_X550) ||
6649 (hw->mac.type == ixgbe_mac_X550EM_x) ||
6650 (hw->mac.type == ixgbe_mac_x550em_a)) {
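			/* the per-queue byte counters are split across low
			 * and high registers; the high halves are read and
			 * discarded below so the hardware counters clear
			 */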
6651 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
6652 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
6653 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
6654 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
6655 }
6656 }
6657
6658 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
6659
6660 hwstats->gprc -= missed_rx;
6661
6662 ixgbe_update_xoff_received(adapter);
6663
6664
6665 switch (hw->mac.type) {
6666 case ixgbe_mac_82598EB:
6667 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
6668 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
6669 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
6670 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
6671 break;
6672 case ixgbe_mac_X540:
6673 case ixgbe_mac_X550:
6674 case ixgbe_mac_X550EM_x:
6675 case ixgbe_mac_x550em_a:
6676
6677 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
6678 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
6679 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
6680 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
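		/* fall through */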
6681 case ixgbe_mac_82599EB:
6682 for (i = 0; i < 16; i++)
6683 adapter->hw_rx_no_dma_resources +=
6684 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
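		/* the GORCH/GOTCH/TORH reads below are discarded; reading
		 * them clears the high halves of the octet counters
		 */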
6685 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
6686 IXGBE_READ_REG(hw, IXGBE_GORCH);
6687 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
6688 IXGBE_READ_REG(hw, IXGBE_GOTCH);
6689 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
6690 IXGBE_READ_REG(hw, IXGBE_TORH);
6691 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
6692 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
6693 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
6694#ifdef IXGBE_FCOE
6695 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
6696 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
6697 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
6698 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
6699 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
6700 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
6701
6702 if (adapter->fcoe.ddp_pool) {
6703 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
6704 struct ixgbe_fcoe_ddp_pool *ddp_pool;
6705 unsigned int cpu;
6706 u64 noddp = 0, noddp_ext_buff = 0;
6707 for_each_possible_cpu(cpu) {
6708 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
6709 noddp += ddp_pool->noddp;
6710 noddp_ext_buff += ddp_pool->noddp_ext_buff;
6711 }
6712 hwstats->fcoe_noddp = noddp;
6713 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
6714 }
6715#endif
6716 break;
6717 default:
6718 break;
6719 }
6720 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
6721 hwstats->bprc += bprc;
6722 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
6723 if (hw->mac.type == ixgbe_mac_82598EB)
6724 hwstats->mprc -= bprc;
6725 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
6726 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
6727 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
6728 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
6729 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
6730 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
6731 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
6732 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
6733 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
6734 hwstats->lxontxc += lxon;
6735 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
6736 hwstats->lxofftxc += lxoff;
6737 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
6738 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
6739
6740
6741
6742 xon_off_tot = lxon + lxoff;
6743 hwstats->gptc -= xon_off_tot;
6744 hwstats->mptc -= xon_off_tot;
6745 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
6746 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
6747 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
6748 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
6749 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
6750 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
6751 hwstats->ptc64 -= xon_off_tot;
6752 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
6753 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
6754 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
6755 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
6756 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
6757 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
6758
6759
6760 netdev->stats.multicast = hwstats->mprc;
6761
6762
6763 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
6764 netdev->stats.rx_dropped = 0;
6765 netdev->stats.rx_length_errors = hwstats->rlec;
6766 netdev->stats.rx_crc_errors = hwstats->crcerrs;
6767 netdev->stats.rx_missed_errors = total_mpc;
6768}
6769
/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/
6774static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
6775{
6776 struct ixgbe_hw *hw = &adapter->hw;
6777 int i;
6778
6779 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
6780 return;
6781
6782 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
6783
6784
6785 if (test_bit(__IXGBE_DOWN, &adapter->state))
6786 return;
6787
6788
6789 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
6790 return;
6791
6792 adapter->fdir_overflow++;
6793
6794 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
6795 for (i = 0; i < adapter->num_tx_queues; i++)
6796 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
6797 &(adapter->tx_ring[i]->state));
6798
6799 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
6800 } else {
6801 e_err(probe, "failed to finish FDIR re-initialization, "
6802 "ignored adding FDIR ATR filters\n");
6803 }
6804}
6805
/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for Tx hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/
6815static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
6816{
6817 struct ixgbe_hw *hw = &adapter->hw;
6818 u64 eics = 0;
6819 int i;
6820
6821
6822 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6823 test_bit(__IXGBE_REMOVING, &adapter->state) ||
6824 test_bit(__IXGBE_RESETTING, &adapter->state))
6825 return;
6826
6827
6828 if (netif_carrier_ok(adapter->netdev)) {
6829 for (i = 0; i < adapter->num_tx_queues; i++)
6830 set_check_for_tx_hang(adapter->tx_ring[i]);
6831 }
6832
6833 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
6834
6835
6836
6837
6838
6839 IXGBE_WRITE_REG(hw, IXGBE_EICS,
6840 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
6841 } else {
6842
6843 for (i = 0; i < adapter->num_q_vectors; i++) {
6844 struct ixgbe_q_vector *qv = adapter->q_vector[i];
6845 if (qv->rx.ring || qv->tx.ring)
6846 eics |= BIT_ULL(i);
6847 }
6848 }
6849
6850
6851 ixgbe_irq_rearm_queues(adapter, eics);
6852}
6853
/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
6859static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
6860{
6861 struct ixgbe_hw *hw = &adapter->hw;
6862 u32 link_speed = adapter->link_speed;
6863 bool link_up = adapter->link_up;
6864 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
6865
6866 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
6867 return;
6868
6869 if (hw->mac.ops.check_link) {
6870 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
6871 } else {
6872
6873 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
6874 link_up = true;
6875 }
6876
6877 if (adapter->ixgbe_ieee_pfc)
6878 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
6879
6880 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
6881 hw->mac.ops.fc_enable(hw);
6882 ixgbe_set_rx_drop_en(adapter);
6883 }
6884
6885 if (link_up ||
6886 time_after(jiffies, (adapter->link_check_timeout +
6887 IXGBE_TRY_LINK_TIMEOUT))) {
6888 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6889 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
6890 IXGBE_WRITE_FLUSH(hw);
6891 }
6892
6893 adapter->link_up = link_up;
6894 adapter->link_speed = link_speed;
6895}
6896
6897static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
6898{
6899#ifdef CONFIG_IXGBE_DCB
6900 struct net_device *netdev = adapter->netdev;
6901 struct dcb_app app = {
6902 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
6903 .protocol = 0,
6904 };
6905 u8 up = 0;
6906
6907 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
6908 up = dcb_ieee_getapp_mask(netdev, &app);
6909
6910 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
6911#endif
6912}
6913
6914static int ixgbe_enable_macvlan(struct net_device *upper, void *data)
6915{
6916 if (netif_is_macvlan(upper)) {
6917 struct macvlan_dev *vlan = netdev_priv(upper);
6918
6919 if (vlan->fwd_priv)
6920 netif_tx_wake_all_queues(upper);
6921 }
6922
6923 return 0;
6924}
6925
/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print the link up message
 * @adapter: pointer to the device adapter structure
 **/
6931static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
6932{
6933 struct net_device *netdev = adapter->netdev;
6934 struct ixgbe_hw *hw = &adapter->hw;
6935 u32 link_speed = adapter->link_speed;
6936 const char *speed_str;
6937 bool flow_rx, flow_tx;
6938
6939
6940 if (netif_carrier_ok(netdev))
6941 return;
6942
6943 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
6944
6945 switch (hw->mac.type) {
6946 case ixgbe_mac_82598EB: {
6947 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6948 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
6949 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
6950 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
6951 }
6952 break;
6953 case ixgbe_mac_X540:
6954 case ixgbe_mac_X550:
6955 case ixgbe_mac_X550EM_x:
6956 case ixgbe_mac_x550em_a:
6957 case ixgbe_mac_82599EB: {
6958 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6959 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6960 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6961 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
6962 }
6963 break;
6964 default:
6965 flow_tx = false;
6966 flow_rx = false;
6967 break;
6968 }
6969
6970 adapter->last_rx_ptp_check = jiffies;
6971
6972 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6973 ixgbe_ptp_start_cyclecounter(adapter);
6974
6975 switch (link_speed) {
6976 case IXGBE_LINK_SPEED_10GB_FULL:
6977 speed_str = "10 Gbps";
6978 break;
6979 case IXGBE_LINK_SPEED_2_5GB_FULL:
6980 speed_str = "2.5 Gbps";
6981 break;
6982 case IXGBE_LINK_SPEED_1GB_FULL:
6983 speed_str = "1 Gbps";
6984 break;
6985 case IXGBE_LINK_SPEED_100_FULL:
6986 speed_str = "100 Mbps";
6987 break;
6988 case IXGBE_LINK_SPEED_10_FULL:
6989 speed_str = "10 Mbps";
6990 break;
6991 default:
6992 speed_str = "unknown speed";
6993 break;
6994 }
6995 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
6996 ((flow_rx && flow_tx) ? "RX/TX" :
6997 (flow_rx ? "RX" :
6998 (flow_tx ? "TX" : "None"))));
6999
7000 netif_carrier_on(netdev);
7001 ixgbe_check_vf_rate_limit(adapter);
7002
7003
7004 netif_tx_wake_all_queues(adapter->netdev);
7005
7006
7007 rtnl_lock();
7008 netdev_walk_all_upper_dev_rcu(adapter->netdev,
7009 ixgbe_enable_macvlan, NULL);
7010 rtnl_unlock();
7011
7012
7013 ixgbe_update_default_up(adapter);
7014
7015
7016 ixgbe_ping_all_vfs(adapter);
7017}
7018
7019
7020
7021
7022
7023
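/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print the link down message
 * @adapter: pointer to the adapter structure
 */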
7024static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
7025{
7026 struct net_device *netdev = adapter->netdev;
7027 struct ixgbe_hw *hw = &adapter->hw;
7028
7029 adapter->link_up = false;
7030 adapter->link_speed = 0;
7031
7032
7033 if (!netif_carrier_ok(netdev))
7034 return;
7035
7036
7037 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
7038 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
7039
7040 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
7041 ixgbe_ptp_start_cyclecounter(adapter);
7042
7043 e_info(drv, "NIC Link is Down\n");
7044 netif_carrier_off(netdev);
7045
7046
7047 ixgbe_ping_all_vfs(adapter);
7048}
7049
7050static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
7051{
7052 int i;
7053
7054 for (i = 0; i < adapter->num_tx_queues; i++) {
7055 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
7056
7057 if (tx_ring->next_to_use != tx_ring->next_to_clean)
7058 return true;
7059 }
7060
7061 return false;
7062}
7063
7064static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
7065{
7066 struct ixgbe_hw *hw = &adapter->hw;
7067 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
7068 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
7069
7070 int i, j;
7071
7072 if (!adapter->num_vfs)
7073 return false;
7074
7075
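/* the Tx flush on link loss is only needed for MACs prior to X550 */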
7076 if (hw->mac.type >= ixgbe_mac_X550)
7077 return false;
7078
7079 for (i = 0; i < adapter->num_vfs; i++) {
7080 for (j = 0; j < q_per_pool; j++) {
7081 u32 h, t;
7082
7083 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
7084 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
7085
7086 if (h != t)
7087 return true;
7088 }
7089 }
7090
7091 return false;
7092}
7093
7094
7095
7096
7097
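/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 */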
7098static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
7099{
7100 if (!netif_carrier_ok(adapter->netdev)) {
7101 if (ixgbe_ring_tx_pending(adapter) ||
7102 ixgbe_vf_tx_pending(adapter)) {
7103
7104
7105
7106
7107
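/* We've lost link, so the controller stops DMA, but we've got
 * queued Tx work that's never going to get done, so reset the
 * controller to flush Tx.  (Do the reset outside of interrupt
 * context.)
 */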
7108 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
7109 set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
7110 }
7111 }
7112}
7113
7114#ifdef CONFIG_PCI_IOV
7115static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
7116 struct pci_dev *vfdev)
7117{
7118 if (!pci_wait_for_pending_transaction(vfdev))
7119 e_dev_warn("Issuing VFLR with pending transactions\n");
7120
7121 e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
7122 pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
7123
7124 msleep(100);
7125}
7126
7127static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
7128{
7129 struct ixgbe_hw *hw = &adapter->hw;
7130 struct pci_dev *pdev = adapter->pdev;
7131 unsigned int vf;
7132 u32 gpc;
7133
7134 if (!(netif_carrier_ok(adapter->netdev)))
7135 return;
7136
7137 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
7138 if (gpc)
7139 return;
7140
7141
7142
7143
7144
7145
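/* Check to see if a bad DMA write target from an errant or
 * malicious VF has caused a PCIe error.  If so then we can
 * issue a VFLR to the offending VF(s) and then resume without
 * requesting a full slot reset.
 */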
7146 if (!pdev)
7147 return;
7148
7149
7150 for (vf = 0; vf < adapter->num_vfs; ++vf) {
7151 struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
7152 u16 status_reg;
7153
7154 if (!vfdev)
7155 continue;
7156 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
7157 if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
7158 status_reg & PCI_STATUS_REC_MASTER_ABORT)
7159 ixgbe_issue_vf_flr(adapter, vfdev);
7160 }
7161}
7162
7163static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
7164{
7165 u32 ssvpc;
7166
7167
7168 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
7169 adapter->num_vfs == 0)
7170 return;
7171
7172 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
7173
7174
7175
7176
7177
7178 if (!ssvpc)
7179 return;
7180
7181 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
7182}
7183#else
7184static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
7185{
7186}
7187
7188static void
7189ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
7190{
7191}
7192#endif
7193
7194
7195
7196
7197
7198
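/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 */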
7199static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
7200{
7201
7202 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7203 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7204 test_bit(__IXGBE_RESETTING, &adapter->state))
7205 return;
7206
7207 ixgbe_watchdog_update_link(adapter);
7208
7209 if (adapter->link_up)
7210 ixgbe_watchdog_link_is_up(adapter);
7211 else
7212 ixgbe_watchdog_link_is_down(adapter);
7213
7214 ixgbe_check_for_bad_vf(adapter);
7215 ixgbe_spoof_check(adapter);
7216 ixgbe_update_stats(adapter);
7217
7218 ixgbe_watchdog_flush_tx(adapter);
7219}
7220
7221
7222
7223
7224
7225static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
7226{
7227 struct ixgbe_hw *hw = &adapter->hw;
7228 s32 err;
7229
7230
7231 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
7232 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7233 return;
7234
7235 if (adapter->sfp_poll_time &&
7236 time_after(adapter->sfp_poll_time, jiffies))
7237 return;
7238
7239
7240 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7241 return;
7242
7243 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
7244
7245 err = hw->phy.ops.identify_sfp(hw);
7246 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7247 goto sfp_out;
7248
7249 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
7250
7251
7252 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
7253 }
7254
7255
7256 if (err)
7257 goto sfp_out;
7258
7259
7260 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
7261 goto sfp_out;
7262
7263 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
7264
7265
7266
7267
7268
7269
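/* A module may be identified correctly, but the EEPROM may not have
 * support for that module.  setup_sfp() will fail in that case, so
 * we should not allow that module to load.
 */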
7270 if (hw->mac.type == ixgbe_mac_82598EB)
7271 err = hw->phy.ops.reset(hw);
7272 else
7273 err = hw->mac.ops.setup_sfp(hw);
7274
7275 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
7276 goto sfp_out;
7277
7278 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
7279 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
7280
7281sfp_out:
7282 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7283
7284 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
7285 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
7286 e_dev_err("failed to initialize because an unsupported "
7287 "SFP+ module type was detected.\n");
7288 e_dev_err("Reload the driver after installing a "
7289 "supported module.\n");
7290 unregister_netdev(adapter->netdev);
7291 }
7292}
7293
7294
7295
7296
7297
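/**
 * ixgbe_sfp_link_config_subtask - set up link SFP if not already done
 * @adapter: the ixgbe adapter structure
 */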
7298static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
7299{
7300 struct ixgbe_hw *hw = &adapter->hw;
7301 u32 speed;
7302 bool autoneg = false;
7303
7304 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
7305 return;
7306
7307
7308 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
7309 return;
7310
7311 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
7312
7313 speed = hw->phy.autoneg_advertised;
7314 if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
7315 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
7316
7317
7318 if (!autoneg) {
7319 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
7320 speed = IXGBE_LINK_SPEED_10GB_FULL;
7321 }
7322 }
7323
7324 if (hw->mac.ops.setup_link)
7325 hw->mac.ops.setup_link(hw, speed, true);
7326
7327 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
7328 adapter->link_check_timeout = jiffies;
7329 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
7330}
7331
7332
7333
7334
7335
7336static void ixgbe_service_timer(unsigned long data)
7337{
7338 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
7339 unsigned long next_event_offset;
7340
7341
7342 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
7343 next_event_offset = HZ / 10;
7344 else
7345 next_event_offset = HZ * 2;
7346
7347
7348 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
7349
7350 ixgbe_service_event_schedule(adapter);
7351}
7352
7353static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
7354{
7355 struct ixgbe_hw *hw = &adapter->hw;
7356 u32 status;
7357
7358 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
7359 return;
7360
7361 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
7362
7363 if (!hw->phy.ops.handle_lasi)
7364 return;
7365
7366 status = hw->phy.ops.handle_lasi(&adapter->hw);
7367 if (status != IXGBE_ERR_OVERTEMP)
7368 return;
7369
7370 e_crit(drv, "%s\n", ixgbe_overheat_msg);
7371}
7372
7373static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
7374{
7375 if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
7376 return;
7377
7378
7379 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
7380 test_bit(__IXGBE_REMOVING, &adapter->state) ||
7381 test_bit(__IXGBE_RESETTING, &adapter->state))
7382 return;
7383
7384 ixgbe_dump(adapter);
7385 netdev_err(adapter->netdev, "Reset adapter\n");
7386 adapter->tx_timeout_count++;
7387
7388 rtnl_lock();
7389 ixgbe_reinit_locked(adapter);
7390 rtnl_unlock();
7391}
7392
7393
7394
7395
7396
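/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 */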
7397static void ixgbe_service_task(struct work_struct *work)
7398{
7399 struct ixgbe_adapter *adapter = container_of(work,
7400 struct ixgbe_adapter,
7401 service_task);
7402 if (ixgbe_removed(adapter->hw.hw_addr)) {
7403 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
7404 rtnl_lock();
7405 ixgbe_down(adapter);
7406 rtnl_unlock();
7407 }
7408 ixgbe_service_event_complete(adapter);
7409 return;
7410 }
7411 if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
7412 rtnl_lock();
7413 adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
7414 udp_tunnel_get_rx_info(adapter->netdev);
7415 rtnl_unlock();
7416 }
7417 ixgbe_reset_subtask(adapter);
7418 ixgbe_phy_interrupt_subtask(adapter);
7419 ixgbe_sfp_detection_subtask(adapter);
7420 ixgbe_sfp_link_config_subtask(adapter);
7421 ixgbe_check_overtemp_subtask(adapter);
7422 ixgbe_watchdog_subtask(adapter);
7423 ixgbe_fdir_reinit_subtask(adapter);
7424 ixgbe_check_hang_subtask(adapter);
7425
7426 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
7427 ixgbe_ptp_overflow_check(adapter);
7428 ixgbe_ptp_rx_hang(adapter);
7429 }
7430
7431 ixgbe_service_event_complete(adapter);
7432}
7433
7434static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7435 struct ixgbe_tx_buffer *first,
7436 u8 *hdr_len)
7437{
7438 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
7439 struct sk_buff *skb = first->skb;
7440 union {
7441 struct iphdr *v4;
7442 struct ipv6hdr *v6;
7443 unsigned char *hdr;
7444 } ip;
7445 union {
7446 struct tcphdr *tcp;
7447 unsigned char *hdr;
7448 } l4;
7449 u32 paylen, l4_offset;
7450 int err;
7451
7452 if (skb->ip_summed != CHECKSUM_PARTIAL)
7453 return 0;
7454
7455 if (!skb_is_gso(skb))
7456 return 0;
7457
7458 err = skb_cow_head(skb, 0);
7459 if (err < 0)
7460 return err;
7461
7462 ip.hdr = skb_network_header(skb);
7463 l4.hdr = skb_checksum_start(skb);
7464
7465
7466 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
7467
7468
7469 if (ip.v4->version == 4) {
7470 unsigned char *csum_start = skb_checksum_start(skb);
7471 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
7472
7473
7474
7475
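/* IP header will have to cancel out any data that
 * is not a part of the outer IP header
 */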
7476 ip.v4->check = csum_fold(csum_partial(trans_start,
7477 csum_start - trans_start,
7478 0));
7479 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7480
7481 ip.v4->tot_len = 0;
7482 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7483 IXGBE_TX_FLAGS_CSUM |
7484 IXGBE_TX_FLAGS_IPV4;
7485 } else {
7486 ip.v6->payload_len = 0;
7487 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
7488 IXGBE_TX_FLAGS_CSUM;
7489 }
7490
7491
7492 l4_offset = l4.hdr - skb->data;
7493
7494
7495 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
7496
7497
7498 paylen = skb->len - l4_offset;
7499 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
7500
7501
7502 first->gso_segs = skb_shinfo(skb)->gso_segs;
7503 first->bytecount += (first->gso_segs - 1) * *hdr_len;
7504
7505
7506 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
7507 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
7508
7509
7510 vlan_macip_lens = l4.hdr - ip.hdr;
7511 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
7512 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7513
7514 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
7515 mss_l4len_idx);
7516
7517 return 1;
7518}
7519
7520static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
7521{
7522 unsigned int offset = 0;
7523
7524 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
7525
7526 return offset == skb_checksum_start_offset(skb);
7527}
7528
7529static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
7530 struct ixgbe_tx_buffer *first)
7531{
7532 struct sk_buff *skb = first->skb;
7533 u32 vlan_macip_lens = 0;
7534 u32 type_tucmd = 0;
7535
7536 if (skb->ip_summed != CHECKSUM_PARTIAL) {
7537csum_failed:
7538 if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
7539 IXGBE_TX_FLAGS_CC)))
7540 return;
7541 goto no_csum;
7542 }
7543
7544 switch (skb->csum_offset) {
7545 case offsetof(struct tcphdr, check):
7546 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
7547
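/* fall through */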
7548 case offsetof(struct udphdr, check):
7549 break;
7550 case offsetof(struct sctphdr, checksum):
7551
7552 if (((first->protocol == htons(ETH_P_IP)) &&
7553 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
7554 ((first->protocol == htons(ETH_P_IPV6)) &&
7555 ixgbe_ipv6_csum_is_sctp(skb))) {
7556 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
7557 break;
7558 }
7559
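/* fall through */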
7560 default:
7561 skb_checksum_help(skb);
7562 goto csum_failed;
7563 }
7564
7565
7566 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
7567 vlan_macip_lens = skb_checksum_start_offset(skb) -
7568 skb_network_offset(skb);
7569no_csum:
7570
7571 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
7572 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7573
7574 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
7575}
7576
7577#define IXGBE_SET_FLAG(_input, _flag, _result) \
7578 ((_flag <= _result) ? \
7579 ((u32)(_input & _flag) * (_result / _flag)) : \
7580 ((u32)(_input & _flag) / (_flag / _result)))
7581
7582static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
7583{
7584
7585 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
7586 IXGBE_ADVTXD_DCMD_DEXT |
7587 IXGBE_ADVTXD_DCMD_IFCS;
7588
7589
7590 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
7591 IXGBE_ADVTXD_DCMD_VLE);
7592
7593
7594 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
7595 IXGBE_ADVTXD_DCMD_TSE);
7596
7597
7598 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
7599 IXGBE_ADVTXD_MAC_TSTAMP);
7600
7601
7602 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
7603
7604 return cmd_type;
7605}
7606
7607static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
7608 u32 tx_flags, unsigned int paylen)
7609{
7610 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
7611
7612
7613 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7614 IXGBE_TX_FLAGS_CSUM,
7615 IXGBE_ADVTXD_POPTS_TXSM);
7616
7617
7618 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7619 IXGBE_TX_FLAGS_IPV4,
7620 IXGBE_ADVTXD_POPTS_IXSM);
7621
7622
7623
7624
7625
7626 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7627 IXGBE_TX_FLAGS_CC,
7628 IXGBE_ADVTXD_CC);
7629
7630 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
7631}
7632
7633static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7634{
7635 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
7636
7637
7638
7639
7640
7641 smp_mb();
7642
7643
7644
7645
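/* We need to check again in case another CPU has just
 * made room available.
 */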
7646 if (likely(ixgbe_desc_unused(tx_ring) < size))
7647 return -EBUSY;
7648
7649
7650 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
7651 ++tx_ring->tx_stats.restart_queue;
7652 return 0;
7653}
7654
7655static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7656{
7657 if (likely(ixgbe_desc_unused(tx_ring) >= size))
7658 return 0;
7659
7660 return __ixgbe_maybe_stop_tx(tx_ring, size);
7661}
7662
7663#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
7664 IXGBE_TXD_CMD_RS)
7665
7666static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
7667 struct ixgbe_tx_buffer *first,
7668 const u8 hdr_len)
7669{
7670 struct sk_buff *skb = first->skb;
7671 struct ixgbe_tx_buffer *tx_buffer;
7672 union ixgbe_adv_tx_desc *tx_desc;
7673 struct skb_frag_struct *frag;
7674 dma_addr_t dma;
7675 unsigned int data_len, size;
7676 u32 tx_flags = first->tx_flags;
7677 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
7678 u16 i = tx_ring->next_to_use;
7679
7680 tx_desc = IXGBE_TX_DESC(tx_ring, i);
7681
7682 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
7683
7684 size = skb_headlen(skb);
7685 data_len = skb->data_len;
7686
7687#ifdef IXGBE_FCOE
7688 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
7689 if (data_len < sizeof(struct fcoe_crc_eof)) {
7690 size -= sizeof(struct fcoe_crc_eof) - data_len;
7691 data_len = 0;
7692 } else {
7693 data_len -= sizeof(struct fcoe_crc_eof);
7694 }
7695 }
7696
7697#endif
7698 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
7699
7700 tx_buffer = first;
7701
7702 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
7703 if (dma_mapping_error(tx_ring->dev, dma))
7704 goto dma_error;
7705
7706
7707 dma_unmap_len_set(tx_buffer, len, size);
7708 dma_unmap_addr_set(tx_buffer, dma, dma);
7709
7710 tx_desc->read.buffer_addr = cpu_to_le64(dma);
7711
7712 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
7713 tx_desc->read.cmd_type_len =
7714 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
7715
7716 i++;
7717 tx_desc++;
7718 if (i == tx_ring->count) {
7719 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7720 i = 0;
7721 }
7722 tx_desc->read.olinfo_status = 0;
7723
7724 dma += IXGBE_MAX_DATA_PER_TXD;
7725 size -= IXGBE_MAX_DATA_PER_TXD;
7726
7727 tx_desc->read.buffer_addr = cpu_to_le64(dma);
7728 }
7729
7730 if (likely(!data_len))
7731 break;
7732
7733 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
7734
7735 i++;
7736 tx_desc++;
7737 if (i == tx_ring->count) {
7738 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7739 i = 0;
7740 }
7741 tx_desc->read.olinfo_status = 0;
7742
7743#ifdef IXGBE_FCOE
7744 size = min_t(unsigned int, data_len, skb_frag_size(frag));
7745#else
7746 size = skb_frag_size(frag);
7747#endif
7748 data_len -= size;
7749
7750 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
7751 DMA_TO_DEVICE);
7752
7753 tx_buffer = &tx_ring->tx_buffer_info[i];
7754 }
7755
7756
7757 cmd_type |= size | IXGBE_TXD_CMD;
7758 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
7759
7760 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
7761
7762
7763 first->time_stamp = jiffies;
7764
7765
7766
7767
7768
7769
7770
7771
7772
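/* Force memory writes to complete before letting h/w know there
 * are new descriptors to fetch.  (Only applicable for weak-ordered
 * memory model archs, such as IA-64.)
 *
 * We also need this memory barrier to make certain all of the
 * status bits have been updated before next_to_watch is written.
 */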
7773 wmb();
7774
7775
7776 first->next_to_watch = tx_desc;
7777
7778 i++;
7779 if (i == tx_ring->count)
7780 i = 0;
7781
7782 tx_ring->next_to_use = i;
7783
7784 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
7785
7786 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
7787 writel(i, tx_ring->tail);
7788
7789
7790
7791
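/* we need this if more than one processor can write to our tail
 * at a time, it synchronizes IO on IA64/Altix systems
 */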
7792 mmiowb();
7793 }
7794
7795 return;
7796dma_error:
7797 dev_err(tx_ring->dev, "TX DMA map failed\n");
7798 tx_buffer = &tx_ring->tx_buffer_info[i];
7799
7800
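/* clear dma mappings for failed tx_buffer_info map */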
7801 while (tx_buffer != first) {
7802 if (dma_unmap_len(tx_buffer, len))
7803 dma_unmap_page(tx_ring->dev,
7804 dma_unmap_addr(tx_buffer, dma),
7805 dma_unmap_len(tx_buffer, len),
7806 DMA_TO_DEVICE);
7807 dma_unmap_len_set(tx_buffer, len, 0);
7808
7809 if (i-- == 0)
7810 i += tx_ring->count;
7811 tx_buffer = &tx_ring->tx_buffer_info[i];
7812 }
7813
7814 if (dma_unmap_len(tx_buffer, len))
7815 dma_unmap_single(tx_ring->dev,
7816 dma_unmap_addr(tx_buffer, dma),
7817 dma_unmap_len(tx_buffer, len),
7818 DMA_TO_DEVICE);
7819 dma_unmap_len_set(tx_buffer, len, 0);
7820
7821 dev_kfree_skb_any(first->skb);
7822 first->skb = NULL;
7823
7824 tx_ring->next_to_use = i;
7825}
7826
7827static void ixgbe_atr(struct ixgbe_ring *ring,
7828 struct ixgbe_tx_buffer *first)
7829{
7830 struct ixgbe_q_vector *q_vector = ring->q_vector;
7831 union ixgbe_atr_hash_dword input = { .dword = 0 };
7832 union ixgbe_atr_hash_dword common = { .dword = 0 };
7833 union {
7834 unsigned char *network;
7835 struct iphdr *ipv4;
7836 struct ipv6hdr *ipv6;
7837 } hdr;
7838 struct tcphdr *th;
7839 unsigned int hlen;
7840 struct sk_buff *skb;
7841 __be16 vlan_id;
7842 int l4_proto;
7843
7844
7845 if (!q_vector)
7846 return;
7847
7848
7849 if (!ring->atr_sample_rate)
7850 return;
7851
7852 ring->atr_count++;
7853
7854
7855 if ((first->protocol != htons(ETH_P_IP)) &&
7856 (first->protocol != htons(ETH_P_IPV6)))
7857 return;
7858
7859
7860 skb = first->skb;
7861 hdr.network = skb_network_header(skb);
7862 if (unlikely(hdr.network <= skb->data))
7863 return;
7864 if (skb->encapsulation &&
7865 first->protocol == htons(ETH_P_IP) &&
7866 hdr.ipv4->protocol == IPPROTO_UDP) {
7867 struct ixgbe_adapter *adapter = q_vector->adapter;
7868
7869 if (unlikely(skb_tail_pointer(skb) < hdr.network +
7870 VXLAN_HEADROOM))
7871 return;
7872
7873
7874 if (adapter->vxlan_port &&
7875 udp_hdr(skb)->dest == adapter->vxlan_port)
7876 hdr.network = skb_inner_network_header(skb);
7877
7878 if (adapter->geneve_port &&
7879 udp_hdr(skb)->dest == adapter->geneve_port)
7880 hdr.network = skb_inner_network_header(skb);
7881 }
7882
7883
7884
7885
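/* make sure at least 40 bytes of network header are present;
 * that covers an IPv6 header or an IPv4 header plus TCP ports
 * before the headers are dereferenced below
 */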
7886 if (unlikely(skb_tail_pointer(skb) < hdr.network + 40))
7887 return;
7888
7889
7890 switch (hdr.ipv4->version) {
7891 case IPVERSION:
7892
7893 hlen = (hdr.network[0] & 0x0F) << 2;
7894 l4_proto = hdr.ipv4->protocol;
7895 break;
7896 case 6:
7897 hlen = hdr.network - skb->data;
7898 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
7899 hlen -= hdr.network - skb->data;
7900 break;
7901 default:
7902 return;
7903 }
7904
7905 if (l4_proto != IPPROTO_TCP)
7906 return;
7907
7908 if (unlikely(skb_tail_pointer(skb) < hdr.network +
7909 hlen + sizeof(struct tcphdr)))
7910 return;
7911
7912 th = (struct tcphdr *)(hdr.network + hlen);
7913
7914
7915 if (th->fin)
7916 return;
7917
7918
7919 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
7920 return;
7921
7922
7923 ring->atr_count = 0;
7924
7925 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
7926
7927
7928
7929
7930
7931
7932
7933
7934 input.formatted.vlan_id = vlan_id;
7935
7936
7937
7938
7939
7940 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
7941 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
7942 else
7943 common.port.src ^= th->dest ^ first->protocol;
7944 common.port.dst ^= th->source;
7945
7946 switch (hdr.ipv4->version) {
7947 case IPVERSION:
7948 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
7949 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
7950 break;
7951 case 6:
7952 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
7953 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
7954 hdr.ipv6->saddr.s6_addr32[1] ^
7955 hdr.ipv6->saddr.s6_addr32[2] ^
7956 hdr.ipv6->saddr.s6_addr32[3] ^
7957 hdr.ipv6->daddr.s6_addr32[0] ^
7958 hdr.ipv6->daddr.s6_addr32[1] ^
7959 hdr.ipv6->daddr.s6_addr32[2] ^
7960 hdr.ipv6->daddr.s6_addr32[3];
7961 break;
7962 default:
7963 break;
7964 }
7965
7966 if (hdr.network != skb_network_header(skb))
7967 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
7968
7969
7970 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
7971 input, common, ring->queue_index);
7972}
7973
7974static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
7975 void *accel_priv, select_queue_fallback_t fallback)
7976{
7977 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
7978#ifdef IXGBE_FCOE
7979 struct ixgbe_adapter *adapter;
7980 struct ixgbe_ring_feature *f;
7981 int txq;
7982#endif
7983
7984 if (fwd_adapter)
7985 return skb->queue_mapping + fwd_adapter->tx_base_queue;
7986
7987#ifdef IXGBE_FCOE
7988
7989
7990
7991
7992
7993 switch (vlan_get_protocol(skb)) {
7994 case htons(ETH_P_FCOE):
7995 case htons(ETH_P_FIP):
7996 adapter = netdev_priv(dev);
7997
7998 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
7999 break;
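/* fall through */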
8000 default:
8001 return fallback(dev, skb);
8002 }
8003
8004 f = &adapter->ring_feature[RING_F_FCOE];
8005
8006 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
8007 smp_processor_id();
8008
8009 while (txq >= f->indices)
8010 txq -= f->indices;
8011
8012 return txq + f->offset;
8013#else
8014 return fallback(dev, skb);
8015#endif
8016}
8017
8018netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8019 struct ixgbe_adapter *adapter,
8020 struct ixgbe_ring *tx_ring)
8021{
8022 struct ixgbe_tx_buffer *first;
8023 int tso;
8024 u32 tx_flags = 0;
8025 unsigned short f;
8026 u16 count = TXD_USE_COUNT(skb_headlen(skb));
8027 __be16 protocol = skb->protocol;
8028 u8 hdr_len = 0;
8029
8030
8031
8032
8033
8034
8035
8036
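/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
 *       + 2 desc gap to keep tail from touching head,
 *       + 1 desc for context descriptor,
 * otherwise try next time
 */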
8037 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
8038 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
8039
8040 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
8041 tx_ring->tx_stats.tx_busy++;
8042 return NETDEV_TX_BUSY;
8043 }
8044
8045
8046 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
8047 first->skb = skb;
8048 first->bytecount = skb->len;
8049 first->gso_segs = 1;
8050
8051
8052 if (skb_vlan_tag_present(skb)) {
8053 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
8054 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8055
8056 } else if (protocol == htons(ETH_P_8021Q)) {
8057 struct vlan_hdr *vhdr, _vhdr;
8058 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
8059 if (!vhdr)
8060 goto out_drop;
8061
8062 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
8063 IXGBE_TX_FLAGS_VLAN_SHIFT;
8064 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
8065 }
8066 protocol = vlan_get_protocol(skb);
8067
8068 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
8069 adapter->ptp_clock &&
8070 !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
8071 &adapter->state)) {
8072 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8073 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
8074
8075
8076 adapter->ptp_tx_skb = skb_get(skb);
8077 adapter->ptp_tx_start = jiffies;
8078 schedule_work(&adapter->ptp_tx_work);
8079 }
8080
8081 skb_tx_timestamp(skb);
8082
8083#ifdef CONFIG_PCI_IOV
8084
8085
8086
8087
8088 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8089 tx_flags |= IXGBE_TX_FLAGS_CC;
8090
8091#endif
8092
8093 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8094 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
8095 (skb->priority != TC_PRIO_CONTROL))) {
8096 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
8097 tx_flags |= (skb->priority & 0x7) <<
8098 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
8099 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
8100 struct vlan_ethhdr *vhdr;
8101
8102 if (skb_cow_head(skb, 0))
8103 goto out_drop;
8104 vhdr = (struct vlan_ethhdr *)skb->data;
8105 vhdr->h_vlan_TCI = htons(tx_flags >>
8106 IXGBE_TX_FLAGS_VLAN_SHIFT);
8107 } else {
8108 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
8109 }
8110 }
8111
8112
8113 first->tx_flags = tx_flags;
8114 first->protocol = protocol;
8115
8116#ifdef IXGBE_FCOE
8117
8118 if ((protocol == htons(ETH_P_FCOE)) &&
8119 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
8120 tso = ixgbe_fso(tx_ring, first, &hdr_len);
8121 if (tso < 0)
8122 goto out_drop;
8123
8124 goto xmit_fcoe;
8125 }
8126
8127#endif
8128 tso = ixgbe_tso(tx_ring, first, &hdr_len);
8129 if (tso < 0)
8130 goto out_drop;
8131 else if (!tso)
8132 ixgbe_tx_csum(tx_ring, first);
8133
8134
8135 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
8136 ixgbe_atr(tx_ring, first);
8137
8138#ifdef IXGBE_FCOE
8139xmit_fcoe:
8140#endif
8141 ixgbe_tx_map(tx_ring, first, hdr_len);
8142
8143 return NETDEV_TX_OK;
8144
8145out_drop:
8146 dev_kfree_skb_any(first->skb);
8147 first->skb = NULL;
8148
8149 return NETDEV_TX_OK;
8150}
8151
8152static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
8153 struct net_device *netdev,
8154 struct ixgbe_ring *ring)
8155{
8156 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8157 struct ixgbe_ring *tx_ring;
8158
8159
8160
8161
8162
8163 if (skb_put_padto(skb, 17))
8164 return NETDEV_TX_OK;
8165
8166 tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
8167
8168 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
8169}
8170
8171static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
8172 struct net_device *netdev)
8173{
8174 return __ixgbe_xmit_frame(skb, netdev, NULL);
8175}
8176
8177
8178
8179
8180
8181
8182
8183
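/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */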
8184static int ixgbe_set_mac(struct net_device *netdev, void *p)
8185{
8186 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8187 struct ixgbe_hw *hw = &adapter->hw;
8188 struct sockaddr *addr = p;
8189
8190 if (!is_valid_ether_addr(addr->sa_data))
8191 return -EADDRNOTAVAIL;
8192
8193 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
8194 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
8195
8196 ixgbe_mac_set_default_filter(adapter);
8197
8198 return 0;
8199}
8200
8201static int
8202ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
8203{
8204 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8205 struct ixgbe_hw *hw = &adapter->hw;
8206 u16 value;
8207 int rc;
8208
8209 if (prtad != hw->phy.mdio.prtad)
8210 return -EINVAL;
8211 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
8212 if (!rc)
8213 rc = value;
8214 return rc;
8215}
8216
8217static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
8218 u16 addr, u16 value)
8219{
8220 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8221 struct ixgbe_hw *hw = &adapter->hw;
8222
8223 if (prtad != hw->phy.mdio.prtad)
8224 return -EINVAL;
8225 return hw->phy.ops.write_reg(hw, addr, devad, value);
8226}
8227
8228static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
8229{
8230 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8231
8232 switch (cmd) {
8233 case SIOCSHWTSTAMP:
8234 return ixgbe_ptp_set_ts_config(adapter, req);
8235 case SIOCGHWTSTAMP:
8236 return ixgbe_ptp_get_ts_config(adapter, req);
8237 default:
8238 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8239 }
8240}
8241
8242
8243
8244
8245
8246
8247
8248
8249static int ixgbe_add_sanmac_netdev(struct net_device *dev)
8250{
8251 int err = 0;
8252 struct ixgbe_adapter *adapter = netdev_priv(dev);
8253 struct ixgbe_hw *hw = &adapter->hw;
8254
8255 if (is_valid_ether_addr(hw->mac.san_addr)) {
8256 rtnl_lock();
8257 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
8258 rtnl_unlock();
8259
8260
8261 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
8262 }
8263 return err;
8264}
8265
8266
8267
8268
8269
8270
8271
8272
8273static int ixgbe_del_sanmac_netdev(struct net_device *dev)
8274{
8275 int err = 0;
8276 struct ixgbe_adapter *adapter = netdev_priv(dev);
8277 struct ixgbe_mac_info *mac = &adapter->hw.mac;
8278
8279 if (is_valid_ether_addr(mac->san_addr)) {
8280 rtnl_lock();
8281 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
8282 rtnl_unlock();
8283 }
8284 return err;
8285}
8286
8287#ifdef CONFIG_NET_POLL_CONTROLLER
8288
8289
8290
8291
8292
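/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */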
8293static void ixgbe_netpoll(struct net_device *netdev)
8294{
8295 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8296 int i;
8297
8298
8299 if (test_bit(__IXGBE_DOWN, &adapter->state))
8300 return;
8301
8302
8303 for (i = 0; i < adapter->num_q_vectors; i++)
8304 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
8305}
8306
8307#endif
8308
8309static void ixgbe_get_stats64(struct net_device *netdev,
8310 struct rtnl_link_stats64 *stats)
8311{
8312 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8313 int i;
8314
8315 rcu_read_lock();
8316 for (i = 0; i < adapter->num_rx_queues; i++) {
8317 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
8318 u64 bytes, packets;
8319 unsigned int start;
8320
8321 if (ring) {
8322 do {
8323 start = u64_stats_fetch_begin_irq(&ring->syncp);
8324 packets = ring->stats.packets;
8325 bytes = ring->stats.bytes;
8326 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8327 stats->rx_packets += packets;
8328 stats->rx_bytes += bytes;
8329 }
8330 }
8331
8332 for (i = 0; i < adapter->num_tx_queues; i++) {
8333 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
8334 u64 bytes, packets;
8335 unsigned int start;
8336
8337 if (ring) {
8338 do {
8339 start = u64_stats_fetch_begin_irq(&ring->syncp);
8340 packets = ring->stats.packets;
8341 bytes = ring->stats.bytes;
8342 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
8343 stats->tx_packets += packets;
8344 stats->tx_bytes += bytes;
8345 }
8346 }
8347 rcu_read_unlock();
8348
8349
8350 stats->multicast = netdev->stats.multicast;
8351 stats->rx_errors = netdev->stats.rx_errors;
8352 stats->rx_length_errors = netdev->stats.rx_length_errors;
8353 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
8354 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
8355}
8356
8357#ifdef CONFIG_IXGBE_DCB
8358
8359
8360
8361
8362
8363
8364
8365
8366static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
8367{
8368 struct ixgbe_hw *hw = &adapter->hw;
8369 u32 reg, rsave;
8370 int i;
8371
8372
8373
8374
8375 if (hw->mac.type == ixgbe_mac_82598EB)
8376 return;
8377
8378 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
8379 rsave = reg;
8380
8381 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
8382 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
8383
8384
8385 if (up2tc > tc)
8386 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
8387 }
8388
8389 if (reg != rsave)
8390 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
8391
8392 return;
8393}
8394
8395
8396
8397
8398
8399
8400
8401static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
8402{
8403 struct net_device *dev = adapter->netdev;
8404 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
8405 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
8406 u8 prio;
8407
8408 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
8409 u8 tc = 0;
8410
8411 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
8412 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
8413 else if (ets)
8414 tc = ets->prio_tc[prio];
8415
8416 netdev_set_prio_tc_map(dev, prio, tc);
8417 }
8418}
8419
8420#endif
8421
8422
8423
8424
8425
8426
8427int ixgbe_setup_tc(struct net_device *dev, u8 tc)
8428{
8429 struct ixgbe_adapter *adapter = netdev_priv(dev);
8430 struct ixgbe_hw *hw = &adapter->hw;
8431 bool pools;
8432
8433
8434 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
8435 return -EINVAL;
8436
8437 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
8438 return -EINVAL;
8439
8440 pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
8441 if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
8442 return -EBUSY;
8443
8444
8445
8446
8447
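/* Hardware has to reinitialize queues and interrupts to
 * match packet buffer alignment.  Unfortunately, the
 * hardware is not flexible enough to do this dynamically.
 */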
8448 if (netif_running(dev))
8449 ixgbe_close(dev);
8450 else
8451 ixgbe_reset(adapter);
8452
8453 ixgbe_clear_interrupt_scheme(adapter);
8454
8455#ifdef CONFIG_IXGBE_DCB
8456 if (tc) {
8457 netdev_set_num_tc(dev, tc);
8458 ixgbe_set_prio_tc_map(adapter);
8459
8460 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
8461
8462 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
8463 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
8464 adapter->hw.fc.requested_mode = ixgbe_fc_none;
8465 }
8466 } else {
8467 netdev_reset_tc(dev);
8468
8469 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
8470 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
8471
8472 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
8473
8474 adapter->temp_dcb_cfg.pfc_mode_enable = false;
8475 adapter->dcb_cfg.pfc_mode_enable = false;
8476 }
8477
8478 ixgbe_validate_rtr(adapter, tc);
8479
8480#endif
8481 ixgbe_init_interrupt_scheme(adapter);
8482
8483 if (netif_running(dev))
8484 return ixgbe_open(dev);
8485
8486 return 0;
8487}
8488
8489static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
8490 struct tc_cls_u32_offload *cls)
8491{
8492 u32 hdl = cls->knode.handle;
8493 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
8494 u32 loc = cls->knode.handle & 0xfffff;
8495 int err = 0, i, j;
8496 struct ixgbe_jump_table *jump = NULL;
8497
8498 if (loc > IXGBE_MAX_HW_ENTRIES)
8499 return -EINVAL;
8500
8501 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
8502 return -EINVAL;
8503
8504
8505 if (uhtid != 0x800) {
8506 jump = adapter->jump_tables[uhtid];
8507 if (!jump)
8508 return -EINVAL;
8509 if (!test_bit(loc - 1, jump->child_loc_map))
8510 return -EINVAL;
8511 clear_bit(loc - 1, jump->child_loc_map);
8512 }
8513
8514
8515 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
8516 jump = adapter->jump_tables[i];
8517 if (jump && jump->link_hdl == hdl) {
8518
8519
8520
8521 for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
8522 if (!test_bit(j, jump->child_loc_map))
8523 continue;
8524 spin_lock(&adapter->fdir_perfect_lock);
8525 err = ixgbe_update_ethtool_fdir_entry(adapter,
8526 NULL,
8527 j + 1);
8528 spin_unlock(&adapter->fdir_perfect_lock);
8529 clear_bit(j, jump->child_loc_map);
8530 }
8531
8532 kfree(jump->input);
8533 kfree(jump->mask);
8534 kfree(jump);
8535 adapter->jump_tables[i] = NULL;
8536 return err;
8537 }
8538 }
8539
8540 spin_lock(&adapter->fdir_perfect_lock);
8541 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
8542 spin_unlock(&adapter->fdir_perfect_lock);
8543 return err;
8544}
8545
8546static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
8547 __be16 protocol,
8548 struct tc_cls_u32_offload *cls)
8549{
8550 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8551
8552 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8553 return -EINVAL;
8554
8555
8556
8557
8558 if (cls->hnode.divisor > 0)
8559 return -EINVAL;
8560
8561 set_bit(uhtid - 1, &adapter->tables);
8562 return 0;
8563}
8564
8565static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
8566 struct tc_cls_u32_offload *cls)
8567{
8568 u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
8569
8570 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8571 return -EINVAL;
8572
8573 clear_bit(uhtid - 1, &adapter->tables);
8574 return 0;
8575}
8576
8577#ifdef CONFIG_NET_CLS_ACT
8578struct upper_walk_data {
8579 struct ixgbe_adapter *adapter;
8580 u64 action;
8581 int ifindex;
8582 u8 queue;
8583};
8584
8585static int get_macvlan_queue(struct net_device *upper, void *_data)
8586{
8587 if (netif_is_macvlan(upper)) {
8588 struct macvlan_dev *dfwd = netdev_priv(upper);
8589 struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
8590 struct upper_walk_data *data = _data;
8591 struct ixgbe_adapter *adapter = data->adapter;
8592 int ifindex = data->ifindex;
8593
8594 if (vadapter && vadapter->netdev->ifindex == ifindex) {
8595 data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
8596 data->action = data->queue;
8597 return 1;
8598 }
8599 }
8600
8601 return 0;
8602}
8603
8604static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
8605 u8 *queue, u64 *action)
8606{
8607 unsigned int num_vfs = adapter->num_vfs, vf;
8608 struct upper_walk_data data;
8609 struct net_device *upper;
8610
8611
8612 for (vf = 0; vf < num_vfs; ++vf) {
8613 upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
8614 if (upper->ifindex == ifindex) {
8615 if (adapter->num_rx_pools > 1)
8616 *queue = vf * 2;
8617 else
8618 *queue = vf * adapter->num_rx_queues_per_pool;
8619
8620 *action = vf + 1;
8621 *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
8622 return 0;
8623 }
8624 }
8625
8626
8627 data.adapter = adapter;
8628 data.ifindex = ifindex;
8629 data.action = 0;
8630 data.queue = 0;
8631 if (netdev_walk_all_upper_dev_rcu(adapter->netdev,
8632 get_macvlan_queue, &data)) {
8633 *action = data.action;
8634 *queue = data.queue;
8635
8636 return 0;
8637 }
8638
8639 return -EINVAL;
8640}
8641
8642static int parse_tc_actions(struct ixgbe_adapter *adapter,
8643 struct tcf_exts *exts, u64 *action, u8 *queue)
8644{
8645 const struct tc_action *a;
8646 LIST_HEAD(actions);
8647 int err;
8648
8649 if (tc_no_actions(exts))
8650 return -EINVAL;
8651
8652 tcf_exts_to_list(exts, &actions);
8653 list_for_each_entry(a, &actions, list) {
8654
8655
8656 if (is_tcf_gact_shot(a)) {
8657 *action = IXGBE_FDIR_DROP_QUEUE;
8658 *queue = IXGBE_FDIR_DROP_QUEUE;
8659 return 0;
8660 }
8661
8662
8663 if (is_tcf_mirred_egress_redirect(a)) {
8664 int ifindex = tcf_mirred_ifindex(a);
8665
8666 err = handle_redirect_action(adapter, ifindex, queue,
8667 action);
8668 if (err == 0)
8669 return err;
8670 }
8671 }
8672
8673 return -EINVAL;
8674}
8675#else
8676static int parse_tc_actions(struct ixgbe_adapter *adapter,
8677 struct tcf_exts *exts, u64 *action, u8 *queue)
8678{
8679 return -EINVAL;
8680}
8681#endif
8682
8683static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
8684 union ixgbe_atr_input *mask,
8685 struct tc_cls_u32_offload *cls,
8686 struct ixgbe_mat_field *field_ptr,
8687 struct ixgbe_nexthdr *nexthdr)
8688{
8689 int i, j, off;
8690 __be32 val, m;
8691 bool found_entry = false, found_jump_field = false;
8692
8693 for (i = 0; i < cls->knode.sel->nkeys; i++) {
8694 off = cls->knode.sel->keys[i].off;
8695 val = cls->knode.sel->keys[i].val;
8696 m = cls->knode.sel->keys[i].mask;
8697
8698 for (j = 0; field_ptr[j].val; j++) {
8699 if (field_ptr[j].off == off) {
8700 field_ptr[j].val(input, mask, val, m);
8701 input->filter.formatted.flow_type |=
8702 field_ptr[j].type;
8703 found_entry = true;
8704 break;
8705 }
8706 }
8707 if (nexthdr) {
8708 if (nexthdr->off == cls->knode.sel->keys[i].off &&
8709 nexthdr->val == cls->knode.sel->keys[i].val &&
8710 nexthdr->mask == cls->knode.sel->keys[i].mask)
8711 found_jump_field = true;
8712 else
8713 continue;
8714 }
8715 }
8716
8717 if (nexthdr && !found_jump_field)
8718 return -EINVAL;
8719
8720 if (!found_entry)
8721 return 0;
8722
8723 mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
8724 IXGBE_ATR_L4TYPE_MASK;
8725
8726 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
8727 mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
8728
8729 return 0;
8730}
8731
8732static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8733 __be16 protocol,
8734 struct tc_cls_u32_offload *cls)
8735{
8736 u32 loc = cls->knode.handle & 0xfffff;
8737 struct ixgbe_hw *hw = &adapter->hw;
8738 struct ixgbe_mat_field *field_ptr;
8739 struct ixgbe_fdir_filter *input = NULL;
8740 union ixgbe_atr_input *mask = NULL;
8741 struct ixgbe_jump_table *jump = NULL;
8742 int i, err = -EINVAL;
8743 u8 queue;
8744 u32 uhtid, link_uhtid;
8745
8746 uhtid = TC_U32_USERHTID(cls->knode.handle);
8747 link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
8748
8749
8750
8751
8752
8753
8754
8755
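/* cls_u32 offsets start at the network header, and the hardware
 * parse graph used for u32 offload starts at the IPv4 header, so
 * only ETH_P_IP filters can be offloaded here; L2 matching is not
 * supported.
 */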
8756 if (protocol != htons(ETH_P_IP))
8757 return err;
8758
8759 if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
8760 e_err(drv, "Location out of range\n");
8761 return err;
8762 }
8763
8764
8765
8766
8767
8768
8769
8770
8771 if (uhtid == 0x800) {
8772 field_ptr = (adapter->jump_tables[0])->mat;
8773 } else {
8774 if (uhtid >= IXGBE_MAX_LINK_HANDLE)
8775 return err;
8776 if (!adapter->jump_tables[uhtid])
8777 return err;
8778 field_ptr = (adapter->jump_tables[uhtid])->mat;
8779 }
8780
8781 if (!field_ptr)
8782 return err;
8783
8784
8785
8786
8787
8788
8789
8790 if (link_uhtid) {
8791 struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
8792
8793 if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
8794 return err;
8795
8796 if (!test_bit(link_uhtid - 1, &adapter->tables))
8797 return err;
8798
8799
8800
8801
8802
8803
8804 if (adapter->jump_tables[link_uhtid] &&
8805 (adapter->jump_tables[link_uhtid])->link_hdl) {
8806 e_err(drv, "Link filter exists for link: %x\n",
8807 link_uhtid);
8808 return err;
8809 }
8810
8811 for (i = 0; nexthdr[i].jump; i++) {
8812 if (nexthdr[i].o != cls->knode.sel->offoff ||
8813 nexthdr[i].s != cls->knode.sel->offshift ||
8814 nexthdr[i].m != cls->knode.sel->offmask)
8815 return err;
8816
8817 jump = kzalloc(sizeof(*jump), GFP_KERNEL);
8818 if (!jump)
8819 return -ENOMEM;
8820 input = kzalloc(sizeof(*input), GFP_KERNEL);
8821 if (!input) {
8822 err = -ENOMEM;
8823 goto free_jump;
8824 }
8825 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
8826 if (!mask) {
8827 err = -ENOMEM;
8828 goto free_input;
8829 }
8830 jump->input = input;
8831 jump->mask = mask;
8832 jump->link_hdl = cls->knode.handle;
8833
8834 err = ixgbe_clsu32_build_input(input, mask, cls,
8835 field_ptr, &nexthdr[i]);
8836 if (!err) {
8837 jump->mat = nexthdr[i].jump;
8838 adapter->jump_tables[link_uhtid] = jump;
8839 break;
8840 }
8841 }
8842 return 0;
8843 }
8844
8845 input = kzalloc(sizeof(*input), GFP_KERNEL);
8846 if (!input)
8847 return -ENOMEM;
8848 mask = kzalloc(sizeof(*mask), GFP_KERNEL);
8849 if (!mask) {
8850 err = -ENOMEM;
8851 goto free_input;
8852 }
8853
8854 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
8855 if ((adapter->jump_tables[uhtid])->input)
8856 memcpy(input, (adapter->jump_tables[uhtid])->input,
8857 sizeof(*input));
8858 if ((adapter->jump_tables[uhtid])->mask)
8859 memcpy(mask, (adapter->jump_tables[uhtid])->mask,
8860 sizeof(*mask));
8861
8862
8863
8864
8865 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
8866 struct ixgbe_jump_table *link = adapter->jump_tables[i];
8867
8868 if (link && (test_bit(loc - 1, link->child_loc_map))) {
8869 e_err(drv, "Filter exists in location: %x\n",
8870 loc);
8871 err = -EINVAL;
8872 goto err_out;
8873 }
8874 }
8875 }
8876 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
8877 if (err)
8878 goto err_out;
8879
8880 err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
8881 &queue);
8882 if (err < 0)
8883 goto err_out;
8884
8885 input->sw_idx = loc;
8886
8887 spin_lock(&adapter->fdir_perfect_lock);
8888
8889 if (hlist_empty(&adapter->fdir_filter_list)) {
8890 memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
8891 err = ixgbe_fdir_set_input_mask_82599(hw, mask);
8892 if (err)
8893 goto err_out_w_lock;
8894 } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
8895 err = -EINVAL;
8896 goto err_out_w_lock;
8897 }
8898
8899 ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
8900 err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
8901 input->sw_idx, queue);
8902 if (!err)
8903 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
8904 spin_unlock(&adapter->fdir_perfect_lock);
8905
8906 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
8907 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
8908
8909 kfree(mask);
8910 return err;
8911err_out_w_lock:
8912 spin_unlock(&adapter->fdir_perfect_lock);
8913err_out:
8914 kfree(mask);
8915free_input:
8916 kfree(input);
8917free_jump:
8918 kfree(jump);
8919 return err;
8920}
8921
8922static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
8923 struct tc_to_netdev *tc)
8924{
8925 struct ixgbe_adapter *adapter = netdev_priv(dev);
8926
8927 if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
8928 tc->type == TC_SETUP_CLSU32) {
8929 switch (tc->cls_u32->command) {
8930 case TC_CLSU32_NEW_KNODE:
8931 case TC_CLSU32_REPLACE_KNODE:
8932 return ixgbe_configure_clsu32(adapter,
8933 proto, tc->cls_u32);
8934 case TC_CLSU32_DELETE_KNODE:
8935 return ixgbe_delete_clsu32(adapter, tc->cls_u32);
8936 case TC_CLSU32_NEW_HNODE:
8937 case TC_CLSU32_REPLACE_HNODE:
8938 return ixgbe_configure_clsu32_add_hnode(adapter, proto,
8939 tc->cls_u32);
8940 case TC_CLSU32_DELETE_HNODE:
8941 return ixgbe_configure_clsu32_del_hnode(adapter,
8942 tc->cls_u32);
8943 default:
8944 return -EINVAL;
8945 }
8946 }
8947
8948 if (tc->type != TC_SETUP_MQPRIO)
8949 return -EINVAL;
8950
8951 return ixgbe_setup_tc(dev, tc->tc);
8952}
8953
8954#ifdef CONFIG_PCI_IOV
8955void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
8956{
8957 struct net_device *netdev = adapter->netdev;
8958
8959 rtnl_lock();
8960 ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
8961 rtnl_unlock();
8962}
8963
8964#endif
8965void ixgbe_do_reset(struct net_device *netdev)
8966{
8967 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8968
8969 if (netif_running(netdev))
8970 ixgbe_reinit_locked(adapter);
8971 else
8972 ixgbe_reset(adapter);
8973}
8974
8975static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
8976 netdev_features_t features)
8977{
8978 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8979
8980
8981 if (!(features & NETIF_F_RXCSUM))
8982 features &= ~NETIF_F_LRO;
8983
8984
8985 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
8986 features &= ~NETIF_F_LRO;
8987
8988 return features;
8989}
8990
8991static int ixgbe_set_features(struct net_device *netdev,
8992 netdev_features_t features)
8993{
8994 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8995 netdev_features_t changed = netdev->features ^ features;
8996 bool need_reset = false;
8997
8998
8999 if (!(features & NETIF_F_LRO)) {
9000 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9001 need_reset = true;
9002 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
9003 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
9004 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
9005 if (adapter->rx_itr_setting == 1 ||
9006 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
9007 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
9008 need_reset = true;
9009 } else if ((changed ^ features) & NETIF_F_LRO) {
9010 e_info(probe, "rx-usecs set too low, "
9011 "disabling RSC\n");
9012 }
9013 }
9014
9015
9016
9017
9018
9019 if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
9020
9021 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
9022 need_reset = true;
9023
9024 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
9025 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9026 } else {
9027
9028 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
9029 need_reset = true;
9030
9031 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
9032
9033
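/* Flow Director ATR hashing can only be enabled when SR-IOV is off,
 * a single traffic class is configured, RSS is enabled, and a
 * non-zero ATR sample rate is set; otherwise leave it disabled.
 */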
9034 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
9035
9036 (netdev_get_num_tc(netdev) > 1) ||
9037
9038 (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
9039
9040 (!adapter->atr_sample_rate))
9041 ;
9042 else
9043 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
9044 }
9045
9046 if (changed & NETIF_F_RXALL)
9047 need_reset = true;
9048
9049 netdev->features = features;
9050
9051 if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
9052 if (features & NETIF_F_RXCSUM) {
9053 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9054 } else {
9055 u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9056
9057 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9058 }
9059 }
9060
9061 if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
9062 if (features & NETIF_F_RXCSUM) {
9063 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9064 } else {
9065 u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9066
9067 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9068 }
9069 }
9070
9071 if (need_reset)
9072 ixgbe_do_reset(netdev);
9073 else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
9074 NETIF_F_HW_VLAN_CTAG_FILTER))
9075 ixgbe_set_rx_mode(netdev);
9076
9077 return 0;
9078}
9079
9080
9081
9082
9083
9084
9085static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
9086 struct udp_tunnel_info *ti)
9087{
9088 struct ixgbe_adapter *adapter = netdev_priv(dev);
9089 struct ixgbe_hw *hw = &adapter->hw;
9090 __be16 port = ti->port;
9091 u32 port_shift = 0;
9092 u32 reg;
9093
9094 if (ti->sa_family != AF_INET)
9095 return;
9096
9097 switch (ti->type) {
9098 case UDP_TUNNEL_TYPE_VXLAN:
9099 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9100 return;
9101
9102 if (adapter->vxlan_port == port)
9103 return;
9104
9105 if (adapter->vxlan_port) {
9106 netdev_info(dev,
9107 "VXLAN port %d set, not adding port %d\n",
9108 ntohs(adapter->vxlan_port),
9109 ntohs(port));
9110 return;
9111 }
9112
9113 adapter->vxlan_port = port;
9114 break;
9115 case UDP_TUNNEL_TYPE_GENEVE:
9116 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9117 return;
9118
9119 if (adapter->geneve_port == port)
9120 return;
9121
9122 if (adapter->geneve_port) {
9123 netdev_info(dev,
9124 "GENEVE port %d set, not adding port %d\n",
9125 ntohs(adapter->geneve_port),
9126 ntohs(port));
9127 return;
9128 }
9129
9130 port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
9131 adapter->geneve_port = port;
9132 break;
9133 default:
9134 return;
9135 }
9136
9137 reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
9138 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
9139}
9140
9141
9142
9143
9144
9145
9146static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
9147 struct udp_tunnel_info *ti)
9148{
9149 struct ixgbe_adapter *adapter = netdev_priv(dev);
9150 u32 port_mask;
9151
9152 if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
9153 ti->type != UDP_TUNNEL_TYPE_GENEVE)
9154 return;
9155
9156 if (ti->sa_family != AF_INET)
9157 return;
9158
9159 switch (ti->type) {
9160 case UDP_TUNNEL_TYPE_VXLAN:
9161 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
9162 return;
9163
9164 if (adapter->vxlan_port != ti->port) {
9165 netdev_info(dev, "VXLAN port %d not found\n",
9166 ntohs(ti->port));
9167 return;
9168 }
9169
9170 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
9171 break;
9172 case UDP_TUNNEL_TYPE_GENEVE:
9173 if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
9174 return;
9175
9176 if (adapter->geneve_port != ti->port) {
9177 netdev_info(dev, "GENEVE port %d not found\n",
9178 ntohs(ti->port));
9179 return;
9180 }
9181
9182 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
9183 break;
9184 default:
9185 return;
9186 }
9187
9188 ixgbe_clear_udp_tunnel_port(adapter, port_mask);
9189 adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
9190}
9191
9192static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
9193 struct net_device *dev,
9194 const unsigned char *addr, u16 vid,
9195 u16 flags)
9196{
9197
9198 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
9199 struct ixgbe_adapter *adapter = netdev_priv(dev);
9200 u16 pool = VMDQ_P(0);
9201
9202 if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
9203 return -ENOMEM;
9204 }
9205
9206 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
9207}
9208
9209
9210
9211
9212
9213
9214
9215
9216static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
9217 __u16 mode)
9218{
9219 struct ixgbe_hw *hw = &adapter->hw;
9220 unsigned int p, num_pools;
9221 u32 vmdctl;
9222
9223 switch (mode) {
9224 case BRIDGE_MODE_VEPA:
9225
9226 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
9227
9228
9229
9230
9231
9232 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9233 vmdctl |= IXGBE_VT_CTL_REPLEN;
9234 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9235
9236
9237
9238
9239 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9240 for (p = 0; p < num_pools; p++) {
9241 if (hw->mac.ops.set_source_address_pruning)
9242 hw->mac.ops.set_source_address_pruning(hw,
9243 true,
9244 p);
9245 }
9246 break;
9247 case BRIDGE_MODE_VEB:
9248
9249 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
9250 IXGBE_PFDTXGSWC_VT_LBEN);
9251
9252
9253
9254
9255 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
9256 if (!adapter->num_vfs)
9257 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
9258 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
9259
9260
9261
9262
9263 num_pools = adapter->num_vfs + adapter->num_rx_pools;
9264 for (p = 0; p < num_pools; p++) {
9265 if (hw->mac.ops.set_source_address_pruning)
9266 hw->mac.ops.set_source_address_pruning(hw,
9267 false,
9268 p);
9269 }
9270 break;
9271 default:
9272 return -EINVAL;
9273 }
9274
9275 adapter->bridge_mode = mode;
9276
9277 e_info(drv, "enabling bridge mode: %s\n",
9278 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9279
9280 return 0;
9281}
9282
9283static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
9284 struct nlmsghdr *nlh, u16 flags)
9285{
9286 struct ixgbe_adapter *adapter = netdev_priv(dev);
9287 struct nlattr *attr, *br_spec;
9288 int rem;
9289
9290 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9291 return -EOPNOTSUPP;
9292
9293 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9294 if (!br_spec)
9295 return -EINVAL;
9296
9297 nla_for_each_nested(attr, br_spec, rem) {
9298 int status;
9299 __u16 mode;
9300
9301 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9302 continue;
9303
9304 if (nla_len(attr) < sizeof(mode))
9305 return -EINVAL;
9306
9307 mode = nla_get_u16(attr);
9308 status = ixgbe_configure_bridge_mode(adapter, mode);
9309 if (status)
9310 return status;
9311
9312 break;
9313 }
9314
9315 return 0;
9316}
9317
9318static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9319 struct net_device *dev,
9320 u32 filter_mask, int nlflags)
9321{
9322 struct ixgbe_adapter *adapter = netdev_priv(dev);
9323
9324 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
9325 return 0;
9326
9327 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
9328 adapter->bridge_mode, 0, 0, nlflags,
9329 filter_mask, NULL);
9330}
9331
9332static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
9333{
9334 struct ixgbe_fwd_adapter *fwd_adapter = NULL;
9335 struct ixgbe_adapter *adapter = netdev_priv(pdev);
9336 int used_pools = adapter->num_vfs + adapter->num_rx_pools;
9337 unsigned int limit;
9338 int pool, err;
9339
	/* the hardware supports a limited number of pools; refuse the
	 * request if they are all in use
	 */
9344 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
9345 return ERR_PTR(-EINVAL);
9346
9347#ifdef CONFIG_RPS
9348 if (vdev->num_rx_queues != vdev->num_tx_queues) {
9349 netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
9350 vdev->name);
9351 return ERR_PTR(-EINVAL);
9352 }
9353#endif
9354
9355 if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
9356 vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
9357 netdev_info(pdev,
9358 "%s: Supports RX/TX Queue counts 1,2, and 4\n",
9359 pdev->name);
9360 return ERR_PTR(-EINVAL);
9361 }
9362
9363 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
9364 adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
9365 (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
9366 return ERR_PTR(-EBUSY);
9367
9368 fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
9369 if (!fwd_adapter)
9370 return ERR_PTR(-ENOMEM);
9371
9372 pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
9373 adapter->num_rx_pools++;
9374 set_bit(pool, &adapter->fwd_bitmask);
9375 limit = find_last_bit(&adapter->fwd_bitmask, 32);

	/* enable VMDq so the additional pool can be used */
9378 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
9379 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
9380 adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;

	/* force a reinit of the queue layout with VMDq enabled */
9383 err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
9384 if (err)
9385 goto fwd_add_err;
9386 fwd_adapter->pool = pool;
9387 fwd_adapter->real_adapter = adapter;
9388
9389 if (netif_running(pdev)) {
9390 err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
9391 if (err)
9392 goto fwd_add_err;
9393 netif_tx_start_all_queues(vdev);
9394 }
9395
9396 return fwd_adapter;
9397fwd_add_err:
9398
9399 netdev_info(pdev,
9400 "%s: dfwd hardware acceleration failed\n", vdev->name);
9401 clear_bit(pool, &adapter->fwd_bitmask);
9402 adapter->num_rx_pools--;
9403 kfree(fwd_adapter);
9404 return ERR_PTR(err);
9405}
9406
9407static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
9408{
9409 struct ixgbe_fwd_adapter *fwd_adapter = priv;
9410 struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
9411 unsigned int limit;
9412
9413 clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
9414 adapter->num_rx_pools--;
9415
9416 limit = find_last_bit(&adapter->fwd_bitmask, 32);
9417 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
9418 ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
9419 ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
9420 netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
9421 fwd_adapter->pool, adapter->num_rx_pools,
9422 fwd_adapter->rx_base_queue,
9423 fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
9424 adapter->fwd_bitmask);
9425 kfree(fwd_adapter);
9426}
9427
9428#define IXGBE_MAX_MAC_HDR_LEN 127
9429#define IXGBE_MAX_NETWORK_HDR_LEN 511
9430
9431static netdev_features_t
9432ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
9433 netdev_features_t features)
9434{
9435 unsigned int network_hdr_len, mac_hdr_len;
9436
9437
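	/* strip the offloads below when the MAC or network headers are
	 * longer than the hardware can handle
	 */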
9438 mac_hdr_len = skb_network_header(skb) - skb->data;
9439 if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
9440 return features & ~(NETIF_F_HW_CSUM |
9441 NETIF_F_SCTP_CRC |
9442 NETIF_F_HW_VLAN_CTAG_TX |
9443 NETIF_F_TSO |
9444 NETIF_F_TSO6);
9445
9446 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
9447 if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
9448 return features & ~(NETIF_F_HW_CSUM |
9449 NETIF_F_SCTP_CRC |
9450 NETIF_F_TSO |
9451 NETIF_F_TSO6);
9452
	/* IPv4 TSO inside tunnels is only supported when the inner IP ID can
	 * be mangled, so drop TSO when NETIF_F_TSO_MANGLEID is not set
	 */
9456 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
9457 features &= ~NETIF_F_TSO;
9458
9459 return features;
9460}
9461
9462static const struct net_device_ops ixgbe_netdev_ops = {
9463 .ndo_open = ixgbe_open,
9464 .ndo_stop = ixgbe_close,
9465 .ndo_start_xmit = ixgbe_xmit_frame,
9466 .ndo_select_queue = ixgbe_select_queue,
9467 .ndo_set_rx_mode = ixgbe_set_rx_mode,
9468 .ndo_validate_addr = eth_validate_addr,
9469 .ndo_set_mac_address = ixgbe_set_mac,
9470 .ndo_change_mtu = ixgbe_change_mtu,
9471 .ndo_tx_timeout = ixgbe_tx_timeout,
9472 .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
9473 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
9474 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
9475 .ndo_do_ioctl = ixgbe_ioctl,
9476 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
9477 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
9478 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
9479 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
9480 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
9481 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
9482 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
9483 .ndo_get_stats64 = ixgbe_get_stats64,
9484 .ndo_setup_tc = __ixgbe_setup_tc,
9485#ifdef CONFIG_NET_POLL_CONTROLLER
9486 .ndo_poll_controller = ixgbe_netpoll,
9487#endif
9488#ifdef IXGBE_FCOE
9489 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
9490 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
9491 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
9492 .ndo_fcoe_enable = ixgbe_fcoe_enable,
9493 .ndo_fcoe_disable = ixgbe_fcoe_disable,
9494 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
9495 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
9496#endif
9497 .ndo_set_features = ixgbe_set_features,
9498 .ndo_fix_features = ixgbe_fix_features,
9499 .ndo_fdb_add = ixgbe_ndo_fdb_add,
9500 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
9501 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
9502 .ndo_dfwd_add_station = ixgbe_fwd_add,
9503 .ndo_dfwd_del_station = ixgbe_fwd_del,
9504 .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
9505 .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
9506 .ndo_features_check = ixgbe_features_check,
9507};
9508
/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * Counts the physical functions sharing this device's bus in order to
 * determine how many ports the device has, which in turn determines the
 * PCIe bandwidth needed for optimal performance.
 **/
9518static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
9519{
9520 struct pci_dev *entry, *pdev = adapter->pdev;
9521 int physfns = 0;

	/* devices whose PCIe link is reported by a parent device sit behind
	 * an internal switch and are treated as having four functions
	 */
9527 if (ixgbe_pcie_from_parent(&adapter->hw))
9528 physfns = 4;
9529
9530 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
9531
9532 if (entry->is_virtfn)
9533 continue;

		/* if the devices on the bus do not all match our vendor and
		 * device ID we cannot count the functions reliably (for
		 * example, when a function has been directly assigned to a
		 * guest), so report failure
		 */
9541 if ((entry->vendor != pdev->vendor) ||
9542 (entry->device != pdev->device))
9543 return -1;
9544
9545 physfns++;
9546 }
9547
9548 return physfns;
9549}
9550
/**
 * ixgbe_wol_supported - Check whether the device supports Wake-on-LAN
 * @adapter: the adapter private structure
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * Used by probe and ethtool to determine which devices have WoL support.
 **/
9561bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
9562 u16 subdevice_id)
9563{
9564 struct ixgbe_hw *hw = &adapter->hw;
9565 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
9566
9567
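	/* WoL is not supported on 82598 parts */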
9568 if (hw->mac.type == ixgbe_mac_82598EB)
9569 return false;
9570
9571
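	/* for X540 and newer, WoL support is described by the EEPROM device
	 * capabilities word
	 */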
9572 if (hw->mac.type >= ixgbe_mac_X540) {
9573 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
9574 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
9575 (hw->bus.func == 0)))
9576 return true;
9577 }

	/* for older parts, WoL support depends on the specific device and
	 * subsystem IDs
	 */
9580 switch (device_id) {
9581 case IXGBE_DEV_ID_82599_SFP:
9582
9583 switch (subdevice_id) {
9584 case IXGBE_SUBDEV_ID_82599_560FLR:
9585 case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
9586 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
9587 case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
9588
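			/* these subdevices only support WoL on the first port */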
9589 if (hw->bus.func != 0)
9590 break;
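			/* fall through */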
9591 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
9592 case IXGBE_SUBDEV_ID_82599_SFP:
9593 case IXGBE_SUBDEV_ID_82599_RNDC:
9594 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
9595 case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
9596 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
9597 case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
9598 return true;
9599 }
9600 break;
9601 case IXGBE_DEV_ID_82599EN_SFP:
9602
9603 switch (subdevice_id) {
9604 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
9605 return true;
9606 }
9607 break;
9608 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
9609
9610 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
9611 return true;
9612 break;
9613 case IXGBE_DEV_ID_82599_KX4:
9614 return true;
9615 default:
9616 break;
9617 }
9618
9619 return false;
9620}
9621
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure.
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
9633static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9634{
9635 struct net_device *netdev;
9636 struct ixgbe_adapter *adapter = NULL;
9637 struct ixgbe_hw *hw;
9638 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
9639 int i, err, pci_using_dac, expected_gts;
9640 unsigned int indices = MAX_TX_QUEUES;
9641 u8 part_str[IXGBE_PBANUM_LENGTH];
9642 bool disable_dev = false;
9643#ifdef IXGBE_FCOE
9644 u16 device_caps;
9645#endif
9646 u32 eec;

	/* the ixgbe PF driver must never be bound to a virtual function */
9651 if (pdev->is_virtfn) {
9652 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
9653 pci_name(pdev), pdev->vendor, pdev->device);
9654 return -EINVAL;
9655 }
9656
9657 err = pci_enable_device_mem(pdev);
9658 if (err)
9659 return err;
9660
9661 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
9662 pci_using_dac = 1;
9663 } else {
9664 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9665 if (err) {
9666 dev_err(&pdev->dev,
9667 "No usable DMA configuration, aborting\n");
9668 goto err_dma;
9669 }
9670 pci_using_dac = 0;
9671 }
9672
9673 err = pci_request_mem_regions(pdev, ixgbe_driver_name);
9674 if (err) {
9675 dev_err(&pdev->dev,
9676 "pci_request_selected_regions failed 0x%x\n", err);
9677 goto err_pci_reg;
9678 }
9679
9680 pci_enable_pcie_error_reporting(pdev);
9681
9682 pci_set_master(pdev);
9683 pci_save_state(pdev);
9684
9685 if (ii->mac == ixgbe_mac_82598EB) {
9686#ifdef CONFIG_IXGBE_DCB
9687
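		/* with DCB enabled, reserve 4 queues per traffic class */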
9688 indices = 4 * MAX_TRAFFIC_CLASS;
9689#else
9690 indices = IXGBE_MAX_RSS_INDICES;
9691#endif
9692 }
9693
9694 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
9695 if (!netdev) {
9696 err = -ENOMEM;
9697 goto err_alloc_etherdev;
9698 }
9699
9700 SET_NETDEV_DEV(netdev, &pdev->dev);
9701
9702 adapter = netdev_priv(netdev);
9703
9704 adapter->netdev = netdev;
9705 adapter->pdev = pdev;
9706 hw = &adapter->hw;
9707 hw->back = adapter;
9708 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
9709
9710 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
9711 pci_resource_len(pdev, 0));
9712 adapter->io_addr = hw->hw_addr;
9713 if (!hw->hw_addr) {
9714 err = -EIO;
9715 goto err_ioremap;
9716 }
9717
9718 netdev->netdev_ops = &ixgbe_netdev_ops;
9719 ixgbe_set_ethtool_ops(netdev);
9720 netdev->watchdog_timeo = 5 * HZ;
9721 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
9722
9723
9724 hw->mac.ops = *ii->mac_ops;
9725 hw->mac.type = ii->mac;
9726 hw->mvals = ii->mvals;
9727 if (ii->link_ops)
9728 hw->link.ops = *ii->link_ops;
9729
9730
9731 hw->eeprom.ops = *ii->eeprom_ops;
9732 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
9733 if (ixgbe_removed(hw->hw_addr)) {
9734 err = -EIO;
9735 goto err_ioremap;
9736 }
9737
9738 if (!(eec & BIT(8)))
9739 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
9740
9741
9742 hw->phy.ops = *ii->phy_ops;
9743 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
9744
9745 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
9746 hw->phy.mdio.mmds = 0;
9747 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
9748 hw->phy.mdio.dev = netdev;
9749 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
9750 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
9751
9752
9753 err = ixgbe_sw_init(adapter, ii);
9754 if (err)
9755 goto err_sw_init;
9756
9757
9758 if (hw->mac.ops.init_swfw_sync)
9759 hw->mac.ops.init_swfw_sync(hw);
9760
9761
9762 switch (adapter->hw.mac.type) {
9763 case ixgbe_mac_82599EB:
9764 case ixgbe_mac_X540:
9765 case ixgbe_mac_X550:
9766 case ixgbe_mac_X550EM_x:
9767 case ixgbe_mac_x550em_a:
9768 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
9769 break;
9770 default:
9771 break;
9772 }

	/* if the device has a fan, check the fan-failure indication pin and
	 * log a critical message when the fan has stopped
	 */
9778 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
9779 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
9780 if (esdp & IXGBE_ESDP_SDP1)
9781 e_crit(probe, "Fan has stopped, replace the adapter\n");
9782 }
9783
9784 if (allow_unsupported_sfp)
9785 hw->allow_unsupported_sfp = allow_unsupported_sfp;
9786
9787
9788 hw->phy.reset_if_overtemp = true;
9789 err = hw->mac.ops.reset_hw(hw);
9790 hw->phy.reset_if_overtemp = false;
9791 ixgbe_set_eee_capable(adapter);
9792 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
9793 err = 0;
9794 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
9795 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
9796 e_dev_err("Reload the driver after installing a supported module.\n");
9797 goto err_sw_init;
9798 } else if (err) {
9799 e_dev_err("HW Init failed: %d\n", err);
9800 goto err_sw_init;
9801 }
9802
9803#ifdef CONFIG_PCI_IOV
9804
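	/* SR-IOV is not supported on 82598 parts */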
9805 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
9806 goto skip_sriov;
9807
9808 ixgbe_init_mbx_params_pf(hw);
9809 hw->mbx.ops = ii->mbx_ops;
9810 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
9811 ixgbe_enable_sriov(adapter);
9812skip_sriov:
9813
9814#endif
9815 netdev->features = NETIF_F_SG |
9816 NETIF_F_TSO |
9817 NETIF_F_TSO6 |
9818 NETIF_F_RXHASH |
9819 NETIF_F_RXCSUM |
9820 NETIF_F_HW_CSUM;
9821
9822#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
9823 NETIF_F_GSO_GRE_CSUM | \
9824 NETIF_F_GSO_IPXIP4 | \
9825 NETIF_F_GSO_IPXIP6 | \
9826 NETIF_F_GSO_UDP_TUNNEL | \
9827 NETIF_F_GSO_UDP_TUNNEL_CSUM)
9828
9829 netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
9830 netdev->features |= NETIF_F_GSO_PARTIAL |
9831 IXGBE_GSO_PARTIAL_FEATURES;
9832
9833 if (hw->mac.type >= ixgbe_mac_82599EB)
9834 netdev->features |= NETIF_F_SCTP_CRC;
9835
9836
9837 netdev->hw_features |= netdev->features |
9838 NETIF_F_HW_VLAN_CTAG_FILTER |
9839 NETIF_F_HW_VLAN_CTAG_RX |
9840 NETIF_F_HW_VLAN_CTAG_TX |
9841 NETIF_F_RXALL |
9842 NETIF_F_HW_L2FW_DOFFLOAD;
9843
9844 if (hw->mac.type >= ixgbe_mac_82599EB)
9845 netdev->hw_features |= NETIF_F_NTUPLE |
9846 NETIF_F_HW_TC;
9847
9848 if (pci_using_dac)
9849 netdev->features |= NETIF_F_HIGHDMA;
9850
9851 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
9852 netdev->hw_enc_features |= netdev->vlan_features;
9853 netdev->mpls_features |= NETIF_F_HW_CSUM;
9854
9855
9856 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
9857 NETIF_F_HW_VLAN_CTAG_RX |
9858 NETIF_F_HW_VLAN_CTAG_TX;
9859
9860 netdev->priv_flags |= IFF_UNICAST_FLT;
9861 netdev->priv_flags |= IFF_SUPP_NOFCS;
9862
9863
9864 netdev->min_mtu = ETH_MIN_MTU;
9865 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
9866
9867#ifdef CONFIG_IXGBE_DCB
9868 if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
9869 netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
9870#endif
9871
9872#ifdef IXGBE_FCOE
9873 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
9874 unsigned int fcoe_l;
9875
9876 if (hw->mac.ops.get_device_caps) {
9877 hw->mac.ops.get_device_caps(hw, &device_caps);
9878 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
9879 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
9880 }
9881
9882
9883 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
9884 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
9885
9886 netdev->features |= NETIF_F_FSO |
9887 NETIF_F_FCOE_CRC;
9888
9889 netdev->vlan_features |= NETIF_F_FSO |
9890 NETIF_F_FCOE_CRC |
9891 NETIF_F_FCOE_MTU;
9892 }
9893#endif
9894
9895 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
9896 netdev->hw_features |= NETIF_F_LRO;
9897 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
9898 netdev->features |= NETIF_F_LRO;
9899
9900
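	/* make sure the EEPROM checksum is valid */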
9901 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
9902 e_dev_err("The EEPROM Checksum Is Not Valid\n");
9903 err = -EIO;
9904 goto err_sw_init;
9905 }
9906
9907 eth_platform_get_mac_address(&adapter->pdev->dev,
9908 adapter->hw.mac.perm_addr);
9909
9910 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
9911
9912 if (!is_valid_ether_addr(netdev->dev_addr)) {
9913 e_dev_err("invalid MAC address\n");
9914 err = -EIO;
9915 goto err_sw_init;
9916 }
9917
9918
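	/* use the permanent MAC address as the default receive filter */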
9919 ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
9920 ixgbe_mac_set_default_filter(adapter);
9921
9922 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
9923 (unsigned long) adapter);
9924
9925 if (ixgbe_removed(hw->hw_addr)) {
9926 err = -EIO;
9927 goto err_sw_init;
9928 }
9929 INIT_WORK(&adapter->service_task, ixgbe_service_task);
9930 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
9931 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
9932
9933 err = ixgbe_init_interrupt_scheme(adapter);
9934 if (err)
9935 goto err_sw_init;
9936
9937
9938 adapter->wol = 0;
9939 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
9940 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
9941 pdev->subsystem_device);
9942 if (hw->wol_enabled)
9943 adapter->wol = IXGBE_WUFC_MAG;
9944
9945 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
9946
9947
9948 hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
9949 hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
9950
9951
9952 if (ixgbe_pcie_from_parent(hw))
9953 ixgbe_get_parent_bus_info(adapter);
9954 else
9955 hw->mac.ops.get_bus_info(hw);

	/* estimate the PCIe bandwidth required by every port on this device
	 * so a warning can be logged if the slot cannot provide it
	 */
9962 switch (hw->mac.type) {
9963 case ixgbe_mac_82598EB:
9964 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
9965 break;
9966 default:
9967 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
9968 break;
9969 }
9970
9971
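	/* skip the bandwidth check if function enumeration failed */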
9972 if (expected_gts > 0)
9973 ixgbe_check_minimum_link(adapter, expected_gts);
9974
9975 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
9976 if (err)
9977 strlcpy(part_str, "Unknown", sizeof(part_str));
9978 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
9979 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
9980 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
9981 part_str);
9982 else
9983 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
9984 hw->mac.type, hw->phy.type, part_str);
9985
9986 e_dev_info("%pM\n", netdev->dev_addr);
9987
9988
9989 err = hw->mac.ops.start_hw(hw);
9990 if (err == IXGBE_ERR_EEPROM_VERSION) {
9991
9992 e_dev_warn("This device is a pre-production adapter/LOM. "
9993 "Please be aware there may be issues associated "
9994 "with your hardware. If you are experiencing "
9995 "problems please contact your Intel or hardware "
9996 "representative who provided you with this "
9997 "hardware.\n");
9998 }
9999 strcpy(netdev->name, "eth%d");
10000 err = register_netdev(netdev);
10001 if (err)
10002 goto err_register;
10003
10004 pci_set_drvdata(pdev, adapter);
10005
10006
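	/* power down the Tx laser, if the MAC supports it */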
10007 if (hw->mac.ops.disable_tx_laser)
10008 hw->mac.ops.disable_tx_laser(hw);
10009
10010
10011 netif_carrier_off(netdev);
10012
10013#ifdef CONFIG_IXGBE_DCA
10014 if (dca_add_requester(&pdev->dev) == 0) {
10015 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
10016 ixgbe_setup_dca(adapter);
10017 }
10018#endif
10019 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
10020 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
10021 for (i = 0; i < adapter->num_vfs; i++)
10022 ixgbe_vf_configuration(pdev, (i | 0x10000000));
10023 }

	/* report the driver version to the management firmware */
10028 if (hw->mac.ops.set_fw_drv_ver)
10029 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
10030 sizeof(ixgbe_driver_version) - 1,
10031 ixgbe_driver_version);
10032
10033
10034 ixgbe_add_sanmac_netdev(netdev);
10035
10036 e_dev_info("%s\n", ixgbe_default_device_descr);
10037
10038#ifdef CONFIG_IXGBE_HWMON
10039 if (ixgbe_sysfs_init(adapter))
10040 e_err(probe, "failed to allocate sysfs resources\n");
10041#endif
10042
10043 ixgbe_dbg_adapter_init(adapter);
10044
10045
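	/* for firmware-managed SFP devices, set up the link now */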
10046 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
10047 hw->mac.ops.setup_link(hw,
10048 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
10049 true);
10050
10051 return 0;
10052
10053err_register:
10054 ixgbe_release_hw_control(adapter);
10055 ixgbe_clear_interrupt_scheme(adapter);
10056err_sw_init:
10057 ixgbe_disable_sriov(adapter);
10058 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
10059 iounmap(adapter->io_addr);
10060 kfree(adapter->jump_tables[0]);
10061 kfree(adapter->mac_table);
10062err_ioremap:
10063 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
10064 free_netdev(netdev);
10065err_alloc_etherdev:
10066 pci_release_mem_regions(pdev);
10067err_pci_reg:
10068err_dma:
10069 if (!adapter || disable_dev)
10070 pci_disable_device(pdev);
10071 return err;
10072}
10073
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver that it
 * should release a PCI device. This could be caused by a Hot-Plug event, or
 * because the driver is about to be removed from memory.
 **/
10083static void ixgbe_remove(struct pci_dev *pdev)
10084{
10085 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10086 struct net_device *netdev;
10087 bool disable_dev;
10088 int i;
10089
10090
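	/* drvdata is NULL if probe failed, in which case there is nothing
	 * to undo
	 */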
10091 if (!adapter)
10092 return;
10093
10094 netdev = adapter->netdev;
10095 ixgbe_dbg_adapter_exit(adapter);
10096
10097 set_bit(__IXGBE_REMOVING, &adapter->state);
10098 cancel_work_sync(&adapter->service_task);
10099
10100
10101#ifdef CONFIG_IXGBE_DCA
10102 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
10103 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
10104 dca_remove_requester(&pdev->dev);
10105 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
10106 IXGBE_DCA_CTRL_DCA_DISABLE);
10107 }
10108
10109#endif
10110#ifdef CONFIG_IXGBE_HWMON
10111 ixgbe_sysfs_exit(adapter);
10112#endif
10113
10114
10115 ixgbe_del_sanmac_netdev(netdev);
10116
10117#ifdef CONFIG_PCI_IOV
10118 ixgbe_disable_sriov(adapter);
10119#endif
10120 if (netdev->reg_state == NETREG_REGISTERED)
10121 unregister_netdev(netdev);
10122
10123 ixgbe_clear_interrupt_scheme(adapter);
10124
10125 ixgbe_release_hw_control(adapter);
10126
10127#ifdef CONFIG_DCB
10128 kfree(adapter->ixgbe_ieee_pfc);
10129 kfree(adapter->ixgbe_ieee_ets);
10130
10131#endif
10132 iounmap(adapter->io_addr);
10133 pci_release_mem_regions(pdev);
10134
10135 e_dev_info("complete\n");
10136
10137 for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
10138 if (adapter->jump_tables[i]) {
10139 kfree(adapter->jump_tables[i]->input);
10140 kfree(adapter->jump_tables[i]->mask);
10141 }
10142 kfree(adapter->jump_tables[i]);
10143 }
10144
10145 kfree(adapter->mac_table);
10146 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
10147 free_netdev(netdev);
10148
10149 pci_disable_pcie_error_reporting(pdev);
10150
10151 if (disable_dev)
10152 pci_disable_device(pdev);
10153}
10154
/**
 * ixgbe_io_error_detected - called when a PCI error is detected
 * @pdev: pointer to the PCI device
 * @state: the current PCI connection state
 *
 * This function is called after a PCI bus error affecting this device has
 * been detected.
 **/
10163static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
10164 pci_channel_state_t state)
10165{
10166 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10167 struct net_device *netdev = adapter->netdev;
10168
10169#ifdef CONFIG_PCI_IOV
10170 struct ixgbe_hw *hw = &adapter->hw;
10171 struct pci_dev *bdev, *vfdev;
10172 u32 dw0, dw1, dw2, dw3;
10173 int vf, pos;
10174 u16 req_id, pf_func;
10175
10176 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
10177 adapter->num_vfs == 0)
10178 goto skip_bad_vf_detection;
10179
10180 bdev = pdev->bus->self;
10181 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
10182 bdev = bdev->bus->self;
10183
10184 if (!bdev)
10185 goto skip_bad_vf_detection;
10186
10187 pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
10188 if (!pos)
10189 goto skip_bad_vf_detection;
10190
10191 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
10192 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
10193 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
10194 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
10195 if (ixgbe_removed(hw->hw_addr))
10196 goto skip_bad_vf_detection;
10197
10198 req_id = dw1 >> 16;
10199
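	/* if bit 7 of the requester ID is clear, the error did not come
	 * from a VF
	 */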
10200 if (!(req_id & 0x0080))
10201 goto skip_bad_vf_detection;
10202
10203 pf_func = req_id & 0x01;
10204 if ((pf_func & 1) == (pdev->devfn & 1)) {
10205 unsigned int device_id;
10206
10207 vf = (req_id & 0x7F) >> 1;
10208 e_dev_err("VF %d has caused a PCIe error\n", vf);
10209 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
10210 "%8.8x\tdw3: %8.8x\n",
10211 dw0, dw1, dw2, dw3);
10212 switch (adapter->hw.mac.type) {
10213 case ixgbe_mac_82599EB:
10214 device_id = IXGBE_82599_VF_DEVICE_ID;
10215 break;
10216 case ixgbe_mac_X540:
10217 device_id = IXGBE_X540_VF_DEVICE_ID;
10218 break;
10219 case ixgbe_mac_X550:
10220 device_id = IXGBE_DEV_ID_X550_VF;
10221 break;
10222 case ixgbe_mac_X550EM_x:
10223 device_id = IXGBE_DEV_ID_X550EM_X_VF;
10224 break;
10225 case ixgbe_mac_x550em_a:
10226 device_id = IXGBE_DEV_ID_X550EM_A_VF;
10227 break;
10228 default:
10229 device_id = 0;
10230 break;
10231 }

		/* find the pci_dev of the VF that matches the requester ID */
10234 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
10235 while (vfdev) {
10236 if (vfdev->devfn == (req_id & 0xFF))
10237 break;
10238 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
10239 device_id, vfdev);
10240 }

		/* if the offending VF is still present, issue a
		 * function-level reset to it
		 */
10246 if (vfdev) {
10247 ixgbe_issue_vf_flr(adapter, vfdev);
10248
10249 pci_dev_put(vfdev);
10250 }
10251
10252 pci_cleanup_aer_uncorrect_error_status(pdev);
10253 }

	/* remember that a VF caused the error so ixgbe_io_resume() only has
	 * to drop this count instead of reinitializing the interface
	 */
10261 adapter->vferr_refcount++;
10262
10263 return PCI_ERS_RESULT_RECOVERED;
10264
10265skip_bad_vf_detection:
10266#endif
10267 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
10268 return PCI_ERS_RESULT_DISCONNECT;
10269
10270 rtnl_lock();
10271 netif_device_detach(netdev);
10272
10273 if (state == pci_channel_io_perm_failure) {
10274 rtnl_unlock();
10275 return PCI_ERS_RESULT_DISCONNECT;
10276 }
10277
10278 if (netif_running(netdev))
10279 ixgbe_close_suspend(adapter);
10280
10281 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
10282 pci_disable_device(pdev);
10283 rtnl_unlock();
10284
10285
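	/* request a slot reset */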
10286 return PCI_ERS_RESULT_NEED_RESET;
10287}
10288
/**
 * ixgbe_io_slot_reset - called after the PCI bus has been reset
 * @pdev: pointer to the PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 **/
10295static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
10296{
10297 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10298 pci_ers_result_t result;
10299 int err;
10300
10301 if (pci_enable_device_mem(pdev)) {
10302 e_err(probe, "Cannot re-enable PCI device after reset.\n");
10303 result = PCI_ERS_RESULT_DISCONNECT;
10304 } else {
10305 smp_mb__before_atomic();
10306 clear_bit(__IXGBE_DISABLED, &adapter->state);
10307 adapter->hw.hw_addr = adapter->io_addr;
10308 pci_set_master(pdev);
10309 pci_restore_state(pdev);
10310 pci_save_state(pdev);
10311
10312 pci_wake_from_d3(pdev, false);
10313
10314 ixgbe_reset(adapter);
10315 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
10316 result = PCI_ERS_RESULT_RECOVERED;
10317 }
10318
10319 err = pci_cleanup_aer_uncorrect_error_status(pdev);
10320 if (err) {
10321 e_dev_err("pci_cleanup_aer_uncorrect_error_status "
10322 "failed 0x%0x\n", err);
10323
10324 }
10325
10326 return result;
10327}
10328
/**
 * ixgbe_io_resume - called when traffic can start flowing again
 * @pdev: pointer to the PCI device
 *
 * This callback is called when the error recovery driver tells us that it is
 * OK to resume normal operation.
 **/
10336static void ixgbe_io_resume(struct pci_dev *pdev)
10337{
10338 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
10339 struct net_device *netdev = adapter->netdev;
10340
10341#ifdef CONFIG_PCI_IOV
10342 if (adapter->vferr_refcount) {
10343 e_info(drv, "Resuming after VF err\n");
10344 adapter->vferr_refcount--;
10345 return;
10346 }
10347
10348#endif
10349 rtnl_lock();
10350 if (netif_running(netdev))
10351 ixgbe_open(netdev);
10352
10353 netif_device_attach(netdev);
10354 rtnl_unlock();
10355}
10356
10357static const struct pci_error_handlers ixgbe_err_handler = {
10358 .error_detected = ixgbe_io_error_detected,
10359 .slot_reset = ixgbe_io_slot_reset,
10360 .resume = ixgbe_io_resume,
10361};
10362
10363static struct pci_driver ixgbe_driver = {
10364 .name = ixgbe_driver_name,
10365 .id_table = ixgbe_pci_tbl,
10366 .probe = ixgbe_probe,
10367 .remove = ixgbe_remove,
10368#ifdef CONFIG_PM
10369 .suspend = ixgbe_suspend,
10370 .resume = ixgbe_resume,
10371#endif
10372 .shutdown = ixgbe_shutdown,
10373 .sriov_configure = ixgbe_pci_sriov_configure,
10374 .err_handler = &ixgbe_err_handler
10375};
10376
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is loaded.
 * It creates the driver workqueue, sets up debugfs and registers the driver
 * with the PCI subsystem.
 **/
10383static int __init ixgbe_init_module(void)
10384{
10385 int ret;
10386 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
10387 pr_info("%s\n", ixgbe_copyright);
10388
10389 ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
10390 if (!ixgbe_wq) {
10391 pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
10392 return -ENOMEM;
10393 }
10394
10395 ixgbe_dbg_init();
10396
10397 ret = pci_register_driver(&ixgbe_driver);
10398 if (ret) {
10399 destroy_workqueue(ixgbe_wq);
10400 ixgbe_dbg_exit();
10401 return ret;
10402 }
10403
10404#ifdef CONFIG_IXGBE_DCA
10405 dca_register_notify(&dca_notifier);
10406#endif
10407
10408 return 0;
10409}
10410
10411module_init(ixgbe_init_module);
10412
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed from memory.
 **/
10419static void __exit ixgbe_exit_module(void)
10420{
10421#ifdef CONFIG_IXGBE_DCA
10422 dca_unregister_notify(&dca_notifier);
10423#endif
10424 pci_unregister_driver(&ixgbe_driver);
10425
10426 ixgbe_dbg_exit();
10427 if (ixgbe_wq) {
10428 destroy_workqueue(ixgbe_wq);
10429 ixgbe_wq = NULL;
10430 }
10431}
10432
10433#ifdef CONFIG_IXGBE_DCA
10434static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
10435 void *p)
10436{
10437 int ret_val;
10438
10439 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
10440 __ixgbe_notify_dca);
10441
10442 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
10443}
10444
10445#endif
10446
10447module_exit(ixgbe_exit_module);
10448
10449
10450