#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/vxlan.h>

#ifdef CONFIG_OF
#include <linux/of_net.h>
#endif

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
#ifdef CONFIG_IXGBE_VXLAN
#include <net/vxlan.h>
#endif

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
	"Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
	"Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "4.2.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
	"Copyright (c) 1999-2015 Intel Corporation.";

static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598]		= &ixgbe_82598_info,
	[board_82599]		= &ixgbe_82599_info,
	[board_X540]		= &ixgbe_X540_info,
	[board_X550]		= &ixgbe_X550_info,
	[board_X550EM_x]	= &ixgbe_X550EM_x_info,
};

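/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */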
static const struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},

	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);

static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					   u32 reg, u16 *value)
{
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	if (!pci_is_pcie(parent_dev))
		return -1;

	pcie_capability_read_word(parent_dev, reg, value);
	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
		return -1;
	return 0;
}

static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 link_status = 0;
	int err;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from the PCI config
	 * space of the parent, as this device sits behind a switch.
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, PCI_EXP_LNKSTA,
					     &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	return 0;
}

/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from
 * the device, since these devices sit behind a PCIe switch.
 */
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		return true;
	default:
		return false;
	}
}

/**
 * ixgbe_check_minimum_link - warn if the PCIe slot limits the adapter
 * @adapter: board private structure
 * @expected_gts: expected GT/s of bandwidth the device can consume
 *
 * Compares the usable PCIe bandwidth of the slot against what the device
 * needs and warns the user when the slot is the bottleneck.
 */
static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
				     int expected_gts)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_gts = 0;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	struct pci_dev *pdev;

	/* Some devices are not connected over PCIe and thus do not
	 * negotiate speed.  These devices do not have valid bus info,
	 * and thus any report we generate may not be correct.
	 */
	if (hw->bus.type == ixgbe_bus_type_internal)
		return;

	/* determine whether to use the parent device or the device itself */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		pdev = adapter->pdev->bus->parent->self;
	else
		pdev = adapter->pdev;

	if (pcie_get_minimum_link(pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		/* 8b/10b encoding reduces usable throughput by 20% */
		max_gts = 2 * width;
		break;
	case PCIE_SPEED_5_0GT:
		/* 8b/10b encoding reduces usable throughput by 20% */
		max_gts = 4 * width;
		break;
	case PCIE_SPEED_8_0GT:
		/* 128b/130b encoding reduces throughput by less than 2% */
		max_gts = 8 * width;
		break;
	default:
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	e_dev_info("PCI Express bandwidth of %dGT/s available\n",
		   max_gts);
	e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
		   (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
		    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
		    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
		    "Unknown"),
		   width,
		   (speed == PCIE_SPEED_2_5GT ? "20%" :
		    speed == PCIE_SPEED_5_0GT ? "20%" :
		    speed == PCIE_SPEED_8_0GT ? "<2%" :
		    "Unknown"));

	if (max_gts < expected_gts) {
		e_dev_warn("This is not sufficient for optimal performance of this card.\n");
		e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
			   expected_gts);
		e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
	}
}

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_bit(__IXGBE_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	e_dev_err("Adapter removed\n");
	if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		ixgbe_service_event_schedule(adapter);
}

static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* If the failed read was of the status register itself, the device
	 * is gone; skip the confirmation read below, which also blocks any
	 * potential recursion through ixgbe_read_reg().
	 */
	if (reg == IXGBE_STATUS) {
		ixgbe_remove_adapter(hw);
		return;
	}
	value = ixgbe_read_reg(hw, IXGBE_STATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbe_remove_adapter(hw);
}

/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers.  It checks for device
 * removal by confirming any read that returns all ones against the status
 * register, and avoids touching the hardware at all when a removal was
 * previously detected, in which case it returns IXGBE_FAILED_READ_REG
 * (all ones).
 */
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (ixgbe_removed(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbe_check_remove(hw, reg);
	return value;
}

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
	u16 value;

	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD) {
		ixgbe_remove_adapter(hw);
		return true;
	}
	return false;
}

u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u16 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_WORD;
	pci_read_config_word(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_WORD;
	return value;
}

#ifdef CONFIG_PCI_IOV
static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u32 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_DWORD;
	pci_read_config_dword(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_DWORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_DWORD;
	return value;
}
#endif

void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (ixgbe_removed(hw->hw_addr))
		return;
	pci_write_config_word(adapter->pdev, reg, value);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{ .name = NULL }
};
475
476
477
478
479
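/*
 * ixgbe_regdump - register printout routine
 */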
480static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
481{
482 int i = 0, j = 0;
483 char rname[16];
484 u32 regs[64];
485
486 switch (reginfo->ofs) {
487 case IXGBE_SRRCTL(0):
488 for (i = 0; i < 64; i++)
489 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
490 break;
491 case IXGBE_DCA_RXCTRL(0):
492 for (i = 0; i < 64; i++)
493 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
494 break;
495 case IXGBE_RDLEN(0):
496 for (i = 0; i < 64; i++)
497 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
498 break;
499 case IXGBE_RDH(0):
500 for (i = 0; i < 64; i++)
501 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
502 break;
503 case IXGBE_RDT(0):
504 for (i = 0; i < 64; i++)
505 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
506 break;
507 case IXGBE_RXDCTL(0):
508 for (i = 0; i < 64; i++)
509 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
510 break;
511 case IXGBE_RDBAL(0):
512 for (i = 0; i < 64; i++)
513 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
514 break;
515 case IXGBE_RDBAH(0):
516 for (i = 0; i < 64; i++)
517 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
518 break;
519 case IXGBE_TDBAL(0):
520 for (i = 0; i < 64; i++)
521 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
522 break;
523 case IXGBE_TDBAH(0):
524 for (i = 0; i < 64; i++)
525 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
526 break;
527 case IXGBE_TDLEN(0):
528 for (i = 0; i < 64; i++)
529 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
530 break;
531 case IXGBE_TDH(0):
532 for (i = 0; i < 64; i++)
533 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
534 break;
535 case IXGBE_TDT(0):
536 for (i = 0; i < 64; i++)
537 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
538 break;
539 case IXGBE_TXDCTL(0):
540 for (i = 0; i < 64; i++)
541 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
542 break;
543 default:
544 pr_info("%-15s %08x\n", reginfo->name,
545 IXGBE_READ_REG(hw, reginfo->ofs));
546 return;
547 }
548
549 for (i = 0; i < 8; i++) {
550 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
551 pr_err("%-15s", rname);
552 for (j = 0; j < 8; j++)
553 pr_cont(" %08x", regs[i*8+j]);
554 pr_cont("\n");
555 }
556
557}
558
559
560
561
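/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */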
562static void ixgbe_dump(struct ixgbe_adapter *adapter)
563{
564 struct net_device *netdev = adapter->netdev;
565 struct ixgbe_hw *hw = &adapter->hw;
566 struct ixgbe_reg_info *reginfo;
567 int n = 0;
568 struct ixgbe_ring *tx_ring;
569 struct ixgbe_tx_buffer *tx_buffer;
570 union ixgbe_adv_tx_desc *tx_desc;
571 struct my_u0 { u64 a; u64 b; } *u0;
572 struct ixgbe_ring *rx_ring;
573 union ixgbe_adv_rx_desc *rx_desc;
574 struct ixgbe_rx_buffer *rx_buffer_info;
575 u32 staterr;
576 int i = 0;
577
578 if (!netif_msg_hw(adapter))
579 return;
580
581
582 if (netdev) {
583 dev_info(&adapter->pdev->dev, "Net device Info\n");
584 pr_info("Device Name state "
585 "trans_start last_rx\n");
586 pr_info("%-15s %016lX %016lX %016lX\n",
587 netdev->name,
588 netdev->state,
589 netdev->trans_start,
590 netdev->last_rx);
591 }
592
593
594 dev_info(&adapter->pdev->dev, "Register Dump\n");
595 pr_info(" Register Name Value\n");
596 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
597 reginfo->name; reginfo++) {
598 ixgbe_regdump(hw, reginfo);
599 }
600
601
602 if (!netdev || !netif_running(netdev))
603 return;
604
605 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
606 pr_info(" %s %s %s %s\n",
607 "Queue [NTU] [NTC] [bi(ntc)->dma ]",
608 "leng", "ntw", "timestamp");
609 for (n = 0; n < adapter->num_tx_queues; n++) {
610 tx_ring = adapter->tx_ring[n];
611 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
612 pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
613 n, tx_ring->next_to_use, tx_ring->next_to_clean,
614 (u64)dma_unmap_addr(tx_buffer, dma),
615 dma_unmap_len(tx_buffer, len),
616 tx_buffer->next_to_watch,
617 (u64)tx_buffer->time_stamp);
618 }
619
620
621 if (!netif_msg_tx_done(adapter))
622 goto rx_ring_summary;
623
624 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
625
661 for (n = 0; n < adapter->num_tx_queues; n++) {
662 tx_ring = adapter->tx_ring[n];
663 pr_info("------------------------------------\n");
664 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
665 pr_info("------------------------------------\n");
666 pr_info("%s%s %s %s %s %s\n",
667 "T [desc] [address 63:0 ] ",
668 "[PlPOIdStDDt Ln] [bi->dma ] ",
669 "leng", "ntw", "timestamp", "bi->skb");
670
671 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
672 tx_desc = IXGBE_TX_DESC(tx_ring, i);
673 tx_buffer = &tx_ring->tx_buffer_info[i];
674 u0 = (struct my_u0 *)tx_desc;
675 if (dma_unmap_len(tx_buffer, len) > 0) {
676 pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
677 i,
678 le64_to_cpu(u0->a),
679 le64_to_cpu(u0->b),
680 (u64)dma_unmap_addr(tx_buffer, dma),
681 dma_unmap_len(tx_buffer, len),
682 tx_buffer->next_to_watch,
683 (u64)tx_buffer->time_stamp,
684 tx_buffer->skb);
685 if (i == tx_ring->next_to_use &&
686 i == tx_ring->next_to_clean)
687 pr_cont(" NTC/U\n");
688 else if (i == tx_ring->next_to_use)
689 pr_cont(" NTU\n");
690 else if (i == tx_ring->next_to_clean)
691 pr_cont(" NTC\n");
692 else
693 pr_cont("\n");
694
695 if (netif_msg_pktdata(adapter) &&
696 tx_buffer->skb)
697 print_hex_dump(KERN_INFO, "",
698 DUMP_PREFIX_ADDRESS, 16, 1,
699 tx_buffer->skb->data,
700 dma_unmap_len(tx_buffer, len),
701 true);
702 }
703 }
704 }
705
706
707rx_ring_summary:
708 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
709 pr_info("Queue [NTU] [NTC]\n");
710 for (n = 0; n < adapter->num_rx_queues; n++) {
711 rx_ring = adapter->rx_ring[n];
712 pr_info("%5d %5X %5X\n",
713 n, rx_ring->next_to_use, rx_ring->next_to_clean);
714 }
715
716
717 if (!netif_msg_rx_status(adapter))
718 return;
719
720 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
721
767 for (n = 0; n < adapter->num_rx_queues; n++) {
768 rx_ring = adapter->rx_ring[n];
769 pr_info("------------------------------------\n");
770 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
771 pr_info("------------------------------------\n");
772 pr_info("%s%s%s",
773 "R [desc] [ PktBuf A0] ",
774 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
775 "<-- Adv Rx Read format\n");
776 pr_info("%s%s%s",
777 "RWB[desc] [PcsmIpSHl PtRs] ",
778 "[vl er S cks ln] ---------------- [bi->skb ] ",
779 "<-- Adv Rx Write-Back format\n");
780
781 for (i = 0; i < rx_ring->count; i++) {
782 rx_buffer_info = &rx_ring->rx_buffer_info[i];
783 rx_desc = IXGBE_RX_DESC(rx_ring, i);
784 u0 = (struct my_u0 *)rx_desc;
785 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
786 if (staterr & IXGBE_RXD_STAT_DD) {
787
788 pr_info("RWB[0x%03X] %016llX "
789 "%016llX ---------------- %p", i,
790 le64_to_cpu(u0->a),
791 le64_to_cpu(u0->b),
792 rx_buffer_info->skb);
793 } else {
794 pr_info("R [0x%03X] %016llX "
795 "%016llX %016llX %p", i,
796 le64_to_cpu(u0->a),
797 le64_to_cpu(u0->b),
798 (u64)rx_buffer_info->dma,
799 rx_buffer_info->skb);
800
801 if (netif_msg_pktdata(adapter) &&
802 rx_buffer_info->dma) {
803 print_hex_dump(KERN_INFO, "",
804 DUMP_PREFIX_ADDRESS, 16, 1,
805 page_address(rx_buffer_info->page) +
806 rx_buffer_info->page_offset,
807 ixgbe_rx_bufsz(rx_ring), true);
808 }
809 }
810
811 if (i == rx_ring->next_to_use)
812 pr_cont(" NTU\n");
813 else if (i == rx_ring->next_to_clean)
814 pr_cont(" NTC\n");
815 else
816 pr_cont("\n");
817
818 }
819 }
820}
821
822static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
823{
824 u32 ctrl_ext;
825
826
827 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
828 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
829 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
830}
831
832static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
833{
834 u32 ctrl_ext;
835
836
837 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
838 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
839 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
840}
841
842
843
844
845
846
847
848
849
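/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */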
850static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
851 u8 queue, u8 msix_vector)
852{
853 u32 ivar, index;
854 struct ixgbe_hw *hw = &adapter->hw;
855 switch (hw->mac.type) {
856 case ixgbe_mac_82598EB:
857 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
858 if (direction == -1)
859 direction = 0;
860 index = (((direction * 64) + queue) >> 2) & 0x1F;
861 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
862 ivar &= ~(0xFF << (8 * (queue & 0x3)));
863 ivar |= (msix_vector << (8 * (queue & 0x3)));
864 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
865 break;
866 case ixgbe_mac_82599EB:
867 case ixgbe_mac_X540:
868 case ixgbe_mac_X550:
869 case ixgbe_mac_X550EM_x:
870 if (direction == -1) {
871
872 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
873 index = ((queue & 1) * 8);
874 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
875 ivar &= ~(0xFF << index);
876 ivar |= (msix_vector << index);
877 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
878 break;
879 } else {
880
881 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
882 index = ((16 * (queue & 1)) + (8 * direction));
883 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
884 ivar &= ~(0xFF << index);
885 ivar |= (msix_vector << index);
886 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
887 break;
888 }
889 default:
890 break;
891 }
892}
893
894static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
895 u64 qmask)
896{
897 u32 mask;
898
899 switch (adapter->hw.mac.type) {
900 case ixgbe_mac_82598EB:
901 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
902 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
903 break;
904 case ixgbe_mac_82599EB:
905 case ixgbe_mac_X540:
906 case ixgbe_mac_X550:
907 case ixgbe_mac_X550EM_x:
908 mask = (qmask & 0xFFFFFFFF);
909 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
910 mask = (qmask >> 32);
911 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
912 break;
913 default:
914 break;
915 }
916}
917
918void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
919 struct ixgbe_tx_buffer *tx_buffer)
920{
921 if (tx_buffer->skb) {
922 dev_kfree_skb_any(tx_buffer->skb);
923 if (dma_unmap_len(tx_buffer, len))
924 dma_unmap_single(ring->dev,
925 dma_unmap_addr(tx_buffer, dma),
926 dma_unmap_len(tx_buffer, len),
927 DMA_TO_DEVICE);
928 } else if (dma_unmap_len(tx_buffer, len)) {
929 dma_unmap_page(ring->dev,
930 dma_unmap_addr(tx_buffer, dma),
931 dma_unmap_len(tx_buffer, len),
932 DMA_TO_DEVICE);
933 }
934 tx_buffer->next_to_watch = NULL;
935 tx_buffer->skb = NULL;
936 dma_unmap_len_set(tx_buffer, len, 0);
937
938}
939
940static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
941{
942 struct ixgbe_hw *hw = &adapter->hw;
943 struct ixgbe_hw_stats *hwstats = &adapter->stats;
944 int i;
945 u32 data;
946
947 if ((hw->fc.current_mode != ixgbe_fc_full) &&
948 (hw->fc.current_mode != ixgbe_fc_rx_pause))
949 return;
950
951 switch (hw->mac.type) {
952 case ixgbe_mac_82598EB:
953 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
954 break;
955 default:
956 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
957 }
958 hwstats->lxoffrxc += data;
959
960
961 if (!data)
962 return;
963
964 for (i = 0; i < adapter->num_tx_queues; i++)
965 clear_bit(__IXGBE_HANG_CHECK_ARMED,
966 &adapter->tx_ring[i]->state);
967}
968
969static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
970{
971 struct ixgbe_hw *hw = &adapter->hw;
972 struct ixgbe_hw_stats *hwstats = &adapter->stats;
973 u32 xoff[8] = {0};
974 u8 tc;
975 int i;
976 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
977
978 if (adapter->ixgbe_ieee_pfc)
979 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
980
981 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
982 ixgbe_update_xoff_rx_lfc(adapter);
983 return;
984 }
985
986
987 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
988 u32 pxoffrxc;
989
990 switch (hw->mac.type) {
991 case ixgbe_mac_82598EB:
992 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
993 break;
994 default:
995 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
996 }
997 hwstats->pxoffrxc[i] += pxoffrxc;
998
999 tc = netdev_get_prio_tc_map(adapter->netdev, i);
1000 xoff[tc] += pxoffrxc;
1001 }
1002
1003
1004 for (i = 0; i < adapter->num_tx_queues; i++) {
1005 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
1006
1007 tc = tx_ring->dcb_tc;
1008 if (xoff[tc])
1009 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1010 }
1011}
1012
1013static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
1014{
1015 return ring->stats.packets;
1016}
1017
1018static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
1019{
1020 struct ixgbe_adapter *adapter;
1021 struct ixgbe_hw *hw;
1022 u32 head, tail;
1023
1024 if (ring->l2_accel_priv)
1025 adapter = ring->l2_accel_priv->real_adapter;
1026 else
1027 adapter = netdev_priv(ring->netdev);
1028
1029 hw = &adapter->hw;
1030 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
1031 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
1032
1033 if (head != tail)
1034 return (head < tail) ?
1035 tail - head : (tail + ring->count - head);
1036
1037 return 0;
1038}
1039
1040static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
1041{
1042 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
1043 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1044 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
1045
1046 clear_check_for_tx_hang(tx_ring);
1047
	/* Check for a hung queue, but be thorough.  This verifies that a
	 * transmit has completed since the previous check AND there is at
	 * least one packet pending.  The ARMED bit flags a potential hang;
	 * by requiring the condition to hold for two checks in a row we
	 * avoid false positives from completions that are still in flight.
	 */
1060 if (tx_done_old == tx_done && tx_pending)
1061
1062 return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
1063 &tx_ring->state);
1064
1065 tx_ring->tx_stats.tx_done_old = tx_done;
1066
1067 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
1068
1069 return false;
1070}
1071
1072
1073
1074
1075
1076static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
1077{
1078
1079
1080 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1081 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
1082 e_warn(drv, "initiating reset due to tx timeout\n");
1083 ixgbe_service_event_schedule(adapter);
1084 }
1085}
1086
1087
1088
1089
1090
1091
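/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 */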
1092static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1093 struct ixgbe_ring *tx_ring)
1094{
1095 struct ixgbe_adapter *adapter = q_vector->adapter;
1096 struct ixgbe_tx_buffer *tx_buffer;
1097 union ixgbe_adv_tx_desc *tx_desc;
1098 unsigned int total_bytes = 0, total_packets = 0;
1099 unsigned int budget = q_vector->tx.work_limit;
1100 unsigned int i = tx_ring->next_to_clean;
1101
1102 if (test_bit(__IXGBE_DOWN, &adapter->state))
1103 return true;
1104
1105 tx_buffer = &tx_ring->tx_buffer_info[i];
1106 tx_desc = IXGBE_TX_DESC(tx_ring, i);
1107 i -= tx_ring->count;
1108
1109 do {
1110 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
1111
1112
1113 if (!eop_desc)
1114 break;
1115
1116
1117 read_barrier_depends();
1118
1119
1120 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1121 break;
1122
1123
1124 tx_buffer->next_to_watch = NULL;
1125
1126
1127 total_bytes += tx_buffer->bytecount;
1128 total_packets += tx_buffer->gso_segs;
1129
1130
1131 dev_consume_skb_any(tx_buffer->skb);
1132
1133
1134 dma_unmap_single(tx_ring->dev,
1135 dma_unmap_addr(tx_buffer, dma),
1136 dma_unmap_len(tx_buffer, len),
1137 DMA_TO_DEVICE);
1138
1139
1140 tx_buffer->skb = NULL;
1141 dma_unmap_len_set(tx_buffer, len, 0);
1142
1143
1144 while (tx_desc != eop_desc) {
1145 tx_buffer++;
1146 tx_desc++;
1147 i++;
1148 if (unlikely(!i)) {
1149 i -= tx_ring->count;
1150 tx_buffer = tx_ring->tx_buffer_info;
1151 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1152 }
1153
1154
1155 if (dma_unmap_len(tx_buffer, len)) {
1156 dma_unmap_page(tx_ring->dev,
1157 dma_unmap_addr(tx_buffer, dma),
1158 dma_unmap_len(tx_buffer, len),
1159 DMA_TO_DEVICE);
1160 dma_unmap_len_set(tx_buffer, len, 0);
1161 }
1162 }
1163
1164
1165 tx_buffer++;
1166 tx_desc++;
1167 i++;
1168 if (unlikely(!i)) {
1169 i -= tx_ring->count;
1170 tx_buffer = tx_ring->tx_buffer_info;
1171 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
1172 }
1173
1174
1175 prefetch(tx_desc);
1176
1177
1178 budget--;
1179 } while (likely(budget));
1180
1181 i += tx_ring->count;
1182 tx_ring->next_to_clean = i;
1183 u64_stats_update_begin(&tx_ring->syncp);
1184 tx_ring->stats.bytes += total_bytes;
1185 tx_ring->stats.packets += total_packets;
1186 u64_stats_update_end(&tx_ring->syncp);
1187 q_vector->tx.total_bytes += total_bytes;
1188 q_vector->tx.total_packets += total_packets;
1189
1190 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1191
1192 struct ixgbe_hw *hw = &adapter->hw;
1193 e_err(drv, "Detected Tx Unit Hang\n"
1194 " Tx Queue <%d>\n"
1195 " TDH, TDT <%x>, <%x>\n"
1196 " next_to_use <%x>\n"
1197 " next_to_clean <%x>\n"
1198 "tx_buffer_info[next_to_clean]\n"
1199 " time_stamp <%lx>\n"
1200 " jiffies <%lx>\n",
1201 tx_ring->queue_index,
1202 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
1203 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
1204 tx_ring->next_to_use, i,
1205 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
1206
1207 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
1208
1209 e_info(probe,
1210 "tx hang %d detected on queue %d, resetting adapter\n",
1211 adapter->tx_timeout_count + 1, tx_ring->queue_index);
1212
1213
1214 ixgbe_tx_timeout_reset(adapter);
1215
1216
1217 return true;
1218 }
1219
1220 netdev_tx_completed_queue(txring_txq(tx_ring),
1221 total_packets, total_bytes);
1222
1223#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
1224 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1225 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1226
1227
1228
1229 smp_mb();
1230 if (__netif_subqueue_stopped(tx_ring->netdev,
1231 tx_ring->queue_index)
1232 && !test_bit(__IXGBE_DOWN, &adapter->state)) {
1233 netif_wake_subqueue(tx_ring->netdev,
1234 tx_ring->queue_index);
1235 ++tx_ring->tx_stats.restart_queue;
1236 }
1237 }
1238
1239 return !!budget;
1240}
1241
1242#ifdef CONFIG_IXGBE_DCA
1243static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
1244 struct ixgbe_ring *tx_ring,
1245 int cpu)
1246{
1247 struct ixgbe_hw *hw = &adapter->hw;
1248 u32 txctrl = 0;
1249 u16 reg_offset;
1250
1251 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1252 txctrl = dca3_get_tag(tx_ring->dev, cpu);
1253
1254 switch (hw->mac.type) {
1255 case ixgbe_mac_82598EB:
1256 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
1257 break;
1258 case ixgbe_mac_82599EB:
1259 case ixgbe_mac_X540:
1260 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
1261 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
1262 break;
1263 default:
1264
1265 return;
1266 }
1267
1268
1269
1270
1271
1272
1273 txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1274 IXGBE_DCA_TXCTRL_DATA_RRO_EN |
1275 IXGBE_DCA_TXCTRL_DESC_DCA_EN;
1276
1277 IXGBE_WRITE_REG(hw, reg_offset, txctrl);
1278}
1279
1280static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
1281 struct ixgbe_ring *rx_ring,
1282 int cpu)
1283{
1284 struct ixgbe_hw *hw = &adapter->hw;
1285 u32 rxctrl = 0;
1286 u8 reg_idx = rx_ring->reg_idx;
1287
1288 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1289 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
1290
1291 switch (hw->mac.type) {
1292 case ixgbe_mac_82599EB:
1293 case ixgbe_mac_X540:
1294 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
1295 break;
1296 default:
1297 break;
1298 }
1299
1300
1301
1302
1303
1304
1305 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1306 IXGBE_DCA_RXCTRL_DATA_DCA_EN |
1307 IXGBE_DCA_RXCTRL_DESC_DCA_EN;
1308
1309 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
1310}
1311
1312static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
1313{
1314 struct ixgbe_adapter *adapter = q_vector->adapter;
1315 struct ixgbe_ring *ring;
1316 int cpu = get_cpu();
1317
1318 if (q_vector->cpu == cpu)
1319 goto out_no_update;
1320
1321 ixgbe_for_each_ring(ring, q_vector->tx)
1322 ixgbe_update_tx_dca(adapter, ring, cpu);
1323
1324 ixgbe_for_each_ring(ring, q_vector->rx)
1325 ixgbe_update_rx_dca(adapter, ring, cpu);
1326
1327 q_vector->cpu = cpu;
1328out_no_update:
1329 put_cpu();
1330}
1331
1332static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
1333{
1334 int i;
1335
1336
1337 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1338 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1339 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1340 else
1341 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1342 IXGBE_DCA_CTRL_DCA_DISABLE);
1343
1344 for (i = 0; i < adapter->num_q_vectors; i++) {
1345 adapter->q_vector[i]->cpu = -1;
1346 ixgbe_update_dca(adapter->q_vector[i]);
1347 }
1348}
1349
1350static int __ixgbe_notify_dca(struct device *dev, void *data)
1351{
1352 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1353 unsigned long event = *(unsigned long *)data;
1354
1355 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1356 return 0;
1357
1358 switch (event) {
1359 case DCA_PROVIDER_ADD:
1360
1361 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1362 break;
1363 if (dca_add_requester(dev) == 0) {
1364 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1365 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1366 IXGBE_DCA_CTRL_DCA_MODE_CB2);
1367 break;
1368 }
1369
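		/* fall through - DCA could not be enabled */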
1370 case DCA_PROVIDER_REMOVE:
1371 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1372 dca_remove_requester(dev);
1373 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1374 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
1375 IXGBE_DCA_CTRL_DCA_DISABLE);
1376 }
1377 break;
1378 }
1379
1380 return 0;
1381}
1382
1383#endif
1384
1385#define IXGBE_RSS_L4_TYPES_MASK \
1386 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
1387 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
1388 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
1389 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
1390
1391static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1392 union ixgbe_adv_rx_desc *rx_desc,
1393 struct sk_buff *skb)
1394{
1395 u16 rss_type;
1396
1397 if (!(ring->netdev->features & NETIF_F_RXHASH))
1398 return;
1399
1400 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
1401 IXGBE_RXDADV_RSSTYPE_MASK;
1402
1403 if (!rss_type)
1404 return;
1405
1406 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1407 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
1408 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
1409}
1410
1411#ifdef IXGBE_FCOE
1412
1413
1414
1415
1416
1417
1418
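/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */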
1419static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1420 union ixgbe_adv_rx_desc *rx_desc)
1421{
1422 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1423
1424 return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1425 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1426 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1427 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1428}
1429
1430#endif
1431
1432
1433
1434
1435
1436
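/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 */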
1437static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1438 union ixgbe_adv_rx_desc *rx_desc,
1439 struct sk_buff *skb)
1440{
1441 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1442 __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
1443 bool encap_pkt = false;
1444
1445 skb_checksum_none_assert(skb);
1446
1447
1448 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1449 return;
1450
1451 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) &&
1452 (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
1453 encap_pkt = true;
1454 skb->encapsulation = 1;
1455 }
1456
1457
1458 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1459 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1460 ring->rx_stats.csum_err++;
1461 return;
1462 }
1463
1464 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1465 return;
1466
1467 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1468
1469
1470
1471
1472 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1473 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1474 return;
1475
1476 ring->rx_stats.csum_err++;
1477 return;
1478 }
1479
1480
1481 skb->ip_summed = CHECKSUM_UNNECESSARY;
1482 if (encap_pkt) {
1483 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
1484 return;
1485
1486 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
1487 ring->rx_stats.csum_err++;
1488 return;
1489 }
1490
1491 skb->csum_level = 1;
1492 }
1493}
1494
1495static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1496 struct ixgbe_rx_buffer *bi)
1497{
1498 struct page *page = bi->page;
1499 dma_addr_t dma;
1500
1501
1502 if (likely(page))
1503 return true;
1504
1505
1506 page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
1507 if (unlikely(!page)) {
1508 rx_ring->rx_stats.alloc_rx_page_failed++;
1509 return false;
1510 }
1511
1512
1513 dma = dma_map_page(rx_ring->dev, page, 0,
1514 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1515
1516
1517
1518
1519
1520 if (dma_mapping_error(rx_ring->dev, dma)) {
1521 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1522
1523 rx_ring->rx_stats.alloc_rx_page_failed++;
1524 return false;
1525 }
1526
1527 bi->dma = dma;
1528 bi->page = page;
1529 bi->page_offset = 0;
1530
1531 return true;
1532}
1533
1534
1535
1536
1537
1538
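/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 */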
1539void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1540{
1541 union ixgbe_adv_rx_desc *rx_desc;
1542 struct ixgbe_rx_buffer *bi;
1543 u16 i = rx_ring->next_to_use;
1544
1545
1546 if (!cleaned_count)
1547 return;
1548
1549 rx_desc = IXGBE_RX_DESC(rx_ring, i);
1550 bi = &rx_ring->rx_buffer_info[i];
1551 i -= rx_ring->count;
1552
1553 do {
1554 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1555 break;
1556
1557
1558
1559
1560
1561 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1562
1563 rx_desc++;
1564 bi++;
1565 i++;
1566 if (unlikely(!i)) {
1567 rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1568 bi = rx_ring->rx_buffer_info;
1569 i -= rx_ring->count;
1570 }
1571
1572
1573 rx_desc->wb.upper.status_error = 0;
1574
1575 cleaned_count--;
1576 } while (cleaned_count);
1577
1578 i += rx_ring->count;
1579
1580 if (rx_ring->next_to_use != i) {
1581 rx_ring->next_to_use = i;
1582
1583
1584 rx_ring->next_to_alloc = i;
1585
1586
1587
1588
1589
1590
1591 wmb();
1592 writel(i, rx_ring->tail);
1593 }
1594}
1595
1596static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1597 struct sk_buff *skb)
1598{
1599 u16 hdr_len = skb_headlen(skb);
1600
1601
1602 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1603 IXGBE_CB(skb)->append_cnt);
1604 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1605}
1606
1607static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1608 struct sk_buff *skb)
1609{
1610
1611 if (!IXGBE_CB(skb)->append_cnt)
1612 return;
1613
1614 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1615 rx_ring->rx_stats.rsc_flush++;
1616
1617 ixgbe_set_rsc_gso_size(rx_ring, skb);
1618
1619
1620 IXGBE_CB(skb)->append_cnt = 0;
1621}
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
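/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 */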
1633static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1634 union ixgbe_adv_rx_desc *rx_desc,
1635 struct sk_buff *skb)
1636{
1637 struct net_device *dev = rx_ring->netdev;
1638
1639 ixgbe_update_rsc_stats(rx_ring, skb);
1640
1641 ixgbe_rx_hash(rx_ring, rx_desc, skb);
1642
1643 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1644
1645 if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
1646 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);
1647
1648 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1649 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1650 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1651 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1652 }
1653
1654 skb_record_rx_queue(skb, rx_ring->queue_index);
1655
1656 skb->protocol = eth_type_trans(skb, dev);
1657}
1658
1659static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1660 struct sk_buff *skb)
1661{
1662 if (ixgbe_qv_busy_polling(q_vector))
1663 netif_receive_skb(skb);
1664 else
1665 napi_gro_receive(&q_vector->napi, skb);
1666}
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
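/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */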
1679static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1680 union ixgbe_adv_rx_desc *rx_desc,
1681 struct sk_buff *skb)
1682{
1683 u32 ntc = rx_ring->next_to_clean + 1;
1684
1685
1686 ntc = (ntc < rx_ring->count) ? ntc : 0;
1687 rx_ring->next_to_clean = ntc;
1688
1689 prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1690
1691
1692 if (ring_is_rsc_enabled(rx_ring)) {
1693 __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1694 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1695
1696 if (unlikely(rsc_enabled)) {
1697 u32 rsc_cnt = le32_to_cpu(rsc_enabled);
1698
1699 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1700 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1701
1702
1703 ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1704 ntc &= IXGBE_RXDADV_NEXTP_MASK;
1705 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1706 }
1707 }
1708
1709
1710 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1711 return false;
1712
1713
1714 rx_ring->rx_buffer_info[ntc].skb = skb;
1715 rx_ring->rx_stats.non_eop_descs++;
1716
1717 return true;
1718}
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
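/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * Copies the packet headers out of the first page fragment and into the
 * linear part of the skb, then shrinks the fragment accordingly so that
 * header data can be accessed directly while payload stays in the page.
 */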
1732static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1733 struct sk_buff *skb)
1734{
1735 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1736 unsigned char *va;
1737 unsigned int pull_len;
1738
1739
1740
1741
1742
1743
1744 va = skb_frag_address(frag);
1745
1746
1747
1748
1749
1750 pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
1751
1752
1753 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1754
1755
1756 skb_frag_size_sub(frag, pull_len);
1757 frag->page_offset += pull_len;
1758 skb->data_len -= pull_len;
1759 skb->tail += pull_len;
1760}
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
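/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * Provides a DMA sync (or unmap, if the page was already released) for the
 * first fragment of an skb, which cannot be unmapped until the end of
 * packet descriptor for the buffer chain has been reached.
 */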
1772static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1773 struct sk_buff *skb)
1774{
1775
1776 if (unlikely(IXGBE_CB(skb)->page_released)) {
1777 dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
1778 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1779 IXGBE_CB(skb)->page_released = false;
1780 } else {
1781 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1782
1783 dma_sync_single_range_for_cpu(rx_ring->dev,
1784 IXGBE_CB(skb)->dma,
1785 frag->page_offset,
1786 ixgbe_rx_bufsz(rx_ring),
1787 DMA_FROM_DEVICE);
1788 }
1789 IXGBE_CB(skb)->dma = 0;
1790}
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
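/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Drops frames with hardware-reported errors (unless RXALL is enabled),
 * pulls header data into the linear area for page-only buffers, and pads
 * short frames so they qualify as valid Ethernet frames.
 *
 * Returns true if an error was encountered and skb was freed.
 */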
1810static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1811 union ixgbe_adv_rx_desc *rx_desc,
1812 struct sk_buff *skb)
1813{
1814 struct net_device *netdev = rx_ring->netdev;
1815
1816
1817 if (unlikely(ixgbe_test_staterr(rx_desc,
1818 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1819 !(netdev->features & NETIF_F_RXALL))) {
1820 dev_kfree_skb_any(skb);
1821 return true;
1822 }
1823
1824
1825 if (skb_is_nonlinear(skb))
1826 ixgbe_pull_tail(rx_ring, skb);
1827
1828#ifdef IXGBE_FCOE
1829
1830 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1831 return false;
1832
1833#endif
1834
1835 if (eth_skb_pad(skb))
1836 return true;
1837
1838 return false;
1839}
1840
1841
1842
1843
1844
1845
1846
1847
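/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */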
1848static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1849 struct ixgbe_rx_buffer *old_buff)
1850{
1851 struct ixgbe_rx_buffer *new_buff;
1852 u16 nta = rx_ring->next_to_alloc;
1853
1854 new_buff = &rx_ring->rx_buffer_info[nta];
1855
1856
1857 nta++;
1858 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1859
1860
1861 *new_buff = *old_buff;
1862
1863
1864 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
1865 new_buff->page_offset,
1866 ixgbe_rx_bufsz(rx_ring),
1867 DMA_FROM_DEVICE);
1868}
1869
1870static inline bool ixgbe_page_is_reserved(struct page *page)
1871{
1872 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1873}
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
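/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb,
 * either through a direct copy if the data is smaller than the skb header
 * size, or by attaching the page as a frag to the skb.  It then updates the
 * page offset and returns true if the buffer can be reused by the adapter.
 */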
1890static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
1891 struct ixgbe_rx_buffer *rx_buffer,
1892 union ixgbe_adv_rx_desc *rx_desc,
1893 struct sk_buff *skb)
1894{
1895 struct page *page = rx_buffer->page;
1896 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
1897#if (PAGE_SIZE < 8192)
1898 unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
1899#else
1900 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
1901 unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
1902 ixgbe_rx_bufsz(rx_ring);
1903#endif
1904
1905 if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
1906 unsigned char *va = page_address(page) + rx_buffer->page_offset;
1907
1908 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1909
1910
1911 if (likely(!ixgbe_page_is_reserved(page)))
1912 return true;
1913
1914
1915 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1916 return false;
1917 }
1918
1919 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1920 rx_buffer->page_offset, size, truesize);
1921
1922
1923 if (unlikely(ixgbe_page_is_reserved(page)))
1924 return false;
1925
1926#if (PAGE_SIZE < 8192)
1927
1928 if (unlikely(page_count(page) != 1))
1929 return false;
1930
1931
1932 rx_buffer->page_offset ^= truesize;
1933#else
1934
1935 rx_buffer->page_offset += truesize;
1936
1937 if (rx_buffer->page_offset > last_offset)
1938 return false;
1939#endif
1940
1941
1942
1943
1944 atomic_inc(&page->_count);
1945
1946 return true;
1947}
1948
1949static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
1950 union ixgbe_adv_rx_desc *rx_desc)
1951{
1952 struct ixgbe_rx_buffer *rx_buffer;
1953 struct sk_buff *skb;
1954 struct page *page;
1955
1956 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1957 page = rx_buffer->page;
1958 prefetchw(page);
1959
1960 skb = rx_buffer->skb;
1961
1962 if (likely(!skb)) {
1963 void *page_addr = page_address(page) +
1964 rx_buffer->page_offset;
1965
1966
1967 prefetch(page_addr);
1968#if L1_CACHE_BYTES < 128
1969 prefetch(page_addr + L1_CACHE_BYTES);
1970#endif
1971
1972
1973 skb = napi_alloc_skb(&rx_ring->q_vector->napi,
1974 IXGBE_RX_HDR_SIZE);
1975 if (unlikely(!skb)) {
1976 rx_ring->rx_stats.alloc_rx_buff_failed++;
1977 return NULL;
1978 }
1979
1980
1981
1982
1983
1984
1985 prefetchw(skb->data);
1986
1987
1988
1989
1990
1991
1992
1993 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1994 goto dma_sync;
1995
1996 IXGBE_CB(skb)->dma = rx_buffer->dma;
1997 } else {
1998 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
1999 ixgbe_dma_sync_frag(rx_ring, skb);
2000
2001dma_sync:
2002
2003 dma_sync_single_range_for_cpu(rx_ring->dev,
2004 rx_buffer->dma,
2005 rx_buffer->page_offset,
2006 ixgbe_rx_bufsz(rx_ring),
2007 DMA_FROM_DEVICE);
2008
2009 rx_buffer->skb = NULL;
2010 }
2011
2012
2013 if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
2014
2015 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
2016 } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
2017
2018 IXGBE_CB(skb)->page_released = true;
2019 } else {
2020
2021 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
2022 ixgbe_rx_pg_size(rx_ring),
2023 DMA_FROM_DEVICE);
2024 }
2025
2026
2027 rx_buffer->page = NULL;
2028
2029 return skb;
2030}
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
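/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing, keeping pages mapped for the device and copying or attaching
 * data to skbs as descriptors complete.
 *
 * Returns amount of work completed
 */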
2045static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
2046 struct ixgbe_ring *rx_ring,
2047 const int budget)
2048{
2049 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2050#ifdef IXGBE_FCOE
2051 struct ixgbe_adapter *adapter = q_vector->adapter;
2052 int ddp_bytes;
2053 unsigned int mss = 0;
2054#endif
2055 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
2056
2057 while (likely(total_rx_packets < budget)) {
2058 union ixgbe_adv_rx_desc *rx_desc;
2059 struct sk_buff *skb;
2060
2061
2062 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
2063 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
2064 cleaned_count = 0;
2065 }
2066
2067 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
2068
2069 if (!rx_desc->wb.upper.status_error)
2070 break;
2071
2072
2073
2074
2075
2076 dma_rmb();
2077
2078
2079 skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
2080
2081
2082 if (!skb)
2083 break;
2084
2085 cleaned_count++;
2086
2087
2088 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
2089 continue;
2090
2091
2092 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
2093 continue;
2094
2095
2096 total_rx_bytes += skb->len;
2097
2098
2099 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
2100
2101#ifdef IXGBE_FCOE
2102
2103 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
2104 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
2105
2106 if (ddp_bytes > 0) {
2107 if (!mss) {
2108 mss = rx_ring->netdev->mtu -
2109 sizeof(struct fcoe_hdr) -
2110 sizeof(struct fc_frame_header) -
2111 sizeof(struct fcoe_crc_eof);
2112 if (mss > 512)
2113 mss &= ~511;
2114 }
2115 total_rx_bytes += ddp_bytes;
2116 total_rx_packets += DIV_ROUND_UP(ddp_bytes,
2117 mss);
2118 }
2119 if (!ddp_bytes) {
2120 dev_kfree_skb_any(skb);
2121 continue;
2122 }
2123 }
2124
2125#endif
2126 skb_mark_napi_id(skb, &q_vector->napi);
2127 ixgbe_rx_skb(q_vector, skb);
2128
2129
2130 total_rx_packets++;
2131 }
2132
2133 u64_stats_update_begin(&rx_ring->syncp);
2134 rx_ring->stats.packets += total_rx_packets;
2135 rx_ring->stats.bytes += total_rx_bytes;
2136 u64_stats_update_end(&rx_ring->syncp);
2137 q_vector->rx.total_packets += total_rx_packets;
2138 q_vector->rx.total_bytes += total_rx_bytes;
2139
2140 return total_rx_packets;
2141}
2142
2143#ifdef CONFIG_NET_RX_BUSY_POLL
2144
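/* must be called with local_bh_disable()d */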
2145static int ixgbe_low_latency_recv(struct napi_struct *napi)
2146{
2147 struct ixgbe_q_vector *q_vector =
2148 container_of(napi, struct ixgbe_q_vector, napi);
2149 struct ixgbe_adapter *adapter = q_vector->adapter;
2150 struct ixgbe_ring *ring;
2151 int found = 0;
2152
2153 if (test_bit(__IXGBE_DOWN, &adapter->state))
2154 return LL_FLUSH_FAILED;
2155
2156 if (!ixgbe_qv_lock_poll(q_vector))
2157 return LL_FLUSH_BUSY;
2158
2159 ixgbe_for_each_ring(ring, q_vector->rx) {
2160 found = ixgbe_clean_rx_irq(q_vector, ring, 4);
2161#ifdef BP_EXTENDED_STATS
2162 if (found)
2163 ring->stats.cleaned += found;
2164 else
2165 ring->stats.misses++;
2166#endif
2167 if (found)
2168 break;
2169 }
2170
2171 ixgbe_qv_unlock_poll(q_vector);
2172
2173 return found;
2174}
2175#endif
2176
2177
2178
2179
2180
2181
2182
2183
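/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 */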
2184static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
2185{
2186 struct ixgbe_q_vector *q_vector;
2187 int v_idx;
2188 u32 mask;
2189
2190
2191 if (adapter->num_vfs > 32) {
2192 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
2193 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2194 }
2195
2196
2197
2198
2199
2200 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
2201 struct ixgbe_ring *ring;
2202 q_vector = adapter->q_vector[v_idx];
2203
2204 ixgbe_for_each_ring(ring, q_vector->rx)
2205 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
2206
2207 ixgbe_for_each_ring(ring, q_vector->tx)
2208 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
2209
2210 ixgbe_write_eitr(q_vector);
2211 }
2212
2213 switch (adapter->hw.mac.type) {
2214 case ixgbe_mac_82598EB:
2215 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2216 v_idx);
2217 break;
2218 case ixgbe_mac_82599EB:
2219 case ixgbe_mac_X540:
2220 case ixgbe_mac_X550:
2221 case ixgbe_mac_X550EM_x:
2222 ixgbe_set_ivar(adapter, -1, 1, v_idx);
2223 break;
2224 default:
2225 break;
2226 }
2227 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
2228
2229
2230 mask = IXGBE_EIMS_ENABLE_MASK;
2231 mask &= ~(IXGBE_EIMS_OTHER |
2232 IXGBE_EIMS_MAILBOX |
2233 IXGBE_EIMS_LSC);
2234
2235 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
2236}
2237
2238enum latency_range {
2239 lowest_latency = 0,
2240 low_latency = 1,
2241 bulk_latency = 2,
2242 latency_invalid = 255
2243};
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
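/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packet and byte counts during the last
 * interrupt.  Per-interrupt computation gives faster updates and a more
 * accurate ITR for the current traffic pattern.
 */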
2260static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
2261 struct ixgbe_ring_container *ring_container)
2262{
2263 int bytes = ring_container->total_bytes;
2264 int packets = ring_container->total_packets;
2265 u32 timepassed_us;
2266 u64 bytes_perint;
2267 u8 itr_setting = ring_container->itr;
2268
2269 if (packets == 0)
2270 return;
2271
2272
2273
2274
2275
2276
2277
2278 timepassed_us = q_vector->itr >> 2;
2279 if (timepassed_us == 0)
2280 return;
2281
2282 bytes_perint = bytes / timepassed_us;
2283
2284 switch (itr_setting) {
2285 case lowest_latency:
2286 if (bytes_perint > 10)
2287 itr_setting = low_latency;
2288 break;
2289 case low_latency:
2290 if (bytes_perint > 20)
2291 itr_setting = bulk_latency;
2292 else if (bytes_perint <= 10)
2293 itr_setting = lowest_latency;
2294 break;
2295 case bulk_latency:
2296 if (bytes_perint <= 20)
2297 itr_setting = low_latency;
2298 break;
2299 }
2300
2301
2302 ring_container->total_bytes = 0;
2303 ring_container->total_packets = 0;
2304
2305
2306 ring_container->itr = itr_setting;
2307}
2308
2309
2310
2311
2312
2313
2314
2315
2316
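/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken into account when setting here.
 */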
2317void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
2318{
2319 struct ixgbe_adapter *adapter = q_vector->adapter;
2320 struct ixgbe_hw *hw = &adapter->hw;
2321 int v_idx = q_vector->v_idx;
2322 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
2323
2324 switch (adapter->hw.mac.type) {
2325 case ixgbe_mac_82598EB:
2326
2327 itr_reg |= (itr_reg << 16);
2328 break;
2329 case ixgbe_mac_82599EB:
2330 case ixgbe_mac_X540:
2331 case ixgbe_mac_X550:
2332 case ixgbe_mac_X550EM_x:
2333
2334
2335
2336
2337 itr_reg |= IXGBE_EITR_CNT_WDIS;
2338 break;
2339 default:
2340 break;
2341 }
2342 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
2343}
2344
2345static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
2346{
2347 u32 new_itr = q_vector->itr;
2348 u8 current_itr;
2349
2350 ixgbe_update_itr(q_vector, &q_vector->tx);
2351 ixgbe_update_itr(q_vector, &q_vector->rx);
2352
2353 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
2354
2355 switch (current_itr) {
2356
2357 case lowest_latency:
2358 new_itr = IXGBE_100K_ITR;
2359 break;
2360 case low_latency:
2361 new_itr = IXGBE_20K_ITR;
2362 break;
2363 case bulk_latency:
2364 new_itr = IXGBE_12K_ITR;
2365 break;
2366 default:
2367 break;
2368 }
2369
2370 if (new_itr != q_vector->itr) {
2371
2372 new_itr = (10 * new_itr * q_vector->itr) /
2373 ((9 * new_itr) + q_vector->itr);
2374
2375
2376 q_vector->itr = new_itr;
2377
2378 ixgbe_write_eitr(q_vector);
2379 }
2380}
2381
2382
2383
2384
2385
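/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 */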
2386static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2387{
2388 struct ixgbe_hw *hw = &adapter->hw;
2389 u32 eicr = adapter->interrupt_event;
2390
2391 if (test_bit(__IXGBE_DOWN, &adapter->state))
2392 return;
2393
2394 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2395 !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2396 return;
2397
2398 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2399
2400 switch (hw->device_id) {
2401 case IXGBE_DEV_ID_82599_T3_LOM:
2402
2403
2404
2405
2406
2407
2408
2409 if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
2410 !(eicr & IXGBE_EICR_LSC))
2411 return;
2412
2413 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2414 u32 speed;
2415 bool link_up = false;
2416
2417 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2418
2419 if (link_up)
2420 return;
2421 }
2422
2423
2424 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2425 return;
2426
2427 break;
2428 default:
2429 if (adapter->hw.mac.type >= ixgbe_mac_X540)
2430 return;
2431 if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
2432 return;
2433 break;
2434 }
2435 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2436
2437 adapter->interrupt_event = 0;
2438}
2439
2440static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2441{
2442 struct ixgbe_hw *hw = &adapter->hw;
2443
2444 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2445 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2446 e_crit(probe, "Fan has stopped, replace the adapter\n");
2447
2448 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2449 }
2450}
2451
2452static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2453{
2454 struct ixgbe_hw *hw = &adapter->hw;
2455
2456 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2457 return;
2458
2459 switch (adapter->hw.mac.type) {
2460 case ixgbe_mac_82599EB:
2461
2462
2463
2464
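 /*
  * A temperature sensor event on these parts is signalled via
  * GPI SDP0 or LSC; record it and defer the actual overtemp
  * check to the service task.
  */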
2465 if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
2466 (eicr & IXGBE_EICR_LSC)) &&
2467 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2468 adapter->interrupt_event = eicr;
2469 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2470 ixgbe_service_event_schedule(adapter);
2471 return;
2472 }
2473 return;
2474 case ixgbe_mac_X540:
2475 if (!(eicr & IXGBE_EICR_TS))
2476 return;
2477 break;
2478 default:
2479 return;
2480 }
2481
2482 e_crit(drv, "%s\n", ixgbe_overheat_msg);
2483}
2484
2485static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2486{
2487 switch (hw->mac.type) {
2488 case ixgbe_mac_82598EB:
2489 if (hw->phy.type == ixgbe_phy_nl)
2490 return true;
2491 return false;
2492 case ixgbe_mac_82599EB:
2493 case ixgbe_mac_X550EM_x:
2494 switch (hw->mac.ops.get_media_type(hw)) {
2495 case ixgbe_media_type_fiber:
2496 case ixgbe_media_type_fiber_qsfp:
2497 return true;
2498 default:
2499 return false;
2500 }
2501 default:
2502 return false;
2503 }
2504}
2505
2506static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2507{
2508 struct ixgbe_hw *hw = &adapter->hw;
2509 u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
2510
2511 if (!ixgbe_is_sfp(hw))
2512 return;
2513
2514
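 /* Later MAC's use different SDP */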
2515 if (hw->mac.type >= ixgbe_mac_X540)
2516 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2517
2518 if (eicr & eicr_mask) {
2519
2520 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2521 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2522 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2523 adapter->sfp_poll_time = 0;
2524 ixgbe_service_event_schedule(adapter);
2525 }
2526 }
2527
2528 if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
2529 (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
2530
2531 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
2532 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2533 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2534 ixgbe_service_event_schedule(adapter);
2535 }
2536 }
2537}
2538
2539static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2540{
2541 struct ixgbe_hw *hw = &adapter->hw;
2542
2543 adapter->lsc_int++;
2544 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2545 adapter->link_check_timeout = jiffies;
2546 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2547 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2548 IXGBE_WRITE_FLUSH(hw);
2549 ixgbe_service_event_schedule(adapter);
2550 }
2551}
2552
2553static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2554 u64 qmask)
2555{
2556 u32 mask;
2557 struct ixgbe_hw *hw = &adapter->hw;
2558
2559 switch (hw->mac.type) {
2560 case ixgbe_mac_82598EB:
2561 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2562 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2563 break;
2564 case ixgbe_mac_82599EB:
2565 case ixgbe_mac_X540:
2566 case ixgbe_mac_X550:
2567 case ixgbe_mac_X550EM_x:
2568 mask = (qmask & 0xFFFFFFFF);
2569 if (mask)
2570 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2571 mask = (qmask >> 32);
2572 if (mask)
2573 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2574 break;
2575 default:
2576 break;
2577 }
2578
2579}
2580
2581static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2582 u64 qmask)
2583{
2584 u32 mask;
2585 struct ixgbe_hw *hw = &adapter->hw;
2586
2587 switch (hw->mac.type) {
2588 case ixgbe_mac_82598EB:
2589 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2590 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2591 break;
2592 case ixgbe_mac_82599EB:
2593 case ixgbe_mac_X540:
2594 case ixgbe_mac_X550:
2595 case ixgbe_mac_X550EM_x:
2596 mask = (qmask & 0xFFFFFFFF);
2597 if (mask)
2598 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2599 mask = (qmask >> 32);
2600 if (mask)
2601 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2602 break;
2603 default:
2604 break;
2605 }
2606
2607}
2608
2609
2610
2611
2612
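/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @queues: enable irqs for queues
 * @flush: flush register write
 **/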
2613static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2614 bool flush)
2615{
2616 struct ixgbe_hw *hw = &adapter->hw;
2617 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2618
2619
2620 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2621 mask &= ~IXGBE_EIMS_LSC;
2622
2623 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2624 switch (adapter->hw.mac.type) {
2625 case ixgbe_mac_82599EB:
2626 mask |= IXGBE_EIMS_GPI_SDP0(hw);
2627 break;
2628 case ixgbe_mac_X540:
2629 case ixgbe_mac_X550:
2630 case ixgbe_mac_X550EM_x:
2631 mask |= IXGBE_EIMS_TS;
2632 break;
2633 default:
2634 break;
2635 }
2636 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2637 mask |= IXGBE_EIMS_GPI_SDP1(hw);
2638 switch (adapter->hw.mac.type) {
2639 case ixgbe_mac_82599EB:
2640 mask |= IXGBE_EIMS_GPI_SDP1(hw);
2641 mask |= IXGBE_EIMS_GPI_SDP2(hw);
2642
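 /* fall through */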
2643 case ixgbe_mac_X540:
2644 case ixgbe_mac_X550:
2645 case ixgbe_mac_X550EM_x:
2646 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
2647 mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
2648 if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
2649 mask |= IXGBE_EICR_GPI_SDP0_X540;
2650 mask |= IXGBE_EIMS_ECC;
2651 mask |= IXGBE_EIMS_MAILBOX;
2652 break;
2653 default:
2654 break;
2655 }
2656
2657 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2658 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
2659 mask |= IXGBE_EIMS_FLOW_DIR;
2660
2661 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2662 if (queues)
2663 ixgbe_irq_enable_queues(adapter, ~0);
2664 if (flush)
2665 IXGBE_WRITE_FLUSH(&adapter->hw);
2666}
2667
2668static irqreturn_t ixgbe_msix_other(int irq, void *data)
2669{
2670 struct ixgbe_adapter *adapter = data;
2671 struct ixgbe_hw *hw = &adapter->hw;
2672 u32 eicr;
2673
2674
2675
2676
2677
2678
2679
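 /*
  * Workaround for Silicon errata.  Use clear-by-write instead
  * of clear-by-read.  Reading with EICS will return the
  * interrupt causes without clearing, which is later done
  * with the write to EICR.
  */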
2680 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2681
2682
2683
2684
2685
2686
2687
2688
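 /* The lower 16 bits of the EICR register are for the queue interrupts
  * which should be masked here in order to not accidentally clear them if
  * the bits are high when ixgbe_msix_other is called. There is a race
  * condition otherwise which results in possible performance loss
  * especially if the ixgbe_msix_other interrupt is triggering
  * consistently (as it would when PPS is turned on for the X540 device)
  */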
2689 eicr &= 0xFFFF0000;
2690
2691 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2692
2693 if (eicr & IXGBE_EICR_LSC)
2694 ixgbe_check_lsc(adapter);
2695
2696 if (eicr & IXGBE_EICR_MAILBOX)
2697 ixgbe_msg_task(adapter);
2698
2699 switch (hw->mac.type) {
2700 case ixgbe_mac_82599EB:
2701 case ixgbe_mac_X540:
2702 case ixgbe_mac_X550:
2703 case ixgbe_mac_X550EM_x:
2704 if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
2705 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2706 adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
2707 ixgbe_service_event_schedule(adapter);
2708 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2709 IXGBE_EICR_GPI_SDP0_X540);
2710 }
2711 if (eicr & IXGBE_EICR_ECC) {
2712 e_info(link, "Received ECC Err, initiating reset\n");
2713 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
2714 ixgbe_service_event_schedule(adapter);
2715 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2716 }
2717
2718 if (eicr & IXGBE_EICR_FLOW_DIR) {
2719 int reinit_count = 0;
2720 int i;
2721 for (i = 0; i < adapter->num_tx_queues; i++) {
2722 struct ixgbe_ring *ring = adapter->tx_ring[i];
2723 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
2724 &ring->state))
2725 reinit_count++;
2726 }
2727 if (reinit_count) {
2728
2729 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2730 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
2731 ixgbe_service_event_schedule(adapter);
2732 }
2733 }
2734 ixgbe_check_sfp_event(adapter, eicr);
2735 ixgbe_check_overtemp_event(adapter, eicr);
2736 break;
2737 default:
2738 break;
2739 }
2740
2741 ixgbe_check_fan_failure(adapter, eicr);
2742
2743 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2744 ixgbe_ptp_check_pps_event(adapter, eicr);
2745
2746
2747 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2748 ixgbe_irq_enable(adapter, false, false);
2749
2750 return IRQ_HANDLED;
2751}
2752
2753static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
2754{
2755 struct ixgbe_q_vector *q_vector = data;
2756
2757
2758
2759 if (q_vector->rx.ring || q_vector->tx.ring)
2760 napi_schedule(&q_vector->napi);
2761
2762 return IRQ_HANDLED;
2763}
2764
2765
2766
2767
2768
2769
2770
2771
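/**
 * ixgbe_poll - NAPI polling callback
 * @napi: structure for the q_vector we are polling
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all Tx and Rx rings associated with a q_vector.
 **/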
2772int ixgbe_poll(struct napi_struct *napi, int budget)
2773{
2774 struct ixgbe_q_vector *q_vector =
2775 container_of(napi, struct ixgbe_q_vector, napi);
2776 struct ixgbe_adapter *adapter = q_vector->adapter;
2777 struct ixgbe_ring *ring;
2778 int per_ring_budget, work_done = 0;
2779 bool clean_complete = true;
2780
2781#ifdef CONFIG_IXGBE_DCA
2782 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2783 ixgbe_update_dca(q_vector);
2784#endif
2785
2786 ixgbe_for_each_ring(ring, q_vector->tx)
2787 clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
2788
2789 if (!ixgbe_qv_lock_napi(q_vector))
2790 return budget;
2791
2792
2793
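 /* attempt to distribute budget to each queue fairly, but don't allow
  * the budget to go below 1 because we'll exit polling
  */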
2794 if (q_vector->rx.count > 1)
2795 per_ring_budget = max(budget/q_vector->rx.count, 1);
2796 else
2797 per_ring_budget = budget;
2798
2799 ixgbe_for_each_ring(ring, q_vector->rx) {
2800 int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
2801 per_ring_budget);
2802
2803 work_done += cleaned;
2804 clean_complete &= (cleaned < per_ring_budget);
2805 }
2806
2807 ixgbe_qv_unlock_napi(q_vector);
2808
2809 if (!clean_complete)
2810 return budget;
2811
2812
2813 napi_complete_done(napi, work_done);
2814 if (adapter->rx_itr_setting & 1)
2815 ixgbe_set_itr(q_vector);
2816 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2817 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
2818
2819 return 0;
2820}
2821
2822
2823
2824
2825
2826
2827
2828
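/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs requests the MSI-X vectors allocated for the
 * queue vectors and the "other" cause vector from the kernel.
 **/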
2829static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2830{
2831 struct net_device *netdev = adapter->netdev;
2832 int vector, err;
2833 int ri = 0, ti = 0;
2834
2835 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2836 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2837 struct msix_entry *entry = &adapter->msix_entries[vector];
2838
2839 if (q_vector->tx.ring && q_vector->rx.ring) {
2840 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2841 "%s-%s-%d", netdev->name, "TxRx", ri++);
2842 ti++;
2843 } else if (q_vector->rx.ring) {
2844 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2845 "%s-%s-%d", netdev->name, "rx", ri++);
2846 } else if (q_vector->tx.ring) {
2847 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2848 "%s-%s-%d", netdev->name, "tx", ti++);
2849 } else {
2850
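 /* skip this unused q_vector */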
2851 continue;
2852 }
2853 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
2854 q_vector->name, q_vector);
2855 if (err) {
2856 e_err(probe, "request_irq failed for MSIX interrupt, Error: %d\n",
2857 err);
2858 goto free_queue_irqs;
2859 }
2860
2861 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2862
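 /* assign the mask for this irq */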
2863 irq_set_affinity_hint(entry->vector,
2864 &q_vector->affinity_mask);
2865 }
2866 }
2867
2868 err = request_irq(adapter->msix_entries[vector].vector,
2869 ixgbe_msix_other, 0, netdev->name, adapter);
2870 if (err) {
2871 e_err(probe, "request_irq for msix_other failed: %d\n", err);
2872 goto free_queue_irqs;
2873 }
2874
2875 return 0;
2876
2877free_queue_irqs:
2878 while (vector) {
2879 vector--;
2880 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
2881 NULL);
2882 free_irq(adapter->msix_entries[vector].vector,
2883 adapter->q_vector[vector]);
2884 }
2885 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2886 pci_disable_msix(adapter->pdev);
2887 kfree(adapter->msix_entries);
2888 adapter->msix_entries = NULL;
2889 return err;
2890}
2891
2892
2893
2894
2895
2896
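/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to our adapter structure
 **/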
2897static irqreturn_t ixgbe_intr(int irq, void *data)
2898{
2899 struct ixgbe_adapter *adapter = data;
2900 struct ixgbe_hw *hw = &adapter->hw;
2901 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2902 u32 eicr;
2903
2904
2905
2906
2907
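 /*
  * Workaround for silicon errata on 82598.  Mask the interrupts
  * before the read of EICR.
  */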
2908 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2909
2910
2911
2912 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2913 if (!eicr) {
2914
2915
2916
2917
2918
2919
2920
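 /*
  * shared interrupt alert!
  * make sure interrupts are enabled because the read will
  * have disabled interrupts due to EIAM
  * finish the workaround of silicon errata on 82598.  Unmask
  * the interrupt that we masked before the EICR read.
  */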
2921 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2922 ixgbe_irq_enable(adapter, true, true);
2923 return IRQ_NONE;
2924 }
2925
2926 if (eicr & IXGBE_EICR_LSC)
2927 ixgbe_check_lsc(adapter);
2928
2929 switch (hw->mac.type) {
2930 case ixgbe_mac_82599EB:
2931 ixgbe_check_sfp_event(adapter, eicr);
2932
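 /* fall through */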
2933 case ixgbe_mac_X540:
2934 case ixgbe_mac_X550:
2935 case ixgbe_mac_X550EM_x:
2936 if (eicr & IXGBE_EICR_ECC) {
2937 e_info(link, "Received ECC Err, initiating reset\n");
2938 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
2939 ixgbe_service_event_schedule(adapter);
2940 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2941 }
2942 ixgbe_check_overtemp_event(adapter, eicr);
2943 break;
2944 default:
2945 break;
2946 }
2947
2948 ixgbe_check_fan_failure(adapter, eicr);
2949 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2950 ixgbe_ptp_check_pps_event(adapter, eicr);
2951
2952
2953 napi_schedule(&q_vector->napi);
2954
2955
2956
2957
2958
2959 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2960 ixgbe_irq_enable(adapter, false, false);
2961
2962 return IRQ_HANDLED;
2963}
2964
2965
2966
2967
2968
2969
2970
2971
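/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/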
2972static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2973{
2974 struct net_device *netdev = adapter->netdev;
2975 int err;
2976
2977 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2978 err = ixgbe_request_msix_irqs(adapter);
2979 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
2980 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
2981 netdev->name, adapter);
2982 else
2983 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
2984 netdev->name, adapter);
2985
2986 if (err)
2987 e_err(probe, "request_irq failed, Error %d\n", err);
2988
2989 return err;
2990}
2991
2992static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2993{
2994 int vector;
2995
2996 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2997 free_irq(adapter->pdev->irq, adapter);
2998 return;
2999 }
3000
3001 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
3002 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
3003 struct msix_entry *entry = &adapter->msix_entries[vector];
3004
3005
3006 if (!q_vector->rx.ring && !q_vector->tx.ring)
3007 continue;
3008
3009
3010 irq_set_affinity_hint(entry->vector, NULL);
3011
3012 free_irq(entry->vector, q_vector);
3013 }
3014
3015 free_irq(adapter->msix_entries[vector++].vector, adapter);
3016}
3017
3018
3019
3020
3021
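/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/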
3022static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
3023{
3024 switch (adapter->hw.mac.type) {
3025 case ixgbe_mac_82598EB:
3026 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3027 break;
3028 case ixgbe_mac_82599EB:
3029 case ixgbe_mac_X540:
3030 case ixgbe_mac_X550:
3031 case ixgbe_mac_X550EM_x:
3032 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3033 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3034 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3035 break;
3036 default:
3037 break;
3038 }
3039 IXGBE_WRITE_FLUSH(&adapter->hw);
3040 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3041 int vector;
3042
3043 for (vector = 0; vector < adapter->num_q_vectors; vector++)
3044 synchronize_irq(adapter->msix_entries[vector].vector);
3045
3046 synchronize_irq(adapter->msix_entries[vector++].vector);
3047 } else {
3048 synchronize_irq(adapter->pdev->irq);
3049 }
3050}
3051
3052
3053
3054
3055
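/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/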
3056static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
3057{
3058 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
3059
3060 ixgbe_write_eitr(q_vector);
3061
3062 ixgbe_set_ivar(adapter, 0, 0, 0);
3063 ixgbe_set_ivar(adapter, 1, 0, 0);
3064
3065 e_info(hw, "Legacy interrupt IVAR setup done\n");
3066}
3067
3068
3069
3070
3071
3072
3073
3074
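/**
 * ixgbe_configure_tx_ring - Configure a Tx descriptor ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/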
3075void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
3076 struct ixgbe_ring *ring)
3077{
3078 struct ixgbe_hw *hw = &adapter->hw;
3079 u64 tdba = ring->dma;
3080 int wait_loop = 10;
3081 u32 txdctl = IXGBE_TXDCTL_ENABLE;
3082 u8 reg_idx = ring->reg_idx;
3083
3084
3085 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
3086 IXGBE_WRITE_FLUSH(hw);
3087
3088 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
3089 (tdba & DMA_BIT_MASK(32)));
3090 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
3091 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
3092 ring->count * sizeof(union ixgbe_adv_tx_desc));
3093 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
3094 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
3095 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
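 /*
  * set WTHRESH to encourage burst writeback, it should not be set
  * higher than 1 when ITR is 0 as it could cause false TX hangs
  *
  * In order to avoid issues WTHRESH + PTHRESH should always be equal
  * to or less than the number of on chip descriptors, which is
  * currently 40.
  */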
3107 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
3108 txdctl |= (1 << 16);
3109 else
3110 txdctl |= (8 << 16);
3111
3112
3113
3114
3115
3116 txdctl |= (1 << 8) |
3117 32;
3118
3119
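 /* reinitialize flowdirector state */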
3120 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3121 ring->atr_sample_rate = adapter->atr_sample_rate;
3122 ring->atr_count = 0;
3123 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
3124 } else {
3125 ring->atr_sample_rate = 0;
3126 }
3127
3128
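 /* initialize XPS */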
3129 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
3130 struct ixgbe_q_vector *q_vector = ring->q_vector;
3131
3132 if (q_vector)
3133 netif_set_xps_queue(ring->netdev,
3134 &q_vector->affinity_mask,
3135 ring->queue_index);
3136 }
3137
3138 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
3139
3140
3141 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
3142
3143
3144 if (hw->mac.type == ixgbe_mac_82598EB &&
3145 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3146 return;
3147
3148
3149 do {
3150 usleep_range(1000, 2000);
3151 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3152 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
3153 if (!wait_loop)
3154 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
3155}
3156
3157static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
3158{
3159 struct ixgbe_hw *hw = &adapter->hw;
3160 u32 rttdcs, mtqc;
3161 u8 tcs = netdev_get_num_tc(adapter->netdev);
3162
3163 if (hw->mac.type == ixgbe_mac_82598EB)
3164 return;
3165
3166
3167 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3168 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3169 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3170
3171
3172 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3173 mtqc = IXGBE_MTQC_VT_ENA;
3174 if (tcs > 4)
3175 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3176 else if (tcs > 1)
3177 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3178 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3179 mtqc |= IXGBE_MTQC_32VF;
3180 else
3181 mtqc |= IXGBE_MTQC_64VF;
3182 } else {
3183 if (tcs > 4)
3184 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3185 else if (tcs > 1)
3186 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3187 else
3188 mtqc = IXGBE_MTQC_64Q_1PB;
3189 }
3190
3191 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
3192
3193
3194 if (tcs) {
3195 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3196 sectx |= IXGBE_SECTX_DCB;
3197 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
3198 }
3199
3200
3201 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3202 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3203}
3204
3205
3206
3207
3208
3209
3210
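/**
 * ixgbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/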
3211static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3212{
3213 struct ixgbe_hw *hw = &adapter->hw;
3214 u32 dmatxctl;
3215 u32 i;
3216
3217 ixgbe_setup_mtqc(adapter);
3218
3219 if (hw->mac.type != ixgbe_mac_82598EB) {
3220
3221 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3222 dmatxctl |= IXGBE_DMATXCTL_TE;
3223 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3224 }
3225
3226
3227 for (i = 0; i < adapter->num_tx_queues; i++)
3228 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3229}
3230
3231static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3232 struct ixgbe_ring *ring)
3233{
3234 struct ixgbe_hw *hw = &adapter->hw;
3235 u8 reg_idx = ring->reg_idx;
3236 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3237
3238 srrctl |= IXGBE_SRRCTL_DROP_EN;
3239
3240 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3241}
3242
3243static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3244 struct ixgbe_ring *ring)
3245{
3246 struct ixgbe_hw *hw = &adapter->hw;
3247 u8 reg_idx = ring->reg_idx;
3248 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3249
3250 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3251
3252 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3253}
3254
3255#ifdef CONFIG_IXGBE_DCB
3256void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3257#else
3258static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3259#endif
3260{
3261 int i;
3262 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3263
3264 if (adapter->ixgbe_ieee_pfc)
3265 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3277 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3278 for (i = 0; i < adapter->num_rx_queues; i++)
3279 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3280 } else {
3281 for (i = 0; i < adapter->num_rx_queues; i++)
3282 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3283 }
3284}
3285
3286#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3287
3288static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3289 struct ixgbe_ring *rx_ring)
3290{
3291 struct ixgbe_hw *hw = &adapter->hw;
3292 u32 srrctl;
3293 u8 reg_idx = rx_ring->reg_idx;
3294
3295 if (hw->mac.type == ixgbe_mac_82598EB) {
3296 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
3297
3298
3299
3300
3301
3302 reg_idx &= mask;
3303 }
3304
3305
3306 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3307
3308
3309 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3310
3311
3312 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3313
3314 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3315}
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
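/**
 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
 * @adapter: device handle
 *
 *  - 82598/82599/X540:     128
 *  - X550(non-SRIOV mode): 512
 *  - X550(SRIOV mode):     64
 **/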
3326u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
3327{
3328 if (adapter->hw.mac.type < ixgbe_mac_X550)
3329 return 128;
3330 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3331 return 64;
3332 else
3333 return 512;
3334}
3335
3336
3337
3338
3339
3340
3341
3342
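/**
 * ixgbe_store_reta - Write the RETA table to HW
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter->rss_indir_tbl[] to HW.
 */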
3343void ixgbe_store_reta(struct ixgbe_adapter *adapter)
3344{
3345 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3346 struct ixgbe_hw *hw = &adapter->hw;
3347 u32 reta = 0;
3348 u32 indices_multi;
3349 u8 *indir_tbl = adapter->rss_indir_tbl;
3350
3351
3352
3353
3354
3355
3356
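 /* Fill out the redirection table as follows:
  *  - 82598:      8 bit wide entries containing a pair of 4 bit RSS
  *    indices.
  *  - 82599/X540: 8 bit wide entries containing 4 bit RSS index
  *  - X550:       8 bit wide entries containing 6 bit RSS index
  */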
3357 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3358 indices_multi = 0x11;
3359 else
3360 indices_multi = 0x1;
3361
3362
3363 for (i = 0; i < reta_entries; i++) {
3364 reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
3365 if ((i & 3) == 3) {
3366 if (i < 128)
3367 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3368 else
3369 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3370 reta);
3371 reta = 0;
3372 }
3373 }
3374}
3375
3376
3377
3378
3379
3380
3381
3382
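/**
 * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter->rss_indir_tbl[] to HW.
 */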
3383static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
3384{
3385 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3386 struct ixgbe_hw *hw = &adapter->hw;
3387 u32 vfreta = 0;
3388 unsigned int pf_pool = adapter->num_vfs;
3389
3390
3391 for (i = 0; i < reta_entries; i++) {
3392 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
3393 if ((i & 3) == 3) {
3394 IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
3395 vfreta);
3396 vfreta = 0;
3397 }
3398 }
3399}
3400
3401static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
3402{
3403 struct ixgbe_hw *hw = &adapter->hw;
3404 u32 i, j;
3405 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3406 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3407
3408
3409
3410
3411
3412 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
3413 rss_i = 2;
3414
3415
3416 for (i = 0; i < 10; i++)
3417 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
3418
3419
3420 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
3421
3422 for (i = 0, j = 0; i < reta_entries; i++, j++) {
3423 if (j == rss_i)
3424 j = 0;
3425
3426 adapter->rss_indir_tbl[i] = j;
3427 }
3428
3429 ixgbe_store_reta(adapter);
3430}
3431
3432static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
3433{
3434 struct ixgbe_hw *hw = &adapter->hw;
3435 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3436 unsigned int pf_pool = adapter->num_vfs;
3437 int i, j;
3438
3439
3440 for (i = 0; i < 10; i++)
3441 IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
3442 adapter->rss_key[i]);
3443
3444
3445 for (i = 0, j = 0; i < 64; i++, j++) {
3446 if (j == rss_i)
3447 j = 0;
3448
3449 adapter->rss_indir_tbl[i] = j;
3450 }
3451
3452 ixgbe_store_vfreta(adapter);
3453}
3454
3455static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3456{
3457 struct ixgbe_hw *hw = &adapter->hw;
3458 u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
3459 u32 rxcsum;
3460
3461
3462 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3463 rxcsum |= IXGBE_RXCSUM_PCSD;
3464 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3465
3466 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3467 if (adapter->ring_feature[RING_F_RSS].mask)
3468 mrqc = IXGBE_MRQC_RSSEN;
3469 } else {
3470 u8 tcs = netdev_get_num_tc(adapter->netdev);
3471
3472 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3473 if (tcs > 4)
3474 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3475 else if (tcs > 1)
3476 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3477 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3478 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3479 else
3480 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3481 } else {
3482 if (tcs > 4)
3483 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3484 else if (tcs > 1)
3485 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3486 else
3487 mrqc = IXGBE_MRQC_RSSEN;
3488 }
3489 }
3490
3491
3492 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3493 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3494 IXGBE_MRQC_RSS_FIELD_IPV6 |
3495 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3496
3497 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3498 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3499 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3500 rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3501
3502 netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
3503 if ((hw->mac.type >= ixgbe_mac_X550) &&
3504 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
3505 unsigned int pf_pool = adapter->num_vfs;
3506
3507
3508 mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
3509 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3510
3511
3512 ixgbe_setup_vfreta(adapter);
3513 vfmrqc = IXGBE_MRQC_RSSEN;
3514 vfmrqc |= rss_field;
3515 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
3516 } else {
3517 ixgbe_setup_reta(adapter);
3518 mrqc |= rss_field;
3519 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3520 }
3521}
3522
3523
3524
3525
3526
3527
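/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 **/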
3528static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
3529 struct ixgbe_ring *ring)
3530{
3531 struct ixgbe_hw *hw = &adapter->hw;
3532 u32 rscctrl;
3533 u8 reg_idx = ring->reg_idx;
3534
3535 if (!ring_is_rsc_enabled(ring))
3536 return;
3537
3538 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
3539 rscctrl |= IXGBE_RSCCTL_RSCEN;
3540
3541
3542
3543
3544
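 /*
  * we must limit the number of descriptors so that the
  * total size of max desc * buf_len is not greater
  * than 65536
  */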
3545 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3546 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
3547}
3548
3549#define IXGBE_MAX_RX_DESC_POLL 10
3550static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3551 struct ixgbe_ring *ring)
3552{
3553 struct ixgbe_hw *hw = &adapter->hw;
3554 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3555 u32 rxdctl;
3556 u8 reg_idx = ring->reg_idx;
3557
3558 if (ixgbe_removed(hw->hw_addr))
3559 return;
3560
3561 if (hw->mac.type == ixgbe_mac_82598EB &&
3562 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3563 return;
3564
3565 do {
3566 usleep_range(1000, 2000);
3567 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3568 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3569
3570 if (!wait_loop) {
3571 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3572 "the polling period\n", reg_idx);
3573 }
3574}
3575
3576void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3577 struct ixgbe_ring *ring)
3578{
3579 struct ixgbe_hw *hw = &adapter->hw;
3580 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3581 u32 rxdctl;
3582 u8 reg_idx = ring->reg_idx;
3583
3584 if (ixgbe_removed(hw->hw_addr))
3585 return;
3586 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3587 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3588
3589
3590 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3591
3592 if (hw->mac.type == ixgbe_mac_82598EB &&
3593 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3594 return;
3595
3596
3597 do {
3598 udelay(10);
3599 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3600 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3601
3602 if (!wait_loop) {
3603 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3604 "the polling period\n", reg_idx);
3605 }
3606}
3607
3608void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3609 struct ixgbe_ring *ring)
3610{
3611 struct ixgbe_hw *hw = &adapter->hw;
3612 u64 rdba = ring->dma;
3613 u32 rxdctl;
3614 u8 reg_idx = ring->reg_idx;
3615
3616
3617 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3618 ixgbe_disable_rx_queue(adapter, ring);
3619
3620 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3621 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3622 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3623 ring->count * sizeof(union ixgbe_adv_rx_desc));
3624 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3625 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
3626 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
3627
3628 ixgbe_configure_srrctl(adapter, ring);
3629 ixgbe_configure_rscctl(adapter, ring);
3630
3631 if (hw->mac.type == ixgbe_mac_82598EB) {
3632
3633
3634
3635
3636
3637
3638
3639 rxdctl &= ~0x3FFFFF;
3640 rxdctl |= 0x080420;
3641 }
3642
3643
3644 rxdctl |= IXGBE_RXDCTL_ENABLE;
3645 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3646
3647 ixgbe_rx_desc_queue_enable(adapter, ring);
3648 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
3649}
3650
3651static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3652{
3653 struct ixgbe_hw *hw = &adapter->hw;
3654 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
3655 u16 pool;
3656
3657
3658 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3659 IXGBE_PSRTYPE_UDPHDR |
3660 IXGBE_PSRTYPE_IPV4HDR |
3661 IXGBE_PSRTYPE_L2HDR |
3662 IXGBE_PSRTYPE_IPV6HDR;
3663
3664 if (hw->mac.type == ixgbe_mac_82598EB)
3665 return;
3666
3667 if (rss_i > 3)
3668 psrtype |= 2 << 29;
3669 else if (rss_i > 1)
3670 psrtype |= 1 << 29;
3671
3672 for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
3673 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
3674}
3675
3676static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3677{
3678 struct ixgbe_hw *hw = &adapter->hw;
3679 u32 reg_offset, vf_shift;
3680 u32 gcr_ext, vmdctl;
3681 int i;
3682
3683 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3684 return;
3685
3686 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3687 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
3688 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
3689 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
3690 vmdctl |= IXGBE_VT_CTL_REPLEN;
3691 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
3692
3693 vf_shift = VMDQ_P(0) % 32;
3694 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
3695
3696
3697 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
3698 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
3699 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
3700 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
3701 if (adapter->bridge_mode == BRIDGE_MODE_VEB)
3702 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3703
3704
3705 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
3706
3707
3708
3709
3710
3711 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3712 case IXGBE_82599_VMDQ_8Q_MASK:
3713 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
3714 break;
3715 case IXGBE_82599_VMDQ_4Q_MASK:
3716 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
3717 break;
3718 default:
3719 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
3720 break;
3721 }
3722
3723 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3724
3725
3726
3727 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
3728 adapter->num_vfs);
3729
3730
3731
3732
3733 if (hw->mac.ops.set_ethertype_anti_spoofing) {
3734 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
3735 (IXGBE_ETQF_FILTER_EN |
3736 IXGBE_ETQF_TX_ANTISPOOF |
3737 IXGBE_ETH_P_LLDP));
3738
3739 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
3740 (IXGBE_ETQF_FILTER_EN |
3741 IXGBE_ETQF_TX_ANTISPOOF |
3742 ETH_P_PAUSE));
3743 }
3744
3745
3746 for (i = 0; i < adapter->num_vfs; i++) {
3747 if (!adapter->vfinfo[i].spoofchk_enabled)
3748 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
3749
3750
3751 if (hw->mac.ops.set_ethertype_anti_spoofing)
3752 hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
3753
3754
3755 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
3756 adapter->vfinfo[i].rss_query_enabled);
3757 }
3758}
3759
3760static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3761{
3762 struct ixgbe_hw *hw = &adapter->hw;
3763 struct net_device *netdev = adapter->netdev;
3764 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3765 struct ixgbe_ring *rx_ring;
3766 int i;
3767 u32 mhadd, hlreg0;
3768
3769#ifdef IXGBE_FCOE
3770
3771 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3772 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3773 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3774
3775#endif
3776
3777
3778 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3779 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
3780
3781 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3782 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3783 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3784 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3785
3786 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3787 }
3788
3789 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3790
3791 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3792 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3793
3794
3795
3796
3797
3798 for (i = 0; i < adapter->num_rx_queues; i++) {
3799 rx_ring = adapter->rx_ring[i];
3800 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3801 set_ring_rsc_enabled(rx_ring);
3802 else
3803 clear_ring_rsc_enabled(rx_ring);
3804 }
3805}
3806
3807static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3808{
3809 struct ixgbe_hw *hw = &adapter->hw;
3810 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3811
3812 switch (hw->mac.type) {
3813 case ixgbe_mac_82598EB:
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3825 break;
3826 case ixgbe_mac_X550:
3827 case ixgbe_mac_X550EM_x:
3828 if (adapter->num_vfs)
3829 rdrxctl |= IXGBE_RDRXCTL_PSP;
3830
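 /* fall through */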
3831 case ixgbe_mac_82599EB:
3832 case ixgbe_mac_X540:
3833
3834 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3835 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3836 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3837
3838 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3839 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3840 break;
3841 default:
3842
3843 return;
3844 }
3845
3846 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3847}
3848
3849
3850
3851
3852
3853
3854
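/**
 * ixgbe_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/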
3855static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3856{
3857 struct ixgbe_hw *hw = &adapter->hw;
3858 int i;
3859 u32 rxctrl, rfctl;
3860
3861
3862 hw->mac.ops.disable_rx(hw);
3863
3864 ixgbe_setup_psrtype(adapter);
3865 ixgbe_setup_rdrxctl(adapter);
3866
3867
3868 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
3869 rfctl &= ~IXGBE_RFCTL_RSC_DIS;
3870 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
3871 rfctl |= IXGBE_RFCTL_RSC_DIS;
3872 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
3873
3874
3875 ixgbe_setup_mrqc(adapter);
3876
3877
3878 ixgbe_set_rx_buffer_len(adapter);
3879
3880
3881
3882
3883
3884 for (i = 0; i < adapter->num_rx_queues; i++)
3885 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
3886
3887 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3888
3889 if (hw->mac.type == ixgbe_mac_82598EB)
3890 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3891
3892
3893 rxctrl |= IXGBE_RXCTRL_RXEN;
3894 hw->mac.ops.enable_rx_dma(hw, rxctrl);
3895}
3896
3897static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
3898 __be16 proto, u16 vid)
3899{
3900 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3901 struct ixgbe_hw *hw = &adapter->hw;
3902
3903
3904 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
3905 set_bit(vid, adapter->active_vlans);
3906
3907 return 0;
3908}
3909
3910static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
3911 __be16 proto, u16 vid)
3912{
3913 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3914 struct ixgbe_hw *hw = &adapter->hw;
3915
3916
3917 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
3918 clear_bit(vid, adapter->active_vlans);
3919
3920 return 0;
3921}
3922
3923
3924
3925
3926
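/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */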
3927static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3928{
3929 struct ixgbe_hw *hw = &adapter->hw;
3930 u32 vlnctrl;
3931 int i, j;
3932
3933 switch (hw->mac.type) {
3934 case ixgbe_mac_82598EB:
3935 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3936 vlnctrl &= ~IXGBE_VLNCTRL_VME;
3937 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3938 break;
3939 case ixgbe_mac_82599EB:
3940 case ixgbe_mac_X540:
3941 case ixgbe_mac_X550:
3942 case ixgbe_mac_X550EM_x:
3943 for (i = 0; i < adapter->num_rx_queues; i++) {
3944 struct ixgbe_ring *ring = adapter->rx_ring[i];
3945
3946 if (ring->l2_accel_priv)
3947 continue;
3948 j = ring->reg_idx;
3949 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3950 vlnctrl &= ~IXGBE_RXDCTL_VME;
3951 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3952 }
3953 break;
3954 default:
3955 break;
3956 }
3957}
3958
3959
3960
3961
3962
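/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */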
3963static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3964{
3965 struct ixgbe_hw *hw = &adapter->hw;
3966 u32 vlnctrl;
3967 int i, j;
3968
3969 switch (hw->mac.type) {
3970 case ixgbe_mac_82598EB:
3971 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3972 vlnctrl |= IXGBE_VLNCTRL_VME;
3973 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3974 break;
3975 case ixgbe_mac_82599EB:
3976 case ixgbe_mac_X540:
3977 case ixgbe_mac_X550:
3978 case ixgbe_mac_X550EM_x:
3979 for (i = 0; i < adapter->num_rx_queues; i++) {
3980 struct ixgbe_ring *ring = adapter->rx_ring[i];
3981
3982 if (ring->l2_accel_priv)
3983 continue;
3984 j = ring->reg_idx;
3985 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3986 vlnctrl |= IXGBE_RXDCTL_VME;
3987 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3988 }
3989 break;
3990 default:
3991 break;
3992 }
3993}
3994
3995static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3996{
3997 u16 vid;
3998
3999 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
4000
4001 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4002 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4003}
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
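/**
 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes the multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *                0 on no addresses written
 *                X on writing X addresses to MTA
 **/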
4014static int ixgbe_write_mc_addr_list(struct net_device *netdev)
4015{
4016 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4017 struct ixgbe_hw *hw = &adapter->hw;
4018
4019 if (!netif_running(netdev))
4020 return 0;
4021
4022 if (hw->mac.ops.update_mc_addr_list)
4023 hw->mac.ops.update_mc_addr_list(hw, netdev);
4024 else
4025 return -ENOMEM;
4026
4027#ifdef CONFIG_PCI_IOV
4028 ixgbe_restore_vf_multicasts(adapter);
4029#endif
4030
4031 return netdev_mc_count(netdev);
4032}
4033
4034#ifdef CONFIG_PCI_IOV
4035void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
4036{
4037 struct ixgbe_hw *hw = &adapter->hw;
4038 int i;
4039 for (i = 0; i < hw->mac.num_rar_entries; i++) {
4040 if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
4041 hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
4042 adapter->mac_table[i].queue,
4043 IXGBE_RAH_AV);
4044 else
4045 hw->mac.ops.clear_rar(hw, i);
4046
4047 adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
4048 }
4049}
4050#endif
4051
4052static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
4053{
4054 struct ixgbe_hw *hw = &adapter->hw;
4055 int i;
4056 for (i = 0; i < hw->mac.num_rar_entries; i++) {
4057 if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
4058 if (adapter->mac_table[i].state &
4059 IXGBE_MAC_STATE_IN_USE)
4060 hw->mac.ops.set_rar(hw, i,
4061 adapter->mac_table[i].addr,
4062 adapter->mac_table[i].queue,
4063 IXGBE_RAH_AV);
4064 else
4065 hw->mac.ops.clear_rar(hw, i);
4066
4067 adapter->mac_table[i].state &=
4068 ~(IXGBE_MAC_STATE_MODIFIED);
4069 }
4070 }
4071}
4072
4073static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
4074{
4075 int i;
4076 struct ixgbe_hw *hw = &adapter->hw;
4077
4078 for (i = 0; i < hw->mac.num_rar_entries; i++) {
4079 adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
4080 adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
4081 eth_zero_addr(adapter->mac_table[i].addr);
4082 adapter->mac_table[i].queue = 0;
4083 }
4084 ixgbe_sync_mac_table(adapter);
4085}
4086
4087static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
4088{
4089 struct ixgbe_hw *hw = &adapter->hw;
4090 int i, count = 0;
4091
4092 for (i = 0; i < hw->mac.num_rar_entries; i++) {
4093 if (adapter->mac_table[i].state == 0)
4094 count++;
4095 }
4096 return count;
4097}
4098
4099
4100static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
4101 u8 *addr)
4102{
4103 struct ixgbe_hw *hw = &adapter->hw;
4104
4105 memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
4106 adapter->mac_table[0].queue = VMDQ_P(0);
4107 adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
4108 IXGBE_MAC_STATE_IN_USE);
4109 hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
4110 adapter->mac_table[0].queue,
4111 IXGBE_RAH_AV);
4112}
4113
4114int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
4115{
4116 struct ixgbe_hw *hw = &adapter->hw;
4117 int i;
4118
4119 if (is_zero_ether_addr(addr))
4120 return -EINVAL;
4121
4122 for (i = 0; i < hw->mac.num_rar_entries; i++) {
4123 if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
4124 continue;
4125 adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
4126 IXGBE_MAC_STATE_IN_USE);
4127 ether_addr_copy(adapter->mac_table[i].addr, addr);
4128 adapter->mac_table[i].queue = queue;
4129 ixgbe_sync_mac_table(adapter);
4130 return i;
4131 }
4132 return -ENOMEM;
4133}
4134
4135int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
4136{
4137
4138 int i;
4139 struct ixgbe_hw *hw = &adapter->hw;
4140
4141 if (is_zero_ether_addr(addr))
4142 return -EINVAL;
4143
4144 for (i = 0; i < hw->mac.num_rar_entries; i++) {
4145 if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
4146 adapter->mac_table[i].queue == queue) {
4147 adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
4148 adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
4149 eth_zero_addr(adapter->mac_table[i].addr);
4150 adapter->mac_table[i].queue = 0;
4151 ixgbe_sync_mac_table(adapter);
4152 return 0;
4153 }
4154 }
4155 return -ENOMEM;
4156}
4157
4158
4159
4160
4161
4162
4163
4164
4165
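/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 * @vfn: pool to associate with the unicast addresses
 *
 * Writes the unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                0 on no addresses written
 *                X on writing X addresses to the RAR table
 **/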
4166static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
4167{
4168 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4169 int count = 0;
4170
4171
4172 if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
4173 return -ENOMEM;
4174
4175 if (!netdev_uc_empty(netdev)) {
4176 struct netdev_hw_addr *ha;
4177 netdev_for_each_uc_addr(ha, netdev) {
4178 ixgbe_del_mac_filter(adapter, ha->addr, vfn);
4179 ixgbe_add_mac_filter(adapter, ha->addr, vfn);
4180 count++;
4181 }
4182 }
4183 return count;
4184}
4185
4186
4187
4188
4189
4190
4191
4192
4193
4194
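/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/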
4195void ixgbe_set_rx_mode(struct net_device *netdev)
4196{
4197 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4198 struct ixgbe_hw *hw = &adapter->hw;
4199 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
4200 u32 vlnctrl;
4201 int count;
4202
4203
4204 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4205 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4206
4207
4208 fctrl &= ~IXGBE_FCTRL_SBP;
4209 fctrl |= IXGBE_FCTRL_BAM;
4210 fctrl |= IXGBE_FCTRL_DPF;
4211 fctrl |= IXGBE_FCTRL_PMCF;
4212
4213
4214 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4215 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
4216 if (netdev->flags & IFF_PROMISC) {
4217 hw->addr_ctrl.user_set_promisc = true;
4218 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4219 vmolr |= IXGBE_VMOLR_MPE;
4220
4221
4222
4223
4224 if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
4225 IXGBE_FLAG_SRIOV_ENABLED))
4226 vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
4227 } else {
4228 if (netdev->flags & IFF_ALLMULTI) {
4229 fctrl |= IXGBE_FCTRL_MPE;
4230 vmolr |= IXGBE_VMOLR_MPE;
4231 }
4232 vlnctrl |= IXGBE_VLNCTRL_VFE;
4233 hw->addr_ctrl.user_set_promisc = false;
4234 }
4235
4236
4237
4238
4239
4240
4241 count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
4242 if (count < 0) {
4243 fctrl |= IXGBE_FCTRL_UPE;
4244 vmolr |= IXGBE_VMOLR_ROPE;
4245 }
4246
4247
4248
4249
4250
4251 count = ixgbe_write_mc_addr_list(netdev);
4252 if (count < 0) {
4253 fctrl |= IXGBE_FCTRL_MPE;
4254 vmolr |= IXGBE_VMOLR_MPE;
4255 } else if (count) {
4256 vmolr |= IXGBE_VMOLR_ROMPE;
4257 }
4258
4259 if (hw->mac.type != ixgbe_mac_82598EB) {
4260 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
4261 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
4262 IXGBE_VMOLR_ROPE);
4263 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
4264 }
4265
4266
4267 if (adapter->netdev->features & NETIF_F_RXALL) {
4268
4269
4270 fctrl |= (IXGBE_FCTRL_SBP |
4271 IXGBE_FCTRL_BAM |
4272 IXGBE_FCTRL_PMCF);
4273
4274 fctrl &= ~(IXGBE_FCTRL_DPF);
4275
4276 }
4277
4278 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
4279 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4280
4281 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
4282 ixgbe_vlan_strip_enable(adapter);
4283 else
4284 ixgbe_vlan_strip_disable(adapter);
4285}
4286
4287static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
4288{
4289 int q_idx;
4290
4291 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
4292 ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
4293 napi_enable(&adapter->q_vector[q_idx]->napi);
4294 }
4295}
4296
4297static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
4298{
4299 int q_idx;
4300
4301 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
4302 napi_disable(&adapter->q_vector[q_idx]->napi);
4303 while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
4304 pr_info("QV %d locked\n", q_idx);
4305 usleep_range(1000, 20000);
4306 }
4307 }
4308}
4309
4310static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
4311{
4312 switch (adapter->hw.mac.type) {
4313 case ixgbe_mac_X550:
4314 case ixgbe_mac_X550EM_x:
4315 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
4316#ifdef CONFIG_IXGBE_VXLAN
4317 adapter->vxlan_port = 0;
4318#endif
4319 break;
4320 default:
4321 break;
4322 }
4323}
4324
4325#ifdef CONFIG_IXGBE_DCB
4326
4327
4328
4329
4330
4331
4332
4333
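/**
 * ixgbe_configure_dcb - Configure DCB hardware support
 * @adapter: board private structure
 **/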
4334static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
4335{
4336 struct ixgbe_hw *hw = &adapter->hw;
4337 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4338
4339 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
4340 if (hw->mac.type == ixgbe_mac_82598EB)
4341 netif_set_gso_max_size(adapter->netdev, 65536);
4342 return;
4343 }
4344
4345 if (hw->mac.type == ixgbe_mac_82598EB)
4346 netif_set_gso_max_size(adapter->netdev, 32768);
4347
4348#ifdef IXGBE_FCOE
4349 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
4350 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
4351#endif
4352
4353
4354 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
4355 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
4356 DCB_TX_CONFIG);
4357 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
4358 DCB_RX_CONFIG);
4359 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
4360 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
4361 ixgbe_dcb_hw_ets(&adapter->hw,
4362 adapter->ixgbe_ieee_ets,
4363 max_frame);
4364 ixgbe_dcb_hw_pfc_config(&adapter->hw,
4365 adapter->ixgbe_ieee_pfc->pfc_en,
4366 adapter->ixgbe_ieee_ets->prio_tc);
4367 }
4368
4369
4370 if (hw->mac.type != ixgbe_mac_82598EB) {
4371 u32 msb = 0;
4372 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
4373
4374 while (rss_i) {
4375 msb++;
4376 rss_i >>= 1;
4377 }
4378
4379
4380 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
4381 }
4382}
4383#endif
4384
4385
4386#define IXGBE_ETH_FRAMING 20
4387
4388
4389
4390
4391
4392
4393
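/*
 * ixgbe_hpbthresh - calculate high water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */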
4394static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
4395{
4396 struct ixgbe_hw *hw = &adapter->hw;
4397 struct net_device *dev = adapter->netdev;
4398 int link, tc, kb, marker;
4399 u32 dv_id, rx_pba;
4400
4401
4402 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
4403
4404#ifdef IXGBE_FCOE
4405
4406 if ((dev->features & NETIF_F_FCOE_MTU) &&
4407 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4408 (pb == ixgbe_fcoe_get_tc(adapter)))
4409 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4410#endif
4411
4412
4413 switch (hw->mac.type) {
4414 case ixgbe_mac_X540:
4415 case ixgbe_mac_X550:
4416 case ixgbe_mac_X550EM_x:
4417 dv_id = IXGBE_DV_X540(link, tc);
4418 break;
4419 default:
4420 dv_id = IXGBE_DV(link, tc);
4421 break;
4422 }
4423
4424
4425 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4426 dv_id += IXGBE_B2BT(tc);
4427
4428
4429 kb = IXGBE_BT2KB(dv_id);
4430 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
4431
4432 marker = rx_pba - kb;
4433
4434
4435
4436
4437
4438 if (marker < 0) {
4439 e_warn(drv, "Packet Buffer(%i) cannot provide enough "
4440 "headroom to support flow control. "
4441 "Decrease MTU or number of traffic classes\n", pb);
4442 marker = tc + 1;
4443 }
4444
4445 return marker;
4446}
4447
4448
4449
4450
4451
4452
4453
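/*
 * ixgbe_lpbthresh - calculate low water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */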
4454static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
4455{
4456 struct ixgbe_hw *hw = &adapter->hw;
4457 struct net_device *dev = adapter->netdev;
4458 int tc;
4459 u32 dv_id;
4460
4461
4462 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4463
4464#ifdef IXGBE_FCOE
4465
4466 if ((dev->features & NETIF_F_FCOE_MTU) &&
4467 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
4468 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
4469 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
4470#endif
4471
4472
4473 switch (hw->mac.type) {
4474 case ixgbe_mac_X540:
4475 case ixgbe_mac_X550:
4476 case ixgbe_mac_X550EM_x:
4477 dv_id = IXGBE_LOW_DV_X540(tc);
4478 break;
4479 default:
4480 dv_id = IXGBE_LOW_DV(tc);
4481 break;
4482 }
4483
4484
4485 return IXGBE_BT2KB(dv_id);
4486}
4487
4488
4489
4490
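/*
 * ixgbe_pbthresh_setup - calculate and setup high and low water marks
 */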
4491static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
4492{
4493 struct ixgbe_hw *hw = &adapter->hw;
4494 int num_tc = netdev_get_num_tc(adapter->netdev);
4495 int i;
4496
4497 if (!num_tc)
4498 num_tc = 1;
4499
4500 for (i = 0; i < num_tc; i++) {
4501 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
4502 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
4503
4504
4505 if (hw->fc.low_water[i] > hw->fc.high_water[i])
4506 hw->fc.low_water[i] = 0;
4507 }
4508
4509 for (; i < MAX_TRAFFIC_CLASS; i++)
4510 hw->fc.high_water[i] = 0;
4511}
4512
4513static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
4514{
4515 struct ixgbe_hw *hw = &adapter->hw;
4516 int hdrm;
4517 u8 tc = netdev_get_num_tc(adapter->netdev);
4518
4519 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4520 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
4521 hdrm = 32 << adapter->fdir_pballoc;
4522 else
4523 hdrm = 0;
4524
4525 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
4526 ixgbe_pbthresh_setup(adapter);
4527}
4528
4529static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
4530{
4531 struct ixgbe_hw *hw = &adapter->hw;
4532 struct hlist_node *node2;
4533 struct ixgbe_fdir_filter *filter;
4534
4535 spin_lock(&adapter->fdir_perfect_lock);
4536
4537 if (!hlist_empty(&adapter->fdir_filter_list))
4538 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
4539
4540 hlist_for_each_entry_safe(filter, node2,
4541 &adapter->fdir_filter_list, fdir_node) {
4542 ixgbe_fdir_write_perfect_filter_82599(hw,
4543 &filter->filter,
4544 filter->sw_idx,
4545 (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
4546 IXGBE_FDIR_DROP_QUEUE :
4547 adapter->rx_ring[filter->action]->reg_idx);
4548 }
4549
4550 spin_unlock(&adapter->fdir_perfect_lock);
4551}
4552
4553static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
4554 struct ixgbe_adapter *adapter)
4555{
4556 struct ixgbe_hw *hw = &adapter->hw;
4557 u32 vmolr;
4558
4559
4560 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
4561 vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
4562
4563
4564 vmolr &= ~IXGBE_VMOLR_MPE;
4565
4566 if (dev->flags & IFF_ALLMULTI) {
4567 vmolr |= IXGBE_VMOLR_MPE;
4568 } else {
4569 vmolr |= IXGBE_VMOLR_ROMPE;
4570 hw->mac.ops.update_mc_addr_list(hw, dev);
4571 }
4572 ixgbe_write_uc_addr_list(adapter->netdev, pool);
4573 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
4574}
4575
4576static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
4577{
4578 struct ixgbe_adapter *adapter = vadapter->real_adapter;
4579 int rss_i = adapter->num_rx_queues_per_pool;
4580 struct ixgbe_hw *hw = &adapter->hw;
4581 u16 pool = vadapter->pool;
4582 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4583 IXGBE_PSRTYPE_UDPHDR |
4584 IXGBE_PSRTYPE_IPV4HDR |
4585 IXGBE_PSRTYPE_L2HDR |
4586 IXGBE_PSRTYPE_IPV6HDR;
4587
4588 if (hw->mac.type == ixgbe_mac_82598EB)
4589 return;
4590
4591 if (rss_i > 3)
4592 psrtype |= 2 << 29;
4593 else if (rss_i > 1)
4594 psrtype |= 1 << 29;
4595
4596 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
4597}
4598
4599
4600
4601
4602
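/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/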
4603static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4604{
4605 struct device *dev = rx_ring->dev;
4606 unsigned long size;
4607 u16 i;
4608
4609
4610 if (!rx_ring->rx_buffer_info)
4611 return;
4612
4613
4614 for (i = 0; i < rx_ring->count; i++) {
4615 struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
4616
4617 if (rx_buffer->skb) {
4618 struct sk_buff *skb = rx_buffer->skb;
4619 if (IXGBE_CB(skb)->page_released)
4620 dma_unmap_page(dev,
4621 IXGBE_CB(skb)->dma,
4622 ixgbe_rx_bufsz(rx_ring),
4623 DMA_FROM_DEVICE);
4624 dev_kfree_skb(skb);
4625 rx_buffer->skb = NULL;
4626 }
4627
4628 if (!rx_buffer->page)
4629 continue;
4630
4631 dma_unmap_page(dev, rx_buffer->dma,
4632 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
4633 __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
4634
4635 rx_buffer->page = NULL;
4636 }
4637
4638 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4639 memset(rx_ring->rx_buffer_info, 0, size);
4640
4641
4642 memset(rx_ring->desc, 0, rx_ring->size);
4643
4644 rx_ring->next_to_alloc = 0;
4645 rx_ring->next_to_clean = 0;
4646 rx_ring->next_to_use = 0;
4647}
4648
4649static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
4650 struct ixgbe_ring *rx_ring)
4651{
4652 struct ixgbe_adapter *adapter = vadapter->real_adapter;
4653 int index = rx_ring->queue_index + vadapter->rx_base_queue;
4654
4655
4656 ixgbe_disable_rx_queue(adapter, rx_ring);
4657 usleep_range(10000, 20000);
4658 ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
4659 ixgbe_clean_rx_ring(rx_ring);
4660 rx_ring->l2_accel_priv = NULL;
4661}
4662
4663static int ixgbe_fwd_ring_down(struct net_device *vdev,
4664 struct ixgbe_fwd_adapter *accel)
4665{
4666 struct ixgbe_adapter *adapter = accel->real_adapter;
4667 unsigned int rxbase = accel->rx_base_queue;
4668 unsigned int txbase = accel->tx_base_queue;
4669 int i;
4670
4671 netif_tx_stop_all_queues(vdev);
4672
4673 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4674 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
4675 adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
4676 }
4677
4678 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4679 adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
4680 adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
4681 }
4682
4683
4684 return 0;
4685}
4686
4687static int ixgbe_fwd_ring_up(struct net_device *vdev,
4688 struct ixgbe_fwd_adapter *accel)
4689{
4690 struct ixgbe_adapter *adapter = accel->real_adapter;
4691 unsigned int rxbase, txbase, queues;
4692 int i, baseq, err = 0;
4693
4694 if (!test_bit(accel->pool, &adapter->fwd_bitmask))
4695 return 0;
4696
4697 baseq = accel->pool * adapter->num_rx_queues_per_pool;
4698 netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
4699 accel->pool, adapter->num_rx_pools,
4700 baseq, baseq + adapter->num_rx_queues_per_pool,
4701 adapter->fwd_bitmask);
4702
4703 accel->netdev = vdev;
4704 accel->rx_base_queue = rxbase = baseq;
4705 accel->tx_base_queue = txbase = baseq;
4706
4707 for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
4708 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
4709
4710 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4711 adapter->rx_ring[rxbase + i]->netdev = vdev;
4712 adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
4713 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
4714 }
4715
4716 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
4717 adapter->tx_ring[txbase + i]->netdev = vdev;
4718 adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
4719 }
4720
4721 queues = min_t(unsigned int,
4722 adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
4723 err = netif_set_real_num_tx_queues(vdev, queues);
4724 if (err)
4725 goto fwd_queue_err;
4726
4727 err = netif_set_real_num_rx_queues(vdev, queues);
4728 if (err)
4729 goto fwd_queue_err;
4730
4731 if (is_valid_ether_addr(vdev->dev_addr))
4732 ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
4733
4734 ixgbe_fwd_psrtype(accel);
4735 ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
4736 return err;
4737fwd_queue_err:
4738 ixgbe_fwd_ring_down(vdev, accel);
4739 return err;
4740}
4741
4742static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
4743{
4744 struct net_device *upper;
4745 struct list_head *iter;
4746 int err;
4747
4748 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
4749 if (netif_is_macvlan(upper)) {
4750 struct macvlan_dev *dfwd = netdev_priv(upper);
4751 struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
4752
4753 if (dfwd->fwd_priv) {
4754 err = ixgbe_fwd_ring_up(upper, vadapter);
4755 if (err)
4756 continue;
4757 }
4758 }
4759 }
4760}
4761
4762static void ixgbe_configure(struct ixgbe_adapter *adapter)
4763{
4764 struct ixgbe_hw *hw = &adapter->hw;
4765
4766 ixgbe_configure_pb(adapter);
4767#ifdef CONFIG_IXGBE_DCB
4768 ixgbe_configure_dcb(adapter);
4769#endif
4770
4771
4772
4773
4774 ixgbe_configure_virtualization(adapter);
4775
4776 ixgbe_set_rx_mode(adapter->netdev);
4777 ixgbe_restore_vlan(adapter);
4778
4779 switch (hw->mac.type) {
4780 case ixgbe_mac_82599EB:
4781 case ixgbe_mac_X540:
4782 hw->mac.ops.disable_rx_buff(hw);
4783 break;
4784 default:
4785 break;
4786 }
4787
4788 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
4789 ixgbe_init_fdir_signature_82599(&adapter->hw,
4790 adapter->fdir_pballoc);
4791 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
4792 ixgbe_init_fdir_perfect_82599(&adapter->hw,
4793 adapter->fdir_pballoc);
4794 ixgbe_fdir_filter_restore(adapter);
4795 }
4796
4797 switch (hw->mac.type) {
4798 case ixgbe_mac_82599EB:
4799 case ixgbe_mac_X540:
4800 hw->mac.ops.enable_rx_buff(hw);
4801 break;
4802 default:
4803 break;
4804 }
4805
4806#ifdef CONFIG_IXGBE_DCA
4807
4808 if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
4809 ixgbe_setup_dca(adapter);
4810#endif
4811
4812#ifdef IXGBE_FCOE
4813
4814 ixgbe_configure_fcoe(adapter);
4815
4816#endif
4817 ixgbe_configure_tx(adapter);
4818 ixgbe_configure_rx(adapter);
4819 ixgbe_configure_dfwd(adapter);
4820}
4821
4822
4823
4824
4825
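/**
 * ixgbe_sfp_link_config - flag the SFP+ module for (re)configuration
 * @adapter: board private structure
 *
 * The actual module detection and link setup are done later by the
 * service task; this only sets the flags that request that work.
 **/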
4826static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
4827{
4828
4829
4830
4831
4832
4833
4834 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
4835 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
4836
4837 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
4838 adapter->sfp_poll_time = 0;
4839}
4840
4841
4842
4843
4844
4845
4846
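/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, an error code otherwise
 **/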
4847static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
4848{
4849 u32 speed;
4850 bool autoneg, link_up = false;
4851 int ret = IXGBE_ERR_LINK_SETUP;
4852
4853 if (hw->mac.ops.check_link)
4854 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
4855
4856 if (ret)
4857 return ret;
4858
4859 speed = hw->phy.autoneg_advertised;
4860 if ((!speed) && (hw->mac.ops.get_link_capabilities))
4861 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
4862 &autoneg);
4863 if (ret)
4864 return ret;
4865
4866 if (hw->mac.ops.setup_link)
4867 ret = hw->mac.ops.setup_link(hw, speed, link_up);
4868
4869 return ret;
4870}
4871
4872static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
4873{
4874 struct ixgbe_hw *hw = &adapter->hw;
4875 u32 gpie = 0;
4876
4877 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4878 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
4879 IXGBE_GPIE_OCD;
4880 gpie |= IXGBE_GPIE_EIAME;
4881
4882
4883
4884
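		/*
		 * use EIAM to auto-mask when the MSI-X interrupt is asserted,
		 * this saves a register write for every interrupt
		 */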
4885 switch (hw->mac.type) {
4886 case ixgbe_mac_82598EB:
4887 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4888 break;
4889 case ixgbe_mac_82599EB:
4890 case ixgbe_mac_X540:
4891 case ixgbe_mac_X550:
4892 case ixgbe_mac_X550EM_x:
4893 default:
4894 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4895 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4896 break;
4897 }
4898 } else {
4899
4900
4901 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4902 }
4903
4904
4905
4906
4907 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
4908 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
4909
4910 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4911 case IXGBE_82599_VMDQ_8Q_MASK:
4912 gpie |= IXGBE_GPIE_VTMODE_16;
4913 break;
4914 case IXGBE_82599_VMDQ_4Q_MASK:
4915 gpie |= IXGBE_GPIE_VTMODE_32;
4916 break;
4917 default:
4918 gpie |= IXGBE_GPIE_VTMODE_64;
4919 break;
4920 }
4921 }
4922
4923
4924 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
4925 switch (adapter->hw.mac.type) {
4926 case ixgbe_mac_82599EB:
4927 gpie |= IXGBE_SDP0_GPIEN_8259X;
4928 break;
4929 default:
4930 break;
4931 }
4932 }
4933
4934
4935 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
4936 gpie |= IXGBE_SDP1_GPIEN(hw);
4937
4938 switch (hw->mac.type) {
4939 case ixgbe_mac_82599EB:
4940 gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
4941 break;
4942 case ixgbe_mac_X550EM_x:
4943 gpie |= IXGBE_SDP0_GPIEN_X540;
4944 break;
4945 default:
4946 break;
4947 }
4948
4949 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4950}
4951
4952static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
4953{
4954 struct ixgbe_hw *hw = &adapter->hw;
4955 int err;
4956 u32 ctrl_ext;
4957
4958 ixgbe_get_hw_control(adapter);
4959 ixgbe_setup_gpie(adapter);
4960
4961 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4962 ixgbe_configure_msix(adapter);
4963 else
4964 ixgbe_configure_msi_and_legacy(adapter);
4965
4966
4967 if (hw->mac.ops.enable_tx_laser)
4968 hw->mac.ops.enable_tx_laser(hw);
4969
4970 if (hw->phy.ops.set_phy_power)
4971 hw->phy.ops.set_phy_power(hw, true);
4972
4973 smp_mb__before_atomic();
4974 clear_bit(__IXGBE_DOWN, &adapter->state);
4975 ixgbe_napi_enable_all(adapter);
4976
4977 if (ixgbe_is_sfp(hw)) {
4978 ixgbe_sfp_link_config(adapter);
4979 } else {
4980 err = ixgbe_non_sfp_link_config(hw);
4981 if (err)
4982 e_err(probe, "link_config FAILED %d\n", err);
4983 }
4984
4985
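	/* clear any pending interrupts, may auto mask */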
4986 IXGBE_READ_REG(hw, IXGBE_EICR);
4987 ixgbe_irq_enable(adapter, true, true);
4988
4989
4990
4991
4992
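	/*
	 * If this adapter has a fan, check to see if we had a failure
	 * before we enabled the interrupt.
	 */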
4993 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
4994 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4995 if (esdp & IXGBE_ESDP_SDP1)
4996 e_crit(drv, "Fan has stopped, replace the adapter\n");
4997 }
4998
4999
5000
5001 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5002 adapter->link_check_timeout = jiffies;
5003 mod_timer(&adapter->service_timer, jiffies);
5004
5005
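	/* Set PF Reset Done bit so PF/VF Mail Ops can work */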
5006 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5007 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
5008 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5009}
5010
5011void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
5012{
5013 WARN_ON(in_interrupt());
5014
5015 adapter->netdev->trans_start = jiffies;
5016
5017 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
5018 usleep_range(1000, 2000);
5019 ixgbe_down(adapter);
5020
5021
5022
5023
5024
5025
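	/*
	 * If SR-IOV is enabled, give the VFs some time to notice the reset
	 * before the PF is brought back up.
	 */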
5026 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
5027 msleep(2000);
5028 ixgbe_up(adapter);
5029 clear_bit(__IXGBE_RESETTING, &adapter->state);
5030}
5031
5032void ixgbe_up(struct ixgbe_adapter *adapter)
5033{
5034
5035 ixgbe_configure(adapter);
5036
5037 ixgbe_up_complete(adapter);
5038}
5039
5040void ixgbe_reset(struct ixgbe_adapter *adapter)
5041{
5042 struct ixgbe_hw *hw = &adapter->hw;
5043 struct net_device *netdev = adapter->netdev;
5044 int err;
5045 u8 old_addr[ETH_ALEN];
5046
5047 if (ixgbe_removed(hw->hw_addr))
5048 return;
5049
5050 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5051 usleep_range(1000, 2000);
5052
5053
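	/* clear all SFP and link config related flags while holding SFP_INIT */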
5054 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
5055 IXGBE_FLAG2_SFP_NEEDS_RESET);
5056 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5057
5058 err = hw->mac.ops.init_hw(hw);
5059 switch (err) {
5060 case 0:
5061 case IXGBE_ERR_SFP_NOT_PRESENT:
5062 case IXGBE_ERR_SFP_NOT_SUPPORTED:
5063 break;
5064 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
5065 e_dev_err("master disable timed out\n");
5066 break;
5067 case IXGBE_ERR_EEPROM_VERSION:
5068
5069 e_dev_warn("This device is a pre-production adapter/LOM. "
5070 "Please be aware there may be issues associated with "
5071 "your hardware. If you are experiencing problems "
5072 "please contact your Intel or hardware "
5073 "representative who provided you with this "
5074 "hardware.\n");
5075 break;
5076 default:
5077 e_dev_err("Hardware Error: %d\n", err);
5078 }
5079
5080 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5081
5082 memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
5083 ixgbe_flush_sw_mac_table(adapter);
5084 ixgbe_mac_set_default_filter(adapter, old_addr);
5085
5086
5087 if (hw->mac.san_mac_rar_index)
5088 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
5089
5090 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
5091 ixgbe_ptp_reset(adapter);
5092
5093 if (hw->phy.ops.set_phy_power) {
5094 if (!netif_running(adapter->netdev) && !adapter->wol)
5095 hw->phy.ops.set_phy_power(hw, false);
5096 else
5097 hw->phy.ops.set_phy_power(hw, true);
5098 }
5099}
5100
5101
5102
5103
5104
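/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/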
5105static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
5106{
5107 struct ixgbe_tx_buffer *tx_buffer_info;
5108 unsigned long size;
5109 u16 i;
5110
5111
5112 if (!tx_ring->tx_buffer_info)
5113 return;
5114
5115
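	/* Free all the Tx ring sk_buffs */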
5116 for (i = 0; i < tx_ring->count; i++) {
5117 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5118 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
5119 }
5120
5121 netdev_tx_reset_queue(txring_txq(tx_ring));
5122
5123 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
5124 memset(tx_ring->tx_buffer_info, 0, size);
5125
5126
5127 memset(tx_ring->desc, 0, tx_ring->size);
5128
5129 tx_ring->next_to_use = 0;
5130 tx_ring->next_to_clean = 0;
5131}
5132
5133
5134
5135
5136
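/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/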
5137static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
5138{
5139 int i;
5140
5141 for (i = 0; i < adapter->num_rx_queues; i++)
5142 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
5143}
5144
5145
5146
5147
5148
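/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/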
5149static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
5150{
5151 int i;
5152
5153 for (i = 0; i < adapter->num_tx_queues; i++)
5154 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
5155}
5156
5157static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
5158{
5159 struct hlist_node *node2;
5160 struct ixgbe_fdir_filter *filter;
5161
5162 spin_lock(&adapter->fdir_perfect_lock);
5163
5164 hlist_for_each_entry_safe(filter, node2,
5165 &adapter->fdir_filter_list, fdir_node) {
5166 hlist_del(&filter->fdir_node);
5167 kfree(filter);
5168 }
5169 adapter->fdir_filter_count = 0;
5170
5171 spin_unlock(&adapter->fdir_perfect_lock);
5172}
5173
5174void ixgbe_down(struct ixgbe_adapter *adapter)
5175{
5176 struct net_device *netdev = adapter->netdev;
5177 struct ixgbe_hw *hw = &adapter->hw;
5178 struct net_device *upper;
5179 struct list_head *iter;
5180 int i;
5181
5182
5183 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
5184 return;
5185
5186
5187 hw->mac.ops.disable_rx(hw);
5188
5189
5190 for (i = 0; i < adapter->num_rx_queues; i++)
5191
5192 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
5193
5194 usleep_range(10000, 20000);
5195
5196 netif_tx_stop_all_queues(netdev);
5197
5198
5199 netif_carrier_off(netdev);
5200 netif_tx_disable(netdev);
5201
5202
5203 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
5204 if (netif_is_macvlan(upper)) {
5205 struct macvlan_dev *vlan = netdev_priv(upper);
5206
5207 if (vlan->fwd_priv) {
5208 netif_tx_stop_all_queues(upper);
5209 netif_carrier_off(upper);
5210 netif_tx_disable(upper);
5211 }
5212 }
5213 }
5214
5215 ixgbe_irq_disable(adapter);
5216
5217 ixgbe_napi_disable_all(adapter);
5218
5219 adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
5220 IXGBE_FLAG2_RESET_REQUESTED);
5221 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5222
5223 del_timer_sync(&adapter->service_timer);
5224
5225 if (adapter->num_vfs) {
5226
5227 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
5228
5229
5230 for (i = 0 ; i < adapter->num_vfs; i++)
5231 adapter->vfinfo[i].clear_to_send = false;
5232
5233
5234 ixgbe_ping_all_vfs(adapter);
5235
5236
5237 ixgbe_disable_tx_rx(adapter);
5238 }
5239
5240
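	/* disable transmits in the hardware now that interrupts are off */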
5241 for (i = 0; i < adapter->num_tx_queues; i++) {
5242 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
5243 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
5244 }
5245
5246
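	/* Disable the Tx DMA engine on 82599 and later MACs */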
5247 switch (hw->mac.type) {
5248 case ixgbe_mac_82599EB:
5249 case ixgbe_mac_X540:
5250 case ixgbe_mac_X550:
5251 case ixgbe_mac_X550EM_x:
5252 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5253 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5254 ~IXGBE_DMATXCTL_TE));
5255 break;
5256 default:
5257 break;
5258 }
5259
5260 if (!pci_channel_offline(adapter->pdev))
5261 ixgbe_reset(adapter);
5262
5263
5264 if (hw->mac.ops.disable_tx_laser)
5265 hw->mac.ops.disable_tx_laser(hw);
5266
5267 ixgbe_clean_all_tx_rings(adapter);
5268 ixgbe_clean_all_rx_rings(adapter);
5269}
5270
5271
5272
5273
5274
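/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/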
5275static void ixgbe_tx_timeout(struct net_device *netdev)
5276{
5277 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5278
5279
5280 ixgbe_tx_timeout_reset(adapter);
5281}
5282
5283
5284
5285
5286
5287
5288
5289
5290
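/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/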
5291static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
5292{
5293 struct ixgbe_hw *hw = &adapter->hw;
5294 struct pci_dev *pdev = adapter->pdev;
5295 unsigned int rss, fdir;
5296 u32 fwsm;
5297#ifdef CONFIG_IXGBE_DCB
5298 int j;
5299 struct tc_configuration *tc;
5300#endif
5301
5302
5303
5304 hw->vendor_id = pdev->vendor;
5305 hw->device_id = pdev->device;
5306 hw->revision_id = pdev->revision;
5307 hw->subsystem_vendor_id = pdev->subsystem_vendor;
5308 hw->subsystem_device_id = pdev->subsystem_device;
5309
5310
5311 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
5312 adapter->ring_feature[RING_F_RSS].limit = rss;
5313 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
5314 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
5315 adapter->atr_sample_rate = 20;
5316 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
5317 adapter->ring_feature[RING_F_FDIR].limit = fdir;
5318 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
5319#ifdef CONFIG_IXGBE_DCA
5320 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
5321#endif
5322#ifdef IXGBE_FCOE
5323 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
5324 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5325#ifdef CONFIG_IXGBE_DCB
5326
5327 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
5328#endif
5329#endif
5330
5331 adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
5332 hw->mac.num_rar_entries,
5333 GFP_ATOMIC);
5334
5335
5336 switch (hw->mac.type) {
5337 case ixgbe_mac_82598EB:
5338 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
5339
5340 if (hw->device_id == IXGBE_DEV_ID_82598AT)
5341 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
5342
5343 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
5344 adapter->ring_feature[RING_F_FDIR].limit = 0;
5345 adapter->atr_sample_rate = 0;
5346 adapter->fdir_pballoc = 0;
5347#ifdef IXGBE_FCOE
5348 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
5349 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5350#ifdef CONFIG_IXGBE_DCB
5351 adapter->fcoe.up = 0;
5352#endif
5353#endif
5354 break;
5355 case ixgbe_mac_82599EB:
5356 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
5357 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5358 break;
5359 case ixgbe_mac_X540:
5360 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
5361 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5362 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5363 break;
5364 case ixgbe_mac_X550EM_x:
5365 case ixgbe_mac_X550:
5366#ifdef CONFIG_IXGBE_DCA
5367 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
5368#endif
5369#ifdef CONFIG_IXGBE_VXLAN
5370 adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
5371#endif
5372 break;
5373 default:
5374 break;
5375 }
5376
5377#ifdef IXGBE_FCOE
5378
5379 spin_lock_init(&adapter->fcoe.lock);
5380
5381#endif
5382
5383 spin_lock_init(&adapter->fdir_perfect_lock);
5384
5385#ifdef CONFIG_IXGBE_DCB
5386 switch (hw->mac.type) {
5387 case ixgbe_mac_X540:
5388 case ixgbe_mac_X550:
5389 case ixgbe_mac_X550EM_x:
5390 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
5391 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
5392 break;
5393 default:
5394 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
5395 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
5396 break;
5397 }
5398
5399
5400 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
5401 tc = &adapter->dcb_cfg.tc_config[j];
5402 tc->path[DCB_TX_CONFIG].bwg_id = 0;
5403 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
5404 tc->path[DCB_RX_CONFIG].bwg_id = 0;
5405 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
5406 tc->dcb_pfc = pfc_disabled;
5407 }
5408
5409
5410 tc = &adapter->dcb_cfg.tc_config[0];
5411 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
5412 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
5413
5414 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
5415 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
5416 adapter->dcb_cfg.pfc_mode_enable = false;
5417 adapter->dcb_set_bitmap = 0x00;
5418 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
5419 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
5420 sizeof(adapter->temp_dcb_cfg));
5421
5422#endif
5423
5424
5425 hw->fc.requested_mode = ixgbe_fc_full;
5426 hw->fc.current_mode = ixgbe_fc_full;
5427 ixgbe_pbthresh_setup(adapter);
5428 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
5429 hw->fc.send_xon = true;
5430 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
5431
5432#ifdef CONFIG_PCI_IOV
5433 if (max_vfs > 0)
5434 e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
5435
5436
5437 if (hw->mac.type != ixgbe_mac_82598EB) {
5438 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
5439 adapter->num_vfs = 0;
5440 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
5441 } else {
5442 adapter->num_vfs = max_vfs;
5443 }
5444 }
5445#endif
5446
5447
5448 adapter->rx_itr_setting = 1;
5449 adapter->tx_itr_setting = 1;
5450
5451
5452 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
5453 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
5454
5455
5456 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
5457
5458
5459 if (ixgbe_init_eeprom_params_generic(hw)) {
5460 e_dev_err("EEPROM initialization failed\n");
5461 return -EIO;
5462 }
5463
5464
5465 set_bit(0, &adapter->fwd_bitmask);
5466 set_bit(__IXGBE_DOWN, &adapter->state);
5467
5468 return 0;
5469}
5470
5471
5472
5473
5474
5475
5476
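/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/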
5477int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
5478{
5479 struct device *dev = tx_ring->dev;
5480 int orig_node = dev_to_node(dev);
5481 int ring_node = -1;
5482 int size;
5483
5484 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
5485
5486 if (tx_ring->q_vector)
5487 ring_node = tx_ring->q_vector->numa_node;
5488
5489 tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
5490 if (!tx_ring->tx_buffer_info)
5491 tx_ring->tx_buffer_info = vzalloc(size);
5492 if (!tx_ring->tx_buffer_info)
5493 goto err;
5494
5495 u64_stats_init(&tx_ring->syncp);
5496
5497
5498 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
5499 tx_ring->size = ALIGN(tx_ring->size, 4096);
5500
5501 set_dev_node(dev, ring_node);
5502 tx_ring->desc = dma_alloc_coherent(dev,
5503 tx_ring->size,
5504 &tx_ring->dma,
5505 GFP_KERNEL);
5506 set_dev_node(dev, orig_node);
5507 if (!tx_ring->desc)
5508 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
5509 &tx_ring->dma, GFP_KERNEL);
5510 if (!tx_ring->desc)
5511 goto err;
5512
5513 tx_ring->next_to_use = 0;
5514 tx_ring->next_to_clean = 0;
5515 return 0;
5516
5517err:
5518 vfree(tx_ring->tx_buffer_info);
5519 tx_ring->tx_buffer_info = NULL;
5520 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
5521 return -ENOMEM;
5522}
5523
5524
5525
5526
5527
5528
5529
5530
5531
5532
5533
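/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If allocation fails for one ring, any rings set up so far are freed
 * again before returning the error.
 *
 * Return 0 on success, negative on failure
 **/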
5534static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5535{
5536 int i, err = 0;
5537
5538 for (i = 0; i < adapter->num_tx_queues; i++) {
5539 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
5540 if (!err)
5541 continue;
5542
5543 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
5544 goto err_setup_tx;
5545 }
5546
5547 return 0;
5548err_setup_tx:
5549
5550 while (i--)
5551 ixgbe_free_tx_resources(adapter->tx_ring[i]);
5552 return err;
5553}
5554
5555
5556
5557
5558
5559
5560
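/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/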
5561int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
5562{
5563 struct device *dev = rx_ring->dev;
5564 int orig_node = dev_to_node(dev);
5565 int ring_node = -1;
5566 int size;
5567
5568 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5569
5570 if (rx_ring->q_vector)
5571 ring_node = rx_ring->q_vector->numa_node;
5572
5573 rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
5574 if (!rx_ring->rx_buffer_info)
5575 rx_ring->rx_buffer_info = vzalloc(size);
5576 if (!rx_ring->rx_buffer_info)
5577 goto err;
5578
5579 u64_stats_init(&rx_ring->syncp);
5580
5581
5582 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5583 rx_ring->size = ALIGN(rx_ring->size, 4096);
5584
5585 set_dev_node(dev, ring_node);
5586 rx_ring->desc = dma_alloc_coherent(dev,
5587 rx_ring->size,
5588 &rx_ring->dma,
5589 GFP_KERNEL);
5590 set_dev_node(dev, orig_node);
5591 if (!rx_ring->desc)
5592 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
5593 &rx_ring->dma, GFP_KERNEL);
5594 if (!rx_ring->desc)
5595 goto err;
5596
5597 rx_ring->next_to_clean = 0;
5598 rx_ring->next_to_use = 0;
5599
5600 return 0;
5601err:
5602 vfree(rx_ring->rx_buffer_info);
5603 rx_ring->rx_buffer_info = NULL;
5604 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
5605 return -ENOMEM;
5606}
5607
5608
5609
5610
5611
5612
5613
5614
5615
5616
5617
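/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If allocation fails for one ring, any rings set up so far are freed
 * again before returning the error.
 *
 * Return 0 on success, negative on failure
 **/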
5618static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5619{
5620 int i, err = 0;
5621
5622 for (i = 0; i < adapter->num_rx_queues; i++) {
5623 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
5624 if (!err)
5625 continue;
5626
5627 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
5628 goto err_setup_rx;
5629 }
5630
5631#ifdef IXGBE_FCOE
5632 err = ixgbe_setup_fcoe_ddp_resources(adapter);
5633 if (!err)
5634#endif
5635 return 0;
5636err_setup_rx:
5637
5638 while (i--)
5639 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5640 return err;
5641}
5642
5643
5644
5645
5646
5647
5648
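/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/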
5649void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
5650{
5651 ixgbe_clean_tx_ring(tx_ring);
5652
5653 vfree(tx_ring->tx_buffer_info);
5654 tx_ring->tx_buffer_info = NULL;
5655
5656
5657 if (!tx_ring->desc)
5658 return;
5659
5660 dma_free_coherent(tx_ring->dev, tx_ring->size,
5661 tx_ring->desc, tx_ring->dma);
5662
5663 tx_ring->desc = NULL;
5664}
5665
5666
5667
5668
5669
5670
5671
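/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/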
5672static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5673{
5674 int i;
5675
5676 for (i = 0; i < adapter->num_tx_queues; i++)
5677 if (adapter->tx_ring[i]->desc)
5678 ixgbe_free_tx_resources(adapter->tx_ring[i]);
5679}
5680
5681
5682
5683
5684
5685
5686
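/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/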
5687void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
5688{
5689 ixgbe_clean_rx_ring(rx_ring);
5690
5691 vfree(rx_ring->rx_buffer_info);
5692 rx_ring->rx_buffer_info = NULL;
5693
5694
5695 if (!rx_ring->desc)
5696 return;
5697
5698 dma_free_coherent(rx_ring->dev, rx_ring->size,
5699 rx_ring->desc, rx_ring->dma);
5700
5701 rx_ring->desc = NULL;
5702}
5703
5704
5705
5706
5707
5708
5709
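/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/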
5710static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5711{
5712 int i;
5713
5714#ifdef IXGBE_FCOE
5715 ixgbe_free_fcoe_ddp_resources(adapter);
5716
5717#endif
5718 for (i = 0; i < adapter->num_rx_queues; i++)
5719 if (adapter->rx_ring[i]->desc)
5720 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5721}
5722
5723
5724
5725
5726
5727
5728
5729
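/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/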
5730static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5731{
5732 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5733 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5734
5735
5736 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5737 return -EINVAL;
5738
5739
5740
5741
5742
5743
5744 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
5745 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
5746 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
5747 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
5748
5749 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5750
5751
5752 netdev->mtu = new_mtu;
5753
5754 if (netif_running(netdev))
5755 ixgbe_reinit_locked(adapter);
5756
5757 return 0;
5758}
5759
5760
5761
5762
5763
5764
5765
5766
5767
5768
5769
5770
5771
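/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the service timer is started,
 * and the stack is notified that the interface is ready.
 **/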
5772static int ixgbe_open(struct net_device *netdev)
5773{
5774 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5775 struct ixgbe_hw *hw = &adapter->hw;
5776 int err, queues;
5777
5778
5779 if (test_bit(__IXGBE_TESTING, &adapter->state))
5780 return -EBUSY;
5781
5782 netif_carrier_off(netdev);
5783
5784
5785 err = ixgbe_setup_all_tx_resources(adapter);
5786 if (err)
5787 goto err_setup_tx;
5788
5789
5790 err = ixgbe_setup_all_rx_resources(adapter);
5791 if (err)
5792 goto err_setup_rx;
5793
5794 ixgbe_configure(adapter);
5795
5796 err = ixgbe_request_irq(adapter);
5797 if (err)
5798 goto err_req_irq;
5799
5800
5801 if (adapter->num_rx_pools > 1)
5802 queues = adapter->num_rx_queues_per_pool;
5803 else
5804 queues = adapter->num_tx_queues;
5805
5806 err = netif_set_real_num_tx_queues(netdev, queues);
5807 if (err)
5808 goto err_set_queues;
5809
5810 if (adapter->num_rx_pools > 1 &&
5811 adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
5812 queues = IXGBE_MAX_L2A_QUEUES;
5813 else
5814 queues = adapter->num_rx_queues;
5815 err = netif_set_real_num_rx_queues(netdev, queues);
5816 if (err)
5817 goto err_set_queues;
5818
5819 ixgbe_ptp_init(adapter);
5820
5821 ixgbe_up_complete(adapter);
5822
5823 ixgbe_clear_vxlan_port(adapter);
5824#ifdef CONFIG_IXGBE_VXLAN
5825 vxlan_get_rx_port(netdev);
5826#endif
5827
5828 return 0;
5829
5830err_set_queues:
5831 ixgbe_free_irq(adapter);
5832err_req_irq:
5833 ixgbe_free_all_rx_resources(adapter);
5834 if (hw->phy.ops.set_phy_power && !adapter->wol)
5835 hw->phy.ops.set_phy_power(&adapter->hw, false);
5836err_setup_rx:
5837 ixgbe_free_all_tx_resources(adapter);
5838err_setup_tx:
5839 ixgbe_reset(adapter);
5840
5841 return err;
5842}
5843
5844static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
5845{
5846 ixgbe_ptp_suspend(adapter);
5847
5848 if (adapter->hw.phy.ops.enter_lplu) {
5849 adapter->hw.phy.reset_disable = true;
5850 ixgbe_down(adapter);
5851 adapter->hw.phy.ops.enter_lplu(&adapter->hw);
5852 adapter->hw.phy.reset_disable = false;
5853 } else {
5854 ixgbe_down(adapter);
5855 }
5856
5857 ixgbe_free_irq(adapter);
5858
5859 ixgbe_free_all_tx_resources(adapter);
5860 ixgbe_free_all_rx_resources(adapter);
5861}
5862
5863
5864
5865
5866
5867
5868
5869
5870
5871
5872
5873
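/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  The hardware is reset and all transmit and
 * receive resources are freed.
 **/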
5874static int ixgbe_close(struct net_device *netdev)
5875{
5876 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5877
5878 ixgbe_ptp_stop(adapter);
5879
5880 ixgbe_close_suspend(adapter);
5881
5882 ixgbe_fdir_filter_exit(adapter);
5883
5884 ixgbe_release_hw_control(adapter);
5885
5886 return 0;
5887}
5888
5889#ifdef CONFIG_PM
5890static int ixgbe_resume(struct pci_dev *pdev)
5891{
5892 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5893 struct net_device *netdev = adapter->netdev;
	int err;
5895
5896 adapter->hw.hw_addr = adapter->io_addr;
5897 pci_set_power_state(pdev, PCI_D0);
5898 pci_restore_state(pdev);
5899
5900
5901
5902
5903 pci_save_state(pdev);
5904
5905 err = pci_enable_device_mem(pdev);
5906 if (err) {
5907 e_dev_err("Cannot enable PCI device from suspend\n");
5908 return err;
5909 }
5910 smp_mb__before_atomic();
5911 clear_bit(__IXGBE_DISABLED, &adapter->state);
5912 pci_set_master(pdev);
5913
5914 pci_wake_from_d3(pdev, false);
5915
5916 ixgbe_reset(adapter);
5917
5918 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5919
5920 rtnl_lock();
5921 err = ixgbe_init_interrupt_scheme(adapter);
5922 if (!err && netif_running(netdev))
5923 err = ixgbe_open(netdev);
5924
5925 rtnl_unlock();
5926
5927 if (err)
5928 return err;
5929
5930 netif_device_attach(netdev);
5931
5932 return 0;
5933}
5934#endif
5935
5936static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5937{
5938 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5939 struct net_device *netdev = adapter->netdev;
5940 struct ixgbe_hw *hw = &adapter->hw;
5941 u32 ctrl, fctrl;
5942 u32 wufc = adapter->wol;
5943#ifdef CONFIG_PM
5944 int retval = 0;
5945#endif
5946
5947 netif_device_detach(netdev);
5948
5949 rtnl_lock();
5950 if (netif_running(netdev))
5951 ixgbe_close_suspend(adapter);
5952 rtnl_unlock();
5953
5954 ixgbe_clear_interrupt_scheme(adapter);
5955
5956#ifdef CONFIG_PM
5957 retval = pci_save_state(pdev);
5958 if (retval)
5959 return retval;
5960
5961#endif
5962 if (hw->mac.ops.stop_link_on_d3)
5963 hw->mac.ops.stop_link_on_d3(hw);
5964
5965 if (wufc) {
5966 ixgbe_set_rx_mode(netdev);
5967
5968
5969 if (hw->mac.ops.enable_tx_laser)
5970 hw->mac.ops.enable_tx_laser(hw);
5971
5972
5973 if (wufc & IXGBE_WUFC_MC) {
5974 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5975 fctrl |= IXGBE_FCTRL_MPE;
5976 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5977 }
5978
5979 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
5980 ctrl |= IXGBE_CTRL_GIO_DIS;
5981 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
5982
5983 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
5984 } else {
5985 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
5986 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5987 }
5988
5989 switch (hw->mac.type) {
5990 case ixgbe_mac_82598EB:
5991 pci_wake_from_d3(pdev, false);
5992 break;
5993 case ixgbe_mac_82599EB:
5994 case ixgbe_mac_X540:
5995 case ixgbe_mac_X550:
5996 case ixgbe_mac_X550EM_x:
5997 pci_wake_from_d3(pdev, !!wufc);
5998 break;
5999 default:
6000 break;
6001 }
6002
6003 *enable_wake = !!wufc;
6004 if (hw->phy.ops.set_phy_power && !*enable_wake)
6005 hw->phy.ops.set_phy_power(hw, false);
6006
6007 ixgbe_release_hw_control(adapter);
6008
6009 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
6010 pci_disable_device(pdev);
6011
6012 return 0;
6013}
6014
6015#ifdef CONFIG_PM
6016static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
6017{
6018 int retval;
6019 bool wake;
6020
6021 retval = __ixgbe_shutdown(pdev, &wake);
6022 if (retval)
6023 return retval;
6024
6025 if (wake) {
6026 pci_prepare_to_sleep(pdev);
6027 } else {
6028 pci_wake_from_d3(pdev, false);
6029 pci_set_power_state(pdev, PCI_D3hot);
6030 }
6031
6032 return 0;
6033}
6034#endif
6035
6036static void ixgbe_shutdown(struct pci_dev *pdev)
6037{
6038 bool wake;
6039
6040 __ixgbe_shutdown(pdev, &wake);
6041
6042 if (system_state == SYSTEM_POWER_OFF) {
6043 pci_wake_from_d3(pdev, wake);
6044 pci_set_power_state(pdev, PCI_D3hot);
6045 }
6046}
6047
6048
6049
6050
6051
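/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/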
6052void ixgbe_update_stats(struct ixgbe_adapter *adapter)
6053{
6054 struct net_device *netdev = adapter->netdev;
6055 struct ixgbe_hw *hw = &adapter->hw;
6056 struct ixgbe_hw_stats *hwstats = &adapter->stats;
6057 u64 total_mpc = 0;
6058 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
6059 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
6060 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
6061 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
6062
6063 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6064 test_bit(__IXGBE_RESETTING, &adapter->state))
6065 return;
6066
6067 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
6068 u64 rsc_count = 0;
6069 u64 rsc_flush = 0;
6070 for (i = 0; i < adapter->num_rx_queues; i++) {
6071 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
6072 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
6073 }
6074 adapter->rsc_total_count = rsc_count;
6075 adapter->rsc_total_flush = rsc_flush;
6076 }
6077
6078 for (i = 0; i < adapter->num_rx_queues; i++) {
6079 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
6080 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
6081 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
6082 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
6083 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
6084 bytes += rx_ring->stats.bytes;
6085 packets += rx_ring->stats.packets;
6086 }
6087 adapter->non_eop_descs = non_eop_descs;
6088 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
6089 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
6090 adapter->hw_csum_rx_error = hw_csum_rx_error;
6091 netdev->stats.rx_bytes = bytes;
6092 netdev->stats.rx_packets = packets;
6093
6094 bytes = 0;
6095 packets = 0;
6096
6097 for (i = 0; i < adapter->num_tx_queues; i++) {
6098 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6099 restart_queue += tx_ring->tx_stats.restart_queue;
6100 tx_busy += tx_ring->tx_stats.tx_busy;
6101 bytes += tx_ring->stats.bytes;
6102 packets += tx_ring->stats.packets;
6103 }
6104 adapter->restart_queue = restart_queue;
6105 adapter->tx_busy = tx_busy;
6106 netdev->stats.tx_bytes = bytes;
6107 netdev->stats.tx_packets = packets;
6108
6109 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
6110
6111
6112 for (i = 0; i < 8; i++) {
6113
6114 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
6115 missed_rx += mpc;
6116 hwstats->mpc[i] += mpc;
6117 total_mpc += hwstats->mpc[i];
6118 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
6119 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
6120 switch (hw->mac.type) {
6121 case ixgbe_mac_82598EB:
6122 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
6123 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
6124 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
6125 hwstats->pxonrxc[i] +=
6126 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
6127 break;
6128 case ixgbe_mac_82599EB:
6129 case ixgbe_mac_X540:
6130 case ixgbe_mac_X550:
6131 case ixgbe_mac_X550EM_x:
6132 hwstats->pxonrxc[i] +=
6133 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
6134 break;
6135 default:
6136 break;
6137 }
6138 }
6139
6140
6141 for (i = 0; i < 16; i++) {
6142 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
6143 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
6144 if ((hw->mac.type == ixgbe_mac_82599EB) ||
6145 (hw->mac.type == ixgbe_mac_X540) ||
6146 (hw->mac.type == ixgbe_mac_X550) ||
6147 (hw->mac.type == ixgbe_mac_X550EM_x)) {
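			/* the queue byte counters are read-to-clear: accumulate
			 * the low half and read the high half only to clear it
			 */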
6148 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
6149 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
6150 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
6151 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
6152 }
6153 }
6154
6155 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
6156
6157 hwstats->gprc -= missed_rx;
6158
6159 ixgbe_update_xoff_received(adapter);
6160
6161
6162 switch (hw->mac.type) {
6163 case ixgbe_mac_82598EB:
6164 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
6165 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
6166 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
6167 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
6168 break;
6169 case ixgbe_mac_X540:
6170 case ixgbe_mac_X550:
6171 case ixgbe_mac_X550EM_x:
6172
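		/* OS2BMC stats are X540 and later */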
6173 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
6174 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
6175 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
6176 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
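		/* fall through */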
6177 case ixgbe_mac_82599EB:
6178 for (i = 0; i < 16; i++)
6179 adapter->hw_rx_no_dma_resources +=
6180 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
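		/* accumulate only the low halves; the high-half reads just
		 * clear the read-to-clear hardware counters
		 */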
6181 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
6182 IXGBE_READ_REG(hw, IXGBE_GORCH);
6183 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
6184 IXGBE_READ_REG(hw, IXGBE_GOTCH);
6185 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
6186 IXGBE_READ_REG(hw, IXGBE_TORH);
6187 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
6188 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
6189 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
6190#ifdef IXGBE_FCOE
6191 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
6192 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
6193 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
6194 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
6195 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
6196 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
6197
6198 if (adapter->fcoe.ddp_pool) {
6199 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
6200 struct ixgbe_fcoe_ddp_pool *ddp_pool;
6201 unsigned int cpu;
6202 u64 noddp = 0, noddp_ext_buff = 0;
6203 for_each_possible_cpu(cpu) {
6204 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
6205 noddp += ddp_pool->noddp;
6206 noddp_ext_buff += ddp_pool->noddp_ext_buff;
6207 }
6208 hwstats->fcoe_noddp = noddp;
6209 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
6210 }
6211#endif
6212 break;
6213 default:
6214 break;
6215 }
6216 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
6217 hwstats->bprc += bprc;
6218 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
6219 if (hw->mac.type == ixgbe_mac_82598EB)
6220 hwstats->mprc -= bprc;
6221 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
6222 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
6223 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
6224 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
6225 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
6226 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
6227 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
6228 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
6229 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
6230 hwstats->lxontxc += lxon;
6231 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
6232 hwstats->lxofftxc += lxoff;
6233 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
6234 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
6235
6236
6237
6238 xon_off_tot = lxon + lxoff;
6239 hwstats->gptc -= xon_off_tot;
6240 hwstats->mptc -= xon_off_tot;
6241 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
6242 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
6243 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
6244 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
6245 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
6246 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
6247 hwstats->ptc64 -= xon_off_tot;
6248 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
6249 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
6250 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
6251 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
6252 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
6253 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
6254
6255
6256 netdev->stats.multicast = hwstats->mprc;
6257
6258
6259 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
6260 netdev->stats.rx_dropped = 0;
6261 netdev->stats.rx_length_errors = hwstats->rlec;
6262 netdev->stats.rx_crc_errors = hwstats->crcerrs;
6263 netdev->stats.rx_missed_errors = total_mpc;
6264}
6265
6266
6267
6268
6269
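/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/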
6270static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
6271{
6272 struct ixgbe_hw *hw = &adapter->hw;
6273 int i;
6274
6275 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
6276 return;
6277
6278 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
6279
6280
6281 if (test_bit(__IXGBE_DOWN, &adapter->state))
6282 return;
6283
6284
6285 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
6286 return;
6287
6288 adapter->fdir_overflow++;
6289
6290 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
6291 for (i = 0; i < adapter->num_tx_queues; i++)
6292 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
6293 &(adapter->tx_ring[i]->state));
6294
6295 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
6296 } else {
6297 e_err(probe, "failed to finish FDIR re-initialization, "
6298 "ignored adding FDIR ATR filters\n");
6299 }
6300}
6301
6302
6303
6304
6305
6306
6307
6308
6309
6310
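/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/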
6311static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
6312{
6313 struct ixgbe_hw *hw = &adapter->hw;
6314 u64 eics = 0;
6315 int i;
6316
6317
6318 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6319 test_bit(__IXGBE_REMOVING, &adapter->state) ||
6320 test_bit(__IXGBE_RESETTING, &adapter->state))
6321 return;
6322
6323
6324 if (netif_carrier_ok(adapter->netdev)) {
6325 for (i = 0; i < adapter->num_tx_queues; i++)
6326 set_check_for_tx_hang(adapter->tx_ring[i]);
6327 }
6328
6329 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
6330
6331
6332
6333
6334
6335 IXGBE_WRITE_REG(hw, IXGBE_EICS,
6336 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
6337 } else {
6338
6339 for (i = 0; i < adapter->num_q_vectors; i++) {
6340 struct ixgbe_q_vector *qv = adapter->q_vector[i];
6341 if (qv->rx.ring || qv->tx.ring)
6342 eics |= ((u64)1 << i);
6343 }
6344 }
6345
6346
6347 ixgbe_irq_rearm_queues(adapter, eics);
6348}
6349
6350
6351
6352
6353
6354
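/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/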
6355static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
6356{
6357 struct ixgbe_hw *hw = &adapter->hw;
6358 u32 link_speed = adapter->link_speed;
6359 bool link_up = adapter->link_up;
6360 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
6361
6362 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
6363 return;
6364
6365 if (hw->mac.ops.check_link) {
6366 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
6367 } else {
6368
6369 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
6370 link_up = true;
6371 }
6372
6373 if (adapter->ixgbe_ieee_pfc)
6374 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
6375
6376 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
6377 hw->mac.ops.fc_enable(hw);
6378 ixgbe_set_rx_drop_en(adapter);
6379 }
6380
6381 if (link_up ||
6382 time_after(jiffies, (adapter->link_check_timeout +
6383 IXGBE_TRY_LINK_TIMEOUT))) {
6384 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6385 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
6386 IXGBE_WRITE_FLUSH(hw);
6387 }
6388
6389 adapter->link_up = link_up;
6390 adapter->link_speed = link_speed;
6391}
6392
6393static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
6394{
6395#ifdef CONFIG_IXGBE_DCB
6396 struct net_device *netdev = adapter->netdev;
6397 struct dcb_app app = {
6398 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
6399 .protocol = 0,
6400 };
6401 u8 up = 0;
6402
6403 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
6404 up = dcb_ieee_getapp_mask(netdev, &app);
6405
6406 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
6407#endif
6408}
6409
6410
6411
6412
6413
6414
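/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter: pointer to the device adapter structure
 **/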
6415static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
6416{
6417 struct net_device *netdev = adapter->netdev;
6418 struct ixgbe_hw *hw = &adapter->hw;
6419 struct net_device *upper;
6420 struct list_head *iter;
6421 u32 link_speed = adapter->link_speed;
6422 const char *speed_str;
6423 bool flow_rx, flow_tx;
6424
6425
6426 if (netif_carrier_ok(netdev))
6427 return;
6428
6429 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
6430
6431 switch (hw->mac.type) {
6432 case ixgbe_mac_82598EB: {
6433 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6434 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
6435 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
6436 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
6437 }
6438 break;
6439 case ixgbe_mac_X540:
6440 case ixgbe_mac_X550:
6441 case ixgbe_mac_X550EM_x:
6442 case ixgbe_mac_82599EB: {
6443 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6444 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6445 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6446 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
6447 }
6448 break;
6449 default:
6450 flow_tx = false;
6451 flow_rx = false;
6452 break;
6453 }
6454
6455 adapter->last_rx_ptp_check = jiffies;
6456
6457 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6458 ixgbe_ptp_start_cyclecounter(adapter);
6459
6460 switch (link_speed) {
6461 case IXGBE_LINK_SPEED_10GB_FULL:
6462 speed_str = "10 Gbps";
6463 break;
6464 case IXGBE_LINK_SPEED_2_5GB_FULL:
6465 speed_str = "2.5 Gbps";
6466 break;
6467 case IXGBE_LINK_SPEED_1GB_FULL:
6468 speed_str = "1 Gbps";
6469 break;
6470 case IXGBE_LINK_SPEED_100_FULL:
6471 speed_str = "100 Mbps";
6472 break;
6473 default:
6474 speed_str = "unknown speed";
6475 break;
6476 }
6477 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
6478 ((flow_rx && flow_tx) ? "RX/TX" :
6479 (flow_rx ? "RX" :
6480 (flow_tx ? "TX" : "None"))));
6481
6482 netif_carrier_on(netdev);
6483 ixgbe_check_vf_rate_limit(adapter);
6484
6485
6486 netif_tx_wake_all_queues(adapter->netdev);
6487
6488
6489 rtnl_lock();
6490 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
6491 if (netif_is_macvlan(upper)) {
6492 struct macvlan_dev *vlan = netdev_priv(upper);
6493
6494 if (vlan->fwd_priv)
6495 netif_tx_wake_all_queues(upper);
6496 }
6497 }
6498 rtnl_unlock();
6499
6500
6501 ixgbe_update_default_up(adapter);
6502
6503
6504 ixgbe_ping_all_vfs(adapter);
6505}
6506
6507
6508
6509
6510
6511
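/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter: pointer to the adapter structure
 **/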
6512static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
6513{
6514 struct net_device *netdev = adapter->netdev;
6515 struct ixgbe_hw *hw = &adapter->hw;
6516
6517 adapter->link_up = false;
6518 adapter->link_speed = 0;
6519
6520
6521 if (!netif_carrier_ok(netdev))
6522 return;
6523
6524
6525 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
6526 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
6527
6528 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6529 ixgbe_ptp_start_cyclecounter(adapter);
6530
6531 e_info(drv, "NIC Link is Down\n");
6532 netif_carrier_off(netdev);
6533
6534
6535 ixgbe_ping_all_vfs(adapter);
6536}
6537
6538static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
6539{
6540 int i;
6541
6542 for (i = 0; i < adapter->num_tx_queues; i++) {
6543 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6544
6545 if (tx_ring->next_to_use != tx_ring->next_to_clean)
6546 return true;
6547 }
6548
6549 return false;
6550}
6551
6552static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
6553{
6554 struct ixgbe_hw *hw = &adapter->hw;
6555 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
6556 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
6557
6558 int i, j;
6559
6560 if (!adapter->num_vfs)
6561 return false;
6562
6563
6564 if (hw->mac.type >= ixgbe_mac_X550)
6565 return false;
6566
6567 for (i = 0; i < adapter->num_vfs; i++) {
6568 for (j = 0; j < q_per_pool; j++) {
6569 u32 h, t;
6570
6571 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
6572 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
6573
6574 if (h != t)
6575 return true;
6576 }
6577 }
6578
6579 return false;
6580}
6581
6582
6583
6584
6585
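/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 *
 * If the link went down while Tx work was still outstanding, request a
 * reset so the pending transmits get cleaned up.
 **/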
6586static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
6587{
6588 if (!netif_carrier_ok(adapter->netdev)) {
6589 if (ixgbe_ring_tx_pending(adapter) ||
6590 ixgbe_vf_tx_pending(adapter)) {
6591
6592
6593
6594
6595
6596 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
6597 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
6598 }
6599 }
6600}
6601
6602#ifdef CONFIG_PCI_IOV
6603static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
6604 struct pci_dev *vfdev)
6605{
6606 if (!pci_wait_for_pending_transaction(vfdev))
6607 e_dev_warn("Issuing VFLR with pending transactions\n");
6608
6609 e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
6610 pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
6611
6612 msleep(100);
6613}
6614
6615static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
6616{
6617 struct ixgbe_hw *hw = &adapter->hw;
6618 struct pci_dev *pdev = adapter->pdev;
6619 struct pci_dev *vfdev;
6620 u32 gpc;
6621 int pos;
6622 unsigned short vf_id;
6623
6624 if (!(netif_carrier_ok(adapter->netdev)))
6625 return;
6626
6627 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
6628 if (gpc)
6629 return;
6630
6631
6632
6633
6634
6635
6636 if (!pdev)
6637 return;
6638
6639 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6640 if (!pos)
6641 return;
6642
6643
6644 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
6645
6646
6647 vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
6648 while (vfdev) {
6649 if (vfdev->is_virtfn && (vfdev->physfn == pdev)) {
6650 u16 status_reg;
6651
6652 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
6653 if (status_reg & PCI_STATUS_REC_MASTER_ABORT)
6654
6655 ixgbe_issue_vf_flr(adapter, vfdev);
6656 }
6657
6658 vfdev = pci_get_device(pdev->vendor, vf_id, vfdev);
6659 }
6660}
6661
6662static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
6663{
6664 u32 ssvpc;
6665
6666
6667 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
6668 adapter->num_vfs == 0)
6669 return;
6670
6671 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
6672
6673
6674
6675
6676
6677 if (!ssvpc)
6678 return;
6679
6680 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
6681}
6682#else
6683static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
6684{
6685}
6686
6687static void
6688ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
6689{
6690}
6691#endif
6692
6693
6694
6695
6696
6697
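/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/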
6698static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
6699{
6700
6701 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6702 test_bit(__IXGBE_REMOVING, &adapter->state) ||
6703 test_bit(__IXGBE_RESETTING, &adapter->state))
6704 return;
6705
6706 ixgbe_watchdog_update_link(adapter);
6707
6708 if (adapter->link_up)
6709 ixgbe_watchdog_link_is_up(adapter);
6710 else
6711 ixgbe_watchdog_link_is_down(adapter);
6712
6713 ixgbe_check_for_bad_vf(adapter);
6714 ixgbe_spoof_check(adapter);
6715 ixgbe_update_stats(adapter);
6716
6717 ixgbe_watchdog_flush_tx(adapter);
6718}
6719
6720
6721
6722
6723
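/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/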
6724static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
6725{
6726 struct ixgbe_hw *hw = &adapter->hw;
6727 s32 err;
6728
6729
6730 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
6731 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6732 return;
6733
6734 if (adapter->sfp_poll_time &&
6735 time_after(adapter->sfp_poll_time, jiffies))
6736 return;
6737
6738
6739 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6740 return;
6741
6742 adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
6743
6744 err = hw->phy.ops.identify_sfp(hw);
6745 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6746 goto sfp_out;
6747
6748 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
6749
6750
6751 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
6752 }
6753
6754
6755 if (err)
6756 goto sfp_out;
6757
6758
6759 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6760 goto sfp_out;
6761
6762 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
6763
6764
6765
6766
6767
6768
6769 if (hw->mac.type == ixgbe_mac_82598EB)
6770 err = hw->phy.ops.reset(hw);
6771 else
6772 err = hw->mac.ops.setup_sfp(hw);
6773
6774 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6775 goto sfp_out;
6776
6777 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
6778 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
6779
6780sfp_out:
6781 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6782
6783 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
6784 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
6785 e_dev_err("failed to initialize because an unsupported "
6786 "SFP+ module type was detected.\n");
6787 e_dev_err("Reload the driver after installing a "
6788 "supported module.\n");
6789 unregister_netdev(adapter->netdev);
6790 }
6791}
6792
6793
6794
6795
6796
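/**
 * ixgbe_sfp_link_config_subtask - set up link SFP if needed
 * @adapter: the ixgbe adapter structure
 **/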
6797static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
6798{
6799 struct ixgbe_hw *hw = &adapter->hw;
6800 u32 speed;
6801 bool autoneg = false;
6802
6803 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
6804 return;
6805
6806
6807 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6808 return;
6809
6810 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
6811
6812 speed = hw->phy.autoneg_advertised;
6813 if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
6814 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
6815
6816
6817 if (!autoneg) {
6818 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
6819 speed = IXGBE_LINK_SPEED_10GB_FULL;
6820 }
6821 }
6822
6823 if (hw->mac.ops.setup_link)
6824 hw->mac.ops.setup_link(hw, speed, true);
6825
6826 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
6827 adapter->link_check_timeout = jiffies;
6828 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6829}
6830
6831
6832
6833
6834
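/**
 * ixgbe_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/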
6835static void ixgbe_service_timer(unsigned long data)
6836{
6837 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
6838 unsigned long next_event_offset;
6839
6840
6841 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
6842 next_event_offset = HZ / 10;
6843 else
6844 next_event_offset = HZ * 2;
6845
6846
6847 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
6848
6849 ixgbe_service_event_schedule(adapter);
6850}
6851
6852static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
6853{
6854 struct ixgbe_hw *hw = &adapter->hw;
6855 u32 status;
6856
6857 if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
6858 return;
6859
6860 adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
6861
6862 if (!hw->phy.ops.handle_lasi)
6863 return;
6864
6865 status = hw->phy.ops.handle_lasi(&adapter->hw);
6866 if (status != IXGBE_ERR_OVERTEMP)
6867 return;
6868
6869 e_crit(drv, "%s\n", ixgbe_overheat_msg);
6870}
6871
6872static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
6873{
6874 if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
6875 return;
6876
6877 adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
6878
6879
6880 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6881 test_bit(__IXGBE_REMOVING, &adapter->state) ||
6882 test_bit(__IXGBE_RESETTING, &adapter->state))
6883 return;
6884
6885 ixgbe_dump(adapter);
6886 netdev_err(adapter->netdev, "Reset adapter\n");
6887 adapter->tx_timeout_count++;
6888
6889 rtnl_lock();
6890 ixgbe_reinit_locked(adapter);
6891 rtnl_unlock();
6892}
6893
6894
6895
6896
6897
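/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/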
6898static void ixgbe_service_task(struct work_struct *work)
6899{
6900 struct ixgbe_adapter *adapter = container_of(work,
6901 struct ixgbe_adapter,
6902 service_task);
6903 if (ixgbe_removed(adapter->hw.hw_addr)) {
6904 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
6905 rtnl_lock();
6906 ixgbe_down(adapter);
6907 rtnl_unlock();
6908 }
6909 ixgbe_service_event_complete(adapter);
6910 return;
6911 }
6912#ifdef CONFIG_IXGBE_VXLAN
6913 if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
6914 adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
6915 vxlan_get_rx_port(adapter->netdev);
6916 }
6917#endif
6918 ixgbe_reset_subtask(adapter);
6919 ixgbe_phy_interrupt_subtask(adapter);
6920 ixgbe_sfp_detection_subtask(adapter);
6921 ixgbe_sfp_link_config_subtask(adapter);
6922 ixgbe_check_overtemp_subtask(adapter);
6923 ixgbe_watchdog_subtask(adapter);
6924 ixgbe_fdir_reinit_subtask(adapter);
6925 ixgbe_check_hang_subtask(adapter);
6926
6927 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
6928 ixgbe_ptp_overflow_check(adapter);
6929 ixgbe_ptp_rx_hang(adapter);
6930 }
6931
6932 ixgbe_service_event_complete(adapter);
6933}
6934
6935static int ixgbe_tso(struct ixgbe_ring *tx_ring,
6936 struct ixgbe_tx_buffer *first,
6937 u8 *hdr_len)
6938{
6939 struct sk_buff *skb = first->skb;
6940 u32 vlan_macip_lens, type_tucmd;
6941 u32 mss_l4len_idx, l4len;
6942 int err;
6943
6944 if (skb->ip_summed != CHECKSUM_PARTIAL)
6945 return 0;
6946
6947 if (!skb_is_gso(skb))
6948 return 0;
6949
6950 err = skb_cow_head(skb, 0);
6951 if (err < 0)
6952 return err;
6953
6954
6955 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
6956
6957 if (first->protocol == htons(ETH_P_IP)) {
6958 struct iphdr *iph = ip_hdr(skb);
6959 iph->tot_len = 0;
6960 iph->check = 0;
6961 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6962 iph->daddr, 0,
6963 IPPROTO_TCP,
6964 0);
6965 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
6966 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
6967 IXGBE_TX_FLAGS_CSUM |
6968 IXGBE_TX_FLAGS_IPV4;
6969 } else if (skb_is_gso_v6(skb)) {
6970 ipv6_hdr(skb)->payload_len = 0;
6971 tcp_hdr(skb)->check =
6972 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
6973 &ipv6_hdr(skb)->daddr,
6974 0, IPPROTO_TCP, 0);
6975 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
6976 IXGBE_TX_FLAGS_CSUM;
6977 }
6978
6979
6980 l4len = tcp_hdrlen(skb);
6981 *hdr_len = skb_transport_offset(skb) + l4len;
6982
6983
6984 first->gso_segs = skb_shinfo(skb)->gso_segs;
6985 first->bytecount += (first->gso_segs - 1) * *hdr_len;
6986
6987
6988 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
6989 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
6990
6991
6992 vlan_macip_lens = skb_network_header_len(skb);
6993 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
6994 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
6995
6996 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
6997 mss_l4len_idx);
6998
6999 return 1;
7000}
7001
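/* ixgbe_tx_csum - set up a checksum offload context descriptor
 * Works out the MAC/IP/L4 header layout (inner headers for encapsulated
 * frames) and writes a context descriptor so the hardware can insert the
 * L4 checksum for TCP, SCTP, and UDP packets.
 */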
7002static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
7003 struct ixgbe_tx_buffer *first)
7004{
7005 struct sk_buff *skb = first->skb;
7006 u32 vlan_macip_lens = 0;
7007 u32 mss_l4len_idx = 0;
7008 u32 type_tucmd = 0;
7009
7010 if (skb->ip_summed != CHECKSUM_PARTIAL) {
7011 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
7012 !(first->tx_flags & IXGBE_TX_FLAGS_CC))
7013 return;
7014 vlan_macip_lens = skb_network_offset(skb) <<
7015 IXGBE_ADVTXD_MACLEN_SHIFT;
7016 } else {
7017 u8 l4_hdr = 0;
7018 union {
7019 struct iphdr *ipv4;
7020 struct ipv6hdr *ipv6;
7021 u8 *raw;
7022 } network_hdr;
7023 union {
7024 struct tcphdr *tcphdr;
7025 u8 *raw;
7026 } transport_hdr;
7027
7028 if (skb->encapsulation) {
7029 network_hdr.raw = skb_inner_network_header(skb);
7030 transport_hdr.raw = skb_inner_transport_header(skb);
7031 vlan_macip_lens = skb_inner_network_offset(skb) <<
7032 IXGBE_ADVTXD_MACLEN_SHIFT;
7033 } else {
7034 network_hdr.raw = skb_network_header(skb);
7035 transport_hdr.raw = skb_transport_header(skb);
7036 vlan_macip_lens = skb_network_offset(skb) <<
7037 IXGBE_ADVTXD_MACLEN_SHIFT;
7038 }
7039
7040
7041 switch (network_hdr.ipv4->version) {
7042 case IPVERSION:
7043 vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
7044 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7045 l4_hdr = network_hdr.ipv4->protocol;
7046 break;
7047 case 6:
7048 vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
7049 l4_hdr = network_hdr.ipv6->nexthdr;
7050 break;
7051 default:
7052 if (unlikely(net_ratelimit())) {
7053 dev_warn(tx_ring->dev,
7054 "partial checksum but version=%d\n",
7055 network_hdr.ipv4->version);
7056 }
7057 }
7058
7059 switch (l4_hdr) {
7060 case IPPROTO_TCP:
7061 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
7062 mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
7063 IXGBE_ADVTXD_L4LEN_SHIFT;
7064 break;
7065 case IPPROTO_SCTP:
7066 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
7067 mss_l4len_idx = sizeof(struct sctphdr) <<
7068 IXGBE_ADVTXD_L4LEN_SHIFT;
7069 break;
7070 case IPPROTO_UDP:
7071 mss_l4len_idx = sizeof(struct udphdr) <<
7072 IXGBE_ADVTXD_L4LEN_SHIFT;
7073 break;
7074 default:
7075 if (unlikely(net_ratelimit())) {
7076 dev_warn(tx_ring->dev,
7077 "partial checksum but l4 proto=%x!\n",
7078 l4_hdr);
7079 }
7080 break;
7081 }
7082
7083
7084 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
7085 }
7086
7087
7088 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7089
7090 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
7091 type_tucmd, mss_l4len_idx);
7092}
7093
7094#define IXGBE_SET_FLAG(_input, _flag, _result) \
7095 ((_flag <= _result) ? \
7096 ((u32)(_input & _flag) * (_result / _flag)) : \
7097 ((u32)(_input & _flag) / (_flag / _result)))
7098
7099static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
7100{
7101
7102 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
7103 IXGBE_ADVTXD_DCMD_DEXT |
7104 IXGBE_ADVTXD_DCMD_IFCS;
7105
7106
7107 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
7108 IXGBE_ADVTXD_DCMD_VLE);
7109
7110
7111 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
7112 IXGBE_ADVTXD_DCMD_TSE);
7113
7114
7115 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
7116 IXGBE_ADVTXD_MAC_TSTAMP);
7117
7118
7119 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
7120
7121 return cmd_type;
7122}
7123
7124static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
7125 u32 tx_flags, unsigned int paylen)
7126{
7127 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
7128
7129
7130 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7131 IXGBE_TX_FLAGS_CSUM,
7132 IXGBE_ADVTXD_POPTS_TXSM);
7133
7134
7135 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7136 IXGBE_TX_FLAGS_IPV4,
7137 IXGBE_ADVTXD_POPTS_IXSM);
7138
7139
7140
7141
7142
7143 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7144 IXGBE_TX_FLAGS_CC,
7145 IXGBE_ADVTXD_CC);
7146
7147 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
7148}
7149
7150static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7151{
7152 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
7153
7154
7155
7156
7157
7158 smp_mb();
7159
7160
7161
7162
7163 if (likely(ixgbe_desc_unused(tx_ring) < size))
7164 return -EBUSY;
7165
7166
7167 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
7168 ++tx_ring->tx_stats.restart_queue;
7169 return 0;
7170}
7171
7172static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7173{
7174 if (likely(ixgbe_desc_unused(tx_ring) >= size))
7175 return 0;
7176
7177 return __ixgbe_maybe_stop_tx(tx_ring, size);
7178}
7179
7180#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
7181 IXGBE_TXD_CMD_RS)
7182
7183static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
7184 struct ixgbe_tx_buffer *first,
7185 const u8 hdr_len)
7186{
7187 struct sk_buff *skb = first->skb;
7188 struct ixgbe_tx_buffer *tx_buffer;
7189 union ixgbe_adv_tx_desc *tx_desc;
7190 struct skb_frag_struct *frag;
7191 dma_addr_t dma;
7192 unsigned int data_len, size;
7193 u32 tx_flags = first->tx_flags;
7194 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
7195 u16 i = tx_ring->next_to_use;
7196
7197 tx_desc = IXGBE_TX_DESC(tx_ring, i);
7198
7199 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
7200
7201 size = skb_headlen(skb);
7202 data_len = skb->data_len;
7203
7204#ifdef IXGBE_FCOE
7205 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
7206 if (data_len < sizeof(struct fcoe_crc_eof)) {
7207 size -= sizeof(struct fcoe_crc_eof) - data_len;
7208 data_len = 0;
7209 } else {
7210 data_len -= sizeof(struct fcoe_crc_eof);
7211 }
7212 }
7213
7214#endif
7215 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
7216
7217 tx_buffer = first;
7218
7219 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
7220 if (dma_mapping_error(tx_ring->dev, dma))
7221 goto dma_error;
7222
7223
7224 dma_unmap_len_set(tx_buffer, len, size);
7225 dma_unmap_addr_set(tx_buffer, dma, dma);
7226
7227 tx_desc->read.buffer_addr = cpu_to_le64(dma);
7228
7229 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
7230 tx_desc->read.cmd_type_len =
7231 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
7232
7233 i++;
7234 tx_desc++;
7235 if (i == tx_ring->count) {
7236 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7237 i = 0;
7238 }
7239 tx_desc->read.olinfo_status = 0;
7240
7241 dma += IXGBE_MAX_DATA_PER_TXD;
7242 size -= IXGBE_MAX_DATA_PER_TXD;
7243
7244 tx_desc->read.buffer_addr = cpu_to_le64(dma);
7245 }
7246
7247 if (likely(!data_len))
7248 break;
7249
7250 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
7251
7252 i++;
7253 tx_desc++;
7254 if (i == tx_ring->count) {
7255 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7256 i = 0;
7257 }
7258 tx_desc->read.olinfo_status = 0;
7259
7260#ifdef IXGBE_FCOE
7261 size = min_t(unsigned int, data_len, skb_frag_size(frag));
7262#else
7263 size = skb_frag_size(frag);
7264#endif
7265 data_len -= size;
7266
7267 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
7268 DMA_TO_DEVICE);
7269
7270 tx_buffer = &tx_ring->tx_buffer_info[i];
7271 }
7272
7273
7274 cmd_type |= size | IXGBE_TXD_CMD;
7275 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
7276
7277 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
7278
7279
7280 first->time_stamp = jiffies;
7281
7282
7283
7284
7285
7286
7287
7288
7289
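/* Force memory writes to complete before letting h/w know there
 * are new descriptors to fetch.  (Only applicable for weak-ordered
 * memory model archs, such as IA-64.)
 *
 * We also need this memory barrier to make certain all of the
 * status bits have been updated before next_to_watch is written.
 */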
7290 wmb();
7291
7292
7293 first->next_to_watch = tx_desc;
7294
7295 i++;
7296 if (i == tx_ring->count)
7297 i = 0;
7298
7299 tx_ring->next_to_use = i;
7300
7301 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
7302
7303 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
7304 writel(i, tx_ring->tail);
7305
7306
7307
7308
7309 mmiowb();
7310 }
7311
7312 return;
7313dma_error:
7314 dev_err(tx_ring->dev, "TX DMA map failed\n");
7315
7316
7317 for (;;) {
7318 tx_buffer = &tx_ring->tx_buffer_info[i];
7319 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
7320 if (tx_buffer == first)
7321 break;
7322 if (i == 0)
7323 i = tx_ring->count;
7324 i--;
7325 }
7326
7327 tx_ring->next_to_use = i;
7328}
7329
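/* ixgbe_atr - sample this transmit flow and program a Flow Director
 * signature filter so receive traffic for the same flow is steered to
 * the queue that transmitted it.
 */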
7330static void ixgbe_atr(struct ixgbe_ring *ring,
7331 struct ixgbe_tx_buffer *first)
7332{
7333 struct ixgbe_q_vector *q_vector = ring->q_vector;
7334 union ixgbe_atr_hash_dword input = { .dword = 0 };
7335 union ixgbe_atr_hash_dword common = { .dword = 0 };
7336 union {
7337 unsigned char *network;
7338 struct iphdr *ipv4;
7339 struct ipv6hdr *ipv6;
7340 } hdr;
7341 struct tcphdr *th;
7342 struct sk_buff *skb;
7343#ifdef CONFIG_IXGBE_VXLAN
7344 u8 encap = false;
7345#endif
7346 __be16 vlan_id;
7347
7348
7349 if (!q_vector)
7350 return;
7351
7352
7353 if (!ring->atr_sample_rate)
7354 return;
7355
7356 ring->atr_count++;
7357
7358
7359 skb = first->skb;
7360 hdr.network = skb_network_header(skb);
7361 if (skb->encapsulation) {
7362#ifdef CONFIG_IXGBE_VXLAN
7363 struct ixgbe_adapter *adapter = q_vector->adapter;
7364
7365 if (!adapter->vxlan_port)
7366 return;
7367 if (first->protocol != htons(ETH_P_IP) ||
7368 hdr.ipv4->version != IPVERSION ||
7369 hdr.ipv4->protocol != IPPROTO_UDP) {
7370 return;
7371 }
7372 if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
7373 return;
7374 encap = true;
7375 hdr.network = skb_inner_network_header(skb);
7376 th = inner_tcp_hdr(skb);
7377#else
7378 return;
7379#endif
7380 } else {
7381
7382 if ((first->protocol != htons(ETH_P_IPV6) ||
7383 hdr.ipv6->nexthdr != IPPROTO_TCP) &&
7384 (first->protocol != htons(ETH_P_IP) ||
7385 hdr.ipv4->protocol != IPPROTO_TCP))
7386 return;
7387 th = tcp_hdr(skb);
7388 }
7389
7390
7391 if (!th || th->fin)
7392 return;
7393
7394
7395 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
7396 return;
7397
7398
7399 ring->atr_count = 0;
7400
7401 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
7402
7403
7404
7405
7406
7407
7408
7409
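/* The filter input is split in two: the uncompressed fields (vlan_id
 * and flow_type) live in 'input', while the ports and IP addresses are
 * XORed together into the compressed 'common' dword below.  Source and
 * destination are swapped, matching how the receiver sees the flow.
 */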
7410 input.formatted.vlan_id = vlan_id;
7411
7412
7413
7414
7415
7416 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
7417 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
7418 else
7419 common.port.src ^= th->dest ^ first->protocol;
7420 common.port.dst ^= th->source;
7421
7422 if (first->protocol == htons(ETH_P_IP)) {
7423 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
7424 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
7425 } else {
7426 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
7427 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
7428 hdr.ipv6->saddr.s6_addr32[1] ^
7429 hdr.ipv6->saddr.s6_addr32[2] ^
7430 hdr.ipv6->saddr.s6_addr32[3] ^
7431 hdr.ipv6->daddr.s6_addr32[0] ^
7432 hdr.ipv6->daddr.s6_addr32[1] ^
7433 hdr.ipv6->daddr.s6_addr32[2] ^
7434 hdr.ipv6->daddr.s6_addr32[3];
7435 }
7436
7437#ifdef CONFIG_IXGBE_VXLAN
7438 if (encap)
7439 input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
7440#endif
7441
7442
7443 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
7444 input, common, ring->queue_index);
7445}
7446
7447static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
7448 void *accel_priv, select_queue_fallback_t fallback)
7449{
7450 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
7451#ifdef IXGBE_FCOE
7452 struct ixgbe_adapter *adapter;
7453 struct ixgbe_ring_feature *f;
7454 int txq;
7455#endif
7456
7457 if (fwd_adapter)
7458 return skb->queue_mapping + fwd_adapter->tx_base_queue;
7459
7460#ifdef IXGBE_FCOE
7461
7462
7463
7464
7465
7466 switch (vlan_get_protocol(skb)) {
7467 case htons(ETH_P_FCOE):
7468 case htons(ETH_P_FIP):
7469 adapter = netdev_priv(dev);
7470
7471 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
7472 break;
7473 default:
7474 return fallback(dev, skb);
7475 }
7476
7477 f = &adapter->ring_feature[RING_F_FCOE];
7478
7479 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
7480 smp_processor_id();
7481
7482 while (txq >= f->indices)
7483 txq -= f->indices;
7484
7485 return txq + f->offset;
7486#else
7487 return fallback(dev, skb);
7488#endif
7489}
7490
7491netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
7492 struct ixgbe_adapter *adapter,
7493 struct ixgbe_ring *tx_ring)
7494{
7495 struct ixgbe_tx_buffer *first;
7496 int tso;
7497 u32 tx_flags = 0;
7498 unsigned short f;
7499 u16 count = TXD_USE_COUNT(skb_headlen(skb));
7500 __be16 protocol = skb->protocol;
7501 u8 hdr_len = 0;
7502
7503
7504
7505
7506
7507
7508
7509
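/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
 *       + 2 desc gap to keep tail from touching head,
 *       + 1 desc for context descriptor,
 * otherwise try next time
 */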
7510 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
7511 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
7512
7513 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
7514 tx_ring->tx_stats.tx_busy++;
7515 return NETDEV_TX_BUSY;
7516 }
7517
7518
7519 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
7520 first->skb = skb;
7521 first->bytecount = skb->len;
7522 first->gso_segs = 1;
7523
7524
7525 if (skb_vlan_tag_present(skb)) {
7526 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
7527 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
7528
7529 } else if (protocol == htons(ETH_P_8021Q)) {
7530 struct vlan_hdr *vhdr, _vhdr;
7531 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
7532 if (!vhdr)
7533 goto out_drop;
7534
7535 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
7536 IXGBE_TX_FLAGS_VLAN_SHIFT;
7537 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
7538 }
7539 protocol = vlan_get_protocol(skb);
7540
7541 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
7542 adapter->ptp_clock &&
7543 !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
7544 &adapter->state)) {
7545 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7546 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
7547
7548
7549 adapter->ptp_tx_skb = skb_get(skb);
7550 adapter->ptp_tx_start = jiffies;
7551 schedule_work(&adapter->ptp_tx_work);
7552 }
7553
7554 skb_tx_timestamp(skb);
7555
7556#ifdef CONFIG_PCI_IOV
7557
7558
7559
7560
7561 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7562 tx_flags |= IXGBE_TX_FLAGS_CC;
7563
7564#endif
7565
7566 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
7567 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
7568 (skb->priority != TC_PRIO_CONTROL))) {
7569 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
7570 tx_flags |= (skb->priority & 0x7) <<
7571 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
7572 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
7573 struct vlan_ethhdr *vhdr;
7574
7575 if (skb_cow_head(skb, 0))
7576 goto out_drop;
7577 vhdr = (struct vlan_ethhdr *)skb->data;
7578 vhdr->h_vlan_TCI = htons(tx_flags >>
7579 IXGBE_TX_FLAGS_VLAN_SHIFT);
7580 } else {
7581 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
7582 }
7583 }
7584
7585
7586 first->tx_flags = tx_flags;
7587 first->protocol = protocol;
7588
7589#ifdef IXGBE_FCOE
7590
7591 if ((protocol == htons(ETH_P_FCOE)) &&
7592 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
7593 tso = ixgbe_fso(tx_ring, first, &hdr_len);
7594 if (tso < 0)
7595 goto out_drop;
7596
7597 goto xmit_fcoe;
7598 }
7599
7600#endif
7601 tso = ixgbe_tso(tx_ring, first, &hdr_len);
7602 if (tso < 0)
7603 goto out_drop;
7604 else if (!tso)
7605 ixgbe_tx_csum(tx_ring, first);
7606
7607
7608 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
7609 ixgbe_atr(tx_ring, first);
7610
7611#ifdef IXGBE_FCOE
7612xmit_fcoe:
7613#endif
7614 ixgbe_tx_map(tx_ring, first, hdr_len);
7615
7616 return NETDEV_TX_OK;
7617
7618out_drop:
7619 dev_kfree_skb_any(first->skb);
7620 first->skb = NULL;
7621
7622 return NETDEV_TX_OK;
7623}
7624
7625static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
7626 struct net_device *netdev,
7627 struct ixgbe_ring *ring)
7628{
7629 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7630 struct ixgbe_ring *tx_ring;
7631
7632
7633
7634
7635
7636 if (skb_put_padto(skb, 17))
7637 return NETDEV_TX_OK;
7638
7639 tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
7640
7641 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
7642}
7643
7644static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
7645 struct net_device *netdev)
7646{
7647 return __ixgbe_xmit_frame(skb, netdev, NULL);
7648}
7649
7650
7651
7652
7653
7654
7655
7656
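/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/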
7657static int ixgbe_set_mac(struct net_device *netdev, void *p)
7658{
7659 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7660 struct ixgbe_hw *hw = &adapter->hw;
7661 struct sockaddr *addr = p;
7662 int ret;
7663
7664 if (!is_valid_ether_addr(addr->sa_data))
7665 return -EADDRNOTAVAIL;
7666
7667 ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
7668 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
7669 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
7670
7671 ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
7672 return ret > 0 ? 0 : ret;
7673}
7674
7675static int
7676ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
7677{
7678 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7679 struct ixgbe_hw *hw = &adapter->hw;
7680 u16 value;
7681 int rc;
7682
7683 if (prtad != hw->phy.mdio.prtad)
7684 return -EINVAL;
7685 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
7686 if (!rc)
7687 rc = value;
7688 return rc;
7689}
7690
7691static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
7692 u16 addr, u16 value)
7693{
7694 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7695 struct ixgbe_hw *hw = &adapter->hw;
7696
7697 if (prtad != hw->phy.mdio.prtad)
7698 return -EINVAL;
7699 return hw->phy.ops.write_reg(hw, addr, devad, value);
7700}
7701
7702static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
7703{
7704 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7705
7706 switch (cmd) {
7707 case SIOCSHWTSTAMP:
7708 return ixgbe_ptp_set_ts_config(adapter, req);
7709 case SIOCGHWTSTAMP:
7710 return ixgbe_ptp_get_ts_config(adapter, req);
7711 default:
7712 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
7713 }
7714}
7715
7716
7717
7718
7719
7720
7721
7722
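/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/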
7723static int ixgbe_add_sanmac_netdev(struct net_device *dev)
7724{
7725 int err = 0;
7726 struct ixgbe_adapter *adapter = netdev_priv(dev);
7727 struct ixgbe_hw *hw = &adapter->hw;
7728
7729 if (is_valid_ether_addr(hw->mac.san_addr)) {
7730 rtnl_lock();
7731 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
7732 rtnl_unlock();
7733
7734
7735 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
7736 }
7737 return err;
7738}
7739
7740
7741
7742
7743
7744
7745
7746
7747static int ixgbe_del_sanmac_netdev(struct net_device *dev)
7748{
7749 int err = 0;
7750 struct ixgbe_adapter *adapter = netdev_priv(dev);
7751 struct ixgbe_mac_info *mac = &adapter->hw.mac;
7752
7753 if (is_valid_ether_addr(mac->san_addr)) {
7754 rtnl_lock();
7755 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
7756 rtnl_unlock();
7757 }
7758 return err;
7759}
7760
7761#ifdef CONFIG_NET_POLL_CONTROLLER
7762
7763
7764
7765
7766
7767static void ixgbe_netpoll(struct net_device *netdev)
7768{
7769 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7770 int i;
7771
7772
7773 if (test_bit(__IXGBE_DOWN, &adapter->state))
7774 return;
7775
7776
7777 for (i = 0; i < adapter->num_q_vectors; i++)
7778 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
7779}
7780
7781#endif
7782static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
7783 struct rtnl_link_stats64 *stats)
7784{
7785 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7786 int i;
7787
7788 rcu_read_lock();
7789 for (i = 0; i < adapter->num_rx_queues; i++) {
7790 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
7791 u64 bytes, packets;
7792 unsigned int start;
7793
7794 if (ring) {
7795 do {
7796 start = u64_stats_fetch_begin_irq(&ring->syncp);
7797 packets = ring->stats.packets;
7798 bytes = ring->stats.bytes;
7799 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
7800 stats->rx_packets += packets;
7801 stats->rx_bytes += bytes;
7802 }
7803 }
7804
7805 for (i = 0; i < adapter->num_tx_queues; i++) {
7806 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
7807 u64 bytes, packets;
7808 unsigned int start;
7809
7810 if (ring) {
7811 do {
7812 start = u64_stats_fetch_begin_irq(&ring->syncp);
7813 packets = ring->stats.packets;
7814 bytes = ring->stats.bytes;
7815 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
7816 stats->tx_packets += packets;
7817 stats->tx_bytes += bytes;
7818 }
7819 }
7820 rcu_read_unlock();
7821
7822 stats->multicast = netdev->stats.multicast;
7823 stats->rx_errors = netdev->stats.rx_errors;
7824 stats->rx_length_errors = netdev->stats.rx_length_errors;
7825 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
7826 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
7827 return stats;
7828}
7829
7830#ifdef CONFIG_IXGBE_DCB
7831
7832
7833
7834
7835
7836
7837
7838
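/**
 * ixgbe_validate_rtr - verify the 802.1Qp to Rx packet buffer mapping is valid
 * @adapter: board private structure
 * @tc: number of traffic classes currently enabled
 *
 * 82598 does not use the RTRUP2TC register, so this only applies to newer
 * parts.  Any user priority mapped to a traffic class beyond the enabled
 * count is cleared back to traffic class 0.
 */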
7839static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
7840{
7841 struct ixgbe_hw *hw = &adapter->hw;
7842 u32 reg, rsave;
7843 int i;
7844
7845
7846
7847
7848 if (hw->mac.type == ixgbe_mac_82598EB)
7849 return;
7850
7851 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
7852 rsave = reg;
7853
7854 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
7855 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
7856
7857
7858 if (up2tc > tc)
7859 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
7860 }
7861
7862 if (reg != rsave)
7863 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
7864
7865 return;
7866}
7867
7868
7869
7870
7871
7872
7873
7874static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
7875{
7876 struct net_device *dev = adapter->netdev;
7877 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
7878 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
7879 u8 prio;
7880
7881 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
7882 u8 tc = 0;
7883
7884 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
7885 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
7886 else if (ets)
7887 tc = ets->prio_tc[prio];
7888
7889 netdev_set_prio_tc_map(dev, prio, tc);
7890 }
7891}
7892
7893#endif
7894
7895
7896
7897
7898
7899
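/**
 * ixgbe_setup_tc - configure the device for multiple traffic classes
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 *
 * Tears down the current interrupt/queue layout and rebuilds it with DCB
 * enabled or disabled to match the requested traffic class count.
 */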
7900int ixgbe_setup_tc(struct net_device *dev, u8 tc)
7901{
7902 struct ixgbe_adapter *adapter = netdev_priv(dev);
7903 struct ixgbe_hw *hw = &adapter->hw;
7904 bool pools;
7905
7906
7907 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
7908 return -EINVAL;
7909
7910 if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
7911 return -EINVAL;
7912
7913 pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
7914 if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
7915 return -EBUSY;
7916
7917
7918
7919
7920
7921 if (netif_running(dev))
7922 ixgbe_close(dev);
7923 else
7924 ixgbe_reset(adapter);
7925
7926 ixgbe_clear_interrupt_scheme(adapter);
7927
7928#ifdef CONFIG_IXGBE_DCB
7929 if (tc) {
7930 netdev_set_num_tc(dev, tc);
7931 ixgbe_set_prio_tc_map(adapter);
7932
7933 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
7934
7935 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
7936 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
7937 adapter->hw.fc.requested_mode = ixgbe_fc_none;
7938 }
7939 } else {
7940 netdev_reset_tc(dev);
7941
7942 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
7943 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
7944
7945 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
7946
7947 adapter->temp_dcb_cfg.pfc_mode_enable = false;
7948 adapter->dcb_cfg.pfc_mode_enable = false;
7949 }
7950
7951 ixgbe_validate_rtr(adapter, tc);
7952
7953#endif
7954 ixgbe_init_interrupt_scheme(adapter);
7955
7956 if (netif_running(dev))
7957 return ixgbe_open(dev);
7958
7959 return 0;
7960}
7961
7962#ifdef CONFIG_PCI_IOV
7963void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
7964{
7965 struct net_device *netdev = adapter->netdev;
7966
7967 rtnl_lock();
7968 ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
7969 rtnl_unlock();
7970}
7971
7972#endif
7973void ixgbe_do_reset(struct net_device *netdev)
7974{
7975 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7976
7977 if (netif_running(netdev))
7978 ixgbe_reinit_locked(adapter);
7979 else
7980 ixgbe_reset(adapter);
7981}
7982
7983static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
7984 netdev_features_t features)
7985{
7986 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7987
7988
7989 if (!(features & NETIF_F_RXCSUM))
7990 features &= ~NETIF_F_LRO;
7991
7992
7993 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
7994 features &= ~NETIF_F_LRO;
7995
7996 return features;
7997}
7998
7999static int ixgbe_set_features(struct net_device *netdev,
8000 netdev_features_t features)
8001{
8002 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8003 netdev_features_t changed = netdev->features ^ features;
8004 bool need_reset = false;
8005
8006
8007 if (!(features & NETIF_F_LRO)) {
8008 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
8009 need_reset = true;
8010 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
8011 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
8012 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
8013 if (adapter->rx_itr_setting == 1 ||
8014 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
8015 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
8016 need_reset = true;
8017 } else if ((changed ^ features) & NETIF_F_LRO) {
8018 e_info(probe, "rx-usecs set too low, "
8019 "disabling RSC\n");
8020 }
8021 }
8022
8023
8024
8025
8026
8027 switch (features & NETIF_F_NTUPLE) {
8028 case NETIF_F_NTUPLE:
8029
8030 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
8031 need_reset = true;
8032
8033 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
8034 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
8035 break;
8036 default:
8037
8038 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
8039 need_reset = true;
8040
8041 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
8042
8043
8044 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
8045 break;
8046
8047
8048 if (netdev_get_num_tc(netdev) > 1)
8049 break;
8050
8051
8052 if (adapter->ring_feature[RING_F_RSS].limit <= 1)
8053 break;
8054
8055
8056 if (!adapter->atr_sample_rate)
8057 break;
8058
8059 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
8060 break;
8061 }
8062
8063 if (features & NETIF_F_HW_VLAN_CTAG_RX)
8064 ixgbe_vlan_strip_enable(adapter);
8065 else
8066 ixgbe_vlan_strip_disable(adapter);
8067
8068 if (changed & NETIF_F_RXALL)
8069 need_reset = true;
8070
8071 netdev->features = features;
8072
8073#ifdef CONFIG_IXGBE_VXLAN
8074 if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
8075 if (features & NETIF_F_RXCSUM)
8076 adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
8077 else
8078 ixgbe_clear_vxlan_port(adapter);
8079 }
8080#endif
8081
8082 if (need_reset)
8083 ixgbe_do_reset(netdev);
8084
8085 return 0;
8086}
8087
8088#ifdef CONFIG_IXGBE_VXLAN
8089
8090
8091
8092
8093
8094
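/**
 * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
 * @dev: The port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: New UDP port number that VXLAN started listening to
 **/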
8095static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8096 __be16 port)
8097{
8098 struct ixgbe_adapter *adapter = netdev_priv(dev);
8099 struct ixgbe_hw *hw = &adapter->hw;
8100 u16 new_port = ntohs(port);
8101
8102 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8103 return;
8104
8105 if (sa_family == AF_INET6)
8106 return;
8107
8108 if (adapter->vxlan_port == new_port)
8109 return;
8110
8111 if (adapter->vxlan_port) {
8112 netdev_info(dev,
8113 "Hit Max num of VXLAN ports, not adding port %d\n",
8114 new_port);
8115 return;
8116 }
8117
8118 adapter->vxlan_port = new_port;
8119 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
8120}
8121
8122
8123
8124
8125
8126
8127
8128static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
8129 __be16 port)
8130{
8131 struct ixgbe_adapter *adapter = netdev_priv(dev);
8132 u16 new_port = ntohs(port);
8133
8134 if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
8135 return;
8136
8137 if (sa_family == AF_INET6)
8138 return;
8139
8140 if (adapter->vxlan_port != new_port) {
8141 netdev_info(dev, "Port %d was not found, not deleting\n",
8142 new_port);
8143 return;
8144 }
8145
8146 ixgbe_clear_vxlan_port(adapter);
8147 adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
8148}
8149#endif
8150
8151static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8152 struct net_device *dev,
8153 const unsigned char *addr, u16 vid,
8154 u16 flags)
8155{
8156
8157 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
8158 if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev))
8159 return -ENOMEM;
8160 }
8161
8162 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
8163}
8164
8165
8166
8167
8168
8169
8170
8171
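/**
 * ixgbe_configure_bridge_mode - set various bridge modes
 * @adapter: the private structure
 * @mode: requested bridge mode
 *
 * Configure the Tx switch loopback, replication, and source address
 * pruning settings needed for VEPA or VEB bridge mode.
 **/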
8172static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
8173 __u16 mode)
8174{
8175 struct ixgbe_hw *hw = &adapter->hw;
8176 unsigned int p, num_pools;
8177 u32 vmdctl;
8178
8179 switch (mode) {
8180 case BRIDGE_MODE_VEPA:
8181
8182 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);
8183
8184
8185
8186
8187
8188 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
8189 vmdctl |= IXGBE_VT_CTL_REPLEN;
8190 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
8191
8192
8193
8194
8195 num_pools = adapter->num_vfs + adapter->num_rx_pools;
8196 for (p = 0; p < num_pools; p++) {
8197 if (hw->mac.ops.set_source_address_pruning)
8198 hw->mac.ops.set_source_address_pruning(hw,
8199 true,
8200 p);
8201 }
8202 break;
8203 case BRIDGE_MODE_VEB:
8204
8205 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
8206 IXGBE_PFDTXGSWC_VT_LBEN);
8207
8208
8209
8210
8211 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
8212 if (!adapter->num_vfs)
8213 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
8214 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
8215
8216
8217
8218
8219 num_pools = adapter->num_vfs + adapter->num_rx_pools;
8220 for (p = 0; p < num_pools; p++) {
8221 if (hw->mac.ops.set_source_address_pruning)
8222 hw->mac.ops.set_source_address_pruning(hw,
8223 false,
8224 p);
8225 }
8226 break;
8227 default:
8228 return -EINVAL;
8229 }
8230
8231 adapter->bridge_mode = mode;
8232
8233 e_info(drv, "enabling bridge mode: %s\n",
8234 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
8235
8236 return 0;
8237}
8238
8239static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
8240 struct nlmsghdr *nlh, u16 flags)
8241{
8242 struct ixgbe_adapter *adapter = netdev_priv(dev);
8243 struct nlattr *attr, *br_spec;
8244 int rem;
8245
8246 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
8247 return -EOPNOTSUPP;
8248
8249 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8250 if (!br_spec)
8251 return -EINVAL;
8252
8253 nla_for_each_nested(attr, br_spec, rem) {
8254 int status;
8255 __u16 mode;
8256
8257 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8258 continue;
8259
8260 if (nla_len(attr) < sizeof(mode))
8261 return -EINVAL;
8262
8263 mode = nla_get_u16(attr);
8264 status = ixgbe_configure_bridge_mode(adapter, mode);
8265 if (status)
8266 return status;
8267
8268 break;
8269 }
8270
8271 return 0;
8272}
8273
8274static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8275 struct net_device *dev,
8276 u32 filter_mask, int nlflags)
8277{
8278 struct ixgbe_adapter *adapter = netdev_priv(dev);
8279
8280 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
8281 return 0;
8282
8283 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
8284 adapter->bridge_mode, 0, 0, nlflags,
8285 filter_mask, NULL);
8286}
8287
8288static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
8289{
8290 struct ixgbe_fwd_adapter *fwd_adapter = NULL;
8291 struct ixgbe_adapter *adapter = netdev_priv(pdev);
8292 int used_pools = adapter->num_vfs + adapter->num_rx_pools;
8293 unsigned int limit;
8294 int pool, err;
8295
8296
8297
8298
8299
8300 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
8301 return ERR_PTR(-EINVAL);
8302
8303#ifdef CONFIG_RPS
8304 if (vdev->num_rx_queues != vdev->num_tx_queues) {
8305 netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
8306 vdev->name);
8307 return ERR_PTR(-EINVAL);
8308 }
8309#endif
8310
8311 if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
8312 vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
8313 netdev_info(pdev,
8314 "%s: Supports RX/TX Queue counts 1,2, and 4\n",
8315 pdev->name);
8316 return ERR_PTR(-EINVAL);
8317 }
8318
8319 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8320 adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
8321 (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
8322 return ERR_PTR(-EBUSY);
8323
8324 fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
8325 if (!fwd_adapter)
8326 return ERR_PTR(-ENOMEM);
8327
8328 pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
8329 adapter->num_rx_pools++;
8330 set_bit(pool, &adapter->fwd_bitmask);
8331 limit = find_last_bit(&adapter->fwd_bitmask, 32);
8332
8333
8334 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
8335 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
8336 adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
8337
8338
8339 err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
8340 if (err)
8341 goto fwd_add_err;
8342 fwd_adapter->pool = pool;
8343 fwd_adapter->real_adapter = adapter;
8344 err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
8345 if (err)
8346 goto fwd_add_err;
8347 netif_tx_start_all_queues(vdev);
8348 return fwd_adapter;
8349fwd_add_err:
8350
8351 netdev_info(pdev,
8352 "%s: dfwd hardware acceleration failed\n", vdev->name);
8353 clear_bit(pool, &adapter->fwd_bitmask);
8354 adapter->num_rx_pools--;
8355 kfree(fwd_adapter);
8356 return ERR_PTR(err);
8357}
8358
8359static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
8360{
8361 struct ixgbe_fwd_adapter *fwd_adapter = priv;
8362 struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
8363 unsigned int limit;
8364
8365 clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
8366 adapter->num_rx_pools--;
8367
8368 limit = find_last_bit(&adapter->fwd_bitmask, 32);
8369 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
8370 ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
8371 ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
8372 netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
8373 fwd_adapter->pool, adapter->num_rx_pools,
8374 fwd_adapter->rx_base_queue,
8375 fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
8376 adapter->fwd_bitmask);
8377 kfree(fwd_adapter);
8378}
8379
8380#define IXGBE_MAX_TUNNEL_HDR_LEN 80
8381static netdev_features_t
8382ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
8383 netdev_features_t features)
8384{
8385 if (!skb->encapsulation)
8386 return features;
8387
8388 if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
8389 IXGBE_MAX_TUNNEL_HDR_LEN))
8390 return features & ~NETIF_F_ALL_CSUM;
8391
8392 return features;
8393}
8394
8395static const struct net_device_ops ixgbe_netdev_ops = {
8396 .ndo_open = ixgbe_open,
8397 .ndo_stop = ixgbe_close,
8398 .ndo_start_xmit = ixgbe_xmit_frame,
8399 .ndo_select_queue = ixgbe_select_queue,
8400 .ndo_set_rx_mode = ixgbe_set_rx_mode,
8401 .ndo_validate_addr = eth_validate_addr,
8402 .ndo_set_mac_address = ixgbe_set_mac,
8403 .ndo_change_mtu = ixgbe_change_mtu,
8404 .ndo_tx_timeout = ixgbe_tx_timeout,
8405 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
8406 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
8407 .ndo_do_ioctl = ixgbe_ioctl,
8408 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
8409 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
8410 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
8411 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
8412 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
8413 .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
8414 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
8415 .ndo_get_stats64 = ixgbe_get_stats64,
8416#ifdef CONFIG_IXGBE_DCB
8417 .ndo_setup_tc = ixgbe_setup_tc,
8418#endif
8419#ifdef CONFIG_NET_POLL_CONTROLLER
8420 .ndo_poll_controller = ixgbe_netpoll,
8421#endif
8422#ifdef CONFIG_NET_RX_BUSY_POLL
8423 .ndo_busy_poll = ixgbe_low_latency_recv,
8424#endif
8425#ifdef IXGBE_FCOE
8426 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
8427 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
8428 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
8429 .ndo_fcoe_enable = ixgbe_fcoe_enable,
8430 .ndo_fcoe_disable = ixgbe_fcoe_disable,
8431 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
8432 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
8433#endif
8434 .ndo_set_features = ixgbe_set_features,
8435 .ndo_fix_features = ixgbe_fix_features,
8436 .ndo_fdb_add = ixgbe_ndo_fdb_add,
8437 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
8438 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
8439 .ndo_dfwd_add_station = ixgbe_fwd_add,
8440 .ndo_dfwd_del_station = ixgbe_fwd_del,
8441#ifdef CONFIG_IXGBE_VXLAN
8442 .ndo_add_vxlan_port = ixgbe_add_vxlan_port,
8443 .ndo_del_vxlan_port = ixgbe_del_vxlan_port,
8444#endif
8445 .ndo_features_check = ixgbe_features_check,
8446};
8447
8448
8449
8450
8451
8452
8453
8454
8455
8456
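/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * This function enumerates the physical functions co-located on a single
 * slot, in order to determine how many ports a device has.  This is most
 * useful in determining the required GT/s of PCIe bandwidth necessary for
 * optimal performance.
 **/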
8457static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
8458{
8459 struct pci_dev *entry, *pdev = adapter->pdev;
8460 int physfns = 0;
8461
8462
8463
8464
8465
8466 if (ixgbe_pcie_from_parent(&adapter->hw))
8467 physfns = 4;
8468
8469 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
8470
8471 if (entry->is_virtfn)
8472 continue;
8473
8474
8475
8476
8477
8478
8479
8480 if ((entry->vendor != pdev->vendor) ||
8481 (entry->device != pdev->device))
8482 return -1;
8483
8484 physfns++;
8485 }
8486
8487 return physfns;
8488}
8489
8490
8491
8492
8493
8494
8495
8496
8497
8498
8499
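/**
 * ixgbe_wol_supported - Check whether the device supports WoL
 * @adapter: the adapter private structure
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * Used by probe and ethtool to determine which devices have WoL support.
 **/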
8500int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
8501 u16 subdevice_id)
8502{
8503 struct ixgbe_hw *hw = &adapter->hw;
8504 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
8505 int is_wol_supported = 0;
8506
8507 switch (device_id) {
8508 case IXGBE_DEV_ID_82599_SFP:
8509
8510 switch (subdevice_id) {
8511 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
8512 case IXGBE_SUBDEV_ID_82599_560FLR:
8513
8514 if (hw->bus.func != 0)
8515 break;
8516 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
8517 case IXGBE_SUBDEV_ID_82599_SFP:
8518 case IXGBE_SUBDEV_ID_82599_RNDC:
8519 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
8520 case IXGBE_SUBDEV_ID_82599_LOM_SFP:
8521 is_wol_supported = 1;
8522 break;
8523 }
8524 break;
8525 case IXGBE_DEV_ID_82599EN_SFP:
8526
8527 switch (subdevice_id) {
8528 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
8529 is_wol_supported = 1;
8530 break;
8531 }
8532 break;
8533 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
8534
8535 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
8536 is_wol_supported = 1;
8537 break;
8538 case IXGBE_DEV_ID_82599_KX4:
8539 is_wol_supported = 1;
8540 break;
8541 case IXGBE_DEV_ID_X540T:
8542 case IXGBE_DEV_ID_X540T1:
8543 case IXGBE_DEV_ID_X550T:
8544 case IXGBE_DEV_ID_X550EM_X_KX4:
8545 case IXGBE_DEV_ID_X550EM_X_KR:
8546 case IXGBE_DEV_ID_X550EM_X_10G_T:
8547
8548 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
8549 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
8550 (hw->bus.func == 0))) {
8551 is_wol_supported = 1;
8552 }
8553 break;
8554 }
8555
8556 return is_wol_supported;
8557}
8558
8559
8560
8561
8562
8563static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
8564{
8565#ifdef CONFIG_OF
8566 struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
8567 struct ixgbe_hw *hw = &adapter->hw;
8568 const unsigned char *addr;
8569
8570 addr = of_get_mac_address(dp);
8571 if (addr) {
8572 ether_addr_copy(hw->mac.perm_addr, addr);
8573 return;
8574 }
8575#endif
8576
8577#ifdef CONFIG_SPARC
8578 ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
8579#endif
8580}
8581
8582
8583
8584
8585
8586
8587
8588
8589
8590
8591
8592
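/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/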
8593static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8594{
8595 struct net_device *netdev;
8596 struct ixgbe_adapter *adapter = NULL;
8597 struct ixgbe_hw *hw;
8598 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
8599 int i, err, pci_using_dac, expected_gts;
8600 unsigned int indices = MAX_TX_QUEUES;
8601 u8 part_str[IXGBE_PBANUM_LENGTH];
8602 bool disable_dev = false;
8603#ifdef IXGBE_FCOE
8604 u16 device_caps;
8605#endif
8606 u32 eec;
8607
8608
8609
8610
8611 if (pdev->is_virtfn) {
8612 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
8613 pci_name(pdev), pdev->vendor, pdev->device);
8614 return -EINVAL;
8615 }
8616
8617 err = pci_enable_device_mem(pdev);
8618 if (err)
8619 return err;
8620
8621 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
8622 pci_using_dac = 1;
8623 } else {
8624 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8625 if (err) {
8626 dev_err(&pdev->dev,
8627 "No usable DMA configuration, aborting\n");
8628 goto err_dma;
8629 }
8630 pci_using_dac = 0;
8631 }
8632
8633 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
8634 IORESOURCE_MEM), ixgbe_driver_name);
8635 if (err) {
8636 dev_err(&pdev->dev,
8637 "pci_request_selected_regions failed 0x%x\n", err);
8638 goto err_pci_reg;
8639 }
8640
8641 pci_enable_pcie_error_reporting(pdev);
8642
8643 pci_set_master(pdev);
8644 pci_save_state(pdev);
8645
8646 if (ii->mac == ixgbe_mac_82598EB) {
8647#ifdef CONFIG_IXGBE_DCB
8648
8649 indices = 4 * MAX_TRAFFIC_CLASS;
8650#else
8651 indices = IXGBE_MAX_RSS_INDICES;
8652#endif
8653 }
8654
8655 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
8656 if (!netdev) {
8657 err = -ENOMEM;
8658 goto err_alloc_etherdev;
8659 }
8660
8661 SET_NETDEV_DEV(netdev, &pdev->dev);
8662
8663 adapter = netdev_priv(netdev);
8664
8665 adapter->netdev = netdev;
8666 adapter->pdev = pdev;
8667 hw = &adapter->hw;
8668 hw->back = adapter;
8669 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
8670
8671 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
8672 pci_resource_len(pdev, 0));
8673 adapter->io_addr = hw->hw_addr;
8674 if (!hw->hw_addr) {
8675 err = -EIO;
8676 goto err_ioremap;
8677 }
8678
8679 netdev->netdev_ops = &ixgbe_netdev_ops;
8680 ixgbe_set_ethtool_ops(netdev);
8681 netdev->watchdog_timeo = 5 * HZ;
8682 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
8683
8684
8685 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
8686 hw->mac.type = ii->mac;
8687 hw->mvals = ii->mvals;
8688
8689
8690 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
8691 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
8692 if (ixgbe_removed(hw->hw_addr)) {
8693 err = -EIO;
8694 goto err_ioremap;
8695 }
8696
8697 if (!(eec & (1 << 8)))
8698 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
8699
8700
8701 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
8702 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
8703
8704 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
8705 hw->phy.mdio.mmds = 0;
8706 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8707 hw->phy.mdio.dev = netdev;
8708 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
8709 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
8710
8711 ii->get_invariants(hw);
8712
8713
8714 err = ixgbe_sw_init(adapter);
8715 if (err)
8716 goto err_sw_init;
8717
8718
8719 switch (adapter->hw.mac.type) {
8720 case ixgbe_mac_82599EB:
8721 case ixgbe_mac_X540:
8722 case ixgbe_mac_X550:
8723 case ixgbe_mac_X550EM_x:
8724 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
8725 break;
8726 default:
8727 break;
8728 }
8729
8730
8731
8732
8733
8734 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
8735 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
8736 if (esdp & IXGBE_ESDP_SDP1)
8737 e_crit(probe, "Fan has stopped, replace the adapter\n");
8738 }
8739
8740 if (allow_unsupported_sfp)
8741 hw->allow_unsupported_sfp = allow_unsupported_sfp;
8742
8743
8744 hw->phy.reset_if_overtemp = true;
8745 err = hw->mac.ops.reset_hw(hw);
8746 hw->phy.reset_if_overtemp = false;
8747 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
8748 err = 0;
8749 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
8750 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
8751 e_dev_err("Reload the driver after installing a supported module.\n");
8752 goto err_sw_init;
8753 } else if (err) {
8754 e_dev_err("HW Init failed: %d\n", err);
8755 goto err_sw_init;
8756 }
8757
8758#ifdef CONFIG_PCI_IOV
8759
8760 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
8761 goto skip_sriov;
8762
8763 ixgbe_init_mbx_params_pf(hw);
8764 memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
8765 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
8766 ixgbe_enable_sriov(adapter);
8767skip_sriov:
8768
8769#endif
8770 netdev->features = NETIF_F_SG |
8771 NETIF_F_IP_CSUM |
8772 NETIF_F_IPV6_CSUM |
8773 NETIF_F_HW_VLAN_CTAG_TX |
8774 NETIF_F_HW_VLAN_CTAG_RX |
8775 NETIF_F_TSO |
8776 NETIF_F_TSO6 |
8777 NETIF_F_RXHASH |
8778 NETIF_F_RXCSUM;
8779
8780 netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
8781
8782 switch (adapter->hw.mac.type) {
8783 case ixgbe_mac_82599EB:
8784 case ixgbe_mac_X540:
8785 case ixgbe_mac_X550:
8786 case ixgbe_mac_X550EM_x:
8787 netdev->features |= NETIF_F_SCTP_CSUM;
8788 netdev->hw_features |= NETIF_F_SCTP_CSUM |
8789 NETIF_F_NTUPLE;
8790 break;
8791 default:
8792 break;
8793 }
8794
8795 netdev->hw_features |= NETIF_F_RXALL;
8796 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
8797
8798 netdev->vlan_features |= NETIF_F_TSO;
8799 netdev->vlan_features |= NETIF_F_TSO6;
8800 netdev->vlan_features |= NETIF_F_IP_CSUM;
8801 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
8802 netdev->vlan_features |= NETIF_F_SG;
8803
8804 netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
8805 NETIF_F_IPV6_CSUM;
8806
8807 netdev->priv_flags |= IFF_UNICAST_FLT;
8808 netdev->priv_flags |= IFF_SUPP_NOFCS;
8809
8810#ifdef CONFIG_IXGBE_VXLAN
8811 switch (adapter->hw.mac.type) {
8812 case ixgbe_mac_X550:
8813 case ixgbe_mac_X550EM_x:
8814 netdev->hw_enc_features |= NETIF_F_RXCSUM |
8815 NETIF_F_IP_CSUM |
8816 NETIF_F_IPV6_CSUM;
8817 break;
8818 default:
8819 break;
8820 }
8821#endif
8822
8823#ifdef CONFIG_IXGBE_DCB
8824 netdev->dcbnl_ops = &dcbnl_ops;
8825#endif
8826
8827#ifdef IXGBE_FCOE
8828 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
8829 unsigned int fcoe_l;
8830
8831 if (hw->mac.ops.get_device_caps) {
8832 hw->mac.ops.get_device_caps(hw, &device_caps);
8833 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
8834 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
8835 }
8836
8837
8838 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
8839 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
8840
8841 netdev->features |= NETIF_F_FSO |
8842 NETIF_F_FCOE_CRC;
8843
8844 netdev->vlan_features |= NETIF_F_FSO |
8845 NETIF_F_FCOE_CRC |
8846 NETIF_F_FCOE_MTU;
8847 }
8848#endif
8849 if (pci_using_dac) {
8850 netdev->features |= NETIF_F_HIGHDMA;
8851 netdev->vlan_features |= NETIF_F_HIGHDMA;
8852 }
8853
8854 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
8855 netdev->hw_features |= NETIF_F_LRO;
8856 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
8857 netdev->features |= NETIF_F_LRO;
8858
8859
8860 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
8861 e_dev_err("The EEPROM Checksum Is Not Valid\n");
8862 err = -EIO;
8863 goto err_sw_init;
8864 }
8865
8866 ixgbe_get_platform_mac_addr(adapter);
8867
8868 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
8869
8870 if (!is_valid_ether_addr(netdev->dev_addr)) {
8871 e_dev_err("invalid MAC address\n");
8872 err = -EIO;
8873 goto err_sw_init;
8874 }
8875
8876 ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
8877
8878 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
8879 (unsigned long) adapter);
8880
8881 if (ixgbe_removed(hw->hw_addr)) {
8882 err = -EIO;
8883 goto err_sw_init;
8884 }
8885 INIT_WORK(&adapter->service_task, ixgbe_service_task);
8886 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
8887 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
8888
8889 err = ixgbe_init_interrupt_scheme(adapter);
8890 if (err)
8891 goto err_sw_init;
8892
8893
8894 adapter->wol = 0;
8895 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
8896 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
8897 pdev->subsystem_device);
8898 if (hw->wol_enabled)
8899 adapter->wol = IXGBE_WUFC_MAG;
8900
8901 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
8902
8903
8904 hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
8905 hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
8906
8907
8908 if (ixgbe_pcie_from_parent(hw))
8909 ixgbe_get_parent_bus_info(adapter);
8910 else
8911 hw->mac.ops.get_bus_info(hw);
8912
8913
8914
8915
8916
8917
8918 switch (hw->mac.type) {
8919 case ixgbe_mac_82598EB:
8920 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
8921 break;
8922 default:
8923 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
8924 break;
8925 }
8926
8927
8928 if (expected_gts > 0)
8929 ixgbe_check_minimum_link(adapter, expected_gts);
8930
8931 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
8932 if (err)
8933 strlcpy(part_str, "Unknown", sizeof(part_str));
8934 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
8935 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
8936 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
8937 part_str);
8938 else
8939 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
8940 hw->mac.type, hw->phy.type, part_str);
8941
8942 e_dev_info("%pM\n", netdev->dev_addr);
8943
8944
8945 err = hw->mac.ops.start_hw(hw);
8946 if (err == IXGBE_ERR_EEPROM_VERSION) {
8947
8948 e_dev_warn("This device is a pre-production adapter/LOM. "
8949 "Please be aware there may be issues associated "
8950 "with your hardware. If you are experiencing "
8951 "problems please contact your Intel or hardware "
8952 "representative who provided you with this "
8953 "hardware.\n");
8954 }
8955 strcpy(netdev->name, "eth%d");
8956 err = register_netdev(netdev);
8957 if (err)
8958 goto err_register;
8959
8960 pci_set_drvdata(pdev, adapter);
8961
8962
8963 if (hw->mac.ops.disable_tx_laser)
8964 hw->mac.ops.disable_tx_laser(hw);
8965
8966
8967 netif_carrier_off(netdev);
8968
8969#ifdef CONFIG_IXGBE_DCA
8970 if (dca_add_requester(&pdev->dev) == 0) {
8971 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
8972 ixgbe_setup_dca(adapter);
8973 }
8974#endif
8975 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
8976 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
8977 for (i = 0; i < adapter->num_vfs; i++)
8978 ixgbe_vf_configuration(pdev, (i | 0x10000000));
8979 }
8980
8981
8982
8983
8984 if (hw->mac.ops.set_fw_drv_ver)
8985 hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
8986 0xFF);
8987
8988
8989 ixgbe_add_sanmac_netdev(netdev);
8990
8991 e_dev_info("%s\n", ixgbe_default_device_descr);
8992
8993#ifdef CONFIG_IXGBE_HWMON
8994 if (ixgbe_sysfs_init(adapter))
8995 e_err(probe, "failed to allocate sysfs resources\n");
8996#endif
8997
8998 ixgbe_dbg_adapter_init(adapter);
8999
9000
9001 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
9002 hw->mac.ops.setup_link(hw,
9003 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
9004 true);
9005
9006 return 0;
9007
9008err_register:
9009 ixgbe_release_hw_control(adapter);
9010 ixgbe_clear_interrupt_scheme(adapter);
9011err_sw_init:
9012 ixgbe_disable_sriov(adapter);
9013 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
9014 iounmap(adapter->io_addr);
9015 kfree(adapter->mac_table);
9016err_ioremap:
9017 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
9018 free_netdev(netdev);
9019err_alloc_etherdev:
9020 pci_release_selected_regions(pdev,
9021 pci_select_bars(pdev, IORESOURCE_MEM));
9022err_pci_reg:
9023err_dma:
9024 if (!adapter || disable_dev)
9025 pci_disable_device(pdev);
9026 return err;
9027}
9028
9029
9030
9031
9032
9033
9034
9035
9036
9037
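/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/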
9038static void ixgbe_remove(struct pci_dev *pdev)
9039{
9040 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9041 struct net_device *netdev;
9042 bool disable_dev;
9043
9044
9045 if (!adapter)
9046 return;
9047
9048 netdev = adapter->netdev;
9049 ixgbe_dbg_adapter_exit(adapter);
9050
9051 set_bit(__IXGBE_REMOVING, &adapter->state);
9052 cancel_work_sync(&adapter->service_task);
9053
9054
9055#ifdef CONFIG_IXGBE_DCA
9056 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
9057 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
9058 dca_remove_requester(&pdev->dev);
9059 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
9060 IXGBE_DCA_CTRL_DCA_DISABLE);
9061 }
9062
9063#endif
9064#ifdef CONFIG_IXGBE_HWMON
9065 ixgbe_sysfs_exit(adapter);
9066#endif
9067
9068
9069 ixgbe_del_sanmac_netdev(netdev);
9070
9071#ifdef CONFIG_PCI_IOV
9072 ixgbe_disable_sriov(adapter);
9073#endif
9074 if (netdev->reg_state == NETREG_REGISTERED)
9075 unregister_netdev(netdev);
9076
9077 ixgbe_clear_interrupt_scheme(adapter);
9078
9079 ixgbe_release_hw_control(adapter);
9080
9081#ifdef CONFIG_DCB
9082 kfree(adapter->ixgbe_ieee_pfc);
9083 kfree(adapter->ixgbe_ieee_ets);
9084
9085#endif
9086 iounmap(adapter->io_addr);
9087 pci_release_selected_regions(pdev, pci_select_bars(pdev,
9088 IORESOURCE_MEM));
9089
9090 e_dev_info("complete\n");
9091
9092 kfree(adapter->mac_table);
9093 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
9094 free_netdev(netdev);
9095
9096 pci_disable_pcie_error_reporting(pdev);
9097
9098 if (disable_dev)
9099 pci_disable_device(pdev);
9100}
9101
9102
9103
9104
9105
9106
9107
9108
9109
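/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */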
9110static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
9111 pci_channel_state_t state)
9112{
9113 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9114 struct net_device *netdev = adapter->netdev;
9115
9116#ifdef CONFIG_PCI_IOV
9117 struct ixgbe_hw *hw = &adapter->hw;
9118 struct pci_dev *bdev, *vfdev;
9119 u32 dw0, dw1, dw2, dw3;
9120 int vf, pos;
9121 u16 req_id, pf_func;
9122
9123 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
9124 adapter->num_vfs == 0)
9125 goto skip_bad_vf_detection;
9126
9127 bdev = pdev->bus->self;
9128 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
9129 bdev = bdev->bus->self;
9130
9131 if (!bdev)
9132 goto skip_bad_vf_detection;
9133
9134 pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
9135 if (!pos)
9136 goto skip_bad_vf_detection;
9137
9138 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
9139 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
9140 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
9141 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
9142 if (ixgbe_removed(hw->hw_addr))
9143 goto skip_bad_vf_detection;
9144
9145 req_id = dw1 >> 16;
9146
9147 if (!(req_id & 0x0080))
9148 goto skip_bad_vf_detection;
9149
9150 pf_func = req_id & 0x01;
9151 if ((pf_func & 1) == (pdev->devfn & 1)) {
9152 unsigned int device_id;
9153
9154 vf = (req_id & 0x7F) >> 1;
9155 e_dev_err("VF %d has caused a PCIe error\n", vf);
9156 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
9157 "%8.8x\tdw3: %8.8x\n",
9158 dw0, dw1, dw2, dw3);
9159 switch (adapter->hw.mac.type) {
9160 case ixgbe_mac_82599EB:
9161 device_id = IXGBE_82599_VF_DEVICE_ID;
9162 break;
9163 case ixgbe_mac_X540:
9164 device_id = IXGBE_X540_VF_DEVICE_ID;
9165 break;
9166 case ixgbe_mac_X550:
9167 device_id = IXGBE_DEV_ID_X550_VF;
9168 break;
9169 case ixgbe_mac_X550EM_x:
9170 device_id = IXGBE_DEV_ID_X550EM_X_VF;
9171 break;
9172 default:
9173 device_id = 0;
9174 break;
9175 }
9176
9177
9178 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
9179 while (vfdev) {
9180 if (vfdev->devfn == (req_id & 0xFF))
9181 break;
9182 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
9183 device_id, vfdev);
9184 }
9185
9186
9187
9188
9189
9190 if (vfdev) {
9191 ixgbe_issue_vf_flr(adapter, vfdev);
9192
9193 pci_dev_put(vfdev);
9194 }
9195
9196 pci_cleanup_aer_uncorrect_error_status(pdev);
9197 }
9198
9199
9200
9201
9202
9203
9204
9205 adapter->vferr_refcount++;
9206
9207 return PCI_ERS_RESULT_RECOVERED;
9208
9209skip_bad_vf_detection:
9210#endif
9211 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
9212 return PCI_ERS_RESULT_DISCONNECT;
9213
9214 rtnl_lock();
9215 netif_device_detach(netdev);
9216
9217 if (state == pci_channel_io_perm_failure) {
9218 rtnl_unlock();
9219 return PCI_ERS_RESULT_DISCONNECT;
9220 }
9221
9222 if (netif_running(netdev))
9223 ixgbe_down(adapter);
9224
9225 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
9226 pci_disable_device(pdev);
9227 rtnl_unlock();
9228
9229
9230 return PCI_ERS_RESULT_NEED_RESET;
9231}
9232
9233
9234
9235
9236
9237
9238
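/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */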
9239static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
9240{
9241 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9242 pci_ers_result_t result;
9243 int err;
9244
9245 if (pci_enable_device_mem(pdev)) {
9246 e_err(probe, "Cannot re-enable PCI device after reset.\n");
9247 result = PCI_ERS_RESULT_DISCONNECT;
9248 } else {
9249 smp_mb__before_atomic();
9250 clear_bit(__IXGBE_DISABLED, &adapter->state);
9251 adapter->hw.hw_addr = adapter->io_addr;
9252 pci_set_master(pdev);
9253 pci_restore_state(pdev);
9254 pci_save_state(pdev);
9255
9256 pci_wake_from_d3(pdev, false);
9257
9258 ixgbe_reset(adapter);
9259 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
9260 result = PCI_ERS_RESULT_RECOVERED;
9261 }
9262
9263 err = pci_cleanup_aer_uncorrect_error_status(pdev);
9264 if (err) {
9265 e_dev_err("pci_cleanup_aer_uncorrect_error_status "
9266 "failed 0x%0x\n", err);
9267
9268 }
9269
9270 return result;
9271}
9272
9273
9274
9275
9276
9277
9278
9279
9280static void ixgbe_io_resume(struct pci_dev *pdev)
9281{
9282 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9283 struct net_device *netdev = adapter->netdev;
9284
9285#ifdef CONFIG_PCI_IOV
9286 if (adapter->vferr_refcount) {
9287 e_info(drv, "Resuming after VF err\n");
9288 adapter->vferr_refcount--;
9289 return;
9290 }
9291
9292#endif
9293 if (netif_running(netdev))
9294 ixgbe_up(adapter);
9295
9296 netif_device_attach(netdev);
9297}
9298
9299static const struct pci_error_handlers ixgbe_err_handler = {
9300 .error_detected = ixgbe_io_error_detected,
9301 .slot_reset = ixgbe_io_slot_reset,
9302 .resume = ixgbe_io_resume,
9303};
9304
9305static struct pci_driver ixgbe_driver = {
9306 .name = ixgbe_driver_name,
9307 .id_table = ixgbe_pci_tbl,
9308 .probe = ixgbe_probe,
9309 .remove = ixgbe_remove,
9310#ifdef CONFIG_PM
9311 .suspend = ixgbe_suspend,
9312 .resume = ixgbe_resume,
9313#endif
9314 .shutdown = ixgbe_shutdown,
9315 .sriov_configure = ixgbe_pci_sriov_configure,
9316 .err_handler = &ixgbe_err_handler
9317};
9318
9319
9320
9321
9322
9323
9324
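/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is loaded.
 * It sets up debugfs, registers the driver with the PCI subsystem, and
 * registers for DCA notifications when CONFIG_IXGBE_DCA is enabled.
 **/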
9325static int __init ixgbe_init_module(void)
9326{
9327 int ret;
9328 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
9329 pr_info("%s\n", ixgbe_copyright);
9330
9331 ixgbe_dbg_init();
9332
9333 ret = pci_register_driver(&ixgbe_driver);
9334 if (ret) {
9335 ixgbe_dbg_exit();
9336 return ret;
9337 }
9338
9339#ifdef CONFIG_IXGBE_DCA
9340 dca_register_notify(&dca_notifier);
9341#endif
9342
9343 return 0;
9344}
9345
9346module_init(ixgbe_init_module);
9347
9348
9349
9350
9351
9352
9353
9354static void __exit ixgbe_exit_module(void)
9355{
9356#ifdef CONFIG_IXGBE_DCA
9357 dca_unregister_notify(&dca_notifier);
9358#endif
9359 pci_unregister_driver(&ixgbe_driver);
9360
9361 ixgbe_dbg_exit();
9362}
9363
9364#ifdef CONFIG_IXGBE_DCA
9365static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
9366 void *p)
9367{
9368 int ret_val;
9369
9370 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
9371 __ixgbe_notify_dca);
9372
9373 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
9374}
9375
9376#endif
9377
9378module_exit(ixgbe_exit_module);
9379
9380
9381