/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/vxlan.h>

#ifdef CONFIG_OF
#include <linux/of_net.h>
#endif

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "4.0.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
				"Copyright (c) 1999-2014 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598]		= &ixgbe_82598_info,
	[board_82599]		= &ixgbe_82599_info,
	[board_X540]		= &ixgbe_X540_info,
	[board_X550]		= &ixgbe_X550_info,
	[board_X550EM_x]	= &ixgbe_X550EM_x_info,
};

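/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */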
static const struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);

static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					  u32 reg, u16 *value)
{
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	if (!pci_is_pcie(parent_dev))
		return -1;

	pcie_capability_read_word(parent_dev, reg, value);
	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
		return -1;
	return 0;
}

static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 link_status = 0;
	int err;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space of the
	 * parent, as this device is behind a switch
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, PCI_EXP_LNKSTA,
					     &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	return 0;
}
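/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from
 * the device. Used to ensure that various locations all have the correct
 * device ID checks.
 */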
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		return true;
	default:
		return false;
	}
}

static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
				     int expected_gts)
{
	int max_gts = 0;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	struct pci_dev *pdev;

	/* determine whether to use the parent device */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		pdev = adapter->pdev->bus->parent->self;
	else
		pdev = adapter->pdev;

	if (pcie_get_minimum_link(pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 2 * width;
		break;
	case PCIE_SPEED_5_0GT:
		/* 8b/10b encoding reduces max throughput by 20% */
		max_gts = 4 * width;
		break;
	case PCIE_SPEED_8_0GT:
		/* 128b/130b encoding reduces throughput by less than 2% */
		max_gts = 8 * width;
		break;
	default:
		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
		return;
	}

	e_dev_info("PCI Express bandwidth of %dGT/s available\n",
		   max_gts);
	e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
		   (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
		    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
		    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
		    "Unknown"),
		   width,
		   (speed == PCIE_SPEED_2_5GT ? "20%" :
		    speed == PCIE_SPEED_5_0GT ? "20%" :
		    speed == PCIE_SPEED_8_0GT ? "<2%" :
		    "Unknown"));

	if (max_gts < expected_gts) {
		e_dev_warn("This is not sufficient for optimal performance of this card.\n");
		e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
			expected_gts);
		e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
	}
}

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_bit(__IXGBE_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	e_dev_err("Adapter removed\n");
	if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		ixgbe_service_event_schedule(adapter);
}

static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a little bit by not
	 * performing a read on the status register when the register
	 * just read was a status register read that returned
	 * IXGBE_FAILED_READ_REG. It also blocks any potential recursion.
	 */
	if (reg == IXGBE_STATUS) {
		ixgbe_remove_adapter(hw);
		return;
	}
	value = ixgbe_read_reg(hw, IXGBE_STATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbe_remove_adapter(hw);
}
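/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers. It checks for device
 * removal by confirming any read that returns all ones by checking the
 * status register value for all ones. This function avoids reading from
 * the hardware where a removal was previously detected as the hw_addr
 * would be set to NULL.
 */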
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (ixgbe_removed(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbe_check_remove(hw, reg);
	return value;
}

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
	u16 value;

	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD) {
		ixgbe_remove_adapter(hw);
		return true;
	}
	return false;
}

u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u16 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_WORD;
	pci_read_config_word(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_WORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_WORD;
	return value;
}

#ifdef CONFIG_PCI_IOV
static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
{
	struct ixgbe_adapter *adapter = hw->back;
	u32 value;

	if (ixgbe_removed(hw->hw_addr))
		return IXGBE_FAILED_READ_CFG_DWORD;
	pci_read_config_dword(adapter->pdev, reg, &value);
	if (value == IXGBE_FAILED_READ_CFG_DWORD &&
	    ixgbe_check_cfg_remove(hw, adapter->pdev))
		return IXGBE_FAILED_READ_CFG_DWORD;
	return value;
}
#endif /* CONFIG_PCI_IOV */

void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
{
	struct ixgbe_adapter *adapter = hw->back;

	if (ixgbe_removed(hw->hw_addr))
		return;
	pci_write_config_word(adapter->pdev, reg, value);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{ .name = NULL }
};

/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info(" %s     %s              %s        %s\n",
		"Queue [NTU] [NTC] [bi(ntc)->dma  ]",
		"leng", "ntw", "timestamp");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
			   n, tx_ring->next_to_use, tx_ring->next_to_clean,
			   (u64)dma_unmap_addr(tx_buffer, dma),
			   dma_unmap_len(tx_buffer, len),
			   tx_buffer->next_to_watch,
			   (u64)tx_buffer->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * 82598 Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN  | POPTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN  |
	 *   +--------------------------------------------------------------+
	 *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
	 *
	 * 82598 Advanced Transmit Descriptor (Write-Back Format)
	 *   +--------------------------------------------------------------+
	 * 0 |                          RSV [63:0]                          |
	 *   +--------------------------------------------------------------+
	 * 8 |            RSV           |  STA  |          NXTSEQ           |
	 *   +--------------------------------------------------------------+
	 *   63                       36 35   32 31                         0
	 *
	 * 82599+ Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |PAYLEN  |POPTS|CC|IDX  |STA  |DCMD  |DTYP |MAC  |RSV  |DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63     46 45 40 39 38 36 35 32 31  24 23 20 19 18 17 16 15     0
	 *
	 * 82599+ Advanced Transmit Descriptor (Write-Back Format)
	 *   +--------------------------------------------------------------+
	 * 0 |                          RSV [63:0]                          |
	 *   +--------------------------------------------------------------+
	 * 8 |            RSV           |  STA  |           RSV             |
	 *   +--------------------------------------------------------------+
	 *   63                       36 35   32 31                         0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s    %s              %s        %s          %s\n",
			"T [desc]     [address 63:0  ] ",
			"[PlPOIdStDDt Ln] [bi->dma       ] ",
			"leng", "ntw", "timestamp", "bi->skb");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC(tx_ring, i);
			tx_buffer = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (dma_unmap_len(tx_buffer, len) > 0) {
				pr_info("T [0x%03X]    %016llX %016llX %016llX %08X %p %016llX %p",
					i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)dma_unmap_addr(tx_buffer, dma),
					dma_unmap_len(tx_buffer, len),
					tx_buffer->next_to_watch,
					(u64)tx_buffer->time_stamp,
					tx_buffer->skb);
				if (i == tx_ring->next_to_use &&
					i == tx_ring->next_to_clean)
					pr_cont(" NTC/U\n");
				else if (i == tx_ring->next_to_use)
					pr_cont(" NTU\n");
				else if (i == tx_ring->next_to_clean)
					pr_cont(" NTC\n");
				else
					pr_cont("\n");

				if (netif_msg_pktdata(adapter) &&
				    tx_buffer->skb)
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS, 16, 1,
						tx_buffer->skb->data,
						dma_unmap_len(tx_buffer, len),
						true);
			}
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Receive Descriptor Formats
	 *
	 * 82598 Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 * 82598 Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
	 *   +------------------------------------------------------+
	 * 0 |       RSS Hash /  |SPH| HDR_LEN  | RSV |Packet|  RSS |
	 *   | Packet   | IP     |   |          |     | type | type |
	 *   | Checksum | Ident  |   |          |     |      |      |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 *
	 * 82599+ Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 * 82599+ Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31          22 21           16 15  4 3    0
	 *   +------------------------------------------------------+
	 * 0 |RSS / Frag Checksum|SPH| HDR_LEN  |RSC- |Packet|  RSS  |
	 *   |/ RTT / PCoE_PARAM |   |          | CNT | type | type  |
	 *   |/ Flow Dir Flt ID  |   |          |     |      |       |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP  |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31          20 19                 0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s%s",
			"R  [desc]      [ PktBuf     A0] ",
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb       ] ",
			"<-- Adv Rx Read format\n");
		pr_info("%s%s%s",
			"RWB[desc]      [PcsmIpSHl PtRs] ",
			"[vl er S cks ln] ---------------- [bi->skb       ] ",
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter) &&
				    rx_buffer_info->dma) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   page_address(rx_buffer_info->page) +
						    rx_buffer_info->page_offset,
					   ixgbe_rx_bufsz(rx_ring), true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

		}
	}
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
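/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */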
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
				      struct ixgbe_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	int i;
	u32 data;

	if ((hw->fc.current_mode != ixgbe_fc_full) &&
	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
		return;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		break;
	default:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	}
	hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
	if (!data)
		return;

	for (i = 0; i < adapter->num_tx_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->tx_ring[i]->state);
}

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 xoff[8] = {0};
	u8 tc;
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
		ixgbe_update_xoff_rx_lfc(adapter);
		return;
	}

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		u32 pxoffrxc;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += pxoffrxc;
		/* Get the TC for given UP */
		tc = netdev_get_prio_tc_map(adapter->netdev, i);
		xoff[tc] += pxoffrxc;
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

		tc = tx_ring->dcb_tc;
		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->stats.packets;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	u32 head, tail;

	if (ring->l2_accel_priv)
		adapter = ring->l2_accel_priv->real_adapter;
	else
		adapter = netdev_priv(ring->netdev);

	hw = &adapter->hw;
	head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
	tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/*
	 * Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if (tx_done_old == tx_done && tx_pending)
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
					&tx_ring->state);
	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;
	/* reset the countdown */
	clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);

	return false;
}
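/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/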
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{

	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
		e_warn(drv, "initiating reset due to tx timeout\n");
		ixgbe_service_event_schedule(adapter);
	}
}
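/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/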
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct ixgbe_hw *hw = &adapter->hw;
		e_err(drv, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, i,
			tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		       adapter->tx_timeout_count + 1, tx_ring->queue_index);

		/* schedule the reset */
		ixgbe_tx_timeout_reset(adapter);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index)
		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
	u16 reg_offset;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
		txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		/* for unknown hardware do not write register */
		return;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared before the relaxed
	 * ordering is enabled causing the data to be moved to non-DCA
	 * enabled memory instead of DCA enabled memory.
	 */
	txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
		  IXGBE_DCA_TXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}

static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
	u8 reg_idx = rx_ring->reg_idx;

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		break;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared before the relaxed
	 * ordering is enabled causing the data to be moved to non-DCA
	 * enabled memory instead of DCA enabled memory.
	 */
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	ixgbe_for_each_ring(ring, q_vector->tx)
		ixgbe_update_tx_dca(adapter, ring, cpu);

	ixgbe_for_each_ring(ring, q_vector->rx)
		ixgbe_update_rx_dca(adapter, ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
				 union ixgbe_adv_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}

#ifdef IXGBE_FCOE
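/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */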
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
				    union ixgbe_adv_rx_desc *rx_desc)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}

#endif /* IXGBE_FCOE */
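/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/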
static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
	bool encap_pkt = false;

	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) &&
	    (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
		encap_pkt = true;
		skb->encapsulation = 1;
		skb->ip_summed = CHECKSUM_NONE;
	}

	/* if IP and error */
	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
			return;

		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (encap_pkt) {
		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
			return;

		if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
			ring->rx_stats.csum_err++;
			return;
		}
		/* If we checked the outer header let the stack know */
		skb->csum_level = 1;
	}
}

static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
				    struct ixgbe_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);

	/*
	 * if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbe_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}
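/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/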
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
			break;

		/*
		 * Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.upper.status_error = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}

static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
				   struct sk_buff *skb)
{
	u16 hdr_len = skb_headlen(skb);

	/* set gso_size to avoid messing up TCP MSS */
	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
						 IXGBE_CB(skb)->append_cnt);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}

static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
				   struct sk_buff *skb)
{
	/* if append_cnt is 0 then frame is not RSC */
	if (!IXGBE_CB(skb)->append_cnt)
		return;

	rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
	rx_ring->rx_stats.rsc_flush++;

	ixgbe_set_rsc_gso_size(rx_ring, skb);

	/* gso_size is computed using append_cnt so always clear it last */
	IXGBE_CB(skb)->append_cnt = 0;
}
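/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/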
static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;

	ixgbe_update_rsc_stats(rx_ring, skb);

	ixgbe_rx_hash(rx_ring, rx_desc, skb);

	ixgbe_rx_checksum(rx_ring, rx_desc, skb);

	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
		ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, dev);
}

static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
			 struct sk_buff *skb)
{
	if (ixgbe_qv_busy_polling(q_vector))
		netif_receive_skb(skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}
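/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/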
static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
			     union ixgbe_adv_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBE_RX_DESC(rx_ring, ntc));

	/* update RSC append count if present */
	if (ring_is_rsc_enabled(rx_ring)) {
		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
				     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);

		if (unlikely(rsc_enabled)) {
			u32 rsc_cnt = le32_to_cpu(rsc_enabled);

			rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;

			/* update ntc based on RSC value */
			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
			ntc &= IXGBE_RXDADV_NEXTP_MASK;
			ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
		}
	}

	/* if we are the last buffer then there is nothing else to do */
	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buffer_info[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}
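/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */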
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
			    struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/*
	 * it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lomem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/*
	 * we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}
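/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */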
static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
				struct sk_buff *skb)
{
	/* if the page was released unmap it, else just sync our portion */
	if (unlikely(IXGBE_CB(skb)->page_released)) {
		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
		IXGBE_CB(skb)->page_released = false;
	} else {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];

		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      frag->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);
	}
	IXGBE_CB(skb)->dma = 0;
}
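/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/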
static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
				  union ixgbe_adv_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	struct net_device *netdev = rx_ring->netdev;

	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbe_test_staterr(rx_desc,
					IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
	    !(netdev->features & NETIF_F_RXALL))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		ixgbe_pull_tail(rx_ring, skb);

#ifdef IXGBE_FCOE
	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
		return false;

#endif
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
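/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/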
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
				struct ixgbe_rx_buffer *old_buff)
{
	struct ixgbe_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset,
					 ixgbe_rx_bufsz(rx_ring),
					 DMA_FROM_DEVICE);
}

static inline bool ixgbe_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
}
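/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/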
static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
			      struct ixgbe_rx_buffer *rx_buffer,
			      union ixgbe_adv_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
	unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
				   ixgbe_rx_bufsz(rx_ring);
#endif

	if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buffer->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!ixgbe_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(ixgbe_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= truesize;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > last_offset)
		return false;
#endif

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	atomic_inc(&page->_count);

	return true;
}

static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
					     union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_rx_buffer *rx_buffer;
	struct sk_buff *skb;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	skb = rx_buffer->skb;

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     IXGBE_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/*
		 * we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);

		/*
		 * Delay unmapping of the first packet. It carries the
		 * header information, HW may still access the header
		 * after the writeback.  Only unmap it when EOP is
		 * reached
		 */
		if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
			goto dma_sync;

		IXGBE_CB(skb)->dma = rx_buffer->dma;
	} else {
		if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
			ixgbe_dma_sync_frag(rx_ring, skb);

dma_sync:
		/* we are reusing so sync this buffer for CPU use */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_buffer->dma,
					      rx_buffer->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		rx_buffer->skb = NULL;
	}

	/* pull page into skb */
	if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
	} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
		/* the page has been released from the ring */
		IXGBE_CB(skb)->page_released = true;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       ixgbe_rx_pg_size(rx_ring),
			       DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;

	return skb;
}
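/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/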
static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
			      struct ixgbe_ring *rx_ring,
			      const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int ddp_bytes;
	unsigned int mss = 0;
#endif /* IXGBE_FCOE */
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct sk_buff *skb;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!rx_desc->wb.upper.status_error)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		/* retrieve a buffer from the ring */
		skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* place incomplete frames back on ring for completion */
		if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* verify the packet layout is correct */
		if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
			continue;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, timestamp, VLAN, and protocol */
		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);

#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			/* include DDPed FCoE data */
			if (ddp_bytes > 0) {
				if (!mss) {
					mss = rx_ring->netdev->mtu -
						sizeof(struct fcoe_hdr) -
						sizeof(struct fc_frame_header) -
						sizeof(struct fcoe_crc_eof);
					if (mss > 512)
						mss &= ~511;
				}
				total_rx_bytes += ddp_bytes;
				total_rx_packets += DIV_ROUND_UP(ddp_bytes,
								 mss);
			}
			if (!ddp_bytes) {
				dev_kfree_skb_any(skb);
				continue;
			}
		}

#endif /* IXGBE_FCOE */
		skb_mark_napi_id(skb, &q_vector->napi);
		ixgbe_rx_skb(q_vector, skb);

		/* update budget accounting */
		total_rx_packets++;
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbe_low_latency_recv(struct napi_struct *napi)
{
	struct ixgbe_q_vector *q_vector =
			container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring  *ring;
	int found = 0;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbe_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbe_for_each_ring(ring, q_vector->rx) {
		found = ixgbe_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbe_qv_unlock_poll(q_vector);

	return found;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */
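/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/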
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int v_idx;
	u32 mask;

	/* Populate MSIX to EITR Select */
	if (adapter->num_vfs > 32) {
		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
	}

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct ixgbe_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbe_for_each_ring(ring, q_vector->rx)
			ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbe_for_each_ring(ring, q_vector->tx)
			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		ixgbe_write_eitr(q_vector);
	}

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
			       v_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER |
		  IXGBE_EIMS_MAILBOX |
		  IXGBE_EIMS_LSC);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
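/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 **/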
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
			     struct ixgbe_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (100000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	if (timepassed_us == 0)
		return;

	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
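/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */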
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbe_update_itr(q_vector, &q_vector->tx);
	ixgbe_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IXGBE_8K_ITR;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbe_write_eitr(q_vector);
	}
}
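/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/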
static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = adapter->interrupt_event;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
	    !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_T3_LOM:
		/*
		 * Since the warning interrupt is for both ports
		 * we don't have to check if:
		 *  - This interrupt wasn't for our port.
		 *  - We may have missed the interrupt so always have to
		 *    check if we got a LSC
		 */
		if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
		    !(eicr & IXGBE_EICR_LSC))
			return;

		if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
			u32 speed;
			bool link_up = false;

			hw->mac.ops.check_link(hw, &speed, &link_up, false);

			if (link_up)
				return;
		}

		/* Check if this is not due to overtemp */
		if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
			return;

		break;
	default:
		if (!(eicr & IXGBE_EICR_GPI_SDP0))
			return;
		break;
	}
	e_crit(drv,
	       "Network adapter has been stopped because it has over heated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");

	adapter->interrupt_event = 0;
}

static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		e_crit(probe, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}

static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
		return;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		/*
		 * Need to check link state so complete overtemp check
		 * is done
		 */
		if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
		    (!test_bit(__IXGBE_DOWN, &adapter->state))) {
			adapter->interrupt_event = eicr;
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
			ixgbe_service_event_schedule(adapter);
			return;
		}
		return;
	case ixgbe_mac_X540:
		if (!(eicr & IXGBE_EICR_TS))
			return;
		break;
	default:
		return;
	}

	e_crit(drv,
	       "Network adapter has been stopped because it has over heated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");
}

static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
			ixgbe_service_event_schedule(adapter);
		}
	}

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
			ixgbe_service_event_schedule(adapter);
		}
	}
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
		ixgbe_service_event_schedule(adapter);
	}
}

static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
					   u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

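	/* qmask carries one bit per queue vector.  On 82598 only the low
	 * RTx queue bits exist in the single EIMS register; on 82599 and
	 * later the 64 bits are split across the EIMS_EX register pair,
	 * e.g. enabling vector 33 sets bit 1 of EIMS_EX(1).
	 */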
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		mask = (qmask & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (qmask >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
		break;
	default:
		break;
	}
	/* skip the flush */
}

static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
					    u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		mask = (qmask & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (qmask >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
		break;
	default:
		break;
	}
	/* skip the flush */
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @queues: also enable all queue interrupts
 * @flush: flush the register writes when done
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
				    bool flush)
{
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* don't reenable LSC while waiting for link */
	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
		mask &= ~IXGBE_EIMS_LSC;

	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			mask |= IXGBE_EIMS_GPI_SDP0;
			break;
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			mask |= IXGBE_EIMS_TS;
			break;
		default:
			break;
		}
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP1;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		/* fall through */
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		mask |= IXGBE_EIMS_ECC;
		mask |= IXGBE_EIMS_MAILBOX;
		break;
	default:
		break;
	}

	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
	    !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	if (queues)
		ixgbe_irq_enable_queues(adapter, ~0);
	if (flush)
		IXGBE_WRITE_FLUSH(&adapter->hw);
}

static irqreturn_t ixgbe_msix_other(int irq, void *data)
{
	struct ixgbe_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/*
	 * Workaround for Silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which later is done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);

	/* The lower 16 bits of the EICR register are for the queue interrupts
	 * which should be masked here in order to not accidentally clear them
	 * if the bits happen to be set when ixgbe_msix_other is called; the
	 * queue interrupts are re-enabled separately by ixgbe_poll.
	 */
	eicr &= 0xFFFF0000;

	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (eicr & IXGBE_EICR_MAILBOX)
		ixgbe_msg_task(adapter);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		if (eicr & IXGBE_EICR_ECC) {
			e_info(link, "Received ECC Err, initiating reset\n");
			adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
			ixgbe_service_event_schedule(adapter);
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}
		/* Handle Flow Director Full threshold interrupt */
		if (eicr & IXGBE_EICR_FLOW_DIR) {
			int reinit_count = 0;
			int i;
			for (i = 0; i < adapter->num_tx_queues; i++) {
				struct ixgbe_ring *ring = adapter->tx_ring[i];
				if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
						       &ring->state))
					reinit_count++;
			}
			if (reinit_count) {
				/* no more flow director interrupts until after init */
				IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
				adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
				ixgbe_service_event_schedule(adapter);
			}
		}
		ixgbe_check_sfp_event(adapter, eicr);
		ixgbe_check_overtemp_event(adapter, eicr);
		break;
	default:
		break;
	}

	ixgbe_check_fan_failure(adapter, eicr);

	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
		ixgbe_ptp_check_pps_event(adapter, eicr);

	/* re-enable the original interrupt state, no lsc, no queues */
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter, false, false);

	return IRQ_HANDLED;
}

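/* MSI-X queue interrupt handler: all it needs to do is schedule NAPI
 * for this vector; EIAM has already auto-masked the queue interrupts.
 */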
static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */

	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_poll - NAPI Rx polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets driver is allowed to clean
 *
 * This function is used for legacy and MSI, NAPI mode
 **/
int ixgbe_poll(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
				container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_dca(q_vector);
#endif

	ixgbe_for_each_ring(ring, q_vector->tx)
		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);

	if (!ixgbe_qv_lock_napi(q_vector))
		return budget;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbe_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
				   per_ring_budget) < per_ring_budget);

	ixgbe_qv_unlock_napi(q_vector);

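	/* If any Tx or Rx ring still has work left, report the full budget
	 * back to the NAPI core so this vector keeps getting polled.
	 */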
	if (!clean_complete)
		return budget;

	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbe_set_itr(q_vector);
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));

	return 0;
}

/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			e_err(probe, "request_irq failed for MSIX interrupt "
			      "Error: %d\n", err);
			goto free_queue_irqs;
		}
		/* If Flow Director is enabled, set interrupt affinity */
		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
			/* assign the mask for this irq */
			irq_set_affinity_hint(entry->vector,
					      &q_vector->affinity_mask);
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  ixgbe_msix_other, 0, netdev->name, adapter);
	if (err) {
		e_err(probe, "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_set_affinity_hint(adapter->msix_entries[vector].vector,
				      NULL);
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct ixgbe_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u32 eicr;

	/*
	 * Workaround for silicon errata on 82598.  Mask the interrupt
	 * before the read of EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr) {
		/*
		 * shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM
		 * finish the workaround of silicon errata on 82598.  Unmask
		 * the interrupt that we masked before the EICR read.
		 */
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable(adapter, true, true);
		return IRQ_NONE;	/* Not our interrupt */
	}

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		ixgbe_check_sfp_event(adapter, eicr);
		/* Fall through */
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		if (eicr & IXGBE_EICR_ECC) {
			e_info(link, "Received ECC Err, initiating reset\n");
			adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
			ixgbe_service_event_schedule(adapter);
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}
		ixgbe_check_overtemp_event(adapter, eicr);
		break;
	default:
		break;
	}

	ixgbe_check_fan_failure(adapter, eicr);
	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
		ixgbe_ptp_check_pps_event(adapter, eicr);

	/* would disable interrupts here but EIAM disabled it */
	napi_schedule(&q_vector->napi);

	/*
	 * re-enable link(maybe) and non-queue interrupts, no flush.
	 * ixgbe_poll will re-enable the queue interrupts
	 */
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter, false, false);

	return IRQ_HANDLED;
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		err = ixgbe_request_msix_irqs(adapter);
	else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
				  netdev->name, adapter);
	else
		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
				  netdev->name, adapter);

	if (err)
		e_err(probe, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	int vector;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		free_irq(adapter->pdev->irq, adapter);
		return;
	}

	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		/* free only the irqs that were actually requested */
		if (!q_vector->rx.ring && !q_vector->tx.ring)
			continue;

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(entry->vector, NULL);

		free_irq(entry->vector, q_vector);
	}

	free_irq(adapter->msix_entries[vector++].vector, adapter);
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
		break;
	default:
		break;
	}
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int vector;

		for (vector = 0; vector < adapter->num_q_vectors; vector++)
			synchronize_irq(adapter->msix_entries[vector].vector);

		synchronize_irq(adapter->msix_entries[vector++].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];

	ixgbe_write_eitr(q_vector);

	ixgbe_set_ivar(adapter, 0, 0, 0);
	ixgbe_set_ivar(adapter, 1, 0, 0);

	e_info(hw, "Legacy interrupt IVAR setup done\n");
}

/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
			(tdba & DMA_BIT_MASK(32)));
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));
	IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);

	/*
	 * set WTHRESH to encourage burst writeback, it should not be set
	 * higher than 1 when ITR is 0 as it could cause false TX hangs
	 *
	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
		txdctl |= (1 << 16);	/* WTHRESH = 1 */
	else
		txdctl |= (8 << 16);	/* WTHRESH = 8 */

	/*
	 * Setting PTHRESH to 32 both improves performance
	 * and avoids a TX hang with DFP enabled
	 */
	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */
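	/* TXDCTL field layout (per the 82599 datasheet): PTHRESH lives in
	 * bits 6:0, HTHRESH in bits 14:8 and WTHRESH in bits 22:16, so the
	 * ORs above yield PTHRESH=32, HTHRESH=1 and WTHRESH of 1 or 8.
	 */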

	/* reinitialize flowdirector state */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		ring->atr_sample_rate = adapter->atr_sample_rate;
		ring->atr_count = 0;
		set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
	} else {
		ring->atr_sample_rate = 0;
	}

	/* initialize XPS */
	if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
		struct ixgbe_q_vector *q_vector = ring->q_vector;

		if (q_vector)
			netif_set_xps_queue(ring->netdev,
					    &q_vector->affinity_mask,
					    ring->queue_index);
	}

	clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);

	/* enable queue */
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);

	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
}

static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rttdcs, mtqc;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* disable the arbiter while setting MTQC */
	rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	rttdcs |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);

	/* set transmit pool layout */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		mtqc = IXGBE_MTQC_VT_ENA;
		if (tcs > 4)
			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
		else if (tcs > 1)
			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		else if (adapter->ring_feature[RING_F_RSS].indices == 4)
			mtqc |= IXGBE_MTQC_32VF;
		else
			mtqc |= IXGBE_MTQC_64VF;
	} else {
		if (tcs > 4)
			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
		else if (tcs > 1)
			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		else
			mtqc = IXGBE_MTQC_64Q_1PB;
	}

	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);

	/* Enable Security TX Buffer IFG for multiple pak */
	if (tcs) {
		u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
		sectx |= IXGBE_SECTX_DCB;
		IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
	}

	/* re-enable the arbiter */
	rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
}

/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 dmatxctl;
	u32 i;

	ixgbe_setup_mtqc(adapter);

	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* DMATXCTL.EN must be before Tx queues enabled */
		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		dmatxctl |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
	}

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
				 struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 reg_idx = ring->reg_idx;
	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));

	srrctl |= IXGBE_SRRCTL_DROP_EN;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}

static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 reg_idx = ring->reg_idx;
	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));

	srrctl &= ~IXGBE_SRRCTL_DROP_EN;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}

#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
#else
static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
#endif
{
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	/*
	 * We should set the drop enable bit if:
	 *  SR-IOV is enabled
	 *   or
	 *  Number of Rx queues > 1 and flow control is disabled
	 *
	 *  This allows us to avoid head of line blocking for security
	 *  and performance reasons.
	 */
	if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
	    !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++)
			ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *rx_ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;
	u8 reg_idx = rx_ring->reg_idx;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		u16 mask = adapter->ring_feature[RING_F_RSS].mask;

		/*
		 * if VMDq is not active we must program one srrctl register
		 * per RSS queue since we have enabled RDRXCTL.MVMEN
		 */
		reg_idx &= mask;
	}

	/* configure header buffer length, needed for RSC */
	srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;

	/* configure the packet buffer length */
	srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
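	/* BSIZEPKT is expressed in 1KB units, so a 2KB (or 3KB) Rx buffer
	 * programs a value of 2 (or 3) after the shift above.
	 */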

	/* configure descriptor type */
	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}

/**
 * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
 * @adapter: device handle
 *
 *  - 82598/82599/X540:     128
 *  - X550(non-SRIOV mode): 512
 *  - X550(SRIOV mode):     64
 **/
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type < ixgbe_mac_X550)
		return 128;
	else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return 64;
	else
		return 512;
}

/**
 * ixgbe_store_reta - Write the RETA table to HW
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
 **/
static void ixgbe_store_reta(struct ixgbe_adapter *adapter)
{
	u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0;
	u32 indices_multi;
	u8 *indir_tbl = adapter->rss_indir_tbl;

	/* Fill out the redirection table as follows:
	 *  - 82598:      8 bit wide entries containing pair of 4 bit RSS
	 *    indices.
	 *  - 82599/X540: 8 bit wide entries containing 4 bit RSS index
	 *  - X550:       8 bit wide entries containing 6 bit RSS index
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		indices_multi = 0x11;
	else
		indices_multi = 0x1;
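	/* With the 0x11 multiplier a table value of n is replicated into
	 * both 4 bit nibbles of the 82598 entry, e.g. n = 3 is stored as
	 * 0x33, so whichever nibble the hardware reads selects queue 3.
	 */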

	/* Write redirection table to HW */
	for (i = 0; i < reta_entries; i++) {
		reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
						reta);
			reta = 0;
		}
	}
}

/**
 * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
 * @adapter: device handle
 *
 * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
 **/
static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
{
	u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vfreta = 0;
	unsigned int pf_pool = adapter->num_vfs;

	/* Write redirection table to HW */
	for (i = 0; i < reta_entries; i++) {
		vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
					vfreta);
			vfreta = 0;
		}
	}
}

static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j;
	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

	/* Program table for at least 2 queues w/ SR-IOV so that VFs can
	 * make full use of any rings they may have.  We will use the
	 * PSRTYPE register to control how many rings we use within the PF.
	 */
	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
		rss_i = 2;

	/* Fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);

	/* Fill out redirection table */
	memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));

	for (i = 0, j = 0; i < reta_entries; i++, j++) {
		if (j == rss_i)
			j = 0;

		adapter->rss_indir_tbl[i] = j;
	}
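	/* The loop above fills the table round-robin, so with rss_i = 4
	 * the indirection table reads 0, 1, 2, 3, 0, 1, 2, 3, ... and
	 * hash results spread evenly across the four RSS queues.
	 */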

	ixgbe_store_reta(adapter);
}

static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
	unsigned int pf_pool = adapter->num_vfs;
	int i, j;

	/* Fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
				adapter->rss_key[i]);

	/* Fill out the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == rss_i)
			j = 0;

		adapter->rss_indir_tbl[i] = j;
	}

	ixgbe_store_vfreta(adapter);
}

static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
	u32 rxcsum;

	/* Disable indicating checksum in descriptor, enables RSS hash */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		if (adapter->ring_feature[RING_F_RSS].mask)
			mrqc = IXGBE_MRQC_RSSEN;
	} else {
		u8 tcs = netdev_get_num_tc(adapter->netdev);

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			if (tcs > 4)
				mrqc = IXGBE_MRQC_VMDQRT8TCEN;	/* 8 TCs */
			else if (tcs > 1)
				mrqc = IXGBE_MRQC_VMDQRT4TCEN;	/* 4 TCs */
			else if (adapter->ring_feature[RING_F_RSS].indices == 4)
				mrqc = IXGBE_MRQC_VMDQRSS32EN;
			else
				mrqc = IXGBE_MRQC_VMDQRSS64EN;
		} else {
			if (tcs > 4)
				mrqc = IXGBE_MRQC_RTRSS8TCEN;
			else if (tcs > 1)
				mrqc = IXGBE_MRQC_RTRSS4TCEN;
			else
				mrqc = IXGBE_MRQC_RSSEN;
		}
	}

	/* Perform hash on these packet types */
	rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
		     IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
		     IXGBE_MRQC_RSS_FIELD_IPV6 |
		     IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
		rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
		rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

	netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
	if ((hw->mac.type >= ixgbe_mac_X550) &&
	    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		unsigned int pf_pool = adapter->num_vfs;

		/* Enable VF RSS mode */
		mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

		/* Setup RSS through the VF registers */
		ixgbe_setup_vfreta(adapter);
		vfmrqc = IXGBE_MRQC_RSSEN;
		vfmrqc |= rss_field;
		IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
	} else {
		ixgbe_setup_reta(adapter);
		mrqc |= rss_field;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}
}

/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 **/
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rscctrl;
	u8 reg_idx = ring->reg_idx;

	if (!ring_is_rsc_enabled(ring))
		return;

	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	 * we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65536
	 */
	rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}

#define IXGBE_MAX_RX_DESC_POLL 10
static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (ixgbe_removed(hw->hw_addr))
		return;
	/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop) {
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
		      "the polling period\n", reg_idx);
	}
}

void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (ixgbe_removed(hw->hw_addr))
		return;
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

	/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
	if (hw->mac.type == ixgbe_mac_82598EB &&
	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
		return;

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop) {
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
		      "the polling period\n", reg_idx);
	}
}

void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
	ixgbe_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));
	IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);

	ixgbe_configure_srrctl(adapter, ring);
	ixgbe_configure_rscctl(adapter, ring);

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * enable cache line friendly hardware writes:
		 * PTHRESH=32 descriptors (half the internal cache),
		 * this also removes ugly rx_no_buffer_count increment
		 * HTHRESH=4 descriptors (to minimize latency on fetch)
		 * WTHRESH=8 descriptors (to encourage burst writeback)
		 */
		rxdctl &= ~0x3FFFFF;
		rxdctl |=  0x080420;
	}

	/* enable receive descriptor ring */
	rxdctl |= IXGBE_RXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

	ixgbe_rx_desc_queue_enable(adapter, ring);
	ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
}

static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int rss_i = adapter->ring_feature[RING_F_RSS].indices;
	u16 pool;

	/* PSRTYPE must be initialized in non 82598 adapters */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
		      IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR |
		      IXGBE_PSRTYPE_L2HDR |
		      IXGBE_PSRTYPE_IPV6HDR;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (rss_i > 3)
		psrtype |= 2 << 29;
	else if (rss_i > 1)
		psrtype |= 1 << 29;

	for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}

static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_offset, vf_shift;
	u32 gcr_ext, vmdctl;
	int i;

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return;

	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
	vmdctl |= IXGBE_VT_CTL_REPLEN;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

	vf_shift = VMDQ_P(0) % 32;
	reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;

	/* Enable only the PF's pool for Tx/Rx */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
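	/* Note the unsigned trick above: when VMDQ_P(0) < 32, reg_offset is
	 * 0 and (reg_offset - 1) underflows to 0xFFFFFFFF, enabling every
	 * pool in the upper register; when reg_offset is 1 it writes 0,
	 * leaving all pools below the PF's disabled.
	 */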
	if (adapter->bridge_mode == BRIDGE_MODE_VEB)
		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
	hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));

	/*
	 * Set up VF register offsets for selected VT Mode,
	 * i.e. 32 or 64 VFs for SR-IOV
	 */
	switch (adapter->ring_feature[RING_F_VMDQ].mask) {
	case IXGBE_82599_VMDQ_8Q_MASK:
		gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
		break;
	case IXGBE_82599_VMDQ_4Q_MASK:
		gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
		break;
	default:
		gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);

	/* Enable MAC Anti-Spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
					  adapter->num_vfs);

	/* Ensure LLDP is set for Ethertype Antispoofing if we will be
	 * calling set_ethertype_anti_spoofing for each VF in loop below
	 */
	if (hw->mac.ops.set_ethertype_anti_spoofing)
		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
				(IXGBE_ETQF_FILTER_EN |
				 IXGBE_ETQF_TX_ANTISPOOF |
				 IXGBE_ETH_P_LLDP));

	/* For VFs that have spoof checking turned off */
	for (i = 0; i < adapter->num_vfs; i++) {
		if (!adapter->vfinfo[i].spoofchk_enabled)
			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);

		/* enable ethertype anti spoofing if hw supports it */
		if (hw->mac.ops.set_ethertype_anti_spoofing)
			hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);

		/* Enable/Disable RSS query feature */
		ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
					      adapter->vfinfo[i].rss_query_enabled);
	}
}

static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	struct ixgbe_ring *rx_ring;
	int i;
	u32 mhadd, hlreg0;

#ifdef IXGBE_FCOE
	/* adjust max frame to be able to do baby jumbo for FCoE */
	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
	    (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;

#endif /* IXGBE_FCOE */

	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;

		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
	hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_ring = adapter->rx_ring[i];
		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
			set_ring_rsc_enabled(rx_ring);
		else
			clear_ring_rsc_enabled(rx_ring);
	}
}

static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_82598EB:
		/*
		 * For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 *
		 * also, the manual doesn't say it clearly but DCA hints
		 * will only use queue 0's tags unless this bit is set.  Saying
		 * that via setting DCA control bits doesn't make much sense...
		 */
		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* Disable RSC for ACK packets */
		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
		   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
		/* hardware requires some bits to be set by default */
		rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
		break;
	default:
		/* We should do nothing since we don't know this hardware */
		return;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}

/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 rxctrl, rfctl;

	/* disable receives while setting up the descriptors */
	hw->mac.ops.disable_rx(hw);

	ixgbe_setup_psrtype(adapter);
	ixgbe_setup_rdrxctl(adapter);

	/* RSC Setup */
	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	rfctl &= ~IXGBE_RFCTL_RSC_DIS;
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
		rfctl |= IXGBE_RFCTL_RSC_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);

	/* Program registers for the distribution of queues */
	ixgbe_setup_mrqc(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbe_set_rx_buffer_len(adapter);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	/* disable drop enable for 82598 parts */
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;

	/* enable all receives */
	rxctrl |= IXGBE_RXCTRL_RXEN;
	hw->mac.ops.enable_rx_dma(hw, rxctrl);
}

static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* remove VID from filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
	clear_bit(vid, adapter->active_vlans);

	return 0;
}

/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */
static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;
	int i, j;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		vlnctrl &= ~IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct ixgbe_ring *ring = adapter->rx_ring[i];

			if (ring->l2_accel_priv)
				continue;
			j = ring->reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */
static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vlnctrl;
	int i, j;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		vlnctrl |= IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct ixgbe_ring *ring = adapter->rx_ring[i];

			if (ring->l2_accel_priv)
				continue;
			j = ring->reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
		break;
	default:
		break;
	}
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	u16 vid;

	ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

/**
 * ixgbe_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *                0 on no addresses written
 *                X on writing X addresses to MTA
 **/
static int ixgbe_write_mc_addr_list(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!netif_running(netdev))
		return 0;

	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);
	else
		return -ENOMEM;

#ifdef CONFIG_PCI_IOV
	ixgbe_restore_vf_multicasts(adapter);
#endif

	return netdev_mc_count(netdev);
}

#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	for (i = 0; i < hw->mac.num_rar_entries; i++) {
		if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
			hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
					    adapter->mac_table[i].queue,
					    IXGBE_RAH_AV);
		else
			hw->mac.ops.clear_rar(hw, i);

		adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
	}
}
#endif

static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	for (i = 0; i < hw->mac.num_rar_entries; i++) {
		if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
			if (adapter->mac_table[i].state &
			    IXGBE_MAC_STATE_IN_USE)
				hw->mac.ops.set_rar(hw, i,
						adapter->mac_table[i].addr,
						adapter->mac_table[i].queue,
						IXGBE_RAH_AV);
			else
				hw->mac.ops.clear_rar(hw, i);

			adapter->mac_table[i].state &=
						~(IXGBE_MAC_STATE_MODIFIED);
		}
	}
}

static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
{
	int i;
	struct ixgbe_hw *hw = &adapter->hw;

	for (i = 0; i < hw->mac.num_rar_entries; i++) {
		adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
		adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
		eth_zero_addr(adapter->mac_table[i].addr);
		adapter->mac_table[i].queue = 0;
	}
	ixgbe_sync_mac_table(adapter);
}

static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, count = 0;

	for (i = 0; i < hw->mac.num_rar_entries; i++) {
		if (adapter->mac_table[i].state == 0)
			count++;
	}
	return count;
}

/* this function destroys the first RAR entry */
static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
					 u8 *addr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
	adapter->mac_table[0].queue = VMDQ_P(0);
	adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
				       IXGBE_MAC_STATE_IN_USE);
	hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
			    adapter->mac_table[0].queue,
			    IXGBE_RAH_AV);
}

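/* ixgbe_add_mac_filter - program @addr into the first free RAR entry
 * for the given pool.  Returns the table index used on success or
 * -ENOMEM when every receive address register is already in use.
 */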
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	for (i = 0; i < hw->mac.num_rar_entries; i++) {
		if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
			continue;
		adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
						IXGBE_MAC_STATE_IN_USE);
		ether_addr_copy(adapter->mac_table[i].addr, addr);
		adapter->mac_table[i].queue = queue;
		ixgbe_sync_mac_table(adapter);
		return i;
	}
	return -ENOMEM;
}

int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
{
	/* search table for addr, if found, set to 0 and sync */
	int i;
	struct ixgbe_hw *hw = &adapter->hw;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	for (i = 0; i < hw->mac.num_rar_entries; i++) {
		if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
		    adapter->mac_table[i].queue == queue) {
			adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
			adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
			eth_zero_addr(adapter->mac_table[i].addr);
			adapter->mac_table[i].queue = 0;
			ixgbe_sync_mac_table(adapter);
			return 0;
		}
	}
	return -ENOMEM;
}

/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 * @vfn: pool to associate with unicast addresses
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                0 on no addresses written
 *                X on writing X addresses to the RAR table
 **/
static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
		return -ENOMEM;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			ixgbe_del_mac_filter(adapter, ha->addr, vfn);
			ixgbe_add_mac_filter(adapter, ha->addr, vfn);
			count++;
		}
	}
	return count;
}

/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
void ixgbe_set_rx_mode(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
	u32 vlnctrl;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

	/* set all bits that we expect to always be set */
	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	fctrl |= IXGBE_FCTRL_PMCF;

	/* clear the bits we are changing the status of */
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
	if (netdev->flags & IFF_PROMISC) {
		hw->addr_ctrl.user_set_promisc = true;
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vmolr |= IXGBE_VMOLR_MPE;
		/* Only disable hardware filter vlans in promiscuous mode
		 * if SR-IOV and VMDQ are disabled - otherwise ensure
		 * that hardware VLAN filters remain enabled.
		 */
		if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
				      IXGBE_FLAG_SRIOV_ENABLED))
			vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			fctrl |= IXGBE_FCTRL_MPE;
			vmolr |= IXGBE_VMOLR_MPE;
		}
		vlnctrl |= IXGBE_VLNCTRL_VFE;
		hw->addr_ctrl.user_set_promisc = false;
	}

	/*
	 * Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
	if (count < 0) {
		fctrl |= IXGBE_FCTRL_UPE;
		vmolr |= IXGBE_VMOLR_ROPE;
	}

	/* Write addresses to the MTA, if the attempt fails
	 * then we should just turn on promiscuous mode so
	 * that we can at least receive multicast traffic
	 */
	count = ixgbe_write_mc_addr_list(netdev);
	if (count < 0) {
		fctrl |= IXGBE_FCTRL_MPE;
		vmolr |= IXGBE_VMOLR_MPE;
	} else if (count) {
		vmolr |= IXGBE_VMOLR_ROMPE;
	}

	if (hw->mac.type != ixgbe_mac_82598EB) {
		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
			   IXGBE_VMOLR_ROPE);
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode */
		fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
			  IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
			  IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */

		fctrl &= ~(IXGBE_FCTRL_DPF);
		/* NOTE:  VLAN filtering is disabled by setting PROMISC */
	}

	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		ixgbe_vlan_strip_enable(adapter);
	else
		ixgbe_vlan_strip_disable(adapter);
}

static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;

	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
		ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
		napi_enable(&adapter->q_vector[q_idx]->napi);
	}
}

static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;

	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
		napi_disable(&adapter->q_vector[q_idx]->napi);
		while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
			pr_info("QV %d locked\n", q_idx);
			usleep_range(1000, 20000);
		}
	}
}

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * This is called by the driver on open to configure the DCB hardware.
 * This is also called by the gennetlink interface when reconfiguring
 * the DCB state.
 */
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
		if (hw->mac.type == ixgbe_mac_82598EB)
			netif_set_gso_max_size(adapter->netdev, 65536);
		return;
	}

	if (hw->mac.type == ixgbe_mac_82598EB)
		netif_set_gso_max_size(adapter->netdev, 32768);

#ifdef IXGBE_FCOE
	if (adapter->netdev->features & NETIF_F_FCOE_MTU)
		max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif

	/* reconfigure the hardware */
	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
						DCB_TX_CONFIG);
		ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
						DCB_RX_CONFIG);
		ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
	} else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
		ixgbe_dcb_hw_ets(&adapter->hw,
				 adapter->ixgbe_ieee_ets,
				 max_frame);
		ixgbe_dcb_hw_pfc_config(&adapter->hw,
					adapter->ixgbe_ieee_pfc->pfc_en,
					adapter->ixgbe_ieee_ets->prio_tc);
	}

	/* Enable RSS Hash per TC */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 msb = 0;
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;

		while (rss_i) {
			msb++;
			rss_i >>= 1;
		}

		/* write msb to all 8 TCs in one write */
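		/* msb ends up as ceil(log2(rss_i)), so replicating it into
		 * all eight 4 bit TC fields at once means e.g. rss_i = 4
		 * writes RQTC = 0x22222222, i.e. 4 RSS queues per TC.
		 */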
		IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
	}
}
#endif

/* Additional bittime to account for IXGBE framing */
#define IXGBE_ETH_FRAMING 20

/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *dev = adapter->netdev;
	int link, tc, kb, marker;
	u32 dv_id, rx_pba;

	/* Calculate max LAN frame size */
	tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;

#ifdef IXGBE_FCOE
	/* FCoE traffic class uses FCOE jumbo frames */
	if ((dev->features & NETIF_F_FCOE_MTU) &&
	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
	    (pb == ixgbe_fcoe_get_tc(adapter)))
		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif

	/* Calculate delay value for device */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		dv_id = IXGBE_DV_X540(link, tc);
		break;
	default:
		dv_id = IXGBE_DV(link, tc);
		break;
	}

	/* Loopback switch introduces additional latency */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		dv_id += IXGBE_B2BT(tc);

	/* Delay value is calculated in bit times convert to KB */
	kb = IXGBE_BT2KB(dv_id);
	rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;

	marker = rx_pba - kb;
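	/* The high water mark is whatever headroom remains in the packet
	 * buffer once the worst-case delay value is reserved: purely as an
	 * illustration, a 512KB buffer with an 80KB delay value would put
	 * the XOFF threshold at 432KB (the real RXPBSIZE depends on the
	 * DCB/FCoE configuration).
	 */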

	/* It is possible that the packet buffer is not large enough
	 * to provide required headroom. In this case throw an error
	 * to user and do the best we can.
	 */
	if (marker < 0) {
		e_warn(drv, "Packet Buffer(%i) can not provide enough "
			    "headroom to support flow control. "
			    "Decrease MTU or number of traffic classes\n", pb);
		marker = tc + 1;
	}

	return marker;
}

/**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *dev = adapter->netdev;
	int tc;
	u32 dv_id;

	/* Calculate max LAN frame size */
	tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

#ifdef IXGBE_FCOE
	/* FCoE traffic class uses FCOE jumbo frames */
	if ((dev->features & NETIF_F_FCOE_MTU) &&
	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
	    (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif

	/* Calculate delay value for device */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		dv_id = IXGBE_LOW_DV_X540(tc);
		break;
	default:
		dv_id = IXGBE_LOW_DV(tc);
		break;
	}

	/* Delay value is calculated in bit times convert to KB */
	return IXGBE_BT2KB(dv_id);
}

/*
 * ixgbe_pbthresh_setup - calculate and setup high low water marks
 */
static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int num_tc = netdev_get_num_tc(adapter->netdev);
	int i;

	if (!num_tc)
		num_tc = 1;

	for (i = 0; i < num_tc; i++) {
		hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
		hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);

		/* Low water marks must not be larger than high water marks */
		if (hw->fc.low_water[i] > hw->fc.high_water[i])
			hw->fc.low_water[i] = 0;
	}

	for (; i < MAX_TRAFFIC_CLASS; i++)
		hw->fc.high_water[i] = 0;
}

static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int hdrm;
	u8 tc = netdev_get_num_tc(adapter->netdev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		hdrm = 32 << adapter->fdir_pballoc;
	else
		hdrm = 0;

	hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
	ixgbe_pbthresh_setup(adapter);
}

static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *filter;

	spin_lock(&adapter->fdir_perfect_lock);

	if (!hlist_empty(&adapter->fdir_filter_list))
		ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);

	hlist_for_each_entry_safe(filter, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		ixgbe_fdir_write_perfect_filter_82599(hw,
				&filter->filter,
				filter->sw_idx,
				(filter->action == IXGBE_FDIR_DROP_QUEUE) ?
				IXGBE_FDIR_DROP_QUEUE :
				adapter->rx_ring[filter->action]->reg_idx);
	}

	spin_unlock(&adapter->fdir_perfect_lock);
}

static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
				      struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vmolr;

	/* No unicast promiscuous support for VMDQ devices. */
	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
	vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);

	/* clear the affected bit */
	vmolr &= ~IXGBE_VMOLR_MPE;

	if (dev->flags & IFF_ALLMULTI) {
		vmolr |= IXGBE_VMOLR_MPE;
	} else {
		vmolr |= IXGBE_VMOLR_ROMPE;
		hw->mac.ops.update_mc_addr_list(hw, dev);
	}
	ixgbe_write_uc_addr_list(adapter->netdev, pool);
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}

static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
{
	struct ixgbe_adapter *adapter = vadapter->real_adapter;
	int rss_i = adapter->num_rx_queues_per_pool;
	struct ixgbe_hw *hw = &adapter->hw;
	u16 pool = vadapter->pool;
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
		      IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR |
		      IXGBE_PSRTYPE_L2HDR |
		      IXGBE_PSRTYPE_IPV6HDR;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (rss_i > 3)
		psrtype |= 2 << 29;
	else if (rss_i > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}

/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];

		if (rx_buffer->skb) {
			struct sk_buff *skb = rx_buffer->skb;
			if (IXGBE_CB(skb)->page_released)
				dma_unmap_page(dev,
					       IXGBE_CB(skb)->dma,
					       ixgbe_rx_bufsz(rx_ring),
					       DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			rx_buffer->skb = NULL;
		}

		if (!rx_buffer->page)
			continue;

		dma_unmap_page(dev, rx_buffer->dma,
			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
		__free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));

		rx_buffer->page = NULL;
	}

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
				   struct ixgbe_ring *rx_ring)
{
	struct ixgbe_adapter *adapter = vadapter->real_adapter;
	int index = rx_ring->queue_index + vadapter->rx_base_queue;

	/* shutdown specific queue receive and wait for dma to settle */
	ixgbe_disable_rx_queue(adapter, rx_ring);
	usleep_range(10000, 20000);
	ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
	ixgbe_clean_rx_ring(rx_ring);
	rx_ring->l2_accel_priv = NULL;
}

static int ixgbe_fwd_ring_down(struct net_device *vdev,
			       struct ixgbe_fwd_adapter *accel)
{
	struct ixgbe_adapter *adapter = accel->real_adapter;
	unsigned int rxbase = accel->rx_base_queue;
	unsigned int txbase = accel->tx_base_queue;
	int i;

	netif_tx_stop_all_queues(vdev);

	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
		adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
	}

	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
		adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
		adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
	}

	return 0;
}

static int ixgbe_fwd_ring_up(struct net_device *vdev,
			     struct ixgbe_fwd_adapter *accel)
{
	struct ixgbe_adapter *adapter = accel->real_adapter;
	unsigned int rxbase, txbase, queues;
	int i, baseq, err = 0;

	if (!test_bit(accel->pool, &adapter->fwd_bitmask))
		return 0;

	baseq = accel->pool * adapter->num_rx_queues_per_pool;
	netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
		   accel->pool, adapter->num_rx_pools,
		   baseq, baseq + adapter->num_rx_queues_per_pool,
		   adapter->fwd_bitmask);

	accel->netdev = vdev;
	accel->rx_base_queue = rxbase = baseq;
	accel->tx_base_queue = txbase = baseq;

	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
		ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);

	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
		adapter->rx_ring[rxbase + i]->netdev = vdev;
		adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
	}

	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
		adapter->tx_ring[txbase + i]->netdev = vdev;
		adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
	}

	queues = min_t(unsigned int,
		       adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
	err = netif_set_real_num_tx_queues(vdev, queues);
	if (err)
		goto fwd_queue_err;

	err = netif_set_real_num_rx_queues(vdev, queues);
	if (err)
		goto fwd_queue_err;

	if (is_valid_ether_addr(vdev->dev_addr))
		ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);

	ixgbe_fwd_psrtype(accel);
	ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
	return err;
fwd_queue_err:
	ixgbe_fwd_ring_down(vdev, accel);
	return err;
}

static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
{
	struct net_device *upper;
	struct list_head *iter;
	int err;

	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
		if (netif_is_macvlan(upper)) {
			struct macvlan_dev *dfwd = netdev_priv(upper);
			struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;

			if (dfwd->fwd_priv) {
				err = ixgbe_fwd_ring_up(upper, vadapter);
				if (err)
					continue;
			}
		}
	}
}

static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbe_configure_pb(adapter);
#ifdef CONFIG_IXGBE_DCB
	ixgbe_configure_dcb(adapter);
#endif
	/*
	 * We must restore virtualization before VLANs or else
	 * the VLVF registers will not be populated
	 */
	ixgbe_configure_virtualization(adapter);

	ixgbe_set_rx_mode(adapter->netdev);
	ixgbe_restore_vlan(adapter);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		hw->mac.ops.disable_rx_buff(hw);
		break;
	default:
		break;
	}

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		ixgbe_init_fdir_signature_82599(&adapter->hw,
						adapter->fdir_pballoc);
	} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
		ixgbe_init_fdir_perfect_82599(&adapter->hw,
					      adapter->fdir_pballoc);
		ixgbe_fdir_filter_restore(adapter);
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		hw->mac.ops.enable_rx_buff(hw);
		break;
	default:
		break;
	}

#ifdef IXGBE_FCOE
	/* configure FCoE L2 filters, redirection table, and Rx control */
	ixgbe_configure_fcoe(adapter);

#endif /* IXGBE_FCOE */
	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
	ixgbe_configure_dfwd(adapter);
}

static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_active_unknown:
	case ixgbe_phy_sfp_ftl_active:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
	/* a PHY of type none is also treated as a pluggable module */
	case ixgbe_phy_none:
		return true;
	case ixgbe_phy_nl:
		if (hw->mac.type == ixgbe_mac_82598EB)
			return true;
		/* fall through */
	default:
		return false;
	}
}
4731
4732
4733
4734
4735
4736static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
4737{
4738
4739
4740
4741
4742
4743
4744 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
4745 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
4746
4747 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
}

/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
4757{
4758 u32 speed;
4759 bool autoneg, link_up = false;
4760 u32 ret = IXGBE_ERR_LINK_SETUP;
4761
4762 if (hw->mac.ops.check_link)
4763 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
4764
4765 if (ret)
4766 return ret;
4767
4768 speed = hw->phy.autoneg_advertised;
4769 if ((!speed) && (hw->mac.ops.get_link_capabilities))
4770 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
4771 &autoneg);
4772 if (ret)
4773 return ret;
4774
4775 if (hw->mac.ops.setup_link)
4776 ret = hw->mac.ops.setup_link(hw, speed, link_up);
4777
4778 return ret;
4779}
4780
4781static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
4782{
4783 struct ixgbe_hw *hw = &adapter->hw;
4784 u32 gpie = 0;
4785
4786 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4787 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
4788 IXGBE_GPIE_OCD;
		gpie |= IXGBE_GPIE_EIAME;
		/*
		 * use EIAM to auto-mask when MSI-X interrupt is asserted
		 * this saves a register write for every interrupt
		 */
		switch (hw->mac.type) {
4795 case ixgbe_mac_82598EB:
4796 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4797 break;
4798 case ixgbe_mac_82599EB:
4799 case ixgbe_mac_X540:
4800 case ixgbe_mac_X550:
4801 case ixgbe_mac_X550EM_x:
4802 default:
4803 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4804 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4805 break;
4806 }
	} else {
		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
		 * specifically only auto mask tx and rx interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	/* XXX: to interrupt immediately for EICS writes, enable this */
	/* gpie |= IXGBE_GPIE_EIMEN; */

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
4817 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
4818
4819 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4820 case IXGBE_82599_VMDQ_8Q_MASK:
4821 gpie |= IXGBE_GPIE_VTMODE_16;
4822 break;
4823 case IXGBE_82599_VMDQ_4Q_MASK:
4824 gpie |= IXGBE_GPIE_VTMODE_32;
4825 break;
4826 default:
4827 gpie |= IXGBE_GPIE_VTMODE_64;
4828 break;
4829 }
	}

	/* Enable Thermal over heat sensor interrupt */
	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
4834 switch (adapter->hw.mac.type) {
4835 case ixgbe_mac_82599EB:
4836 gpie |= IXGBE_SDP0_GPIEN;
4837 break;
4838 case ixgbe_mac_X540:
4839 gpie |= IXGBE_EIMS_TS;
4840 break;
4841 default:
4842 break;
4843 }
	}

	/* Enable fan failure interrupt */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
4848 gpie |= IXGBE_SDP1_GPIEN;
4849
4850 if (hw->mac.type == ixgbe_mac_82599EB) {
4851 gpie |= IXGBE_SDP1_GPIEN;
4852 gpie |= IXGBE_SDP2_GPIEN;
4853 }
4854
4855 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4856}
4857
4858static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
4859{
4860 struct ixgbe_hw *hw = &adapter->hw;
4861 int err;
4862 u32 ctrl_ext;
4863
4864 ixgbe_get_hw_control(adapter);
4865 ixgbe_setup_gpie(adapter);
4866
4867 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4868 ixgbe_configure_msix(adapter);
4869 else
		ixgbe_configure_msi_and_legacy(adapter);

	/* enable the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.enable_tx_laser)
4874 hw->mac.ops.enable_tx_laser(hw);
4875
4876 smp_mb__before_atomic();
4877 clear_bit(__IXGBE_DOWN, &adapter->state);
4878 ixgbe_napi_enable_all(adapter);
4879
4880 if (ixgbe_is_sfp(hw)) {
4881 ixgbe_sfp_link_config(adapter);
4882 } else {
4883 err = ixgbe_non_sfp_link_config(hw);
4884 if (err)
4885 e_err(probe, "link_config FAILED %d\n", err);
	}

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_EICR);
	ixgbe_irq_enable(adapter, true, true);

	/*
	 * If this adapter has a fan, check to see if we had
	 * a failure before we enabled the interrupt.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
4897 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4898 if (esdp & IXGBE_ESDP_SDP1)
4899 e_crit(drv, "Fan has stopped, replace the adapter\n");
	}

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->service_timer, jiffies);

	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4910 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4911 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4912}
4913
4914void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
4915{
	WARN_ON(in_interrupt());
	/* put off any impending NetWatchDogTimeout */
	adapter->netdev->trans_start = jiffies;
4919
4920 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
4921 usleep_range(1000, 2000);
	ixgbe_down(adapter);
	/*
	 * If SR-IOV enabled then wait a bit before bringing the adapter
	 * back up to give the VFs time to respond to the reset.  The
	 * two second wait is based upon the watchdog timer cycle of
	 * the VF driver.
	 */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4930 msleep(2000);
4931 ixgbe_up(adapter);
4932 clear_bit(__IXGBE_RESETTING, &adapter->state);
4933}
4934
4935void ixgbe_up(struct ixgbe_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	ixgbe_configure(adapter);
4939
4940 ixgbe_up_complete(adapter);
4941}
4942
4943void ixgbe_reset(struct ixgbe_adapter *adapter)
4944{
4945 struct ixgbe_hw *hw = &adapter->hw;
4946 struct net_device *netdev = adapter->netdev;
4947 int err;
4948 u8 old_addr[ETH_ALEN];
4949
4950 if (ixgbe_removed(hw->hw_addr))
		return;

	/* lock SFP init bit to prevent race conditions with the watchdog */
	while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		usleep_range(1000, 2000);

	/* clear all SFP and link config related flags while holding SFP_INIT */
	adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
4958 IXGBE_FLAG2_SFP_NEEDS_RESET);
4959 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4960
4961 err = hw->mac.ops.init_hw(hw);
4962 switch (err) {
4963 case 0:
4964 case IXGBE_ERR_SFP_NOT_PRESENT:
4965 case IXGBE_ERR_SFP_NOT_SUPPORTED:
4966 break;
4967 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
4968 e_dev_err("master disable timed out\n");
4969 break;
	case IXGBE_ERR_EEPROM_VERSION:
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
4973 "Please be aware there may be issues associated with "
4974 "your hardware. If you are experiencing problems "
4975 "please contact your Intel or hardware "
4976 "representative who provided you with this "
4977 "hardware.\n");
4978 break;
4979 default:
4980 e_dev_err("Hardware Error: %d\n", err);
4981 }
4982
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);

	/* flush entries out of MAC table */
	memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
	ixgbe_flush_sw_mac_table(adapter);
	ixgbe_mac_set_default_filter(adapter, old_addr);

	/* update SAN MAC vmdq pool selection */
	if (hw->mac.san_mac_rar_index)
4991 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
4992
4993 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
4994 ixgbe_ptp_reset(adapter);
}

/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
5013 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5014 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
5015 }
5016
5017 netdev_tx_reset_queue(txring_txq(tx_ring));
5018
5019 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
5024
5025 tx_ring->next_to_use = 0;
5026 tx_ring->next_to_clean = 0;
}

/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
5035 int i;
5036
5037 for (i = 0; i < adapter->num_rx_queues; i++)
5038 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
5047 int i;
5048
5049 for (i = 0; i < adapter->num_tx_queues; i++)
5050 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
5051}
5052
5053static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
5054{
5055 struct hlist_node *node2;
5056 struct ixgbe_fdir_filter *filter;
5057
5058 spin_lock(&adapter->fdir_perfect_lock);
5059
5060 hlist_for_each_entry_safe(filter, node2,
5061 &adapter->fdir_filter_list, fdir_node) {
5062 hlist_del(&filter->fdir_node);
5063 kfree(filter);
5064 }
5065 adapter->fdir_filter_count = 0;
5066
5067 spin_unlock(&adapter->fdir_perfect_lock);
5068}
5069
5070void ixgbe_down(struct ixgbe_adapter *adapter)
5071{
5072 struct net_device *netdev = adapter->netdev;
5073 struct ixgbe_hw *hw = &adapter->hw;
5074 struct net_device *upper;
5075 struct list_head *iter;
	int i;

	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
		return; /* do nothing if already down */

	/* disable receives */
	hw->mac.ops.disable_rx(hw);

	/* disable all enabled Rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		/* this call also flushes the previous write */
		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
5089
5090 usleep_range(10000, 20000);
5091
	netif_tx_stop_all_queues(netdev);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	/* disable any upper devices */
	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
5100 if (netif_is_macvlan(upper)) {
5101 struct macvlan_dev *vlan = netdev_priv(upper);
5102
5103 if (vlan->fwd_priv) {
5104 netif_tx_stop_all_queues(upper);
5105 netif_carrier_off(upper);
5106 netif_tx_disable(upper);
5107 }
5108 }
5109 }
5110
5111 ixgbe_irq_disable(adapter);
5112
5113 ixgbe_napi_disable_all(adapter);
5114
5115 adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
5116 IXGBE_FLAG2_RESET_REQUESTED);
5117 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5118
5119 del_timer_sync(&adapter->service_timer);
5120
	if (adapter->num_vfs) {
		/* Clear EITR Select mapping */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);

		/* Mark all the VFs as inactive */
		for (i = 0 ; i < adapter->num_vfs; i++)
			adapter->vfinfo[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		ixgbe_ping_all_vfs(adapter);

		/* Disable all VFTE/VFRE TX/RX */
		ixgbe_disable_tx_rx(adapter);
	}

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	}

	/* Disable the Tx DMA engine on 82599 and later MACs */
	switch (hw->mac.type) {
5144 case ixgbe_mac_82599EB:
5145 case ixgbe_mac_X540:
5146 case ixgbe_mac_X550:
5147 case ixgbe_mac_X550EM_x:
5148 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
5149 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
5150 ~IXGBE_DMATXCTL_TE));
5151 break;
5152 default:
5153 break;
5154 }
5155
5156 if (!pci_channel_offline(adapter->pdev))
		ixgbe_reset(adapter);

	/* power down the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser)
5161 hw->mac.ops.disable_tx_laser(hw);
5162
5163 ixgbe_clean_all_tx_rings(adapter);
5164 ixgbe_clean_all_rx_rings(adapter);
5165
#ifdef CONFIG_IXGBE_DCA
	/* since we reset the hardware DCA settings were cleared */
	ixgbe_setup_dca(adapter);
#endif
}

/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	ixgbe_tx_timeout_reset(adapter);
}

/**
 * ixgbe_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
5193{
5194 struct ixgbe_hw *hw = &adapter->hw;
5195 struct pci_dev *pdev = adapter->pdev;
5196 unsigned int rss, fdir;
5197 u32 fwsm;
5198#ifdef CONFIG_IXGBE_DCB
5199 int j;
5200 struct tc_configuration *tc;
#endif

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set common capability flags and settings */
	rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
5213 adapter->ring_feature[RING_F_RSS].limit = rss;
5214 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
5215 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
5216 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
5217 adapter->atr_sample_rate = 20;
5218 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
5219 adapter->ring_feature[RING_F_FDIR].limit = fdir;
5220 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
5221#ifdef CONFIG_IXGBE_DCA
5222 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
5223#endif
5224#ifdef IXGBE_FCOE
5225 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
5226 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
#ifdef CONFIG_IXGBE_DCB
	/* Default traffic class to use for FCoE */
	adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */

	adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
				     hw->mac.num_rar_entries,
				     GFP_ATOMIC);
	if (!adapter->mac_table)
		return -ENOMEM;

	/* Set MAC specific capability flags and exceptions */
	switch (hw->mac.type) {
5239 case ixgbe_mac_82598EB:
5240 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
5241 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
5242
5243 if (hw->device_id == IXGBE_DEV_ID_82598AT)
5244 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
5245
5246 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
5247 adapter->ring_feature[RING_F_FDIR].limit = 0;
5248 adapter->atr_sample_rate = 0;
5249 adapter->fdir_pballoc = 0;
5250#ifdef IXGBE_FCOE
5251 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
5252 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5253#ifdef CONFIG_IXGBE_DCB
5254 adapter->fcoe.up = 0;
5255#endif
5256#endif
5257 break;
5258 case ixgbe_mac_82599EB:
5259 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
5260 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5261 break;
5262 case ixgbe_mac_X540:
5263 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5264 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5265 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
5266 break;
5267 case ixgbe_mac_X550EM_x:
5268 case ixgbe_mac_X550:
5269#ifdef CONFIG_IXGBE_DCA
5270 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
5271#endif
5272 break;
5273 default:
5274 break;
5275 }
5276
#ifdef IXGBE_FCOE
	/* FCoE support exists, always init the FCoE lock */
	spin_lock_init(&adapter->fcoe.lock);

#endif /* IXGBE_FCOE */
	/* n-tuple support exists, always init our spinlock */
	spin_lock_init(&adapter->fdir_perfect_lock);
5284
5285#ifdef CONFIG_IXGBE_DCB
5286 switch (hw->mac.type) {
5287 case ixgbe_mac_X540:
5288 case ixgbe_mac_X550:
5289 case ixgbe_mac_X550EM_x:
5290 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
5291 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
5292 break;
5293 default:
5294 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
5295 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
5296 break;
	}

	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
5301 tc = &adapter->dcb_cfg.tc_config[j];
5302 tc->path[DCB_TX_CONFIG].bwg_id = 0;
5303 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
5304 tc->path[DCB_RX_CONFIG].bwg_id = 0;
5305 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
5306 tc->dcb_pfc = pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &adapter->dcb_cfg.tc_config[0];
5311 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
5312 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
5313
5314 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
5315 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
5316 adapter->dcb_cfg.pfc_mode_enable = false;
5317 adapter->dcb_set_bitmap = 0x00;
5318 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
5319 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
5320 sizeof(adapter->temp_dcb_cfg));
5321
#endif

	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
5327 ixgbe_pbthresh_setup(adapter);
5328 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
5329 hw->fc.send_xon = true;
5330 hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
5331
5332#ifdef CONFIG_PCI_IOV
5333 if (max_vfs > 0)
		e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");

	/* assign number of SR-IOV VFs */
	if (hw->mac.type != ixgbe_mac_82598EB) {
5338 if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
5339 adapter->num_vfs = 0;
5340 e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
5341 } else {
5342 adapter->num_vfs = max_vfs;
5343 }
5344 }
#endif /* CONFIG_PCI_IOV */

	/* enable itr by default in dynamic mode */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* set default work limits */
	adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
5360 e_dev_err("EEPROM initialization failed\n");
5361 return -EIO;
	}

	/* PF holds first pool slot */
	set_bit(0, &adapter->fwd_bitmask);
	set_bit(__IXGBE_DOWN, &adapter->state);
5367
5368 return 0;
}

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
5378{
5379 struct device *dev = tx_ring->dev;
5380 int orig_node = dev_to_node(dev);
5381 int ring_node = -1;
5382 int size;
5383
5384 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
5385
5386 if (tx_ring->q_vector)
5387 ring_node = tx_ring->q_vector->numa_node;
5388
5389 tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
5390 if (!tx_ring->tx_buffer_info)
5391 tx_ring->tx_buffer_info = vzalloc(size);
5392 if (!tx_ring->tx_buffer_info)
5393 goto err;
5394
	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
5399 tx_ring->size = ALIGN(tx_ring->size, 4096);
5400
5401 set_dev_node(dev, ring_node);
5402 tx_ring->desc = dma_alloc_coherent(dev,
5403 tx_ring->size,
5404 &tx_ring->dma,
5405 GFP_KERNEL);
5406 set_dev_node(dev, orig_node);
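	/* if allocation on the ring's NUMA node failed, retry on any node */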
5407 if (!tx_ring->desc)
5408 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
5409 &tx_ring->dma, GFP_KERNEL);
5410 if (!tx_ring->desc)
5411 goto err;
5412
5413 tx_ring->next_to_use = 0;
5414 tx_ring->next_to_clean = 0;
5415 return 0;
5416
5417err:
5418 vfree(tx_ring->tx_buffer_info);
5419 tx_ring->tx_buffer_info = NULL;
5420 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
5421 return -ENOMEM;
}

/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5435{
5436 int i, err = 0;
5437
5438 for (i = 0; i < adapter->num_tx_queues; i++) {
5439 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
5440 if (!err)
5441 continue;
5442
5443 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
5444 goto err_setup_tx;
5445 }
5446
5447 return 0;
err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (i--)
5451 ixgbe_free_tx_resources(adapter->tx_ring[i]);
5452 return err;
}

/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
5462{
5463 struct device *dev = rx_ring->dev;
5464 int orig_node = dev_to_node(dev);
5465 int ring_node = -1;
5466 int size;
5467
5468 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5469
5470 if (rx_ring->q_vector)
5471 ring_node = rx_ring->q_vector->numa_node;
5472
5473 rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
5474 if (!rx_ring->rx_buffer_info)
5475 rx_ring->rx_buffer_info = vzalloc(size);
5476 if (!rx_ring->rx_buffer_info)
5477 goto err;
5478
	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5483 rx_ring->size = ALIGN(rx_ring->size, 4096);
5484
5485 set_dev_node(dev, ring_node);
5486 rx_ring->desc = dma_alloc_coherent(dev,
5487 rx_ring->size,
5488 &rx_ring->dma,
5489 GFP_KERNEL);
5490 set_dev_node(dev, orig_node);
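	/* if allocation on the ring's NUMA node failed, retry on any node */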
5491 if (!rx_ring->desc)
5492 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
5493 &rx_ring->dma, GFP_KERNEL);
5494 if (!rx_ring->desc)
5495 goto err;
5496
5497 rx_ring->next_to_clean = 0;
5498 rx_ring->next_to_use = 0;
5499
5500 return 0;
5501err:
5502 vfree(rx_ring->rx_buffer_info);
5503 rx_ring->rx_buffer_info = NULL;
5504 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
5505 return -ENOMEM;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5519{
5520 int i, err = 0;
5521
5522 for (i = 0; i < adapter->num_rx_queues; i++) {
5523 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
5524 if (!err)
5525 continue;
5526
5527 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
5528 goto err_setup_rx;
5529 }
5530
5531#ifdef IXGBE_FCOE
5532 err = ixgbe_setup_fcoe_ddp_resources(adapter);
5533 if (!err)
5534#endif
5535 return 0;
err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
5539 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5540 return err;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
5550{
5551 ixgbe_clean_tx_ring(tx_ring);
5552
5553 vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
5558 return;
5559
5560 dma_free_coherent(tx_ring->dev, tx_ring->size,
5561 tx_ring->desc, tx_ring->dma);
5562
5563 tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5573{
5574 int i;
5575
5576 for (i = 0; i < adapter->num_tx_queues; i++)
5577 if (adapter->tx_ring[i]->desc)
5578 ixgbe_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
5588{
5589 ixgbe_clean_rx_ring(rx_ring);
5590
5591 vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
5596 return;
5597
5598 dma_free_coherent(rx_ring->dev, rx_ring->size,
5599 rx_ring->desc, rx_ring->dma);
5600
5601 rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5611{
5612 int i;
5613
5614#ifdef IXGBE_FCOE
5615 ixgbe_free_fcoe_ddp_resources(adapter);
5616
5617#endif
5618 for (i = 0; i < adapter->num_rx_queues; i++)
5619 if (adapter->rx_ring[i]->desc)
5620 ixgbe_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5631{
5632 struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	/*
	 * For 82599EB we cannot allow legacy VFs to enable their receive
	 * paths when MTU greater than 1500 is configured.  So display a
	 * warning that legacy VFs will be disabled.
	 */
	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
5645 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
5646 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
5647 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
5648
	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);

	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;
5653
5654 if (netif_running(netdev))
5655 ixgbe_reinit_locked(adapter);
5656
5657 return 0;
}

/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
5673{
5674 struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err, queues;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;
5680
	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
5685 if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
5690 if (err)
5691 goto err_setup_rx;
5692
5693 ixgbe_configure(adapter);
5694
5695 err = ixgbe_request_irq(adapter);
5696 if (err)
		goto err_req_irq;

	/* Notify the stack of the actual queue counts. */
	if (adapter->num_rx_pools > 1)
5701 queues = adapter->num_rx_queues_per_pool;
5702 else
5703 queues = adapter->num_tx_queues;
5704
5705 err = netif_set_real_num_tx_queues(netdev, queues);
5706 if (err)
5707 goto err_set_queues;
5708
5709 if (adapter->num_rx_pools > 1 &&
5710 adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
5711 queues = IXGBE_MAX_L2A_QUEUES;
5712 else
5713 queues = adapter->num_rx_queues;
5714 err = netif_set_real_num_rx_queues(netdev, queues);
5715 if (err)
5716 goto err_set_queues;
5717
5718 ixgbe_ptp_init(adapter);
5719
5720 ixgbe_up_complete(adapter);
5721
5722#if IS_ENABLED(CONFIG_IXGBE_VXLAN)
5723 vxlan_get_rx_port(netdev);
5724
5725#endif
5726 return 0;
5727
5728err_set_queues:
5729 ixgbe_free_irq(adapter);
5730err_req_irq:
5731 ixgbe_free_all_rx_resources(adapter);
5732err_setup_rx:
5733 ixgbe_free_all_tx_resources(adapter);
5734err_setup_tx:
5735 ixgbe_reset(adapter);
5736
5737 return err;
5738}
5739
5740static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
5741{
5742 ixgbe_ptp_suspend(adapter);
5743
5744 ixgbe_down(adapter);
5745 ixgbe_free_irq(adapter);
5746
5747 ixgbe_free_all_tx_resources(adapter);
5748 ixgbe_free_all_rx_resources(adapter);
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
5763{
5764 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5765
5766 ixgbe_ptp_stop(adapter);
5767
5768 ixgbe_close_suspend(adapter);
5769
5770 ixgbe_fdir_filter_exit(adapter);
5771
5772 ixgbe_release_hw_control(adapter);
5773
5774 return 0;
5775}
5776
5777#ifdef CONFIG_PM
5778static int ixgbe_resume(struct pci_dev *pdev)
5779{
5780 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5781 struct net_device *netdev = adapter->netdev;
5782 u32 err;
5783
5784 adapter->hw.hw_addr = adapter->io_addr;
5785 pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);
5792
5793 err = pci_enable_device_mem(pdev);
5794 if (err) {
5795 e_dev_err("Cannot enable PCI device from suspend\n");
5796 return err;
5797 }
5798 smp_mb__before_atomic();
5799 clear_bit(__IXGBE_DISABLED, &adapter->state);
5800 pci_set_master(pdev);
5801
5802 pci_wake_from_d3(pdev, false);
5803
5804 ixgbe_reset(adapter);
5805
5806 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5807
5808 rtnl_lock();
5809 err = ixgbe_init_interrupt_scheme(adapter);
5810 if (!err && netif_running(netdev))
5811 err = ixgbe_open(netdev);
5812
5813 rtnl_unlock();
5814
5815 if (err)
5816 return err;
5817
5818 netif_device_attach(netdev);
5819
5820 return 0;
5821}
5822#endif
5823
5824static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5825{
5826 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5827 struct net_device *netdev = adapter->netdev;
5828 struct ixgbe_hw *hw = &adapter->hw;
5829 u32 ctrl, fctrl;
5830 u32 wufc = adapter->wol;
5831#ifdef CONFIG_PM
5832 int retval = 0;
5833#endif
5834
5835 netif_device_detach(netdev);
5836
5837 rtnl_lock();
5838 if (netif_running(netdev))
5839 ixgbe_close_suspend(adapter);
5840 rtnl_unlock();
5841
5842 ixgbe_clear_interrupt_scheme(adapter);
5843
5844#ifdef CONFIG_PM
5845 retval = pci_save_state(pdev);
5846 if (retval)
5847 return retval;
5848
5849#endif
5850 if (hw->mac.ops.stop_link_on_d3)
5851 hw->mac.ops.stop_link_on_d3(hw);
5852
5853 if (wufc) {
		ixgbe_set_rx_mode(netdev);

		/* enable the optics for 82599 SFP+ fiber as we can WoL */
		if (hw->mac.ops.enable_tx_laser)
			hw->mac.ops.enable_tx_laser(hw);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IXGBE_WUFC_MC) {
5862 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5863 fctrl |= IXGBE_FCTRL_MPE;
5864 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5865 }
5866
5867 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
5868 ctrl |= IXGBE_CTRL_GIO_DIS;
5869 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
5870
5871 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
5872 } else {
5873 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
5874 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5875 }
5876
5877 switch (hw->mac.type) {
5878 case ixgbe_mac_82598EB:
5879 pci_wake_from_d3(pdev, false);
5880 break;
5881 case ixgbe_mac_82599EB:
5882 case ixgbe_mac_X540:
5883 case ixgbe_mac_X550:
5884 case ixgbe_mac_X550EM_x:
5885 pci_wake_from_d3(pdev, !!wufc);
5886 break;
5887 default:
5888 break;
5889 }
5890
5891 *enable_wake = !!wufc;
5892
5893 ixgbe_release_hw_control(adapter);
5894
5895 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
5896 pci_disable_device(pdev);
5897
5898 return 0;
5899}
5900
5901#ifdef CONFIG_PM
5902static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
5903{
5904 int retval;
5905 bool wake;
5906
5907 retval = __ixgbe_shutdown(pdev, &wake);
5908 if (retval)
5909 return retval;
5910
5911 if (wake) {
5912 pci_prepare_to_sleep(pdev);
5913 } else {
5914 pci_wake_from_d3(pdev, false);
5915 pci_set_power_state(pdev, PCI_D3hot);
5916 }
5917
5918 return 0;
5919}
5920#endif
5921
5922static void ixgbe_shutdown(struct pci_dev *pdev)
5923{
5924 bool wake;
5925
5926 __ixgbe_shutdown(pdev, &wake);
5927
5928 if (system_state == SYSTEM_POWER_OFF) {
5929 pci_wake_from_d3(pdev, wake);
5930 pci_set_power_state(pdev, PCI_D3hot);
5931 }
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5939{
5940 struct net_device *netdev = adapter->netdev;
5941 struct ixgbe_hw *hw = &adapter->hw;
5942 struct ixgbe_hw_stats *hwstats = &adapter->stats;
5943 u64 total_mpc = 0;
5944 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
5945 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
5946 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
5947 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
5948
5949 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5950 test_bit(__IXGBE_RESETTING, &adapter->state))
5951 return;
5952
5953 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
5954 u64 rsc_count = 0;
5955 u64 rsc_flush = 0;
5956 for (i = 0; i < adapter->num_rx_queues; i++) {
5957 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
5958 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
5959 }
5960 adapter->rsc_total_count = rsc_count;
5961 adapter->rsc_total_flush = rsc_flush;
5962 }
5963
5964 for (i = 0; i < adapter->num_rx_queues; i++) {
5965 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
5966 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
5967 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
5968 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
5969 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
5970 bytes += rx_ring->stats.bytes;
5971 packets += rx_ring->stats.packets;
5972 }
5973 adapter->non_eop_descs = non_eop_descs;
5974 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
5975 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
5976 adapter->hw_csum_rx_error = hw_csum_rx_error;
5977 netdev->stats.rx_bytes = bytes;
5978 netdev->stats.rx_packets = packets;
5979
5980 bytes = 0;
5981 packets = 0;
5982
5983 for (i = 0; i < adapter->num_tx_queues; i++) {
5984 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5985 restart_queue += tx_ring->tx_stats.restart_queue;
5986 tx_busy += tx_ring->tx_stats.tx_busy;
5987 bytes += tx_ring->stats.bytes;
5988 packets += tx_ring->stats.packets;
5989 }
5990 adapter->restart_queue = restart_queue;
5991 adapter->tx_busy = tx_busy;
5992 netdev->stats.tx_bytes = bytes;
5993 netdev->stats.tx_packets = packets;
5994
	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);

	/* 8 register reads */
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
6001 missed_rx += mpc;
6002 hwstats->mpc[i] += mpc;
6003 total_mpc += hwstats->mpc[i];
6004 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
6005 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
6006 switch (hw->mac.type) {
6007 case ixgbe_mac_82598EB:
6008 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
6009 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
6010 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
6011 hwstats->pxonrxc[i] +=
6012 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
6013 break;
6014 case ixgbe_mac_82599EB:
6015 case ixgbe_mac_X540:
6016 case ixgbe_mac_X550:
6017 case ixgbe_mac_X550EM_x:
6018 hwstats->pxonrxc[i] +=
6019 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
6020 break;
6021 default:
6022 break;
6023 }
	}

	/* 16 register reads */
	for (i = 0; i < 16; i++) {
6028 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
6029 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
6030 if ((hw->mac.type == ixgbe_mac_82599EB) ||
6031 (hw->mac.type == ixgbe_mac_X540) ||
6032 (hw->mac.type == ixgbe_mac_X550) ||
6033 (hw->mac.type == ixgbe_mac_X550EM_x)) {
			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
6038 }
6039 }
6040
	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	hwstats->gprc -= missed_rx;

	ixgbe_update_xoff_received(adapter);

	/* 82598 hardware only has a 32 bit counter in the high register */
	switch (hw->mac.type) {
6049 case ixgbe_mac_82598EB:
6050 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
6051 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
6052 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
6053 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
6054 break;
6055 case ixgbe_mac_X540:
6056 case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		/* OS2BMC stats are X540 and later */
		hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
6060 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
6061 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
6062 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
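		/* fall through */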
6063 case ixgbe_mac_82599EB:
6064 for (i = 0; i < 16; i++)
6065 adapter->hw_rx_no_dma_resources +=
6066 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
6073 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
6074 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
6075 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
6076#ifdef IXGBE_FCOE
6077 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
6078 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
6079 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
6080 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
6081 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
6082 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
6083
6084 if (adapter->fcoe.ddp_pool) {
6085 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
6086 struct ixgbe_fcoe_ddp_pool *ddp_pool;
6087 unsigned int cpu;
6088 u64 noddp = 0, noddp_ext_buff = 0;
6089 for_each_possible_cpu(cpu) {
6090 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
6091 noddp += ddp_pool->noddp;
6092 noddp_ext_buff += ddp_pool->noddp_ext_buff;
6093 }
6094 hwstats->fcoe_noddp = noddp;
6095 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
6096 }
6097#endif
6098 break;
6099 default:
6100 break;
6101 }
6102 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
6103 hwstats->bprc += bprc;
6104 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
6105 if (hw->mac.type == ixgbe_mac_82598EB)
6106 hwstats->mprc -= bprc;
6107 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
6108 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
6109 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
6110 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
6111 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
6112 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
6113 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
6114 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
6115 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
6116 hwstats->lxontxc += lxon;
6117 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
6118 hwstats->lxofftxc += lxoff;
6119 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets (adds 1 to txd aggregates)
	 */
	xon_off_tot = lxon + lxoff;
6125 hwstats->gptc -= xon_off_tot;
6126 hwstats->mptc -= xon_off_tot;
6127 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
6128 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
6129 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
6130 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
6131 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
6132 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
6133 hwstats->ptc64 -= xon_off_tot;
6134 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
6135 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
6136 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
6137 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
6138 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = hwstats->mprc;

	/* Rx Errors */
	netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
6146 netdev->stats.rx_dropped = 0;
6147 netdev->stats.rx_length_errors = hwstats->rlec;
6148 netdev->stats.rx_crc_errors = hwstats->crcerrs;
6149 netdev->stats.rx_missed_errors = total_mpc;
}

/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
6157{
6158 struct ixgbe_hw *hw = &adapter->hw;
6159 int i;
6160
6161 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
6162 return;
6163
	adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	/* do nothing if we are not using signature filters */
	if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
		return;
6173
6174 adapter->fdir_overflow++;
6175
6176 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
6177 for (i = 0; i < adapter->num_tx_queues; i++)
6178 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
				&(adapter->tx_ring[i]->state));
		/* re-enable flow director interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
6182 } else {
6183 e_err(probe, "failed to finish FDIR re-initialization, "
6184 "ignored adding FDIR ATR filters\n");
6185 }
}

/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 */
static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
6198{
6199 struct ixgbe_hw *hw = &adapter->hw;
6200 u64 eics = 0;
	int i;

	/* If we're down, removing or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
6211 for (i = 0; i < adapter->num_tx_queues; i++)
6212 set_check_for_tx_hang(adapter->tx_ring[i]);
6213 }
6214
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
				(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
	} else {
		/* get one bit for every active Tx/Rx interrupt vector */
		for (i = 0; i < adapter->num_q_vectors; i++) {
6226 struct ixgbe_q_vector *qv = adapter->q_vector[i];
6227 if (qv->rx.ring || qv->tx.ring)
6228 eics |= ((u64)1 << i);
6229 }
	}

	/* Cause software interrupt to ensure rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);
}

/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
6242{
6243 struct ixgbe_hw *hw = &adapter->hw;
6244 u32 link_speed = adapter->link_speed;
6245 bool link_up = adapter->link_up;
6246 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
6247
6248 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
6249 return;
6250
6251 if (hw->mac.ops.check_link) {
6252 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	} else {
		/* always assume link is up, if no check link function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
6256 link_up = true;
6257 }
6258
6259 if (adapter->ixgbe_ieee_pfc)
6260 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
6261
6262 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
6263 hw->mac.ops.fc_enable(hw);
6264 ixgbe_set_rx_drop_en(adapter);
6265 }
6266
6267 if (link_up ||
6268 time_after(jiffies, (adapter->link_check_timeout +
6269 IXGBE_TRY_LINK_TIMEOUT))) {
6270 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6271 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
6272 IXGBE_WRITE_FLUSH(hw);
6273 }
6274
6275 adapter->link_up = link_up;
6276 adapter->link_speed = link_speed;
6277}
6278
6279static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
6280{
6281#ifdef CONFIG_IXGBE_DCB
6282 struct net_device *netdev = adapter->netdev;
6283 struct dcb_app app = {
6284 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
6285 .protocol = 0,
6286 };
6287 u8 up = 0;
6288
6289 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
6290 up = dcb_ieee_getapp_mask(netdev, &app);
6291
6292 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
6293#endif
}

/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
6302{
6303 struct net_device *netdev = adapter->netdev;
6304 struct ixgbe_hw *hw = &adapter->hw;
6305 struct net_device *upper;
6306 struct list_head *iter;
6307 u32 link_speed = adapter->link_speed;
6308 bool flow_rx, flow_tx;
6309
6310
6311 if (netif_carrier_ok(netdev))
6312 return;
6313
6314 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
6315
6316 switch (hw->mac.type) {
6317 case ixgbe_mac_82598EB: {
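		/* 82598 reports Rx flow control in FCTRL, Tx flow control in RMCS */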
6318 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6319 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
6320 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
6321 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
6322 }
6323 break;
6324 case ixgbe_mac_X540:
6325 case ixgbe_mac_X550:
6326 case ixgbe_mac_X550EM_x:
6327 case ixgbe_mac_82599EB: {
6328 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6329 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6330 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6331 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
6332 }
6333 break;
6334 default:
6335 flow_tx = false;
6336 flow_rx = false;
6337 break;
6338 }
6339
6340 adapter->last_rx_ptp_check = jiffies;
6341
6342 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6343 ixgbe_ptp_start_cyclecounter(adapter);
6344
6345 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
6346 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
6347 "10 Gbps" :
6348 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
6349 "1 Gbps" :
6350 (link_speed == IXGBE_LINK_SPEED_100_FULL ?
6351 "100 Mbps" :
6352 "unknown speed"))),
6353 ((flow_rx && flow_tx) ? "RX/TX" :
6354 (flow_rx ? "RX" :
6355 (flow_tx ? "TX" : "None"))));
6356
6357 netif_carrier_on(netdev);
	ixgbe_check_vf_rate_limit(adapter);

	/* enable transmits */
	netif_tx_wake_all_queues(adapter->netdev);

	/* enable any upper devices */
	rtnl_lock();
	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
6366 if (netif_is_macvlan(upper)) {
6367 struct macvlan_dev *vlan = netdev_priv(upper);
6368
6369 if (vlan->fwd_priv)
6370 netif_tx_wake_all_queues(upper);
6371 }
6372 }
	rtnl_unlock();

	/* update the default user priority for VFs */
	ixgbe_update_default_up(adapter);

	/* ping all the active vfs to let them know link has changed */
	ixgbe_ping_all_vfs(adapter);
}

/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter: pointer to the adapter structure
 **/
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
6388{
6389 struct net_device *netdev = adapter->netdev;
6390 struct ixgbe_hw *hw = &adapter->hw;
6391
6392 adapter->link_up = false;
	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	/* poll for SFP+ cable when link is down */
	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
6401 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
6402
6403 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
6404 ixgbe_ptp_start_cyclecounter(adapter);
6405
6406 e_info(drv, "NIC Link is Down\n");
	netif_carrier_off(netdev);

	/* ping all the active vfs to let them know link has changed */
	ixgbe_ping_all_vfs(adapter);
6411}
6412
6413static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
6414{
6415 int i;
6416
6417 for (i = 0; i < adapter->num_tx_queues; i++) {
6418 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6419
6420 if (tx_ring->next_to_use != tx_ring->next_to_clean)
6421 return true;
6422 }
6423
6424 return false;
6425}
6426
6427static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
6428{
6429 struct ixgbe_hw *hw = &adapter->hw;
6430 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
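	/* number of Tx queues in each VMDq pool, derived from the pool mask */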
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
	int i, j;

	if (!adapter->num_vfs)
		return false;

	/* VF Tx queue checks are not supported on X550 and newer devices */
	if (hw->mac.type >= ixgbe_mac_X550)
6440 return false;
6441
6442 for (i = 0; i < adapter->num_vfs; i++) {
6443 for (j = 0; j < q_per_pool; j++) {
6444 u32 h, t;
6445
6446 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
6447 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
6448
6449 if (h != t)
6450 return true;
6451 }
6452 }
6453
6454 return false;
}

/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
6462{
6463 if (!netif_carrier_ok(adapter->netdev)) {
6464 if (ixgbe_ring_tx_pending(adapter) ||
		    ixgbe_vf_tx_pending(adapter)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			e_warn(drv, "initiating reset to clear Tx work after link loss\n");
6472 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
6473 }
6474 }
6475}
6476
6477#ifdef CONFIG_PCI_IOV
6478static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
6479 struct pci_dev *vfdev)
6480{
6481 if (!pci_wait_for_pending_transaction(vfdev))
6482 e_dev_warn("Issuing VFLR with pending transactions\n");
6483
6484 e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
	pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);

	/* allow up to 100ms for the function level reset to complete */
	msleep(100);
6488}
6489
6490static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
6491{
6492 struct ixgbe_hw *hw = &adapter->hw;
6493 struct pci_dev *pdev = adapter->pdev;
6494 struct pci_dev *vfdev;
6495 u32 gpc;
6496 int pos;
6497 unsigned short vf_id;
6498
6499 if (!(netif_carrier_ok(adapter->netdev)))
6500 return;
6501
	gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
	if (gpc) /* If incrementing then no need for the check below */
		return;

	/* Check to see if a bad DMA write target from an errant or
	 * malicious VF has caused a PCIe error.  If so then we can
	 * issue a VFLR to the offending VF(s) and then resume without
	 * requesting a full slot reset.
	 */
	if (!pdev)
		return;
6513
6514 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
6515 if (!pos)
		return;

	/* get the device ID for the VF */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);

	/* check status reg for all VFs owned by this PF */
	vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
6523 while (vfdev) {
6524 if (vfdev->is_virtfn && (vfdev->physfn == pdev)) {
6525 u16 status_reg;
6526
6527 pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
			if (status_reg & PCI_STATUS_REC_MASTER_ABORT)
				/* issue VFLR */
				ixgbe_issue_vf_flr(adapter, vfdev);
6531 }
6532
6533 vfdev = pci_get_device(pdev->vendor, vf_id, vfdev);
6534 }
6535}
6536
6537static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
6538{
	u32 ssvpc;

	/* Do not perform spoof check for 82598 or if not in IOV mode */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		return;

	ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);

	/*
	 * ssvpc register is cleared on read, if zero then no
	 * spoofed packets in the last interval.
	 */
	if (!ssvpc)
		return;
6554
6555 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
6556}
6557#else
6558static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
6559{
6560}
6561
6562static void
6563ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
6564{
6565}
#endif /* CONFIG_PCI_IOV */

/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
	/* if interface is down, removing or resetting, do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6577 test_bit(__IXGBE_REMOVING, &adapter->state) ||
6578 test_bit(__IXGBE_RESETTING, &adapter->state))
6579 return;
6580
6581 ixgbe_watchdog_update_link(adapter);
6582
6583 if (adapter->link_up)
6584 ixgbe_watchdog_link_is_up(adapter);
6585 else
6586 ixgbe_watchdog_link_is_down(adapter);
6587
6588 ixgbe_check_for_bad_vf(adapter);
6589 ixgbe_spoof_check(adapter);
6590 ixgbe_update_stats(adapter);
6591
6592 ixgbe_watchdog_flush_tx(adapter);
}

/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
6600{
6601 struct ixgbe_hw *hw = &adapter->hw;
	s32 err;

	/* not searching for SFP so there is nothing to do here */
	if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		return;

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;
6612
6613 err = hw->phy.ops.identify_sfp(hw);
6614 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6615 goto sfp_out;
6616
	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* If no cable is present, then we need to reset
		 * the next time we find a good cable. */
		adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
	}

	/* exit on error */
	if (err)
		goto sfp_out;

	/* exit if reset not needed */
	if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		goto sfp_out;

	adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;

	/*
	 * A module may be identified correctly, but the EEPROM may not have
	 * support for that module.  setup_sfp() will fail in that case, so
	 * we should not allow that module to load.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
6639 err = hw->phy.ops.reset(hw);
6640 else
6641 err = hw->mac.ops.setup_sfp(hw);
6642
6643 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6644 goto sfp_out;
6645
6646 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
6647 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
6648
6649sfp_out:
6650 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6651
6652 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
6653 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
6654 e_dev_err("failed to initialize because an unsupported "
6655 "SFP+ module type was detected.\n");
6656 e_dev_err("Reload the driver after installing a "
6657 "supported module.\n");
6658 unregister_netdev(adapter->netdev);
6659 }
}

/**
 * ixgbe_sfp_link_config_subtask - set up link SFP if needed
 * @adapter: the ixgbe adapter structure
 **/
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
6667{
6668 struct ixgbe_hw *hw = &adapter->hw;
6669 u32 speed;
6670 bool autoneg = false;
6671
6672 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
		return;

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;
6678
6679 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
6680
6681 speed = hw->phy.autoneg_advertised;
6682 if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
		hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);

		/* setup the highest link when no autoneg */
		if (!autoneg) {
			if (speed & IXGBE_LINK_SPEED_10GB_FULL)
6688 speed = IXGBE_LINK_SPEED_10GB_FULL;
6689 }
6690 }
6691
6692 if (hw->mac.ops.setup_link)
6693 hw->mac.ops.setup_link(hw, speed, true);
6694
6695 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
6696 adapter->link_check_timeout = jiffies;
6697 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}

/**
 * ixgbe_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_service_timer(unsigned long data)
6705{
6706 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	unsigned long next_event_offset;

	/* poll faster when waiting for link */
	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
		next_event_offset = HZ / 10;
	else
		next_event_offset = HZ * 2;

	/* Reset the timer */
	mod_timer(&adapter->service_timer, next_event_offset + jiffies);
6717
6718 ixgbe_service_event_schedule(adapter);
6719}
6720
6721static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
6722{
6723 if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
6724 return;
6725
	adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6730 test_bit(__IXGBE_REMOVING, &adapter->state) ||
6731 test_bit(__IXGBE_RESETTING, &adapter->state))
6732 return;
6733
6734 ixgbe_dump(adapter);
6735 netdev_err(adapter->netdev, "Reset adapter\n");
6736 adapter->tx_timeout_count++;
6737
6738 rtnl_lock();
6739 ixgbe_reinit_locked(adapter);
6740 rtnl_unlock();
}

/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_service_task(struct work_struct *work)
6748{
6749 struct ixgbe_adapter *adapter = container_of(work,
6750 struct ixgbe_adapter,
6751 service_task);
6752 if (ixgbe_removed(adapter->hw.hw_addr)) {
6753 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
6754 rtnl_lock();
6755 ixgbe_down(adapter);
6756 rtnl_unlock();
6757 }
6758 ixgbe_service_event_complete(adapter);
6759 return;
6760 }
6761 ixgbe_reset_subtask(adapter);
6762 ixgbe_sfp_detection_subtask(adapter);
6763 ixgbe_sfp_link_config_subtask(adapter);
6764 ixgbe_check_overtemp_subtask(adapter);
6765 ixgbe_watchdog_subtask(adapter);
6766 ixgbe_fdir_reinit_subtask(adapter);
6767 ixgbe_check_hang_subtask(adapter);
6768
6769 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
6770 ixgbe_ptp_overflow_check(adapter);
6771 ixgbe_ptp_rx_hang(adapter);
6772 }
6773
6774 ixgbe_service_event_complete(adapter);
6775}
6776
6777static int ixgbe_tso(struct ixgbe_ring *tx_ring,
6778 struct ixgbe_tx_buffer *first,
6779 u8 *hdr_len)
6780{
6781 struct sk_buff *skb = first->skb;
6782 u32 vlan_macip_lens, type_tucmd;
6783 u32 mss_l4len_idx, l4len;
6784 int err;
6785
6786 if (skb->ip_summed != CHECKSUM_PARTIAL)
6787 return 0;
6788
6789 if (!skb_is_gso(skb))
6790 return 0;
6791
6792 err = skb_cow_head(skb, 0);
6793 if (err < 0)
		return err;

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
6798
6799 if (first->protocol == htons(ETH_P_IP)) {
6800 struct iphdr *iph = ip_hdr(skb);
6801 iph->tot_len = 0;
6802 iph->check = 0;
6803 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6804 iph->daddr, 0,
6805 IPPROTO_TCP,
6806 0);
6807 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
6808 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
6809 IXGBE_TX_FLAGS_CSUM |
6810 IXGBE_TX_FLAGS_IPV4;
6811 } else if (skb_is_gso_v6(skb)) {
6812 ipv6_hdr(skb)->payload_len = 0;
6813 tcp_hdr(skb)->check =
6814 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
6815 &ipv6_hdr(skb)->daddr,
6816 0, IPPROTO_TCP, 0);
6817 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
6818 IXGBE_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 0 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
6835 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
6836 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
6837
6838 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
6839 mss_l4len_idx);
6840
6841 return 1;
6842}
6843
6844static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
6845 struct ixgbe_tx_buffer *first)
6846{
6847 struct sk_buff *skb = first->skb;
6848 u32 vlan_macip_lens = 0;
6849 u32 mss_l4len_idx = 0;
6850 u32 type_tucmd = 0;
6851
6852 if (skb->ip_summed != CHECKSUM_PARTIAL) {
6853 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
6854 !(first->tx_flags & IXGBE_TX_FLAGS_CC))
6855 return;
6856 } else {
6857 u8 l4_hdr = 0;
6858 switch (first->protocol) {
6859 case htons(ETH_P_IP):
6860 vlan_macip_lens |= skb_network_header_len(skb);
6861 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
6862 l4_hdr = ip_hdr(skb)->protocol;
6863 break;
6864 case htons(ETH_P_IPV6):
6865 vlan_macip_lens |= skb_network_header_len(skb);
6866 l4_hdr = ipv6_hdr(skb)->nexthdr;
6867 break;
6868 default:
6869 if (unlikely(net_ratelimit())) {
6870 dev_warn(tx_ring->dev,
6871 "partial checksum but proto=%x!\n",
6872 first->protocol);
6873 }
6874 break;
6875 }
6876
6877 switch (l4_hdr) {
6878 case IPPROTO_TCP:
6879 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
6880 mss_l4len_idx = tcp_hdrlen(skb) <<
6881 IXGBE_ADVTXD_L4LEN_SHIFT;
6882 break;
6883 case IPPROTO_SCTP:
6884 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
6885 mss_l4len_idx = sizeof(struct sctphdr) <<
6886 IXGBE_ADVTXD_L4LEN_SHIFT;
6887 break;
6888 case IPPROTO_UDP:
6889 mss_l4len_idx = sizeof(struct udphdr) <<
6890 IXGBE_ADVTXD_L4LEN_SHIFT;
6891 break;
6892 default:
6893 if (unlikely(net_ratelimit())) {
6894 dev_warn(tx_ring->dev,
6895 "partial checksum but l4 proto=%x!\n",
6896 l4_hdr);
6897 }
6898 break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
6907 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
6908
6909 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
6910 type_tucmd, mss_l4len_idx);
6911}
6912
6913#define IXGBE_SET_FLAG(_input, _flag, _result) \
6914 ((_flag <= _result) ? \
6915 ((u32)(_input & _flag) * (_result / _flag)) : \
6916 ((u32)(_input & _flag) / (_flag / _result)))
6917
6918static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
		       IXGBE_ADVTXD_DCMD_DEXT |
		       IXGBE_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
				   IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
				   IXGBE_ADVTXD_DCMD_TSE);

	/* set timestamp bit if present */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
				   IXGBE_ADVTXD_MAC_TSTAMP);

	/* insert frame checksum */
	cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
6939
6940 return cmd_type;
6941}
6942
6943static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
6944 u32 tx_flags, unsigned int paylen)
6945{
	u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* enable L4 checksum for TSO and TX checksum offload */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_CSUM,
					IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_IPV4,
					IXGBE_ADVTXD_POPTS_IXSM);

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_CC,
					IXGBE_ADVTXD_CC);
6965
6966 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6967}
6968
6969static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6970{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbe_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
6987 ++tx_ring->tx_stats.restart_queue;
6988 return 0;
6989}
6990
6991static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6992{
6993 if (likely(ixgbe_desc_unused(tx_ring) >= size))
6994 return 0;
6995
6996 return __ixgbe_maybe_stop_tx(tx_ring, size);
6997}
6998
6999#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
7000 IXGBE_TXD_CMD_RS)
7001
7002static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
7003 struct ixgbe_tx_buffer *first,
7004 const u8 hdr_len)
7005{
7006 struct sk_buff *skb = first->skb;
7007 struct ixgbe_tx_buffer *tx_buffer;
7008 union ixgbe_adv_tx_desc *tx_desc;
7009 struct skb_frag_struct *frag;
7010 dma_addr_t dma;
7011 unsigned int data_len, size;
7012 u32 tx_flags = first->tx_flags;
7013 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
7014 u16 i = tx_ring->next_to_use;
7015
7016 tx_desc = IXGBE_TX_DESC(tx_ring, i);
7017
7018 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
7019
7020 size = skb_headlen(skb);
7021 data_len = skb->data_len;
7022
7023#ifdef IXGBE_FCOE
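	/* exclude the FCoE CRC/EOF trailer from the DMA mapped length;
	 * with FCoE offload enabled the hardware supplies the trailer
	 */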
7024 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
7025 if (data_len < sizeof(struct fcoe_crc_eof)) {
7026 size -= sizeof(struct fcoe_crc_eof) - data_len;
7027 data_len = 0;
7028 } else {
7029 data_len -= sizeof(struct fcoe_crc_eof);
7030 }
7031 }
7032
7033#endif
7034 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
7035
7036 tx_buffer = first;
7037
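	/* walk the linear data and then each fragment, splitting any
	 * buffer larger than IXGBE_MAX_DATA_PER_TXD across descriptors
	 */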
7038 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
7039 if (dma_mapping_error(tx_ring->dev, dma))
7040 goto dma_error;

		/* record length, and DMA address */
7043 dma_unmap_len_set(tx_buffer, len, size);
7044 dma_unmap_addr_set(tx_buffer, dma, dma);
7045
7046 tx_desc->read.buffer_addr = cpu_to_le64(dma);
7047
7048 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
7049 tx_desc->read.cmd_type_len =
7050 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
7051
7052 i++;
7053 tx_desc++;
7054 if (i == tx_ring->count) {
7055 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7056 i = 0;
7057 }
7058 tx_desc->read.olinfo_status = 0;
7059
7060 dma += IXGBE_MAX_DATA_PER_TXD;
7061 size -= IXGBE_MAX_DATA_PER_TXD;
7062
7063 tx_desc->read.buffer_addr = cpu_to_le64(dma);
7064 }
7065
7066 if (likely(!data_len))
7067 break;
7068
7069 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
7070
7071 i++;
7072 tx_desc++;
7073 if (i == tx_ring->count) {
7074 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
7075 i = 0;
7076 }
7077 tx_desc->read.olinfo_status = 0;
7078
7079#ifdef IXGBE_FCOE
7080 size = min_t(unsigned int, data_len, skb_frag_size(frag));
7081#else
7082 size = skb_frag_size(frag);
7083#endif
7084 data_len -= size;
7085
7086 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
7087 DMA_TO_DEVICE);
7088
7089 tx_buffer = &tx_ring->tx_buffer_info[i];
7090 }

	/* write last descriptor with RS and EOP bits */
7093 cmd_type |= size | IXGBE_TXD_CMD;
7094 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
7095
7096 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
7099 first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
7109 wmb();

	/* set next_to_watch value indicating a packet is present */
7112 first->next_to_watch = tx_desc;
7113
7114 i++;
7115 if (i == tx_ring->count)
7116 i = 0;
7117
7118 tx_ring->next_to_use = i;
7119
7120 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
7121
7122 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
7123 writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
7128 mmiowb();
7129 }
7130
7131 return;
7132dma_error:
7133 dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
7136 for (;;) {
7137 tx_buffer = &tx_ring->tx_buffer_info[i];
7138 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
7139 if (tx_buffer == first)
7140 break;
7141 if (i == 0)
7142 i = tx_ring->count;
7143 i--;
7144 }
7145
7146 tx_ring->next_to_use = i;
7147}
7148
7149static void ixgbe_atr(struct ixgbe_ring *ring,
7150 struct ixgbe_tx_buffer *first)
7151{
7152 struct ixgbe_q_vector *q_vector = ring->q_vector;
7153 union ixgbe_atr_hash_dword input = { .dword = 0 };
7154 union ixgbe_atr_hash_dword common = { .dword = 0 };
7155 union {
7156 unsigned char *network;
7157 struct iphdr *ipv4;
7158 struct ipv6hdr *ipv6;
7159 } hdr;
7160 struct tcphdr *th;
7161 __be16 vlan_id;

	/* if ring doesn't have an interrupt vector, cannot perform ATR */
7164 if (!q_vector)
7165 return;

	/* do nothing if sampling is disabled */
7168 if (!ring->atr_sample_rate)
7169 return;
7170
7171 ring->atr_count++;

	/* snag network header to get L4 type and address */
7174 hdr.network = skb_network_header(first->skb);

	/* Currently only IPv4/IPv6 with TCP is supported */
7177 if ((first->protocol != htons(ETH_P_IPV6) ||
7178 hdr.ipv6->nexthdr != IPPROTO_TCP) &&
7179 (first->protocol != htons(ETH_P_IP) ||
7180 hdr.ipv4->protocol != IPPROTO_TCP))
7181 return;
7182
7183 th = tcp_hdr(first->skb);

	/* skip this packet since it is invalid or the socket is closing */
7186 if (!th || th->fin)
7187 return;

	/* sample on all syn packets or once every atr sample count */
7190 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
7191 return;

	/* reset sample count */
7194 ring->atr_count = 0;
7195
7196 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);

	/*
	 * src and dst are inverted, think how the receiver sees them
	 *
	 * The input is broken into two sections, a non-compressed section
	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
	 * is XORed together and stored in the compressed dword.
	 */
7205 input.formatted.vlan_id = vlan_id;

	/*
	 * since src port and flex bytes occupy the same word XOR them together
	 * and write the value to source port portion of compressed dword
	 */
7211 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
7212 common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
7213 else
7214 common.port.src ^= th->dest ^ first->protocol;
7215 common.port.dst ^= th->source;
7216
7217 if (first->protocol == htons(ETH_P_IP)) {
7218 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
7219 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
7220 } else {
7221 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
7222 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
7223 hdr.ipv6->saddr.s6_addr32[1] ^
7224 hdr.ipv6->saddr.s6_addr32[2] ^
7225 hdr.ipv6->saddr.s6_addr32[3] ^
7226 hdr.ipv6->daddr.s6_addr32[0] ^
7227 hdr.ipv6->daddr.s6_addr32[1] ^
7228 hdr.ipv6->daddr.s6_addr32[2] ^
7229 hdr.ipv6->daddr.s6_addr32[3];
7230 }

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
7233 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
7234 input, common, ring->queue_index);
7235}
7236
7237static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
7238 void *accel_priv, select_queue_fallback_t fallback)
7239{
7240 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
7241#ifdef IXGBE_FCOE
7242 struct ixgbe_adapter *adapter;
7243 struct ixgbe_ring_feature *f;
7244 int txq;
7245#endif
7246
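	/* queues of an offloaded macvlan sit at a fixed offset within the
	 * lower device's ring array
	 */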
7247 if (fwd_adapter)
7248 return skb->queue_mapping + fwd_adapter->tx_base_queue;
7249
7250#ifdef IXGBE_FCOE

	/*
	 * only execute the code below if protocol is FCoE
	 * or FIP and we have FCoE enabled on the adapter
	 */
7256 switch (vlan_get_protocol(skb)) {
7257 case htons(ETH_P_FCOE):
7258 case htons(ETH_P_FIP):
7259 adapter = netdev_priv(dev);
7260
7261 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
7262 break;
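		/* fall through */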
7263 default:
7264 return fallback(dev, skb);
7265 }
7266
7267 f = &adapter->ring_feature[RING_F_FCOE];
7268
7269 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
7270 smp_processor_id();
7271
7272 while (txq >= f->indices)
7273 txq -= f->indices;
7274
7275 return txq + f->offset;
7276#else
7277 return fallback(dev, skb);
7278#endif
7279}
7280
7281netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
7282 struct ixgbe_adapter *adapter,
7283 struct ixgbe_ring *tx_ring)
7284{
7285 struct ixgbe_tx_buffer *first;
7286 int tso;
7287 u32 tx_flags = 0;
7288 unsigned short f;
7289 u16 count = TXD_USE_COUNT(skb_headlen(skb));
7290 __be16 protocol = skb->protocol;
7291 u8 hdr_len = 0;

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
7300 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
7301 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
7302
7303 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
7304 tx_ring->tx_stats.tx_busy++;
7305 return NETDEV_TX_BUSY;
7306 }

	/* record the location of the first descriptor for this packet */
7309 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
7310 first->skb = skb;
7311 first->bytecount = skb->len;
7312 first->gso_segs = 1;

	/* if we have a HW VLAN tag being added default to the HW one */
7315 if (skb_vlan_tag_present(skb)) {
7316 tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
7317 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN check the next protocol and store the tag */
7319 } else if (protocol == htons(ETH_P_8021Q)) {
7320 struct vlan_hdr *vhdr, _vhdr;
7321 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
7322 if (!vhdr)
7323 goto out_drop;
7324
7325 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
7326 IXGBE_TX_FLAGS_VLAN_SHIFT;
7327 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
7328 }
7329 protocol = vlan_get_protocol(skb);
7330
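	/* only one outstanding Tx hardware timestamp is allowed at a time */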
7331 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
7332 adapter->ptp_clock &&
7333 !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
7334 &adapter->state)) {
7335 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7336 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;

		/* schedule check for Tx timestamp */
7339 adapter->ptp_tx_skb = skb_get(skb);
7340 adapter->ptp_tx_start = jiffies;
7341 schedule_work(&adapter->ptp_tx_work);
7342 }
7343
7344 skb_tx_timestamp(skb);
7345
7346#ifdef CONFIG_PCI_IOV
	/*
	 * Use the l2switch_enable flag - would be false if the DMA
	 * Tx switch had been disabled.
	 */
7351 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7352 tx_flags |= IXGBE_TX_FLAGS_CC;
7353
7354#endif
	/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
7356 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
7357 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
7358 (skb->priority != TC_PRIO_CONTROL))) {
7359 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
7360 tx_flags |= (skb->priority & 0x7) <<
7361 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
7362 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
7363 struct vlan_ethhdr *vhdr;
7364
7365 if (skb_cow_head(skb, 0))
7366 goto out_drop;
7367 vhdr = (struct vlan_ethhdr *)skb->data;
7368 vhdr->h_vlan_TCI = htons(tx_flags >>
7369 IXGBE_TX_FLAGS_VLAN_SHIFT);
7370 } else {
7371 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
7372 }
7373 }

	/* record initial flags and protocol */
7376 first->tx_flags = tx_flags;
7377 first->protocol = protocol;
7378
7379#ifdef IXGBE_FCOE
	/* setup tx offload for FCoE */
7381 if ((protocol == htons(ETH_P_FCOE)) &&
7382 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
7383 tso = ixgbe_fso(tx_ring, first, &hdr_len);
7384 if (tso < 0)
7385 goto out_drop;
7386
7387 goto xmit_fcoe;
7388 }
7389
7390#endif
7391 tso = ixgbe_tso(tx_ring, first, &hdr_len);
7392 if (tso < 0)
7393 goto out_drop;
7394 else if (!tso)
7395 ixgbe_tx_csum(tx_ring, first);

	/* add the ATR filter if ATR is on */
7398 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
7399 ixgbe_atr(tx_ring, first);
7400
7401#ifdef IXGBE_FCOE
7402xmit_fcoe:
7403#endif
7404 ixgbe_tx_map(tx_ring, first, hdr_len);
7405
7406 return NETDEV_TX_OK;
7407
7408out_drop:
7409 dev_kfree_skb_any(first->skb);
7410 first->skb = NULL;
7411
7412 return NETDEV_TX_OK;
7413}
7414
7415static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
7416 struct net_device *netdev,
7417 struct ixgbe_ring *ring)
7418{
7419 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7420 struct ixgbe_ring *tx_ring;
7421
	/*
	 * The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
7426 if (skb_put_padto(skb, 17))
7427 return NETDEV_TX_OK;
7428
7429 tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
7430
7431 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
7432}
7433
7434static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
7435 struct net_device *netdev)
7436{
7437 return __ixgbe_xmit_frame(skb, netdev, NULL);
7438}
7439
/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
7447static int ixgbe_set_mac(struct net_device *netdev, void *p)
7448{
7449 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7450 struct ixgbe_hw *hw = &adapter->hw;
7451 struct sockaddr *addr = p;
7452 int ret;
7453
7454 if (!is_valid_ether_addr(addr->sa_data))
7455 return -EADDRNOTAVAIL;
7456
7457 ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
7458 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
7459 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
7460
7461 ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
7462 return ret > 0 ? 0 : ret;
7463}
7464
7465static int
7466ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
7467{
7468 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7469 struct ixgbe_hw *hw = &adapter->hw;
7470 u16 value;
7471 int rc;
7472
7473 if (prtad != hw->phy.mdio.prtad)
7474 return -EINVAL;
7475 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
7476 if (!rc)
7477 rc = value;
7478 return rc;
7479}
7480
7481static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
7482 u16 addr, u16 value)
7483{
7484 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7485 struct ixgbe_hw *hw = &adapter->hw;
7486
7487 if (prtad != hw->phy.mdio.prtad)
7488 return -EINVAL;
7489 return hw->phy.ops.write_reg(hw, addr, devad, value);
7490}
7491
7492static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
7493{
7494 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7495
7496 switch (cmd) {
7497 case SIOCSHWTSTAMP:
7498 return ixgbe_ptp_set_ts_config(adapter, req);
7499 case SIOCGHWTSTAMP:
7500 return ixgbe_ptp_get_ts_config(adapter, req);
7501 default:
7502 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
7503 }
7504}
7505
/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
7513static int ixgbe_add_sanmac_netdev(struct net_device *dev)
7514{
7515 int err = 0;
7516 struct ixgbe_adapter *adapter = netdev_priv(dev);
7517 struct ixgbe_hw *hw = &adapter->hw;
7518
7519 if (is_valid_ether_addr(hw->mac.san_addr)) {
7520 rtnl_lock();
7521 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
7522 rtnl_unlock();

		/* update SAN MAC vmdq pool selection */
7525 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
7526 }
7527 return err;
7528}
7529
/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
7537static int ixgbe_del_sanmac_netdev(struct net_device *dev)
7538{
7539 int err = 0;
7540 struct ixgbe_adapter *adapter = netdev_priv(dev);
7541 struct ixgbe_mac_info *mac = &adapter->hw.mac;
7542
7543 if (is_valid_ether_addr(mac->san_addr)) {
7544 rtnl_lock();
7545 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
7546 rtnl_unlock();
7547 }
7548 return err;
7549}
7550
7551#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
7557static void ixgbe_netpoll(struct net_device *netdev)
7558{
7559 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7560 int i;

	/* if interface is down do nothing */
7563 if (test_bit(__IXGBE_DOWN, &adapter->state))
7564 return;

	/* loop through and schedule all active queues */
7567 for (i = 0; i < adapter->num_q_vectors; i++)
7568 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
7569}
7570
7571#endif
7572static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
7573 struct rtnl_link_stats64 *stats)
7574{
7575 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7576 int i;
7577
7578 rcu_read_lock();
7579 for (i = 0; i < adapter->num_rx_queues; i++) {
7580 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
7581 u64 bytes, packets;
7582 unsigned int start;
7583
7584 if (ring) {
7585 do {
7586 start = u64_stats_fetch_begin_irq(&ring->syncp);
7587 packets = ring->stats.packets;
7588 bytes = ring->stats.bytes;
7589 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
7590 stats->rx_packets += packets;
7591 stats->rx_bytes += bytes;
7592 }
7593 }
7594
7595 for (i = 0; i < adapter->num_tx_queues; i++) {
7596 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
7597 u64 bytes, packets;
7598 unsigned int start;
7599
7600 if (ring) {
7601 do {
7602 start = u64_stats_fetch_begin_irq(&ring->syncp);
7603 packets = ring->stats.packets;
7604 bytes = ring->stats.bytes;
7605 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
7606 stats->tx_packets += packets;
7607 stats->tx_bytes += bytes;
7608 }
7609 }
7610 rcu_read_unlock();
7611
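	/* following stats updated by ixgbe_watchdog_task() */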
7612 stats->multicast = netdev->stats.multicast;
7613 stats->rx_errors = netdev->stats.rx_errors;
7614 stats->rx_length_errors = netdev->stats.rx_length_errors;
7615 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
7616 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
7617 return stats;
7618}
7619
7620#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
 * @adapter: driver data
 */
7629static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
7630{
7631 struct ixgbe_hw *hw = &adapter->hw;
7632 u32 reg, rsave;
7633 int i;

	/* The 82598 has a static priority to TC mapping that cannot be
	 * changed, so no validation is needed.
	 */
7638 if (hw->mac.type == ixgbe_mac_82598EB)
7639 return;
7640
7641 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
7642 rsave = reg;
7643
7644 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
7645 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);

		/* If up2tc is out of bounds default to zero */
7648 if (up2tc > tc)
7649 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
7650 }
7651
7652 if (reg != rsave)
7653 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
7654
7655 return;
7656}
7657
/**
 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
 * @adapter: Pointer to adapter struct
 *
 * Populate the netdev user priority to tc map
 */
7664static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
7665{
7666 struct net_device *dev = adapter->netdev;
7667 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
7668 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
7669 u8 prio;
7670
7671 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
7672 u8 tc = 0;
7673
7674 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
7675 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
7676 else if (ets)
7677 tc = ets->prio_tc[prio];
7678
7679 netdev_set_prio_tc_map(dev, prio, tc);
7680 }
7681}
7682
7683#endif
/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 *
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 */
7690int ixgbe_setup_tc(struct net_device *dev, u8 tc)
7691{
7692 struct ixgbe_adapter *adapter = netdev_priv(dev);
7693 struct ixgbe_hw *hw = &adapter->hw;
7694 bool pools;

	/* Hardware supports up to 8 traffic classes */
7697 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
7698 (hw->mac.type == ixgbe_mac_82598EB &&
7699 tc < MAX_TRAFFIC_CLASS))
7700 return -EINVAL;
7701
7702 pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
7703 if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
7704 return -EBUSY;
7705
	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
7710 if (netif_running(dev))
7711 ixgbe_close(dev);
7712 ixgbe_clear_interrupt_scheme(adapter);
7713
7714#ifdef CONFIG_IXGBE_DCB
7715 if (tc) {
7716 netdev_set_num_tc(dev, tc);
7717 ixgbe_set_prio_tc_map(adapter);
7718
7719 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
7720
7721 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
7722 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
7723 adapter->hw.fc.requested_mode = ixgbe_fc_none;
7724 }
7725 } else {
7726 netdev_reset_tc(dev);
7727
7728 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
7729 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
7730
7731 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
7732
7733 adapter->temp_dcb_cfg.pfc_mode_enable = false;
7734 adapter->dcb_cfg.pfc_mode_enable = false;
7735 }
7736
7737 ixgbe_validate_rtr(adapter, tc);
7738
7739#endif
7740 ixgbe_init_interrupt_scheme(adapter);
7741
7742 if (netif_running(dev))
7743 return ixgbe_open(dev);
7744
7745 return 0;
7746}
7747
7748#ifdef CONFIG_PCI_IOV
7749void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
7750{
7751 struct net_device *netdev = adapter->netdev;
7752
7753 rtnl_lock();
7754 ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
7755 rtnl_unlock();
7756}
7757
7758#endif
7759void ixgbe_do_reset(struct net_device *netdev)
7760{
7761 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7762
7763 if (netif_running(netdev))
7764 ixgbe_reinit_locked(adapter);
7765 else
7766 ixgbe_reset(adapter);
7767}
7768
7769static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
7770 netdev_features_t features)
7771{
7772 struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
7775 if (!(features & NETIF_F_RXCSUM))
7776 features &= ~NETIF_F_LRO;

	/* Turn off LRO if not RSC capable */
7779 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
7780 features &= ~NETIF_F_LRO;
7781
7782 return features;
7783}
7784
7785static int ixgbe_set_features(struct net_device *netdev,
7786 netdev_features_t features)
7787{
7788 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7789 netdev_features_t changed = netdev->features ^ features;
7790 bool need_reset = false;

	/* Make sure RSC matches LRO, reset if change */
7793 if (!(features & NETIF_F_LRO)) {
7794 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
7795 need_reset = true;
7796 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
7797 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
7798 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
7799 if (adapter->rx_itr_setting == 1 ||
7800 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
7801 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
7802 need_reset = true;
7803 } else if ((changed ^ features) & NETIF_F_LRO) {
			e_info(probe, "rx-usecs set too low, disabling RSC\n");
7806 }
7807 }

	/*
	 * Check if Flow Director n-tuple support was enabled or disabled.  If
	 * the state changed, we need to reset.
	 */
7813 switch (features & NETIF_F_NTUPLE) {
7814 case NETIF_F_NTUPLE:
		/* turn off ATR, enable perfect filters and reset */
7816 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
7817 need_reset = true;
7818
7819 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
7820 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
7821 break;
7822 default:
		/* turn off perfect filters, enable ATR and reset */
7824 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
7825 need_reset = true;
7826
7827 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;

		/* We cannot enable ATR if SR-IOV is enabled */
7830 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7831 break;

		/* We cannot enable ATR if we have 2 or more traffic classes */
7834 if (netdev_get_num_tc(netdev) > 1)
7835 break;

		/* We cannot enable ATR if RSS is disabled */
7838 if (adapter->ring_feature[RING_F_RSS].limit <= 1)
7839 break;

		/* A sample rate of 0 indicates ATR disabled */
7842 if (!adapter->atr_sample_rate)
7843 break;
7844
7845 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
7846 break;
7847 }
7848
7849 if (features & NETIF_F_HW_VLAN_CTAG_RX)
7850 ixgbe_vlan_strip_enable(adapter);
7851 else
7852 ixgbe_vlan_strip_disable(adapter);
7853
7854 if (changed & NETIF_F_RXALL)
7855 need_reset = true;
7856
7857 netdev->features = features;
7858 if (need_reset)
7859 ixgbe_do_reset(netdev);
7860
7861 return 0;
7862}
7863
/**
 * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
 * @dev: The port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: New UDP port number that VXLAN started listening to
 **/
7870static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
7871 __be16 port)
7872{
7873 struct ixgbe_adapter *adapter = netdev_priv(dev);
7874 struct ixgbe_hw *hw = &adapter->hw;
7875 u16 new_port = ntohs(port);
7876
7877 if (sa_family == AF_INET6)
7878 return;
7879
7880 if (adapter->vxlan_port == new_port) {
7881 netdev_info(dev, "Port %d already offloaded\n", new_port);
7882 return;
7883 }
7884
7885 if (adapter->vxlan_port) {
7886 netdev_info(dev,
7887 "Hit Max num of UDP ports, not adding port %d\n",
7888 new_port);
7889 return;
7890 }
7891
7892 adapter->vxlan_port = new_port;
7893 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
7894}
7895
/**
 * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
 * @dev: The port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: UDP port number that VXLAN stopped listening to
 **/
7902static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
7903 __be16 port)
7904{
7905 struct ixgbe_adapter *adapter = netdev_priv(dev);
7906 struct ixgbe_hw *hw = &adapter->hw;
7907 u16 new_port = ntohs(port);
7908
7909 if (sa_family == AF_INET6)
7910 return;
7911
7912 if (adapter->vxlan_port != new_port) {
7913 netdev_info(dev, "Port %d was not found, not deleting\n",
7914 new_port);
7915 return;
7916 }
7917
7918 adapter->vxlan_port = 0;
7919 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 0);
7920}
7921
7922static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7923 struct net_device *dev,
7924 const unsigned char *addr, u16 vid,
7925 u16 flags)
7926{
	/* guarantee we can provide a unique filter for the unicast address */
7928 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
7929 if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev))
7930 return -ENOMEM;
7931 }
7932
7933 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
7934}
7935
/**
 * ixgbe_configure_bridge_mode - set various bridge modes
 * @adapter: the private structure
 * @mode: requested bridge mode
 *
 * Configure the settings required for the requested bridge mode.
 **/
7943static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
7944 __u16 mode)
7945{
7946 struct ixgbe_hw *hw = &adapter->hw;
7947 unsigned int p, num_pools;
7948 u32 vmdctl;
7949
7950 switch (mode) {
7951 case BRIDGE_MODE_VEPA:
		/* disable Tx loopback, rely on switch hairpin mode */
7953 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);

		/* must enable Rx switching replication to allow multicast
		 * packet reception on all VFs, and to enable source address
		 * pruning.
		 */
7959 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
7960 vmdctl |= IXGBE_VT_CTL_REPLEN;
7961 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);

		/* enable Rx source address pruning. Note, this requires
		 * replication to be enabled or else it does nothing.
		 */
7966 num_pools = adapter->num_vfs + adapter->num_rx_pools;
7967 for (p = 0; p < num_pools; p++) {
7968 if (hw->mac.ops.set_source_address_pruning)
7969 hw->mac.ops.set_source_address_pruning(hw,
7970 true,
7971 p);
7972 }
7973 break;
7974 case BRIDGE_MODE_VEB:
		/* enable Tx loopback for internal VF/PF communication */
7976 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
7977 IXGBE_PFDTXGSWC_VT_LBEN);

		/* disable Rx switching replication unless we have SR-IOV
		 * virtual functions
		 */
7982 vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
7983 if (!adapter->num_vfs)
7984 vmdctl &= ~IXGBE_VT_CTL_REPLEN;
7985 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);

		/* disable Rx source address pruning, since we don't expect to
		 * be receiving external loopback of our transmitted frames.
		 */
7990 num_pools = adapter->num_vfs + adapter->num_rx_pools;
7991 for (p = 0; p < num_pools; p++) {
7992 if (hw->mac.ops.set_source_address_pruning)
7993 hw->mac.ops.set_source_address_pruning(hw,
7994 false,
7995 p);
7996 }
7997 break;
7998 default:
7999 return -EINVAL;
8000 }
8001
8002 adapter->bridge_mode = mode;
8003
8004 e_info(drv, "enabling bridge mode: %s\n",
8005 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
8006
8007 return 0;
8008}
8009
8010static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
8011 struct nlmsghdr *nlh, u16 flags)
8012{
8013 struct ixgbe_adapter *adapter = netdev_priv(dev);
8014 struct nlattr *attr, *br_spec;
8015 int rem;
8016
8017 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
8018 return -EOPNOTSUPP;
8019
8020 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8021 if (!br_spec)
8022 return -EINVAL;
8023
8024 nla_for_each_nested(attr, br_spec, rem) {
		int status;
8026 __u16 mode;
8027
8028 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8029 continue;
8030
8031 if (nla_len(attr) < sizeof(mode))
8032 return -EINVAL;
8033
8034 mode = nla_get_u16(attr);
8035 status = ixgbe_configure_bridge_mode(adapter, mode);
8036 if (status)
8037 return status;
8038
8039 break;
8040 }
8041
8042 return 0;
8043}
8044
8045static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8046 struct net_device *dev,
8047 u32 filter_mask, int nlflags)
8048{
8049 struct ixgbe_adapter *adapter = netdev_priv(dev);
8050
8051 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
8052 return 0;
8053
8054 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
8055 adapter->bridge_mode, 0, 0, nlflags);
8056}
8057
8058static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
8059{
8060 struct ixgbe_fwd_adapter *fwd_adapter = NULL;
8061 struct ixgbe_adapter *adapter = netdev_priv(pdev);
8062 int used_pools = adapter->num_vfs + adapter->num_rx_pools;
8063 unsigned int limit;
8064 int pool, err;
8065

	/* Hardware has a limited number of available pools. Each VF, and the
	 * PF require a pool. Check to ensure we don't attempt to use more
	 * than the available number of pools.
	 */
8070 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
8071 return ERR_PTR(-EINVAL);
8072
8073#ifdef CONFIG_RPS
8074 if (vdev->num_rx_queues != vdev->num_tx_queues) {
8075 netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
8076 vdev->name);
8077 return ERR_PTR(-EINVAL);
8078 }
8079#endif
8080
8081 if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
8082 vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
8083 netdev_info(pdev,
8084 "%s: Supports RX/TX Queue counts 1,2, and 4\n",
8085 pdev->name);
8086 return ERR_PTR(-EINVAL);
8087 }
8088
8089 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
8090 adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
8091 (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
8092 return ERR_PTR(-EBUSY);
8093
8094 fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL);
8095 if (!fwd_adapter)
8096 return ERR_PTR(-ENOMEM);
8097
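	/* claim the first free pool slot for this offloaded macvlan */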
8098 pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
8099 adapter->num_rx_pools++;
8100 set_bit(pool, &adapter->fwd_bitmask);
8101 limit = find_last_bit(&adapter->fwd_bitmask, 32);

	/* Enable VMDq flag so device will be set in VM mode */
8104 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
8105 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
8106 adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;

	/* Force reinit of ring allocation with VMDQ enabled */
8109 err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
8110 if (err)
8111 goto fwd_add_err;
8112 fwd_adapter->pool = pool;
8113 fwd_adapter->real_adapter = adapter;
8114 err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
8115 if (err)
8116 goto fwd_add_err;
8117 netif_tx_start_all_queues(vdev);
8118 return fwd_adapter;
8119fwd_add_err:
	/* unwind counter and free adapter struct */
8121 netdev_info(pdev,
8122 "%s: dfwd hardware acceleration failed\n", vdev->name);
8123 clear_bit(pool, &adapter->fwd_bitmask);
8124 adapter->num_rx_pools--;
8125 kfree(fwd_adapter);
8126 return ERR_PTR(err);
8127}
8128
8129static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
8130{
8131 struct ixgbe_fwd_adapter *fwd_adapter = priv;
8132 struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
8133 unsigned int limit;
8134
8135 clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
8136 adapter->num_rx_pools--;
8137
8138 limit = find_last_bit(&adapter->fwd_bitmask, 32);
8139 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
8140 ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
8141 ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
8142 netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
8143 fwd_adapter->pool, adapter->num_rx_pools,
8144 fwd_adapter->rx_base_queue,
8145 fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
8146 adapter->fwd_bitmask);
8147 kfree(fwd_adapter);
8148}
8149
8150static const struct net_device_ops ixgbe_netdev_ops = {
8151 .ndo_open = ixgbe_open,
8152 .ndo_stop = ixgbe_close,
8153 .ndo_start_xmit = ixgbe_xmit_frame,
8154 .ndo_select_queue = ixgbe_select_queue,
8155 .ndo_set_rx_mode = ixgbe_set_rx_mode,
8156 .ndo_validate_addr = eth_validate_addr,
8157 .ndo_set_mac_address = ixgbe_set_mac,
8158 .ndo_change_mtu = ixgbe_change_mtu,
8159 .ndo_tx_timeout = ixgbe_tx_timeout,
8160 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
8161 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
8162 .ndo_do_ioctl = ixgbe_ioctl,
8163 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
8164 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
8165 .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
8166 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
8167 .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
8168 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
8169 .ndo_get_stats64 = ixgbe_get_stats64,
8170#ifdef CONFIG_IXGBE_DCB
8171 .ndo_setup_tc = ixgbe_setup_tc,
8172#endif
8173#ifdef CONFIG_NET_POLL_CONTROLLER
8174 .ndo_poll_controller = ixgbe_netpoll,
8175#endif
8176#ifdef CONFIG_NET_RX_BUSY_POLL
8177 .ndo_busy_poll = ixgbe_low_latency_recv,
8178#endif
8179#ifdef IXGBE_FCOE
8180 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
8181 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
8182 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
8183 .ndo_fcoe_enable = ixgbe_fcoe_enable,
8184 .ndo_fcoe_disable = ixgbe_fcoe_disable,
8185 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
8186 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
8187#endif
8188 .ndo_set_features = ixgbe_set_features,
8189 .ndo_fix_features = ixgbe_fix_features,
8190 .ndo_fdb_add = ixgbe_ndo_fdb_add,
8191 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
8192 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
8193 .ndo_dfwd_add_station = ixgbe_fwd_add,
8194 .ndo_dfwd_del_station = ixgbe_fwd_del,
8195 .ndo_add_vxlan_port = ixgbe_add_vxlan_port,
8196 .ndo_del_vxlan_port = ixgbe_del_vxlan_port,
8197};
8198
/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * This function enumerates the physical functions co-located on a single slot,
 * in order to determine how many ports a device has. This is most useful in
 * determining the required GT/s of PCIe bandwidth necessary for optimal
 * performance.
 **/
8208static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
8209{
8210 struct pci_dev *entry, *pdev = adapter->pdev;
8211 int physfns = 0;

	/* Some cards cannot use the generic count PCIe functions method,
	 * because they are behind a parent switch, so we hardcode these with
	 * the correct number of functions.
	 */
8217 if (ixgbe_pcie_from_parent(&adapter->hw))
8218 physfns = 4;
8219
8220 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
		/* don't count virtual functions */
8222 if (entry->is_virtfn)
8223 continue;

		/* When the devices on the bus don't all match our device ID,
		 * we can't reliably determine the correct number of
		 * functions. This can occur if a function has been direct
		 * attached to a virtual machine using VT-d, for example. In
		 * this case, simply return -1 to indicate this.
		 */
8231 if ((entry->vendor != pdev->vendor) ||
8232 (entry->device != pdev->device))
8233 return -1;
8234
8235 physfns++;
8236 }
8237
8238 return physfns;
8239}
8240
/**
 * ixgbe_wol_supported - Check whether device supports WoL
 * @adapter: the adapter private structure
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * This function is used by probe and ethtool to determine
 * which devices have WoL support
 *
 **/
8251int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
8252 u16 subdevice_id)
8253{
8254 struct ixgbe_hw *hw = &adapter->hw;
8255 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
8256 int is_wol_supported = 0;
8257
8258 switch (device_id) {
8259 case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices support WoL */
8261 switch (subdevice_id) {
8262 case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
8263 case IXGBE_SUBDEV_ID_82599_560FLR:
			/* only support first port */
8265 if (hw->bus.func != 0)
8266 break;
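			/* fall through */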
8267 case IXGBE_SUBDEV_ID_82599_SP_560FLR:
8268 case IXGBE_SUBDEV_ID_82599_SFP:
8269 case IXGBE_SUBDEV_ID_82599_RNDC:
8270 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
8271 case IXGBE_SUBDEV_ID_82599_LOM_SFP:
8272 is_wol_supported = 1;
8273 break;
8274 }
8275 break;
8276 case IXGBE_DEV_ID_82599EN_SFP:
		/* Only this subdevice supports WoL */
8278 switch (subdevice_id) {
8279 case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
8280 is_wol_supported = 1;
8281 break;
8282 }
8283 break;
8284 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WoL */
8286 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
8287 is_wol_supported = 1;
8288 break;
8289 case IXGBE_DEV_ID_82599_KX4:
8290 is_wol_supported = 1;
8291 break;
8292 case IXGBE_DEV_ID_X540T:
8293 case IXGBE_DEV_ID_X540T1:
		/* check eeprom to see if WoL is enabled for this port */
8295 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
8296 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
8297 (hw->bus.func == 0))) {
8298 is_wol_supported = 1;
8299 }
8300 break;
8301 }
8302
8303 return is_wol_supported;
8304}
8305
/**
 * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
 * @adapter: Pointer to adapter struct
 */
8310static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
8311{
8312#ifdef CONFIG_OF
8313 struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
8314 struct ixgbe_hw *hw = &adapter->hw;
8315 const unsigned char *addr;
8316
8317 addr = of_get_mac_address(dp);
8318 if (addr) {
8319 ether_addr_copy(hw->mac.perm_addr, addr);
8320 return;
8321 }
8322#endif
8323
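	/* on SPARC machines fall back to the IDPROM MAC address */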
8324#ifdef CONFIG_SPARC
8325 ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
8326#endif
8327}
8328
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
8340static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8341{
8342 struct net_device *netdev;
8343 struct ixgbe_adapter *adapter = NULL;
8344 struct ixgbe_hw *hw;
8345 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
8346 int i, err, pci_using_dac, expected_gts;
8347 unsigned int indices = MAX_TX_QUEUES;
8348 u8 part_str[IXGBE_PBANUM_LENGTH];
8349 bool disable_dev = false;
8350#ifdef IXGBE_FCOE
8351 u16 device_caps;
8352#endif
8353 u32 eec;

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
8358 if (pdev->is_virtfn) {
8359 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
8360 pci_name(pdev), pdev->vendor, pdev->device);
8361 return -EINVAL;
8362 }
8363
8364 err = pci_enable_device_mem(pdev);
8365 if (err)
8366 return err;
8367
8368 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
8369 pci_using_dac = 1;
8370 } else {
8371 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8372 if (err) {
8373 dev_err(&pdev->dev,
8374 "No usable DMA configuration, aborting\n");
8375 goto err_dma;
8376 }
8377 pci_using_dac = 0;
8378 }
8379
8380 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
8381 IORESOURCE_MEM), ixgbe_driver_name);
8382 if (err) {
8383 dev_err(&pdev->dev,
8384 "pci_request_selected_regions failed 0x%x\n", err);
8385 goto err_pci_reg;
8386 }
8387
8388 pci_enable_pcie_error_reporting(pdev);
8389
8390 pci_set_master(pdev);
8391 pci_save_state(pdev);
8392
8393 if (ii->mac == ixgbe_mac_82598EB) {
8394#ifdef CONFIG_IXGBE_DCB
		/* 8 TC w/ 4 queues per TC */
8396 indices = 4 * MAX_TRAFFIC_CLASS;
8397#else
8398 indices = IXGBE_MAX_RSS_INDICES;
8399#endif
8400 }
8401
8402 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
8403 if (!netdev) {
8404 err = -ENOMEM;
8405 goto err_alloc_etherdev;
8406 }
8407
8408 SET_NETDEV_DEV(netdev, &pdev->dev);
8409
8410 adapter = netdev_priv(netdev);
8411
8412 adapter->netdev = netdev;
8413 adapter->pdev = pdev;
8414 hw = &adapter->hw;
8415 hw->back = adapter;
8416 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
8417
8418 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
8419 pci_resource_len(pdev, 0));
8420 adapter->io_addr = hw->hw_addr;
8421 if (!hw->hw_addr) {
8422 err = -EIO;
8423 goto err_ioremap;
8424 }
8425
8426 netdev->netdev_ops = &ixgbe_netdev_ops;
8427 ixgbe_set_ethtool_ops(netdev);
8428 netdev->watchdog_timeo = 5 * HZ;
8429 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	/* Setup hw api */
8432 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
8433 hw->mac.type = ii->mac;

	/* EEPROM */
8436 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
8437 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
8438 if (ixgbe_removed(hw->hw_addr)) {
8439 err = -EIO;
8440 goto err_ioremap;
8441 }
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
8443 if (!(eec & (1 << 8)))
8444 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
8447 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
8448 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
8450 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
8451 hw->phy.mdio.mmds = 0;
8452 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8453 hw->phy.mdio.dev = netdev;
8454 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
8455 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
8456
8457 ii->get_invariants(hw);

	/* setup the private structure */
8460 err = ixgbe_sw_init(adapter);
8461 if (err)
8462 goto err_sw_init;

	/* Make it possible for the adapter to be woken up via WoL */
8465 switch (adapter->hw.mac.type) {
8466 case ixgbe_mac_82599EB:
8467 case ixgbe_mac_X540:
8468 case ixgbe_mac_X550:
8469 case ixgbe_mac_X550EM_x:
8470 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
8471 break;
8472 default:
8473 break;
8474 }

	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
8480 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
8481 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
8482 if (esdp & IXGBE_ESDP_SDP1)
8483 e_crit(probe, "Fan has stopped, replace the adapter\n");
8484 }
8485
8486 if (allow_unsupported_sfp)
8487 hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* reset_hw fills in the perm_addr as well */
8490 hw->phy.reset_if_overtemp = true;
8491 err = hw->mac.ops.reset_hw(hw);
8492 hw->phy.reset_if_overtemp = false;
8493 if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
8494 hw->mac.type == ixgbe_mac_82598EB) {
8495 err = 0;
8496 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
8497 e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
8498 e_dev_err("Reload the driver after installing a supported module.\n");
8499 goto err_sw_init;
8500 } else if (err) {
8501 e_dev_err("HW Init failed: %d\n", err);
8502 goto err_sw_init;
8503 }
8504
8505#ifdef CONFIG_PCI_IOV
	/* SR-IOV not supported on the 82598 */
8507 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
8508 goto skip_sriov;
8509
8510 ixgbe_init_mbx_params_pf(hw);
8511 memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
8512 pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
8513 ixgbe_enable_sriov(adapter);
8514skip_sriov:
8515
8516#endif
8517 netdev->features = NETIF_F_SG |
8518 NETIF_F_IP_CSUM |
8519 NETIF_F_IPV6_CSUM |
8520 NETIF_F_HW_VLAN_CTAG_TX |
8521 NETIF_F_HW_VLAN_CTAG_RX |
8522 NETIF_F_TSO |
8523 NETIF_F_TSO6 |
8524 NETIF_F_RXHASH |
8525 NETIF_F_RXCSUM;
8526
8527 netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
8528
8529 switch (adapter->hw.mac.type) {
8530 case ixgbe_mac_82599EB:
8531 case ixgbe_mac_X540:
8532 case ixgbe_mac_X550:
8533 case ixgbe_mac_X550EM_x:
8534 netdev->features |= NETIF_F_SCTP_CSUM;
8535 netdev->hw_features |= NETIF_F_SCTP_CSUM |
8536 NETIF_F_NTUPLE;
8537 break;
8538 default:
8539 break;
8540 }
8541
8542 netdev->hw_features |= NETIF_F_RXALL;
8543 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
8544
8545 netdev->vlan_features |= NETIF_F_TSO;
8546 netdev->vlan_features |= NETIF_F_TSO6;
8547 netdev->vlan_features |= NETIF_F_IP_CSUM;
8548 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
8549 netdev->vlan_features |= NETIF_F_SG;
8550
8551 netdev->priv_flags |= IFF_UNICAST_FLT;
8552 netdev->priv_flags |= IFF_SUPP_NOFCS;
8553
8554 switch (adapter->hw.mac.type) {
8555 case ixgbe_mac_X550:
8556 case ixgbe_mac_X550EM_x:
8557 netdev->hw_enc_features |= NETIF_F_RXCSUM;
8558 break;
8559 default:
8560 break;
8561 }
8562
8563#ifdef CONFIG_IXGBE_DCB
8564 netdev->dcbnl_ops = &dcbnl_ops;
8565#endif
8566
8567#ifdef IXGBE_FCOE
8568 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
8569 unsigned int fcoe_l;
8570
8571 if (hw->mac.ops.get_device_caps) {
8572 hw->mac.ops.get_device_caps(hw, &device_caps);
8573 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
8574 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
8575 }
8576
8577
8578 fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
8579 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
8580
8581 netdev->features |= NETIF_F_FSO |
8582 NETIF_F_FCOE_CRC;
8583
8584 netdev->vlan_features |= NETIF_F_FSO |
8585 NETIF_F_FCOE_CRC |
8586 NETIF_F_FCOE_MTU;
8587 }
8588#endif
8589 if (pci_using_dac) {
8590 netdev->features |= NETIF_F_HIGHDMA;
8591 netdev->vlan_features |= NETIF_F_HIGHDMA;
8592 }
8593
8594 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
8595 netdev->hw_features |= NETIF_F_LRO;
8596 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
8597 netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
8600 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
8601 e_dev_err("The EEPROM Checksum Is Not Valid\n");
8602 err = -EIO;
8603 goto err_sw_init;
8604 }
8605
8606 ixgbe_get_platform_mac_addr(adapter);
8607
8608 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
8609
8610 if (!is_valid_ether_addr(netdev->dev_addr)) {
8611 e_dev_err("invalid MAC address\n");
8612 err = -EIO;
8613 goto err_sw_init;
8614 }
8615
8616 ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
8617
8618 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
8619 (unsigned long) adapter);
8620
8621 if (ixgbe_removed(hw->hw_addr)) {
8622 err = -EIO;
8623 goto err_sw_init;
8624 }
8625 INIT_WORK(&adapter->service_task, ixgbe_service_task);
8626 set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
8627 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
8628
8629 err = ixgbe_init_interrupt_scheme(adapter);
8630 if (err)
8631 goto err_sw_init;

	/* WOL not supported for all devices */
8634 adapter->wol = 0;
8635 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
8636 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
8637 pdev->subsystem_device);
8638 if (hw->wol_enabled)
8639 adapter->wol = IXGBE_WUFC_MAG;
8640
8641 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
8644 hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
8645 hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);

	/* pick up the PCI bus settings for reporting later */
8648 hw->mac.ops.get_bus_info(hw);
8649 if (ixgbe_pcie_from_parent(hw))
8650 ixgbe_get_parent_bus_info(adapter);

	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure no warning is displayed if it can't be fixed.
	 */
8657 switch (hw->mac.type) {
8658 case ixgbe_mac_82598EB:
8659 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
8660 break;
8661 default:
8662 expected_gts = ixgbe_enumerate_functions(adapter) * 10;
8663 break;
8664 }

	/* don't check link if we failed to enumerate functions */
8667 if (expected_gts > 0)
8668 ixgbe_check_minimum_link(adapter, expected_gts);
8669
8670 err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
8671 if (err)
8672 strlcpy(part_str, "Unknown", sizeof(part_str));
8673 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
8674 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
8675 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
8676 part_str);
8677 else
8678 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
8679 hw->mac.type, hw->phy.type, part_str);
8680
8681 e_dev_info("%pM\n", netdev->dev_addr);

	/* reset the hardware with the new settings */
8684 err = hw->mac.ops.start_hw(hw);
8685 if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
8687 e_dev_warn("This device is a pre-production adapter/LOM. "
8688 "Please be aware there may be issues associated "
8689 "with your hardware. If you are experiencing "
8690 "problems please contact your Intel or hardware "
8691 "representative who provided you with this "
8692 "hardware.\n");
8693 }
8694 strcpy(netdev->name, "eth%d");
8695 err = register_netdev(netdev);
8696 if (err)
8697 goto err_register;
8698
8699 pci_set_drvdata(pdev, adapter);

	/* power down the optics for 82599 SFP+ fiber */
8702 if (hw->mac.ops.disable_tx_laser)
8703 hw->mac.ops.disable_tx_laser(hw);

	/* carrier off reporting is important to ethtool even BEFORE open */
8706 netif_carrier_off(netdev);
8707
8708#ifdef CONFIG_IXGBE_DCA
8709 if (dca_add_requester(&pdev->dev) == 0) {
8710 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
8711 ixgbe_setup_dca(adapter);
8712 }
8713#endif
8714 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
8715 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
8716 for (i = 0; i < adapter->num_vfs; i++)
8717 ixgbe_vf_configuration(pdev, (i | 0x10000000));
8718 }

	/* firmware requires driver version to be 0xFFFFFFFF
	 * since os does not support feature
	 */
8723 if (hw->mac.ops.set_fw_drv_ver)
		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF);

	/* add san mac addr to netdev */
8728 ixgbe_add_sanmac_netdev(netdev);
8729
8730 e_dev_info("%s\n", ixgbe_default_device_descr);
8731
8732#ifdef CONFIG_IXGBE_HWMON
8733 if (ixgbe_sysfs_init(adapter))
8734 e_err(probe, "failed to allocate sysfs resources\n");
8735#endif
8736
8737 ixgbe_dbg_adapter_init(adapter);

	/* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
8740 if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
8741 hw->mac.ops.setup_link(hw,
8742 IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
8743 true);
8744
8745 return 0;
8746
8747err_register:
8748 ixgbe_release_hw_control(adapter);
8749 ixgbe_clear_interrupt_scheme(adapter);
8750err_sw_init:
8751 ixgbe_disable_sriov(adapter);
8752 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
8753 iounmap(adapter->io_addr);
8754 kfree(adapter->mac_table);
8755err_ioremap:
8756 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
8757 free_netdev(netdev);
8758err_alloc_etherdev:
8759 pci_release_selected_regions(pdev,
8760 pci_select_bars(pdev, IORESOURCE_MEM));
8761err_pci_reg:
8762err_dma:
8763 if (!adapter || disable_dev)
8764 pci_disable_device(pdev);
8765 return err;
8766}
8767
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
8777static void ixgbe_remove(struct pci_dev *pdev)
8778{
8779 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
8780 struct net_device *netdev;
8781 bool disable_dev;

	/* if !adapter then we already cleaned up in probe */
8784 if (!adapter)
8785 return;
8786
8787 netdev = adapter->netdev;
8788 ixgbe_dbg_adapter_exit(adapter);
8789
8790 set_bit(__IXGBE_REMOVING, &adapter->state);
8791 cancel_work_sync(&adapter->service_task);
8792
8793
8794#ifdef CONFIG_IXGBE_DCA
8795 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
8796 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
8797 dca_remove_requester(&pdev->dev);
8798 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
8799 }
8800
8801#endif
8802#ifdef CONFIG_IXGBE_HWMON
8803 ixgbe_sysfs_exit(adapter);
8804#endif

	/* remove the added san mac */
8807 ixgbe_del_sanmac_netdev(netdev);
8808
8809 if (netdev->reg_state == NETREG_REGISTERED)
8810 unregister_netdev(netdev);
8811
8812#ifdef CONFIG_PCI_IOV
	/*
	 * Only disable SR-IOV on unload if the user specified the now
	 * deprecated max_vfs module parameter.
	 */
8817 if (max_vfs)
8818 ixgbe_disable_sriov(adapter);
8819#endif
8820 ixgbe_clear_interrupt_scheme(adapter);
8821
8822 ixgbe_release_hw_control(adapter);
8823
8824#ifdef CONFIG_DCB
8825 kfree(adapter->ixgbe_ieee_pfc);
8826 kfree(adapter->ixgbe_ieee_ets);
8827
8828#endif
8829 iounmap(adapter->io_addr);
8830 pci_release_selected_regions(pdev, pci_select_bars(pdev,
8831 IORESOURCE_MEM));
8832
8833 e_dev_info("complete\n");
8834
8835 kfree(adapter->mac_table);
8836 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
8837 free_netdev(netdev);
8838
8839 pci_disable_pcie_error_reporting(pdev);
8840
8841 if (disable_dev)
8842 pci_disable_device(pdev);
8843}
8844
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
8853static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
8854 pci_channel_state_t state)
8855{
8856 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
8857 struct net_device *netdev = adapter->netdev;
8858
8859#ifdef CONFIG_PCI_IOV
8860 struct ixgbe_hw *hw = &adapter->hw;
8861 struct pci_dev *bdev, *vfdev;
8862 u32 dw0, dw1, dw2, dw3;
8863 int vf, pos;
8864 u16 req_id, pf_func;
8865
8866 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
8867 adapter->num_vfs == 0)
8868 goto skip_bad_vf_detection;
8869
8870 bdev = pdev->bus->self;
8871 while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
8872 bdev = bdev->bus->self;
8873
8874 if (!bdev)
8875 goto skip_bad_vf_detection;
8876
8877 pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
8878 if (!pos)
8879 goto skip_bad_vf_detection;
8880
8881 dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
8882 dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
8883 dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
8884 dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
8885 if (ixgbe_removed(hw->hw_addr))
8886 goto skip_bad_vf_detection;
8887
8888 req_id = dw1 >> 16;
	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
8890 if (!(req_id & 0x0080))
8891 goto skip_bad_vf_detection;
8892
8893 pf_func = req_id & 0x01;
8894 if ((pf_func & 1) == (pdev->devfn & 1)) {
8895 unsigned int device_id;
8896
8897 vf = (req_id & 0x7F) >> 1;
8898 e_dev_err("VF %d has caused a PCIe error\n", vf);
8899 e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
8900 "%8.8x\tdw3: %8.8x\n",
8901 dw0, dw1, dw2, dw3);
8902 switch (adapter->hw.mac.type) {
8903 case ixgbe_mac_82599EB:
8904 device_id = IXGBE_82599_VF_DEVICE_ID;
8905 break;
8906 case ixgbe_mac_X540:
8907 device_id = IXGBE_X540_VF_DEVICE_ID;
8908 break;
8909 case ixgbe_mac_X550:
8910 device_id = IXGBE_DEV_ID_X550_VF;
8911 break;
8912 case ixgbe_mac_X550EM_x:
8913 device_id = IXGBE_DEV_ID_X550EM_X_VF;
8914 break;
8915 default:
8916 device_id = 0;
8917 break;
8918 }

		/* Find the pci device of the offending VF */
8921 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
8922 while (vfdev) {
8923 if (vfdev->devfn == (req_id & 0xFF))
8924 break;
8925 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
8926 device_id, vfdev);
8927 }
		/*
		 * There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
8933 if (vfdev) {
8934 ixgbe_issue_vf_flr(adapter, vfdev);
			/* Free device reference count */
8936 pci_dev_put(vfdev);
8937 }
8938
8939 pci_cleanup_aer_uncorrect_error_status(pdev);
8940 }

	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
8948 adapter->vferr_refcount++;
8949
8950 return PCI_ERS_RESULT_RECOVERED;
8951
8952skip_bad_vf_detection:
8953#endif
8954 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
8955 return PCI_ERS_RESULT_DISCONNECT;
8956
8957 rtnl_lock();
8958 netif_device_detach(netdev);
8959
8960 if (state == pci_channel_io_perm_failure) {
8961 rtnl_unlock();
8962 return PCI_ERS_RESULT_DISCONNECT;
8963 }
8964
8965 if (netif_running(netdev))
8966 ixgbe_down(adapter);
8967
8968 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
8969 pci_disable_device(pdev);
8970 rtnl_unlock();

	/* Request a slot reset. */
8973 return PCI_ERS_RESULT_NEED_RESET;
8974}
8975
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
8982static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
8983{
8984 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
8985 pci_ers_result_t result;
8986 int err;
8987
8988 if (pci_enable_device_mem(pdev)) {
8989 e_err(probe, "Cannot re-enable PCI device after reset.\n");
8990 result = PCI_ERS_RESULT_DISCONNECT;
8991 } else {
8992 smp_mb__before_atomic();
8993 clear_bit(__IXGBE_DISABLED, &adapter->state);
8994 adapter->hw.hw_addr = adapter->io_addr;
8995 pci_set_master(pdev);
8996 pci_restore_state(pdev);
8997 pci_save_state(pdev);
8998
8999 pci_wake_from_d3(pdev, false);
9000
9001 ixgbe_reset(adapter);
9002 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
9003 result = PCI_ERS_RESULT_RECOVERED;
9004 }
9005
9006 err = pci_cleanup_aer_uncorrect_error_status(pdev);
9007 if (err) {
9008 e_dev_err("pci_cleanup_aer_uncorrect_error_status "
9009 "failed 0x%0x\n", err);
		/* non-fatal, continue */
9011 }
9012
9013 return result;
9014}
9015
/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
9023static void ixgbe_io_resume(struct pci_dev *pdev)
9024{
9025 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
9026 struct net_device *netdev = adapter->netdev;
9027
9028#ifdef CONFIG_PCI_IOV
9029 if (adapter->vferr_refcount) {
9030 e_info(drv, "Resuming after VF err\n");
9031 adapter->vferr_refcount--;
9032 return;
9033 }
9034
9035#endif
9036 if (netif_running(netdev))
9037 ixgbe_up(adapter);
9038
9039 netif_device_attach(netdev);
9040}
9041
9042static const struct pci_error_handlers ixgbe_err_handler = {
9043 .error_detected = ixgbe_io_error_detected,
9044 .slot_reset = ixgbe_io_slot_reset,
9045 .resume = ixgbe_io_resume,
9046};
9047
9048static struct pci_driver ixgbe_driver = {
9049 .name = ixgbe_driver_name,
9050 .id_table = ixgbe_pci_tbl,
9051 .probe = ixgbe_probe,
9052 .remove = ixgbe_remove,
9053#ifdef CONFIG_PM
9054 .suspend = ixgbe_suspend,
9055 .resume = ixgbe_resume,
9056#endif
9057 .shutdown = ixgbe_shutdown,
9058 .sriov_configure = ixgbe_pci_sriov_configure,
9059 .err_handler = &ixgbe_err_handler
9060};
9061
/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
9068static int __init ixgbe_init_module(void)
9069{
9070 int ret;
9071 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
9072 pr_info("%s\n", ixgbe_copyright);
9073
9074 ixgbe_dbg_init();
9075
9076 ret = pci_register_driver(&ixgbe_driver);
9077 if (ret) {
9078 ixgbe_dbg_exit();
9079 return ret;
9080 }
9081
9082#ifdef CONFIG_IXGBE_DCA
9083 dca_register_notify(&dca_notifier);
9084#endif
9085
9086 return 0;
9087}
9088
9089module_init(ixgbe_init_module);
9090
/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
9097static void __exit ixgbe_exit_module(void)
9098{
9099#ifdef CONFIG_IXGBE_DCA
9100 dca_unregister_notify(&dca_notifier);
9101#endif
9102 pci_unregister_driver(&ixgbe_driver);
9103
9104 ixgbe_dbg_exit();
9105}
9106
9107#ifdef CONFIG_IXGBE_DCA
9108static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
9109 void *p)
9110{
9111 int ret_val;
9112
9113 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
9114 __ixgbe_notify_dca);
9115
9116 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
9117}
9118
9119#endif
9120
9121module_exit(ixgbe_exit_module);
9122