/*
 * Intel 10 Gigabit PCI Express Linux driver
 * Copyright(c) 1999 - 2013 Intel Corporation.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
	"Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
	"Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "3.13.10-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
	"Copyright (c) 1999-2013 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
	[board_X540] = &ixgbe_X540_info,
};

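/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */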
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next = NULL,
	.priority = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
#endif

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
					  u32 reg, u16 *value)
{
	int pos = 0;
	struct pci_dev *parent_dev;
	struct pci_bus *parent_bus;

	parent_bus = adapter->pdev->bus->parent;
	if (!parent_bus)
		return -1;

	parent_dev = parent_bus->self;
	if (!parent_dev)
		return -1;

	pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP);
	if (!pos)
		return -1;

	pci_read_config_word(parent_dev, pos + reg, value);
	return 0;
}

static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 link_status = 0;
	int err;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from the PCI config
	 * space of the parent, as this device is behind a switch
	 * (offset 18 is the Link Status register, PCI_EXP_LNKSTA)
	 */
	err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);

	/* assume caller will handle error case */
	if (err)
		return err;

	hw->bus.width = ixgbe_convert_bus_width(link_status);
	hw->bus.speed = ixgbe_convert_bus_speed(link_status);

	return 0;
}

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_clear_bit();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};

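/*
 * ixgbe_regdump - register printout routine
 */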
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}
}
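
/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */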
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name state trans_start last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info(" %s %s %s %s\n",
		"Queue [NTU] [NTC] [bi(ntc)->dma ]",
		"leng", "ntw", "timestamp");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(tx_buffer, dma),
			dma_unmap_len(tx_buffer, len),
			tx_buffer->next_to_watch,
			(u64)tx_buffer->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
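
	/* Transmit Descriptor Formats
	 *
	 * The per-descriptor columns printed below follow the 82598/82599
	 * advanced transmit descriptor layout: the 64-bit buffer address
	 * in the first quadword, and the option/command/status/length
	 * fields ("PlPOIdStDDt Ln" in the legend line) in the second.
	 */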
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s %s %s %s %s\n",
			"T [desc] [address 63:0 ] ",
			"[PlPOIdStDDt Ln] [bi->dma ] ",
			"leng", "ntw", "timestamp", "bi->skb");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC(tx_ring, i);
			tx_buffer = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (dma_unmap_len(tx_buffer, len) > 0) {
				pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
					i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)dma_unmap_addr(tx_buffer, dma),
					dma_unmap_len(tx_buffer, len),
					tx_buffer->next_to_watch,
					(u64)tx_buffer->time_stamp,
					tx_buffer->skb);
				if (i == tx_ring->next_to_use &&
				    i == tx_ring->next_to_clean)
					pr_cont(" NTC/U\n");
				else if (i == tx_ring->next_to_use)
					pr_cont(" NTU\n");
				else if (i == tx_ring->next_to_clean)
					pr_cont(" NTC\n");
				else
					pr_cont("\n");

				if (netif_msg_pktdata(adapter) &&
				    tx_buffer->skb)
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS, 16, 1,
						tx_buffer->skb->data,
						dma_unmap_len(tx_buffer, len),
						true);
			}
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
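
	/* Receive Descriptor Formats
	 *
	 * The "R" lines below follow the advanced receive descriptor read
	 * format (packet and header buffer addresses); the "RWB" lines
	 * follow the write-back format (RSS/checksum status, VLAN and
	 * length fields), as indicated by the legend printed per queue.
	 */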
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("%s%s%s",
			"R [desc] [ PktBuf A0] ",
			"[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
			"<-- Adv Rx Read format\n");
		pr_info("%s%s%s",
			"RWB[desc] [PcsmIpSHl PtRs] ",
			"[vl er S cks ln] ---------------- [bi->skb ] ",
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R [0x%03X] %016llX %016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter) &&
				    rx_buffer_info->dma) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS, 16, 1,
						page_address(rx_buffer_info->page) +
							rx_buffer_info->page_offset,
						ixgbe_rx_bufsz(rx_ring), true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");
		}
	}

exit:
	return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

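/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */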
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
				      struct ixgbe_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
}

static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	int i;
	u32 data;

	if ((hw->fc.current_mode != ixgbe_fc_full) &&
	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
		return;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		break;
	default:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	}
	hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
	if (!data)
		return;

	for (i = 0; i < adapter->num_tx_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->tx_ring[i]->state);
}

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 xoff[8] = {0};
	u8 tc;
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
		ixgbe_update_xoff_rx_lfc(adapter);
		return;
	}

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		u32 pxoffrxc;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += pxoffrxc;

		tc = netdev_get_prio_tc_map(adapter->netdev, i);
		xoff[tc] += pxoffrxc;
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

		tc = tx_ring->dcb_tc;
		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->stats.packets;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);
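
	/*
	 * Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */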
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}
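
/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private structure
 */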
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
		e_warn(drv, "initiating reset due to tx timeout\n");
		ixgbe_service_event_schedule(adapter);
	}
}
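
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 */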
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct ixgbe_hw *hw = &adapter->hw;
		e_err(drv, "Detected Tx Unit Hang\n"
			" Tx Queue <%d>\n"
			" TDH, TDT <%x>, <%x>\n"
			" next_to_use <%x>\n"
			" next_to_clean <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			" time_stamp <%lx>\n"
			" jiffies <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, i,
			tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		       adapter->tx_timeout_count + 1, tx_ring->queue_index);

		ixgbe_tx_timeout_reset(adapter);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index)
		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
	u16 reg_offset;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
		txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		/* for unknown hardware do not write register */
		return;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
		  IXGBE_DCA_TXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}

static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
	u8 reg_idx = rx_ring->reg_idx;

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		break;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	ixgbe_for_each_ring(ring, q_vector->tx)
		ixgbe_update_tx_dca(adapter, ring, cpu);

	ixgbe_for_each_ring(ring, q_vector->rx)
		ixgbe_update_rx_dca(adapter, ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
				 union ixgbe_adv_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}

#ifdef IXGBE_FCOE
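/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */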
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
				    union ixgbe_adv_rx_desc *rx_desc)
{
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
}

#endif

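/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 */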
static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
			return;

		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
				    struct ixgbe_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma = bi->dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(dma))
		return true;

	/* alloc new page for storage */
	if (likely(!page)) {
		page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
					 bi->skb, ixgbe_rx_pg_order(rx_ring));
		if (unlikely(!page)) {
			rx_ring->rx_stats.alloc_rx_page_failed++;
			return false;
		}
		bi->page = page;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);

	/*
	 * if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
		bi->page = NULL;

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page_offset = 0;

	return true;
}
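
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 */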
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
			break;

		/*
		 * Refresh the desc even if buffer_addrs didn't change
		 * because each write-combining flush flushes the write.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i)
		ixgbe_release_rx_desc(rx_ring, i);
}
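
/**
 * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @max_len: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 */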
static unsigned int ixgbe_get_headlen(unsigned char *data,
				      unsigned int max_len)
{
	union {
		unsigned char *network;
		/* l2 headers */
		struct ethhdr *eth;
		struct vlan_hdr *vlan;
		/* l3 headers */
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	__be16 protocol;
	u8 nexthdr = 0;
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_len < ETH_HLEN)
		return max_len;

	/* initialize network frame pointer */
	hdr.network = data;

	/* set first protocol and move network header forward */
	protocol = hdr.eth->h_proto;
	hdr.network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (protocol == __constant_htons(ETH_P_8021Q)) {
		if ((hdr.network - data) > (max_len - VLAN_HLEN))
			return max_len;

		protocol = hdr.vlan->h_vlan_encapsulated_proto;
		hdr.network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (protocol == __constant_htons(ETH_P_IP)) {
		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
			return max_len;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return hdr.network - data;

		/* record next protocol if header is present */
		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
			nexthdr = hdr.ipv4->protocol;
	} else if (protocol == __constant_htons(ETH_P_IPV6)) {
		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
			return max_len;

		/* record next protocol */
		nexthdr = hdr.ipv6->nexthdr;
		hlen = sizeof(struct ipv6hdr);
#ifdef IXGBE_FCOE
	} else if (protocol == __constant_htons(ETH_P_FCOE)) {
		if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
			return max_len;
		hlen = FCOE_HEADER_LEN;
#endif
	} else {
		return hdr.network - data;
	}

	/* relocate pointer to start of L4 header */
	hdr.network += hlen;

	/* finally sort out TCP/UDP */
	if (nexthdr == IPPROTO_TCP) {
		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
			return max_len;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return hdr.network - data;

		hdr.network += hlen;
	} else if (nexthdr == IPPROTO_UDP) {
		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
			return max_len;

		hdr.network += sizeof(struct udphdr);
	}

	/*
	 * If everything has gone correctly hdr.network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((hdr.network - data) < max_len)
		return hdr.network - data;
	else
		return max_len;
}

static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
				   struct sk_buff *skb)
{
	u16 hdr_len = skb_headlen(skb);

	/* set gso_size to avoid messing up TCP MSS */
	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
						 IXGBE_CB(skb)->append_cnt);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}

static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
				   struct sk_buff *skb)
{
	/* if append_cnt is 0 then frame is not RSC */
	if (!IXGBE_CB(skb)->append_cnt)
		return;

	rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
	rx_ring->rx_stats.rsc_flush++;

	ixgbe_set_rsc_gso_size(rx_ring, skb);

	/* gso_size is computed using append_cnt so always clear it last */
	IXGBE_CB(skb)->append_cnt = 0;
}
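
/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 */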
static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;

	ixgbe_update_rsc_stats(rx_ring, skb);

	ixgbe_rx_hash(rx_ring, rx_desc, skb);

	ixgbe_rx_checksum(rx_ring, rx_desc, skb);

	ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, dev);
}

static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
			 struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;

	if (ixgbe_qv_ll_polling(q_vector))
		netif_receive_skb(skb);
	else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}
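
/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */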
static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
			     union ixgbe_adv_rx_desc *rx_desc,
			     struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBE_RX_DESC(rx_ring, ntc));

	/* update RSC append count if present */
	if (ring_is_rsc_enabled(rx_ring)) {
		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
				     cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);

		if (unlikely(rsc_enabled)) {
			u32 rsc_cnt = le32_to_cpu(rsc_enabled);

			rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;

			/* update ntc based on RSC value */
			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
			ntc &= IXGBE_RXDADV_NEXTP_MASK;
			ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
		}
	}

	/* if we are the last buffer then there is nothing else to do */
	if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buffer_info[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}
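
/**
 * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbe specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */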
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
			    struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/*
	 * it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lomem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/*
	 * we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}
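
/**
 * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being updated
 *
 * This function provides a basic DMA sync up for the first fragment of an
 * skb.  The reason for doing this is that the first fragment cannot be
 * unmapped until we have reached the end of packet descriptor for a buffer
 * chain.
 */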
static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
				struct sk_buff *skb)
{
	/* if the page was released unmap it, else just sync our portion */
	if (unlikely(IXGBE_CB(skb)->page_released)) {
		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
		IXGBE_CB(skb)->page_released = false;
	} else {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];

		dma_sync_single_range_for_cpu(rx_ring->dev,
					      IXGBE_CB(skb)->dma,
					      frag->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);
	}
	IXGBE_CB(skb)->dma = 0;
}
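
/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */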
static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
				  union ixgbe_adv_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	struct net_device *netdev = rx_ring->netdev;

	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbe_test_staterr(rx_desc,
					IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
		     !(netdev->features & NETIF_F_RXALL))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		ixgbe_pull_tail(rx_ring, skb);

#ifdef IXGBE_FCOE
	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
		return false;

#endif
	/* if skb_pad returns an error the skb was freed */
	if (unlikely(skb->len < 60)) {
		int pad_len = 60 - skb->len;

		if (skb_pad(skb, pad_len))
			return true;
		__skb_put(skb, pad_len);
	}

	return false;
}
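
/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */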
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
				struct ixgbe_rx_buffer *old_buff)
{
	struct ixgbe_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset,
					 ixgbe_rx_bufsz(rx_ring),
					 DMA_FROM_DEVICE);
}
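
/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 */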
static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
			      struct ixgbe_rx_buffer *rx_buffer,
			      union ixgbe_adv_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
	unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
				   ixgbe_rx_bufsz(rx_ring);
#endif

	if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buffer->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(page) == numa_node_id()))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(page_to_nid(page) != numa_node_id()))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= truesize;

	/*
	 * since we are the only owner of the page and we need to
	 * increment it, just set the value to 2 in order to avoid
	 * an unnecessary locked operation
	 */
	atomic_set(&page->_count, 2);
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > last_offset)
		return false;

	/* bump ref count on page before it is given to the stack */
	get_page(page);
#endif

	return true;
}

static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
					     union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_rx_buffer *rx_buffer;
	struct sk_buff *skb;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	skb = rx_buffer->skb;

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IXGBE_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/*
		 * we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);

		/*
		 * Delay unmapping of the first packet. It carries the
		 * header information, HW may still access the header
		 * after the writeback.  Only unmap it when EOP is
		 * reached
		 */
		if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
			goto dma_sync;

		IXGBE_CB(skb)->dma = rx_buffer->dma;
	} else {
		if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
			ixgbe_dma_sync_frag(rx_ring, skb);

dma_sync:
		/* we are reusing so sync this buffer for CPU use */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_buffer->dma,
					      rx_buffer->page_offset,
					      ixgbe_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);
	}

	/* pull page into skb */
	if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
	} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
		/* the page has been released from the ring */
		IXGBE_CB(skb)->page_released = true;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       ixgbe_rx_pg_size(rx_ring),
			       DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->skb = NULL;
	rx_buffer->dma = 0;
	rx_buffer->page = NULL;

	return skb;
}
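
/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */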
static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
			      struct ixgbe_ring *rx_ring,
			      const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int ddp_bytes;
	unsigned int mss = 0;
#endif /* IXGBE_FCOE */
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);

	do {
		union ixgbe_adv_rx_desc *rx_desc;
		struct sk_buff *skb;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
			break;

		/*
		 * This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		/* retrieve a buffer from the ring */
		skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* place incomplete frames back on ring for completion */
		if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* verify the packet layout is correct */
		if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
			continue;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, timestamp, VLAN, and protocol */
		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);

#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			/* include DDPed FCoE data */
			if (ddp_bytes > 0) {
				if (!mss) {
					mss = rx_ring->netdev->mtu -
						sizeof(struct fcoe_hdr) -
						sizeof(struct fc_frame_header) -
						sizeof(struct fcoe_crc_eof);
					if (mss > 512)
						mss &= ~511;
				}
				total_rx_bytes += ddp_bytes;
				total_rx_packets += DIV_ROUND_UP(ddp_bytes,
								 mss);
			}
			if (!ddp_bytes) {
				dev_kfree_skb_any(skb);
				continue;
			}
		}

#endif /* IXGBE_FCOE */
		skb_mark_napi_id(skb, &q_vector->napi);
		ixgbe_rx_skb(q_vector, skb);

		/* update budget accounting */
		total_rx_packets++;
	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_rx_packets;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbe_low_latency_recv(struct napi_struct *napi)
{
	struct ixgbe_q_vector *q_vector =
			container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int found = 0;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbe_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbe_for_each_ring(ring, q_vector->rx) {
		found = ixgbe_clean_rx_irq(q_vector, ring, 4);
#ifdef LL_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbe_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
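
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 */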
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int v_idx;
	u32 mask;

	/* Populate MSIX to EITR Select */
	if (adapter->num_vfs > 32) {
		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
	}

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct ixgbe_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbe_for_each_ring(ring, q_vector->rx)
			ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbe_for_each_ring(ring, q_vector->tx)
			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		ixgbe_write_eitr(q_vector);
	}

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
			       v_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER |
		  IXGBE_EIMS_MAILBOX |
		  IXGBE_EIMS_LSC);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
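
/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during the
 * last interrupt.  The advantage of per-interrupt computation is faster
 * updates and more accurate ITR for the current traffic pattern.
 * Constants in this function were computed based on theoretical maximum
 * wire speed and thresholds were set based on testing data as well as
 * attempting to minimize response time while increasing bulk throughput.
 */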
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
			     struct ixgbe_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (100000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	if (timepassed_us == 0)
		return;

	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
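
/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */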
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
		break;
	default:
		break;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbe_update_itr(q_vector, &q_vector->tx);
	ixgbe_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IXGBE_8K_ITR;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbe_write_eitr(q_vector);
	}
}
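
/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 */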
static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = adapter->interrupt_event;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
	    !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_T3_LOM:
		/*
		 * Since the warning interrupt is for both ports
		 * we don't have to check if:
		 *  - This interrupt wasn't for our port.
		 *  - We may have missed the interrupt so always have to
		 *    check if we got a LSC
		 */
		if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
		    !(eicr & IXGBE_EICR_LSC))
			return;

		if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
			u32 speed;
			bool link_up = false;

			hw->mac.ops.check_link(hw, &speed, &link_up, false);

			if (link_up)
				return;
		}

		/* Check if this is not due to overtemp */
		if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
			return;

		break;
	default:
		if (!(eicr & IXGBE_EICR_GPI_SDP0))
			return;
		break;
	}
	e_crit(drv,
	       "Network adapter has been stopped because it has over heated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");

	adapter->interrupt_event = 0;
}

static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		e_crit(probe, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}

static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
		return;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		/*
		 * Need to check link state so complete overtemp check
		 * on service task
		 */
		if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
		    (!test_bit(__IXGBE_DOWN, &adapter->state))) {
			adapter->interrupt_event = eicr;
			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
			ixgbe_service_event_schedule(adapter);
			return;
		}
		return;
	case ixgbe_mac_X540:
		if (!(eicr & IXGBE_EICR_TS))
			return;
		break;
	default:
		return;
	}

	e_crit(drv,
	       "Network adapter has been stopped because it has over heated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");
}

static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
			ixgbe_service_event_schedule(adapter);
		}
	}

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
			ixgbe_service_event_schedule(adapter);
		}
	}
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
		ixgbe_service_event_schedule(adapter);
	}
}

static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
					   u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask = (qmask & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (qmask >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
		break;
	default:
		break;
	}
}

static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
					    u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask = (qmask & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (qmask >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
		break;
	default:
		break;
	}
}
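
/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */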
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
				    bool flush)
{
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* don't reenable LSC while waiting for link */
	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
		mask &= ~IXGBE_EIMS_LSC;

	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			mask |= IXGBE_EIMS_GPI_SDP0;
			break;
		case ixgbe_mac_X540:
			mask |= IXGBE_EIMS_TS;
			break;
		default:
			break;
		}
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP1;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		/* fall through */
	case ixgbe_mac_X540:
		mask |= IXGBE_EIMS_ECC;
		mask |= IXGBE_EIMS_MAILBOX;
		break;
	default:
		break;
	}

	if (adapter->hw.mac.type == ixgbe_mac_X540)
		mask |= IXGBE_EIMS_TIMESYNC;

	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
	    !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	if (queues)
		ixgbe_irq_enable_queues(adapter, ~0);
	if (flush)
		IXGBE_WRITE_FLUSH(&adapter->hw);
}
2480
2481static irqreturn_t ixgbe_msix_other(int irq, void *data)
2482{
2483 struct ixgbe_adapter *adapter = data;
2484 struct ixgbe_hw *hw = &adapter->hw;
2485 u32 eicr;
2486
2487
2488
2489
2490
2491
2492
2493 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2494
2495
2496
2497
2498
2499
2500
2501
2502 eicr &= 0xFFFF0000;
2503
2504 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2505
2506 if (eicr & IXGBE_EICR_LSC)
2507 ixgbe_check_lsc(adapter);
2508
2509 if (eicr & IXGBE_EICR_MAILBOX)
2510 ixgbe_msg_task(adapter);
2511
2512 switch (hw->mac.type) {
2513 case ixgbe_mac_82599EB:
2514 case ixgbe_mac_X540:
2515 if (eicr & IXGBE_EICR_ECC)
			e_info(link, "Received unrecoverable ECC err, please "
			       "reboot\n");
2518
2519 if (eicr & IXGBE_EICR_FLOW_DIR) {
2520 int reinit_count = 0;
2521 int i;
2522 for (i = 0; i < adapter->num_tx_queues; i++) {
2523 struct ixgbe_ring *ring = adapter->tx_ring[i];
2524 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
2525 &ring->state))
2526 reinit_count++;
2527 }
2528 if (reinit_count) {
				/* no more flow director interrupts until after init */
2530 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2531 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
2532 ixgbe_service_event_schedule(adapter);
2533 }
2534 }
2535 ixgbe_check_sfp_event(adapter, eicr);
2536 ixgbe_check_overtemp_event(adapter, eicr);
2537 break;
2538 default:
2539 break;
2540 }
2541
2542 ixgbe_check_fan_failure(adapter, eicr);
2543
2544 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2545 ixgbe_ptp_check_pps_event(adapter, eicr);
2546
	/* re-enable the original interrupt state, no lsc, no queues */
2548 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2549 ixgbe_irq_enable(adapter, false, false);
2550
2551 return IRQ_HANDLED;
2552}
2553
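/**
 * ixgbe_msix_clean_rings - MSI-X handler for a single queue vector
 * @irq: interrupt number
 * @data: pointer to the q_vector
 *
 * Simply schedules NAPI; the actual Rx/Tx cleanup happens in ixgbe_poll().
 */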
2554static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
2555{
2556 struct ixgbe_q_vector *q_vector = data;
2557

	/* EIAM disabled interrupts (on this vector) for us */
2560 if (q_vector->rx.ring || q_vector->tx.ring)
2561 napi_schedule(&q_vector->napi);
2562
2563 return IRQ_HANDLED;
2564}
2565
/**
 * ixgbe_poll - NAPI polling callback
 * @napi: structure for representing this polling device
 * @budget: how many packets the driver is allowed to clean
 *
 * This function is used for legacy, MSI and MSI-X NAPI cleanup.
 **/
2573int ixgbe_poll(struct napi_struct *napi, int budget)
2574{
2575 struct ixgbe_q_vector *q_vector =
2576 container_of(napi, struct ixgbe_q_vector, napi);
2577 struct ixgbe_adapter *adapter = q_vector->adapter;
2578 struct ixgbe_ring *ring;
2579 int per_ring_budget;
2580 bool clean_complete = true;
2581
2582#ifdef CONFIG_IXGBE_DCA
2583 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2584 ixgbe_update_dca(q_vector);
2585#endif
2586
2587 ixgbe_for_each_ring(ring, q_vector->tx)
2588 clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
2589
2590 if (!ixgbe_qv_lock_napi(q_vector))
2591 return budget;
2592
	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
2595 if (q_vector->rx.count > 1)
2596 per_ring_budget = max(budget/q_vector->rx.count, 1);
2597 else
2598 per_ring_budget = budget;
2599
2600 ixgbe_for_each_ring(ring, q_vector->rx)
2601 clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
2602 per_ring_budget) < per_ring_budget);
2603
2604 ixgbe_qv_unlock_napi(q_vector);
2605
2606 if (!clean_complete)
2607 return budget;
2608
	/* all work done, exit the polling mode */
2610 napi_complete(napi);
2611 if (adapter->rx_itr_setting & 1)
2612 ixgbe_set_itr(q_vector);
2613 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2614 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
2615
2616 return 0;
2617}
2618
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs requests the previously allocated MSI-X
 * vectors from the kernel, one per queue vector plus one "other" vector.
 **/
2626static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2627{
2628 struct net_device *netdev = adapter->netdev;
2629 int vector, err;
2630 int ri = 0, ti = 0;
2631
2632 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2633 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2634 struct msix_entry *entry = &adapter->msix_entries[vector];
2635
2636 if (q_vector->tx.ring && q_vector->rx.ring) {
2637 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2638 "%s-%s-%d", netdev->name, "TxRx", ri++);
2639 ti++;
2640 } else if (q_vector->rx.ring) {
2641 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2642 "%s-%s-%d", netdev->name, "rx", ri++);
2643 } else if (q_vector->tx.ring) {
2644 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2645 "%s-%s-%d", netdev->name, "tx", ti++);
2646 } else {
			/* skip this unused q_vector */
2648 continue;
2649 }
2650 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
2651 q_vector->name, q_vector);
2652 if (err) {
2653 e_err(probe, "request_irq failed for MSIX interrupt "
2654 "Error: %d\n", err);
2655 goto free_queue_irqs;
2656 }
		/* If Flow Director is enabled, set interrupt affinity */
		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
			/* assign the mask for this irq */
2660 irq_set_affinity_hint(entry->vector,
2661 &q_vector->affinity_mask);
2662 }
2663 }
2664
2665 err = request_irq(adapter->msix_entries[vector].vector,
2666 ixgbe_msix_other, 0, netdev->name, adapter);
2667 if (err) {
2668 e_err(probe, "request_irq for msix_other failed: %d\n", err);
2669 goto free_queue_irqs;
2670 }
2671
2672 return 0;
2673
2674free_queue_irqs:
2675 while (vector) {
2676 vector--;
2677 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
2678 NULL);
2679 free_irq(adapter->msix_entries[vector].vector,
2680 adapter->q_vector[vector]);
2681 }
2682 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2683 pci_disable_msix(adapter->pdev);
2684 kfree(adapter->msix_entries);
2685 adapter->msix_entries = NULL;
2686 return err;
2687}
2688
/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
2694static irqreturn_t ixgbe_intr(int irq, void *data)
2695{
2696 struct ixgbe_adapter *adapter = data;
2697 struct ixgbe_hw *hw = &adapter->hw;
2698 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2699 u32 eicr;
2700
	/*
	 * Workaround for silicon errata #26 on 82598.  Mask the interrupt
	 * before the read of EICR.
	 */
2705 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2706
	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read,
	 * therefore no explicit interrupt disable is necessary */
2709 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2710 if (!eicr) {
		/*
		 * shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM.
		 * finish the workaround of silicon errata on 82598: unmask
		 * the interrupt that we masked before the EICR read.
		 */
2718 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2719 ixgbe_irq_enable(adapter, true, true);
2720 return IRQ_NONE;
2721 }
2722
2723 if (eicr & IXGBE_EICR_LSC)
2724 ixgbe_check_lsc(adapter);
2725
2726 switch (hw->mac.type) {
2727 case ixgbe_mac_82599EB:
2728 ixgbe_check_sfp_event(adapter, eicr);
		/* fall through */
2730 case ixgbe_mac_X540:
2731 if (eicr & IXGBE_EICR_ECC)
2732 e_info(link, "Received unrecoverable ECC err, please "
2733 "reboot\n");
2734 ixgbe_check_overtemp_event(adapter, eicr);
2735 break;
2736 default:
2737 break;
2738 }
2739
2740 ixgbe_check_fan_failure(adapter, eicr);
2741 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2742 ixgbe_ptp_check_pps_event(adapter, eicr);
2743
	/* would disable interrupts here but EIAM disabled it */
2745 napi_schedule(&q_vector->napi);
2746
	/*
	 * re-enable link(maybe) and non-queue interrupts, no flush.
	 * ixgbe_poll will re-enable the queue interrupts
	 */
2751 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2752 ixgbe_irq_enable(adapter, false, false);
2753
2754 return IRQ_HANDLED;
2755}
2756
/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
2764static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2765{
2766 struct net_device *netdev = adapter->netdev;
2767 int err;
2768
2769 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2770 err = ixgbe_request_msix_irqs(adapter);
2771 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
2772 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
2773 netdev->name, adapter);
2774 else
2775 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
2776 netdev->name, adapter);
2777
2778 if (err)
2779 e_err(probe, "request_irq failed, Error %d\n", err);
2780
2781 return err;
2782}
2783
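/**
 * ixgbe_free_irq - free all requested IRQs
 * @adapter: board private structure
 *
 * Releases the legacy/MSI vector, or every MSI-X vector that was
 * actually requested, clearing any affinity hints along the way.
 */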
2784static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2785{
2786 int vector;
2787
2788 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2789 free_irq(adapter->pdev->irq, adapter);
2790 return;
2791 }
2792
2793 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2794 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2795 struct msix_entry *entry = &adapter->msix_entries[vector];
2796
		/* free only the irqs that were actually requested */
2798 if (!q_vector->rx.ring && !q_vector->tx.ring)
2799 continue;
2800
		/* clear the affinity_mask in the IRQ descriptor */
2802 irq_set_affinity_hint(entry->vector, NULL);
2803
2804 free_irq(entry->vector, q_vector);
2805 }
2806
2807 free_irq(adapter->msix_entries[vector++].vector, adapter);
2808}
2809
/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
2814static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2815{
2816 switch (adapter->hw.mac.type) {
2817 case ixgbe_mac_82598EB:
2818 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2819 break;
2820 case ixgbe_mac_82599EB:
2821 case ixgbe_mac_X540:
2822 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2823 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
2824 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2825 break;
2826 default:
2827 break;
2828 }
2829 IXGBE_WRITE_FLUSH(&adapter->hw);
2830 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2831 int vector;
2832
2833 for (vector = 0; vector < adapter->num_q_vectors; vector++)
2834 synchronize_irq(adapter->msix_entries[vector].vector);
2835
2836 synchronize_irq(adapter->msix_entries[vector++].vector);
2837 } else {
2838 synchronize_irq(adapter->pdev->irq);
2839 }
2840}
2841
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
2846static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2847{
2848 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2849
2850 ixgbe_write_eitr(q_vector);
2851
2852 ixgbe_set_ivar(adapter, 0, 0, 0);
2853 ixgbe_set_ivar(adapter, 1, 0, 0);
2854
2855 e_info(hw, "Legacy interrupt IVAR setup done\n");
2856}
2857
/**
 * ixgbe_configure_tx_ring - Configure a Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
2865void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2866 struct ixgbe_ring *ring)
2867{
2868 struct ixgbe_hw *hw = &adapter->hw;
2869 u64 tdba = ring->dma;
2870 int wait_loop = 10;
2871 u32 txdctl = IXGBE_TXDCTL_ENABLE;
2872 u8 reg_idx = ring->reg_idx;
2873
	/* disable queue to avoid issues while updating state */
2875 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
2876 IXGBE_WRITE_FLUSH(hw);
2877
2878 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
2879 (tdba & DMA_BIT_MASK(32)));
2880 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2881 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2882 ring->count * sizeof(union ixgbe_adv_tx_desc));
2883 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2884 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
2885 ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
2886
	/*
	 * set WTHRESH to encourage burst writeback, it should not be set
	 * higher than 1 when ITR is 0 as it could cause false TX hangs
	 *
	 * In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
2897#if IS_ENABLED(CONFIG_BQL)
2898 if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
2899#else
2900 if (!ring->q_vector || (ring->q_vector->itr < 8))
2901#endif
		txdctl |= (1 << 16);	/* WTHRESH = 1 */
	else
		txdctl |= (8 << 16);	/* WTHRESH = 8 */
2905
	/*
	 * Setting PTHRESH to 32 both improves performance
	 * and avoids a TX hang with DFP enabled
	 */
	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */
2912
2913
2914 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2915 ring->atr_sample_rate = adapter->atr_sample_rate;
2916 ring->atr_count = 0;
2917 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
2918 } else {
2919 ring->atr_sample_rate = 0;
2920 }
2921
	/* initialize XPS */
2923 if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
2924 struct ixgbe_q_vector *q_vector = ring->q_vector;
2925
2926 if (q_vector)
2927 netif_set_xps_queue(adapter->netdev,
2928 &q_vector->affinity_mask,
2929 ring->queue_index);
2930 }
2931
2932 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
2933
2934
2935 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2936
	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2938 if (hw->mac.type == ixgbe_mac_82598EB &&
2939 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2940 return;
2941
	/* poll to verify queue is enabled */
2943 do {
2944 usleep_range(1000, 2000);
2945 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2946 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2947 if (!wait_loop)
2948 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
2949}
2950
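/**
 * ixgbe_setup_mtqc - program the multiple Tx queue control register
 * @adapter: board private structure
 *
 * Selects the Tx queue layout based on the number of traffic classes
 * and whether SR-IOV/VMDq is enabled; 82598 needs no MTQC setup.
 */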
2951static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2952{
2953 struct ixgbe_hw *hw = &adapter->hw;
2954 u32 rttdcs, mtqc;
2955 u8 tcs = netdev_get_num_tc(adapter->netdev);
2956
2957 if (hw->mac.type == ixgbe_mac_82598EB)
2958 return;
2959
	/* disable the arbiter while setting MTQC */
2961 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2962 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2963 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2964
2965
2966 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2967 mtqc = IXGBE_MTQC_VT_ENA;
2968 if (tcs > 4)
2969 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2970 else if (tcs > 1)
2971 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2972 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
2973 mtqc |= IXGBE_MTQC_32VF;
2974 else
2975 mtqc |= IXGBE_MTQC_64VF;
2976 } else {
2977 if (tcs > 4)
2978 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2979 else if (tcs > 1)
2980 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2981 else
2982 mtqc = IXGBE_MTQC_64Q_1PB;
2983 }
2984
2985 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
2986
	/* Enable Security TX Buffer IFG for multiple pb */
2988 if (tcs) {
2989 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
2990 sectx |= IXGBE_SECTX_DCB;
2991 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
2992 }
2993
	/* re-enable the arbiter */
2995 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2996 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2997}
2998
/**
 * ixgbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
3005static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
3006{
3007 struct ixgbe_hw *hw = &adapter->hw;
3008 u32 dmatxctl;
3009 u32 i;
3010
3011 ixgbe_setup_mtqc(adapter);
3012
3013 if (hw->mac.type != ixgbe_mac_82598EB) {
3014
3015 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3016 dmatxctl |= IXGBE_DMATXCTL_TE;
3017 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3018 }
3019
3020
3021 for (i = 0; i < adapter->num_tx_queues; i++)
3022 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
3023}
3024
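/*
 * SRRCTL.DROP_EN lets a ring drop frames when it runs out of receive
 * descriptors instead of blocking the shared packet buffer; the two
 * helpers below toggle that bit for a single ring.
 */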
3025static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
3026 struct ixgbe_ring *ring)
3027{
3028 struct ixgbe_hw *hw = &adapter->hw;
3029 u8 reg_idx = ring->reg_idx;
3030 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3031
3032 srrctl |= IXGBE_SRRCTL_DROP_EN;
3033
3034 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3035}
3036
3037static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
3038 struct ixgbe_ring *ring)
3039{
3040 struct ixgbe_hw *hw = &adapter->hw;
3041 u8 reg_idx = ring->reg_idx;
3042 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
3043
3044 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
3045
3046 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3047}
3048
3049#ifdef CONFIG_IXGBE_DCB
3050void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3051#else
3052static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
3053#endif
3054{
3055 int i;
3056 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
3057
3058 if (adapter->ixgbe_ieee_pfc)
3059 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
3060
	/*
	 * We should set the drop enable bit if:
	 *  SR-IOV is enabled
	 *   or
	 *  Number of Rx queues > 1 and flow control is disabled
	 *
	 *  This allows us to avoid head of line blocking for security
	 *  and performance reasons.
	 */
3070 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
3071 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
3072 for (i = 0; i < adapter->num_rx_queues; i++)
3073 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
3074 } else {
3075 for (i = 0; i < adapter->num_rx_queues; i++)
3076 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
3077 }
3078}
3079
3080#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3081
3082static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
3083 struct ixgbe_ring *rx_ring)
3084{
3085 struct ixgbe_hw *hw = &adapter->hw;
3086 u32 srrctl;
3087 u8 reg_idx = rx_ring->reg_idx;
3088
3089 if (hw->mac.type == ixgbe_mac_82598EB) {
3090 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
		/*
		 * if VMDq is not active we must program one srrctl register
		 * per RSS queue since we have enabled RDRXCTL.MVMEN
		 */
3096 reg_idx &= mask;
3097 }
3098
	/* configure header buffer length, needed for RSC */
3100 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
3101
3102
3103 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3104
3105
3106 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3107
3108 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
3109}
3110
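/**
 * ixgbe_setup_mrqc - configure the receive queueing/RSS registers
 * @adapter: board private structure
 *
 * Programs the RSS key and redirection table and selects the MRQC
 * mode that matches the DCB/SR-IOV/RSS configuration in use.
 */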
3111static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
3112{
3113 struct ixgbe_hw *hw = &adapter->hw;
3114 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
3115 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
3116 0x6A3E67EA, 0x14364D17, 0x3BED200D};
3117 u32 mrqc = 0, reta = 0;
3118 u32 rxcsum;
3119 int i, j;
3120 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
3121
	/*
	 * Program table for at least 2 queues w/ SR-IOV so that VFs can
	 * make full use of any rings they may have.  We will use the
	 * PSRTYPE register to control how many rings we use within the PF.
	 */
3127 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
3128 rss_i = 2;
3129
	/* Fill out hash function seeds */
3131 for (i = 0; i < 10; i++)
3132 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
3133
	/* Fill out redirection table */
3135 for (i = 0, j = 0; i < 128; i++, j++) {
3136 if (j == rss_i)
3137 j = 0;
		/* accumulate one queue index byte at a time; every fourth
		 * iteration the assembled dword is written to the table */
3140 reta = (reta << 8) | (j * 0x11);
3141 if ((i & 3) == 3)
3142 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3143 }
3144
	/* Disable indicating checksum in descriptor, enables RSS hash */
3146 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3147 rxcsum |= IXGBE_RXCSUM_PCSD;
3148 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3149
3150 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3151 if (adapter->ring_feature[RING_F_RSS].mask)
3152 mrqc = IXGBE_MRQC_RSSEN;
3153 } else {
3154 u8 tcs = netdev_get_num_tc(adapter->netdev);
3155
3156 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3157 if (tcs > 4)
3158 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
3159 else if (tcs > 1)
3160 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
3161 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
3162 mrqc = IXGBE_MRQC_VMDQRSS32EN;
3163 else
3164 mrqc = IXGBE_MRQC_VMDQRSS64EN;
3165 } else {
3166 if (tcs > 4)
3167 mrqc = IXGBE_MRQC_RTRSS8TCEN;
3168 else if (tcs > 1)
3169 mrqc = IXGBE_MRQC_RTRSS4TCEN;
3170 else
3171 mrqc = IXGBE_MRQC_RSSEN;
3172 }
3173 }
3174
	/* Perform hash on these packet types */
3176 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
3177 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
3178 IXGBE_MRQC_RSS_FIELD_IPV6 |
3179 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3180
3181 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3182 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3183 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3184 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3185
3186 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3187}
3188
/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 **/
3194static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
3195 struct ixgbe_ring *ring)
3196{
3197 struct ixgbe_hw *hw = &adapter->hw;
3198 u32 rscctrl;
3199 u8 reg_idx = ring->reg_idx;
3200
3201 if (!ring_is_rsc_enabled(ring))
3202 return;
3203
3204 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
3205 rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	 * we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65536
	 */
3211 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3212 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
3213}
3214
3215#define IXGBE_MAX_RX_DESC_POLL 10
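/**
 * ixgbe_rx_desc_queue_enable - wait for an Rx ring to report enabled
 * @adapter: board private structure
 * @ring: ring whose RXDCTL.ENABLE bit is polled
 */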
3216static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3217 struct ixgbe_ring *ring)
3218{
3219 struct ixgbe_hw *hw = &adapter->hw;
3220 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3221 u32 rxdctl;
3222 u8 reg_idx = ring->reg_idx;
3223
	/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3225 if (hw->mac.type == ixgbe_mac_82598EB &&
3226 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3227 return;
3228
3229 do {
3230 usleep_range(1000, 2000);
3231 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3232 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3233
3234 if (!wait_loop) {
3235 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3236 "the polling period\n", reg_idx);
3237 }
3238}
3239
3240void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3241 struct ixgbe_ring *ring)
3242{
3243 struct ixgbe_hw *hw = &adapter->hw;
3244 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3245 u32 rxdctl;
3246 u8 reg_idx = ring->reg_idx;
3247
3248 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3249 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3250
3251
3252 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3253
3254 if (hw->mac.type == ixgbe_mac_82598EB &&
3255 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3256 return;
3257
	/* the hardware may take up to 100us to really disable the rx queue */
3259 do {
3260 udelay(10);
3261 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3262 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3263
3264 if (!wait_loop) {
3265 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3266 "the polling period\n", reg_idx);
3267 }
3268}
3269
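/**
 * ixgbe_configure_rx_ring - Configure an Rx descriptor ring after reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 */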
3270void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3271 struct ixgbe_ring *ring)
3272{
3273 struct ixgbe_hw *hw = &adapter->hw;
3274 u64 rdba = ring->dma;
3275 u32 rxdctl;
3276 u8 reg_idx = ring->reg_idx;
3277
	/* disable queue to avoid issues while updating state */
3279 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3280 ixgbe_disable_rx_queue(adapter, ring);
3281
3282 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3283 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3284 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3285 ring->count * sizeof(union ixgbe_adv_rx_desc));
3286 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3287 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
3288 ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
3289
3290 ixgbe_configure_srrctl(adapter, ring);
3291 ixgbe_configure_rscctl(adapter, ring);
3292
3293 if (hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * enable cache line friendly hardware writes:
		 * PTHRESH=32 descriptors (half the internal cache),
		 * this also removes ugly rx_no_buffer_count increment
		 * HTHRESH=4 descriptors (to minimize latency on fetch)
		 * WTHRESH=8 descriptors (to encourage burst writeback)
		 */
3301 rxdctl &= ~0x3FFFFF;
3302 rxdctl |= 0x080420;
3303 }
3304
3305
3306 rxdctl |= IXGBE_RXDCTL_ENABLE;
3307 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3308
3309 ixgbe_rx_desc_queue_enable(adapter, ring);
3310 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
3311}
3312
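/**
 * ixgbe_setup_psrtype - program the packet split receive type registers
 * @adapter: board private structure
 *
 * PSRTYPE must be initialized on all parts other than 82598.
 */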
3313static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3314{
3315 struct ixgbe_hw *hw = &adapter->hw;
3316 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
3317 int p;
3318
3319
3320 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3321 IXGBE_PSRTYPE_UDPHDR |
3322 IXGBE_PSRTYPE_IPV4HDR |
3323 IXGBE_PSRTYPE_L2HDR |
3324 IXGBE_PSRTYPE_IPV6HDR;
3325
3326 if (hw->mac.type == ixgbe_mac_82598EB)
3327 return;
3328
3329 if (rss_i > 3)
3330 psrtype |= 2 << 29;
3331 else if (rss_i > 1)
3332 psrtype |= 1 << 29;
3333
3334 for (p = 0; p < adapter->num_rx_pools; p++)
3335 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
3336 psrtype);
3337}
3338
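/**
 * ixgbe_configure_virtualization - program the SR-IOV/VMDq pool registers
 * @adapter: board private structure
 */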
3339static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3340{
3341 struct ixgbe_hw *hw = &adapter->hw;
3342 u32 reg_offset, vf_shift;
3343 u32 gcr_ext, vmdctl;
3344 int i;
3345
3346 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3347 return;
3348
3349 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3350 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
3351 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
3352 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
3353 vmdctl |= IXGBE_VT_CTL_REPLEN;
3354 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
3355
3356 vf_shift = VMDQ_P(0) % 32;
3357 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
3358
	/* Enable only the PF's pool for Tx/Rx */
3360 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
3361 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
3362 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
3363 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
3364 if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
3365 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3366
	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
3368 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
3369
	/*
	 * Set up VF register offsets for selected VT Mode,
	 * i.e. 32 or 64 VFs for SR-IOV
	 */
3374 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3375 case IXGBE_82599_VMDQ_8Q_MASK:
3376 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
3377 break;
3378 case IXGBE_82599_VMDQ_4Q_MASK:
3379 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
3380 break;
3381 default:
3382 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
3383 break;
3384 }
3385
3386 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3387
3388
	/* Enable MAC Anti-Spoofing */
3390 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
3391 adapter->num_vfs);
	/* apply per-VF spoof checking settings */
3393 for (i = 0; i < adapter->num_vfs; i++) {
3394 if (!adapter->vfinfo[i].spoofchk_enabled)
3395 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
3396 }
3397}
3398
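/**
 * ixgbe_set_rx_buffer_len - program the maximum receive frame size
 * @adapter: board private structure
 *
 * Also propagates the adapter-wide RSC setting to each Rx ring.
 */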
3399static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3400{
3401 struct ixgbe_hw *hw = &adapter->hw;
3402 struct net_device *netdev = adapter->netdev;
3403 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3404 struct ixgbe_ring *rx_ring;
3405 int i;
3406 u32 mhadd, hlreg0;
3407
3408#ifdef IXGBE_FCOE
	/* adjust max frame to be able to do baby jumbo for FCoE */
3410 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3411 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3412 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3413
3414#endif
3415
3416
3417 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3418 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
3419
3420 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3421 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3422 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3423 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3424
3425 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3426 }
3427
3428 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
3430 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3431 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3432
	/*
	 * propagate the RSC enable flag to each Rx ring so the per-ring
	 * state matches the adapter-wide setting
	 */
3437 for (i = 0; i < adapter->num_rx_queues; i++) {
3438 rx_ring = adapter->rx_ring[i];
3439 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3440 set_ring_rsc_enabled(rx_ring);
3441 else
3442 clear_ring_rsc_enabled(rx_ring);
3443 }
3444}
3445
3446static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3447{
3448 struct ixgbe_hw *hw = &adapter->hw;
3449 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3450
3451 switch (hw->mac.type) {
3452 case ixgbe_mac_82598EB:
		/*
		 * For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 *
		 * also, the manual doesn't mention it clearly but DCA
		 * hints will only use queue 0's tags unless this bit is set
		 */
3463 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3464 break;
3465 case ixgbe_mac_82599EB:
3466 case ixgbe_mac_X540:
		/* Disable RSC for ACK packets */
3468 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3469 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3470 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
		/* hardware requires some bits to be set by default */
3472 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3473 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3474 break;
3475 default:
		/* unsupported MAC type, nothing to program */
3477 return;
3478 }
3479
3480 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3481}
3482
/**
 * ixgbe_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
3489static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3490{
3491 struct ixgbe_hw *hw = &adapter->hw;
3492 int i;
3493 u32 rxctrl;
3494
	/* disable receives while we set up the descriptors */
3496 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3497 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3498
3499 ixgbe_setup_psrtype(adapter);
3500 ixgbe_setup_rdrxctl(adapter);
3501
3502
3503 ixgbe_setup_mrqc(adapter);
3504
	/* set_rx_buffer_len must be called before ring initialization */
3506 ixgbe_set_rx_buffer_len(adapter);
3507
	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
3512 for (i = 0; i < adapter->num_rx_queues; i++)
3513 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
3514
3515
3516 if (hw->mac.type == ixgbe_mac_82598EB)
3517 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3518
3519
3520 rxctrl |= IXGBE_RXCTRL_RXEN;
3521 hw->mac.ops.enable_rx_dma(hw, rxctrl);
3522}
3523
3524static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
3525 __be16 proto, u16 vid)
3526{
3527 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3528 struct ixgbe_hw *hw = &adapter->hw;
3529
3530
3531 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
3532 set_bit(vid, adapter->active_vlans);
3533
3534 return 0;
3535}
3536
3537static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
3538 __be16 proto, u16 vid)
3539{
3540 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3541 struct ixgbe_hw *hw = &adapter->hw;
3542
3543
3544 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
3545 clear_bit(vid, adapter->active_vlans);
3546
3547 return 0;
3548}
3549
/**
 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
 * @adapter: driver data
 */
3554static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3555{
3556 struct ixgbe_hw *hw = &adapter->hw;
3557 u32 vlnctrl;
3558
3559 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3560 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3561 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3562}
3563
/**
 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
 * @adapter: driver data
 */
3568static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3569{
3570 struct ixgbe_hw *hw = &adapter->hw;
3571 u32 vlnctrl;
3572
3573 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3574 vlnctrl |= IXGBE_VLNCTRL_VFE;
3575 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3576 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3577}
3578
/**
 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
 * @adapter: driver data
 */
3583static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3584{
3585 struct ixgbe_hw *hw = &adapter->hw;
3586 u32 vlnctrl;
3587 int i, j;
3588
3589 switch (hw->mac.type) {
3590 case ixgbe_mac_82598EB:
3591 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3592 vlnctrl &= ~IXGBE_VLNCTRL_VME;
3593 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3594 break;
3595 case ixgbe_mac_82599EB:
3596 case ixgbe_mac_X540:
3597 for (i = 0; i < adapter->num_rx_queues; i++) {
3598 j = adapter->rx_ring[i]->reg_idx;
3599 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3600 vlnctrl &= ~IXGBE_RXDCTL_VME;
3601 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3602 }
3603 break;
3604 default:
3605 break;
3606 }
3607}
3608
/**
 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
 * @adapter: driver data
 */
3613static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3614{
3615 struct ixgbe_hw *hw = &adapter->hw;
3616 u32 vlnctrl;
3617 int i, j;
3618
3619 switch (hw->mac.type) {
3620 case ixgbe_mac_82598EB:
3621 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3622 vlnctrl |= IXGBE_VLNCTRL_VME;
3623 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3624 break;
3625 case ixgbe_mac_82599EB:
3626 case ixgbe_mac_X540:
3627 for (i = 0; i < adapter->num_rx_queues; i++) {
3628 j = adapter->rx_ring[i]->reg_idx;
3629 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3630 vlnctrl |= IXGBE_RXDCTL_VME;
3631 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3632 }
3633 break;
3634 default:
3635 break;
3636 }
3637}
3638
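/**
 * ixgbe_restore_vlan - re-populate the VLAN filter table after a reset
 * @adapter: board private structure
 */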
3639static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3640{
3641 u16 vid;
3642
3643 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
3644
3645 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
3646 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
3647}
3648
/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
3658static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3659{
3660 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3661 struct ixgbe_hw *hw = &adapter->hw;
3662 unsigned int rar_entries = hw->mac.num_rar_entries - 1;
3663 int count = 0;
3664
	/* In SR-IOV mode significantly less RAR entries are available */
3666 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3667 rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
3668
3669
3670 if (netdev_uc_count(netdev) > rar_entries)
3671 return -ENOMEM;
3672
3673 if (!netdev_uc_empty(netdev)) {
3674 struct netdev_hw_addr *ha;
3675
3676 if (!hw->mac.ops.set_rar)
3677 return -ENOMEM;
3678
3679 netdev_for_each_uc_addr(ha, netdev) {
3680 if (!rar_entries)
3681 break;
3682 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3683 VMDQ_P(0), IXGBE_RAH_AV);
3684 count++;
3685 }
3686 }
3687
3688 for (; rar_entries > 0 ; rar_entries--)
3689 hw->mac.ops.clear_rar(hw, rar_entries);
3690
3691 return count;
3692}
3693
/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast/multicast
 * address lists or the network interface flags are updated.  This routine
 * is responsible for configuring the hardware for proper unicast,
 * multicast and promiscuous mode.
 **/
3703void ixgbe_set_rx_mode(struct net_device *netdev)
3704{
3705 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3706 struct ixgbe_hw *hw = &adapter->hw;
3707 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3708 int count;
3709
	/* Check for Promiscuous and All Multicast modes */
3712 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3713
	/* set all bits that we expect to always be set */
	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	fctrl |= IXGBE_FCTRL_PMCF;
3719
3720
3721 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3722
3723 if (netdev->flags & IFF_PROMISC) {
3724 hw->addr_ctrl.user_set_promisc = true;
3725 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3726 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
		/* don't hardware filter vlans in promisc mode */
3728 ixgbe_vlan_filter_disable(adapter);
3729 } else {
3730 if (netdev->flags & IFF_ALLMULTI) {
3731 fctrl |= IXGBE_FCTRL_MPE;
3732 vmolr |= IXGBE_VMOLR_MPE;
3733 } else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
3739 hw->mac.ops.update_mc_addr_list(hw, netdev);
3740 vmolr |= IXGBE_VMOLR_ROMPE;
3741 }
3742 ixgbe_vlan_filter_enable(adapter);
3743 hw->addr_ctrl.user_set_promisc = false;
3744 }
3745
	/*
	 * Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
3751 count = ixgbe_write_uc_addr_list(netdev);
3752 if (count < 0) {
3753 fctrl |= IXGBE_FCTRL_UPE;
3754 vmolr |= IXGBE_VMOLR_ROPE;
3755 }
3756
3757 if (adapter->num_vfs)
3758 ixgbe_restore_vf_multicasts(adapter);
3759
3760 if (hw->mac.type != ixgbe_mac_82598EB) {
3761 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
3762 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3763 IXGBE_VMOLR_ROPE);
3764 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
3765 }
3766
	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by the PROMISC logic above */
		fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
			  IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
			  IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */
		/* stop discarding flow control pause frames */
		fctrl &= ~(IXGBE_FCTRL_DPF);
3777 }
3778
3779 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3780
3781 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3782 ixgbe_vlan_strip_enable(adapter);
3783 else
3784 ixgbe_vlan_strip_disable(adapter);
3785}
3786
3787static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3788{
3789 int q_idx;
3790
3791 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
3792 ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
3793 napi_enable(&adapter->q_vector[q_idx]->napi);
3794 }
3795}
3796
3797static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3798{
3799 int q_idx;
3800
3801 local_bh_disable();
3802 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
3803 napi_disable(&adapter->q_vector[q_idx]->napi);
3804 while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
3805 pr_info("QV %d locked\n", q_idx);
3806 mdelay(1);
3807 }
3808 }
3809 local_bh_enable();
3810}
3811
3812#ifdef CONFIG_IXGBE_DCB
/*
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * Called during hardware configuration when DCB is in use: programs the
 * per-traffic-class credits and priority mappings into the MAC.
 */
3821static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3822{
3823 struct ixgbe_hw *hw = &adapter->hw;
3824 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3825
3826 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3827 if (hw->mac.type == ixgbe_mac_82598EB)
3828 netif_set_gso_max_size(adapter->netdev, 65536);
3829 return;
3830 }
3831
3832 if (hw->mac.type == ixgbe_mac_82598EB)
3833 netif_set_gso_max_size(adapter->netdev, 32768);
3834
3835#ifdef IXGBE_FCOE
3836 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3837 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3838#endif
3839
3840
3841 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
3842 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3843 DCB_TX_CONFIG);
3844 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3845 DCB_RX_CONFIG);
3846 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
3847 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
3848 ixgbe_dcb_hw_ets(&adapter->hw,
3849 adapter->ixgbe_ieee_ets,
3850 max_frame);
3851 ixgbe_dcb_hw_pfc_config(&adapter->hw,
3852 adapter->ixgbe_ieee_pfc->pfc_en,
3853 adapter->ixgbe_ieee_ets->prio_tc);
3854 }
3855
3856
3857 if (hw->mac.type != ixgbe_mac_82598EB) {
3858 u32 msb = 0;
3859 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
3860
3861 while (rss_i) {
3862 msb++;
3863 rss_i >>= 1;
3864 }
3865
3866
3867 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
3868 }
3869}
3870#endif
3871
/* Additional bittime to account for IXGBE framing */
#define IXGBE_ETH_FRAMING 20

/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 *
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate
 */
3881static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
3882{
3883 struct ixgbe_hw *hw = &adapter->hw;
3884 struct net_device *dev = adapter->netdev;
3885 int link, tc, kb, marker;
3886 u32 dv_id, rx_pba;
3887
	/* Calculate max LAN frame size */
3889 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
3890
3891#ifdef IXGBE_FCOE
	/* FCoE traffic class uses FCOE jumbo frames */
3893 if ((dev->features & NETIF_F_FCOE_MTU) &&
3894 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
3895 (pb == ixgbe_fcoe_get_tc(adapter)))
3896 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3897
3898#endif

	/* Calculate delay value for device */
3900 switch (hw->mac.type) {
3901 case ixgbe_mac_X540:
3902 dv_id = IXGBE_DV_X540(link, tc);
3903 break;
3904 default:
3905 dv_id = IXGBE_DV(link, tc);
3906 break;
3907 }
3908
	/* Loopback switch introduces additional latency */
3910 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3911 dv_id += IXGBE_B2BT(tc);
3912
	/* Delay value is calculated in bit times convert to KB */
3914 kb = IXGBE_BT2KB(dv_id);
3915 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
3916
3917 marker = rx_pba - kb;
3918
	/* It is possible that the packet buffer is not large enough
	 * to provide required headroom. In this case throw an error
	 * to user and do the best we can.
	 */
3923 if (marker < 0) {
		e_warn(drv, "Packet Buffer(%i) can not provide enough "
			    "headroom to support flow control. "
			    "Decrease MTU or number of traffic classes\n", pb);
3927 marker = tc + 1;
3928 }
3929
3930 return marker;
3931}
3932
/**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 *
 * @adapter: board private structure to calculate for
 */
3939static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
3940{
3941 struct ixgbe_hw *hw = &adapter->hw;
3942 struct net_device *dev = adapter->netdev;
3943 int tc;
3944 u32 dv_id;
3945
3946
3947 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
3948
3949
3950 switch (hw->mac.type) {
3951 case ixgbe_mac_X540:
3952 dv_id = IXGBE_LOW_DV_X540(tc);
3953 break;
3954 default:
3955 dv_id = IXGBE_LOW_DV(tc);
3956 break;
3957 }
3958
3959
3960 return IXGBE_BT2KB(dv_id);
3961}
3962
/*
 * ixgbe_pbthresh_setup - calculate and set up the high and low water marks
 */
3966static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
3967{
3968 struct ixgbe_hw *hw = &adapter->hw;
3969 int num_tc = netdev_get_num_tc(adapter->netdev);
3970 int i;
3971
3972 if (!num_tc)
3973 num_tc = 1;
3974
3975 hw->fc.low_water = ixgbe_lpbthresh(adapter);
3976
3977 for (i = 0; i < num_tc; i++) {
3978 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
3979
		/* Low water marks must not be larger than high water marks */
3981 if (hw->fc.low_water > hw->fc.high_water[i])
3982 hw->fc.low_water = 0;
3983 }
3984}
3985
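/**
 * ixgbe_configure_pb - configure packet buffers and flow control thresholds
 * @adapter: board private structure
 *
 * Reserves packet buffer headroom for flow director filters when they
 * are enabled, then derives the high and low water marks.
 */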
3986static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
3987{
3988 struct ixgbe_hw *hw = &adapter->hw;
3989 int hdrm;
3990 u8 tc = netdev_get_num_tc(adapter->netdev);
3991
3992 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3993 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3994 hdrm = 32 << adapter->fdir_pballoc;
3995 else
3996 hdrm = 0;
3997
3998 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
3999 ixgbe_pbthresh_setup(adapter);
4000}
4001
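/**
 * ixgbe_fdir_filter_restore - re-program cached flow director filters
 * @adapter: board private structure
 */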
4002static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
4003{
4004 struct ixgbe_hw *hw = &adapter->hw;
4005 struct hlist_node *node2;
4006 struct ixgbe_fdir_filter *filter;
4007
4008 spin_lock(&adapter->fdir_perfect_lock);
4009
4010 if (!hlist_empty(&adapter->fdir_filter_list))
4011 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
4012
4013 hlist_for_each_entry_safe(filter, node2,
4014 &adapter->fdir_filter_list, fdir_node) {
4015 ixgbe_fdir_write_perfect_filter_82599(hw,
4016 &filter->filter,
4017 filter->sw_idx,
4018 (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
4019 IXGBE_FDIR_DROP_QUEUE :
4020 adapter->rx_ring[filter->action]->reg_idx);
4021 }
4022
4023 spin_unlock(&adapter->fdir_perfect_lock);
4024}
4025
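/**
 * ixgbe_configure - central hardware configuration path
 * @adapter: board private structure
 *
 * Called when the interface is brought up or reinitialized; programs
 * packet buffers, DCB, virtualization, filtering and the Tx/Rx units.
 */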
4026static void ixgbe_configure(struct ixgbe_adapter *adapter)
4027{
4028 struct ixgbe_hw *hw = &adapter->hw;
4029
4030 ixgbe_configure_pb(adapter);
4031#ifdef CONFIG_IXGBE_DCB
4032 ixgbe_configure_dcb(adapter);
4033#endif
4034
	/*
	 * We must restore virtualization before VLANs or else
	 * the VLVF registers will not be populated
	 */
4038 ixgbe_configure_virtualization(adapter);
4039
4040 ixgbe_set_rx_mode(adapter->netdev);
4041 ixgbe_restore_vlan(adapter);
4042
4043 switch (hw->mac.type) {
4044 case ixgbe_mac_82599EB:
4045 case ixgbe_mac_X540:
4046 hw->mac.ops.disable_rx_buff(hw);
4047 break;
4048 default:
4049 break;
4050 }
4051
4052 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
4053 ixgbe_init_fdir_signature_82599(&adapter->hw,
4054 adapter->fdir_pballoc);
4055 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
4056 ixgbe_init_fdir_perfect_82599(&adapter->hw,
4057 adapter->fdir_pballoc);
4058 ixgbe_fdir_filter_restore(adapter);
4059 }
4060
4061 switch (hw->mac.type) {
4062 case ixgbe_mac_82599EB:
4063 case ixgbe_mac_X540:
4064 hw->mac.ops.enable_rx_buff(hw);
4065 break;
4066 default:
4067 break;
4068 }
4069
4070#ifdef IXGBE_FCOE
	/* configure FCoE L2 filters, redirection table, and Rx control */
4072 ixgbe_configure_fcoe(adapter);
4073
4074#endif
4075 ixgbe_configure_tx(adapter);
4076 ixgbe_configure_rx(adapter);
4077}
4078
4079static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
4080{
4081 switch (hw->phy.type) {
4082 case ixgbe_phy_sfp_avago:
4083 case ixgbe_phy_sfp_ftl:
4084 case ixgbe_phy_sfp_intel:
4085 case ixgbe_phy_sfp_unknown:
4086 case ixgbe_phy_sfp_passive_tyco:
4087 case ixgbe_phy_sfp_passive_unknown:
4088 case ixgbe_phy_sfp_active_unknown:
4089 case ixgbe_phy_sfp_ftl_active:
4090 return true;
4091 case ixgbe_phy_nl:
4092 if (hw->mac.type == ixgbe_mac_82598EB)
			return true;
		/* fall through */
	default:
4095 return false;
4096 }
4097}
4098
/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 **/
4103static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
4104{
	/*
	 * We are assuming the worst case scenario here, and that
	 * is that an SFP was inserted/removed after the reset
	 * but before SFP detection was enabled.  As such the best
	 * solution is to just start searching as soon as we start
	 */
4111 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
4112 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
4113
4114 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
4115}
4116
4117
/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
4123static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
4124{
4125 u32 speed;
4126 bool autoneg, link_up = false;
4127 u32 ret = IXGBE_ERR_LINK_SETUP;
4128
4129 if (hw->mac.ops.check_link)
4130 ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
4131
4132 if (ret)
4133 goto link_cfg_out;
4134
4135 speed = hw->phy.autoneg_advertised;
4136 if ((!speed) && (hw->mac.ops.get_link_capabilities))
4137 ret = hw->mac.ops.get_link_capabilities(hw, &speed,
4138 &autoneg);
4139 if (ret)
4140 goto link_cfg_out;
4141
4142 if (hw->mac.ops.setup_link)
4143 ret = hw->mac.ops.setup_link(hw, speed, link_up);
4144link_cfg_out:
4145 return ret;
4146}
4147
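/**
 * ixgbe_setup_gpie - program the general purpose interrupt enable register
 * @adapter: board private structure
 *
 * Configures MSI-X/legacy auto-masking, the VT mode and the SDP/thermal
 * interrupt causes the hardware should raise.
 */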
4148static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
4149{
4150 struct ixgbe_hw *hw = &adapter->hw;
4151 u32 gpie = 0;
4152
4153 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4154 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
4155 IXGBE_GPIE_OCD;
4156 gpie |= IXGBE_GPIE_EIAME;
		/*
		 * use EIAM to auto-mask when MSI-X interrupt is asserted
		 * this saves a register write for every interrupt
		 */
4161 switch (hw->mac.type) {
4162 case ixgbe_mac_82598EB:
4163 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4164 break;
4165 case ixgbe_mac_82599EB:
4166 case ixgbe_mac_X540:
4167 default:
4168 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4169 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4170 break;
4171 }
4172 } else {
		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
		 * specifically only auto mask tx and rx interrupts */
4175 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4176 }
4177
	/* XXX: to interrupt immediately for EICS writes, enable this */
	/* gpie |= IXGBE_GPIE_EIMEN; */

4181 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
4182 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
4183
4184 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
4185 case IXGBE_82599_VMDQ_8Q_MASK:
4186 gpie |= IXGBE_GPIE_VTMODE_16;
4187 break;
4188 case IXGBE_82599_VMDQ_4Q_MASK:
4189 gpie |= IXGBE_GPIE_VTMODE_32;
4190 break;
4191 default:
4192 gpie |= IXGBE_GPIE_VTMODE_64;
4193 break;
4194 }
4195 }
4196
	/* Enable Thermal over heat sensor interrupt */
4198 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
4199 switch (adapter->hw.mac.type) {
4200 case ixgbe_mac_82599EB:
4201 gpie |= IXGBE_SDP0_GPIEN;
4202 break;
4203 case ixgbe_mac_X540:
4204 gpie |= IXGBE_EIMS_TS;
4205 break;
4206 default:
4207 break;
4208 }
4209 }
4210
	/* Enable fan failure interrupt */
4212 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
4213 gpie |= IXGBE_SDP1_GPIEN;
4214
4215 if (hw->mac.type == ixgbe_mac_82599EB) {
4216 gpie |= IXGBE_SDP1_GPIEN;
4217 gpie |= IXGBE_SDP2_GPIEN;
4218 }
4219
4220 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4221}
4222
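/**
 * ixgbe_up_complete - finish bringing the interface up
 * @adapter: board private structure
 *
 * Enables interrupts and NAPI, configures the link, starts the Tx
 * queues and kicks the service timer.
 */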
4223static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
4224{
4225 struct ixgbe_hw *hw = &adapter->hw;
4226 int err;
4227 u32 ctrl_ext;
4228
4229 ixgbe_get_hw_control(adapter);
4230 ixgbe_setup_gpie(adapter);
4231
4232 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4233 ixgbe_configure_msix(adapter);
4234 else
4235 ixgbe_configure_msi_and_legacy(adapter);
4236
	/* enable the optics for 82599 SFP+ fiber */
4238 if (hw->mac.ops.enable_tx_laser)
4239 hw->mac.ops.enable_tx_laser(hw);
4240
4241 clear_bit(__IXGBE_DOWN, &adapter->state);
4242 ixgbe_napi_enable_all(adapter);
4243
4244 if (ixgbe_is_sfp(hw)) {
4245 ixgbe_sfp_link_config(adapter);
4246 } else {
4247 err = ixgbe_non_sfp_link_config(hw);
4248 if (err)
4249 e_err(probe, "link_config FAILED %d\n", err);
4250 }
4251
	/* clear any pending interrupts, may auto mask */
4253 IXGBE_READ_REG(hw, IXGBE_EICR);
4254 ixgbe_irq_enable(adapter, true, true);
4255
	/*
	 * If this adapter has a fan, check to see if we had a failure
	 * before we enabled the interrupt.
	 */
4260 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
4261 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4262 if (esdp & IXGBE_ESDP_SDP1)
4263 e_crit(drv, "Fan has stopped, replace the adapter\n");
4264 }
4265
4266
4267 netif_tx_start_all_queues(adapter->netdev);
4268
	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
4271 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4272 adapter->link_check_timeout = jiffies;
4273 mod_timer(&adapter->service_timer, jiffies);
4274
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
4276 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4277 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4278 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4279}
4280
4281void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
4282{
4283 WARN_ON(in_interrupt());
	/* put off any impending NetWatchDogTimeout */
4285 adapter->netdev->trans_start = jiffies;
4286
4287 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
4288 usleep_range(1000, 2000);
4289 ixgbe_down(adapter);
4290
	/*
	 * If SR-IOV enabled then wait a bit before bringing the adapter
	 * back up to give the VFs time to respond to the reset.  The
	 * two second wait is based upon the watchdog timer cycle of
	 * the VF driver.
	 */
4296 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4297 msleep(2000);
4298 ixgbe_up(adapter);
4299 clear_bit(__IXGBE_RESETTING, &adapter->state);
4300}
4301
4302void ixgbe_up(struct ixgbe_adapter *adapter)
4303{
	/* hardware has been reset, we need to reload some things */
4305 ixgbe_configure(adapter);
4306
4307 ixgbe_up_complete(adapter);
4308}
4309
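/**
 * ixgbe_reset - reset the hardware and restore driver state
 * @adapter: board private structure
 */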
4310void ixgbe_reset(struct ixgbe_adapter *adapter)
4311{
4312 struct ixgbe_hw *hw = &adapter->hw;
4313 int err;
4314
	/* lock SFP init bit to prevent race conditions with the watchdog */
4316 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
4317 usleep_range(1000, 2000);
4318
4319
4320 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
4321 IXGBE_FLAG2_SFP_NEEDS_RESET);
4322 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4323
4324 err = hw->mac.ops.init_hw(hw);
4325 switch (err) {
4326 case 0:
4327 case IXGBE_ERR_SFP_NOT_PRESENT:
4328 case IXGBE_ERR_SFP_NOT_SUPPORTED:
4329 break;
4330 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
4331 e_dev_err("master disable timed out\n");
4332 break;
4333 case IXGBE_ERR_EEPROM_VERSION:
4334
4335 e_dev_warn("This device is a pre-production adapter/LOM. "
4336 "Please be aware there may be issues associated with "
4337 "your hardware. If you are experiencing problems "
4338 "please contact your Intel or hardware "
4339 "representative who provided you with this "
4340 "hardware.\n");
4341 break;
4342 default:
4343 e_dev_err("Hardware Error: %d\n", err);
4344 }
4345
4346 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
4347
	/* reprogram the RAR[0] in case user changed it. */
4349 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
4350
	/* update SAN MAC vmdq pool selection */
4352 if (hw->mac.san_mac_rar_index)
4353 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
4354
4355 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
4356 ixgbe_ptp_reset(adapter);
4357}
4358
/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
4363static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4364{
4365 struct device *dev = rx_ring->dev;
4366 unsigned long size;
4367 u16 i;
4368
4369
4370 if (!rx_ring->rx_buffer_info)
4371 return;
4372
4373
4374 for (i = 0; i < rx_ring->count; i++) {
4375 struct ixgbe_rx_buffer *rx_buffer;
4376
4377 rx_buffer = &rx_ring->rx_buffer_info[i];
4378 if (rx_buffer->skb) {
4379 struct sk_buff *skb = rx_buffer->skb;
4380 if (IXGBE_CB(skb)->page_released) {
4381 dma_unmap_page(dev,
4382 IXGBE_CB(skb)->dma,
4383 ixgbe_rx_bufsz(rx_ring),
4384 DMA_FROM_DEVICE);
4385 IXGBE_CB(skb)->page_released = false;
4386 }
4387 dev_kfree_skb(skb);
4388 }
4389 rx_buffer->skb = NULL;
4390 if (rx_buffer->dma)
4391 dma_unmap_page(dev, rx_buffer->dma,
4392 ixgbe_rx_pg_size(rx_ring),
4393 DMA_FROM_DEVICE);
4394 rx_buffer->dma = 0;
4395 if (rx_buffer->page)
4396 __free_pages(rx_buffer->page,
4397 ixgbe_rx_pg_order(rx_ring));
4398 rx_buffer->page = NULL;
4399 }
4400
4401 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4402 memset(rx_ring->rx_buffer_info, 0, size);
4403
4404
4405 memset(rx_ring->desc, 0, rx_ring->size);
4406
4407 rx_ring->next_to_alloc = 0;
4408 rx_ring->next_to_clean = 0;
4409 rx_ring->next_to_use = 0;
4410}
4411
/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
4416static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
4417{
4418 struct ixgbe_tx_buffer *tx_buffer_info;
4419 unsigned long size;
4420 u16 i;
4421
4422
4423 if (!tx_ring->tx_buffer_info)
4424 return;
4425
4426
4427 for (i = 0; i < tx_ring->count; i++) {
4428 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4429 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4430 }
4431
4432 netdev_tx_reset_queue(txring_txq(tx_ring));
4433
4434 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4435 memset(tx_ring->tx_buffer_info, 0, size);
4436
4437
4438 memset(tx_ring->desc, 0, tx_ring->size);
4439
4440 tx_ring->next_to_use = 0;
4441 tx_ring->next_to_clean = 0;
4442}
4443
/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
4448static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
4449{
4450 int i;
4451
4452 for (i = 0; i < adapter->num_rx_queues; i++)
4453 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
4454}
4455
/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
4460static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
4461{
4462 int i;
4463
4464 for (i = 0; i < adapter->num_tx_queues; i++)
4465 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
4466}
4467
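/**
 * ixgbe_fdir_filter_exit - free all software flow director filters
 * @adapter: board private structure
 */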
4468static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
4469{
4470 struct hlist_node *node2;
4471 struct ixgbe_fdir_filter *filter;
4472
4473 spin_lock(&adapter->fdir_perfect_lock);
4474
4475 hlist_for_each_entry_safe(filter, node2,
4476 &adapter->fdir_filter_list, fdir_node) {
4477 hlist_del(&filter->fdir_node);
4478 kfree(filter);
4479 }
4480 adapter->fdir_filter_count = 0;
4481
4482 spin_unlock(&adapter->fdir_perfect_lock);
4483}
4484
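/**
 * ixgbe_down - quiesce the device
 * @adapter: board private structure
 *
 * Stops Rx/Tx, NAPI and the service timer, resets the hardware and
 * reclaims all ring buffers.  The inverse of ixgbe_up().
 */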
4485void ixgbe_down(struct ixgbe_adapter *adapter)
4486{
4487 struct net_device *netdev = adapter->netdev;
4488 struct ixgbe_hw *hw = &adapter->hw;
4489 u32 rxctrl;
4490 int i;
4491
	/* signal that we are down to the interrupt handler */
4493 set_bit(__IXGBE_DOWN, &adapter->state);
4494
	/* disable receives */
4496 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4497 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4498
	/* disable all enabled rx queues */
4500 for (i = 0; i < adapter->num_rx_queues; i++)
		/* this call also flushes the previous write */
4502 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
4503
4504 usleep_range(10000, 20000);
4505
4506 netif_tx_stop_all_queues(netdev);
4507
	/* call carrier off first to avoid false dev_watchdog timeouts */
4509 netif_carrier_off(netdev);
4510 netif_tx_disable(netdev);
4511
4512 ixgbe_irq_disable(adapter);
4513
4514 ixgbe_napi_disable_all(adapter);
4515
4516 adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
4517 IXGBE_FLAG2_RESET_REQUESTED);
4518 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4519
4520 del_timer_sync(&adapter->service_timer);
4521
4522 if (adapter->num_vfs) {
4523
4524 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
4525
4526
4527 for (i = 0 ; i < adapter->num_vfs; i++)
4528 adapter->vfinfo[i].clear_to_send = false;
4529
		/* ping all the active vfs to let them know we are going down */
4531 ixgbe_ping_all_vfs(adapter);
4532
4533
4534 ixgbe_disable_tx_rx(adapter);
4535 }
4536
	/* disable transmits in the hardware now that interrupts are off */
4538 for (i = 0; i < adapter->num_tx_queues; i++) {
4539 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
4540 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
4541 }
4542
	/* Disable the Tx DMA engine on 82599 and X540 */
4544 switch (hw->mac.type) {
4545 case ixgbe_mac_82599EB:
4546 case ixgbe_mac_X540:
4547 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
4548 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
4549 ~IXGBE_DMATXCTL_TE));
4550 break;
4551 default:
4552 break;
4553 }
4554
4555 if (!pci_channel_offline(adapter->pdev))
4556 ixgbe_reset(adapter);
4557
	/* power down the optics for 82599 SFP+ fiber */
4559 if (hw->mac.ops.disable_tx_laser)
4560 hw->mac.ops.disable_tx_laser(hw);
4561
4562 ixgbe_clean_all_tx_rings(adapter);
4563 ixgbe_clean_all_rx_rings(adapter);
4564
4565#ifdef CONFIG_IXGBE_DCA
	/* since we reset the hardware DCA settings were cleared */
4567 ixgbe_setup_dca(adapter);
4568#endif
4569}
4570
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
4575static void ixgbe_tx_timeout(struct net_device *netdev)
4576{
4577 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4578
4579
4580 ixgbe_tx_timeout_reset(adapter);
4581}
4582
4583
/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
4591static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
4592{
4593 struct ixgbe_hw *hw = &adapter->hw;
4594 struct pci_dev *pdev = adapter->pdev;
4595 unsigned int rss, fdir;
4596 u32 fwsm;
4597#ifdef CONFIG_IXGBE_DCB
4598 int j;
4599 struct tc_configuration *tc;
4600#endif
4601
	/* PCI config space info */
4604 hw->vendor_id = pdev->vendor;
4605 hw->device_id = pdev->device;
4606 hw->revision_id = pdev->revision;
4607 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4608 hw->subsystem_device_id = pdev->subsystem_device;
4609
4610
4611 rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
4612 adapter->ring_feature[RING_F_RSS].limit = rss;
4613 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4614 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
4615 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
4616 adapter->atr_sample_rate = 20;
4617 fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
4618 adapter->ring_feature[RING_F_FDIR].limit = fdir;
4619 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
4620#ifdef CONFIG_IXGBE_DCA
4621 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
4622#endif
4623#ifdef IXGBE_FCOE
4624 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
4625 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
4626#ifdef CONFIG_IXGBE_DCB
4627
4628 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
4629#endif
4630#endif
4631
	/* Set MAC specific capability flags and exceptions */
4633 switch (hw->mac.type) {
4634 case ixgbe_mac_82598EB:
4635 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
4636 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
4637
4638 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4639 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
4640
4641 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
4642 adapter->ring_feature[RING_F_FDIR].limit = 0;
4643 adapter->atr_sample_rate = 0;
4644 adapter->fdir_pballoc = 0;
4645#ifdef IXGBE_FCOE
4646 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
4647 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
4648#ifdef CONFIG_IXGBE_DCB
4649 adapter->fcoe.up = 0;
4650#endif
4651#endif
4652 break;
4653 case ixgbe_mac_82599EB:
4654 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
4655 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4656 break;
4657 case ixgbe_mac_X540:
4658 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
4659 if (fwsm & IXGBE_FWSM_TS_ENABLED)
4660 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4661 break;
4662 default:
4663 break;
4664 }
4665
4666#ifdef IXGBE_FCOE
4667
4668 spin_lock_init(&adapter->fcoe.lock);
4669
4670#endif
4671
4672 spin_lock_init(&adapter->fdir_perfect_lock);
4673
4674#ifdef CONFIG_IXGBE_DCB
4675 switch (hw->mac.type) {
4676 case ixgbe_mac_X540:
4677 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
4678 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
4679 break;
4680 default:
4681 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
4682 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
4683 break;
4684 }
4685
4686
4687 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
4688 tc = &adapter->dcb_cfg.tc_config[j];
4689 tc->path[DCB_TX_CONFIG].bwg_id = 0;
4690 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
4691 tc->path[DCB_RX_CONFIG].bwg_id = 0;
4692 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
4693 tc->dcb_pfc = pfc_disabled;
4694 }
4695
4696
4697 tc = &adapter->dcb_cfg.tc_config[0];
4698 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
4699 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
4700
4701 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
4702 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
4703 adapter->dcb_cfg.pfc_mode_enable = false;
4704 adapter->dcb_set_bitmap = 0x00;
4705 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
4706 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
4707 sizeof(adapter->temp_dcb_cfg));
4708
4709#endif
4710
4711
4712 hw->fc.requested_mode = ixgbe_fc_full;
4713 hw->fc.current_mode = ixgbe_fc_full;
4714 ixgbe_pbthresh_setup(adapter);
4715 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4716 hw->fc.send_xon = true;
4717 hw->fc.disable_fc_autoneg =
4718 (ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true;
4719
4720#ifdef CONFIG_PCI_IOV
4721
4722 if (hw->mac.type != ixgbe_mac_82598EB)
4723 adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
4724
4725#endif
4726
4727 adapter->rx_itr_setting = 1;
4728 adapter->tx_itr_setting = 1;
4729
4730
4731 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
4732 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
4733
4734
4735 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
4736
4737
4738 if (ixgbe_init_eeprom_params_generic(hw)) {
4739 e_dev_err("EEPROM initialization failed\n");
4740 return -EIO;
4741 }
4742
4743 set_bit(__IXGBE_DOWN, &adapter->state);
4744
4745 return 0;
4746}
4747
4748
/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
4754int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
4755{
4756 struct device *dev = tx_ring->dev;
4757 int orig_node = dev_to_node(dev);
4758 int numa_node = -1;
4759 int size;
4760
4761 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4762
4763 if (tx_ring->q_vector)
4764 numa_node = tx_ring->q_vector->numa_node;
4765
4766 tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
4767 if (!tx_ring->tx_buffer_info)
4768 tx_ring->tx_buffer_info = vzalloc(size);
4769 if (!tx_ring->tx_buffer_info)
4770 goto err;
4771
4772
4773 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4774 tx_ring->size = ALIGN(tx_ring->size, 4096);
4775
4776 set_dev_node(dev, numa_node);
4777 tx_ring->desc = dma_alloc_coherent(dev,
4778 tx_ring->size,
4779 &tx_ring->dma,
4780 GFP_KERNEL);
4781 set_dev_node(dev, orig_node);
4782 if (!tx_ring->desc)
4783 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4784 &tx_ring->dma, GFP_KERNEL);
4785 if (!tx_ring->desc)
4786 goto err;
4787
4788 tx_ring->next_to_use = 0;
4789 tx_ring->next_to_clean = 0;
4790 return 0;
4791
4792err:
4793 vfree(tx_ring->tx_buffer_info);
4794 tx_ring->tx_buffer_info = NULL;
4795 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4796 return -ENOMEM;
4797}
4798
4799
/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
4809static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4810{
4811 int i, err = 0;
4812
4813 for (i = 0; i < adapter->num_tx_queues; i++) {
4814 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
4815 if (!err)
4816 continue;
4817
4818 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
4819 goto err_setup_tx;
4820 }
4821
4822 return 0;
4823err_setup_tx:
	/* rewind the index freeing the rings as we go */
4825 while (i--)
4826 ixgbe_free_tx_resources(adapter->tx_ring[i]);
4827 return err;
4828}
4829
4830
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
4836int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
4837{
4838 struct device *dev = rx_ring->dev;
4839 int orig_node = dev_to_node(dev);
4840 int numa_node = -1;
4841 int size;
4842
4843 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4844
4845 if (rx_ring->q_vector)
4846 numa_node = rx_ring->q_vector->numa_node;
4847
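	/* try to allocate the buffer info array on the q_vector's NUMA
	 * node first, then fall back to an allocation on any node
	 */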
4848 rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
4849 if (!rx_ring->rx_buffer_info)
4850 rx_ring->rx_buffer_info = vzalloc(size);
4851 if (!rx_ring->rx_buffer_info)
4852 goto err;
4853
	/* Round up to nearest 4K */
4855 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
4856 rx_ring->size = ALIGN(rx_ring->size, 4096);
4857
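	/* temporarily point the device at the ring's NUMA node so the
	 * coherent DMA allocation is attempted there first
	 */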
4858 set_dev_node(dev, numa_node);
4859 rx_ring->desc = dma_alloc_coherent(dev,
4860 rx_ring->size,
4861 &rx_ring->dma,
4862 GFP_KERNEL);
4863 set_dev_node(dev, orig_node);
4864 if (!rx_ring->desc)
4865 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4866 &rx_ring->dma, GFP_KERNEL);
4867 if (!rx_ring->desc)
4868 goto err;
4869
4870 rx_ring->next_to_clean = 0;
4871 rx_ring->next_to_use = 0;
4872
4873 return 0;
4874err:
4875 vfree(rx_ring->rx_buffer_info);
4876 rx_ring->rx_buffer_info = NULL;
4877 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4878 return -ENOMEM;
4879}
4880
/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
4891static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
4892{
4893 int i, err = 0;
4894
4895 for (i = 0; i < adapter->num_rx_queues; i++) {
4896 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
4897 if (!err)
4898 continue;
4899
4900 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
4901 goto err_setup_rx;
4902 }
4903
4904#ifdef IXGBE_FCOE
4905 err = ixgbe_setup_fcoe_ddp_resources(adapter);
4906 if (!err)
4907#endif
4908 return 0;
4909err_setup_rx:
	/* rewind the index freeing the rings as we go */
4911 while (i--)
4912 ixgbe_free_rx_resources(adapter->rx_ring[i]);
4913 return err;
4914}
4915
/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
4922void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
4923{
4924 ixgbe_clean_tx_ring(tx_ring);
4925
4926 vfree(tx_ring->tx_buffer_info);
4927 tx_ring->tx_buffer_info = NULL;
4928
	/* if not set, then don't free */
4930 if (!tx_ring->desc)
4931 return;
4932
4933 dma_free_coherent(tx_ring->dev, tx_ring->size,
4934 tx_ring->desc, tx_ring->dma);
4935
4936 tx_ring->desc = NULL;
4937}
4938
/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
4945static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
4946{
4947 int i;
4948
4949 for (i = 0; i < adapter->num_tx_queues; i++)
4950 if (adapter->tx_ring[i]->desc)
4951 ixgbe_free_tx_resources(adapter->tx_ring[i]);
4952}
4953
/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
4960void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
4961{
4962 ixgbe_clean_rx_ring(rx_ring);
4963
4964 vfree(rx_ring->rx_buffer_info);
4965 rx_ring->rx_buffer_info = NULL;
4966
	/* if not set, then don't free */
4968 if (!rx_ring->desc)
4969 return;
4970
4971 dma_free_coherent(rx_ring->dev, rx_ring->size,
4972 rx_ring->desc, rx_ring->dma);
4973
4974 rx_ring->desc = NULL;
4975}
4976
/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
4983static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
4984{
4985 int i;
4986
4987#ifdef IXGBE_FCOE
4988 ixgbe_free_fcoe_ddp_resources(adapter);
4989
4990#endif
4991 for (i = 0; i < adapter->num_rx_queues; i++)
4992 if (adapter->rx_ring[i]->desc)
4993 ixgbe_free_rx_resources(adapter->rx_ring[i]);
4994}
4995
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
5003static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5004{
5005 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5006 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5007
	/* MTU < 68 is an error and causes problems for some kernels */
5009 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5010 return -EINVAL;
5011
	/*
	 * For 82599EB we cannot allow legacy VFs to enable their receive
	 * paths when MTU greater than 1500 is configured.  So display a
	 * warning that legacy VFs will be disabled.
	 */
5017 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
5018 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
5019 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
5020 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
5021
5022 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5023
	/* must set new MTU before calling down or up */
5025 netdev->mtu = new_mtu;
5026
5027 if (netif_running(netdev))
5028 ixgbe_reinit_locked(adapter);
5029
5030 return 0;
5031}
5032
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
5045static int ixgbe_open(struct net_device *netdev)
5046{
5047 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5048 int err;
5049
	/* disallow open during test */
5051 if (test_bit(__IXGBE_TESTING, &adapter->state))
5052 return -EBUSY;
5053
5054 netif_carrier_off(netdev);
5055
	/* allocate transmit descriptors */
5057 err = ixgbe_setup_all_tx_resources(adapter);
5058 if (err)
5059 goto err_setup_tx;
5060
	/* allocate receive descriptors */
5062 err = ixgbe_setup_all_rx_resources(adapter);
5063 if (err)
5064 goto err_setup_rx;
5065
5066 ixgbe_configure(adapter);
5067
5068 err = ixgbe_request_irq(adapter);
5069 if (err)
5070 goto err_req_irq;
5071
	/* Notify the stack of the actual queue counts. */
5073 err = netif_set_real_num_tx_queues(netdev,
5074 adapter->num_rx_pools > 1 ? 1 :
5075 adapter->num_tx_queues);
5076 if (err)
5077 goto err_set_queues;
5078
5079
5080 err = netif_set_real_num_rx_queues(netdev,
5081 adapter->num_rx_pools > 1 ? 1 :
5082 adapter->num_rx_queues);
5083 if (err)
5084 goto err_set_queues;
5085
5086 ixgbe_ptp_init(adapter);
5087
5088 ixgbe_up_complete(adapter);
5089
5090 return 0;
5091
5092err_set_queues:
5093 ixgbe_free_irq(adapter);
5094err_req_irq:
5095 ixgbe_free_all_rx_resources(adapter);
5096err_setup_rx:
5097 ixgbe_free_all_tx_resources(adapter);
5098err_setup_tx:
5099 ixgbe_reset(adapter);
5100
5101 return err;
5102}
5103
/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the control of the driver,
 * but needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
5115static int ixgbe_close(struct net_device *netdev)
5116{
5117 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5118
5119 ixgbe_ptp_stop(adapter);
5120
5121 ixgbe_down(adapter);
5122 ixgbe_free_irq(adapter);
5123
5124 ixgbe_fdir_filter_exit(adapter);
5125
5126 ixgbe_free_all_tx_resources(adapter);
5127 ixgbe_free_all_rx_resources(adapter);
5128
5129 ixgbe_release_hw_control(adapter);
5130
5131 return 0;
5132}
5133
5134#ifdef CONFIG_PM
5135static int ixgbe_resume(struct pci_dev *pdev)
5136{
5137 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5138 struct net_device *netdev = adapter->netdev;
5139 u32 err;
5140
5141 pci_set_power_state(pdev, PCI_D0);
5142 pci_restore_state(pdev);
5143
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
5147 pci_save_state(pdev);
5148
5149 err = pci_enable_device_mem(pdev);
5150 if (err) {
5151 e_dev_err("Cannot enable PCI device from suspend\n");
5152 return err;
5153 }
5154 pci_set_master(pdev);
5155
5156 pci_wake_from_d3(pdev, false);
5157
5158 ixgbe_reset(adapter);
5159
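	/* WUS is write-1-to-clear; writing all ones clears any wake-up
	 * status bits left over from the low-power state
	 */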
5160 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5161
5162 rtnl_lock();
5163 err = ixgbe_init_interrupt_scheme(adapter);
5164 if (!err && netif_running(netdev))
5165 err = ixgbe_open(netdev);
5166
5167 rtnl_unlock();
5168
5169 if (err)
5170 return err;
5171
5172 netif_device_attach(netdev);
5173
5174 return 0;
5175}
5176#endif
5177
5178static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5179{
5180 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5181 struct net_device *netdev = adapter->netdev;
5182 struct ixgbe_hw *hw = &adapter->hw;
5183 u32 ctrl, fctrl;
5184 u32 wufc = adapter->wol;
5185#ifdef CONFIG_PM
5186 int retval = 0;
5187#endif
5188
5189 netif_device_detach(netdev);
5190
5191 rtnl_lock();
5192 if (netif_running(netdev)) {
5193 ixgbe_down(adapter);
5194 ixgbe_free_irq(adapter);
5195 ixgbe_free_all_tx_resources(adapter);
5196 ixgbe_free_all_rx_resources(adapter);
5197 }
5198 rtnl_unlock();
5199
5200 ixgbe_clear_interrupt_scheme(adapter);
5201
5202#ifdef CONFIG_PM
5203 retval = pci_save_state(pdev);
5204 if (retval)
5205 return retval;
5206
5207#endif
5208 if (wufc) {
5209 ixgbe_set_rx_mode(netdev);
5210
5211
5212 if (hw->mac.ops.enable_tx_laser)
5213 hw->mac.ops.enable_tx_laser(hw);
5214
5215
5216 if (wufc & IXGBE_WUFC_MC) {
5217 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5218 fctrl |= IXGBE_FCTRL_MPE;
5219 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5220 }
5221
5222 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
5223 ctrl |= IXGBE_CTRL_GIO_DIS;
5224 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
5225
5226 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
5227 } else {
5228 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
5229 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5230 }
5231
5232 switch (hw->mac.type) {
5233 case ixgbe_mac_82598EB:
5234 pci_wake_from_d3(pdev, false);
5235 break;
5236 case ixgbe_mac_82599EB:
5237 case ixgbe_mac_X540:
5238 pci_wake_from_d3(pdev, !!wufc);
5239 break;
5240 default:
5241 break;
5242 }
5243
5244 *enable_wake = !!wufc;
5245
5246 ixgbe_release_hw_control(adapter);
5247
5248 pci_disable_device(pdev);
5249
5250 return 0;
5251}
5252
5253#ifdef CONFIG_PM
5254static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
5255{
5256 int retval;
5257 bool wake;
5258
5259 retval = __ixgbe_shutdown(pdev, &wake);
5260 if (retval)
5261 return retval;
5262
5263 if (wake) {
5264 pci_prepare_to_sleep(pdev);
5265 } else {
5266 pci_wake_from_d3(pdev, false);
5267 pci_set_power_state(pdev, PCI_D3hot);
5268 }
5269
5270 return 0;
5271}
5272#endif
5273
5274static void ixgbe_shutdown(struct pci_dev *pdev)
5275{
5276 bool wake;
5277
5278 __ixgbe_shutdown(pdev, &wake);
5279
5280 if (system_state == SYSTEM_POWER_OFF) {
5281 pci_wake_from_d3(pdev, wake);
5282 pci_set_power_state(pdev, PCI_D3hot);
5283 }
5284}
5285
/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
5290void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5291{
5292 struct net_device *netdev = adapter->netdev;
5293 struct ixgbe_hw *hw = &adapter->hw;
5294 struct ixgbe_hw_stats *hwstats = &adapter->stats;
5295 u64 total_mpc = 0;
5296 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
5297 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
5298 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
5299 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
5300
5301 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5302 test_bit(__IXGBE_RESETTING, &adapter->state))
5303 return;
5304
5305 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
5306 u64 rsc_count = 0;
5307 u64 rsc_flush = 0;
5308 for (i = 0; i < adapter->num_rx_queues; i++) {
5309 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
5310 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
5311 }
5312 adapter->rsc_total_count = rsc_count;
5313 adapter->rsc_total_flush = rsc_flush;
5314 }
5315
5316 for (i = 0; i < adapter->num_rx_queues; i++) {
5317 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
5318 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
5319 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
5320 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
5321 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
5322 bytes += rx_ring->stats.bytes;
5323 packets += rx_ring->stats.packets;
5324 }
5325 adapter->non_eop_descs = non_eop_descs;
5326 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
5327 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
5328 adapter->hw_csum_rx_error = hw_csum_rx_error;
5329 netdev->stats.rx_bytes = bytes;
5330 netdev->stats.rx_packets = packets;
5331
5332 bytes = 0;
5333 packets = 0;
5334
5335 for (i = 0; i < adapter->num_tx_queues; i++) {
5336 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5337 restart_queue += tx_ring->tx_stats.restart_queue;
5338 tx_busy += tx_ring->tx_stats.tx_busy;
5339 bytes += tx_ring->stats.bytes;
5340 packets += tx_ring->stats.packets;
5341 }
5342 adapter->restart_queue = restart_queue;
5343 adapter->tx_busy = tx_busy;
5344 netdev->stats.tx_bytes = bytes;
5345 netdev->stats.tx_packets = packets;
5346
5347 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
5348
	/* 8 register reads */
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
5352 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
5353 missed_rx += mpc;
5354 hwstats->mpc[i] += mpc;
5355 total_mpc += hwstats->mpc[i];
5356 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5357 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
5358 switch (hw->mac.type) {
5359 case ixgbe_mac_82598EB:
5360 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
5361 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5362 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
5363 hwstats->pxonrxc[i] +=
5364 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
5365 break;
5366 case ixgbe_mac_82599EB:
5367 case ixgbe_mac_X540:
5368 hwstats->pxonrxc[i] +=
5369 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5370 break;
5371 default:
5372 break;
5373 }
5374 }
5375
	/* 16 register reads */
5377 for (i = 0; i < 16; i++) {
5378 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
5379 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5380 if ((hw->mac.type == ixgbe_mac_82599EB) ||
5381 (hw->mac.type == ixgbe_mac_X540)) {
			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
5386 }
5387 }
5388
5389 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
5391 hwstats->gprc -= missed_rx;
5392
5393 ixgbe_update_xoff_received(adapter);
5394
	/* 82598 hardware only has a 32 bit counter in the high register */
5396 switch (hw->mac.type) {
5397 case ixgbe_mac_82598EB:
5398 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5399 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5400 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5401 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5402 break;
5403 case ixgbe_mac_X540:
		/* OS2BMC stats are X540 only */
5405 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
5406 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
5407 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
5408 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
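		/* fall through - X540 also accumulates the 82599 counters below */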
5409 case ixgbe_mac_82599EB:
5410 for (i = 0; i < 16; i++)
5411 adapter->hw_rx_no_dma_resources +=
5412 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
5419 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5420 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5421 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
5422#ifdef IXGBE_FCOE
5423 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
5424 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
5425 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
5426 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
5427 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5428 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
		/* Add up per cpu counters for total ddp alloc fail */
5430 if (adapter->fcoe.ddp_pool) {
5431 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
5432 struct ixgbe_fcoe_ddp_pool *ddp_pool;
5433 unsigned int cpu;
5434 u64 noddp = 0, noddp_ext_buff = 0;
5435 for_each_possible_cpu(cpu) {
5436 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
5437 noddp += ddp_pool->noddp;
5438 noddp_ext_buff += ddp_pool->noddp_ext_buff;
5439 }
5440 hwstats->fcoe_noddp = noddp;
5441 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
5442 }
5443#endif
5444 break;
5445 default:
5446 break;
5447 }
5448 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
5449 hwstats->bprc += bprc;
5450 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
5451 if (hw->mac.type == ixgbe_mac_82598EB)
5452 hwstats->mprc -= bprc;
5453 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
5454 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
5455 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
5456 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
5457 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
5458 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
5459 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
5460 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
5461 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
5462 hwstats->lxontxc += lxon;
5463 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
5464 hwstats->lxofftxc += lxoff;
5465 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
5466 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
5470 xon_off_tot = lxon + lxoff;
5471 hwstats->gptc -= xon_off_tot;
5472 hwstats->mptc -= xon_off_tot;
5473 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
5474 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5475 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
5476 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
5477 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
5478 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
5479 hwstats->ptc64 -= xon_off_tot;
5480 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
5481 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
5482 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
5483 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
5484 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
5485 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
5486
	/* Fill out the OS statistics structure */
5488 netdev->stats.multicast = hwstats->mprc;
5489
	/* Rx Errors */
5491 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
5492 netdev->stats.rx_dropped = 0;
5493 netdev->stats.rx_length_errors = hwstats->rlec;
5494 netdev->stats.rx_crc_errors = hwstats->crcerrs;
5495 netdev->stats.rx_missed_errors = total_mpc;
5496}
5497
/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/
5502static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
5503{
5504 struct ixgbe_hw *hw = &adapter->hw;
5505 int i;
5506
5507 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
5508 return;
5509
5510 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
5511
	/* if interface is down do nothing */
5513 if (test_bit(__IXGBE_DOWN, &adapter->state))
5514 return;
5515
	/* do nothing if we are not using signature filters */
5517 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
5518 return;
5519
5520 adapter->fdir_overflow++;
5521
5522 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5523 for (i = 0; i < adapter->num_tx_queues; i++)
5524 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
5525 &(adapter->tx_ring[i]->state));
		/* re-enable flow director interrupts */
5527 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
5528 } else {
5529 e_err(probe, "failed to finish FDIR re-initialization, "
5530 "ignored adding FDIR ATR filters\n");
5531 }
5532}
5533
/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 */
5543static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
5544{
5545 struct ixgbe_hw *hw = &adapter->hw;
5546 u64 eics = 0;
5547 int i;
5548
	/* If we're down or resetting, just bail */
5550 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5551 test_bit(__IXGBE_RESETTING, &adapter->state))
5552 return;
5553
	/* Force detection of hung controller */
5555 if (netif_carrier_ok(adapter->netdev)) {
5556 for (i = 0; i < adapter->num_tx_queues; i++)
5557 set_check_for_tx_hang(adapter->tx_ring[i]);
5558 }
5559
5560 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
5566 IXGBE_WRITE_REG(hw, IXGBE_EICS,
5567 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
5568 } else {
		/* get one bit for every active tx/rx interrupt vector */
5570 for (i = 0; i < adapter->num_q_vectors; i++) {
5571 struct ixgbe_q_vector *qv = adapter->q_vector[i];
5572 if (qv->rx.ring || qv->tx.ring)
5573 eics |= ((u64)1 << i);
5574 }
5575 }
5576
	/* Cause software interrupt to ensure rings are cleaned */
5578 ixgbe_irq_rearm_queues(adapter, eics);
5579
5580}
5581
5582
/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
5587static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5588{
5589 struct ixgbe_hw *hw = &adapter->hw;
5590 u32 link_speed = adapter->link_speed;
5591 bool link_up = adapter->link_up;
5592 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
5593
5594 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
5595 return;
5596
5597 if (hw->mac.ops.check_link) {
5598 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
5599 } else {
		/* always assume link is up, if no check link function */
5601 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5602 link_up = true;
5603 }
5604
5605 if (adapter->ixgbe_ieee_pfc)
5606 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
5607
5608 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
5609 hw->mac.ops.fc_enable(hw);
5610 ixgbe_set_rx_drop_en(adapter);
5611 }
5612
5613 if (link_up ||
5614 time_after(jiffies, (adapter->link_check_timeout +
5615 IXGBE_TRY_LINK_TIMEOUT))) {
5616 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5617 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
5618 IXGBE_WRITE_FLUSH(hw);
5619 }
5620
5621 adapter->link_up = link_up;
5622 adapter->link_speed = link_speed;
5623}
5624
5625static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
5626{
5627#ifdef CONFIG_IXGBE_DCB
5628 struct net_device *netdev = adapter->netdev;
5629 struct dcb_app app = {
5630 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
5631 .protocol = 0,
5632 };
5633 u8 up = 0;
5634
5635 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
5636 up = dcb_ieee_getapp_mask(netdev, &app);
5637
5638 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
5639#endif
5640}
5641
/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter: pointer to the device adapter structure
 **/
5647static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5648{
5649 struct net_device *netdev = adapter->netdev;
5650 struct ixgbe_hw *hw = &adapter->hw;
5651 u32 link_speed = adapter->link_speed;
5652 bool flow_rx, flow_tx;
5653
	/* only continue if link was previously down */
5655 if (netif_carrier_ok(netdev))
5656 return;
5657
5658 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
5659
5660 switch (hw->mac.type) {
5661 case ixgbe_mac_82598EB: {
5662 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5663 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
5664 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5665 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5666 }
5667 break;
5668 case ixgbe_mac_X540:
5669 case ixgbe_mac_82599EB: {
5670 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
5671 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
5672 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5673 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
5674 }
5675 break;
5676 default:
5677 flow_tx = false;
5678 flow_rx = false;
5679 break;
5680 }
5681
5682 adapter->last_rx_ptp_check = jiffies;
5683
5684 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
5685 ixgbe_ptp_start_cyclecounter(adapter);
5686
5687 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
5688 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
5689 "10 Gbps" :
5690 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
5691 "1 Gbps" :
5692 (link_speed == IXGBE_LINK_SPEED_100_FULL ?
5693 "100 Mbps" :
5694 "unknown speed"))),
5695 ((flow_rx && flow_tx) ? "RX/TX" :
5696 (flow_rx ? "RX" :
5697 (flow_tx ? "TX" : "None"))));
5698
5699 netif_carrier_on(netdev);
5700 ixgbe_check_vf_rate_limit(adapter);
5701
	/* update the default user priority for VFs */
5703 ixgbe_update_default_up(adapter);
5704
	/* ping all the active vfs to let them know link has changed */
5706 ixgbe_ping_all_vfs(adapter);
5707}
5708
/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter: pointer to the adapter structure
 **/
5714static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
5715{
5716 struct net_device *netdev = adapter->netdev;
5717 struct ixgbe_hw *hw = &adapter->hw;
5718
5719 adapter->link_up = false;
5720 adapter->link_speed = 0;
5721
	/* only continue if link was up previously */
5723 if (!netif_carrier_ok(netdev))
5724 return;
5725
	/* poll for SFP+ cable when link is down */
5727 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
5728 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5729
5730 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
5731 ixgbe_ptp_start_cyclecounter(adapter);
5732
5733 e_info(drv, "NIC Link is Down\n");
5734 netif_carrier_off(netdev);
5735
	/* ping all the active vfs to let them know link has changed */
5737 ixgbe_ping_all_vfs(adapter);
5738}
5739
5740
/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/
5744static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
5745{
5746 int i;
5747 int some_tx_pending = 0;
5748
5749 if (!netif_carrier_ok(adapter->netdev)) {
5750 for (i = 0; i < adapter->num_tx_queues; i++) {
5751 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5752 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
5753 some_tx_pending = 1;
5754 break;
5755 }
5756 }
5757
5758 if (some_tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
5764 e_warn(drv, "initiating reset to clear Tx work after link loss\n");
5765 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
5766 }
5767 }
5768}
5769
5770static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
5771{
5772 u32 ssvpc;
5773
	/* Do not perform spoof check for 82598 or if not in IOV mode */
5775 if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
5776 adapter->num_vfs == 0)
5777 return;
5778
5779 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
5780
	/*
	 * ssvpc register is cleared on read, if zero then no
	 * spoofed packets in the last interval.
	 */
5785 if (!ssvpc)
5786 return;
5787
5788 e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
5789}
5790
/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/
5795static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
5796{
	/* if interface is down do nothing */
5798 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5799 test_bit(__IXGBE_RESETTING, &adapter->state))
5800 return;
5801
5802 ixgbe_watchdog_update_link(adapter);
5803
5804 if (adapter->link_up)
5805 ixgbe_watchdog_link_is_up(adapter);
5806 else
5807 ixgbe_watchdog_link_is_down(adapter);
5808
5809 ixgbe_spoof_check(adapter);
5810 ixgbe_update_stats(adapter);
5811
5812 ixgbe_watchdog_flush_tx(adapter);
5813}
5814
/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/
5819static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
5820{
5821 struct ixgbe_hw *hw = &adapter->hw;
5822 s32 err;
5823
	/* not searching for SFP so there is nothing to do here */
5825 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
5826 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
5827 return;
5828
	/* concurrent i2c reads are not supported */
5830 if (test_bit(__IXGBE_READ_I2C, &adapter->state))
5831 return;
5832
	/* someone else is in init, wait until next service event */
5834 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5835 return;
5836
5837 err = hw->phy.ops.identify_sfp(hw);
5838 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
5839 goto sfp_out;
5840
5841 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* If no cable is present, then we need to reset
		 * the next time we find a good cable. */
5844 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5845 }
5846
	/* exit on error */
5848 if (err)
5849 goto sfp_out;
5850
	/* exit if reset not needed */
5852 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
5853 goto sfp_out;
5854
5855 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
5856
	/*
	 * A module may be identified correctly, but the EEPROM may not have
	 * support for that module.  setup_sfp() will fail in that case, so
	 * we should not allow that module to load.
	 */
5862 if (hw->mac.type == ixgbe_mac_82598EB)
5863 err = hw->phy.ops.reset(hw);
5864 else
5865 err = hw->mac.ops.setup_sfp(hw);
5866
5867 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
5868 goto sfp_out;
5869
5870 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
5871 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
5872
5873sfp_out:
5874 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5875
5876 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
5877 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
5878 e_dev_err("failed to initialize because an unsupported "
5879 "SFP+ module type was detected.\n");
5880 e_dev_err("Reload the driver after installing a "
5881 "supported module.\n");
5882 unregister_netdev(adapter->netdev);
5883 }
5884}
5885
/**
 * ixgbe_sfp_link_config_subtask - set up link SFP if needed
 * @adapter: the ixgbe adapter structure
 **/
5890static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
5891{
5892 struct ixgbe_hw *hw = &adapter->hw;
5893 u32 speed;
5894 bool autoneg = false;
5895
5896 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
5897 return;
5898
	/* someone else is in init, wait until next service event */
5900 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5901 return;
5902
5903 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5904
5905 speed = hw->phy.autoneg_advertised;
5906 if ((!speed) && (hw->mac.ops.get_link_capabilities))
5907 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
5908 if (hw->mac.ops.setup_link)
5909 hw->mac.ops.setup_link(hw, speed, true);
5910
5911 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5912 adapter->link_check_timeout = jiffies;
5913 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5914}
5915
5916#ifdef CONFIG_PCI_IOV
5917static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
5918{
5919 int vf;
5920 struct ixgbe_hw *hw = &adapter->hw;
5921 struct net_device *netdev = adapter->netdev;
5922 u32 gpc;
5923 u32 ciaa, ciad;
5924
5925 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
	if (gpc) /* If incrementing then no need for the check below */
5927 return;
5928
	/*
	 * Check to see if a bad DMA write target from an errant or
	 * malicious VF has caused a PCIe error.  If so then we can
	 * issue a VFLR to the offending VF(s) and then resume without
	 * requiring a full reset.
	 */
5935 for (vf = 0; vf < adapter->num_vfs; vf++) {
5936 ciaa = (vf << 16) | 0x80000000;
		/* 32 bit read so align, we really want status at offset 6 */
5938 ciaa |= PCI_COMMAND;
5939 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
5940 ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599);
5941 ciaa &= 0x7FFFFFFF;
		/* disable debug mode asap after reading data */
5943 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
		/* Get the upper 16 bits which will be the PCI status reg */
5945 ciad >>= 16;
5946 if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
5947 netdev_err(netdev, "VF %d Hung DMA\n", vf);
			/* Issue VFLR */
5949 ciaa = (vf << 16) | 0x80000000;
5950 ciaa |= 0xA8;
5951 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
5952 ciad = 0x00008000;
5953 IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad);
5954 ciaa &= 0x7FFFFFFF;
5955 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
5956 }
5957 }
5958}
5959
5960#endif
5961
/**
 * ixgbe_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
5965static void ixgbe_service_timer(unsigned long data)
5966{
5967 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
5968 unsigned long next_event_offset;
5969 bool ready = true;
5970
	/* poll faster when waiting for link */
5972 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
5973 next_event_offset = HZ / 10;
5974 else
5975 next_event_offset = HZ * 2;
5976
5977#ifdef CONFIG_PCI_IOV
	/*
	 * don't bother with SR-IOV VF DMA hang check if there are
	 * no VFs or the link is down
	 */
5982 if (!adapter->num_vfs ||
5983 (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
5984 goto normal_timer_service;
5985
	/* If we have VFs allocated then we must check for DMA hangs */
5987 ixgbe_check_for_bad_vf(adapter);
5988 next_event_offset = HZ / 50;
5989 adapter->timer_event_accumulator++;
5990
5991 if (adapter->timer_event_accumulator >= 100)
5992 adapter->timer_event_accumulator = 0;
5993 else
5994 ready = false;
5995
5996normal_timer_service:
5997#endif
	/* Reset the timer */
5999 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
6000
6001 if (ready)
6002 ixgbe_service_event_schedule(adapter);
6003}
6004
6005static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
6006{
6007 if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
6008 return;
6009
6010 adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
6011
	/* If we're already down or resetting, just bail */
6013 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6014 test_bit(__IXGBE_RESETTING, &adapter->state))
6015 return;
6016
6017 ixgbe_dump(adapter);
6018 netdev_err(adapter->netdev, "Reset adapter\n");
6019 adapter->tx_timeout_count++;
6020
6021 ixgbe_reinit_locked(adapter);
6022}
6023
/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
6028static void ixgbe_service_task(struct work_struct *work)
6029{
6030 struct ixgbe_adapter *adapter = container_of(work,
6031 struct ixgbe_adapter,
6032 service_task);
6033 ixgbe_reset_subtask(adapter);
6034 ixgbe_sfp_detection_subtask(adapter);
6035 ixgbe_sfp_link_config_subtask(adapter);
6036 ixgbe_check_overtemp_subtask(adapter);
6037 ixgbe_watchdog_subtask(adapter);
6038 ixgbe_fdir_reinit_subtask(adapter);
6039 ixgbe_check_hang_subtask(adapter);
6040
6041 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) {
6042 ixgbe_ptp_overflow_check(adapter);
6043 ixgbe_ptp_rx_hang(adapter);
6044 }
6045
6046 ixgbe_service_event_complete(adapter);
6047}
6048
6049static int ixgbe_tso(struct ixgbe_ring *tx_ring,
6050 struct ixgbe_tx_buffer *first,
6051 u8 *hdr_len)
6052{
6053 struct sk_buff *skb = first->skb;
6054 u32 vlan_macip_lens, type_tucmd;
6055 u32 mss_l4len_idx, l4len;
6056
6057 if (skb->ip_summed != CHECKSUM_PARTIAL)
6058 return 0;
6059
6060 if (!skb_is_gso(skb))
6061 return 0;
6062
6063 if (skb_header_cloned(skb)) {
6064 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
6065 if (err)
6066 return err;
6067 }
6068
	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
6070 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
6071
6072 if (first->protocol == __constant_htons(ETH_P_IP)) {
6073 struct iphdr *iph = ip_hdr(skb);
6074 iph->tot_len = 0;
6075 iph->check = 0;
6076 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6077 iph->daddr, 0,
6078 IPPROTO_TCP,
6079 0);
6080 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
6081 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
6082 IXGBE_TX_FLAGS_CSUM |
6083 IXGBE_TX_FLAGS_IPV4;
6084 } else if (skb_is_gso_v6(skb)) {
6085 ipv6_hdr(skb)->payload_len = 0;
6086 tcp_hdr(skb)->check =
6087 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
6088 &ipv6_hdr(skb)->daddr,
6089 0, IPPROTO_TCP, 0);
6090 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
6091 IXGBE_TX_FLAGS_CSUM;
6092 }
6093
	/* compute header lengths */
6095 l4len = tcp_hdrlen(skb);
6096 *hdr_len = skb_transport_offset(skb) + l4len;
6097
	/* update gso size and bytecount with header size */
6099 first->gso_segs = skb_shinfo(skb)->gso_segs;
6100 first->bytecount += (first->gso_segs - 1) * *hdr_len;
6101
	/* mss_l4len_id: use 0 as index for TSO */
6103 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
6104 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
6105
	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
6107 vlan_macip_lens = skb_network_header_len(skb);
6108 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
6109 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
6110
6111 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
6112 mss_l4len_idx);
6113
6114 return 1;
6115}
6116
6117static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
6118 struct ixgbe_tx_buffer *first)
6119{
6120 struct sk_buff *skb = first->skb;
6121 u32 vlan_macip_lens = 0;
6122 u32 mss_l4len_idx = 0;
6123 u32 type_tucmd = 0;
6124
6125 if (skb->ip_summed != CHECKSUM_PARTIAL) {
6126 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
6127 !(first->tx_flags & IXGBE_TX_FLAGS_CC))
6128 return;
6129 } else {
6130 u8 l4_hdr = 0;
6131 switch (first->protocol) {
6132 case __constant_htons(ETH_P_IP):
6133 vlan_macip_lens |= skb_network_header_len(skb);
6134 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
6135 l4_hdr = ip_hdr(skb)->protocol;
6136 break;
6137 case __constant_htons(ETH_P_IPV6):
6138 vlan_macip_lens |= skb_network_header_len(skb);
6139 l4_hdr = ipv6_hdr(skb)->nexthdr;
6140 break;
6141 default:
6142 if (unlikely(net_ratelimit())) {
6143 dev_warn(tx_ring->dev,
6144 "partial checksum but proto=%x!\n",
6145 first->protocol);
6146 }
6147 break;
6148 }
6149
6150 switch (l4_hdr) {
6151 case IPPROTO_TCP:
6152 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
6153 mss_l4len_idx = tcp_hdrlen(skb) <<
6154 IXGBE_ADVTXD_L4LEN_SHIFT;
6155 break;
6156 case IPPROTO_SCTP:
6157 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
6158 mss_l4len_idx = sizeof(struct sctphdr) <<
6159 IXGBE_ADVTXD_L4LEN_SHIFT;
6160 break;
6161 case IPPROTO_UDP:
6162 mss_l4len_idx = sizeof(struct udphdr) <<
6163 IXGBE_ADVTXD_L4LEN_SHIFT;
6164 break;
6165 default:
6166 if (unlikely(net_ratelimit())) {
6167 dev_warn(tx_ring->dev,
6168 "partial checksum but l4 proto=%x!\n",
6169 l4_hdr);
6170 }
6171 break;
6172 }
6173
		/* update TX checksum flag */
6175 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
6176 }
6177
	/* vlan_macip_lens: MACLEN, VLAN tag */
6179 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
6180 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
6181
6182 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
6183 type_tucmd, mss_l4len_idx);
6184}
6185
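/* Translate a bit set in _input by mask _flag into the bit position that
 * _result expects; both masks must be single-bit (power of two) values,
 * and the multiply/divide resolves to a constant scale at compile time.
 */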
6186#define IXGBE_SET_FLAG(_input, _flag, _result) \
6187 ((_flag <= _result) ? \
6188 ((u32)(_input & _flag) * (_result / _flag)) : \
6189 ((u32)(_input & _flag) / (_flag / _result)))
6190
6191static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
6192{
	/* set type for advanced descriptor with frame checksum insertion */
6194 u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
6195 IXGBE_ADVTXD_DCMD_DEXT |
6196 IXGBE_ADVTXD_DCMD_IFCS;
6197
	/* set HW vlan bit if vlan is present */
6199 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
6200 IXGBE_ADVTXD_DCMD_VLE);
6201
	/* set segmentation enable bits for TSO/FSO */
6203 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
6204 IXGBE_ADVTXD_DCMD_TSE);
6205
	/* set timestamp bit if present */
6207 cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
6208 IXGBE_ADVTXD_MAC_TSTAMP);
6209
	/* insert frame checksum */
6211 cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
6212
6213 return cmd_type;
6214}
6215
6216static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
6217 u32 tx_flags, unsigned int paylen)
6218{
6219 u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
6220
	/* enable L4 checksum for TSO and TX checksum offload */
6222 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
6223 IXGBE_TX_FLAGS_CSUM,
6224 IXGBE_ADVTXD_POPTS_TXSM);
6225
	/* enable IPv4 checksum for TSO */
6227 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
6228 IXGBE_TX_FLAGS_IPV4,
6229 IXGBE_ADVTXD_POPTS_IXSM);
6230
	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
6235 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
6236 IXGBE_TX_FLAGS_CC,
6237 IXGBE_ADVTXD_CC);
6238
6239 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6240}
6241
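/* EOP marks the final descriptor of a frame, RS asks the hardware to
 * report descriptor write-back status
 */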
6242#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
6243 IXGBE_TXD_CMD_RS)
6244
6245static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
6246 struct ixgbe_tx_buffer *first,
6247 const u8 hdr_len)
6248{
6249 struct sk_buff *skb = first->skb;
6250 struct ixgbe_tx_buffer *tx_buffer;
6251 union ixgbe_adv_tx_desc *tx_desc;
6252 struct skb_frag_struct *frag;
6253 dma_addr_t dma;
6254 unsigned int data_len, size;
6255 u32 tx_flags = first->tx_flags;
6256 u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
6257 u16 i = tx_ring->next_to_use;
6258
6259 tx_desc = IXGBE_TX_DESC(tx_ring, i);
6260
6261 ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
6262
6263 size = skb_headlen(skb);
6264 data_len = skb->data_len;
6265
6266#ifdef IXGBE_FCOE
6267 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6268 if (data_len < sizeof(struct fcoe_crc_eof)) {
6269 size -= sizeof(struct fcoe_crc_eof) - data_len;
6270 data_len = 0;
6271 } else {
6272 data_len -= sizeof(struct fcoe_crc_eof);
6273 }
6274 }
6275
6276#endif
6277 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
6278
6279 tx_buffer = first;
6280
6281 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
6282 if (dma_mapping_error(tx_ring->dev, dma))
6283 goto dma_error;
6284
		/* record length, and DMA address */
6286 dma_unmap_len_set(tx_buffer, len, size);
6287 dma_unmap_addr_set(tx_buffer, dma, dma);
6288
6289 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6290
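		/* a buffer larger than one descriptor can carry is split
		 * across additional descriptors
		 */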
6291 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
6292 tx_desc->read.cmd_type_len =
6293 cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
6294
6295 i++;
6296 tx_desc++;
6297 if (i == tx_ring->count) {
6298 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6299 i = 0;
6300 }
6301 tx_desc->read.olinfo_status = 0;
6302
6303 dma += IXGBE_MAX_DATA_PER_TXD;
6304 size -= IXGBE_MAX_DATA_PER_TXD;
6305
6306 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6307 }
6308
6309 if (likely(!data_len))
6310 break;
6311
6312 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
6313
6314 i++;
6315 tx_desc++;
6316 if (i == tx_ring->count) {
6317 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6318 i = 0;
6319 }
6320 tx_desc->read.olinfo_status = 0;
6321
6322#ifdef IXGBE_FCOE
6323 size = min_t(unsigned int, data_len, skb_frag_size(frag));
6324#else
6325 size = skb_frag_size(frag);
6326#endif
6327 data_len -= size;
6328
6329 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
6330 DMA_TO_DEVICE);
6331
6332 tx_buffer = &tx_ring->tx_buffer_info[i];
6333 }
6334
	/* write last descriptor with RS and EOP bits */
6336 cmd_type |= size | IXGBE_TXD_CMD;
6337 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6338
6339 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6340
	/* set the timestamp */
6342 first->time_stamp = jiffies;
6343
	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
6352 wmb();
6353
	/* set next_to_watch value indicating a packet is present */
6355 first->next_to_watch = tx_desc;
6356
6357 i++;
6358 if (i == tx_ring->count)
6359 i = 0;
6360
6361 tx_ring->next_to_use = i;
6362
	/* notify HW of packet */
6364 writel(i, tx_ring->tail);
6365
6366 return;
6367dma_error:
6368 dev_err(tx_ring->dev, "TX DMA map failed\n");
6369
	/* clear dma mappings for failed tx_buffer_info map */
6371 for (;;) {
6372 tx_buffer = &tx_ring->tx_buffer_info[i];
6373 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
6374 if (tx_buffer == first)
6375 break;
6376 if (i == 0)
6377 i = tx_ring->count;
6378 i--;
6379 }
6380
6381 tx_ring->next_to_use = i;
6382}
6383
6384static void ixgbe_atr(struct ixgbe_ring *ring,
6385 struct ixgbe_tx_buffer *first)
6386{
6387 struct ixgbe_q_vector *q_vector = ring->q_vector;
6388 union ixgbe_atr_hash_dword input = { .dword = 0 };
6389 union ixgbe_atr_hash_dword common = { .dword = 0 };
6390 union {
6391 unsigned char *network;
6392 struct iphdr *ipv4;
6393 struct ipv6hdr *ipv6;
6394 } hdr;
6395 struct tcphdr *th;
6396 __be16 vlan_id;
6397
	/* if ring doesn't have an interrupt vector, cannot perform ATR */
6399 if (!q_vector)
6400 return;
6401
	/* do nothing if sampling is disabled */
6403 if (!ring->atr_sample_rate)
6404 return;
6405
6406 ring->atr_count++;
6407
	/* snag network header to get L4 type and address */
6409 hdr.network = skb_network_header(first->skb);
6410
	/* Currently only IPv4/IPv6 with TCP is supported */
6412 if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
6413 hdr.ipv6->nexthdr != IPPROTO_TCP) &&
6414 (first->protocol != __constant_htons(ETH_P_IP) ||
6415 hdr.ipv4->protocol != IPPROTO_TCP))
6416 return;
6417
6418 th = tcp_hdr(first->skb);
6419
	/* skip this packet since it is invalid or the socket is closing */
6421 if (!th || th->fin)
6422 return;
6423
	/* sample on all syn packets or once every atr sample count */
6425 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
6426 return;
6427
	/* reset sample count */
6429 ring->atr_count = 0;
6430
6431 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
6432
	/*
	 * src and dst are inverted, think how the receiver sees them
	 *
	 * The input is broken into two sections, a non-compressed section
	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
	 * will be stuffed into the common section.
	 */
6440 input.formatted.vlan_id = vlan_id;
6441
	/*
	 * since src port and flex bytes occupy the same word XOR them together
	 * and write the value to source port portion of compressed dword
	 */
6446 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
6447 common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
6448 else
6449 common.port.src ^= th->dest ^ first->protocol;
6450 common.port.dst ^= th->source;
6451
6452 if (first->protocol == __constant_htons(ETH_P_IP)) {
6453 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
6454 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
6455 } else {
6456 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
6457 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
6458 hdr.ipv6->saddr.s6_addr32[1] ^
6459 hdr.ipv6->saddr.s6_addr32[2] ^
6460 hdr.ipv6->saddr.s6_addr32[3] ^
6461 hdr.ipv6->daddr.s6_addr32[0] ^
6462 hdr.ipv6->daddr.s6_addr32[1] ^
6463 hdr.ipv6->daddr.s6_addr32[2] ^
6464 hdr.ipv6->daddr.s6_addr32[3];
6465 }
6466
	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
6468 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
6469 input, common, ring->queue_index);
6470}
6471
6472static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6473{
6474 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
6478 smp_mb();
6479
	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
6482 if (likely(ixgbe_desc_unused(tx_ring) < size))
6483 return -EBUSY;
6484
	/* A reprieve! - use start_queue because it doesn't call schedule */
6486 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
6487 ++tx_ring->tx_stats.restart_queue;
6488 return 0;
6489}
6490
6491static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6492{
6493 if (likely(ixgbe_desc_unused(tx_ring) >= size))
6494 return 0;
6495 return __ixgbe_maybe_stop_tx(tx_ring, size);
6496}
6497
6498#ifdef IXGBE_FCOE
6499static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6500{
6501 struct ixgbe_adapter *adapter;
6502 struct ixgbe_ring_feature *f;
6503 int txq;
6504
	/*
	 * only execute the code below if protocol is FCoE
	 * or FIP and we have FCoE enabled on the adapter
	 */
6509 switch (vlan_get_protocol(skb)) {
6510 case __constant_htons(ETH_P_FCOE):
6511 case __constant_htons(ETH_P_FIP):
6512 adapter = netdev_priv(dev);
6513
6514 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
6515 break;
6516 default:
6517 return __netdev_pick_tx(dev, skb);
6518 }
6519
6520 f = &adapter->ring_feature[RING_F_FCOE];
6521
6522 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
6523 smp_processor_id();
6524
6525 while (txq >= f->indices)
6526 txq -= f->indices;
6527
6528 return txq + f->offset;
6529}
6530
6531#endif
6532netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6533 struct ixgbe_adapter *adapter,
6534 struct ixgbe_ring *tx_ring)
6535{
6536 struct ixgbe_tx_buffer *first;
6537 int tso;
6538 u32 tx_flags = 0;
6539 unsigned short f;
6540 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6541 __be16 protocol = skb->protocol;
6542 u8 hdr_len = 0;
6543
	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
6551 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6552 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6553
6554 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
6555 tx_ring->tx_stats.tx_busy++;
6556 return NETDEV_TX_BUSY;
6557 }
6558
	/* record the location of the first descriptor for this packet */
6560 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6561 first->skb = skb;
6562 first->bytecount = skb->len;
6563 first->gso_segs = 1;
6564
	/* if we have a HW VLAN tag being added default to the HW one */
6566 if (vlan_tx_tag_present(skb)) {
6567 tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
6568 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN check the next protocol and store the tag */
6570 } else if (protocol == __constant_htons(ETH_P_8021Q)) {
6571 struct vlan_hdr *vhdr, _vhdr;
6572 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
6573 if (!vhdr)
6574 goto out_drop;
6575
6576 protocol = vhdr->h_vlan_encapsulated_proto;
6577 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
6578 IXGBE_TX_FLAGS_VLAN_SHIFT;
6579 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
6580 }
6581
6582 skb_tx_timestamp(skb);
6583
6584 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6585 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6586 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
6587
		/* schedule check for Tx timestamp */
6589 adapter->ptp_tx_skb = skb_get(skb);
6590 adapter->ptp_tx_start = jiffies;
6591 schedule_work(&adapter->ptp_tx_work);
6592 }
6593
6594#ifdef CONFIG_PCI_IOV
	/*
	 * Use the l2switch_enable flag - would be false if the DMA
	 * Tx switch had been disabled.
	 */
6599 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6600 tx_flags |= IXGBE_TX_FLAGS_CC;
6601
6602#endif
	/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
6604 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
6605 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
6606 (skb->priority != TC_PRIO_CONTROL))) {
6607 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
6608 tx_flags |= (skb->priority & 0x7) <<
6609 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
6610 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
6611 struct vlan_ethhdr *vhdr;
6612 if (skb_header_cloned(skb) &&
6613 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6614 goto out_drop;
6615 vhdr = (struct vlan_ethhdr *)skb->data;
6616 vhdr->h_vlan_TCI = htons(tx_flags >>
6617 IXGBE_TX_FLAGS_VLAN_SHIFT);
6618 } else {
6619 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
6620 }
6621 }
6622
	/* record initial flags and protocol */
6624 first->tx_flags = tx_flags;
6625 first->protocol = protocol;
6626
6627#ifdef IXGBE_FCOE
	/* setup tx offload for FCoE */
6629 if ((protocol == __constant_htons(ETH_P_FCOE)) &&
6630 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
6631 tso = ixgbe_fso(tx_ring, first, &hdr_len);
6632 if (tso < 0)
6633 goto out_drop;
6634
6635 goto xmit_fcoe;
6636 }
6637
6638#endif
6639 tso = ixgbe_tso(tx_ring, first, &hdr_len);
6640 if (tso < 0)
6641 goto out_drop;
6642 else if (!tso)
6643 ixgbe_tx_csum(tx_ring, first);
6644
	/* add the ATR filter if ATR is on */
6646 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
6647 ixgbe_atr(tx_ring, first);
6648
6649#ifdef IXGBE_FCOE
6650xmit_fcoe:
6651#endif
6652 ixgbe_tx_map(tx_ring, first, hdr_len);
6653
6654 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
6655
6656 return NETDEV_TX_OK;
6657
6658out_drop:
6659 dev_kfree_skb_any(first->skb);
6660 first->skb = NULL;
6661
6662 return NETDEV_TX_OK;
6663}
6664
6665static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6666 struct net_device *netdev)
6667{
6668 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6669 struct ixgbe_ring *tx_ring;
6670
	/*
	 * The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
6675 if (unlikely(skb->len < 17)) {
6676 if (skb_pad(skb, 17 - skb->len))
6677 return NETDEV_TX_OK;
6678 skb->len = 17;
6679 skb_set_tail_pointer(skb, 17);
6680 }
6681
6682 tx_ring = adapter->tx_ring[skb->queue_mapping];
6683 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
6684}
6685
/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
6693static int ixgbe_set_mac(struct net_device *netdev, void *p)
6694{
6695 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6696 struct ixgbe_hw *hw = &adapter->hw;
6697 struct sockaddr *addr = p;
6698
6699 if (!is_valid_ether_addr(addr->sa_data))
6700 return -EADDRNOTAVAIL;
6701
6702 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
6703 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
6704
6705 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
6706
6707 return 0;
6708}
6709
6710static int
6711ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
6712{
6713 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6714 struct ixgbe_hw *hw = &adapter->hw;
6715 u16 value;
6716 int rc;
6717
6718 if (prtad != hw->phy.mdio.prtad)
6719 return -EINVAL;
6720 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
6721 if (!rc)
6722 rc = value;
6723 return rc;
6724}
6725
6726static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
6727 u16 addr, u16 value)
6728{
6729 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6730 struct ixgbe_hw *hw = &adapter->hw;
6731
6732 if (prtad != hw->phy.mdio.prtad)
6733 return -EINVAL;
6734 return hw->phy.ops.write_reg(hw, addr, devad, value);
6735}
6736
6737static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
6738{
6739 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6740
6741 switch (cmd) {
6742 case SIOCSHWTSTAMP:
6743 return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
6744 default:
6745 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
6746 }
6747}
6748
/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
6756static int ixgbe_add_sanmac_netdev(struct net_device *dev)
6757{
6758 int err = 0;
6759 struct ixgbe_adapter *adapter = netdev_priv(dev);
6760 struct ixgbe_hw *hw = &adapter->hw;
6761
6762 if (is_valid_ether_addr(hw->mac.san_addr)) {
6763 rtnl_lock();
6764 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
6765 rtnl_unlock();
6766
		/* update SAN MAC vmdq pool selection */
6768 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
6769 }
6770 return err;
6771}
6772
/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the
 * corresponding netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
6780static int ixgbe_del_sanmac_netdev(struct net_device *dev)
6781{
6782 int err = 0;
6783 struct ixgbe_adapter *adapter = netdev_priv(dev);
6784 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6785
6786 if (is_valid_ether_addr(mac->san_addr)) {
6787 rtnl_lock();
6788 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
6789 rtnl_unlock();
6790 }
6791 return err;
6792}
6793
6794#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
6800static void ixgbe_netpoll(struct net_device *netdev)
6801{
6802 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6803 int i;
6804
	/* if interface is down do nothing */
6806 if (test_bit(__IXGBE_DOWN, &adapter->state))
6807 return;
6808
6809 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
6810 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
6811 for (i = 0; i < adapter->num_q_vectors; i++)
6812 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
6813 } else {
6814 ixgbe_intr(adapter->pdev->irq, netdev);
6815 }
6816 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
6817}
6818
6819#endif
6820static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6821 struct rtnl_link_stats64 *stats)
6822{
6823 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6824 int i;
6825
6826 rcu_read_lock();
6827 for (i = 0; i < adapter->num_rx_queues; i++) {
6828 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
6829 u64 bytes, packets;
6830 unsigned int start;
6831
6832 if (ring) {
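			/* u64_stats retry loop: reread the snapshot if a
			 * writer updated the counters while we sampled them
			 */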
6833 do {
6834 start = u64_stats_fetch_begin_bh(&ring->syncp);
6835 packets = ring->stats.packets;
6836 bytes = ring->stats.bytes;
6837 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6838 stats->rx_packets += packets;
6839 stats->rx_bytes += bytes;
6840 }
6841 }
6842
6843 for (i = 0; i < adapter->num_tx_queues; i++) {
6844 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
6845 u64 bytes, packets;
6846 unsigned int start;
6847
6848 if (ring) {
6849 do {
6850 start = u64_stats_fetch_begin_bh(&ring->syncp);
6851 packets = ring->stats.packets;
6852 bytes = ring->stats.bytes;
6853 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6854 stats->tx_packets += packets;
6855 stats->tx_bytes += bytes;
6856 }
6857 }
6858 rcu_read_unlock();
6859
6860 stats->multicast = netdev->stats.multicast;
6861 stats->rx_errors = netdev->stats.rx_errors;
6862 stats->rx_length_errors = netdev->stats.rx_length_errors;
6863 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
6864 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
6865 return stats;
6866}
6867
6868#ifdef CONFIG_IXGBE_DCB
6869
/**
 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid
 * @adapter: driver private struct
 * @tc: number of traffic classes currently enabled
 *
 * Confirm each 802.1Q priority maps to a packet buffer that exists.
 */
6877static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
6878{
6879 struct ixgbe_hw *hw = &adapter->hw;
6880 u32 reg, rsave;
6881 int i;
6882
	/* 82598 have a static priority to TC mapping that can not
	 * be changed so no validation is needed.
	 */
6886 if (hw->mac.type == ixgbe_mac_82598EB)
6887 return;
6888
6889 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
6890 rsave = reg;
6891
6892 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
6893 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
6894
		/* If up2tc is out of bounds default to zero */
6896 if (up2tc > tc)
6897 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
6898 }
6899
6900 if (reg != rsave)
6901 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
6902
6903 return;
6904}
6905
/**
 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
 * @adapter: Pointer to adapter struct
 *
 * Populate the netdev user priority to tc map
 */
6912static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
6913{
6914 struct net_device *dev = adapter->netdev;
6915 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
6916 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
6917 u8 prio;
6918
6919 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
6920 u8 tc = 0;
6921
6922 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
6923 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
6924 else if (ets)
6925 tc = ets->prio_tc[prio];
6926
6927 netdev_set_prio_tc_map(dev, prio, tc);
6928 }
6929}
6930
6931#endif
6932
/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 */
6938int ixgbe_setup_tc(struct net_device *dev, u8 tc)
6939{
6940 struct ixgbe_adapter *adapter = netdev_priv(dev);
6941 struct ixgbe_hw *hw = &adapter->hw;
6942
	/* Hardware supports up to 8 traffic classes */
6944 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
6945 (hw->mac.type == ixgbe_mac_82598EB &&
6946 tc < MAX_TRAFFIC_CLASS))
6947 return -EINVAL;
6948
	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
6953 if (netif_running(dev))
6954 ixgbe_close(dev);
6955 ixgbe_clear_interrupt_scheme(adapter);
6956
6957#ifdef CONFIG_IXGBE_DCB
6958 if (tc) {
6959 netdev_set_num_tc(dev, tc);
6960 ixgbe_set_prio_tc_map(adapter);
6961
6962 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
6963
6964 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
6965 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
6966 adapter->hw.fc.requested_mode = ixgbe_fc_none;
6967 }
6968 } else {
6969 netdev_reset_tc(dev);
6970
6971 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
6972 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
6973
6974 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6975
6976 adapter->temp_dcb_cfg.pfc_mode_enable = false;
6977 adapter->dcb_cfg.pfc_mode_enable = false;
6978 }
6979
6980 ixgbe_validate_rtr(adapter, tc);
6981
6982#endif
6983 ixgbe_init_interrupt_scheme(adapter);
6984
6985 if (netif_running(dev))
6986 return ixgbe_open(dev);
6987
6988 return 0;
6989}
6990
6991#ifdef CONFIG_PCI_IOV
6992void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
6993{
6994 struct net_device *netdev = adapter->netdev;
6995
6996 rtnl_lock();
6997 ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
6998 rtnl_unlock();
6999}
7000
7001#endif
7002void ixgbe_do_reset(struct net_device *netdev)
7003{
7004 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7005
7006 if (netif_running(netdev))
7007 ixgbe_reinit_locked(adapter);
7008 else
7009 ixgbe_reset(adapter);
7010}
7011
7012static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
7013 netdev_features_t features)
7014{
7015 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7016
	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
7018 if (!(features & NETIF_F_RXCSUM))
7019 features &= ~NETIF_F_LRO;
7020
	/* Turn off LRO if not RSC capable */
7022 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
7023 features &= ~NETIF_F_LRO;
7024
7025 return features;
7026}
7027
7028static int ixgbe_set_features(struct net_device *netdev,
7029 netdev_features_t features)
7030{
7031 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7032 netdev_features_t changed = netdev->features ^ features;
7033 bool need_reset = false;
7034
	/* Make sure RSC matches LRO, reset if change */
7036 if (!(features & NETIF_F_LRO)) {
7037 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
7038 need_reset = true;
7039 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
7040 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
7041 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
7042 if (adapter->rx_itr_setting == 1 ||
7043 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
7044 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
7045 need_reset = true;
7046 } else if ((changed ^ features) & NETIF_F_LRO) {
7047 e_info(probe, "rx-usecs set too low, "
7048 "disabling RSC\n");
7049 }
7050 }
7051
	/*
	 * Check if Flow Director n-tuple support was enabled or disabled.  If
	 * the state changed, we need to reset.
	 */
7056 switch (features & NETIF_F_NTUPLE) {
7057 case NETIF_F_NTUPLE:
		/* turn off ATR, enable perfect filters and reset */
7059 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
7060 need_reset = true;
7061
7062 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
7063 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
7064 break;
7065 default:
		/* turn off perfect filters, enable ATR and reset */
7067 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
7068 need_reset = true;
7069
7070 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
7071
		/* We cannot enable ATR if SR-IOV is enabled */
7073 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7074 break;
7075
		/* We cannot enable ATR if we have 2 or more traffic classes */
7077 if (netdev_get_num_tc(netdev) > 1)
7078 break;
7079
		/* We cannot enable ATR if RSS is disabled */
7081 if (adapter->ring_feature[RING_F_RSS].limit <= 1)
7082 break;
7083
		/* A sample rate of 0 indicates ATR disabled */
7085 if (!adapter->atr_sample_rate)
7086 break;
7087
7088 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
7089 break;
7090 }
7091
7092 if (features & NETIF_F_HW_VLAN_CTAG_RX)
7093 ixgbe_vlan_strip_enable(adapter);
7094 else
7095 ixgbe_vlan_strip_disable(adapter);
7096
7097 if (changed & NETIF_F_RXALL)
7098 need_reset = true;
7099
7100 netdev->features = features;
7101 if (need_reset)
7102 ixgbe_do_reset(netdev);
7103
7104 return 0;
7105}
7106
7107static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7108 struct net_device *dev,
7109 const unsigned char *addr,
7110 u16 flags)
7111{
7112 struct ixgbe_adapter *adapter = netdev_priv(dev);
7113 int err;
7114
7115 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
7116 return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
7117
	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
7121 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
7122 pr_info("%s: FDB only supports static addresses\n",
7123 ixgbe_driver_name);
7124 return -EINVAL;
7125 }
7126
7127 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
7128 u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
7129
7130 if (netdev_uc_count(dev) < rar_uc_entries)
7131 err = dev_uc_add_excl(dev, addr);
7132 else
7133 err = -ENOMEM;
7134 } else if (is_multicast_ether_addr(addr)) {
7135 err = dev_mc_add_excl(dev, addr);
7136 } else {
7137 err = -EINVAL;
7138 }
7139
	/* Only return duplicate errors if NLM_F_EXCL is set */
7141 if (err == -EEXIST && !(flags & NLM_F_EXCL))
7142 err = 0;
7143
7144 return err;
7145}
7146
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
				    struct nlmsghdr *nlh)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;
		u32 reg = 0;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode == BRIDGE_MODE_VEPA) {
			reg = 0;
			adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB;
		} else if (mode == BRIDGE_MODE_VEB) {
			reg = IXGBE_PFDTXGSWC_VT_LBEN;
			adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
		} else
			return -EINVAL;

		IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);

		e_info(drv, "enabling bridge mode: %s\n",
		       mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	}

	return 0;
}

static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				    struct net_device *dev,
				    u32 filter_mask)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	u16 mode;

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return 0;

	if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
		mode = BRIDGE_MODE_VEB;
	else
		mode = BRIDGE_MODE_VEPA;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
}

static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
#ifdef IXGBE_FCOE
	.ndo_select_queue	= ixgbe_select_queue,
#endif
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk	= ixgbe_ndo_set_vf_spoofchk,
	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
	.ndo_get_stats64	= ixgbe_get_stats64,
#ifdef CONFIG_IXGBE_DCB
	.ndo_setup_tc		= ixgbe_setup_tc,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbe_low_latency_recv,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
	.ndo_fcoe_get_wwn	= ixgbe_fcoe_get_wwn,
	.ndo_fcoe_get_hbainfo	= ixgbe_fcoe_get_hbainfo,
#endif
	.ndo_set_features	= ixgbe_set_features,
	.ndo_fix_features	= ixgbe_fix_features,
	.ndo_fdb_add		= ixgbe_ndo_fdb_add,
	.ndo_bridge_setlink	= ixgbe_ndo_bridge_setlink,
	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
};

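/**
 * ixgbe_wol_supported - Check whether the device supports Wake-on-LAN
 * @adapter: board private structure
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * Used by probe and ethtool to determine which devices have WoL support.
 **/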
int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			u16 subdevice_id)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
	int is_wol_supported = 0;

	switch (device_id) {
	case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices support WoL */
		switch (subdevice_id) {
		case IXGBE_SUBDEV_ID_82599_560FLR:
			/* only support first port */
			if (hw->bus.func != 0)
				break;
			/* else fall through */
		case IXGBE_SUBDEV_ID_82599_SP_560FLR:
		case IXGBE_SUBDEV_ID_82599_SFP:
		case IXGBE_SUBDEV_ID_82599_RNDC:
		case IXGBE_SUBDEV_ID_82599_ECNA_DP:
		case IXGBE_SUBDEV_ID_82599_LOM_SFP:
			is_wol_supported = 1;
			break;
		}
		break;
	case IXGBE_DEV_ID_82599EN_SFP:
		/* Only this subdevice supports WoL */
		switch (subdevice_id) {
		case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
			is_wol_supported = 1;
			break;
		}
		break;
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WoL */
		if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
			is_wol_supported = 1;
		break;
	case IXGBE_DEV_ID_82599_KX4:
		is_wol_supported = 1;
		break;
	case IXGBE_DEV_ID_X540T:
	case IXGBE_DEV_ID_X540T1:
		/* check eeprom to see if WoL is enabled for this port */
		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
		     (hw->bus.func == 0))) {
			is_wol_supported = 1;
		}
		break;
	}

	return is_wol_supported;
}

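/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/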
static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
	unsigned int indices = MAX_TX_QUEUES;
	u8 part_str[IXGBE_PBANUM_LENGTH];
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 eec;

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB
		/* 8 TC w/ 4 queues per TC */
		indices = 4 * MAX_TRAFFIC_CLASS;
#else
		indices = IXGBE_MAX_RSS_INDICES;
#endif
	}

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* Cache whether MNG FW is up so we do not have to read it again later */
	if (hw->mac.ops.mng_fw_enabled)
		hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw);

	/* Make it possible for the adapter to be woken up via WOL */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		break;
	default:
		break;
	}

	/*
	 * If there is a fan on this device and it has failed log
	 * the failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(probe, "Fan has stopped, replace the adapter\n");
	}

	if (allow_unsupported_sfp)
		hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* reset_hw fills in the perm_addr as well */
	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to load because an unsupported SFP+ "
			  "module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported "
			  "module.\n");
		goto err_sw_init;
	} else if (err) {
		e_dev_err("HW Init failed: %d\n", err);
		goto err_sw_init;
	}

#ifdef CONFIG_PCI_IOV
	/* SR-IOV not supported on the 82598 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		goto skip_sriov;
	/* Mailbox */
	ixgbe_init_mbx_params_pf(hw);
	memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
	ixgbe_enable_sriov(adapter);
	pci_sriov_set_totalvfs(pdev, 63);
skip_sriov:

#endif
	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM;

	netdev->hw_features = netdev->features;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		netdev->features |= NETIF_F_SCTP_CSUM;
		netdev->hw_features |= NETIF_F_SCTP_CSUM |
				       NETIF_F_NTUPLE;
		break;
	default:
		break;
	}

	netdev->hw_features |= NETIF_F_RXALL;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		unsigned int fcoe_l;

		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}

		fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
		adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;

		netdev->features |= NETIF_F_FSO |
				    NETIF_F_FCOE_CRC;

		netdev->vlan_features |= NETIF_F_FSO |
					 NETIF_F_FCOE_CRC |
					 NETIF_F_FCOE_MTU;
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
		netdev->hw_features |= NETIF_F_LRO;
	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		e_dev_err("The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_sw_init;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		e_dev_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->service_task, ixgbe_service_task);
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* WOL not supported for all devices */
	adapter->wol = 0;
	hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
	hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
					      pdev->subsystem_device);
	if (hw->wol_enabled)
		adapter->wol = IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);
	if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP)
		ixgbe_get_parent_bus_info(adapter);

	/* print bus type/speed/width info */
	e_dev_info("(PCI Express:%s:%s) %pM\n",
		   (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
		    hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
		    "Unknown"),
		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
		    "Unknown"),
		   netdev->dev_addr);

	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
	if (err)
		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   part_str);
	else
		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, part_str);

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		e_dev_warn("PCI-Express bandwidth available for this card is "
			   "not sufficient for optimal performance.\n");
		e_dev_warn("For optimal performance a x8 PCI-Express slot "
			   "is required.\n");
	}

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);
	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* power down the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser)
		hw->mac.ops.disable_tx_laser(hw);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* firmware requires driver version to be 0xFFFFFFFF
	 * since os does not support feature
	 */
	if (hw->mac.ops.set_fw_drv_ver)
		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
					   0xFF);

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("%s\n", ixgbe_default_device_descr);
	cards_found++;

#ifdef CONFIG_IXGBE_HWMON
	if (ixgbe_sysfs_init(adapter))
		e_err(probe, "failed to allocate sysfs resources\n");
#endif /* CONFIG_IXGBE_HWMON */

	ixgbe_dbg_adapter_init(adapter);

	/* Need link setup for MNG FW, else wait for IXGBE_UP */
	if (hw->mng_fw_enabled && hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw,
			IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
			true);

	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbe_disable_sriov(adapter);
	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

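/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/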
static void ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	ixgbe_dbg_adapter_exit(adapter);

	set_bit(__IXGBE_DOWN, &adapter->state);
	cancel_work_sync(&adapter->service_task);

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef CONFIG_IXGBE_HWMON
	ixgbe_sysfs_exit(adapter);
#endif /* CONFIG_IXGBE_HWMON */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

#ifdef CONFIG_PCI_IOV
	/*
	 * Only disable SR-IOV on unload if the user specified the now
	 * deprecated max_vfs module parameter.
	 */
	if (max_vfs)
		ixgbe_disable_sriov(adapter);
#endif
	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

#ifdef CONFIG_DCB
	kfree(adapter->ixgbe_ieee_pfc);
	kfree(adapter->ixgbe_ieee_ets);

#endif
	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	e_dev_info("complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

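/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */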
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	struct pci_dev *bdev, *vfdev;
	u32 dw0, dw1, dw2, dw3;
	int vf, pos;
	u16 req_id, pf_func;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		goto skip_bad_vf_detection;

	bdev = pdev->bus->self;
	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
		bdev = bdev->bus->self;

	if (!bdev)
		goto skip_bad_vf_detection;

	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		goto skip_bad_vf_detection;

	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);

	req_id = dw1 >> 16;
	/* if bit 7 of the requestor ID is set then it's a VF */
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;

	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		unsigned int device_id;

		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: %8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			device_id = IXGBE_82599_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X540:
			device_id = IXGBE_X540_VF_DEVICE_ID;
			break;
		default:
			device_id = 0;
			break;
		}

		/* Find the pci device of the offending VF */
		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					       device_id, vfdev);
		}

		/*
		 * There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
		if (vfdev) {
			e_dev_err("Issuing VFLR to VF %d\n", vf);
			pci_write_config_dword(vfdev, 0xA8, 0x00008000);
			/* Free device reference count */
			pci_dev_put(vfdev);
		}

		pci_cleanup_aer_uncorrect_error_status(pdev);
	}

	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
	adapter->vferr_refcount++;

	return PCI_ERS_RESULT_RECOVERED;

skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

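/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */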
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

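/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */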
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	if (adapter->vferr_refcount) {
		e_info(drv, "Resuming after VF err\n");
		adapter->vferr_refcount--;
		return;
	}

#endif
	if (netif_running(netdev))
		ixgbe_up(adapter);

	netif_device_attach(netdev);
}

static const struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = ixgbe_remove,
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.sriov_configure = ixgbe_pci_sriov_configure,
	.err_handler = &ixgbe_err_handler
};

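/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/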
static int __init ixgbe_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

	ixgbe_dbg_init();

	ret = pci_register_driver(&ixgbe_driver);
	if (ret) {
		ixgbe_dbg_exit();
		return ret;
	}

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	return 0;
}

module_init(ixgbe_init_module);

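/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/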
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);

	ixgbe_dbg_exit();

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */