/*******************************************************************************

  Intel(R) 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is distributed under the terms of the GNU General Public
  License ("GPL"); see MODULE_LICENSE() below.

*******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#endif
#define MAJ 3
#define MIN 9
#define BUILD 15
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
	__stringify(BUILD) "-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
				"Copyright (c) 1999-2012 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
	[board_X540] = &ixgbe_X540_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
88static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
89 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
90 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
91 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
92 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
93 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
94 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
95 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
96 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
97 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
98 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
99 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
102 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
104 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
105 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
106 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
107 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
108 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
115 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	/* required last entry */
118 {0, }
119};
120MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
121
122#ifdef CONFIG_IXGBE_DCA
123static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
124 void *p);
125static struct notifier_block dca_notifier = {
126 .notifier_call = ixgbe_notify_dca,
127 .next = NULL,
128 .priority = 0
129};
130#endif
131
132#ifdef CONFIG_PCI_IOV
133static unsigned int max_vfs;
134module_param(max_vfs, uint, 0);
135MODULE_PARM_DESC(max_vfs,
136 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
137#endif
138
139static unsigned int allow_unsupported_sfp;
140module_param(allow_unsupported_sfp, uint, 0);
141MODULE_PARM_DESC(allow_unsupported_sfp,
142 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
143
144#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
145static int debug = -1;
146module_param(debug, int, 0);
147MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
148
149MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
150MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
151MODULE_LICENSE("GPL");
152MODULE_VERSION(DRV_VERSION);
153
154static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
155{
156 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
157 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
158 schedule_work(&adapter->service_task);
159}
160
161static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
162{
163 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_clear_bit();
167 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
168}
169
170struct ixgbe_reg_info {
171 u32 ofs;
172 char *name;
173};
174
175static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
178 {IXGBE_CTRL, "CTRL"},
179 {IXGBE_STATUS, "STATUS"},
180 {IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
183 {IXGBE_EICR, "EICR"},

	/* RX Registers */
186 {IXGBE_SRRCTL(0), "SRRCTL"},
187 {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
188 {IXGBE_RDLEN(0), "RDLEN"},
189 {IXGBE_RDH(0), "RDH"},
190 {IXGBE_RDT(0), "RDT"},
191 {IXGBE_RXDCTL(0), "RXDCTL"},
192 {IXGBE_RDBAL(0), "RDBAL"},
193 {IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
196 {IXGBE_TDBAL(0), "TDBAL"},
197 {IXGBE_TDBAH(0), "TDBAH"},
198 {IXGBE_TDLEN(0), "TDLEN"},
199 {IXGBE_TDH(0), "TDH"},
200 {IXGBE_TDT(0), "TDT"},
201 {IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
204 {}
205};
206

/*
 * ixgbe_regdump - register printout routine
 */
211static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
212{
213 int i = 0, j = 0;
214 char rname[16];
215 u32 regs[64];
216
217 switch (reginfo->ofs) {
218 case IXGBE_SRRCTL(0):
219 for (i = 0; i < 64; i++)
220 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
221 break;
222 case IXGBE_DCA_RXCTRL(0):
223 for (i = 0; i < 64; i++)
224 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
225 break;
226 case IXGBE_RDLEN(0):
227 for (i = 0; i < 64; i++)
228 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
229 break;
230 case IXGBE_RDH(0):
231 for (i = 0; i < 64; i++)
232 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
233 break;
234 case IXGBE_RDT(0):
235 for (i = 0; i < 64; i++)
236 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
237 break;
238 case IXGBE_RXDCTL(0):
239 for (i = 0; i < 64; i++)
240 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
241 break;
242 case IXGBE_RDBAL(0):
243 for (i = 0; i < 64; i++)
244 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
245 break;
246 case IXGBE_RDBAH(0):
247 for (i = 0; i < 64; i++)
248 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
249 break;
250 case IXGBE_TDBAL(0):
251 for (i = 0; i < 64; i++)
252 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
253 break;
254 case IXGBE_TDBAH(0):
255 for (i = 0; i < 64; i++)
256 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
257 break;
258 case IXGBE_TDLEN(0):
259 for (i = 0; i < 64; i++)
260 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
261 break;
262 case IXGBE_TDH(0):
263 for (i = 0; i < 64; i++)
264 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
265 break;
266 case IXGBE_TDT(0):
267 for (i = 0; i < 64; i++)
268 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
269 break;
270 case IXGBE_TXDCTL(0):
271 for (i = 0; i < 64; i++)
272 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
273 break;
274 default:
275 pr_info("%-15s %08x\n", reginfo->name,
276 IXGBE_READ_REG(hw, reginfo->ofs));
277 return;
278 }
279
280 for (i = 0; i < 8; i++) {
281 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
282 pr_err("%-15s", rname);
283 for (j = 0; j < 8; j++)
284 pr_cont(" %08x", regs[i*8+j]);
285 pr_cont("\n");
286 }
287
288}
289
/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
293static void ixgbe_dump(struct ixgbe_adapter *adapter)
294{
295 struct net_device *netdev = adapter->netdev;
296 struct ixgbe_hw *hw = &adapter->hw;
297 struct ixgbe_reg_info *reginfo;
298 int n = 0;
299 struct ixgbe_ring *tx_ring;
300 struct ixgbe_tx_buffer *tx_buffer;
301 union ixgbe_adv_tx_desc *tx_desc;
302 struct my_u0 { u64 a; u64 b; } *u0;
303 struct ixgbe_ring *rx_ring;
304 union ixgbe_adv_rx_desc *rx_desc;
305 struct ixgbe_rx_buffer *rx_buffer_info;
306 u32 staterr;
307 int i = 0;
308
309 if (!netif_msg_hw(adapter))
310 return;
311
312
313 if (netdev) {
314 dev_info(&adapter->pdev->dev, "Net device Info\n");
315 pr_info("Device Name state "
316 "trans_start last_rx\n");
317 pr_info("%-15s %016lX %016lX %016lX\n",
318 netdev->name,
319 netdev->state,
320 netdev->trans_start,
321 netdev->last_rx);
322 }
323
324
325 dev_info(&adapter->pdev->dev, "Register Dump\n");
326 pr_info(" Register Name Value\n");
327 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
328 reginfo->name; reginfo++) {
329 ixgbe_regdump(hw, reginfo);
330 }
331
332
333 if (!netdev || !netif_running(netdev))
334 goto exit;
335
336 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
337 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
338 for (n = 0; n < adapter->num_tx_queues; n++) {
339 tx_ring = adapter->tx_ring[n];
340 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
341 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
342 n, tx_ring->next_to_use, tx_ring->next_to_clean,
343 (u64)dma_unmap_addr(tx_buffer, dma),
344 dma_unmap_len(tx_buffer, len),
345 tx_buffer->next_to_watch,
346 (u64)tx_buffer->time_stamp);
347 }
348
349
350 if (!netif_msg_tx_done(adapter))
351 goto rx_ring_summary;
352
353 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Each Tx descriptor is dumped raw (the two 64-bit descriptor words),
	 * followed by the buffer DMA address, mapped length, next_to_watch
	 * pointer, timestamp, and skb pointer.  NTU and NTC flag the ring's
	 * next_to_use and next_to_clean positions.
	 */
366 for (n = 0; n < adapter->num_tx_queues; n++) {
367 tx_ring = adapter->tx_ring[n];
368 pr_info("------------------------------------\n");
369 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
370 pr_info("------------------------------------\n");
371 pr_info("T [desc] [address 63:0 ] "
372 "[PlPOIdStDDt Ln] [bi->dma ] "
373 "leng ntw timestamp bi->skb\n");
374
375 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
376 tx_desc = IXGBE_TX_DESC(tx_ring, i);
377 tx_buffer = &tx_ring->tx_buffer_info[i];
378 u0 = (struct my_u0 *)tx_desc;
379 pr_info("T [0x%03X] %016llX %016llX %016llX"
380 " %04X %p %016llX %p", i,
381 le64_to_cpu(u0->a),
382 le64_to_cpu(u0->b),
383 (u64)dma_unmap_addr(tx_buffer, dma),
384 dma_unmap_len(tx_buffer, len),
385 tx_buffer->next_to_watch,
386 (u64)tx_buffer->time_stamp,
387 tx_buffer->skb);
388 if (i == tx_ring->next_to_use &&
389 i == tx_ring->next_to_clean)
390 pr_cont(" NTC/U\n");
391 else if (i == tx_ring->next_to_use)
392 pr_cont(" NTU\n");
393 else if (i == tx_ring->next_to_clean)
394 pr_cont(" NTC\n");
395 else
396 pr_cont("\n");
397
398 if (netif_msg_pktdata(adapter) &&
399 tx_buffer->skb)
400 print_hex_dump(KERN_INFO, "",
401 DUMP_PREFIX_ADDRESS, 16, 1,
402 tx_buffer->skb->data,
403 dma_unmap_len(tx_buffer, len),
404 true);
405 }
406 }
407
408
409rx_ring_summary:
410 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
411 pr_info("Queue [NTU] [NTC]\n");
412 for (n = 0; n < adapter->num_rx_queues; n++) {
413 rx_ring = adapter->rx_ring[n];
414 pr_info("%5d %5X %5X\n",
415 n, rx_ring->next_to_use, rx_ring->next_to_clean);
416 }
417
418
419 if (!netif_msg_rx_status(adapter))
420 goto exit;
421
422 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Rx descriptors are printed in one of two layouts: the "Adv Rx Read"
	 * format for descriptors still owned by software (packet buffer and
	 * header buffer addresses) and the "Adv Rx Write-Back" format once
	 * the hardware has set the DD bit (status, checksum, and length
	 * fields).  NTU and NTC flag the ring positions, and packet data is
	 * hex-dumped when pktdata messaging is enabled.
	 */
444 for (n = 0; n < adapter->num_rx_queues; n++) {
445 rx_ring = adapter->rx_ring[n];
446 pr_info("------------------------------------\n");
447 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
448 pr_info("------------------------------------\n");
449 pr_info("R [desc] [ PktBuf A0] "
450 "[ HeadBuf DD] [bi->dma ] [bi->skb] "
451 "<-- Adv Rx Read format\n");
452 pr_info("RWB[desc] [PcsmIpSHl PtRs] "
453 "[vl er S cks ln] ---------------- [bi->skb] "
454 "<-- Adv Rx Write-Back format\n");
455
456 for (i = 0; i < rx_ring->count; i++) {
457 rx_buffer_info = &rx_ring->rx_buffer_info[i];
458 rx_desc = IXGBE_RX_DESC(rx_ring, i);
459 u0 = (struct my_u0 *)rx_desc;
460 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
461 if (staterr & IXGBE_RXD_STAT_DD) {
462
463 pr_info("RWB[0x%03X] %016llX "
464 "%016llX ---------------- %p", i,
465 le64_to_cpu(u0->a),
466 le64_to_cpu(u0->b),
467 rx_buffer_info->skb);
468 } else {
469 pr_info("R [0x%03X] %016llX "
470 "%016llX %016llX %p", i,
471 le64_to_cpu(u0->a),
472 le64_to_cpu(u0->b),
473 (u64)rx_buffer_info->dma,
474 rx_buffer_info->skb);
475
476 if (netif_msg_pktdata(adapter) &&
477 rx_buffer_info->dma) {
478 print_hex_dump(KERN_INFO, "",
479 DUMP_PREFIX_ADDRESS, 16, 1,
480 page_address(rx_buffer_info->page) +
481 rx_buffer_info->page_offset,
482 ixgbe_rx_bufsz(rx_ring), true);
483 }
484 }
485
486 if (i == rx_ring->next_to_use)
487 pr_cont(" NTU\n");
488 else if (i == rx_ring->next_to_clean)
489 pr_cont(" NTC\n");
490 else
491 pr_cont("\n");
492
493 }
494 }
495
496exit:
497 return;
498}
499
500static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
501{
502 u32 ctrl_ext;

	/* Let firmware take over control of h/w */
505 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
506 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
507 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
508}
509
510static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
511{
512 u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
515 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
516 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
517 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
518}
519
/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
528static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
529 u8 queue, u8 msix_vector)
530{
531 u32 ivar, index;
532 struct ixgbe_hw *hw = &adapter->hw;
533 switch (hw->mac.type) {
534 case ixgbe_mac_82598EB:
535 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
536 if (direction == -1)
537 direction = 0;
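		/* 82598: each IVAR register holds four 8-bit entries; Rx
		 * causes occupy entries 0-63 and Tx causes 64-127, so fold
		 * the direction and queue number into a register index and
		 * an 8-bit lane within that register.
		 */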
538 index = (((direction * 64) + queue) >> 2) & 0x1F;
539 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
540 ivar &= ~(0xFF << (8 * (queue & 0x3)));
541 ivar |= (msix_vector << (8 * (queue & 0x3)));
542 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
543 break;
544 case ixgbe_mac_82599EB:
545 case ixgbe_mac_X540:
546 if (direction == -1) {
547
548 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
549 index = ((queue & 1) * 8);
550 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
551 ivar &= ~(0xFF << index);
552 ivar |= (msix_vector << index);
553 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
554 break;
555 } else {
556
557 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
558 index = ((16 * (queue & 1)) + (8 * direction));
559 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
560 ivar &= ~(0xFF << index);
561 ivar |= (msix_vector << index);
562 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
563 break;
564 }
565 default:
566 break;
567 }
568}
569
570static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
571 u64 qmask)
572{
573 u32 mask;
574
575 switch (adapter->hw.mac.type) {
576 case ixgbe_mac_82598EB:
577 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
578 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
579 break;
580 case ixgbe_mac_82599EB:
581 case ixgbe_mac_X540:
582 mask = (qmask & 0xFFFFFFFF);
583 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
584 mask = (qmask >> 32);
585 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
586 break;
587 default:
588 break;
589 }
590}
591
592void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
593 struct ixgbe_tx_buffer *tx_buffer)
594{
595 if (tx_buffer->skb) {
596 dev_kfree_skb_any(tx_buffer->skb);
597 if (dma_unmap_len(tx_buffer, len))
598 dma_unmap_single(ring->dev,
599 dma_unmap_addr(tx_buffer, dma),
600 dma_unmap_len(tx_buffer, len),
601 DMA_TO_DEVICE);
602 } else if (dma_unmap_len(tx_buffer, len)) {
603 dma_unmap_page(ring->dev,
604 dma_unmap_addr(tx_buffer, dma),
605 dma_unmap_len(tx_buffer, len),
606 DMA_TO_DEVICE);
607 }
608 tx_buffer->next_to_watch = NULL;
609 tx_buffer->skb = NULL;
610 dma_unmap_len_set(tx_buffer, len, 0);
611
612}
613
614static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
615{
616 struct ixgbe_hw *hw = &adapter->hw;
617 struct ixgbe_hw_stats *hwstats = &adapter->stats;
618 int i;
619 u32 data;
620
621 if ((hw->fc.current_mode != ixgbe_fc_full) &&
622 (hw->fc.current_mode != ixgbe_fc_rx_pause))
623 return;
624
625 switch (hw->mac.type) {
626 case ixgbe_mac_82598EB:
627 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
628 break;
629 default:
630 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
631 }
632 hwstats->lxoffrxc += data;

	/* if we received any XOFF frames, disarm the Tx hang check so a
	 * paused queue is not mistaken for a hung one
	 */
635 if (!data)
636 return;
637
638 for (i = 0; i < adapter->num_tx_queues; i++)
639 clear_bit(__IXGBE_HANG_CHECK_ARMED,
640 &adapter->tx_ring[i]->state);
641}
642
643static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
644{
645 struct ixgbe_hw *hw = &adapter->hw;
646 struct ixgbe_hw_stats *hwstats = &adapter->stats;
647 u32 xoff[8] = {0};
648 int i;
649 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
650
651 if (adapter->ixgbe_ieee_pfc)
652 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
653
654 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
655 ixgbe_update_xoff_rx_lfc(adapter);
656 return;
657 }

	/* update stats for each traffic class, only valid with PFC enabled */
660 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
661 switch (hw->mac.type) {
662 case ixgbe_mac_82598EB:
663 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
664 break;
665 default:
666 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
667 }
668 hwstats->pxoffrxc[i] += xoff[i];
669 }

	/* disarm tx queues that have received xoff frames */
672 for (i = 0; i < adapter->num_tx_queues; i++) {
673 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
674 u8 tc = tx_ring->dcb_tc;
675
676 if (xoff[tc])
677 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
678 }
679}
680
681static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
682{
683 return ring->stats.packets;
684}
685
686static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
687{
688 struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
689 struct ixgbe_hw *hw = &adapter->hw;
690
691 u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
692 u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
693
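	/* TDH and TDT wrap independently, so when head has passed tail the
	 * outstanding count is the distance measured around the ring.
	 */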
694 if (head != tail)
695 return (head < tail) ?
696 tail - head : (tail + ring->count - head);
697
698 return 0;
699}
700
701static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
702{
703 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
704 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
705 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
706 bool ret = false;
707
708 clear_check_for_tx_hang(tx_ring);

	/*
	 * Check for a hung queue, but be thorough.  This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending.  The
	 * ARMED bit is set to indicate a potential hang.  The bit
	 * is cleared if a pause frame is received to remove false
	 * hang detection due to PFC or 802.3x frames.  By requiring
	 * this to fail twice we avoid races with clearing the ARMED
	 * bit and conditions where we run the check_tx_hang logic
	 * with a transmit completion pending but without time to
	 * complete it yet.
	 */
722 if ((tx_done_old == tx_done) && tx_pending) {
723
724 ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
725 &tx_ring->state);
726 } else {
727
728 tx_ring->tx_stats.tx_done_old = tx_done;
729
730 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
731 }
732
733 return ret;
734}
735
/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
740static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
741{

	/* Do the reset outside of interrupt context */
744 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
745 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
746 ixgbe_service_event_schedule(adapter);
747 }
748}
749
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
755static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
756 struct ixgbe_ring *tx_ring)
757{
758 struct ixgbe_adapter *adapter = q_vector->adapter;
759 struct ixgbe_tx_buffer *tx_buffer;
760 union ixgbe_adv_tx_desc *tx_desc;
761 unsigned int total_bytes = 0, total_packets = 0;
762 unsigned int budget = q_vector->tx.work_limit;
763 unsigned int i = tx_ring->next_to_clean;
764
765 if (test_bit(__IXGBE_DOWN, &adapter->state))
766 return true;
767
768 tx_buffer = &tx_ring->tx_buffer_info[i];
769 tx_desc = IXGBE_TX_DESC(tx_ring, i);
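	/* walk the ring with a negative offset from the end so a simple
	 * !i test detects wrap-around
	 */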
770 i -= tx_ring->count;
771
772 do {
773 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
774
775
776 if (!eop_desc)
777 break;
778
779
780 rmb();
781
782
783 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
784 break;
785
786
787 tx_buffer->next_to_watch = NULL;
788
789
790 total_bytes += tx_buffer->bytecount;
791 total_packets += tx_buffer->gso_segs;
792
793#ifdef CONFIG_IXGBE_PTP
794 if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
795 ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
796#endif
797
798
799 dev_kfree_skb_any(tx_buffer->skb);
800
801
802 dma_unmap_single(tx_ring->dev,
803 dma_unmap_addr(tx_buffer, dma),
804 dma_unmap_len(tx_buffer, len),
805 DMA_TO_DEVICE);
806
807
808 tx_buffer->skb = NULL;
809 dma_unmap_len_set(tx_buffer, len, 0);
810
811
812 while (tx_desc != eop_desc) {
813 tx_buffer++;
814 tx_desc++;
815 i++;
816 if (unlikely(!i)) {
817 i -= tx_ring->count;
818 tx_buffer = tx_ring->tx_buffer_info;
819 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
820 }
821
822
823 if (dma_unmap_len(tx_buffer, len)) {
824 dma_unmap_page(tx_ring->dev,
825 dma_unmap_addr(tx_buffer, dma),
826 dma_unmap_len(tx_buffer, len),
827 DMA_TO_DEVICE);
828 dma_unmap_len_set(tx_buffer, len, 0);
829 }
830 }
831
832
833 tx_buffer++;
834 tx_desc++;
835 i++;
836 if (unlikely(!i)) {
837 i -= tx_ring->count;
838 tx_buffer = tx_ring->tx_buffer_info;
839 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
840 }
841
842
843 prefetch(tx_desc);
844
845
846 budget--;
847 } while (likely(budget));
848
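	/* convert the negative ring offset back to an absolute index */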
849 i += tx_ring->count;
850 tx_ring->next_to_clean = i;
851 u64_stats_update_begin(&tx_ring->syncp);
852 tx_ring->stats.bytes += total_bytes;
853 tx_ring->stats.packets += total_packets;
854 u64_stats_update_end(&tx_ring->syncp);
855 q_vector->tx.total_bytes += total_bytes;
856 q_vector->tx.total_packets += total_packets;
857
858 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
859
860 struct ixgbe_hw *hw = &adapter->hw;
861 e_err(drv, "Detected Tx Unit Hang\n"
862 " Tx Queue <%d>\n"
863 " TDH, TDT <%x>, <%x>\n"
864 " next_to_use <%x>\n"
865 " next_to_clean <%x>\n"
866 "tx_buffer_info[next_to_clean]\n"
867 " time_stamp <%lx>\n"
868 " jiffies <%lx>\n",
869 tx_ring->queue_index,
870 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
871 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
872 tx_ring->next_to_use, i,
873 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
874
875 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
876
877 e_info(probe,
878 "tx hang %d detected on queue %d, resetting adapter\n",
879 adapter->tx_timeout_count + 1, tx_ring->queue_index);
880
881
882 ixgbe_tx_timeout_reset(adapter);
883
884
885 return true;
886 }
887
888 netdev_tx_completed_queue(txring_txq(tx_ring),
889 total_packets, total_bytes);
890
891#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
892 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
893 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
897 smp_mb();
898 if (__netif_subqueue_stopped(tx_ring->netdev,
899 tx_ring->queue_index)
900 && !test_bit(__IXGBE_DOWN, &adapter->state)) {
901 netif_wake_subqueue(tx_ring->netdev,
902 tx_ring->queue_index);
903 ++tx_ring->tx_stats.restart_queue;
904 }
905 }
906
907 return !!budget;
908}
909
910#ifdef CONFIG_IXGBE_DCA
911static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
912 struct ixgbe_ring *tx_ring,
913 int cpu)
914{
915 struct ixgbe_hw *hw = &adapter->hw;
916 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
917 u16 reg_offset;
918
919 switch (hw->mac.type) {
920 case ixgbe_mac_82598EB:
921 reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
922 break;
923 case ixgbe_mac_82599EB:
924 case ixgbe_mac_X540:
925 reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
926 txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
927 break;
928 default:
929
930 return;
931 }
932
933
934
935
936
937
938 txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
939 IXGBE_DCA_TXCTRL_DATA_RRO_EN |
940 IXGBE_DCA_TXCTRL_DESC_DCA_EN;
941
942 IXGBE_WRITE_REG(hw, reg_offset, txctrl);
943}
944
945static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
946 struct ixgbe_ring *rx_ring,
947 int cpu)
948{
949 struct ixgbe_hw *hw = &adapter->hw;
950 u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
951 u8 reg_idx = rx_ring->reg_idx;
952
953
954 switch (hw->mac.type) {
955 case ixgbe_mac_82599EB:
956 case ixgbe_mac_X540:
957 rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
958 break;
959 default:
960 break;
961 }
962
963
964
965
966
967
968 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
969 IXGBE_DCA_RXCTRL_DATA_DCA_EN |
970 IXGBE_DCA_RXCTRL_DESC_DCA_EN;
971
972 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
973}
974
975static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
976{
977 struct ixgbe_adapter *adapter = q_vector->adapter;
978 struct ixgbe_ring *ring;
979 int cpu = get_cpu();
980
981 if (q_vector->cpu == cpu)
982 goto out_no_update;
983
984 ixgbe_for_each_ring(ring, q_vector->tx)
985 ixgbe_update_tx_dca(adapter, ring, cpu);
986
987 ixgbe_for_each_ring(ring, q_vector->rx)
988 ixgbe_update_rx_dca(adapter, ring, cpu);
989
990 q_vector->cpu = cpu;
991out_no_update:
992 put_cpu();
993}
994
995static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
996{
997 int i;
998
999 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
1000 return;
1001
1002
1003 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
1004
1005 for (i = 0; i < adapter->num_q_vectors; i++) {
1006 adapter->q_vector[i]->cpu = -1;
1007 ixgbe_update_dca(adapter->q_vector[i]);
1008 }
1009}
1010
1011static int __ixgbe_notify_dca(struct device *dev, void *data)
1012{
1013 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
1014 unsigned long event = *(unsigned long *)data;
1015
1016 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
1017 return 0;
1018
1019 switch (event) {
1020 case DCA_PROVIDER_ADD:
1021
1022 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1023 break;
1024 if (dca_add_requester(dev) == 0) {
1025 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1026 ixgbe_setup_dca(adapter);
1027 break;
1028 }
1029
1030 case DCA_PROVIDER_REMOVE:
1031 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1032 dca_remove_requester(dev);
1033 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1034 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
1035 }
1036 break;
1037 }
1038
1039 return 0;
1040}
1041
1042#endif
1043static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
1044 union ixgbe_adv_rx_desc *rx_desc,
1045 struct sk_buff *skb)
1046{
1047 if (ring->netdev->features & NETIF_F_RXHASH)
1048 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
1049}
1050
1051#ifdef IXGBE_FCOE
/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @ring: structure containing ring specific data
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
1059static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
1060 union ixgbe_adv_rx_desc *rx_desc)
1061{
1062 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1063
1064 return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
1065 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1066 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1067 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1068}
1069
1070#endif
1071
/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
1077static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
1078 union ixgbe_adv_rx_desc *rx_desc,
1079 struct sk_buff *skb)
1080{
1081 skb_checksum_none_assert(skb);
1082
1083
1084 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1085 return;
1086
1087
1088 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1089 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1090 ring->rx_stats.csum_err++;
1091 return;
1092 }
1093
1094 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1095 return;
1096
1097 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1098 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1099
		/*
		 * 82599 errata: UDP frames with a zero checksum can be
		 * marked as checksum errors.
		 */
1104 if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
1105 test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
1106 return;
1107
1108 ring->rx_stats.csum_err++;
1109 return;
1110 }
1111
1112
1113 skb->ip_summed = CHECKSUM_UNNECESSARY;
1114}
1115
1116static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
1117{
1118 rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
1121 rx_ring->next_to_alloc = val;
1122
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
1128 wmb();
1129 writel(val, rx_ring->tail);
1130}
1131
1132static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1133 struct ixgbe_rx_buffer *bi)
1134{
1135 struct page *page = bi->page;
1136 dma_addr_t dma = bi->dma;
1137
1138
1139 if (likely(dma))
1140 return true;
1141
1142
1143 if (likely(!page)) {
1144 page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
1145 bi->skb, ixgbe_rx_pg_order(rx_ring));
1146 if (unlikely(!page)) {
1147 rx_ring->rx_stats.alloc_rx_page_failed++;
1148 return false;
1149 }
1150 bi->page = page;
1151 }
1152
1153
1154 dma = dma_map_page(rx_ring->dev, page, 0,
1155 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1156
1157
1158
1159
1160
1161 if (dma_mapping_error(rx_ring->dev, dma)) {
1162 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1163 bi->page = NULL;
1164
1165 rx_ring->rx_stats.alloc_rx_page_failed++;
1166 return false;
1167 }
1168
1169 bi->dma = dma;
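	/* the receive buffer is half of the mapped page, so XOR-ing the
	 * offset simply alternates between the two halves of the page
	 */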
1170 bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);
1171
1172 return true;
1173}
1174
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
1180void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1181{
1182 union ixgbe_adv_rx_desc *rx_desc;
1183 struct ixgbe_rx_buffer *bi;
1184 u16 i = rx_ring->next_to_use;
1185
1186
1187 if (!cleaned_count)
1188 return;
1189
1190 rx_desc = IXGBE_RX_DESC(rx_ring, i);
1191 bi = &rx_ring->rx_buffer_info[i];
1192 i -= rx_ring->count;
1193
1194 do {
1195 if (!ixgbe_alloc_mapped_page(rx_ring, bi))
1196 break;
1197
		/*
		 * Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
1202 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1203
1204 rx_desc++;
1205 bi++;
1206 i++;
1207 if (unlikely(!i)) {
1208 rx_desc = IXGBE_RX_DESC(rx_ring, 0);
1209 bi = rx_ring->rx_buffer_info;
1210 i -= rx_ring->count;
1211 }
1212
1213
1214 rx_desc->read.hdr_addr = 0;
1215
1216 cleaned_count--;
1217 } while (cleaned_count);
1218
1219 i += rx_ring->count;
1220
1221 if (rx_ring->next_to_use != i)
1222 ixgbe_release_rx_desc(rx_ring, i);
1223}
1224
/**
 * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @max_len: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
1236static unsigned int ixgbe_get_headlen(unsigned char *data,
1237 unsigned int max_len)
1238{
1239 union {
1240 unsigned char *network;
1241
1242 struct ethhdr *eth;
1243 struct vlan_hdr *vlan;
1244
1245 struct iphdr *ipv4;
1246 } hdr;
1247 __be16 protocol;
1248 u8 nexthdr = 0;
1249 u8 hlen;
1250
1251
1252 if (max_len < ETH_HLEN)
1253 return max_len;
1254
1255
1256 hdr.network = data;
1257
1258
1259 protocol = hdr.eth->h_proto;
1260 hdr.network += ETH_HLEN;
1261
1262
1263 if (protocol == __constant_htons(ETH_P_8021Q)) {
1264 if ((hdr.network - data) > (max_len - VLAN_HLEN))
1265 return max_len;
1266
1267 protocol = hdr.vlan->h_vlan_encapsulated_proto;
1268 hdr.network += VLAN_HLEN;
1269 }
1270
1271
1272 if (protocol == __constant_htons(ETH_P_IP)) {
1273 if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
1274 return max_len;
1275
1276
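		/* the low nibble of the first IPv4 byte is the header length
		 * in 32-bit words; shift left by 2 to convert it to bytes
		 */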
1277 hlen = (hdr.network[0] & 0x0F) << 2;
1278
1279
1280 if (hlen < sizeof(struct iphdr))
1281 return hdr.network - data;
1282
1283
1284 nexthdr = hdr.ipv4->protocol;
1285 hdr.network += hlen;
1286#ifdef IXGBE_FCOE
1287 } else if (protocol == __constant_htons(ETH_P_FCOE)) {
1288 if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
1289 return max_len;
1290 hdr.network += FCOE_HEADER_LEN;
1291#endif
1292 } else {
1293 return hdr.network - data;
1294 }
1295
1296
1297 if (nexthdr == IPPROTO_TCP) {
1298 if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
1299 return max_len;
1300
1301
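		/* the upper nibble of byte 12 is the TCP data offset in
		 * 32-bit words; masking it in place and shifting right by 2
		 * converts it straight to bytes
		 */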
1302 hlen = (hdr.network[12] & 0xF0) >> 2;
1303
1304
1305 if (hlen < sizeof(struct tcphdr))
1306 return hdr.network - data;
1307
1308 hdr.network += hlen;
1309 }
1310
1311
1312
1313
1314
1315
1316
1317 if ((hdr.network - data) < max_len)
1318 return hdr.network - data;
1319 else
1320 return max_len;
1321}
1322
1323static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
1324 union ixgbe_adv_rx_desc *rx_desc,
1325 struct sk_buff *skb)
1326{
1327 __le32 rsc_enabled;
1328 u32 rsc_cnt;
1329
1330 if (!ring_is_rsc_enabled(rx_ring))
1331 return;
1332
1333 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
1334 cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
1335
1336
1337 if (!rsc_enabled)
1338 return;
1339
1340 rsc_cnt = le32_to_cpu(rsc_enabled);
1341 rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
1342
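	/* RSCCNT tells us how many frames the hardware coalesced into this
	 * buffer; all but the first are tracked as appended segments so the
	 * gso_size can be reconstructed when the skb is completed.
	 */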
1343 IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
1344}
1345
1346static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1347 struct sk_buff *skb)
1348{
1349 u16 hdr_len = skb_headlen(skb);
1350
1351
1352 skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
1353 IXGBE_CB(skb)->append_cnt);
1354}
1355
1356static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
1357 struct sk_buff *skb)
1358{
1359
1360 if (!IXGBE_CB(skb)->append_cnt)
1361 return;
1362
1363 rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
1364 rx_ring->rx_stats.rsc_flush++;
1365
1366 ixgbe_set_rsc_gso_size(rx_ring, skb);
1367
1368
1369 IXGBE_CB(skb)->append_cnt = 0;
1370}
1371
/**
 * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
1382static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1383 union ixgbe_adv_rx_desc *rx_desc,
1384 struct sk_buff *skb)
1385{
1386 struct net_device *dev = rx_ring->netdev;
1387
1388 ixgbe_update_rsc_stats(rx_ring, skb);
1389
1390 ixgbe_rx_hash(rx_ring, rx_desc, skb);
1391
1392 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1393
1394#ifdef CONFIG_IXGBE_PTP
1395 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
1396#endif
1397
1398 if ((dev->features & NETIF_F_HW_VLAN_RX) &&
1399 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1400 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1401 __vlan_hwaccel_put_tag(skb, vid);
1402 }
1403
1404 skb_record_rx_queue(skb, rx_ring->queue_index);
1405
1406 skb->protocol = eth_type_trans(skb, dev);
1407}
1408
1409static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
1410 struct sk_buff *skb)
1411{
1412 struct ixgbe_adapter *adapter = q_vector->adapter;
1413
1414 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1415 napi_gro_receive(&q_vector->napi, skb);
1416 else
1417 netif_rx(skb);
1418}
1419
/**
 * ixgbe_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
1431static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
1432 union ixgbe_adv_rx_desc *rx_desc,
1433 struct sk_buff *skb)
1434{
1435 u32 ntc = rx_ring->next_to_clean + 1;
1436
1437
1438 ntc = (ntc < rx_ring->count) ? ntc : 0;
1439 rx_ring->next_to_clean = ntc;
1440
1441 prefetch(IXGBE_RX_DESC(rx_ring, ntc));
1442
1443 if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
1444 return false;
1445
1446
1447 if (IXGBE_CB(skb)->append_cnt) {
1448 ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
1449 ntc &= IXGBE_RXDADV_NEXTP_MASK;
1450 ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
1451 }
1452
1453
1454 rx_ring->rx_buffer_info[ntc].skb = skb;
1455 rx_ring->rx_stats.non_eop_descs++;
1456
1457 return true;
1458}
1459
/**
 * ixgbe_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
1478static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
1479 union ixgbe_adv_rx_desc *rx_desc,
1480 struct sk_buff *skb)
1481{
1482 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1483 struct net_device *netdev = rx_ring->netdev;
1484 unsigned char *va;
1485 unsigned int pull_len;
1486
1487
1488 if (unlikely(IXGBE_CB(skb)->page_released)) {
1489 dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
1490 ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
1491 IXGBE_CB(skb)->page_released = false;
1492 } else {
1493 dma_sync_single_range_for_cpu(rx_ring->dev,
1494 IXGBE_CB(skb)->dma,
1495 frag->page_offset,
1496 ixgbe_rx_bufsz(rx_ring),
1497 DMA_FROM_DEVICE);
1498 }
1499 IXGBE_CB(skb)->dma = 0;
1500
1501
1502 if (unlikely(ixgbe_test_staterr(rx_desc,
1503 IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
1504 !(netdev->features & NETIF_F_RXALL))) {
1505 dev_kfree_skb_any(skb);
1506 return true;
1507 }
1508
1509
1510
1511
1512
1513
1514 va = skb_frag_address(frag);
1515
1516
1517
1518
1519
1520 pull_len = skb_frag_size(frag);
1521 if (pull_len > IXGBE_RX_HDR_SIZE)
1522 pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
1523
1524
1525 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1526
1527
1528 skb_frag_size_sub(frag, pull_len);
1529 frag->page_offset += pull_len;
1530 skb->data_len -= pull_len;
1531 skb->tail += pull_len;
1532
1533
1534
1535
1536
1537 if (skb_frag_size(frag) == 0) {
1538 BUG_ON(skb_shinfo(skb)->nr_frags != 1);
1539 skb_shinfo(skb)->nr_frags = 0;
1540 __skb_frag_unref(frag);
1541 skb->truesize -= ixgbe_rx_bufsz(rx_ring);
1542 }
1543
1544#ifdef IXGBE_FCOE
1545
1546 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
1547 return false;
1548
1549#endif
1550
1551 if (unlikely(skb->len < 60)) {
1552 int pad_len = 60 - skb->len;
1553
1554 if (skb_pad(skb, pad_len))
1555 return true;
1556 __skb_put(skb, pad_len);
1557 }
1558
1559 return false;
1560}
1561
/**
 * ixgbe_can_reuse_page - determine if we can reuse a page
 * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
 *
 * Returns true if page can be reused in another Rx buffer
 **/
1568static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
1569{
1570 struct page *page = rx_buffer->page;
1571
1572
1573 return likely(page_count(page) == 1) &&
1574 likely(page_to_nid(page) == numa_node_id());
1575}
1576
/**
 * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
1584static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1585 struct ixgbe_rx_buffer *old_buff)
1586{
1587 struct ixgbe_rx_buffer *new_buff;
1588 u16 nta = rx_ring->next_to_alloc;
1589 u16 bufsz = ixgbe_rx_bufsz(rx_ring);
1590
1591 new_buff = &rx_ring->rx_buffer_info[nta];
1592
1593
1594 nta++;
1595 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1596
1597
1598 new_buff->page = old_buff->page;
1599 new_buff->dma = old_buff->dma;
1600
1601
1602 new_buff->page_offset = old_buff->page_offset ^ bufsz;
1603
1604
1605 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
1606 new_buff->page_offset, bufsz,
1607 DMA_FROM_DEVICE);
1608
1609
1610 get_page(new_buff->page);
1611}
1612
/**
 * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of data in rx_buffer
 *
 * This function attaches the page contained in rx_buffer->page to skb
 * as a fragment and updates the skb length, data length, and truesize
 * accordingly.
 **/
1624static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
1625 struct ixgbe_rx_buffer *rx_buffer,
1626 struct sk_buff *skb, int size)
1627{
1628 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1629 rx_buffer->page, rx_buffer->page_offset,
1630 size);
1631 skb->len += size;
1632 skb->data_len += size;
1633 skb->truesize += ixgbe_rx_bufsz(rx_ring);
1634}
1635
/**
 * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns true if all work was completed without exhausting the budget.
 **/
1649static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1650 struct ixgbe_ring *rx_ring,
1651 int budget)
1652{
1653 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1654#ifdef IXGBE_FCOE
1655 struct ixgbe_adapter *adapter = q_vector->adapter;
1656 int ddp_bytes = 0;
1657#endif
1658 u16 cleaned_count = ixgbe_desc_unused(rx_ring);
1659
1660 do {
1661 struct ixgbe_rx_buffer *rx_buffer;
1662 union ixgbe_adv_rx_desc *rx_desc;
1663 struct sk_buff *skb;
1664 struct page *page;
1665 u16 ntc;
1666
1667
1668 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
1669 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1670 cleaned_count = 0;
1671 }
1672
1673 ntc = rx_ring->next_to_clean;
1674 rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
1675 rx_buffer = &rx_ring->rx_buffer_info[ntc];
1676
1677 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
1678 break;
1679
1680
1681
1682
1683
1684
1685 rmb();
1686
1687 page = rx_buffer->page;
1688 prefetchw(page);
1689
1690 skb = rx_buffer->skb;
1691
1692 if (likely(!skb)) {
1693 void *page_addr = page_address(page) +
1694 rx_buffer->page_offset;
1695
1696
1697 prefetch(page_addr);
1698#if L1_CACHE_BYTES < 128
1699 prefetch(page_addr + L1_CACHE_BYTES);
1700#endif
1701
1702
1703 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1704 IXGBE_RX_HDR_SIZE);
1705 if (unlikely(!skb)) {
1706 rx_ring->rx_stats.alloc_rx_buff_failed++;
1707 break;
1708 }
1709
1710
1711
1712
1713
1714
1715 prefetchw(skb->data);
1716
1717
1718
1719
1720
1721
1722
1723 IXGBE_CB(skb)->dma = rx_buffer->dma;
1724 } else {
1725
1726 dma_sync_single_range_for_cpu(rx_ring->dev,
1727 rx_buffer->dma,
1728 rx_buffer->page_offset,
1729 ixgbe_rx_bufsz(rx_ring),
1730 DMA_FROM_DEVICE);
1731 }
1732
1733
1734 ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
1735 le16_to_cpu(rx_desc->wb.upper.length));
1736
1737 if (ixgbe_can_reuse_page(rx_buffer)) {
1738
1739 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
1740 } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
1741
1742 IXGBE_CB(skb)->page_released = true;
1743 } else {
1744
1745 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
1746 ixgbe_rx_pg_size(rx_ring),
1747 DMA_FROM_DEVICE);
1748 }
1749
1750
1751 rx_buffer->skb = NULL;
1752 rx_buffer->dma = 0;
1753 rx_buffer->page = NULL;
1754
1755 ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
1756
1757 cleaned_count++;
1758
1759
1760 if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
1761 continue;
1762
1763
1764 if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
1765 continue;
1766
1767
1768 total_rx_bytes += skb->len;
1769 total_rx_packets++;
1770
1771
1772 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
1773
1774#ifdef IXGBE_FCOE
1775
1776 if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
1777 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
1778 if (!ddp_bytes) {
1779 dev_kfree_skb_any(skb);
1780 continue;
1781 }
1782 }
1783
1784#endif
1785 ixgbe_rx_skb(q_vector, skb);
1786
1787
1788 budget--;
1789 } while (likely(budget));
1790
1791#ifdef IXGBE_FCOE
1792
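	/* FCoE DDP payloads are placed directly in user buffers and never
	 * pass through an skb, so account for them here; the packet count is
	 * estimated from the largest FC frame payload that fits the MTU.
	 */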
1793 if (ddp_bytes > 0) {
1794 unsigned int mss;
1795
1796 mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
1797 sizeof(struct fc_frame_header) -
1798 sizeof(struct fcoe_crc_eof);
1799 if (mss > 512)
1800 mss &= ~511;
1801 total_rx_bytes += ddp_bytes;
1802 total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
1803 }
1804
1805#endif
1806 u64_stats_update_begin(&rx_ring->syncp);
1807 rx_ring->stats.packets += total_rx_packets;
1808 rx_ring->stats.bytes += total_rx_bytes;
1809 u64_stats_update_end(&rx_ring->syncp);
1810 q_vector->rx.total_packets += total_rx_packets;
1811 q_vector->rx.total_bytes += total_rx_bytes;
1812
1813 if (cleaned_count)
1814 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1815
1816 return !!budget;
1817}
1818
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
1826static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1827{
1828 struct ixgbe_q_vector *q_vector;
1829 int v_idx;
1830 u32 mask;
1831
1832
1833 if (adapter->num_vfs > 32) {
1834 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
1835 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
1836 }
1837
1838
1839
1840
1841
1842 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
1843 struct ixgbe_ring *ring;
1844 q_vector = adapter->q_vector[v_idx];
1845
1846 ixgbe_for_each_ring(ring, q_vector->rx)
1847 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1848
1849 ixgbe_for_each_ring(ring, q_vector->tx)
1850 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1851
1852 if (q_vector->tx.ring && !q_vector->rx.ring) {
1853
1854 if (adapter->tx_itr_setting == 1)
1855 q_vector->itr = IXGBE_10K_ITR;
1856 else
1857 q_vector->itr = adapter->tx_itr_setting;
1858 } else {
1859
1860 if (adapter->rx_itr_setting == 1)
1861 q_vector->itr = IXGBE_20K_ITR;
1862 else
1863 q_vector->itr = adapter->rx_itr_setting;
1864 }
1865
1866 ixgbe_write_eitr(q_vector);
1867 }
1868
1869 switch (adapter->hw.mac.type) {
1870 case ixgbe_mac_82598EB:
1871 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
1872 v_idx);
1873 break;
1874 case ixgbe_mac_82599EB:
1875 case ixgbe_mac_X540:
1876 ixgbe_set_ivar(adapter, -1, 1, v_idx);
1877 break;
1878 default:
1879 break;
1880 }
1881 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
1882
1883
1884 mask = IXGBE_EIMS_ENABLE_MASK;
1885 mask &= ~(IXGBE_EIMS_OTHER |
1886 IXGBE_EIMS_MAILBOX |
1887 IXGBE_EIMS_LSC);
1888
1889 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
1890}
1891
1892enum latency_range {
1893 lowest_latency = 0,
1894 low_latency = 1,
1895 bulk_latency = 2,
1896 latency_invalid = 255
1897};
1898
/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per-interrupt computation is faster updates
 * and a more accurate ITR for the current traffic pattern.  The thresholds
 * below were chosen to balance response time against bulk throughput; the
 * resulting value is only a suggestion and is blended with the previous
 * value in ixgbe_set_itr().
 **/
1914static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
1915 struct ixgbe_ring_container *ring_container)
1916{
1917 int bytes = ring_container->total_bytes;
1918 int packets = ring_container->total_packets;
1919 u32 timepassed_us;
1920 u64 bytes_perint;
1921 u8 itr_setting = ring_container->itr;
1922
1923 if (packets == 0)
1924 return;
1925
	/* simple throttle rate management:
	 *    0-10MB/s   lowest (100000 ints/s)
	 *   10-20MB/s   low    (20000 ints/s)
	 *   20-1249MB/s bulk   (8000 ints/s)
	 */
	/* what was the last interrupt timeslice?  q_vector->itr is kept in
	 * register format, so shifting right by 2 yields microseconds.
	 */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us;
1934
1935 switch (itr_setting) {
1936 case lowest_latency:
1937 if (bytes_perint > 10)
1938 itr_setting = low_latency;
1939 break;
1940 case low_latency:
1941 if (bytes_perint > 20)
1942 itr_setting = bulk_latency;
1943 else if (bytes_perint <= 10)
1944 itr_setting = lowest_latency;
1945 break;
1946 case bulk_latency:
1947 if (bytes_perint <= 20)
1948 itr_setting = low_latency;
1949 break;
1950 }
1951
1952
1953 ring_container->total_bytes = 0;
1954 ring_container->total_packets = 0;
1955
1956
1957 ring_container->itr = itr_setting;
1958}
1959
/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and timer values
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
1968void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1969{
1970 struct ixgbe_adapter *adapter = q_vector->adapter;
1971 struct ixgbe_hw *hw = &adapter->hw;
1972 int v_idx = q_vector->v_idx;
1973 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1974
1975 switch (adapter->hw.mac.type) {
1976 case ixgbe_mac_82598EB:
1977
1978 itr_reg |= (itr_reg << 16);
1979 break;
1980 case ixgbe_mac_82599EB:
1981 case ixgbe_mac_X540:
1982
1983
1984
1985
1986 itr_reg |= IXGBE_EITR_CNT_WDIS;
1987 break;
1988 default:
1989 break;
1990 }
1991 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
1992}
1993
1994static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
1995{
1996 u32 new_itr = q_vector->itr;
1997 u8 current_itr;
1998
1999 ixgbe_update_itr(q_vector, &q_vector->tx);
2000 ixgbe_update_itr(q_vector, &q_vector->rx);
2001
2002 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
2003
2004 switch (current_itr) {
2005
2006 case lowest_latency:
2007 new_itr = IXGBE_100K_ITR;
2008 break;
2009 case low_latency:
2010 new_itr = IXGBE_20K_ITR;
2011 break;
2012 case bulk_latency:
2013 new_itr = IXGBE_8K_ITR;
2014 break;
2015 default:
2016 break;
2017 }
2018
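	/* smooth the update: the new value is a 90/10 weighted average (in
	 * interrupts per second) of the current and target rates, so the
	 * ITR ramps gradually rather than jumping
	 */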
2019 if (new_itr != q_vector->itr) {
2020
2021 new_itr = (10 * new_itr * q_vector->itr) /
2022 ((9 * new_itr) + q_vector->itr);
2023
2024
2025 q_vector->itr = new_itr;
2026
2027 ixgbe_write_eitr(q_vector);
2028 }
2029}
2030
/**
 * ixgbe_check_overtemp_subtask - check for over temperature
 * @adapter: pointer to adapter
 **/
2035static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
2036{
2037 struct ixgbe_hw *hw = &adapter->hw;
2038 u32 eicr = adapter->interrupt_event;
2039
2040 if (test_bit(__IXGBE_DOWN, &adapter->state))
2041 return;
2042
2043 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2044 !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
2045 return;
2046
2047 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2048
2049 switch (hw->device_id) {
2050 case IXGBE_DEV_ID_82599_T3_LOM:
2051
2052
2053
2054
2055
2056
2057
2058 if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
2059 !(eicr & IXGBE_EICR_LSC))
2060 return;
2061
2062 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
2063 u32 autoneg;
2064 bool link_up = false;
2065
2066 hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
2067
2068 if (link_up)
2069 return;
2070 }
2071
2072
2073 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
2074 return;
2075
2076 break;
2077 default:
2078 if (!(eicr & IXGBE_EICR_GPI_SDP0))
2079 return;
2080 break;
2081 }
2082 e_crit(drv,
	       "Network adapter has been stopped because it has overheated. "
2084 "Restart the computer. If the problem persists, "
2085 "power off the system and replace the adapter\n");
2086
2087 adapter->interrupt_event = 0;
2088}
2089
2090static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
2091{
2092 struct ixgbe_hw *hw = &adapter->hw;
2093
2094 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
2095 (eicr & IXGBE_EICR_GPI_SDP1)) {
2096 e_crit(probe, "Fan has stopped, replace the adapter\n");
2097
2098 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
2099 }
2100}
2101
2102static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
2103{
2104 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
2105 return;
2106
2107 switch (adapter->hw.mac.type) {
2108 case ixgbe_mac_82599EB:
2109
2110
2111
2112
2113 if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
2114 (!test_bit(__IXGBE_DOWN, &adapter->state))) {
2115 adapter->interrupt_event = eicr;
2116 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2117 ixgbe_service_event_schedule(adapter);
2118 return;
2119 }
2120 return;
2121 case ixgbe_mac_X540:
2122 if (!(eicr & IXGBE_EICR_TS))
2123 return;
2124 break;
2125 default:
2126 return;
2127 }
2128
2129 e_crit(drv,
	       "Network adapter has been stopped because it has overheated. "
2131 "Restart the computer. If the problem persists, "
2132 "power off the system and replace the adapter\n");
2133}
2134
2135static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
2136{
2137 struct ixgbe_hw *hw = &adapter->hw;
2138
2139 if (eicr & IXGBE_EICR_GPI_SDP2) {
2140
2141 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
2142 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2143 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
2144 ixgbe_service_event_schedule(adapter);
2145 }
2146 }
2147
2148 if (eicr & IXGBE_EICR_GPI_SDP1) {
2149
2150 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
2151 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2152 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
2153 ixgbe_service_event_schedule(adapter);
2154 }
2155 }
2156}
2157
2158static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
2159{
2160 struct ixgbe_hw *hw = &adapter->hw;
2161
2162 adapter->lsc_int++;
2163 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2164 adapter->link_check_timeout = jiffies;
2165 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2166 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2167 IXGBE_WRITE_FLUSH(hw);
2168 ixgbe_service_event_schedule(adapter);
2169 }
2170}
2171
2172static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
2173 u64 qmask)
2174{
2175 u32 mask;
2176 struct ixgbe_hw *hw = &adapter->hw;
2177
2178 switch (hw->mac.type) {
2179 case ixgbe_mac_82598EB:
2180 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2181 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2182 break;
2183 case ixgbe_mac_82599EB:
2184 case ixgbe_mac_X540:
2185 mask = (qmask & 0xFFFFFFFF);
2186 if (mask)
2187 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2188 mask = (qmask >> 32);
2189 if (mask)
2190 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2191 break;
2192 default:
2193 break;
2194 }
2195
2196}
2197
2198static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
2199 u64 qmask)
2200{
2201 u32 mask;
2202 struct ixgbe_hw *hw = &adapter->hw;
2203
2204 switch (hw->mac.type) {
2205 case ixgbe_mac_82598EB:
2206 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
2207 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2208 break;
2209 case ixgbe_mac_82599EB:
2210 case ixgbe_mac_X540:
2211 mask = (qmask & 0xFFFFFFFF);
2212 if (mask)
2213 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2214 mask = (qmask >> 32);
2215 if (mask)
2216 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2217 break;
2218 default:
2219 break;
2220 }
2221
2222}
2223
/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @queues: also enable all queue interrupts
 * @flush: flush the register write
 **/
2228static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2229 bool flush)
2230{
2231 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2232
2233
2234 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
2235 mask &= ~IXGBE_EIMS_LSC;
2236
2237 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2238 switch (adapter->hw.mac.type) {
2239 case ixgbe_mac_82599EB:
2240 mask |= IXGBE_EIMS_GPI_SDP0;
2241 break;
2242 case ixgbe_mac_X540:
2243 mask |= IXGBE_EIMS_TS;
2244 break;
2245 default:
2246 break;
2247 }
2248 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2249 mask |= IXGBE_EIMS_GPI_SDP1;
2250 switch (adapter->hw.mac.type) {
2251 case ixgbe_mac_82599EB:
2252 mask |= IXGBE_EIMS_GPI_SDP1;
2253 mask |= IXGBE_EIMS_GPI_SDP2;
2254 case ixgbe_mac_X540:
2255 mask |= IXGBE_EIMS_ECC;
2256 mask |= IXGBE_EIMS_MAILBOX;
2257 break;
2258 default:
2259 break;
2260 }
2261 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2262 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
2263 mask |= IXGBE_EIMS_FLOW_DIR;
2264
2265 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2266 if (queues)
2267 ixgbe_irq_enable_queues(adapter, ~0);
2268 if (flush)
2269 IXGBE_WRITE_FLUSH(&adapter->hw);
2270}
2271
2272static irqreturn_t ixgbe_msix_other(int irq, void *data)
2273{
2274 struct ixgbe_adapter *adapter = data;
2275 struct ixgbe_hw *hw = &adapter->hw;
2276 u32 eicr;
2277
	/*
	 * Workaround for silicon errata: use clear-by-write instead of
	 * clear-by-read.  Reading EICS returns the interrupt causes without
	 * clearing them; they are cleared afterwards by the explicit write
	 * to EICR.
	 */
2284 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2285 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2286
2287 if (eicr & IXGBE_EICR_LSC)
2288 ixgbe_check_lsc(adapter);
2289
2290 if (eicr & IXGBE_EICR_MAILBOX)
2291 ixgbe_msg_task(adapter);
2292
2293 switch (hw->mac.type) {
2294 case ixgbe_mac_82599EB:
2295 case ixgbe_mac_X540:
2296 if (eicr & IXGBE_EICR_ECC)
2297 e_info(link, "Received unrecoverable ECC Err, please "
2298 "reboot\n");
2299
2300 if (eicr & IXGBE_EICR_FLOW_DIR) {
2301 int reinit_count = 0;
2302 int i;
2303 for (i = 0; i < adapter->num_tx_queues; i++) {
2304 struct ixgbe_ring *ring = adapter->tx_ring[i];
2305 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
2306 &ring->state))
2307 reinit_count++;
2308 }
2309 if (reinit_count) {
2310
2311 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2312 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
2313 ixgbe_service_event_schedule(adapter);
2314 }
2315 }
2316 ixgbe_check_sfp_event(adapter, eicr);
2317 ixgbe_check_overtemp_event(adapter, eicr);
2318 break;
2319 default:
2320 break;
2321 }
2322
2323 ixgbe_check_fan_failure(adapter, eicr);
2324#ifdef CONFIG_IXGBE_PTP
2325 ixgbe_ptp_check_pps_event(adapter, eicr);
2326#endif
2327
2328
2329 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2330 ixgbe_irq_enable(adapter, false, false);
2331
2332 return IRQ_HANDLED;
2333}
2334
2335static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
2336{
2337 struct ixgbe_q_vector *q_vector = data;
2338
2339
2340
2341 if (q_vector->rx.ring || q_vector->tx.ring)
2342 napi_schedule(&q_vector->napi);
2343
2344 return IRQ_HANDLED;
2345}
2346
/**
 * ixgbe_poll - NAPI polling RX/TX cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 **/
2354int ixgbe_poll(struct napi_struct *napi, int budget)
2355{
2356 struct ixgbe_q_vector *q_vector =
2357 container_of(napi, struct ixgbe_q_vector, napi);
2358 struct ixgbe_adapter *adapter = q_vector->adapter;
2359 struct ixgbe_ring *ring;
2360 int per_ring_budget;
2361 bool clean_complete = true;
2362
2363#ifdef CONFIG_IXGBE_DCA
2364 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2365 ixgbe_update_dca(q_vector);
2366#endif
2367
2368 ixgbe_for_each_ring(ring, q_vector->tx)
2369 clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
2370
	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
2373 if (q_vector->rx.count > 1)
2374 per_ring_budget = max(budget/q_vector->rx.count, 1);
2375 else
2376 per_ring_budget = budget;
2377
2378 ixgbe_for_each_ring(ring, q_vector->rx)
2379 clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
2380 per_ring_budget);
2381
2382
2383 if (!clean_complete)
2384 return budget;
2385
2386
2387 napi_complete(napi);
2388 if (adapter->rx_itr_setting & 1)
2389 ixgbe_set_itr(q_vector);
2390 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2391 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
2392
2393 return 0;
2394}
2395
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
2403static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2404{
2405 struct net_device *netdev = adapter->netdev;
2406 int vector, err;
2407 int ri = 0, ti = 0;
2408
2409 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2410 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2411 struct msix_entry *entry = &adapter->msix_entries[vector];
2412
2413 if (q_vector->tx.ring && q_vector->rx.ring) {
2414 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2415 "%s-%s-%d", netdev->name, "TxRx", ri++);
2416 ti++;
2417 } else if (q_vector->rx.ring) {
2418 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2419 "%s-%s-%d", netdev->name, "rx", ri++);
2420 } else if (q_vector->tx.ring) {
2421 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2422 "%s-%s-%d", netdev->name, "tx", ti++);
2423 } else {
2424
2425 continue;
2426 }
2427 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
2428 q_vector->name, q_vector);
2429 if (err) {
2430 e_err(probe, "request_irq failed for MSIX interrupt "
2431 "Error: %d\n", err);
2432 goto free_queue_irqs;
2433 }
2434
2435 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2436
2437 irq_set_affinity_hint(entry->vector,
2438 &q_vector->affinity_mask);
2439 }
2440 }
2441
2442 err = request_irq(adapter->msix_entries[vector].vector,
2443 ixgbe_msix_other, 0, netdev->name, adapter);
2444 if (err) {
2445 e_err(probe, "request_irq for msix_other failed: %d\n", err);
2446 goto free_queue_irqs;
2447 }
2448
2449 return 0;
2450
2451free_queue_irqs:
2452 while (vector) {
2453 vector--;
2454 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
2455 NULL);
2456 free_irq(adapter->msix_entries[vector].vector,
2457 adapter->q_vector[vector]);
2458 }
2459 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2460 pci_disable_msix(adapter->pdev);
2461 kfree(adapter->msix_entries);
2462 adapter->msix_entries = NULL;
2463 return err;
2464}
2465
2466
2467
2468
2469
2470
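/**
 * ixgbe_intr - legacy/MSI mode interrupt handler
 * @irq: interrupt number
 * @data: pointer to our adapter structure
 *
 * Handles link, SFP, ECC, thermal and fan events and then schedules NAPI
 * on the single queue vector used when MSI-X is not available.
 **/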
2471static irqreturn_t ixgbe_intr(int irq, void *data)
2472{
2473 struct ixgbe_adapter *adapter = data;
2474 struct ixgbe_hw *hw = &adapter->hw;
2475 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2476 u32 eicr;
2477
2478
2479
2480
2481
2482 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2483
2484
2485
2486 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2487 if (!eicr) {
2488
2489
2490
2491
2492
2493
2494
2495 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2496 ixgbe_irq_enable(adapter, true, true);
2497 return IRQ_NONE;
2498 }
2499
2500 if (eicr & IXGBE_EICR_LSC)
2501 ixgbe_check_lsc(adapter);
2502
2503 switch (hw->mac.type) {
2504 case ixgbe_mac_82599EB:
2505 ixgbe_check_sfp_event(adapter, eicr);
2506 /* Fall through */
2507 case ixgbe_mac_X540:
2508 if (eicr & IXGBE_EICR_ECC)
2509 e_info(link, "Received unrecoverable ECC err, please reboot\n");
2511 ixgbe_check_overtemp_event(adapter, eicr);
2512 break;
2513 default:
2514 break;
2515 }
2516
2517 ixgbe_check_fan_failure(adapter, eicr);
2518#ifdef CONFIG_IXGBE_PTP
2519 ixgbe_ptp_check_pps_event(adapter, eicr);
2520#endif
2521
2522
2523 napi_schedule(&q_vector->napi);
2524
2525
2526
2527
2528
2529 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2530 ixgbe_irq_enable(adapter, false, false);
2531
2532 return IRQ_HANDLED;
2533}
2534
2535
2536
2537
2538
2539
2540
2541
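/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available capabilities
 * of the hardware and kernel: MSI-X, MSI, or a shared legacy interrupt,
 * in that order of preference.
 **/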
2542static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2543{
2544 struct net_device *netdev = adapter->netdev;
2545 int err;
2546
2547 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2548 err = ixgbe_request_msix_irqs(adapter);
2549 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
2550 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
2551 netdev->name, adapter);
2552 else
2553 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
2554 netdev->name, adapter);
2555
2556 if (err)
2557 e_err(probe, "request_irq failed, Error %d\n", err);
2558
2559 return err;
2560}
2561
2562static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2563{
2564 int vector;
2565
2566 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2567 free_irq(adapter->pdev->irq, adapter);
2568 return;
2569 }
2570
2571 for (vector = 0; vector < adapter->num_q_vectors; vector++) {
2572 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2573 struct msix_entry *entry = &adapter->msix_entries[vector];
2574
2575
2576 if (!q_vector->rx.ring && !q_vector->tx.ring)
2577 continue;
2578
2579
2580 irq_set_affinity_hint(entry->vector, NULL);
2581
2582 free_irq(entry->vector, q_vector);
2583 }
2584
2585 free_irq(adapter->msix_entries[vector++].vector, adapter);
2586}
2587
2588
2589
2590
2591
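/**
 * ixgbe_irq_disable - mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Masks every interrupt cause and then synchronizes with any handler
 * that may still be running on another CPU.
 **/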
2592static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2593{
2594 switch (adapter->hw.mac.type) {
2595 case ixgbe_mac_82598EB:
2596 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2597 break;
2598 case ixgbe_mac_82599EB:
2599 case ixgbe_mac_X540:
2600 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2601 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
2602 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2603 break;
2604 default:
2605 break;
2606 }
2607 IXGBE_WRITE_FLUSH(&adapter->hw);
2608 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2609 int vector;
2610
2611 for (vector = 0; vector < adapter->num_q_vectors; vector++)
2612 synchronize_irq(adapter->msix_entries[vector].vector);
2613
2614 synchronize_irq(adapter->msix_entries[vector++].vector);
2615 } else {
2616 synchronize_irq(adapter->pdev->irq);
2617 }
2618}
2619
2620
2621
2622
2623
2624static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2625{
2626 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2627
2628
2629 if (adapter->rx_itr_setting == 1)
2630 q_vector->itr = IXGBE_20K_ITR;
2631 else
2632 q_vector->itr = adapter->rx_itr_setting;
2633
2634 ixgbe_write_eitr(q_vector);
2635
2636 ixgbe_set_ivar(adapter, 0, 0, 0);
2637 ixgbe_set_ivar(adapter, 1, 0, 0);
2638
2639 e_info(hw, "Legacy interrupt IVAR setup done\n");
2640}
2641
2642
2643
2644
2645
2646
2647
2648
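/**
 * ixgbe_configure_tx_ring - Configure a Tx descriptor ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Programs the descriptor base address, length and prefetch/write-back
 * thresholds for one Tx ring, then polls TXDCTL until the hardware
 * reports the queue enabled.
 **/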
2649void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2650 struct ixgbe_ring *ring)
2651{
2652 struct ixgbe_hw *hw = &adapter->hw;
2653 u64 tdba = ring->dma;
2654 int wait_loop = 10;
2655 u32 txdctl = IXGBE_TXDCTL_ENABLE;
2656 u8 reg_idx = ring->reg_idx;
2657
2658
2659 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
2660 IXGBE_WRITE_FLUSH(hw);
2661
2662 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
2663 (tdba & DMA_BIT_MASK(32)));
2664 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2665 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2666 ring->count * sizeof(union ixgbe_adv_tx_desc));
2667 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2668 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
2669 ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679 if (!ring->q_vector || (ring->q_vector->itr < 8))
2680 txdctl |= (1 << 16);	/* WTHRESH = 1 */
2681 else
2682 txdctl |= (8 << 16);	/* WTHRESH = 8 */
2683
2684
2685
2686
2687
2688 txdctl |= (1 << 8) | 32;	/* HTHRESH = 1, PTHRESH = 32 */
2690
2691
2692 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2693 ring->atr_sample_rate = adapter->atr_sample_rate;
2694 ring->atr_count = 0;
2695 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
2696 } else {
2697 ring->atr_sample_rate = 0;
2698 }
2699
2700 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
2701
2702
2703 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2704
2705
2706 if (hw->mac.type == ixgbe_mac_82598EB &&
2707 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2708 return;
2709
2710
2711 do {
2712 usleep_range(1000, 2000);
2713 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2714 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2715 if (!wait_loop)
2716 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
2717}
2718
2719static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2720{
2721 struct ixgbe_hw *hw = &adapter->hw;
2722 u32 rttdcs, mtqc;
2723 u8 tcs = netdev_get_num_tc(adapter->netdev);
2724
2725 if (hw->mac.type == ixgbe_mac_82598EB)
2726 return;
2727
2728
2729 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2730 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2731 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2732
2733
2734 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2735 mtqc = IXGBE_MTQC_VT_ENA;
2736 if (tcs > 4)
2737 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2738 else if (tcs > 1)
2739 mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2740 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
2741 mtqc |= IXGBE_MTQC_32VF;
2742 else
2743 mtqc |= IXGBE_MTQC_64VF;
2744 } else {
2745 if (tcs > 4)
2746 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
2747 else if (tcs > 1)
2748 mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2749 else
2750 mtqc = IXGBE_MTQC_64Q_1PB;
2751 }
2752
2753 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
2754
2755
2756 if (tcs) {
2757 u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
2758 sectx |= IXGBE_SECTX_DCB;
2759 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
2760 }
2761
2762
2763 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2764 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2765}
2766
2767
2768
2769
2770
2771
2772
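/**
 * ixgbe_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/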
2773static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2774{
2775 struct ixgbe_hw *hw = &adapter->hw;
2776 u32 dmatxctl;
2777 u32 i;
2778
2779 ixgbe_setup_mtqc(adapter);
2780
2781 if (hw->mac.type != ixgbe_mac_82598EB) {
2782
2783 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2784 dmatxctl |= IXGBE_DMATXCTL_TE;
2785 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2786 }
2787
2788
2789 for (i = 0; i < adapter->num_tx_queues; i++)
2790 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
2791}
2792
2793static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
2794 struct ixgbe_ring *ring)
2795{
2796 struct ixgbe_hw *hw = &adapter->hw;
2797 u8 reg_idx = ring->reg_idx;
2798 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
2799
2800 srrctl |= IXGBE_SRRCTL_DROP_EN;
2801
2802 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
2803}
2804
2805static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
2806 struct ixgbe_ring *ring)
2807{
2808 struct ixgbe_hw *hw = &adapter->hw;
2809 u8 reg_idx = ring->reg_idx;
2810 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
2811
2812 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2813
2814 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
2815}
2816
2817#ifdef CONFIG_IXGBE_DCB
2818void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
2819#else
2820static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
2821#endif
2822{
2823 int i;
2824 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
2825
2826 if (adapter->ixgbe_ieee_pfc)
2827 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
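	/*
	 * Set the SRRCTL drop enable bit when SR-IOV is active, or when
	 * multiple Rx queues are in use and neither link flow control nor
	 * PFC is enabled.  Dropping in hardware in those cases avoids
	 * head-of-line blocking between queues.
	 */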
2838 if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
2839 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
2840 for (i = 0; i < adapter->num_rx_queues; i++)
2841 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
2842 } else {
2843 for (i = 0; i < adapter->num_rx_queues; i++)
2844 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
2845 }
2846}
2847
2848#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2849
2850static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2851 struct ixgbe_ring *rx_ring)
2852{
2853 struct ixgbe_hw *hw = &adapter->hw;
2854 u32 srrctl;
2855 u8 reg_idx = rx_ring->reg_idx;
2856
2857 if (hw->mac.type == ixgbe_mac_82598EB) {
2858 u16 mask = adapter->ring_feature[RING_F_RSS].mask;
2859
2860
2861
2862
2863
2864 reg_idx &= mask;
2865 }
2866
2867
2868 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
2869
2870
2871#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
2872 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2873#else
2874 srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2875#endif
2876
2877
2878 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2879
2880 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
2881}
2882
2883static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2884{
2885 struct ixgbe_hw *hw = &adapter->hw;
2886 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
2887 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2888 0x6A3E67EA, 0x14364D17, 0x3BED200D};
2889 u32 mrqc = 0, reta = 0;
2890 u32 rxcsum;
2891 int i, j;
2892 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2893
2894
2895
2896
2897
2898
2899 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
2900 rss_i = 2;
2901
2902
2903 for (i = 0; i < 10; i++)
2904 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2905
2906
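	/*
	 * Fill the 128-entry redirection table, four one-byte entries per
	 * RETA register.  Each byte carries the queue index in both nibbles
	 * (j * 0x11) and the index wraps at rss_i, so with rss_i == 4 the
	 * bytes cycle 0x00, 0x11, 0x22, 0x33.
	 */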
2907 for (i = 0, j = 0; i < 128; i++, j++) {
2908 if (j == rss_i)
2909 j = 0;
2910
2911
2912 reta = (reta << 8) | (j * 0x11);
2913 if ((i & 3) == 3)
2914 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2915 }
2916
2917
2918 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2919 rxcsum |= IXGBE_RXCSUM_PCSD;
2920 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2921
2922 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2923 if (adapter->ring_feature[RING_F_RSS].mask)
2924 mrqc = IXGBE_MRQC_RSSEN;
2925 } else {
2926 u8 tcs = netdev_get_num_tc(adapter->netdev);
2927
2928 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2929 if (tcs > 4)
2930 mrqc = IXGBE_MRQC_VMDQRT8TCEN;
2931 else if (tcs > 1)
2932 mrqc = IXGBE_MRQC_VMDQRT4TCEN;
2933 else if (adapter->ring_feature[RING_F_RSS].indices == 4)
2934 mrqc = IXGBE_MRQC_VMDQRSS32EN;
2935 else
2936 mrqc = IXGBE_MRQC_VMDQRSS64EN;
2937 } else {
2938 if (tcs > 4)
2939 mrqc = IXGBE_MRQC_RTRSS8TCEN;
2940 else if (tcs > 1)
2941 mrqc = IXGBE_MRQC_RTRSS4TCEN;
2942 else
2943 mrqc = IXGBE_MRQC_RSSEN;
2944 }
2945 }
2946
2947
2948 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
2949 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2950 IXGBE_MRQC_RSS_FIELD_IPV6 |
2951 IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2952
2953 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2954 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2955 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2956 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2957
2958 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2959}
2960
2961
2962
2963
2964
2965
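/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 *
 * Limits the number of descriptors per RSC based on page size so a
 * coalesced receive stays within the length the hardware allows.
 **/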
2966static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2967 struct ixgbe_ring *ring)
2968{
2969 struct ixgbe_hw *hw = &adapter->hw;
2970 u32 rscctrl;
2971 u8 reg_idx = ring->reg_idx;
2972
2973 if (!ring_is_rsc_enabled(ring))
2974 return;
2975
2976 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
2977 rscctrl |= IXGBE_RSCCTL_RSCEN;
2978
2979
2980
2981
2982
2983#if (PAGE_SIZE <= 8192)
2984 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2985#elif (PAGE_SIZE <= 16384)
2986 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2987#else
2988 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2989#endif
2990 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2991}
2992
2993#define IXGBE_MAX_RX_DESC_POLL 10
2994static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2995 struct ixgbe_ring *ring)
2996{
2997 struct ixgbe_hw *hw = &adapter->hw;
2998 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
2999 u32 rxdctl;
3000 u8 reg_idx = ring->reg_idx;
3001
3002
3003 if (hw->mac.type == ixgbe_mac_82598EB &&
3004 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3005 return;
3006
3007 do {
3008 usleep_range(1000, 2000);
3009 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3010 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3011
3012 if (!wait_loop) {
3013 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n", reg_idx);
3015 }
3016}
3017
3018void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3019 struct ixgbe_ring *ring)
3020{
3021 struct ixgbe_hw *hw = &adapter->hw;
3022 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3023 u32 rxdctl;
3024 u8 reg_idx = ring->reg_idx;
3025
3026 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3027 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3028
3029
3030 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3031
3032 if (hw->mac.type == ixgbe_mac_82598EB &&
3033 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3034 return;
3035
3036
3037 do {
3038 udelay(10);
3039 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3040 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3041
3042 if (!wait_loop) {
3043 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within the polling period\n", reg_idx);
3045 }
3046}
3047
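/**
 * ixgbe_configure_rx_ring - Configure an Rx descriptor ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Programs the descriptor base, SRRCTL and RSC settings for one Rx ring,
 * enables the queue and posts the initial set of receive buffers.
 **/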
3048void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3049 struct ixgbe_ring *ring)
3050{
3051 struct ixgbe_hw *hw = &adapter->hw;
3052 u64 rdba = ring->dma;
3053 u32 rxdctl;
3054 u8 reg_idx = ring->reg_idx;
3055
3056
3057 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3058 ixgbe_disable_rx_queue(adapter, ring);
3059
3060 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3061 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3062 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3063 ring->count * sizeof(union ixgbe_adv_rx_desc));
3064 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3065 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
3066 ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
3067
3068 ixgbe_configure_srrctl(adapter, ring);
3069 ixgbe_configure_rscctl(adapter, ring);
3070
3071
3072 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3073 hw->mac.type == ixgbe_mac_X540) {
3074 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
3075 rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
3076 ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
3077 }
3078
3079 if (hw->mac.type == ixgbe_mac_82598EB) {
3080
3081
3082
3083
3084
3085
3086
3087 rxdctl &= ~0x3FFFFF;
3088 rxdctl |= 0x080420;
3089 }
3090
3091
3092 rxdctl |= IXGBE_RXDCTL_ENABLE;
3093 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3094
3095 ixgbe_rx_desc_queue_enable(adapter, ring);
3096 ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
3097}
3098
3099static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3100{
3101 struct ixgbe_hw *hw = &adapter->hw;
3102 int rss_i = adapter->ring_feature[RING_F_RSS].indices;
3103 int p;
3104
3105
3106 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3107 IXGBE_PSRTYPE_UDPHDR |
3108 IXGBE_PSRTYPE_IPV4HDR |
3109 IXGBE_PSRTYPE_L2HDR |
3110 IXGBE_PSRTYPE_IPV6HDR;
3111
3112 if (hw->mac.type == ixgbe_mac_82598EB)
3113 return;
3114
3115 if (rss_i > 3)
3116 psrtype |= 2 << 29;
3117 else if (rss_i > 1)
3118 psrtype |= 1 << 29;
3119
3120 for (p = 0; p < adapter->num_rx_pools; p++)
3121 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
3122 psrtype);
3123}
3124
3125static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3126{
3127 struct ixgbe_hw *hw = &adapter->hw;
3128 u32 reg_offset, vf_shift;
3129 u32 gcr_ext, vmdctl;
3130 int i;
3131
3132 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3133 return;
3134
3135 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3136 vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
3137 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
3138 vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
3139 vmdctl |= IXGBE_VT_CTL_REPLEN;
3140 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
3141
3142 vf_shift = VMDQ_P(0) % 32;
3143 reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
3144
3145
3146 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
3147 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
3148 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
3149 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
3150 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3151
3152
3153 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
3154
3155
3156
3157
3158
3159 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3160 case IXGBE_82599_VMDQ_8Q_MASK:
3161 gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
3162 break;
3163 case IXGBE_82599_VMDQ_4Q_MASK:
3164 gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
3165 break;
3166 default:
3167 gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
3168 break;
3169 }
3170
3171 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3172
3173
3174 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3175
3176
3177 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
3178 adapter->num_vfs);
3179
3180 for (i = 0; i < adapter->num_vfs; i++) {
3181 if (!adapter->vfinfo[i].spoofchk_enabled)
3182 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
3183 }
3184}
3185
3186static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3187{
3188 struct ixgbe_hw *hw = &adapter->hw;
3189 struct net_device *netdev = adapter->netdev;
3190 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3191 struct ixgbe_ring *rx_ring;
3192 int i;
3193 u32 mhadd, hlreg0;
3194
3195#ifdef IXGBE_FCOE
3196
3197 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3198 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3199 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3200
3201#endif
3202 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3203 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3204 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3205 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3206
3207 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3208 }
3209
3210
3211 max_frame += VLAN_HLEN;
3212
3213 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3214
3215 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3216 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3217
3218
3219
3220
3221
3222 for (i = 0; i < adapter->num_rx_queues; i++) {
3223 rx_ring = adapter->rx_ring[i];
3224 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3225 set_ring_rsc_enabled(rx_ring);
3226 else
3227 clear_ring_rsc_enabled(rx_ring);
3228 }
3229}
3230
3231static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3232{
3233 struct ixgbe_hw *hw = &adapter->hw;
3234 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3235
3236 switch (hw->mac.type) {
3237 case ixgbe_mac_82598EB:
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3249 break;
3250 case ixgbe_mac_82599EB:
3251 case ixgbe_mac_X540:
3252
3253 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3254 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3255 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3256
3257 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3258 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3259 break;
3260 default:
3261
3262 return;
3263 }
3264
3265 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3266}
3267
3268
3269
3270
3271
3272
3273
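/**
 * ixgbe_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/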
3274static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3275{
3276 struct ixgbe_hw *hw = &adapter->hw;
3277 int i;
3278 u32 rxctrl;
3279
3280
3281 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3282 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3283
3284 ixgbe_setup_psrtype(adapter);
3285 ixgbe_setup_rdrxctl(adapter);
3286
3287
3288 ixgbe_setup_mrqc(adapter);
3289
3290
3291 ixgbe_set_rx_buffer_len(adapter);
3292
3293
3294
3295
3296
3297 for (i = 0; i < adapter->num_rx_queues; i++)
3298 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
3299
3300
3301 if (hw->mac.type == ixgbe_mac_82598EB)
3302 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3303
3304
3305 rxctrl |= IXGBE_RXCTRL_RXEN;
3306 hw->mac.ops.enable_rx_dma(hw, rxctrl);
3307}
3308
3309static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
3310{
3311 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3312 struct ixgbe_hw *hw = &adapter->hw;
3313
3314
3315 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
3316 set_bit(vid, adapter->active_vlans);
3317
3318 return 0;
3319}
3320
3321static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
3322{
3323 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3324 struct ixgbe_hw *hw = &adapter->hw;
3325
3326
3327 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
3328 clear_bit(vid, adapter->active_vlans);
3329
3330 return 0;
3331}
3332
3333
3334
3335
3336
3337static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3338{
3339 struct ixgbe_hw *hw = &adapter->hw;
3340 u32 vlnctrl;
3341
3342 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3343 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3344 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3345}
3346
3347
3348
3349
3350
3351static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3352{
3353 struct ixgbe_hw *hw = &adapter->hw;
3354 u32 vlnctrl;
3355
3356 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3357 vlnctrl |= IXGBE_VLNCTRL_VFE;
3358 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3359 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3360}
3361
3362
3363
3364
3365
3366static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3367{
3368 struct ixgbe_hw *hw = &adapter->hw;
3369 u32 vlnctrl;
3370 int i, j;
3371
3372 switch (hw->mac.type) {
3373 case ixgbe_mac_82598EB:
3374 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3375 vlnctrl &= ~IXGBE_VLNCTRL_VME;
3376 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3377 break;
3378 case ixgbe_mac_82599EB:
3379 case ixgbe_mac_X540:
3380 for (i = 0; i < adapter->num_rx_queues; i++) {
3381 j = adapter->rx_ring[i]->reg_idx;
3382 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3383 vlnctrl &= ~IXGBE_RXDCTL_VME;
3384 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3385 }
3386 break;
3387 default:
3388 break;
3389 }
3390}
3391
3392
3393
3394
3395
3396static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3397{
3398 struct ixgbe_hw *hw = &adapter->hw;
3399 u32 vlnctrl;
3400 int i, j;
3401
3402 switch (hw->mac.type) {
3403 case ixgbe_mac_82598EB:
3404 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3405 vlnctrl |= IXGBE_VLNCTRL_VME;
3406 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3407 break;
3408 case ixgbe_mac_82599EB:
3409 case ixgbe_mac_X540:
3410 for (i = 0; i < adapter->num_rx_queues; i++) {
3411 j = adapter->rx_ring[i]->reg_idx;
3412 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3413 vlnctrl |= IXGBE_RXDCTL_VME;
3414 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3415 }
3416 break;
3417 default:
3418 break;
3419 }
3420}
3421
3422static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3423{
3424 u16 vid;
3425
3426 ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
3427
3428 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
3429 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
3430}
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
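/**
 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/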
3441static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3442{
3443 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3444 struct ixgbe_hw *hw = &adapter->hw;
3445 unsigned int rar_entries = hw->mac.num_rar_entries - 1;
3446 int count = 0;
3447
3448
3449 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3450 rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
3451
3452
3453 if (netdev_uc_count(netdev) > rar_entries)
3454 return -ENOMEM;
3455
3456 if (!netdev_uc_empty(netdev)) {
3457 struct netdev_hw_addr *ha;
3458
3459 if (!hw->mac.ops.set_rar)
3460 return -ENOMEM;
3461
3462 netdev_for_each_uc_addr(ha, netdev) {
3463 if (!rar_entries)
3464 break;
3465 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3466 VMDQ_P(0), IXGBE_RAH_AV);
3467 count++;
3468 }
3469 }
3470
3471 for (; rar_entries > 0 ; rar_entries--)
3472 hw->mac.ops.clear_rar(hw, rar_entries);
3473
3474 return count;
3475}
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
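/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * Called whenever the unicast/multicast address list or the network
 * interface flags are updated.  Configures the hardware for proper
 * unicast, multicast, promiscuous and VLAN filtering behavior.
 **/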
3486void ixgbe_set_rx_mode(struct net_device *netdev)
3487{
3488 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3489 struct ixgbe_hw *hw = &adapter->hw;
3490 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3491 int count;
3492
3493
3494
3495 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3496
3497
3498 fctrl &= ~IXGBE_FCTRL_SBP;
3499 fctrl |= IXGBE_FCTRL_BAM;
3500 fctrl |= IXGBE_FCTRL_DPF;
3501 fctrl |= IXGBE_FCTRL_PMCF;
3502
3503
3504 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3505
3506 if (netdev->flags & IFF_PROMISC) {
3507 hw->addr_ctrl.user_set_promisc = true;
3508 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3509 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
3510
3511 ixgbe_vlan_filter_disable(adapter);
3512 } else {
3513 if (netdev->flags & IFF_ALLMULTI) {
3514 fctrl |= IXGBE_FCTRL_MPE;
3515 vmolr |= IXGBE_VMOLR_MPE;
3516 } else {
3517
3518
3519
3520
3521
3522 hw->mac.ops.update_mc_addr_list(hw, netdev);
3523 vmolr |= IXGBE_VMOLR_ROMPE;
3524 }
3525 ixgbe_vlan_filter_enable(adapter);
3526 hw->addr_ctrl.user_set_promisc = false;
3527 }
3528
3529
3530
3531
3532
3533
3534 count = ixgbe_write_uc_addr_list(netdev);
3535 if (count < 0) {
3536 fctrl |= IXGBE_FCTRL_UPE;
3537 vmolr |= IXGBE_VMOLR_ROPE;
3538 }
3539
3540 if (adapter->num_vfs)
3541 ixgbe_restore_vf_multicasts(adapter);
3542
3543 if (hw->mac.type != ixgbe_mac_82598EB) {
3544 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
3545 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3546 IXGBE_VMOLR_ROPE);
3547 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
3548 }
3549
3550
3551 if (adapter->netdev->features & NETIF_F_RXALL) {
3552
3553
3554 fctrl |= (IXGBE_FCTRL_SBP |
3555 IXGBE_FCTRL_BAM |
3556 IXGBE_FCTRL_PMCF);
3557
3558 fctrl &= ~(IXGBE_FCTRL_DPF);
3559
3560 }
3561
3562 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3563
3564 if (netdev->features & NETIF_F_HW_VLAN_RX)
3565 ixgbe_vlan_strip_enable(adapter);
3566 else
3567 ixgbe_vlan_strip_disable(adapter);
3568}
3569
3570static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3571{
3572 int q_idx;
3573
3574 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
3575 napi_enable(&adapter->q_vector[q_idx]->napi);
3576}
3577
3578static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3579{
3580 int q_idx;
3581
3582 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
3583 napi_disable(&adapter->q_vector[q_idx]->napi);
3584}
3585
3586#ifdef CONFIG_IXGBE_DCB
3587
3588
3589
3590
3591
3592
3593
3594
3595static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3596{
3597 struct ixgbe_hw *hw = &adapter->hw;
3598 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3599
3600 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3601 if (hw->mac.type == ixgbe_mac_82598EB)
3602 netif_set_gso_max_size(adapter->netdev, 65536);
3603 return;
3604 }
3605
3606 if (hw->mac.type == ixgbe_mac_82598EB)
3607 netif_set_gso_max_size(adapter->netdev, 32768);
3608
3609 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3610
3611#ifdef IXGBE_FCOE
3612 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3613 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3614#endif
3615
3616
3617 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
3618 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3619 DCB_TX_CONFIG);
3620 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3621 DCB_RX_CONFIG);
3622 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
3623 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
3624 ixgbe_dcb_hw_ets(&adapter->hw,
3625 adapter->ixgbe_ieee_ets,
3626 max_frame);
3627 ixgbe_dcb_hw_pfc_config(&adapter->hw,
3628 adapter->ixgbe_ieee_pfc->pfc_en,
3629 adapter->ixgbe_ieee_ets->prio_tc);
3630 }
3631
3632
3633 if (hw->mac.type != ixgbe_mac_82598EB) {
3634 u32 msb = 0;
3635 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
3636
3637 while (rss_i) {
3638 msb++;
3639 rss_i >>= 1;
3640 }
3641
3642
3643 IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
3644 }
3645}
3646#endif
3647
3648
3649#define IXGBE_ETH_FRAMING 20
3650
3651
3652
3653
3654
3655
3656
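/**
 * ixgbe_hpbthresh - calculate high water mark for flow control
 * @adapter: board private structure to calculate for
 * @pb: packet buffer to calculate for
 *
 * Returns the fill level, in kilobytes, at which the hardware must send a
 * pause frame so the remaining space in packet buffer @pb can absorb the
 * worst-case delay value's worth of in-flight data.
 **/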
3657static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
3658{
3659 struct ixgbe_hw *hw = &adapter->hw;
3660 struct net_device *dev = adapter->netdev;
3661 int link, tc, kb, marker;
3662 u32 dv_id, rx_pba;
3663
3664
3665 tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
3666
3667#ifdef IXGBE_FCOE
3668
3669 if ((dev->features & NETIF_F_FCOE_MTU) &&
3670 (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
3671 (pb == ixgbe_fcoe_get_tc(adapter)))
3672 tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3673
3674#endif
3675
3676 switch (hw->mac.type) {
3677 case ixgbe_mac_X540:
3678 dv_id = IXGBE_DV_X540(link, tc);
3679 break;
3680 default:
3681 dv_id = IXGBE_DV(link, tc);
3682 break;
3683 }
3684
3685
3686 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3687 dv_id += IXGBE_B2BT(tc);
3688
3689
3690 kb = IXGBE_BT2KB(dv_id);
3691 rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
3692
3693 marker = rx_pba - kb;
3694
3695
3696
3697
3698
3699 if (marker < 0) {
3700 e_warn(drv, "Packet Buffer(%i) cannot provide enough headroom to support flow control. Decrease MTU or number of traffic classes\n", pb);
3703 marker = tc + 1;
3704 }
3705
3706 return marker;
3707}
3708
3709
3710
3711
3712
3713
3714
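/**
 * ixgbe_lpbthresh - calculate low water mark for flow control
 * @adapter: board private structure to calculate for
 *
 * Returns the level, in kilobytes, at which the hardware may send an XON
 * frame to resume traffic once the packet buffer has drained.
 **/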
3715static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
3716{
3717 struct ixgbe_hw *hw = &adapter->hw;
3718 struct net_device *dev = adapter->netdev;
3719 int tc;
3720 u32 dv_id;
3721
3722
3723 tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
3724
3725
3726 switch (hw->mac.type) {
3727 case ixgbe_mac_X540:
3728 dv_id = IXGBE_LOW_DV_X540(tc);
3729 break;
3730 default:
3731 dv_id = IXGBE_LOW_DV(tc);
3732 break;
3733 }
3734
3735
3736 return IXGBE_BT2KB(dv_id);
3737}
3738
3739
3740
3741
3742static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
3743{
3744 struct ixgbe_hw *hw = &adapter->hw;
3745 int num_tc = netdev_get_num_tc(adapter->netdev);
3746 int i;
3747
3748 if (!num_tc)
3749 num_tc = 1;
3750
3751 hw->fc.low_water = ixgbe_lpbthresh(adapter);
3752
3753 for (i = 0; i < num_tc; i++) {
3754 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
3755
3756
3757 if (hw->fc.low_water > hw->fc.high_water[i])
3758 hw->fc.low_water = 0;
3759 }
3760}
3761
3762static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
3763{
3764 struct ixgbe_hw *hw = &adapter->hw;
3765 int hdrm;
3766 u8 tc = netdev_get_num_tc(adapter->netdev);
3767
3768 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3769 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3770 hdrm = 32 << adapter->fdir_pballoc;
3771 else
3772 hdrm = 0;
3773
3774 hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
3775 ixgbe_pbthresh_setup(adapter);
3776}
3777
3778static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
3779{
3780 struct ixgbe_hw *hw = &adapter->hw;
3781 struct hlist_node *node, *node2;
3782 struct ixgbe_fdir_filter *filter;
3783
3784 spin_lock(&adapter->fdir_perfect_lock);
3785
3786 if (!hlist_empty(&adapter->fdir_filter_list))
3787 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
3788
3789 hlist_for_each_entry_safe(filter, node, node2,
3790 &adapter->fdir_filter_list, fdir_node) {
3791 ixgbe_fdir_write_perfect_filter_82599(hw,
3792 &filter->filter,
3793 filter->sw_idx,
3794 (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
3795 IXGBE_FDIR_DROP_QUEUE :
3796 adapter->rx_ring[filter->action]->reg_idx);
3797 }
3798
3799 spin_unlock(&adapter->fdir_perfect_lock);
3800}
3801
3802static void ixgbe_configure(struct ixgbe_adapter *adapter)
3803{
3804 struct ixgbe_hw *hw = &adapter->hw;
3805
3806 ixgbe_configure_pb(adapter);
3807#ifdef CONFIG_IXGBE_DCB
3808 ixgbe_configure_dcb(adapter);
3809#endif
3810
3811 ixgbe_set_rx_mode(adapter->netdev);
3812 ixgbe_restore_vlan(adapter);
3813
3814 switch (hw->mac.type) {
3815 case ixgbe_mac_82599EB:
3816 case ixgbe_mac_X540:
3817 hw->mac.ops.disable_rx_buff(hw);
3818 break;
3819 default:
3820 break;
3821 }
3822
3823 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3824 ixgbe_init_fdir_signature_82599(&adapter->hw,
3825 adapter->fdir_pballoc);
3826 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
3827 ixgbe_init_fdir_perfect_82599(&adapter->hw,
3828 adapter->fdir_pballoc);
3829 ixgbe_fdir_filter_restore(adapter);
3830 }
3831
3832 switch (hw->mac.type) {
3833 case ixgbe_mac_82599EB:
3834 case ixgbe_mac_X540:
3835 hw->mac.ops.enable_rx_buff(hw);
3836 break;
3837 default:
3838 break;
3839 }
3840
3841 ixgbe_configure_virtualization(adapter);
3842
3843#ifdef IXGBE_FCOE
3844
3845 ixgbe_configure_fcoe(adapter);
3846
3847#endif
3848 ixgbe_configure_tx(adapter);
3849 ixgbe_configure_rx(adapter);
3850}
3851
3852static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
3853{
3854 switch (hw->phy.type) {
3855 case ixgbe_phy_sfp_avago:
3856 case ixgbe_phy_sfp_ftl:
3857 case ixgbe_phy_sfp_intel:
3858 case ixgbe_phy_sfp_unknown:
3859 case ixgbe_phy_sfp_passive_tyco:
3860 case ixgbe_phy_sfp_passive_unknown:
3861 case ixgbe_phy_sfp_active_unknown:
3862 case ixgbe_phy_sfp_ftl_active:
3863 return true;
3864 case ixgbe_phy_nl:
3865 if (hw->mac.type == ixgbe_mac_82598EB)
3866 return true;
3867 default:
3868 return false;
3869 }
3870}
3871
3872
3873
3874
3875
3876static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3877{
3878
3879
3880
3881
3882
3883
3884 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3885 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
3886
3887 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
3888}
3889
3890
3891
3892
3893
3894
3895
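/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Checks the current link state, determines the advertised speeds when
 * none are configured, and programs the link setup accordingly.
 * Returns 0 on success, otherwise an error code.
 **/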
3896static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
3897{
3898 u32 autoneg;
3899 bool negotiation, link_up = false;
3900 u32 ret = IXGBE_ERR_LINK_SETUP;
3901
3902 if (hw->mac.ops.check_link)
3903 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
3904
3905 if (ret)
3906 goto link_cfg_out;
3907
3908 autoneg = hw->phy.autoneg_advertised;
3909 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3910 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3911 &negotiation);
3912 if (ret)
3913 goto link_cfg_out;
3914
3915 if (hw->mac.ops.setup_link)
3916 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
3917link_cfg_out:
3918 return ret;
3919}
3920
3921static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
3922{
3923 struct ixgbe_hw *hw = &adapter->hw;
3924 u32 gpie = 0;
3925
3926 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3927 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
3928 IXGBE_GPIE_OCD;
3929 gpie |= IXGBE_GPIE_EIAME;
3930
3931
3932
3933
3934 switch (hw->mac.type) {
3935 case ixgbe_mac_82598EB:
3936 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3937 break;
3938 case ixgbe_mac_82599EB:
3939 case ixgbe_mac_X540:
3940 default:
3941 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3942 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3943 break;
3944 }
3945 } else {
3946
3947
3948 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3949 }
3950
3951
3952
3953
3954 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3955 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3956
3957 switch (adapter->ring_feature[RING_F_VMDQ].mask) {
3958 case IXGBE_82599_VMDQ_8Q_MASK:
3959 gpie |= IXGBE_GPIE_VTMODE_16;
3960 break;
3961 case IXGBE_82599_VMDQ_4Q_MASK:
3962 gpie |= IXGBE_GPIE_VTMODE_32;
3963 break;
3964 default:
3965 gpie |= IXGBE_GPIE_VTMODE_64;
3966 break;
3967 }
3968 }
3969
3970
3971 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
3972 switch (adapter->hw.mac.type) {
3973 case ixgbe_mac_82599EB:
3974 gpie |= IXGBE_SDP0_GPIEN;
3975 break;
3976 case ixgbe_mac_X540:
3977 gpie |= IXGBE_EIMS_TS;
3978 break;
3979 default:
3980 break;
3981 }
3982 }
3983
3984
3985 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3986 gpie |= IXGBE_SDP1_GPIEN;
3987
3988 if (hw->mac.type == ixgbe_mac_82599EB) {
3989 gpie |= IXGBE_SDP1_GPIEN;
3990 gpie |= IXGBE_SDP2_GPIEN;
3991 }
3992
3993 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3994}
3995
3996static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
3997{
3998 struct ixgbe_hw *hw = &adapter->hw;
3999 int err;
4000 u32 ctrl_ext;
4001
4002 ixgbe_get_hw_control(adapter);
4003 ixgbe_setup_gpie(adapter);
4004
4005 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4006 ixgbe_configure_msix(adapter);
4007 else
4008 ixgbe_configure_msi_and_legacy(adapter);
4009
4010
4011 if (hw->mac.ops.enable_tx_laser &&
4012 ((hw->phy.multispeed_fiber) ||
4013 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
4014 (hw->mac.type == ixgbe_mac_82599EB))))
4015 hw->mac.ops.enable_tx_laser(hw);
4016
4017 clear_bit(__IXGBE_DOWN, &adapter->state);
4018 ixgbe_napi_enable_all(adapter);
4019
4020 if (ixgbe_is_sfp(hw)) {
4021 ixgbe_sfp_link_config(adapter);
4022 } else {
4023 err = ixgbe_non_sfp_link_config(hw);
4024 if (err)
4025 e_err(probe, "link_config FAILED %d\n", err);
4026 }
4027
4028
4029 IXGBE_READ_REG(hw, IXGBE_EICR);
4030 ixgbe_irq_enable(adapter, true, true);
4031
4032
4033
4034
4035
4036 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
4037 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
4038 if (esdp & IXGBE_ESDP_SDP1)
4039 e_crit(drv, "Fan has stopped, replace the adapter\n");
4040 }
4041
4042
4043 netif_tx_start_all_queues(adapter->netdev);
4044
4045
4046
4047 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4048 adapter->link_check_timeout = jiffies;
4049 mod_timer(&adapter->service_timer, jiffies);
4050
4051
4052 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4053 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4054 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4055}
4056
4057void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
4058{
4059 WARN_ON(in_interrupt());
4060
4061 adapter->netdev->trans_start = jiffies;
4062
4063 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
4064 usleep_range(1000, 2000);
4065 ixgbe_down(adapter);
4066
4067
4068
4069
4070
4071
4072 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4073 msleep(2000);
4074 ixgbe_up(adapter);
4075 clear_bit(__IXGBE_RESETTING, &adapter->state);
4076}
4077
4078void ixgbe_up(struct ixgbe_adapter *adapter)
4079{
4080
4081 ixgbe_configure(adapter);
4082
4083 ixgbe_up_complete(adapter);
4084}
4085
4086void ixgbe_reset(struct ixgbe_adapter *adapter)
4087{
4088 struct ixgbe_hw *hw = &adapter->hw;
4089 int err;
4090
4091
4092 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
4093 usleep_range(1000, 2000);
4094
4095
4096 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
4097 IXGBE_FLAG2_SFP_NEEDS_RESET);
4098 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4099
4100 err = hw->mac.ops.init_hw(hw);
4101 switch (err) {
4102 case 0:
4103 case IXGBE_ERR_SFP_NOT_PRESENT:
4104 case IXGBE_ERR_SFP_NOT_SUPPORTED:
4105 break;
4106 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
4107 e_dev_err("master disable timed out\n");
4108 break;
4109 case IXGBE_ERR_EEPROM_VERSION:
4110
4111 e_dev_warn("This device is a pre-production adapter/LOM. "
4112 "Please be aware there may be issues associated with "
4113 "your hardware. If you are experiencing problems "
4114 "please contact your Intel or hardware "
4115 "representative who provided you with this "
4116 "hardware.\n");
4117 break;
4118 default:
4119 e_dev_err("Hardware Error: %d\n", err);
4120 }
4121
4122 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
4123
4124
4125 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
4126
4127
4128 if (hw->mac.san_mac_rar_index)
4129 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
4130}
4131
4132
4133
4134
4135
4136
4137
4138
4139
4140
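/**
 * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
 * @rx_ring: ring to setup
 *
 * Alternates buffer offsets between 0 and the ring's buffer size so two
 * consecutive descriptors share the two halves of one receive page.
 **/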
4141static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
4142{
4143 struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
4144 u16 i;
4145
4146 for (i = 0; i < rx_ring->count; i += 2) {
4147 rx_buffer[0].page_offset = 0;
4148 rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
4149 rx_buffer = &rx_buffer[2];
4150 }
4151}
4152
4153
4154
4155
4156
4157static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4158{
4159 struct device *dev = rx_ring->dev;
4160 unsigned long size;
4161 u16 i;
4162
4163
4164 if (!rx_ring->rx_buffer_info)
4165 return;
4166
4167
4168 for (i = 0; i < rx_ring->count; i++) {
4169 struct ixgbe_rx_buffer *rx_buffer;
4170
4171 rx_buffer = &rx_ring->rx_buffer_info[i];
4172 if (rx_buffer->skb) {
4173 struct sk_buff *skb = rx_buffer->skb;
4174 if (IXGBE_CB(skb)->page_released) {
4175 dma_unmap_page(dev,
4176 IXGBE_CB(skb)->dma,
4177 ixgbe_rx_bufsz(rx_ring),
4178 DMA_FROM_DEVICE);
4179 IXGBE_CB(skb)->page_released = false;
4180 }
4181 dev_kfree_skb(skb);
4182 }
4183 rx_buffer->skb = NULL;
4184 if (rx_buffer->dma)
4185 dma_unmap_page(dev, rx_buffer->dma,
4186 ixgbe_rx_pg_size(rx_ring),
4187 DMA_FROM_DEVICE);
4188 rx_buffer->dma = 0;
4189 if (rx_buffer->page)
4190 __free_pages(rx_buffer->page,
4191 ixgbe_rx_pg_order(rx_ring));
4192 rx_buffer->page = NULL;
4193 }
4194
4195 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4196 memset(rx_ring->rx_buffer_info, 0, size);
4197
4198 ixgbe_init_rx_page_offset(rx_ring);
4199
4200
4201 memset(rx_ring->desc, 0, rx_ring->size);
4202
4203 rx_ring->next_to_alloc = 0;
4204 rx_ring->next_to_clean = 0;
4205 rx_ring->next_to_use = 0;
4206}
4207
4208
4209
4210
4211
4212static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
4213{
4214 struct ixgbe_tx_buffer *tx_buffer_info;
4215 unsigned long size;
4216 u16 i;
4217
4218
4219 if (!tx_ring->tx_buffer_info)
4220 return;
4221
4222
4223 for (i = 0; i < tx_ring->count; i++) {
4224 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4225 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4226 }
4227
4228 netdev_tx_reset_queue(txring_txq(tx_ring));
4229
4230 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4231 memset(tx_ring->tx_buffer_info, 0, size);
4232
4233
4234 memset(tx_ring->desc, 0, tx_ring->size);
4235
4236 tx_ring->next_to_use = 0;
4237 tx_ring->next_to_clean = 0;
4238}
4239
4240
4241
4242
4243
4244static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
4245{
4246 int i;
4247
4248 for (i = 0; i < adapter->num_rx_queues; i++)
4249 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
4250}
4251
4252
4253
4254
4255
4256static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
4257{
4258 int i;
4259
4260 for (i = 0; i < adapter->num_tx_queues; i++)
4261 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
4262}
4263
4264static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
4265{
4266 struct hlist_node *node, *node2;
4267 struct ixgbe_fdir_filter *filter;
4268
4269 spin_lock(&adapter->fdir_perfect_lock);
4270
4271 hlist_for_each_entry_safe(filter, node, node2,
4272 &adapter->fdir_filter_list, fdir_node) {
4273 hlist_del(&filter->fdir_node);
4274 kfree(filter);
4275 }
4276 adapter->fdir_filter_count = 0;
4277
4278 spin_unlock(&adapter->fdir_perfect_lock);
4279}
4280
4281void ixgbe_down(struct ixgbe_adapter *adapter)
4282{
4283 struct net_device *netdev = adapter->netdev;
4284 struct ixgbe_hw *hw = &adapter->hw;
4285 u32 rxctrl;
4286 int i;
4287
4288
4289 set_bit(__IXGBE_DOWN, &adapter->state);
4290
4291
4292 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4293 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4294
4295
4296 for (i = 0; i < adapter->num_rx_queues; i++)
4297
4298 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
4299
4300 usleep_range(10000, 20000);
4301
4302 netif_tx_stop_all_queues(netdev);
4303
4304
4305 netif_carrier_off(netdev);
4306 netif_tx_disable(netdev);
4307
4308 ixgbe_irq_disable(adapter);
4309
4310 ixgbe_napi_disable_all(adapter);
4311
4312 adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
4313 IXGBE_FLAG2_RESET_REQUESTED);
4314 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4315
4316 del_timer_sync(&adapter->service_timer);
4317
4318 if (adapter->num_vfs) {
4319
4320 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
4321
4322
4323 for (i = 0 ; i < adapter->num_vfs; i++)
4324 adapter->vfinfo[i].clear_to_send = false;
4325
4326
4327 ixgbe_ping_all_vfs(adapter);
4328
4329
4330 ixgbe_disable_tx_rx(adapter);
4331 }
4332
4333
4334 for (i = 0; i < adapter->num_tx_queues; i++) {
4335 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
4336 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
4337 }
4338
4339
4340 switch (hw->mac.type) {
4341 case ixgbe_mac_82599EB:
4342 case ixgbe_mac_X540:
4343 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
4344 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
4345 ~IXGBE_DMATXCTL_TE));
4346 break;
4347 default:
4348 break;
4349 }
4350
4351 if (!pci_channel_offline(adapter->pdev))
4352 ixgbe_reset(adapter);
4353
4354
4355 if (hw->mac.ops.disable_tx_laser &&
4356 ((hw->phy.multispeed_fiber) ||
4357 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
4358 (hw->mac.type == ixgbe_mac_82599EB))))
4359 hw->mac.ops.disable_tx_laser(hw);
4360
4361 ixgbe_clean_all_tx_rings(adapter);
4362 ixgbe_clean_all_rx_rings(adapter);
4363
4364#ifdef CONFIG_IXGBE_DCA
4365
4366 ixgbe_setup_dca(adapter);
4367#endif
4368}
4369
4370
4371
4372
4373
4374static void ixgbe_tx_timeout(struct net_device *netdev)
4375{
4376 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4377
4378
4379 ixgbe_tx_timeout_reset(adapter);
4380}
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4391{
4392 struct ixgbe_hw *hw = &adapter->hw;
4393 struct pci_dev *pdev = adapter->pdev;
4394 unsigned int rss;
4395#ifdef CONFIG_IXGBE_DCB
4396 int j;
4397 struct tc_configuration *tc;
4398#endif
4399
4400
4401
4402 hw->vendor_id = pdev->vendor;
4403 hw->device_id = pdev->device;
4404 hw->revision_id = pdev->revision;
4405 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4406 hw->subsystem_device_id = pdev->subsystem_device;
4407
4408
4409 rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
4410 adapter->ring_feature[RING_F_RSS].limit = rss;
4411 switch (hw->mac.type) {
4412 case ixgbe_mac_82598EB:
4413 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4414 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
4415 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
4416 break;
4417 case ixgbe_mac_X540:
4418 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; /* fall through */
4419 case ixgbe_mac_82599EB:
4420 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
4421 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4422 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
4423 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
4424 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4425
4426 adapter->atr_sample_rate = 20;
4427 adapter->ring_feature[RING_F_FDIR].limit =
4428 IXGBE_MAX_FDIR_INDICES;
4429 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
4430#ifdef IXGBE_FCOE
4431 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
4432 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
4433#ifdef CONFIG_IXGBE_DCB
4434
4435 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
4436#endif
4437#endif
4438 break;
4439 default:
4440 break;
4441 }
4442
4443#ifdef IXGBE_FCOE
4444
4445 spin_lock_init(&adapter->fcoe.lock);
4446
4447#endif
4448
4449 spin_lock_init(&adapter->fdir_perfect_lock);
4450
4451#ifdef CONFIG_IXGBE_DCB
4452 switch (hw->mac.type) {
4453 case ixgbe_mac_X540:
4454 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
4455 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
4456 break;
4457 default:
4458 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
4459 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
4460 break;
4461 }
4462
4463
4464 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
4465 tc = &adapter->dcb_cfg.tc_config[j];
4466 tc->path[DCB_TX_CONFIG].bwg_id = 0;
4467 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
4468 tc->path[DCB_RX_CONFIG].bwg_id = 0;
4469 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
4470 tc->dcb_pfc = pfc_disabled;
4471 }
4472
4473
4474 tc = &adapter->dcb_cfg.tc_config[0];
4475 tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
4476 tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
4477
4478 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
4479 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
4480 adapter->dcb_cfg.pfc_mode_enable = false;
4481 adapter->dcb_set_bitmap = 0x00;
4482 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
4483 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
4484 sizeof(adapter->temp_dcb_cfg));
4485
4486#endif
4487
4488
4489 hw->fc.requested_mode = ixgbe_fc_full;
4490 hw->fc.current_mode = ixgbe_fc_full;
4491 ixgbe_pbthresh_setup(adapter);
4492 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4493 hw->fc.send_xon = true;
4494 hw->fc.disable_fc_autoneg = false;
4495
4496#ifdef CONFIG_PCI_IOV
4497
4498 if (hw->mac.type != ixgbe_mac_82598EB)
4499 adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
4500
4501#endif
4502
4503 adapter->rx_itr_setting = 1;
4504 adapter->tx_itr_setting = 1;
4505
4506
4507 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
4508 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
4509
4510
4511 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
4512
4513
4514 if (ixgbe_init_eeprom_params_generic(hw)) {
4515 e_dev_err("EEPROM initialization failed\n");
4516 return -EIO;
4517 }
4518
4519 set_bit(__IXGBE_DOWN, &adapter->state);
4520
4521 return 0;
4522}
4523
4524
4525
4526
4527
4528
4529
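/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Allocates the buffer-info array and the DMA-coherent descriptor ring,
 * preferring the q_vector's NUMA node and falling back to any node.
 * Return 0 on success, negative on failure.
 **/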
4530int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
4531{
4532 struct device *dev = tx_ring->dev;
4533 int orig_node = dev_to_node(dev);
4534 int numa_node = -1;
4535 int size;
4536
4537 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4538
4539 if (tx_ring->q_vector)
4540 numa_node = tx_ring->q_vector->numa_node;
4541
4542 tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
4543 if (!tx_ring->tx_buffer_info)
4544 tx_ring->tx_buffer_info = vzalloc(size);
4545 if (!tx_ring->tx_buffer_info)
4546 goto err;
4547
4548
4549 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4550 tx_ring->size = ALIGN(tx_ring->size, 4096);
4551
4552 set_dev_node(dev, numa_node);
4553 tx_ring->desc = dma_alloc_coherent(dev,
4554 tx_ring->size,
4555 &tx_ring->dma,
4556 GFP_KERNEL);
4557 set_dev_node(dev, orig_node);
4558 if (!tx_ring->desc)
4559 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4560 &tx_ring->dma, GFP_KERNEL);
4561 if (!tx_ring->desc)
4562 goto err;
4563
4564 tx_ring->next_to_use = 0;
4565 tx_ring->next_to_clean = 0;
4566 return 0;
4567
4568err:
4569 vfree(tx_ring->tx_buffer_info);
4570 tx_ring->tx_buffer_info = NULL;
4571 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4572 return -ENOMEM;
4573}
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4586{
4587 int i, err = 0;
4588
4589 for (i = 0; i < adapter->num_tx_queues; i++) {
4590 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
4591 if (!err)
4592 continue;
4593
4594 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
4595 goto err_setup_tx;
4596 }
4597
4598 return 0;
4599err_setup_tx:
4600
4601 while (i--)
4602 ixgbe_free_tx_resources(adapter->tx_ring[i]);
4603 return err;
4604}
4605
4606
4607
4608
4609
4610
4611
4612int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
4613{
4614 struct device *dev = rx_ring->dev;
4615 int orig_node = dev_to_node(dev);
4616 int numa_node = -1;
4617 int size;
4618
4619 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4620
4621 if (rx_ring->q_vector)
4622 numa_node = rx_ring->q_vector->numa_node;
4623
4624 rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
4625 if (!rx_ring->rx_buffer_info)
4626 rx_ring->rx_buffer_info = vzalloc(size);
4627 if (!rx_ring->rx_buffer_info)
4628 goto err;
4629
4630
4631 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
4632 rx_ring->size = ALIGN(rx_ring->size, 4096);
4633
4634 set_dev_node(dev, numa_node);
4635 rx_ring->desc = dma_alloc_coherent(dev,
4636 rx_ring->size,
4637 &rx_ring->dma,
4638 GFP_KERNEL);
4639 set_dev_node(dev, orig_node);
4640 if (!rx_ring->desc)
4641 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4642 &rx_ring->dma, GFP_KERNEL);
4643 if (!rx_ring->desc)
4644 goto err;
4645
4646 rx_ring->next_to_clean = 0;
4647 rx_ring->next_to_use = 0;
4648
4649 ixgbe_init_rx_page_offset(rx_ring);
4650
4651 return 0;
4652err:
4653 vfree(rx_ring->rx_buffer_info);
4654 rx_ring->rx_buffer_info = NULL;
4655 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4656 return -ENOMEM;
4657}
4658
4659
4660
4661
4662
4663
4664
4665
4666
4667
4668
4669static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
4670{
4671 int i, err = 0;
4672
4673 for (i = 0; i < adapter->num_rx_queues; i++) {
4674 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
4675 if (!err)
4676 continue;
4677
4678 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
4679 goto err_setup_rx;
4680 }
4681
4682#ifdef IXGBE_FCOE
4683 err = ixgbe_setup_fcoe_ddp_resources(adapter);
4684 if (!err)
4685#endif
4686 return 0;
4687err_setup_rx:
4688
4689 while (i--)
4690 ixgbe_free_rx_resources(adapter->rx_ring[i]);
4691 return err;
4692}
4693
4694
4695
4696
4697
4698
4699
4700void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
4701{
4702 ixgbe_clean_tx_ring(tx_ring);
4703
4704 vfree(tx_ring->tx_buffer_info);
4705 tx_ring->tx_buffer_info = NULL;
4706
4707
4708 if (!tx_ring->desc)
4709 return;
4710
4711 dma_free_coherent(tx_ring->dev, tx_ring->size,
4712 tx_ring->desc, tx_ring->dma);
4713
4714 tx_ring->desc = NULL;
4715}
4716
4717
4718
4719
4720
4721
4722
4723static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
4724{
4725 int i;
4726
4727 for (i = 0; i < adapter->num_tx_queues; i++)
4728 if (adapter->tx_ring[i]->desc)
4729 ixgbe_free_tx_resources(adapter->tx_ring[i]);
4730}
4731
4732
4733
4734
4735
4736
4737
4738void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
4739{
4740 ixgbe_clean_rx_ring(rx_ring);
4741
4742 vfree(rx_ring->rx_buffer_info);
4743 rx_ring->rx_buffer_info = NULL;
4744
4745
4746 if (!rx_ring->desc)
4747 return;
4748
4749 dma_free_coherent(rx_ring->dev, rx_ring->size,
4750 rx_ring->desc, rx_ring->dma);
4751
4752 rx_ring->desc = NULL;
4753}
4754
4755
4756
4757
4758
4759
4760
4761static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
4762{
4763 int i;
4764
4765#ifdef IXGBE_FCOE
4766 ixgbe_free_fcoe_ddp_resources(adapter);
4767
4768#endif
4769 for (i = 0; i < adapter->num_rx_queues; i++)
4770 if (adapter->rx_ring[i]->desc)
4771 ixgbe_free_rx_resources(adapter->rx_ring[i]);
4772}
4773
4774
4775
4776
4777
4778
4779
4780
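/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/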
4781static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
4782{
4783 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4784 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4785
4786 /* MTU < 68 is an error and causes problems on some kernels */
4787 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
4788 return -EINVAL;
4789
4790
4791
4792
4793
4794
4795 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
4796 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
4797 (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
4798 return -EINVAL;
4799
4800 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4801
4802
4803 netdev->mtu = new_mtu;
4804
4805 if (netif_running(netdev))
4806 ixgbe_reinit_locked(adapter);
4807
4808 return 0;
4809}
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
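/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/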
4823static int ixgbe_open(struct net_device *netdev)
4824{
4825 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4826 int err;
4827
	/* disallow open during test */
4829 if (test_bit(__IXGBE_TESTING, &adapter->state))
4830 return -EBUSY;
4831
4832 netif_carrier_off(netdev);
4833
	/* allocate transmit descriptors */
4835 err = ixgbe_setup_all_tx_resources(adapter);
4836 if (err)
4837 goto err_setup_tx;
4838
	/* allocate receive descriptors */
4840 err = ixgbe_setup_all_rx_resources(adapter);
4841 if (err)
4842 goto err_setup_rx;
4843
4844 ixgbe_configure(adapter);
4845
4846 err = ixgbe_request_irq(adapter);
4847 if (err)
4848 goto err_req_irq;
4849
	/* Notify the stack of the actual queue counts. */
4851 err = netif_set_real_num_tx_queues(netdev,
4852 adapter->num_rx_pools > 1 ? 1 :
4853 adapter->num_tx_queues);
4854 if (err)
4855 goto err_set_queues;
4856
4857
4858 err = netif_set_real_num_rx_queues(netdev,
4859 adapter->num_rx_pools > 1 ? 1 :
4860 adapter->num_rx_queues);
4861 if (err)
4862 goto err_set_queues;
4863
4864 ixgbe_up_complete(adapter);
4865
4866 return 0;
4867
4868err_set_queues:
4869 ixgbe_free_irq(adapter);
4870err_req_irq:
4871 ixgbe_free_all_rx_resources(adapter);
4872err_setup_rx:
4873 ixgbe_free_all_tx_resources(adapter);
4874err_setup_tx:
4875 ixgbe_reset(adapter);
4876
4877 return err;
4878}
4879
/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
4891static int ixgbe_close(struct net_device *netdev)
4892{
4893 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4894
4895 ixgbe_down(adapter);
4896 ixgbe_free_irq(adapter);
4897
4898 ixgbe_fdir_filter_exit(adapter);
4899
4900 ixgbe_free_all_tx_resources(adapter);
4901 ixgbe_free_all_rx_resources(adapter);
4902
4903 ixgbe_release_hw_control(adapter);
4904
4905 return 0;
4906}
4907
4908#ifdef CONFIG_PM
4909static int ixgbe_resume(struct pci_dev *pdev)
4910{
4911 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
4912 struct net_device *netdev = adapter->netdev;
4913 u32 err;
4914
4915 pci_set_power_state(pdev, PCI_D0);
4916 pci_restore_state(pdev);
	/*
	 * pci_restore_state() clears dev->state_saved, so call
	 * pci_save_state() to restore it.
	 */
4921 pci_save_state(pdev);
4922
4923 err = pci_enable_device_mem(pdev);
4924 if (err) {
4925 e_dev_err("Cannot enable PCI device from suspend\n");
4926 return err;
4927 }
4928 pci_set_master(pdev);
4929
4930 pci_wake_from_d3(pdev, false);
4931
4932 ixgbe_reset(adapter);
4933
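	/* clear any Wake Up Status bits left over from the suspended state */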
4934 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
4935
4936 rtnl_lock();
4937 err = ixgbe_init_interrupt_scheme(adapter);
4938 if (!err && netif_running(netdev))
4939 err = ixgbe_open(netdev);
4940
4941 rtnl_unlock();
4942
4943 if (err)
4944 return err;
4945
4946 netif_device_attach(netdev);
4947
4948 return 0;
4949}
4950#endif
4951
4952static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
4953{
4954 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
4955 struct net_device *netdev = adapter->netdev;
4956 struct ixgbe_hw *hw = &adapter->hw;
4957 u32 ctrl, fctrl;
4958 u32 wufc = adapter->wol;
4959#ifdef CONFIG_PM
4960 int retval = 0;
4961#endif
4962
4963 netif_device_detach(netdev);
4964
4965 if (netif_running(netdev)) {
4966 rtnl_lock();
4967 ixgbe_down(adapter);
4968 ixgbe_free_irq(adapter);
4969 ixgbe_free_all_tx_resources(adapter);
4970 ixgbe_free_all_rx_resources(adapter);
4971 rtnl_unlock();
4972 }
4973
4974 ixgbe_clear_interrupt_scheme(adapter);
4975
4976#ifdef CONFIG_PM
4977 retval = pci_save_state(pdev);
4978 if (retval)
4979 return retval;
4980
4981#endif
4982 if (wufc) {
4983 ixgbe_set_rx_mode(netdev);
4984
		/*
		 * enable the optics for both multi-speed fiber and
		 * 82599 SFP+ fiber so the link can wake us
		 */
4989 if (hw->mac.ops.enable_tx_laser &&
4990 (hw->phy.multispeed_fiber ||
4991 (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
4992 hw->mac.type == ixgbe_mac_82599EB)))
4993 hw->mac.ops.enable_tx_laser(hw);
4994
		/* turn on all-multi mode if wake on multicast is enabled */
4996 if (wufc & IXGBE_WUFC_MC) {
4997 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4998 fctrl |= IXGBE_FCTRL_MPE;
4999 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5000 }
5001
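		/*
		 * GIO_DIS should block new PCIe master requests so the
		 * device can drop into its low-power wake state cleanly
		 * (assumption based on the register name; not confirmed here).
		 */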
5002 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
5003 ctrl |= IXGBE_CTRL_GIO_DIS;
5004 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
5005
5006 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
5007 } else {
5008 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
5009 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5010 }
5011
5012 switch (hw->mac.type) {
5013 case ixgbe_mac_82598EB:
5014 pci_wake_from_d3(pdev, false);
5015 break;
5016 case ixgbe_mac_82599EB:
5017 case ixgbe_mac_X540:
5018 pci_wake_from_d3(pdev, !!wufc);
5019 break;
5020 default:
5021 break;
5022 }
5023
5024 *enable_wake = !!wufc;
5025
5026 ixgbe_release_hw_control(adapter);
5027
5028 pci_disable_device(pdev);
5029
5030 return 0;
5031}
5032
5033#ifdef CONFIG_PM
5034static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
5035{
5036 int retval;
5037 bool wake;
5038
5039 retval = __ixgbe_shutdown(pdev, &wake);
5040 if (retval)
5041 return retval;
5042
5043 if (wake) {
5044 pci_prepare_to_sleep(pdev);
5045 } else {
5046 pci_wake_from_d3(pdev, false);
5047 pci_set_power_state(pdev, PCI_D3hot);
5048 }
5049
5050 return 0;
5051}
5052#endif
5053
5054static void ixgbe_shutdown(struct pci_dev *pdev)
5055{
5056 bool wake;
5057
5058 __ixgbe_shutdown(pdev, &wake);
5059
5060 if (system_state == SYSTEM_POWER_OFF) {
5061 pci_wake_from_d3(pdev, wake);
5062 pci_set_power_state(pdev, PCI_D3hot);
5063 }
5064}
5065
/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
5070void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5071{
5072 struct net_device *netdev = adapter->netdev;
5073 struct ixgbe_hw *hw = &adapter->hw;
5074 struct ixgbe_hw_stats *hwstats = &adapter->stats;
5075 u64 total_mpc = 0;
5076 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
5077 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
5078 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
5079 u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
5080
5081 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5082 test_bit(__IXGBE_RESETTING, &adapter->state))
5083 return;
5084
5085 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
5086 u64 rsc_count = 0;
5087 u64 rsc_flush = 0;
5088 for (i = 0; i < adapter->num_rx_queues; i++) {
5089 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
5090 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
5091 }
5092 adapter->rsc_total_count = rsc_count;
5093 adapter->rsc_total_flush = rsc_flush;
5094 }
5095
5096 for (i = 0; i < adapter->num_rx_queues; i++) {
5097 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
5098 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
5099 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
5100 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
5101 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
5102 bytes += rx_ring->stats.bytes;
5103 packets += rx_ring->stats.packets;
5104 }
5105 adapter->non_eop_descs = non_eop_descs;
5106 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
5107 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
5108 adapter->hw_csum_rx_error = hw_csum_rx_error;
5109 netdev->stats.rx_bytes = bytes;
5110 netdev->stats.rx_packets = packets;
5111
5112 bytes = 0;
5113 packets = 0;
5114
5115 for (i = 0; i < adapter->num_tx_queues; i++) {
5116 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5117 restart_queue += tx_ring->tx_stats.restart_queue;
5118 tx_busy += tx_ring->tx_stats.tx_busy;
5119 bytes += tx_ring->stats.bytes;
5120 packets += tx_ring->stats.packets;
5121 }
5122 adapter->restart_queue = restart_queue;
5123 adapter->tx_busy = tx_busy;
5124 netdev->stats.tx_bytes = bytes;
5125 netdev->stats.tx_packets = packets;
5126
5127 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
5128
5129
5130 for (i = 0; i < 8; i++) {
5131
5132 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
5133 missed_rx += mpc;
5134 hwstats->mpc[i] += mpc;
5135 total_mpc += hwstats->mpc[i];
5136 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5137 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
5138 switch (hw->mac.type) {
5139 case ixgbe_mac_82598EB:
5140 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
5141 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5142 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
5143 hwstats->pxonrxc[i] +=
5144 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
5145 break;
5146 case ixgbe_mac_82599EB:
5147 case ixgbe_mac_X540:
5148 hwstats->pxonrxc[i] +=
5149 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5150 break;
5151 default:
5152 break;
5153 }
5154 }
5155
5156
5157 for (i = 0; i < 16; i++) {
5158 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
5159 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5160 if ((hw->mac.type == ixgbe_mac_82599EB) ||
5161 (hw->mac.type == ixgbe_mac_X540)) {
5162 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
5163 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
5164 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
5165 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
5166 }
5167 }
5168
5169 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
5170
5171 hwstats->gprc -= missed_rx;
5172
5173 ixgbe_update_xoff_received(adapter);
5174
5175
5176 switch (hw->mac.type) {
5177 case ixgbe_mac_82598EB:
5178 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5179 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5180 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5181 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5182 break;
5183 case ixgbe_mac_X540:
		/* OS2BMC stats are X540 only */
5185 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
5186 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
5187 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
5188 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
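		/* fall through - the remaining counters are shared with 82599 */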
5189 case ixgbe_mac_82599EB:
5190 for (i = 0; i < 16; i++)
5191 adapter->hw_rx_no_dma_resources +=
5192 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
5199 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5200 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5201 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
5202#ifdef IXGBE_FCOE
5203 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
5204 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
5205 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
5206 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
5207 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5208 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
5209
5210 if (adapter->fcoe.ddp_pool) {
5211 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
5212 struct ixgbe_fcoe_ddp_pool *ddp_pool;
5213 unsigned int cpu;
5214 u64 noddp = 0, noddp_ext_buff = 0;
5215 for_each_possible_cpu(cpu) {
5216 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
5217 noddp += ddp_pool->noddp;
5218 noddp_ext_buff += ddp_pool->noddp_ext_buff;
5219 }
5220 hwstats->fcoe_noddp = noddp;
5221 hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
5222 }
5223#endif
5224 break;
5225 default:
5226 break;
5227 }
5228 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
5229 hwstats->bprc += bprc;
5230 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
5231 if (hw->mac.type == ixgbe_mac_82598EB)
5232 hwstats->mprc -= bprc;
5233 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
5234 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
5235 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
5236 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
5237 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
5238 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
5239 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
5240 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
5241 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
5242 hwstats->lxontxc += lxon;
5243 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
5244 hwstats->lxofftxc += lxoff;
5245 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
5246 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
5247
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
5250 xon_off_tot = lxon + lxoff;
5251 hwstats->gptc -= xon_off_tot;
5252 hwstats->mptc -= xon_off_tot;
5253 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
5254 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5255 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
5256 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
5257 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
5258 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
5259 hwstats->ptc64 -= xon_off_tot;
5260 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
5261 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
5262 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
5263 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
5264 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
5265 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
5266
5267
5268 netdev->stats.multicast = hwstats->mprc;
5269
5270
5271 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
5272 netdev->stats.rx_dropped = 0;
5273 netdev->stats.rx_length_errors = hwstats->rlec;
5274 netdev->stats.rx_crc_errors = hwstats->crcerrs;
5275 netdev->stats.rx_missed_errors = total_mpc;
5276}
5277
/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/
5282static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
5283{
5284 struct ixgbe_hw *hw = &adapter->hw;
5285 int i;
5286
5287 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
5288 return;
5289
5290 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
5291
5292
5293 if (test_bit(__IXGBE_DOWN, &adapter->state))
5294 return;
5295
5296
5297 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
5298 return;
5299
5300 adapter->fdir_overflow++;
5301
5302 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5303 for (i = 0; i < adapter->num_tx_queues; i++)
5304 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
5305 &(adapter->tx_ring[i]->state));
5306
5307 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
5308 } else {
5309 e_err(probe, "failed to finish FDIR re-initialization, "
5310 "ignored adding FDIR ATR filters\n");
5311 }
5312}
5313
/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/
5323static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
5324{
5325 struct ixgbe_hw *hw = &adapter->hw;
5326 u64 eics = 0;
5327 int i;
5328
5329
5330 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5331 test_bit(__IXGBE_RESETTING, &adapter->state))
5332 return;
5333
5334
5335 if (netif_carrier_ok(adapter->netdev)) {
5336 for (i = 0; i < adapter->num_tx_queues; i++)
5337 set_check_for_tx_hang(adapter->tx_ring[i]);
5338 }
5339
5340 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
5346 IXGBE_WRITE_REG(hw, IXGBE_EICS,
5347 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
5348 } else {
5349
5350 for (i = 0; i < adapter->num_q_vectors; i++) {
5351 struct ixgbe_q_vector *qv = adapter->q_vector[i];
5352 if (qv->rx.ring || qv->tx.ring)
5353 eics |= ((u64)1 << i);
5354 }
5355 }
5356
5357
5358 ixgbe_irq_rearm_queues(adapter, eics);
5359
5360}
5361
5362
/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
5367static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5368{
5369 struct ixgbe_hw *hw = &adapter->hw;
5370 u32 link_speed = adapter->link_speed;
5371 bool link_up = adapter->link_up;
5372 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
5373
5374 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
5375 return;
5376
5377 if (hw->mac.ops.check_link) {
5378 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
5379 } else {
5380
5381 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5382 link_up = true;
5383 }
5384
5385 if (adapter->ixgbe_ieee_pfc)
5386 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
5387
5388 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
5389 hw->mac.ops.fc_enable(hw);
5390 ixgbe_set_rx_drop_en(adapter);
5391 }
5392
5393 if (link_up ||
5394 time_after(jiffies, (adapter->link_check_timeout +
5395 IXGBE_TRY_LINK_TIMEOUT))) {
5396 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5397 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
5398 IXGBE_WRITE_FLUSH(hw);
5399 }
5400
5401 adapter->link_up = link_up;
5402 adapter->link_speed = link_speed;
5403}
5404
/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print the link up message
 * @adapter: pointer to the device adapter structure
 **/
5410static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5411{
5412 struct net_device *netdev = adapter->netdev;
5413 struct ixgbe_hw *hw = &adapter->hw;
5414 u32 link_speed = adapter->link_speed;
5415 bool flow_rx, flow_tx;
5416
5417
5418 if (netif_carrier_ok(netdev))
5419 return;
5420
5421 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
5422
5423 switch (hw->mac.type) {
5424 case ixgbe_mac_82598EB: {
5425 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5426 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
5427 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5428 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5429 }
5430 break;
5431 case ixgbe_mac_X540:
5432 case ixgbe_mac_82599EB: {
5433 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
5434 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
5435 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5436 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
5437 }
5438 break;
5439 default:
5440 flow_tx = false;
5441 flow_rx = false;
5442 break;
5443 }
5444
5445#ifdef CONFIG_IXGBE_PTP
5446 ixgbe_ptp_start_cyclecounter(adapter);
5447#endif
5448
5449 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
5450 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
5451 "10 Gbps" :
5452 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
5453 "1 Gbps" :
5454 (link_speed == IXGBE_LINK_SPEED_100_FULL ?
5455 "100 Mbps" :
5456 "unknown speed"))),
5457 ((flow_rx && flow_tx) ? "RX/TX" :
5458 (flow_rx ? "RX" :
5459 (flow_tx ? "TX" : "None"))));
5460
5461 netif_carrier_on(netdev);
5462 ixgbe_check_vf_rate_limit(adapter);
5463
5464
5465 ixgbe_ping_all_vfs(adapter);
5466}
5467
/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print the link down message
 * @adapter: pointer to the device adapter structure
 **/
5473static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
5474{
5475 struct net_device *netdev = adapter->netdev;
5476 struct ixgbe_hw *hw = &adapter->hw;
5477
5478 adapter->link_up = false;
5479 adapter->link_speed = 0;
5480
5481
5482 if (!netif_carrier_ok(netdev))
5483 return;
5484
5485
5486 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
5487 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5488
5489#ifdef CONFIG_IXGBE_PTP
5490 ixgbe_ptp_start_cyclecounter(adapter);
5491#endif
5492
5493 e_info(drv, "NIC Link is Down\n");
5494 netif_carrier_off(netdev);
5495
5496
5497 ixgbe_ping_all_vfs(adapter);
5498}
5499
/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/
5504static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
5505{
5506 int i;
5507 int some_tx_pending = 0;
5508
5509 if (!netif_carrier_ok(adapter->netdev)) {
5510 for (i = 0; i < adapter->num_tx_queues; i++) {
5511 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5512 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
5513 some_tx_pending = 1;
5514 break;
5515 }
5516 }
5517
5518 if (some_tx_pending) {
			/*
			 * We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset the controller to flush Tx.
			 * (Do the reset outside of interrupt context.)
			 */
5524 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
5525 }
5526 }
5527}
5528
5529static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
5530{
5531 u32 ssvpc;
5532
5533
5534 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
5535 return;
5536
5537 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
5538
5539
5540
5541
5542
5543 if (!ssvpc)
5544 return;
5545
5546 e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
5547}
5548
/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/
5553static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
5554{
5555
5556 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5557 test_bit(__IXGBE_RESETTING, &adapter->state))
5558 return;
5559
5560 ixgbe_watchdog_update_link(adapter);
5561
5562 if (adapter->link_up)
5563 ixgbe_watchdog_link_is_up(adapter);
5564 else
5565 ixgbe_watchdog_link_is_down(adapter);
5566
5567 ixgbe_spoof_check(adapter);
5568 ixgbe_update_stats(adapter);
5569
5570 ixgbe_watchdog_flush_tx(adapter);
5571}
5572
/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/
5577static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
5578{
5579 struct ixgbe_hw *hw = &adapter->hw;
5580 s32 err;
5581
5582
5583 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
5584 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
5585 return;
5586
5587
5588 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5589 return;
5590
5591 err = hw->phy.ops.identify_sfp(hw);
5592 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
5593 goto sfp_out;
5594
5595 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
5596
5597
5598 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
5599 }
5600
5601
5602 if (err)
5603 goto sfp_out;
5604
5605
5606 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
5607 goto sfp_out;
5608
5609 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
5610
	/*
	 * A module may be identified correctly, but the EEPROM may not have
	 * support for that module.  setup_sfp() will fail in that case, so
	 * we should not allow that module to load.
	 */
5616 if (hw->mac.type == ixgbe_mac_82598EB)
5617 err = hw->phy.ops.reset(hw);
5618 else
5619 err = hw->mac.ops.setup_sfp(hw);
5620
5621 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
5622 goto sfp_out;
5623
5624 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
5625 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
5626
5627sfp_out:
5628 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5629
5630 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
5631 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
5632 e_dev_err("failed to initialize because an unsupported "
5633 "SFP+ module type was detected.\n");
5634 e_dev_err("Reload the driver after installing a "
5635 "supported module.\n");
5636 unregister_netdev(adapter->netdev);
5637 }
5638}
5639
/**
 * ixgbe_sfp_link_config_subtask - set up link SFP if needed
 * @adapter: the ixgbe adapter structure
 **/
5644static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
5645{
5646 struct ixgbe_hw *hw = &adapter->hw;
5647 u32 autoneg;
5648 bool negotiation;
5649
5650 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
5651 return;
5652
5653
5654 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
5655 return;
5656
5657 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
5658
5659 autoneg = hw->phy.autoneg_advertised;
5660 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
5661 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
5662 if (hw->mac.ops.setup_link)
5663 hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
5664
5665 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5666 adapter->link_check_timeout = jiffies;
5667 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
5668}
5669
5670#ifdef CONFIG_PCI_IOV
5671static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
5672{
5673 int vf;
5674 struct ixgbe_hw *hw = &adapter->hw;
5675 struct net_device *netdev = adapter->netdev;
5676 u32 gpc;
5677 u32 ciaa, ciad;
5678
5679 gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
5680 if (gpc)
5681 return;
5682
	/*
	 * Check to see if a bad DMA write target from an errant or
	 * malicious VF has caused a PCIe error.  If so then we can
	 * issue a VFLR to the offending VF(s) and then resume without
	 * requesting a full slot reset.
	 */
5689 for (vf = 0; vf < adapter->num_vfs; vf++) {
5690 ciaa = (vf << 16) | 0x80000000;
5691
5692 ciaa |= PCI_COMMAND;
5693 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
5694 ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599);
5695 ciaa &= 0x7FFFFFFF;
5696
5697 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
5698
5699 ciad >>= 16;
5700 if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
5701 netdev_err(netdev, "VF %d Hung DMA\n", vf);
5702
5703 ciaa = (vf << 16) | 0x80000000;
5704 ciaa |= 0xA8;
5705 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
5706 ciad = 0x00008000;
5707 IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad);
5708 ciaa &= 0x7FFFFFFF;
5709 IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
5710 }
5711 }
5712}
5713
5714#endif
/**
 * ixgbe_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
5719static void ixgbe_service_timer(unsigned long data)
5720{
5721 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
5722 unsigned long next_event_offset;
5723 bool ready = true;
5724
5725
5726 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
5727 next_event_offset = HZ / 10;
5728 else
5729 next_event_offset = HZ * 2;
5730
5731#ifdef CONFIG_PCI_IOV
	/*
	 * don't bother with the SR-IOV VF DMA hang check if there are
	 * no VFs or the link is down
	 */
5736 if (!adapter->num_vfs ||
5737 (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
5738 goto normal_timer_service;
5739
5740
5741 ixgbe_check_for_bad_vf(adapter);
5742 next_event_offset = HZ / 50;
5743 adapter->timer_event_accumulator++;
5744
5745 if (adapter->timer_event_accumulator >= 100)
5746 adapter->timer_event_accumulator = 0;
5747 else
5748 ready = false;
5749
5750normal_timer_service:
5751#endif
5752
5753 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
5754
5755 if (ready)
5756 ixgbe_service_event_schedule(adapter);
5757}
5758
5759static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
5760{
5761 if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
5762 return;
5763
5764 adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
5765
5766
5767 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5768 test_bit(__IXGBE_RESETTING, &adapter->state))
5769 return;
5770
5771 ixgbe_dump(adapter);
5772 netdev_err(adapter->netdev, "Reset adapter\n");
5773 adapter->tx_timeout_count++;
5774
5775 ixgbe_reinit_locked(adapter);
5776}
5777
/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
5782static void ixgbe_service_task(struct work_struct *work)
5783{
5784 struct ixgbe_adapter *adapter = container_of(work,
5785 struct ixgbe_adapter,
5786 service_task);
5787
5788 ixgbe_reset_subtask(adapter);
5789 ixgbe_sfp_detection_subtask(adapter);
5790 ixgbe_sfp_link_config_subtask(adapter);
5791 ixgbe_check_overtemp_subtask(adapter);
5792 ixgbe_watchdog_subtask(adapter);
5793 ixgbe_fdir_reinit_subtask(adapter);
5794 ixgbe_check_hang_subtask(adapter);
5795#ifdef CONFIG_IXGBE_PTP
5796 ixgbe_ptp_overflow_check(adapter);
5797#endif
5798
5799 ixgbe_service_event_complete(adapter);
5800}
5801
5802static int ixgbe_tso(struct ixgbe_ring *tx_ring,
5803 struct ixgbe_tx_buffer *first,
5804 u8 *hdr_len)
5805{
5806 struct sk_buff *skb = first->skb;
5807 u32 vlan_macip_lens, type_tucmd;
5808 u32 mss_l4len_idx, l4len;
5809
5810 if (!skb_is_gso(skb))
5811 return 0;
5812
5813 if (skb_header_cloned(skb)) {
5814 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5815 if (err)
5816 return err;
5817 }
5818
5819
5820 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
5821
5822 if (first->protocol == __constant_htons(ETH_P_IP)) {
5823 struct iphdr *iph = ip_hdr(skb);
5824 iph->tot_len = 0;
5825 iph->check = 0;
5826 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5827 iph->daddr, 0,
5828 IPPROTO_TCP,
5829 0);
5830 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
5831 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
5832 IXGBE_TX_FLAGS_CSUM |
5833 IXGBE_TX_FLAGS_IPV4;
5834 } else if (skb_is_gso_v6(skb)) {
5835 ipv6_hdr(skb)->payload_len = 0;
5836 tcp_hdr(skb)->check =
5837 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5838 &ipv6_hdr(skb)->daddr,
5839 0, IPPROTO_TCP, 0);
5840 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
5841 IXGBE_TX_FLAGS_CSUM;
5842 }
5843
5844
5845 l4len = tcp_hdrlen(skb);
5846 *hdr_len = skb_transport_offset(skb) + l4len;
5847
5848
5849 first->gso_segs = skb_shinfo(skb)->gso_segs;
5850 first->bytecount += (first->gso_segs - 1) * *hdr_len;
5851
5852
5853 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
5854 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
5855 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
5856
5857
5858 vlan_macip_lens = skb_network_header_len(skb);
5859 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
5860 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
5861
5862 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
5863 mss_l4len_idx);
5864
5865 return 1;
5866}
5867
5868static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
5869 struct ixgbe_tx_buffer *first)
5870{
5871 struct sk_buff *skb = first->skb;
5872 u32 vlan_macip_lens = 0;
5873 u32 mss_l4len_idx = 0;
5874 u32 type_tucmd = 0;
5875
5876 if (skb->ip_summed != CHECKSUM_PARTIAL) {
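		/*
		 * No checksum offload requested: a context descriptor is
		 * still needed if a VLAN tag or Tx switching has to be
		 * described, otherwise there is nothing to do.
		 */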
5877 if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
5878 !(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
5879 return;
5880 } else {
5881 u8 l4_hdr = 0;
5882 switch (first->protocol) {
5883 case __constant_htons(ETH_P_IP):
5884 vlan_macip_lens |= skb_network_header_len(skb);
5885 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
5886 l4_hdr = ip_hdr(skb)->protocol;
5887 break;
5888 case __constant_htons(ETH_P_IPV6):
5889 vlan_macip_lens |= skb_network_header_len(skb);
5890 l4_hdr = ipv6_hdr(skb)->nexthdr;
5891 break;
5892 default:
5893 if (unlikely(net_ratelimit())) {
5894 dev_warn(tx_ring->dev,
5895 "partial checksum but proto=%x!\n",
5896 first->protocol);
5897 }
5898 break;
5899 }
5900
5901 switch (l4_hdr) {
5902 case IPPROTO_TCP:
5903 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5904 mss_l4len_idx = tcp_hdrlen(skb) <<
5905 IXGBE_ADVTXD_L4LEN_SHIFT;
5906 break;
5907 case IPPROTO_SCTP:
5908 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5909 mss_l4len_idx = sizeof(struct sctphdr) <<
5910 IXGBE_ADVTXD_L4LEN_SHIFT;
5911 break;
5912 case IPPROTO_UDP:
5913 mss_l4len_idx = sizeof(struct udphdr) <<
5914 IXGBE_ADVTXD_L4LEN_SHIFT;
5915 break;
5916 default:
5917 if (unlikely(net_ratelimit())) {
5918 dev_warn(tx_ring->dev,
5919 "partial checksum but l4 proto=%x!\n",
5920 l4_hdr);
5921 }
5922 break;
5923 }
5924
5925
5926 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
5927 }
5928
5929
5930 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
5931 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
5932
5933 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
5934 type_tucmd, mss_l4len_idx);
5935}
5936
5937static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
5938{
5939
5940 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
5941 IXGBE_ADVTXD_DCMD_IFCS |
5942 IXGBE_ADVTXD_DCMD_DEXT);
5943
5944
5945 if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
5946 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
5947
5948#ifdef CONFIG_IXGBE_PTP
5949 if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
5950 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
5951#endif
5952
5953
5954#ifdef IXGBE_FCOE
5955 if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
5956#else
5957 if (tx_flags & IXGBE_TX_FLAGS_TSO)
5958#endif
5959 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
5960
5961 return cmd_type;
5962}
5963
5964static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
5965 u32 tx_flags, unsigned int paylen)
5966{
5967 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
5968
5969
5970 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
5971 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
5972
5973
5974 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
5975 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
5976
5977
5978#ifdef IXGBE_FCOE
5979 if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE))
5980#else
5981 if (tx_flags & IXGBE_TX_FLAGS_TSO)
5982#endif
5983 olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
5984
	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for the case where virtual functions are running
	 */
5989#ifdef IXGBE_FCOE
5990 if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE))
5991#else
5992 if (tx_flags & IXGBE_TX_FLAGS_TXSW)
5993#endif
5994 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
5995
5996 tx_desc->read.olinfo_status = olinfo_status;
5997}
5998
5999#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
6000 IXGBE_TXD_CMD_RS)
6001
6002static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
6003 struct ixgbe_tx_buffer *first,
6004 const u8 hdr_len)
6005{
6006 dma_addr_t dma;
6007 struct sk_buff *skb = first->skb;
6008 struct ixgbe_tx_buffer *tx_buffer;
6009 union ixgbe_adv_tx_desc *tx_desc;
6010 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
6011 unsigned int data_len = skb->data_len;
6012 unsigned int size = skb_headlen(skb);
6013 unsigned int paylen = skb->len - hdr_len;
6014 u32 tx_flags = first->tx_flags;
6015 __le32 cmd_type;
6016 u16 i = tx_ring->next_to_use;
6017
6018 tx_desc = IXGBE_TX_DESC(tx_ring, i);
6019
6020 ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
6021 cmd_type = ixgbe_tx_cmd_type(tx_flags);
6022
6023#ifdef IXGBE_FCOE
6024 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6025 if (data_len < sizeof(struct fcoe_crc_eof)) {
6026 size -= sizeof(struct fcoe_crc_eof) - data_len;
6027 data_len = 0;
6028 } else {
6029 data_len -= sizeof(struct fcoe_crc_eof);
6030 }
6031 }
6032
6033#endif
6034 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
6035 if (dma_mapping_error(tx_ring->dev, dma))
6036 goto dma_error;
6037
6038
6039 dma_unmap_len_set(first, len, size);
6040 dma_unmap_addr_set(first, dma, dma);
6041
6042 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6043
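	/*
	 * Walk the skb head and frags, writing one Tx descriptor per buffer
	 * and splitting any buffer larger than IXGBE_MAX_DATA_PER_TXD across
	 * multiple descriptors.
	 */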
6044 for (;;) {
6045 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
6046 tx_desc->read.cmd_type_len =
6047 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
6048
6049 i++;
6050 tx_desc++;
6051 if (i == tx_ring->count) {
6052 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6053 i = 0;
6054 }
6055
6056 dma += IXGBE_MAX_DATA_PER_TXD;
6057 size -= IXGBE_MAX_DATA_PER_TXD;
6058
6059 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6060 tx_desc->read.olinfo_status = 0;
6061 }
6062
6063 if (likely(!data_len))
6064 break;
6065
6066 if (unlikely(skb->no_fcs))
6067 cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS));
6068 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
6069
6070 i++;
6071 tx_desc++;
6072 if (i == tx_ring->count) {
6073 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
6074 i = 0;
6075 }
6076
6077#ifdef IXGBE_FCOE
6078 size = min_t(unsigned int, data_len, skb_frag_size(frag));
6079#else
6080 size = skb_frag_size(frag);
6081#endif
6082 data_len -= size;
6083
6084 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
6085 DMA_TO_DEVICE);
6086 if (dma_mapping_error(tx_ring->dev, dma))
6087 goto dma_error;
6088
6089 tx_buffer = &tx_ring->tx_buffer_info[i];
6090 dma_unmap_len_set(tx_buffer, len, size);
6091 dma_unmap_addr_set(tx_buffer, dma, dma);
6092
6093 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6094 tx_desc->read.olinfo_status = 0;
6095
6096 frag++;
6097 }
6098
6099
6100 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
6101 tx_desc->read.cmd_type_len = cmd_type;
6102
6103 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6104
6105
6106 first->time_stamp = jiffies;
6107
	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64.)
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
6116 wmb();
6117
6118
6119 first->next_to_watch = tx_desc;
6120
6121 i++;
6122 if (i == tx_ring->count)
6123 i = 0;
6124
6125 tx_ring->next_to_use = i;
6126
6127
6128 writel(i, tx_ring->tail);
6129
6130 return;
6131dma_error:
6132 dev_err(tx_ring->dev, "TX DMA map failed\n");
6133
6134
6135 for (;;) {
6136 tx_buffer = &tx_ring->tx_buffer_info[i];
6137 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
6138 if (tx_buffer == first)
6139 break;
6140 if (i == 0)
6141 i = tx_ring->count;
6142 i--;
6143 }
6144
6145 tx_ring->next_to_use = i;
6146}
6147
6148static void ixgbe_atr(struct ixgbe_ring *ring,
6149 struct ixgbe_tx_buffer *first)
6150{
6151 struct ixgbe_q_vector *q_vector = ring->q_vector;
6152 union ixgbe_atr_hash_dword input = { .dword = 0 };
6153 union ixgbe_atr_hash_dword common = { .dword = 0 };
6154 union {
6155 unsigned char *network;
6156 struct iphdr *ipv4;
6157 struct ipv6hdr *ipv6;
6158 } hdr;
6159 struct tcphdr *th;
6160 __be16 vlan_id;
6161
6162
6163 if (!q_vector)
6164 return;
6165
6166
6167 if (!ring->atr_sample_rate)
6168 return;
6169
6170 ring->atr_count++;
6171
6172
6173 hdr.network = skb_network_header(first->skb);
6174
6175
6176 if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
6177 hdr.ipv6->nexthdr != IPPROTO_TCP) &&
6178 (first->protocol != __constant_htons(ETH_P_IP) ||
6179 hdr.ipv4->protocol != IPPROTO_TCP))
6180 return;
6181
6182 th = tcp_hdr(first->skb);
6183
6184
6185 if (!th || th->fin)
6186 return;
6187
6188
6189 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
6190 return;
6191
6192
6193 ring->atr_count = 0;
6194
6195 vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
6196
	/*
	 * src and dst are inverted, think how the receiver sees them
	 *
	 * The input is broken into two sections, a non-compressed section
	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
	 * will be passed in over the dword stream and the hashed
	 * version of this will be used.
	 */
6204 input.formatted.vlan_id = vlan_id;
6205
	/*
	 * since src port and flex bytes occupy the same word XOR them together
	 * and write the value to the source port portion of the compressed dword
	 */
6210 if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
6211 common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
6212 else
6213 common.port.src ^= th->dest ^ first->protocol;
6214 common.port.dst ^= th->source;
6215
6216 if (first->protocol == __constant_htons(ETH_P_IP)) {
6217 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
6218 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
6219 } else {
6220 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
6221 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
6222 hdr.ipv6->saddr.s6_addr32[1] ^
6223 hdr.ipv6->saddr.s6_addr32[2] ^
6224 hdr.ipv6->saddr.s6_addr32[3] ^
6225 hdr.ipv6->daddr.s6_addr32[0] ^
6226 hdr.ipv6->daddr.s6_addr32[1] ^
6227 hdr.ipv6->daddr.s6_addr32[2] ^
6228 hdr.ipv6->daddr.s6_addr32[3];
6229 }
6230
6231
6232 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
6233 input, common, ring->queue_index);
6234}
6235
6236static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6237{
6238 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
6239
	/*
	 * Memory barrier: make the stopped-queue state visible to the
	 * transmit completion path before re-checking for free descriptors.
	 */
6242 smp_mb();
6243
	/*
	 * We need to check again in case another CPU has just
	 * made room available.
	 */
6246 if (likely(ixgbe_desc_unused(tx_ring) < size))
6247 return -EBUSY;
6248
6249
6250 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
6251 ++tx_ring->tx_stats.restart_queue;
6252 return 0;
6253}
6254
6255static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6256{
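	/*
	 * Fast path: only fall into the queue-stop slow path when the
	 * ring does not have enough free descriptors for this packet.
	 */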
6257 if (likely(ixgbe_desc_unused(tx_ring) >= size))
6258 return 0;
6259 return __ixgbe_maybe_stop_tx(tx_ring, size);
6260}
6261
6262static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6263{
6264 struct ixgbe_adapter *adapter = netdev_priv(dev);
6265 int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
6266 smp_processor_id();
6267#ifdef IXGBE_FCOE
6268 __be16 protocol = vlan_get_protocol(skb);
6269
6270 if (((protocol == htons(ETH_P_FCOE)) ||
6271 (protocol == htons(ETH_P_FIP))) &&
6272 (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
6273 struct ixgbe_ring_feature *f;
6274
6275 f = &adapter->ring_feature[RING_F_FCOE];
6276
6277 while (txq >= f->indices)
6278 txq -= f->indices;
6279 txq += adapter->ring_feature[RING_F_FCOE].offset;
6280
6281 return txq;
6282 }
6283#endif
6284
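	/*
	 * With Flow Director (ATR) hashing enabled, keep the recorded Rx
	 * queue (or current CPU) as the Tx queue, wrapped into the real
	 * queue range, so a flow's Tx and Rx work stay on the same queue.
	 */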
6285 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
6286 while (unlikely(txq >= dev->real_num_tx_queues))
6287 txq -= dev->real_num_tx_queues;
6288 return txq;
6289 }
6290
6291 return skb_tx_hash(dev, skb);
6292}
6293
6294netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6295 struct ixgbe_adapter *adapter,
6296 struct ixgbe_ring *tx_ring)
6297{
6298 struct ixgbe_tx_buffer *first;
6299 int tso;
6300 u32 tx_flags = 0;
6301#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
6302 unsigned short f;
6303#endif
6304 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6305 __be16 protocol = skb->protocol;
6306 u8 hdr_len = 0;
6307
	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
6315#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
6316 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6317 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6318#else
6319 count += skb_shinfo(skb)->nr_frags;
6320#endif
6321 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
6322 tx_ring->tx_stats.tx_busy++;
6323 return NETDEV_TX_BUSY;
6324 }
6325
6326
6327 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6328 first->skb = skb;
6329 first->bytecount = skb->len;
6330 first->gso_segs = 1;
6331
6332
6333 if (vlan_tx_tag_present(skb)) {
6334 tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
6335 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
6336
6337 } else if (protocol == __constant_htons(ETH_P_8021Q)) {
6338 struct vlan_hdr *vhdr, _vhdr;
6339 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
6340 if (!vhdr)
6341 goto out_drop;
6342
6343 protocol = vhdr->h_vlan_encapsulated_proto;
6344 tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
6345 IXGBE_TX_FLAGS_VLAN_SHIFT;
6346 tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
6347 }
6348
6349 skb_tx_timestamp(skb);
6350
6351#ifdef CONFIG_IXGBE_PTP
6352 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6353 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6354 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
6355 }
6356#endif
6357
6358#ifdef CONFIG_PCI_IOV
6359
6360
6361
6362
6363 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6364 tx_flags |= IXGBE_TX_FLAGS_TXSW;
6365
6366#endif
6367
6368 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
6369 ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
6370 (skb->priority != TC_PRIO_CONTROL))) {
6371 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
6372 tx_flags |= (skb->priority & 0x7) <<
6373 IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
6374 if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
6375 struct vlan_ethhdr *vhdr;
6376 if (skb_header_cloned(skb) &&
6377 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6378 goto out_drop;
6379 vhdr = (struct vlan_ethhdr *)skb->data;
6380 vhdr->h_vlan_TCI = htons(tx_flags >>
6381 IXGBE_TX_FLAGS_VLAN_SHIFT);
6382 } else {
6383 tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
6384 }
6385 }
6386
6387
6388 first->tx_flags = tx_flags;
6389 first->protocol = protocol;
6390
6391#ifdef IXGBE_FCOE
6392
6393 if ((protocol == __constant_htons(ETH_P_FCOE)) &&
6394 (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
6395 tso = ixgbe_fso(tx_ring, first, &hdr_len);
6396 if (tso < 0)
6397 goto out_drop;
6398
6399 goto xmit_fcoe;
6400 }
6401
6402#endif
6403 tso = ixgbe_tso(tx_ring, first, &hdr_len);
6404 if (tso < 0)
6405 goto out_drop;
6406 else if (!tso)
6407 ixgbe_tx_csum(tx_ring, first);
6408
6409
6410 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
6411 ixgbe_atr(tx_ring, first);
6412
6413#ifdef IXGBE_FCOE
6414xmit_fcoe:
6415#endif
6416 ixgbe_tx_map(tx_ring, first, hdr_len);
6417
6418 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
6419
6420 return NETDEV_TX_OK;
6421
6422out_drop:
6423 dev_kfree_skb_any(first->skb);
6424 first->skb = NULL;
6425
6426 return NETDEV_TX_OK;
6427}
6428
6429static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6430 struct net_device *netdev)
6431{
6432 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6433 struct ixgbe_ring *tx_ring;
6434
	/*
	 * The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
6439 if (unlikely(skb->len < 17)) {
6440 if (skb_pad(skb, 17 - skb->len))
6441 return NETDEV_TX_OK;
6442 skb->len = 17;
6443 }
6444
6445 tx_ring = adapter->tx_ring[skb->queue_mapping];
6446 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
6447}
6448
6449
/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
6456static int ixgbe_set_mac(struct net_device *netdev, void *p)
6457{
6458 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6459 struct ixgbe_hw *hw = &adapter->hw;
6460 struct sockaddr *addr = p;
6461
6462 if (!is_valid_ether_addr(addr->sa_data))
6463 return -EADDRNOTAVAIL;
6464
6465 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
6466 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
6467
6468 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
6469
6470 return 0;
6471}
6472
6473static int
6474ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
6475{
6476 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6477 struct ixgbe_hw *hw = &adapter->hw;
6478 u16 value;
6479 int rc;
6480
6481 if (prtad != hw->phy.mdio.prtad)
6482 return -EINVAL;
6483 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
6484 if (!rc)
6485 rc = value;
6486 return rc;
6487}
6488
6489static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
6490 u16 addr, u16 value)
6491{
6492 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6493 struct ixgbe_hw *hw = &adapter->hw;
6494
6495 if (prtad != hw->phy.mdio.prtad)
6496 return -EINVAL;
6497 return hw->phy.ops.write_reg(hw, addr, devad, value);
6498}
6499
6500static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
6501{
6502 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6503
6504 switch (cmd) {
6505#ifdef CONFIG_IXGBE_PTP
6506 case SIOCSHWTSTAMP:
6507 return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
6508#endif
6509 default:
6510 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
6511 }
6512}
6513
6514
/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
6521static int ixgbe_add_sanmac_netdev(struct net_device *dev)
6522{
6523 int err = 0;
6524 struct ixgbe_adapter *adapter = netdev_priv(dev);
6525 struct ixgbe_hw *hw = &adapter->hw;
6526
6527 if (is_valid_ether_addr(hw->mac.san_addr)) {
6528 rtnl_lock();
6529 err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
6530 rtnl_unlock();
6531
6532
6533 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
6534 }
6535 return err;
6536}
6537
6538
/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
6545static int ixgbe_del_sanmac_netdev(struct net_device *dev)
6546{
6547 int err = 0;
6548 struct ixgbe_adapter *adapter = netdev_priv(dev);
6549 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6550
6551 if (is_valid_ether_addr(mac->san_addr)) {
6552 rtnl_lock();
6553 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
6554 rtnl_unlock();
6555 }
6556 return err;
6557}
6558
6559#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
6565static void ixgbe_netpoll(struct net_device *netdev)
6566{
6567 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6568 int i;
6569
6570
6571 if (test_bit(__IXGBE_DOWN, &adapter->state))
6572 return;
6573
6574 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
6575 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
6576 for (i = 0; i < adapter->num_q_vectors; i++)
6577 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
6578 } else {
6579 ixgbe_intr(adapter->pdev->irq, netdev);
6580 }
6581 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
6582}
6583
6584#endif
6585static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6586 struct rtnl_link_stats64 *stats)
6587{
6588 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6589 int i;
6590
6591 rcu_read_lock();
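	/*
	 * Per-ring byte/packet counters are sampled under their u64_stats
	 * seqcount so a consistent snapshot is taken without locking.
	 */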
6592 for (i = 0; i < adapter->num_rx_queues; i++) {
6593 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
6594 u64 bytes, packets;
6595 unsigned int start;
6596
6597 if (ring) {
6598 do {
6599 start = u64_stats_fetch_begin_bh(&ring->syncp);
6600 packets = ring->stats.packets;
6601 bytes = ring->stats.bytes;
6602 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6603 stats->rx_packets += packets;
6604 stats->rx_bytes += bytes;
6605 }
6606 }
6607
6608 for (i = 0; i < adapter->num_tx_queues; i++) {
6609 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
6610 u64 bytes, packets;
6611 unsigned int start;
6612
6613 if (ring) {
6614 do {
6615 start = u64_stats_fetch_begin_bh(&ring->syncp);
6616 packets = ring->stats.packets;
6617 bytes = ring->stats.bytes;
6618 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6619 stats->tx_packets += packets;
6620 stats->tx_bytes += bytes;
6621 }
6622 }
6623 rcu_read_unlock();
6624
6625 stats->multicast = netdev->stats.multicast;
6626 stats->rx_errors = netdev->stats.rx_errors;
6627 stats->rx_length_errors = netdev->stats.rx_length_errors;
6628 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
6629 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
6630 return stats;
6631}
6632
6633#ifdef CONFIG_IXGBE_DCB
6634
/**
 * ixgbe_validate_rtr - verify the 802.1Qp to Rx packet buffer mapping is valid
 * @adapter: pointer to adapter struct
 * @tc: number of traffic classes currently enabled
 *
 * Confirm each 802.1Q priority maps to a Rx packet buffer that exists,
 * clearing any mapping that points past the enabled traffic classes.
 */
6642static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
6643{
6644 struct ixgbe_hw *hw = &adapter->hw;
6645 u32 reg, rsave;
6646 int i;
6647
6648
6649
6650
6651 if (hw->mac.type == ixgbe_mac_82598EB)
6652 return;
6653
6654 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
6655 rsave = reg;
6656
6657 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
6658 u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
6659
6660
6661 if (up2tc > tc)
6662 reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
6663 }
6664
6665 if (reg != rsave)
6666 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
6667
6668 return;
6669}
6670
/**
 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
 * @adapter: pointer to adapter struct
 *
 * Populate the netdev user priority to traffic class map
 */
6677static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
6678{
6679 struct net_device *dev = adapter->netdev;
6680 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
6681 struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
6682 u8 prio;
6683
6684 for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
6685 u8 tc = 0;
6686
6687 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
6688 tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
6689 else if (ets)
6690 tc = ets->prio_tc[prio];
6691
6692 netdev_set_prio_tc_map(dev, prio, tc);
6693 }
6694}
6695
6696
/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 */
6702int ixgbe_setup_tc(struct net_device *dev, u8 tc)
6703{
6704 struct ixgbe_adapter *adapter = netdev_priv(dev);
6705 struct ixgbe_hw *hw = &adapter->hw;
6706
6707
6708 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
6709 (hw->mac.type == ixgbe_mac_82598EB &&
6710 tc < MAX_TRAFFIC_CLASS))
6711 return -EINVAL;
6712
6713
6714
6715
6716
6717 if (netif_running(dev))
6718 ixgbe_close(dev);
6719 ixgbe_clear_interrupt_scheme(adapter);
6720
6721 if (tc) {
6722 netdev_set_num_tc(dev, tc);
6723 ixgbe_set_prio_tc_map(adapter);
6724
6725 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
6726
6727 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
6728 adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
6729 adapter->hw.fc.requested_mode = ixgbe_fc_none;
6730 }
6731 } else {
6732 netdev_reset_tc(dev);
6733
6734 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
6735 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
6736
6737 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
6738
6739 adapter->temp_dcb_cfg.pfc_mode_enable = false;
6740 adapter->dcb_cfg.pfc_mode_enable = false;
6741 }
6742
6743 ixgbe_init_interrupt_scheme(adapter);
6744 ixgbe_validate_rtr(adapter, tc);
6745 if (netif_running(dev))
6746 ixgbe_open(dev);
6747
6748 return 0;
6749}
6750
6751#endif
6752void ixgbe_do_reset(struct net_device *netdev)
6753{
6754 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6755
6756 if (netif_running(netdev))
6757 ixgbe_reinit_locked(adapter);
6758 else
6759 ixgbe_reset(adapter);
6760}
6761
6762static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
6763 netdev_features_t features)
6764{
6765 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6766
6767
6768 if (!(features & NETIF_F_RXCSUM))
6769 features &= ~NETIF_F_LRO;
6770
6771
6772 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
6773 features &= ~NETIF_F_LRO;
6774
6775 return features;
6776}
6777
6778static int ixgbe_set_features(struct net_device *netdev,
6779 netdev_features_t features)
6780{
6781 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6782 netdev_features_t changed = netdev->features ^ features;
6783 bool need_reset = false;
6784
6785
6786 if (!(features & NETIF_F_LRO)) {
6787 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
6788 need_reset = true;
6789 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
6790 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
6791 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
6792 if (adapter->rx_itr_setting == 1 ||
6793 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
6794 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
6795 need_reset = true;
6796 } else if ((changed ^ features) & NETIF_F_LRO) {
6797 e_info(probe, "rx-usecs set too low, "
6798 "disabling RSC\n");
6799 }
6800 }
6801
6802
6803
6804
6805
6806 switch (features & NETIF_F_NTUPLE) {
6807 case NETIF_F_NTUPLE:
6808
6809 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
6810 need_reset = true;
6811
6812 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
6813 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
6814 break;
6815 default:
6816
6817 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
6818 need_reset = true;
6819
6820 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
6821
6822
6823 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6824 break;
6825
6826
6827 if (netdev_get_num_tc(netdev) > 1)
6828 break;
6829
6830
6831 if (adapter->ring_feature[RING_F_RSS].limit <= 1)
6832 break;
6833
6834
6835 if (!adapter->atr_sample_rate)
6836 break;
6837
6838 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
6839 break;
6840 }
6841
6842 if (features & NETIF_F_HW_VLAN_RX)
6843 ixgbe_vlan_strip_enable(adapter);
6844 else
6845 ixgbe_vlan_strip_disable(adapter);
6846
6847 if (changed & NETIF_F_RXALL)
6848 need_reset = true;
6849
6850 netdev->features = features;
6851 if (need_reset)
6852 ixgbe_do_reset(netdev);
6853
6854 return 0;
6855}
6856
6857static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
6858 struct net_device *dev,
6859 unsigned char *addr,
6860 u16 flags)
6861{
6862 struct ixgbe_adapter *adapter = netdev_priv(dev);
6863 int err;
6864
6865 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
6866 return -EOPNOTSUPP;
6867
6868 if (ndm->ndm_state & NUD_PERMANENT) {
6869 pr_info("%s: FDB only supports static addresses\n",
6870 ixgbe_driver_name);
6871 return -EINVAL;
6872 }
6873
6874 if (is_unicast_ether_addr(addr)) {
6875 u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
6876
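		/* add the address only while a spare PF MAC/VLAN filter entry remains */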
6877 if (netdev_uc_count(dev) < rar_uc_entries)
6878 err = dev_uc_add_excl(dev, addr);
6879 else
6880 err = -ENOMEM;
6881 } else if (is_multicast_ether_addr(addr)) {
6882 err = dev_mc_add_excl(dev, addr);
6883 } else {
6884 err = -EINVAL;
6885 }
6886
6887
6888 if (err == -EEXIST && !(flags & NLM_F_EXCL))
6889 err = 0;
6890
6891 return err;
6892}
6893
6894static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
6895 struct net_device *dev,
6896 unsigned char *addr)
6897{
6898 struct ixgbe_adapter *adapter = netdev_priv(dev);
6899 int err = -EOPNOTSUPP;
6900
6901 if (ndm->ndm_state & NUD_PERMANENT) {
6902 pr_info("%s: FDB only supports static addresses\n",
6903 ixgbe_driver_name);
6904 return -EINVAL;
6905 }
6906
6907 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6908 if (is_unicast_ether_addr(addr))
6909 err = dev_uc_del(dev, addr);
6910 else if (is_multicast_ether_addr(addr))
6911 err = dev_mc_del(dev, addr);
6912 else
6913 err = -EINVAL;
6914 }
6915
6916 return err;
6917}
6918
6919static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
6920 struct netlink_callback *cb,
6921 struct net_device *dev,
6922 int idx)
6923{
6924 struct ixgbe_adapter *adapter = netdev_priv(dev);
6925
6926 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6927 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
6928
6929 return idx;
6930}
6931
6932static const struct net_device_ops ixgbe_netdev_ops = {
6933 .ndo_open = ixgbe_open,
6934 .ndo_stop = ixgbe_close,
6935 .ndo_start_xmit = ixgbe_xmit_frame,
6936 .ndo_select_queue = ixgbe_select_queue,
6937 .ndo_set_rx_mode = ixgbe_set_rx_mode,
6938 .ndo_validate_addr = eth_validate_addr,
6939 .ndo_set_mac_address = ixgbe_set_mac,
6940 .ndo_change_mtu = ixgbe_change_mtu,
6941 .ndo_tx_timeout = ixgbe_tx_timeout,
6942 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
6943 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
6944 .ndo_do_ioctl = ixgbe_ioctl,
6945 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
6946 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
6947 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
6948 .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
6949 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
6950 .ndo_get_stats64 = ixgbe_get_stats64,
6951#ifdef CONFIG_IXGBE_DCB
6952 .ndo_setup_tc = ixgbe_setup_tc,
6953#endif
6954#ifdef CONFIG_NET_POLL_CONTROLLER
6955 .ndo_poll_controller = ixgbe_netpoll,
6956#endif
6957#ifdef IXGBE_FCOE
6958 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
6959 .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
6960 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
6961 .ndo_fcoe_enable = ixgbe_fcoe_enable,
6962 .ndo_fcoe_disable = ixgbe_fcoe_disable,
6963 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
6964 .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
6965#endif
6966 .ndo_set_features = ixgbe_set_features,
6967 .ndo_fix_features = ixgbe_fix_features,
6968 .ndo_fdb_add = ixgbe_ndo_fdb_add,
6969 .ndo_fdb_del = ixgbe_ndo_fdb_del,
6970 .ndo_fdb_dump = ixgbe_ndo_fdb_dump,
6971};
6972
6973
/**
 * ixgbe_wol_supported - Check whether the device supports WoL
 * @adapter: the adapter private structure
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * This function is used by probe and ethtool to determine
 * which devices have WoL support
 **/
6983int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
6984 u16 subdevice_id)
6985{
6986 struct ixgbe_hw *hw = &adapter->hw;
6987 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
6988 int is_wol_supported = 0;
6989
6990 switch (device_id) {
6991 case IXGBE_DEV_ID_82599_SFP:
6992
6993 switch (subdevice_id) {
6994 case IXGBE_SUBDEV_ID_82599_560FLR:
6995
6996 if (hw->bus.func != 0)
6997 break;
6998 case IXGBE_SUBDEV_ID_82599_SFP:
6999 case IXGBE_SUBDEV_ID_82599_RNDC:
7000 is_wol_supported = 1;
7001 break;
7002 }
7003 break;
7004 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
7005
7006 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
7007 is_wol_supported = 1;
7008 break;
7009 case IXGBE_DEV_ID_82599_KX4:
7010 is_wol_supported = 1;
7011 break;
7012 case IXGBE_DEV_ID_X540T:
7013
7014 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
7015 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
7016 (hw->bus.func == 0))) {
7017 is_wol_supported = 1;
7018 }
7019 break;
7020 }
7021
7022 return is_wol_supported;
7023}
7024
7025
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
7036static int __devinit ixgbe_probe(struct pci_dev *pdev,
7037 const struct pci_device_id *ent)
7038{
7039 struct net_device *netdev;
7040 struct ixgbe_adapter *adapter = NULL;
7041 struct ixgbe_hw *hw;
7042 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
7043 static int cards_found;
7044 int i, err, pci_using_dac;
7045 u8 part_str[IXGBE_PBANUM_LENGTH];
7046 unsigned int indices = num_possible_cpus();
7047 unsigned int dcb_max = 0;
7048#ifdef IXGBE_FCOE
7049 u16 device_caps;
7050#endif
7051 u32 eec;
7052
7053
7054
7055
7056 if (pdev->is_virtfn) {
7057 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
7058 pci_name(pdev), pdev->vendor, pdev->device);
7059 return -EINVAL;
7060 }
7061
7062 err = pci_enable_device_mem(pdev);
7063 if (err)
7064 return err;
7065
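	/* prefer 64-bit DMA and fall back to a 32-bit mask if unsupported */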
7066 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
7067 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7068 pci_using_dac = 1;
7069 } else {
7070 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
7071 if (err) {
7072 err = dma_set_coherent_mask(&pdev->dev,
7073 DMA_BIT_MASK(32));
7074 if (err) {
7075 dev_err(&pdev->dev,
7076 "No usable DMA configuration, aborting\n");
7077 goto err_dma;
7078 }
7079 }
7080 pci_using_dac = 0;
7081 }
7082
7083 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
7084 IORESOURCE_MEM), ixgbe_driver_name);
7085 if (err) {
7086 dev_err(&pdev->dev,
7087 "pci_request_selected_regions failed 0x%x\n", err);
7088 goto err_pci_reg;
7089 }
7090
7091 pci_enable_pcie_error_reporting(pdev);
7092
7093 pci_set_master(pdev);
7094 pci_save_state(pdev);
7095
#ifdef CONFIG_IXGBE_DCB
	if (ii->mac == ixgbe_mac_82598EB)
		dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
				IXGBE_MAX_RSS_INDICES);
	else
		dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
				IXGBE_MAX_FDIR_INDICES);
#endif

	if (ii->mac == ixgbe_mac_82598EB)
		indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
	else
		indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);

#ifdef IXGBE_FCOE
	indices += min_t(unsigned int, num_possible_cpus(),
			 IXGBE_MAX_FCOE_INDICES);
#endif
	indices = max_t(unsigned int, dcb_max, indices);
	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* Make it possible for the adapter to be woken up via WoL */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		break;
	default:
		break;
	}

	/*
	 * If there is a fan on this device and it has failed, log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(probe, "Fan has stopped, replace the adapter\n");
	}

	if (allow_unsupported_sfp)
		hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* reset_hw fills in the perm_addr as well */
	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to load because an unsupported SFP+ "
			  "module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported "
			  "module.\n");
		goto err_sw_init;
	} else if (err) {
		e_dev_err("HW Init failed: %d\n", err);
		goto err_sw_init;
	}

#ifdef CONFIG_PCI_IOV
	ixgbe_enable_sriov(adapter, ii);

#endif
	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM;

	netdev->hw_features = netdev->features;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		netdev->features |= NETIF_F_SCTP_CSUM;
		netdev->hw_features |= NETIF_F_SCTP_CSUM |
				       NETIF_F_NTUPLE;
		break;
	default:
		break;
	}

	netdev->hw_features |= NETIF_F_RXALL;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}

		adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;

		netdev->features |= NETIF_F_FSO |
				    NETIF_F_FCOE_CRC;

		netdev->vlan_features |= NETIF_F_FSO |
					 NETIF_F_FCOE_CRC |
					 NETIF_F_FCOE_MTU;
	}
#endif
	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
		netdev->hw_features |= NETIF_F_LRO;
	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		e_dev_err("The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_sw_init;
	}
7298
7299 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
7300 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
7301
7302 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
7303 e_dev_err("invalid MAC address\n");
7304 err = -EIO;
7305 goto err_sw_init;
7306 }
7307
7308 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
7309 (unsigned long) adapter);
7310
7311 INIT_WORK(&adapter->service_task, ixgbe_service_task);
7312 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
7313
7314 err = ixgbe_init_interrupt_scheme(adapter);
7315 if (err)
7316 goto err_sw_init;
7317
7318
7319 adapter->wol = 0;
7320 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
7321 if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device))
7322 adapter->wol = IXGBE_WUFC_MAG;
7323
7324 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
7325
7326#ifdef CONFIG_IXGBE_PTP
7327 ixgbe_ptp_init(adapter);
7328#endif
7329
7330
7331 hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
7332 hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
7333
7334
7335 hw->mac.ops.get_bus_info(hw);
7336
7337
7338 e_dev_info("(PCI Express:%s:%s) %pM\n",
7339 (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
7340 hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
7341 "Unknown"),
7342 (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
7343 hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
7344 hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
7345 "Unknown"),
7346 netdev->dev_addr);
7347
7348 err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
7349 if (err)
7350 strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
7351 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
7352 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
7353 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
7354 part_str);
7355 else
7356 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
7357 hw->mac.type, hw->phy.type, part_str);
7358
7359 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
7360 e_dev_warn("PCI-Express bandwidth available for this card is "
7361 "not sufficient for optimal performance.\n");
7362 e_dev_warn("For optimal performance a x8 PCI-Express slot "
7363 "is required.\n");
7364 }

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);
	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser &&
	    ((hw->phy.multispeed_fiber) ||
	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	      (hw->mac.type == ixgbe_mac_82599EB))))
		hw->mac.ops.disable_tx_laser(hw);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* firmware requires the driver version to be 0xFFFFFFFF
	 * since the OS does not support the feature
	 */
	if (hw->mac.ops.set_fw_drv_ver)
		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
					   0xFF);

	/* add SAN MAC address to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("%s\n", ixgbe_default_device_descr);
	cards_found++;

#ifdef CONFIG_IXGBE_HWMON
	if (ixgbe_sysfs_init(adapter))
		e_err(probe, "failed to allocate sysfs resources\n");
#endif

	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbe_disable_sriov(adapter);
	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
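/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/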
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	set_bit(__IXGBE_DOWN, &adapter->state);
	cancel_work_sync(&adapter->service_task);

#ifdef CONFIG_IXGBE_PTP
	ixgbe_ptp_stop(adapter);
#endif

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef CONFIG_IXGBE_HWMON
	ixgbe_sysfs_exit(adapter);
#endif

	/* remove the added SAN MAC address */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_disable_sriov(adapter);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

#ifdef CONFIG_DCB
	kfree(adapter->ixgbe_ieee_pfc);
	kfree(adapter->ixgbe_ieee_ets);

#endif
	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	e_dev_info("complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
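/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */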
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	struct pci_dev *bdev, *vfdev;
	u32 dw0, dw1, dw2, dw3;
	int vf, pos;
	u16 req_id, pf_func;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		goto skip_bad_vf_detection;

	bdev = pdev->bus->self;
	while (bdev && (bdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT))
		bdev = bdev->bus->self;

	if (!bdev)
		goto skip_bad_vf_detection;

	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		goto skip_bad_vf_detection;

	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);

	req_id = dw1 >> 16;
	/* if bit 7 of the requestor ID is not set then it is not a VF */
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;

	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		unsigned int device_id;

		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
			  "%8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			device_id = IXGBE_82599_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X540:
			device_id = IXGBE_X540_VF_DEVICE_ID;
			break;
		default:
			device_id = 0;
			break;
		}

		/* find the pci device of the offending VF */
		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					       device_id, vfdev);
		}
		/*
		 * There is a slim chance the VF cannot be found at this
		 * point; if it was found, request a function-level reset
		 * (VFLR) on the offending VF.
		 */
		if (vfdev) {
			e_dev_err("Issuing VFLR to VF %d\n", vf);
			pci_write_config_dword(vfdev, 0xA8, 0x00008000);
		}

		pci_cleanup_aer_uncorrect_error_status(pdev);
	}

	/*
	 * Even though the error may have occurred on the other port,
	 * we still need to increment the VF error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
	adapter->vferr_refcount++;

	return PCI_ERS_RESULT_RECOVERED;

skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
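/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */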
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}
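/**
 * ixgbe_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */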
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	if (adapter->vferr_refcount) {
		e_info(drv, "Resuming after VF err\n");
		adapter->vferr_refcount--;
		return;
	}

#endif
	if (netif_running(netdev))
		ixgbe_up(adapter);

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name      = ixgbe_driver_name,
	.id_table  = ixgbe_pci_tbl,
	.probe     = ixgbe_probe,
	.remove    = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend   = ixgbe_suspend,
	.resume    = ixgbe_resume,
#endif
	.shutdown  = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};
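/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. It registers the driver with the PCI subsystem (and the DCA
 * notifier when CONFIG_IXGBE_DCA is enabled).
 **/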
static int __init ixgbe_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);
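/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/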
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);