1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26#include <linux/module.h>
27#include <linux/types.h>
28#include <linux/init.h>
29#include <linux/bitops.h>
30#include <linux/vmalloc.h>
31#include <linux/pagemap.h>
32#include <linux/netdevice.h>
33#include <linux/ipv6.h>
34#include <linux/slab.h>
35#include <net/checksum.h>
36#include <net/ip6_checksum.h>
37#include <net/pkt_sched.h>
38#include <linux/net_tstamp.h>
39#include <linux/mii.h>
40#include <linux/ethtool.h>
41#include <linux/if.h>
42#include <linux/if_vlan.h>
43#include <linux/pci.h>
44#include <linux/pci-aspm.h>
45#include <linux/delay.h>
46#include <linux/interrupt.h>
47#include <linux/ip.h>
48#include <linux/tcp.h>
49#include <linux/sctp.h>
50#include <linux/if_ether.h>
51#include <linux/aer.h>
52#include <linux/prefetch.h>
53#include <linux/pm_runtime.h>
54#include <linux/etherdevice.h>
55#ifdef CONFIG_IGB_DCA
56#include <linux/dca.h>
57#endif
58#include <linux/i2c.h>
59#include "igb.h"
60
61#define MAJ 5
62#define MIN 4
63#define BUILD 0
64#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
65__stringify(BUILD) "-k"
66
67enum queue_mode {
68 QUEUE_MODE_STRICT_PRIORITY,
69 QUEUE_MODE_STREAM_RESERVATION,
70};
71
72enum tx_queue_prio {
73 TX_QUEUE_PRIO_HIGH,
74 TX_QUEUE_PRIO_LOW,
75};
76
77char igb_driver_name[] = "igb";
78char igb_driver_version[] = DRV_VERSION;
79static const char igb_driver_string[] =
80 "Intel(R) Gigabit Ethernet Network Driver";
81static const char igb_copyright[] =
82 "Copyright (c) 2007-2014 Intel Corporation.";
83
84static const struct e1000_info *igb_info_tbl[] = {
85 [board_82575] = &e1000_82575_info,
86};
87
88static const struct pci_device_id igb_pci_tbl[] = {
89 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
90 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
91 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
92 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
93 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
94 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
95 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
96 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
97 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
98 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
99 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
100 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
101 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
102 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
103 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
104 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
105 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
106 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
107 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
108 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
109 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
110 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
111 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
112 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
113 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
114 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
115 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
116 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
117 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
118 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
119 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
120 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
121 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
122 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
123 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
124
125 {0, }
126};
127
128MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
129
130static int igb_setup_all_tx_resources(struct igb_adapter *);
131static int igb_setup_all_rx_resources(struct igb_adapter *);
132static void igb_free_all_tx_resources(struct igb_adapter *);
133static void igb_free_all_rx_resources(struct igb_adapter *);
134static void igb_setup_mrqc(struct igb_adapter *);
135static int igb_probe(struct pci_dev *, const struct pci_device_id *);
136static void igb_remove(struct pci_dev *pdev);
137static int igb_sw_init(struct igb_adapter *);
138int igb_open(struct net_device *);
139int igb_close(struct net_device *);
140static void igb_configure(struct igb_adapter *);
141static void igb_configure_tx(struct igb_adapter *);
142static void igb_configure_rx(struct igb_adapter *);
143static void igb_clean_all_tx_rings(struct igb_adapter *);
144static void igb_clean_all_rx_rings(struct igb_adapter *);
145static void igb_clean_tx_ring(struct igb_ring *);
146static void igb_clean_rx_ring(struct igb_ring *);
147static void igb_set_rx_mode(struct net_device *);
148static void igb_update_phy_info(struct timer_list *);
149static void igb_watchdog(struct timer_list *);
150static void igb_watchdog_task(struct work_struct *);
151static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
152static void igb_get_stats64(struct net_device *dev,
153 struct rtnl_link_stats64 *stats);
154static int igb_change_mtu(struct net_device *, int);
155static int igb_set_mac(struct net_device *, void *);
156static void igb_set_uta(struct igb_adapter *adapter, bool set);
157static irqreturn_t igb_intr(int irq, void *);
158static irqreturn_t igb_intr_msi(int irq, void *);
159static irqreturn_t igb_msix_other(int irq, void *);
160static irqreturn_t igb_msix_ring(int irq, void *);
161#ifdef CONFIG_IGB_DCA
162static void igb_update_dca(struct igb_q_vector *);
163static void igb_setup_dca(struct igb_adapter *);
164#endif
165static int igb_poll(struct napi_struct *, int);
166static bool igb_clean_tx_irq(struct igb_q_vector *, int);
167static int igb_clean_rx_irq(struct igb_q_vector *, int);
168static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
169static void igb_tx_timeout(struct net_device *);
170static void igb_reset_task(struct work_struct *);
171static void igb_vlan_mode(struct net_device *netdev,
172 netdev_features_t features);
173static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
174static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
175static void igb_restore_vlan(struct igb_adapter *);
176static void igb_rar_set_index(struct igb_adapter *, u32);
177static void igb_ping_all_vfs(struct igb_adapter *);
178static void igb_msg_task(struct igb_adapter *);
179static void igb_vmm_control(struct igb_adapter *);
180static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
181static void igb_flush_mac_table(struct igb_adapter *);
182static int igb_available_rars(struct igb_adapter *, u8);
183static void igb_set_default_mac_filter(struct igb_adapter *);
184static int igb_uc_sync(struct net_device *, const unsigned char *);
185static int igb_uc_unsync(struct net_device *, const unsigned char *);
186static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
187static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
188static int igb_ndo_set_vf_vlan(struct net_device *netdev,
189 int vf, u16 vlan, u8 qos, __be16 vlan_proto);
190static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
191static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
192 bool setting);
193static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
194 struct ifla_vf_info *ivi);
195static void igb_check_vf_rate_limit(struct igb_adapter *);
196static void igb_nfc_filter_exit(struct igb_adapter *adapter);
197static void igb_nfc_filter_restore(struct igb_adapter *adapter);
198
199#ifdef CONFIG_PCI_IOV
200static int igb_vf_configure(struct igb_adapter *adapter, int vf);
201static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
202static int igb_disable_sriov(struct pci_dev *dev);
203static int igb_pci_disable_sriov(struct pci_dev *dev);
204#endif
205
206static int igb_suspend(struct device *);
207static int igb_resume(struct device *);
208static int igb_runtime_suspend(struct device *dev);
209static int igb_runtime_resume(struct device *dev);
210static int igb_runtime_idle(struct device *dev);
211static const struct dev_pm_ops igb_pm_ops = {
212 SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
213 SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
214 igb_runtime_idle)
215};
216static void igb_shutdown(struct pci_dev *);
217static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
218#ifdef CONFIG_IGB_DCA
219static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
220static struct notifier_block dca_notifier = {
221 .notifier_call = igb_notify_dca,
222 .next = NULL,
223 .priority = 0
224};
225#endif
226#ifdef CONFIG_NET_POLL_CONTROLLER
227
228static void igb_netpoll(struct net_device *);
229#endif
230#ifdef CONFIG_PCI_IOV
231static unsigned int max_vfs;
232module_param(max_vfs, uint, 0);
233MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
234#endif
235
236static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
237 pci_channel_state_t);
238static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
239static void igb_io_resume(struct pci_dev *);
240
241static const struct pci_error_handlers igb_err_handler = {
242 .error_detected = igb_io_error_detected,
243 .slot_reset = igb_io_slot_reset,
244 .resume = igb_io_resume,
245};
246
247static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
248
249static struct pci_driver igb_driver = {
250 .name = igb_driver_name,
251 .id_table = igb_pci_tbl,
252 .probe = igb_probe,
253 .remove = igb_remove,
254#ifdef CONFIG_PM
255 .driver.pm = &igb_pm_ops,
256#endif
257 .shutdown = igb_shutdown,
258 .sriov_configure = igb_pci_sriov_configure,
259 .err_handler = &igb_err_handler
260};
261
262MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
263MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
264MODULE_LICENSE("GPL");
265MODULE_VERSION(DRV_VERSION);
266
267#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
268static int debug = -1;
269module_param(debug, int, 0);
270MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
271
272struct igb_reg_info {
273 u32 ofs;
274 char *name;
275};
276
277static const struct igb_reg_info igb_reg_info_tbl[] = {
278
279
280 {E1000_CTRL, "CTRL"},
281 {E1000_STATUS, "STATUS"},
282 {E1000_CTRL_EXT, "CTRL_EXT"},
283
284
285 {E1000_ICR, "ICR"},
286
287
288 {E1000_RCTL, "RCTL"},
289 {E1000_RDLEN(0), "RDLEN"},
290 {E1000_RDH(0), "RDH"},
291 {E1000_RDT(0), "RDT"},
292 {E1000_RXDCTL(0), "RXDCTL"},
293 {E1000_RDBAL(0), "RDBAL"},
294 {E1000_RDBAH(0), "RDBAH"},
295
296
297 {E1000_TCTL, "TCTL"},
298 {E1000_TDBAL(0), "TDBAL"},
299 {E1000_TDBAH(0), "TDBAH"},
300 {E1000_TDLEN(0), "TDLEN"},
301 {E1000_TDH(0), "TDH"},
302 {E1000_TDT(0), "TDT"},
303 {E1000_TXDCTL(0), "TXDCTL"},
304 {E1000_TDFH, "TDFH"},
305 {E1000_TDFT, "TDFT"},
306 {E1000_TDFHS, "TDFHS"},
307 {E1000_TDFPC, "TDFPC"},
308
309
310 {}
311};
312
313
314static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
315{
316 int n = 0;
317 char rname[16];
318 u32 regs[8];
319
320 switch (reginfo->ofs) {
321 case E1000_RDLEN(0):
322 for (n = 0; n < 4; n++)
323 regs[n] = rd32(E1000_RDLEN(n));
324 break;
325 case E1000_RDH(0):
326 for (n = 0; n < 4; n++)
327 regs[n] = rd32(E1000_RDH(n));
328 break;
329 case E1000_RDT(0):
330 for (n = 0; n < 4; n++)
331 regs[n] = rd32(E1000_RDT(n));
332 break;
333 case E1000_RXDCTL(0):
334 for (n = 0; n < 4; n++)
335 regs[n] = rd32(E1000_RXDCTL(n));
336 break;
337 case E1000_RDBAL(0):
338 for (n = 0; n < 4; n++)
339 regs[n] = rd32(E1000_RDBAL(n));
340 break;
341 case E1000_RDBAH(0):
342 for (n = 0; n < 4; n++)
343 regs[n] = rd32(E1000_RDBAH(n));
344 break;
345 case E1000_TDBAL(0):
346 for (n = 0; n < 4; n++)
347 regs[n] = rd32(E1000_RDBAL(n));
348 break;
349 case E1000_TDBAH(0):
350 for (n = 0; n < 4; n++)
351 regs[n] = rd32(E1000_TDBAH(n));
352 break;
353 case E1000_TDLEN(0):
354 for (n = 0; n < 4; n++)
355 regs[n] = rd32(E1000_TDLEN(n));
356 break;
357 case E1000_TDH(0):
358 for (n = 0; n < 4; n++)
359 regs[n] = rd32(E1000_TDH(n));
360 break;
361 case E1000_TDT(0):
362 for (n = 0; n < 4; n++)
363 regs[n] = rd32(E1000_TDT(n));
364 break;
365 case E1000_TXDCTL(0):
366 for (n = 0; n < 4; n++)
367 regs[n] = rd32(E1000_TXDCTL(n));
368 break;
369 default:
370 pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
371 return;
372 }
373
374 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
375 pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
376 regs[2], regs[3]);
377}
378
379
380static void igb_dump(struct igb_adapter *adapter)
381{
382 struct net_device *netdev = adapter->netdev;
383 struct e1000_hw *hw = &adapter->hw;
384 struct igb_reg_info *reginfo;
385 struct igb_ring *tx_ring;
386 union e1000_adv_tx_desc *tx_desc;
387 struct my_u0 { u64 a; u64 b; } *u0;
388 struct igb_ring *rx_ring;
389 union e1000_adv_rx_desc *rx_desc;
390 u32 staterr;
391 u16 i, n;
392
393 if (!netif_msg_hw(adapter))
394 return;
395
396
397 if (netdev) {
398 dev_info(&adapter->pdev->dev, "Net device Info\n");
399 pr_info("Device Name state trans_start\n");
400 pr_info("%-15s %016lX %016lX\n", netdev->name,
401 netdev->state, dev_trans_start(netdev));
402 }
403
404
405 dev_info(&adapter->pdev->dev, "Register Dump\n");
406 pr_info(" Register Name Value\n");
407 for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
408 reginfo->name; reginfo++) {
409 igb_regdump(hw, reginfo);
410 }
411
412
413 if (!netdev || !netif_running(netdev))
414 goto exit;
415
416 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
417 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
418 for (n = 0; n < adapter->num_tx_queues; n++) {
419 struct igb_tx_buffer *buffer_info;
420 tx_ring = adapter->tx_ring[n];
421 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
422 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
423 n, tx_ring->next_to_use, tx_ring->next_to_clean,
424 (u64)dma_unmap_addr(buffer_info, dma),
425 dma_unmap_len(buffer_info, len),
426 buffer_info->next_to_watch,
427 (u64)buffer_info->time_stamp);
428 }
429
430
431 if (!netif_msg_tx_done(adapter))
432 goto rx_ring_summary;
433
434 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
435
436
437
438
439
440
441
442
443
444
445
446
447 for (n = 0; n < adapter->num_tx_queues; n++) {
448 tx_ring = adapter->tx_ring[n];
449 pr_info("------------------------------------\n");
450 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
451 pr_info("------------------------------------\n");
452 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
453
454 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
455 const char *next_desc;
456 struct igb_tx_buffer *buffer_info;
457 tx_desc = IGB_TX_DESC(tx_ring, i);
458 buffer_info = &tx_ring->tx_buffer_info[i];
459 u0 = (struct my_u0 *)tx_desc;
460 if (i == tx_ring->next_to_use &&
461 i == tx_ring->next_to_clean)
462 next_desc = " NTC/U";
463 else if (i == tx_ring->next_to_use)
464 next_desc = " NTU";
465 else if (i == tx_ring->next_to_clean)
466 next_desc = " NTC";
467 else
468 next_desc = "";
469
470 pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
471 i, le64_to_cpu(u0->a),
472 le64_to_cpu(u0->b),
473 (u64)dma_unmap_addr(buffer_info, dma),
474 dma_unmap_len(buffer_info, len),
475 buffer_info->next_to_watch,
476 (u64)buffer_info->time_stamp,
477 buffer_info->skb, next_desc);
478
479 if (netif_msg_pktdata(adapter) && buffer_info->skb)
480 print_hex_dump(KERN_INFO, "",
481 DUMP_PREFIX_ADDRESS,
482 16, 1, buffer_info->skb->data,
483 dma_unmap_len(buffer_info, len),
484 true);
485 }
486 }
487
488
489rx_ring_summary:
490 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
491 pr_info("Queue [NTU] [NTC]\n");
492 for (n = 0; n < adapter->num_rx_queues; n++) {
493 rx_ring = adapter->rx_ring[n];
494 pr_info(" %5d %5X %5X\n",
495 n, rx_ring->next_to_use, rx_ring->next_to_clean);
496 }
497
498
499 if (!netif_msg_rx_status(adapter))
500 goto exit;
501
502 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525 for (n = 0; n < adapter->num_rx_queues; n++) {
526 rx_ring = adapter->rx_ring[n];
527 pr_info("------------------------------------\n");
528 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
529 pr_info("------------------------------------\n");
530 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
531 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
532
533 for (i = 0; i < rx_ring->count; i++) {
534 const char *next_desc;
535 struct igb_rx_buffer *buffer_info;
536 buffer_info = &rx_ring->rx_buffer_info[i];
537 rx_desc = IGB_RX_DESC(rx_ring, i);
538 u0 = (struct my_u0 *)rx_desc;
539 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
540
541 if (i == rx_ring->next_to_use)
542 next_desc = " NTU";
543 else if (i == rx_ring->next_to_clean)
544 next_desc = " NTC";
545 else
546 next_desc = "";
547
548 if (staterr & E1000_RXD_STAT_DD) {
549
550 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
551 "RWB", i,
552 le64_to_cpu(u0->a),
553 le64_to_cpu(u0->b),
554 next_desc);
555 } else {
556 pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
557 "R ", i,
558 le64_to_cpu(u0->a),
559 le64_to_cpu(u0->b),
560 (u64)buffer_info->dma,
561 next_desc);
562
563 if (netif_msg_pktdata(adapter) &&
564 buffer_info->dma && buffer_info->page) {
565 print_hex_dump(KERN_INFO, "",
566 DUMP_PREFIX_ADDRESS,
567 16, 1,
568 page_address(buffer_info->page) +
569 buffer_info->page_offset,
570 igb_rx_bufsz(rx_ring), true);
571 }
572 }
573 }
574 }
575
576exit:
577 return;
578}
579
580
581
582
583
584
585
586
587static int igb_get_i2c_data(void *data)
588{
589 struct igb_adapter *adapter = (struct igb_adapter *)data;
590 struct e1000_hw *hw = &adapter->hw;
591 s32 i2cctl = rd32(E1000_I2CPARAMS);
592
593 return !!(i2cctl & E1000_I2C_DATA_IN);
594}
595
596
597
598
599
600
601
602
603static void igb_set_i2c_data(void *data, int state)
604{
605 struct igb_adapter *adapter = (struct igb_adapter *)data;
606 struct e1000_hw *hw = &adapter->hw;
607 s32 i2cctl = rd32(E1000_I2CPARAMS);
608
609 if (state)
610 i2cctl |= E1000_I2C_DATA_OUT;
611 else
612 i2cctl &= ~E1000_I2C_DATA_OUT;
613
614 i2cctl &= ~E1000_I2C_DATA_OE_N;
615 i2cctl |= E1000_I2C_CLK_OE_N;
616 wr32(E1000_I2CPARAMS, i2cctl);
617 wrfl();
618
619}
620
621
622
623
624
625
626
627
628static void igb_set_i2c_clk(void *data, int state)
629{
630 struct igb_adapter *adapter = (struct igb_adapter *)data;
631 struct e1000_hw *hw = &adapter->hw;
632 s32 i2cctl = rd32(E1000_I2CPARAMS);
633
634 if (state) {
635 i2cctl |= E1000_I2C_CLK_OUT;
636 i2cctl &= ~E1000_I2C_CLK_OE_N;
637 } else {
638 i2cctl &= ~E1000_I2C_CLK_OUT;
639 i2cctl &= ~E1000_I2C_CLK_OE_N;
640 }
641 wr32(E1000_I2CPARAMS, i2cctl);
642 wrfl();
643}
644
645
646
647
648
649
650
651static int igb_get_i2c_clk(void *data)
652{
653 struct igb_adapter *adapter = (struct igb_adapter *)data;
654 struct e1000_hw *hw = &adapter->hw;
655 s32 i2cctl = rd32(E1000_I2CPARAMS);
656
657 return !!(i2cctl & E1000_I2C_CLK_IN);
658}
659
660static const struct i2c_algo_bit_data igb_i2c_algo = {
661 .setsda = igb_set_i2c_data,
662 .setscl = igb_set_i2c_clk,
663 .getsda = igb_get_i2c_data,
664 .getscl = igb_get_i2c_clk,
665 .udelay = 5,
666 .timeout = 20,
667};
668
669
670
671
672
673
674
675struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
676{
677 struct igb_adapter *adapter = hw->back;
678 return adapter->netdev;
679}
680
681
682
683
684
685
686
687static int __init igb_init_module(void)
688{
689 int ret;
690
691 pr_info("%s - version %s\n",
692 igb_driver_string, igb_driver_version);
693 pr_info("%s\n", igb_copyright);
694
695#ifdef CONFIG_IGB_DCA
696 dca_register_notify(&dca_notifier);
697#endif
698 ret = pci_register_driver(&igb_driver);
699 return ret;
700}
701
702module_init(igb_init_module);
703
704
705
706
707
708
709
710static void __exit igb_exit_module(void)
711{
712#ifdef CONFIG_IGB_DCA
713 dca_unregister_notify(&dca_notifier);
714#endif
715 pci_unregister_driver(&igb_driver);
716}
717
718module_exit(igb_exit_module);
719
720#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
721
722
723
724
725
726
727
728static void igb_cache_ring_register(struct igb_adapter *adapter)
729{
730 int i = 0, j = 0;
731 u32 rbase_offset = adapter->vfs_allocated_count;
732
733 switch (adapter->hw.mac.type) {
734 case e1000_82576:
735
736
737
738
739
740 if (adapter->vfs_allocated_count) {
741 for (; i < adapter->rss_queues; i++)
742 adapter->rx_ring[i]->reg_idx = rbase_offset +
743 Q_IDX_82576(i);
744 }
745
746 case e1000_82575:
747 case e1000_82580:
748 case e1000_i350:
749 case e1000_i354:
750 case e1000_i210:
751 case e1000_i211:
752
753 default:
754 for (; i < adapter->num_rx_queues; i++)
755 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
756 for (; j < adapter->num_tx_queues; j++)
757 adapter->tx_ring[j]->reg_idx = rbase_offset + j;
758 break;
759 }
760}
761
762u32 igb_rd32(struct e1000_hw *hw, u32 reg)
763{
764 struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
765 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
766 u32 value = 0;
767
768 if (E1000_REMOVED(hw_addr))
769 return ~value;
770
771 value = readl(&hw_addr[reg]);
772
773
774 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
775 struct net_device *netdev = igb->netdev;
776 hw->hw_addr = NULL;
777 netif_device_detach(netdev);
778 netdev_err(netdev, "PCIe link lost, device now detached\n");
779 }
780
781 return value;
782}
783
784
785
786
787
788
789
790
791
792
793
794
795
796static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
797 int index, int offset)
798{
799 u32 ivar = array_rd32(E1000_IVAR0, index);
800
801
802 ivar &= ~((u32)0xFF << offset);
803
804
805 ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
806
807 array_wr32(E1000_IVAR0, index, ivar);
808}
809
810#define IGB_N0_QUEUE -1
811static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
812{
813 struct igb_adapter *adapter = q_vector->adapter;
814 struct e1000_hw *hw = &adapter->hw;
815 int rx_queue = IGB_N0_QUEUE;
816 int tx_queue = IGB_N0_QUEUE;
817 u32 msixbm = 0;
818
819 if (q_vector->rx.ring)
820 rx_queue = q_vector->rx.ring->reg_idx;
821 if (q_vector->tx.ring)
822 tx_queue = q_vector->tx.ring->reg_idx;
823
824 switch (hw->mac.type) {
825 case e1000_82575:
826
827
828
829
830
831 if (rx_queue > IGB_N0_QUEUE)
832 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
833 if (tx_queue > IGB_N0_QUEUE)
834 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
835 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
836 msixbm |= E1000_EIMS_OTHER;
837 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
838 q_vector->eims_value = msixbm;
839 break;
840 case e1000_82576:
841
842
843
844
845
846 if (rx_queue > IGB_N0_QUEUE)
847 igb_write_ivar(hw, msix_vector,
848 rx_queue & 0x7,
849 (rx_queue & 0x8) << 1);
850 if (tx_queue > IGB_N0_QUEUE)
851 igb_write_ivar(hw, msix_vector,
852 tx_queue & 0x7,
853 ((tx_queue & 0x8) << 1) + 8);
854 q_vector->eims_value = BIT(msix_vector);
855 break;
856 case e1000_82580:
857 case e1000_i350:
858 case e1000_i354:
859 case e1000_i210:
860 case e1000_i211:
861
862
863
864
865
866
867 if (rx_queue > IGB_N0_QUEUE)
868 igb_write_ivar(hw, msix_vector,
869 rx_queue >> 1,
870 (rx_queue & 0x1) << 4);
871 if (tx_queue > IGB_N0_QUEUE)
872 igb_write_ivar(hw, msix_vector,
873 tx_queue >> 1,
874 ((tx_queue & 0x1) << 4) + 8);
875 q_vector->eims_value = BIT(msix_vector);
876 break;
877 default:
878 BUG();
879 break;
880 }
881
882
883 adapter->eims_enable_mask |= q_vector->eims_value;
884
885
886 q_vector->set_itr = 1;
887}
888
889
890
891
892
893
894
895
896static void igb_configure_msix(struct igb_adapter *adapter)
897{
898 u32 tmp;
899 int i, vector = 0;
900 struct e1000_hw *hw = &adapter->hw;
901
902 adapter->eims_enable_mask = 0;
903
904
905 switch (hw->mac.type) {
906 case e1000_82575:
907 tmp = rd32(E1000_CTRL_EXT);
908
909 tmp |= E1000_CTRL_EXT_PBA_CLR;
910
911
912 tmp |= E1000_CTRL_EXT_EIAME;
913 tmp |= E1000_CTRL_EXT_IRCA;
914
915 wr32(E1000_CTRL_EXT, tmp);
916
917
918 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
919 adapter->eims_other = E1000_EIMS_OTHER;
920
921 break;
922
923 case e1000_82576:
924 case e1000_82580:
925 case e1000_i350:
926 case e1000_i354:
927 case e1000_i210:
928 case e1000_i211:
929
930
931
932 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
933 E1000_GPIE_PBA | E1000_GPIE_EIAME |
934 E1000_GPIE_NSICR);
935
936
937 adapter->eims_other = BIT(vector);
938 tmp = (vector++ | E1000_IVAR_VALID) << 8;
939
940 wr32(E1000_IVAR_MISC, tmp);
941 break;
942 default:
943
944 break;
945 }
946
947 adapter->eims_enable_mask |= adapter->eims_other;
948
949 for (i = 0; i < adapter->num_q_vectors; i++)
950 igb_assign_vector(adapter->q_vector[i], vector++);
951
952 wrfl();
953}
954
955
956
957
958
959
960
961
962static int igb_request_msix(struct igb_adapter *adapter)
963{
964 struct net_device *netdev = adapter->netdev;
965 int i, err = 0, vector = 0, free_vector = 0;
966
967 err = request_irq(adapter->msix_entries[vector].vector,
968 igb_msix_other, 0, netdev->name, adapter);
969 if (err)
970 goto err_out;
971
972 for (i = 0; i < adapter->num_q_vectors; i++) {
973 struct igb_q_vector *q_vector = adapter->q_vector[i];
974
975 vector++;
976
977 q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
978
979 if (q_vector->rx.ring && q_vector->tx.ring)
980 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
981 q_vector->rx.ring->queue_index);
982 else if (q_vector->tx.ring)
983 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
984 q_vector->tx.ring->queue_index);
985 else if (q_vector->rx.ring)
986 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
987 q_vector->rx.ring->queue_index);
988 else
989 sprintf(q_vector->name, "%s-unused", netdev->name);
990
991 err = request_irq(adapter->msix_entries[vector].vector,
992 igb_msix_ring, 0, q_vector->name,
993 q_vector);
994 if (err)
995 goto err_free;
996 }
997
998 igb_configure_msix(adapter);
999 return 0;
1000
1001err_free:
1002
1003 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
1004
1005 vector--;
1006 for (i = 0; i < vector; i++) {
1007 free_irq(adapter->msix_entries[free_vector++].vector,
1008 adapter->q_vector[i]);
1009 }
1010err_out:
1011 return err;
1012}
1013
1014
1015
1016
1017
1018
1019
1020
1021static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1022{
1023 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1024
1025 adapter->q_vector[v_idx] = NULL;
1026
1027
1028
1029
1030 if (q_vector)
1031 kfree_rcu(q_vector, rcu);
1032}
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1043{
1044 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1045
1046
1047
1048
1049 if (!q_vector)
1050 return;
1051
1052 if (q_vector->tx.ring)
1053 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1054
1055 if (q_vector->rx.ring)
1056 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
1057
1058 netif_napi_del(&q_vector->napi);
1059
1060}
1061
1062static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1063{
1064 int v_idx = adapter->num_q_vectors;
1065
1066 if (adapter->flags & IGB_FLAG_HAS_MSIX)
1067 pci_disable_msix(adapter->pdev);
1068 else if (adapter->flags & IGB_FLAG_HAS_MSI)
1069 pci_disable_msi(adapter->pdev);
1070
1071 while (v_idx--)
1072 igb_reset_q_vector(adapter, v_idx);
1073}
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083static void igb_free_q_vectors(struct igb_adapter *adapter)
1084{
1085 int v_idx = adapter->num_q_vectors;
1086
1087 adapter->num_tx_queues = 0;
1088 adapter->num_rx_queues = 0;
1089 adapter->num_q_vectors = 0;
1090
1091 while (v_idx--) {
1092 igb_reset_q_vector(adapter, v_idx);
1093 igb_free_q_vector(adapter, v_idx);
1094 }
1095}
1096
1097
1098
1099
1100
1101
1102
1103
1104static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1105{
1106 igb_free_q_vectors(adapter);
1107 igb_reset_interrupt_capability(adapter);
1108}
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1119{
1120 int err;
1121 int numvecs, i;
1122
1123 if (!msix)
1124 goto msi_only;
1125 adapter->flags |= IGB_FLAG_HAS_MSIX;
1126
1127
1128 adapter->num_rx_queues = adapter->rss_queues;
1129 if (adapter->vfs_allocated_count)
1130 adapter->num_tx_queues = 1;
1131 else
1132 adapter->num_tx_queues = adapter->rss_queues;
1133
1134
1135 numvecs = adapter->num_rx_queues;
1136
1137
1138 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1139 numvecs += adapter->num_tx_queues;
1140
1141
1142 adapter->num_q_vectors = numvecs;
1143
1144
1145 numvecs++;
1146 for (i = 0; i < numvecs; i++)
1147 adapter->msix_entries[i].entry = i;
1148
1149 err = pci_enable_msix_range(adapter->pdev,
1150 adapter->msix_entries,
1151 numvecs,
1152 numvecs);
1153 if (err > 0)
1154 return;
1155
1156 igb_reset_interrupt_capability(adapter);
1157
1158
1159msi_only:
1160 adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1161#ifdef CONFIG_PCI_IOV
1162
1163 if (adapter->vf_data) {
1164 struct e1000_hw *hw = &adapter->hw;
1165
1166 pci_disable_sriov(adapter->pdev);
1167 msleep(500);
1168
1169 kfree(adapter->vf_mac_list);
1170 adapter->vf_mac_list = NULL;
1171 kfree(adapter->vf_data);
1172 adapter->vf_data = NULL;
1173 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1174 wrfl();
1175 msleep(100);
1176 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1177 }
1178#endif
1179 adapter->vfs_allocated_count = 0;
1180 adapter->rss_queues = 1;
1181 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1182 adapter->num_rx_queues = 1;
1183 adapter->num_tx_queues = 1;
1184 adapter->num_q_vectors = 1;
1185 if (!pci_enable_msi(adapter->pdev))
1186 adapter->flags |= IGB_FLAG_HAS_MSI;
1187}
1188
1189static void igb_add_ring(struct igb_ring *ring,
1190 struct igb_ring_container *head)
1191{
1192 head->ring = ring;
1193 head->count++;
1194}
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208static int igb_alloc_q_vector(struct igb_adapter *adapter,
1209 int v_count, int v_idx,
1210 int txr_count, int txr_idx,
1211 int rxr_count, int rxr_idx)
1212{
1213 struct igb_q_vector *q_vector;
1214 struct igb_ring *ring;
1215 int ring_count, size;
1216
1217
1218 if (txr_count > 1 || rxr_count > 1)
1219 return -ENOMEM;
1220
1221 ring_count = txr_count + rxr_count;
1222 size = sizeof(struct igb_q_vector) +
1223 (sizeof(struct igb_ring) * ring_count);
1224
1225
1226 q_vector = adapter->q_vector[v_idx];
1227 if (!q_vector) {
1228 q_vector = kzalloc(size, GFP_KERNEL);
1229 } else if (size > ksize(q_vector)) {
1230 kfree_rcu(q_vector, rcu);
1231 q_vector = kzalloc(size, GFP_KERNEL);
1232 } else {
1233 memset(q_vector, 0, size);
1234 }
1235 if (!q_vector)
1236 return -ENOMEM;
1237
1238
1239 netif_napi_add(adapter->netdev, &q_vector->napi,
1240 igb_poll, 64);
1241
1242
1243 adapter->q_vector[v_idx] = q_vector;
1244 q_vector->adapter = adapter;
1245
1246
1247 q_vector->tx.work_limit = adapter->tx_work_limit;
1248
1249
1250 q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
1251 q_vector->itr_val = IGB_START_ITR;
1252
1253
1254 ring = q_vector->ring;
1255
1256
1257 if (rxr_count) {
1258
1259 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1260 q_vector->itr_val = adapter->rx_itr_setting;
1261 } else {
1262
1263 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1264 q_vector->itr_val = adapter->tx_itr_setting;
1265 }
1266
1267 if (txr_count) {
1268
1269 ring->dev = &adapter->pdev->dev;
1270 ring->netdev = adapter->netdev;
1271
1272
1273 ring->q_vector = q_vector;
1274
1275
1276 igb_add_ring(ring, &q_vector->tx);
1277
1278
1279 if (adapter->hw.mac.type == e1000_82575)
1280 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1281
1282
1283 ring->count = adapter->tx_ring_count;
1284 ring->queue_index = txr_idx;
1285
1286 ring->cbs_enable = false;
1287 ring->idleslope = 0;
1288 ring->sendslope = 0;
1289 ring->hicredit = 0;
1290 ring->locredit = 0;
1291
1292 u64_stats_init(&ring->tx_syncp);
1293 u64_stats_init(&ring->tx_syncp2);
1294
1295
1296 adapter->tx_ring[txr_idx] = ring;
1297
1298
1299 ring++;
1300 }
1301
1302 if (rxr_count) {
1303
1304 ring->dev = &adapter->pdev->dev;
1305 ring->netdev = adapter->netdev;
1306
1307
1308 ring->q_vector = q_vector;
1309
1310
1311 igb_add_ring(ring, &q_vector->rx);
1312
1313
1314 if (adapter->hw.mac.type >= e1000_82576)
1315 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1316
1317
1318
1319
1320 if (adapter->hw.mac.type >= e1000_i350)
1321 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1322
1323
1324 ring->count = adapter->rx_ring_count;
1325 ring->queue_index = rxr_idx;
1326
1327 u64_stats_init(&ring->rx_syncp);
1328
1329
1330 adapter->rx_ring[rxr_idx] = ring;
1331 }
1332
1333 return 0;
1334}
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1345{
1346 int q_vectors = adapter->num_q_vectors;
1347 int rxr_remaining = adapter->num_rx_queues;
1348 int txr_remaining = adapter->num_tx_queues;
1349 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1350 int err;
1351
1352 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1353 for (; rxr_remaining; v_idx++) {
1354 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1355 0, 0, 1, rxr_idx);
1356
1357 if (err)
1358 goto err_out;
1359
1360
1361 rxr_remaining--;
1362 rxr_idx++;
1363 }
1364 }
1365
1366 for (; v_idx < q_vectors; v_idx++) {
1367 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1368 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1369
1370 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1371 tqpv, txr_idx, rqpv, rxr_idx);
1372
1373 if (err)
1374 goto err_out;
1375
1376
1377 rxr_remaining -= rqpv;
1378 txr_remaining -= tqpv;
1379 rxr_idx++;
1380 txr_idx++;
1381 }
1382
1383 return 0;
1384
1385err_out:
1386 adapter->num_tx_queues = 0;
1387 adapter->num_rx_queues = 0;
1388 adapter->num_q_vectors = 0;
1389
1390 while (v_idx--)
1391 igb_free_q_vector(adapter, v_idx);
1392
1393 return -ENOMEM;
1394}
1395
1396
1397
1398
1399
1400
1401
1402
1403static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1404{
1405 struct pci_dev *pdev = adapter->pdev;
1406 int err;
1407
1408 igb_set_interrupt_capability(adapter, msix);
1409
1410 err = igb_alloc_q_vectors(adapter);
1411 if (err) {
1412 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1413 goto err_alloc_q_vectors;
1414 }
1415
1416 igb_cache_ring_register(adapter);
1417
1418 return 0;
1419
1420err_alloc_q_vectors:
1421 igb_reset_interrupt_capability(adapter);
1422 return err;
1423}
1424
1425
1426
1427
1428
1429
1430
1431
1432static int igb_request_irq(struct igb_adapter *adapter)
1433{
1434 struct net_device *netdev = adapter->netdev;
1435 struct pci_dev *pdev = adapter->pdev;
1436 int err = 0;
1437
1438 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1439 err = igb_request_msix(adapter);
1440 if (!err)
1441 goto request_done;
1442
1443 igb_free_all_tx_resources(adapter);
1444 igb_free_all_rx_resources(adapter);
1445
1446 igb_clear_interrupt_scheme(adapter);
1447 err = igb_init_interrupt_scheme(adapter, false);
1448 if (err)
1449 goto request_done;
1450
1451 igb_setup_all_tx_resources(adapter);
1452 igb_setup_all_rx_resources(adapter);
1453 igb_configure(adapter);
1454 }
1455
1456 igb_assign_vector(adapter->q_vector[0], 0);
1457
1458 if (adapter->flags & IGB_FLAG_HAS_MSI) {
1459 err = request_irq(pdev->irq, igb_intr_msi, 0,
1460 netdev->name, adapter);
1461 if (!err)
1462 goto request_done;
1463
1464
1465 igb_reset_interrupt_capability(adapter);
1466 adapter->flags &= ~IGB_FLAG_HAS_MSI;
1467 }
1468
1469 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
1470 netdev->name, adapter);
1471
1472 if (err)
1473 dev_err(&pdev->dev, "Error %d getting interrupt\n",
1474 err);
1475
1476request_done:
1477 return err;
1478}
1479
1480static void igb_free_irq(struct igb_adapter *adapter)
1481{
1482 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1483 int vector = 0, i;
1484
1485 free_irq(adapter->msix_entries[vector++].vector, adapter);
1486
1487 for (i = 0; i < adapter->num_q_vectors; i++)
1488 free_irq(adapter->msix_entries[vector++].vector,
1489 adapter->q_vector[i]);
1490 } else {
1491 free_irq(adapter->pdev->irq, adapter);
1492 }
1493}
1494
1495
1496
1497
1498
1499static void igb_irq_disable(struct igb_adapter *adapter)
1500{
1501 struct e1000_hw *hw = &adapter->hw;
1502
1503
1504
1505
1506
1507 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1508 u32 regval = rd32(E1000_EIAM);
1509
1510 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1511 wr32(E1000_EIMC, adapter->eims_enable_mask);
1512 regval = rd32(E1000_EIAC);
1513 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
1514 }
1515
1516 wr32(E1000_IAM, 0);
1517 wr32(E1000_IMC, ~0);
1518 wrfl();
1519 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1520 int i;
1521
1522 for (i = 0; i < adapter->num_q_vectors; i++)
1523 synchronize_irq(adapter->msix_entries[i].vector);
1524 } else {
1525 synchronize_irq(adapter->pdev->irq);
1526 }
1527}
1528
1529
1530
1531
1532
1533static void igb_irq_enable(struct igb_adapter *adapter)
1534{
1535 struct e1000_hw *hw = &adapter->hw;
1536
1537 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1538 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1539 u32 regval = rd32(E1000_EIAC);
1540
1541 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1542 regval = rd32(E1000_EIAM);
1543 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
1544 wr32(E1000_EIMS, adapter->eims_enable_mask);
1545 if (adapter->vfs_allocated_count) {
1546 wr32(E1000_MBVFIMR, 0xFF);
1547 ims |= E1000_IMS_VMMB;
1548 }
1549 wr32(E1000_IMS, ims);
1550 } else {
1551 wr32(E1000_IMS, IMS_ENABLE_MASK |
1552 E1000_IMS_DRSTA);
1553 wr32(E1000_IAM, IMS_ENABLE_MASK |
1554 E1000_IMS_DRSTA);
1555 }
1556}
1557
1558static void igb_update_mng_vlan(struct igb_adapter *adapter)
1559{
1560 struct e1000_hw *hw = &adapter->hw;
1561 u16 pf_id = adapter->vfs_allocated_count;
1562 u16 vid = adapter->hw.mng_cookie.vlan_id;
1563 u16 old_vid = adapter->mng_vlan_id;
1564
1565 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1566
1567 igb_vfta_set(hw, vid, pf_id, true, true);
1568 adapter->mng_vlan_id = vid;
1569 } else {
1570 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1571 }
1572
1573 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1574 (vid != old_vid) &&
1575 !test_bit(old_vid, adapter->active_vlans)) {
1576
1577 igb_vfta_set(hw, vid, pf_id, false, true);
1578 }
1579}
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589static void igb_release_hw_control(struct igb_adapter *adapter)
1590{
1591 struct e1000_hw *hw = &adapter->hw;
1592 u32 ctrl_ext;
1593
1594
1595 ctrl_ext = rd32(E1000_CTRL_EXT);
1596 wr32(E1000_CTRL_EXT,
1597 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1598}
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608static void igb_get_hw_control(struct igb_adapter *adapter)
1609{
1610 struct e1000_hw *hw = &adapter->hw;
1611 u32 ctrl_ext;
1612
1613
1614 ctrl_ext = rd32(E1000_CTRL_EXT);
1615 wr32(E1000_CTRL_EXT,
1616 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1617}
1618
1619static void enable_fqtss(struct igb_adapter *adapter, bool enable)
1620{
1621 struct net_device *netdev = adapter->netdev;
1622 struct e1000_hw *hw = &adapter->hw;
1623
1624 WARN_ON(hw->mac.type != e1000_i210);
1625
1626 if (enable)
1627 adapter->flags |= IGB_FLAG_FQTSS;
1628 else
1629 adapter->flags &= ~IGB_FLAG_FQTSS;
1630
1631 if (netif_running(netdev))
1632 schedule_work(&adapter->reset_task);
1633}
1634
1635static bool is_fqtss_enabled(struct igb_adapter *adapter)
1636{
1637 return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
1638}
1639
1640static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
1641 enum tx_queue_prio prio)
1642{
1643 u32 val;
1644
1645 WARN_ON(hw->mac.type != e1000_i210);
1646 WARN_ON(queue < 0 || queue > 4);
1647
1648 val = rd32(E1000_I210_TXDCTL(queue));
1649
1650 if (prio == TX_QUEUE_PRIO_HIGH)
1651 val |= E1000_TXDCTL_PRIORITY;
1652 else
1653 val &= ~E1000_TXDCTL_PRIORITY;
1654
1655 wr32(E1000_I210_TXDCTL(queue), val);
1656}
1657
1658static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
1659{
1660 u32 val;
1661
1662 WARN_ON(hw->mac.type != e1000_i210);
1663 WARN_ON(queue < 0 || queue > 1);
1664
1665 val = rd32(E1000_I210_TQAVCC(queue));
1666
1667 if (mode == QUEUE_MODE_STREAM_RESERVATION)
1668 val |= E1000_TQAVCC_QUEUEMODE;
1669 else
1670 val &= ~E1000_TQAVCC_QUEUEMODE;
1671
1672 wr32(E1000_I210_TQAVCC(queue), val);
1673}
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
1690 bool enable, int idleslope, int sendslope,
1691 int hicredit, int locredit)
1692{
1693 struct net_device *netdev = adapter->netdev;
1694 struct e1000_hw *hw = &adapter->hw;
1695 u32 tqavcc;
1696 u16 value;
1697
1698 WARN_ON(hw->mac.type != e1000_i210);
1699 WARN_ON(queue < 0 || queue > 1);
1700
1701 if (enable) {
1702 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1703 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762 value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
1763
1764 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1765 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1766 tqavcc |= value;
1767 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1768
1769 wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735);
1770 } else {
1771 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
1772 set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
1773
1774
1775 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1776 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1777 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1778
1779
1780 wr32(E1000_I210_TQAVHC(queue), 0);
1781 }
1782
1783
1784
1785
1786
1787
1788 netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
1789 (enable) ? "enabled" : "disabled", queue,
1790 idleslope, sendslope, hicredit, locredit);
1791}
1792
1793static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1794 bool enable, int idleslope, int sendslope,
1795 int hicredit, int locredit)
1796{
1797 struct igb_ring *ring;
1798
1799 if (queue < 0 || queue > adapter->num_tx_queues)
1800 return -EINVAL;
1801
1802 ring = adapter->tx_ring[queue];
1803
1804 ring->cbs_enable = enable;
1805 ring->idleslope = idleslope;
1806 ring->sendslope = sendslope;
1807 ring->hicredit = hicredit;
1808 ring->locredit = locredit;
1809
1810 return 0;
1811}
1812
1813static bool is_any_cbs_enabled(struct igb_adapter *adapter)
1814{
1815 struct igb_ring *ring;
1816 int i;
1817
1818 for (i = 0; i < adapter->num_tx_queues; i++) {
1819 ring = adapter->tx_ring[i];
1820
1821 if (ring->cbs_enable)
1822 return true;
1823 }
1824
1825 return false;
1826}
1827
1828static void igb_setup_tx_mode(struct igb_adapter *adapter)
1829{
1830 struct net_device *netdev = adapter->netdev;
1831 struct e1000_hw *hw = &adapter->hw;
1832 u32 val;
1833
1834
1835 if (hw->mac.type != e1000_i210)
1836 return;
1837
1838 if (is_fqtss_enabled(adapter)) {
1839 int i, max_queue;
1840
1841
1842
1843
1844
1845 val = rd32(E1000_I210_TQAVCTRL);
1846 val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB;
1847 val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1848 wr32(E1000_I210_TQAVCTRL, val);
1849
1850
1851
1852
1853 val = rd32(E1000_TXPBS);
1854 val &= ~I210_TXPBSIZE_MASK;
1855 val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
1856 I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
1857 wr32(E1000_TXPBS, val);
1858
1859 val = rd32(E1000_RXPBS);
1860 val &= ~I210_RXPBSIZE_MASK;
1861 val |= I210_RXPBSIZE_PB_32KB;
1862 wr32(E1000_RXPBS, val);
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875 val = (4096 - 1) / 64;
1876 wr32(E1000_I210_DTXMXPKTSZ, val);
1877
1878
1879
1880
1881
1882
1883 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1884 adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1885
1886 for (i = 0; i < max_queue; i++) {
1887 struct igb_ring *ring = adapter->tx_ring[i];
1888
1889 igb_configure_cbs(adapter, i, ring->cbs_enable,
1890 ring->idleslope, ring->sendslope,
1891 ring->hicredit, ring->locredit);
1892 }
1893 } else {
1894 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1895 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1896 wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1897
1898 val = rd32(E1000_I210_TQAVCTRL);
1899
1900
1901
1902
1903 val &= ~E1000_TQAVCTRL_XMIT_MODE;
1904 wr32(E1000_I210_TQAVCTRL, val);
1905 }
1906
1907 netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1908 "enabled" : "disabled");
1909}
1910
1911
1912
1913
1914
1915static void igb_configure(struct igb_adapter *adapter)
1916{
1917 struct net_device *netdev = adapter->netdev;
1918 int i;
1919
1920 igb_get_hw_control(adapter);
1921 igb_set_rx_mode(netdev);
1922 igb_setup_tx_mode(adapter);
1923
1924 igb_restore_vlan(adapter);
1925
1926 igb_setup_tctl(adapter);
1927 igb_setup_mrqc(adapter);
1928 igb_setup_rctl(adapter);
1929
1930 igb_nfc_filter_restore(adapter);
1931 igb_configure_tx(adapter);
1932 igb_configure_rx(adapter);
1933
1934 igb_rx_fifo_flush_82575(&adapter->hw);
1935
1936
1937
1938
1939
1940 for (i = 0; i < adapter->num_rx_queues; i++) {
1941 struct igb_ring *ring = adapter->rx_ring[i];
1942 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
1943 }
1944}
1945
1946
1947
1948
1949
1950void igb_power_up_link(struct igb_adapter *adapter)
1951{
1952 igb_reset_phy(&adapter->hw);
1953
1954 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1955 igb_power_up_phy_copper(&adapter->hw);
1956 else
1957 igb_power_up_serdes_link_82575(&adapter->hw);
1958
1959 igb_setup_link(&adapter->hw);
1960}
1961
1962
1963
1964
1965
1966static void igb_power_down_link(struct igb_adapter *adapter)
1967{
1968 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1969 igb_power_down_phy_copper_82575(&adapter->hw);
1970 else
1971 igb_shutdown_serdes_link_82575(&adapter->hw);
1972}
1973
1974
1975
1976
1977
1978static void igb_check_swap_media(struct igb_adapter *adapter)
1979{
1980 struct e1000_hw *hw = &adapter->hw;
1981 u32 ctrl_ext, connsw;
1982 bool swap_now = false;
1983
1984 ctrl_ext = rd32(E1000_CTRL_EXT);
1985 connsw = rd32(E1000_CONNSW);
1986
1987
1988
1989
1990
1991 if ((hw->phy.media_type == e1000_media_type_copper) &&
1992 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
1993 swap_now = true;
1994 } else if (!(connsw & E1000_CONNSW_SERDESD)) {
1995
1996 if (adapter->copper_tries < 4) {
1997 adapter->copper_tries++;
1998 connsw |= E1000_CONNSW_AUTOSENSE_CONF;
1999 wr32(E1000_CONNSW, connsw);
2000 return;
2001 } else {
2002 adapter->copper_tries = 0;
2003 if ((connsw & E1000_CONNSW_PHYSD) &&
2004 (!(connsw & E1000_CONNSW_PHY_PDN))) {
2005 swap_now = true;
2006 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
2007 wr32(E1000_CONNSW, connsw);
2008 }
2009 }
2010 }
2011
2012 if (!swap_now)
2013 return;
2014
2015 switch (hw->phy.media_type) {
2016 case e1000_media_type_copper:
2017 netdev_info(adapter->netdev,
2018 "MAS: changing media to fiber/serdes\n");
2019 ctrl_ext |=
2020 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2021 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2022 adapter->copper_tries = 0;
2023 break;
2024 case e1000_media_type_internal_serdes:
2025 case e1000_media_type_fiber:
2026 netdev_info(adapter->netdev,
2027 "MAS: changing media to copper\n");
2028 ctrl_ext &=
2029 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2030 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2031 break;
2032 default:
2033
2034 netdev_err(adapter->netdev,
2035 "AMS: Invalid media type found, returning\n");
2036 break;
2037 }
2038 wr32(E1000_CTRL_EXT, ctrl_ext);
2039}
2040
2041
2042
2043
2044
2045int igb_up(struct igb_adapter *adapter)
2046{
2047 struct e1000_hw *hw = &adapter->hw;
2048 int i;
2049
2050
2051 igb_configure(adapter);
2052
2053 clear_bit(__IGB_DOWN, &adapter->state);
2054
2055 for (i = 0; i < adapter->num_q_vectors; i++)
2056 napi_enable(&(adapter->q_vector[i]->napi));
2057
2058 if (adapter->flags & IGB_FLAG_HAS_MSIX)
2059 igb_configure_msix(adapter);
2060 else
2061 igb_assign_vector(adapter->q_vector[0], 0);
2062
2063
2064 rd32(E1000_ICR);
2065 igb_irq_enable(adapter);
2066
2067
2068 if (adapter->vfs_allocated_count) {
2069 u32 reg_data = rd32(E1000_CTRL_EXT);
2070
2071 reg_data |= E1000_CTRL_EXT_PFRSTD;
2072 wr32(E1000_CTRL_EXT, reg_data);
2073 }
2074
2075 netif_tx_start_all_queues(adapter->netdev);
2076
2077
2078 hw->mac.get_link_status = 1;
2079 schedule_work(&adapter->watchdog_task);
2080
2081 if ((adapter->flags & IGB_FLAG_EEE) &&
2082 (!hw->dev_spec._82575.eee_disable))
2083 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2084
2085 return 0;
2086}
2087
2088void igb_down(struct igb_adapter *adapter)
2089{
2090 struct net_device *netdev = adapter->netdev;
2091 struct e1000_hw *hw = &adapter->hw;
2092 u32 tctl, rctl;
2093 int i;
2094
2095
2096
2097
2098 set_bit(__IGB_DOWN, &adapter->state);
2099
2100
2101 rctl = rd32(E1000_RCTL);
2102 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2103
2104
2105 igb_nfc_filter_exit(adapter);
2106
2107 netif_carrier_off(netdev);
2108 netif_tx_stop_all_queues(netdev);
2109
2110
2111 tctl = rd32(E1000_TCTL);
2112 tctl &= ~E1000_TCTL_EN;
2113 wr32(E1000_TCTL, tctl);
2114
2115 wrfl();
2116 usleep_range(10000, 11000);
2117
2118 igb_irq_disable(adapter);
2119
2120 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2121
2122 for (i = 0; i < adapter->num_q_vectors; i++) {
2123 if (adapter->q_vector[i]) {
2124 napi_synchronize(&adapter->q_vector[i]->napi);
2125 napi_disable(&adapter->q_vector[i]->napi);
2126 }
2127 }
2128
2129 del_timer_sync(&adapter->watchdog_timer);
2130 del_timer_sync(&adapter->phy_info_timer);
2131
2132
2133 spin_lock(&adapter->stats64_lock);
2134 igb_update_stats(adapter);
2135 spin_unlock(&adapter->stats64_lock);
2136
2137 adapter->link_speed = 0;
2138 adapter->link_duplex = 0;
2139
2140 if (!pci_channel_offline(adapter->pdev))
2141 igb_reset(adapter);
2142
2143
2144 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2145
2146 igb_clean_all_tx_rings(adapter);
2147 igb_clean_all_rx_rings(adapter);
2148#ifdef CONFIG_IGB_DCA
2149
2150
2151 igb_setup_dca(adapter);
2152#endif
2153}
2154
2155void igb_reinit_locked(struct igb_adapter *adapter)
2156{
2157 WARN_ON(in_interrupt());
2158 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2159 usleep_range(1000, 2000);
2160 igb_down(adapter);
2161 igb_up(adapter);
2162 clear_bit(__IGB_RESETTING, &adapter->state);
2163}
2164
2165
2166
2167
2168
2169static void igb_enable_mas(struct igb_adapter *adapter)
2170{
2171 struct e1000_hw *hw = &adapter->hw;
2172 u32 connsw = rd32(E1000_CONNSW);
2173
2174
2175 if ((hw->phy.media_type == e1000_media_type_copper) &&
2176 (!(connsw & E1000_CONNSW_SERDESD))) {
2177 connsw |= E1000_CONNSW_ENRGSRC;
2178 connsw |= E1000_CONNSW_AUTOSENSE_EN;
2179 wr32(E1000_CONNSW, connsw);
2180 wrfl();
2181 }
2182}
2183
2184void igb_reset(struct igb_adapter *adapter)
2185{
2186 struct pci_dev *pdev = adapter->pdev;
2187 struct e1000_hw *hw = &adapter->hw;
2188 struct e1000_mac_info *mac = &hw->mac;
2189 struct e1000_fc_info *fc = &hw->fc;
2190 u32 pba, hwm;
2191
2192
2193
2194
2195 switch (mac->type) {
2196 case e1000_i350:
2197 case e1000_i354:
2198 case e1000_82580:
2199 pba = rd32(E1000_RXPBS);
2200 pba = igb_rxpbs_adjust_82580(pba);
2201 break;
2202 case e1000_82576:
2203 pba = rd32(E1000_RXPBS);
2204 pba &= E1000_RXPBS_SIZE_MASK_82576;
2205 break;
2206 case e1000_82575:
2207 case e1000_i210:
2208 case e1000_i211:
2209 default:
2210 pba = E1000_PBA_34K;
2211 break;
2212 }
2213
2214 if (mac->type == e1000_82575) {
2215 u32 min_rx_space, min_tx_space, needed_tx_space;
2216
2217
2218 wr32(E1000_PBA, pba);
2219
2220
2221
2222
2223
2224
2225
2226
2227 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2228
2229
2230
2231
2232
2233
2234 min_tx_space = adapter->max_frame_size;
2235 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2236 min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
2237
2238
2239 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
2240
2241
2242
2243
2244
2245 if (needed_tx_space < pba) {
2246 pba -= needed_tx_space;
2247
2248
2249
2250
2251 if (pba < min_rx_space)
2252 pba = min_rx_space;
2253 }
2254
2255
2256 wr32(E1000_PBA, pba);
2257 }
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
2268
2269 fc->high_water = hwm & 0xFFFFFFF0;
2270 fc->low_water = fc->high_water - 16;
2271 fc->pause_time = 0xFFFF;
2272 fc->send_xon = 1;
2273 fc->current_mode = fc->requested_mode;
2274
2275
2276 if (adapter->vfs_allocated_count) {
2277 int i;
2278
2279 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
2280 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2281
2282
2283 igb_ping_all_vfs(adapter);
2284
2285
2286 wr32(E1000_VFRE, 0);
2287 wr32(E1000_VFTE, 0);
2288 }
2289
2290
2291 hw->mac.ops.reset_hw(hw);
2292 wr32(E1000_WUC, 0);
2293
2294 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2295
2296 adapter->ei.get_invariants(hw);
2297 adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2298 }
2299 if ((mac->type == e1000_82575) &&
2300 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2301 igb_enable_mas(adapter);
2302 }
2303 if (hw->mac.ops.init_hw(hw))
2304 dev_err(&pdev->dev, "Hardware Error\n");
2305
2306
2307 igb_flush_mac_table(adapter);
2308 __dev_uc_unsync(adapter->netdev, NULL);
2309
2310
2311 igb_set_default_mac_filter(adapter);
2312
2313
2314
2315
2316 if (!hw->mac.autoneg)
2317 igb_force_mac_fc(hw);
2318
2319 igb_init_dmac(adapter, pba);
2320#ifdef CONFIG_IGB_HWMON
2321
2322 if (!test_bit(__IGB_DOWN, &adapter->state)) {
2323 if (mac->type == e1000_i350 && hw->bus.func == 0) {
2324
2325
2326
2327 if (adapter->ets)
2328 mac->ops.init_thermal_sensor_thresh(hw);
2329 }
2330 }
2331#endif
2332
2333 if (hw->phy.media_type == e1000_media_type_copper) {
2334 switch (mac->type) {
2335 case e1000_i350:
2336 case e1000_i210:
2337 case e1000_i211:
2338 igb_set_eee_i350(hw, true, true);
2339 break;
2340 case e1000_i354:
2341 igb_set_eee_i354(hw, true, true);
2342 break;
2343 default:
2344 break;
2345 }
2346 }
2347 if (!netif_running(adapter->netdev))
2348 igb_power_down_link(adapter);
2349
2350 igb_update_mng_vlan(adapter);
2351
2352
2353 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2354
2355
2356 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2357 igb_ptp_reset(adapter);
2358
2359 igb_get_phy_info(hw);
2360}
2361
2362static netdev_features_t igb_fix_features(struct net_device *netdev,
2363 netdev_features_t features)
2364{
2365
2366
2367
2368 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2369 features |= NETIF_F_HW_VLAN_CTAG_TX;
2370 else
2371 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2372
2373 return features;
2374}
2375
2376static int igb_set_features(struct net_device *netdev,
2377 netdev_features_t features)
2378{
2379 netdev_features_t changed = netdev->features ^ features;
2380 struct igb_adapter *adapter = netdev_priv(netdev);
2381
2382 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2383 igb_vlan_mode(netdev, features);
2384
2385 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2386 return 0;
2387
2388 if (!(features & NETIF_F_NTUPLE)) {
2389 struct hlist_node *node2;
2390 struct igb_nfc_filter *rule;
2391
2392 spin_lock(&adapter->nfc_lock);
2393 hlist_for_each_entry_safe(rule, node2,
2394 &adapter->nfc_filter_list, nfc_node) {
2395 igb_erase_filter(adapter, rule);
2396 hlist_del(&rule->nfc_node);
2397 kfree(rule);
2398 }
2399 spin_unlock(&adapter->nfc_lock);
2400 adapter->nfc_filter_count = 0;
2401 }
2402
2403 netdev->features = features;
2404
2405 if (netif_running(netdev))
2406 igb_reinit_locked(adapter);
2407 else
2408 igb_reset(adapter);
2409
2410 return 0;
2411}
2412
2413static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2414 struct net_device *dev,
2415 const unsigned char *addr, u16 vid,
2416 u16 flags)
2417{
2418
2419 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2420 struct igb_adapter *adapter = netdev_priv(dev);
2421 int vfn = adapter->vfs_allocated_count;
2422
2423 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2424 return -ENOMEM;
2425 }
2426
2427 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2428}
2429
2430#define IGB_MAX_MAC_HDR_LEN 127
2431#define IGB_MAX_NETWORK_HDR_LEN 511
2432
2433static netdev_features_t
2434igb_features_check(struct sk_buff *skb, struct net_device *dev,
2435 netdev_features_t features)
2436{
2437 unsigned int network_hdr_len, mac_hdr_len;
2438
2439
2440 mac_hdr_len = skb_network_header(skb) - skb->data;
2441 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2442 return features & ~(NETIF_F_HW_CSUM |
2443 NETIF_F_SCTP_CRC |
2444 NETIF_F_HW_VLAN_CTAG_TX |
2445 NETIF_F_TSO |
2446 NETIF_F_TSO6);
2447
2448 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2449 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2450 return features & ~(NETIF_F_HW_CSUM |
2451 NETIF_F_SCTP_CRC |
2452 NETIF_F_TSO |
2453 NETIF_F_TSO6);
2454
2455
2456
2457
2458 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2459 features &= ~NETIF_F_TSO;
2460
2461 return features;
2462}
2463
2464static int igb_offload_cbs(struct igb_adapter *adapter,
2465 struct tc_cbs_qopt_offload *qopt)
2466{
2467 struct e1000_hw *hw = &adapter->hw;
2468 int err;
2469
2470
2471 if (hw->mac.type != e1000_i210)
2472 return -EOPNOTSUPP;
2473
2474
2475 if (qopt->queue < 0 || qopt->queue > 1)
2476 return -EINVAL;
2477
2478 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2479 qopt->idleslope, qopt->sendslope,
2480 qopt->hicredit, qopt->locredit);
2481 if (err)
2482 return err;
2483
2484 if (is_fqtss_enabled(adapter)) {
2485 igb_configure_cbs(adapter, qopt->queue, qopt->enable,
2486 qopt->idleslope, qopt->sendslope,
2487 qopt->hicredit, qopt->locredit);
2488
2489 if (!is_any_cbs_enabled(adapter))
2490 enable_fqtss(adapter, false);
2491
2492 } else {
2493 enable_fqtss(adapter, true);
2494 }
2495
2496 return 0;
2497}
2498
2499static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2500 void *type_data)
2501{
2502 struct igb_adapter *adapter = netdev_priv(dev);
2503
2504 switch (type) {
2505 case TC_SETUP_QDISC_CBS:
2506 return igb_offload_cbs(adapter, type_data);
2507
2508 default:
2509 return -EOPNOTSUPP;
2510 }
2511}
2512
2513static const struct net_device_ops igb_netdev_ops = {
2514 .ndo_open = igb_open,
2515 .ndo_stop = igb_close,
2516 .ndo_start_xmit = igb_xmit_frame,
2517 .ndo_get_stats64 = igb_get_stats64,
2518 .ndo_set_rx_mode = igb_set_rx_mode,
2519 .ndo_set_mac_address = igb_set_mac,
2520 .ndo_change_mtu = igb_change_mtu,
2521 .ndo_do_ioctl = igb_ioctl,
2522 .ndo_tx_timeout = igb_tx_timeout,
2523 .ndo_validate_addr = eth_validate_addr,
2524 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
2525 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
2526 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
2527 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
2528 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
2529 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
2530 .ndo_get_vf_config = igb_ndo_get_vf_config,
2531#ifdef CONFIG_NET_POLL_CONTROLLER
2532 .ndo_poll_controller = igb_netpoll,
2533#endif
2534 .ndo_fix_features = igb_fix_features,
2535 .ndo_set_features = igb_set_features,
2536 .ndo_fdb_add = igb_ndo_fdb_add,
2537 .ndo_features_check = igb_features_check,
2538 .ndo_setup_tc = igb_setup_tc,
2539};
2540
2541
2542
2543
2544
2545void igb_set_fw_version(struct igb_adapter *adapter)
2546{
2547 struct e1000_hw *hw = &adapter->hw;
2548 struct e1000_fw_version fw;
2549
2550 igb_get_fw_version(hw, &fw);
2551
2552 switch (hw->mac.type) {
2553 case e1000_i210:
2554 case e1000_i211:
2555 if (!(igb_get_flash_presence_i210(hw))) {
2556 snprintf(adapter->fw_version,
2557 sizeof(adapter->fw_version),
2558 "%2d.%2d-%d",
2559 fw.invm_major, fw.invm_minor,
2560 fw.invm_img_type);
2561 break;
2562 }
2563
2564 default:
2565
2566 if (fw.or_valid) {
2567 snprintf(adapter->fw_version,
2568 sizeof(adapter->fw_version),
2569 "%d.%d, 0x%08x, %d.%d.%d",
2570 fw.eep_major, fw.eep_minor, fw.etrack_id,
2571 fw.or_major, fw.or_build, fw.or_patch);
2572
2573 } else if (fw.etrack_id != 0X0000) {
2574 snprintf(adapter->fw_version,
2575 sizeof(adapter->fw_version),
2576 "%d.%d, 0x%08x",
2577 fw.eep_major, fw.eep_minor, fw.etrack_id);
2578 } else {
2579 snprintf(adapter->fw_version,
2580 sizeof(adapter->fw_version),
2581 "%d.%d.%d",
2582 fw.eep_major, fw.eep_minor, fw.eep_build);
2583 }
2584 break;
2585 }
2586}
2587
2588
2589
2590
2591
2592
2593static void igb_init_mas(struct igb_adapter *adapter)
2594{
2595 struct e1000_hw *hw = &adapter->hw;
2596 u16 eeprom_data;
2597
2598 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
2599 switch (hw->bus.func) {
2600 case E1000_FUNC_0:
2601 if (eeprom_data & IGB_MAS_ENABLE_0) {
2602 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2603 netdev_info(adapter->netdev,
2604 "MAS: Enabling Media Autosense for port %d\n",
2605 hw->bus.func);
2606 }
2607 break;
2608 case E1000_FUNC_1:
2609 if (eeprom_data & IGB_MAS_ENABLE_1) {
2610 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2611 netdev_info(adapter->netdev,
2612 "MAS: Enabling Media Autosense for port %d\n",
2613 hw->bus.func);
2614 }
2615 break;
2616 case E1000_FUNC_2:
2617 if (eeprom_data & IGB_MAS_ENABLE_2) {
2618 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2619 netdev_info(adapter->netdev,
2620 "MAS: Enabling Media Autosense for port %d\n",
2621 hw->bus.func);
2622 }
2623 break;
2624 case E1000_FUNC_3:
2625 if (eeprom_data & IGB_MAS_ENABLE_3) {
2626 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2627 netdev_info(adapter->netdev,
2628 "MAS: Enabling Media Autosense for port %d\n",
2629 hw->bus.func);
2630 }
2631 break;
2632 default:
2633
2634 netdev_err(adapter->netdev,
2635 "MAS: Invalid port configuration, returning\n");
2636 break;
2637 }
2638}
2639
2640
2641
2642
2643
2644static s32 igb_init_i2c(struct igb_adapter *adapter)
2645{
2646 s32 status = 0;
2647
2648
2649 if (adapter->hw.mac.type != e1000_i350)
2650 return 0;
2651
2652
2653
2654
2655
2656 adapter->i2c_adap.owner = THIS_MODULE;
2657 adapter->i2c_algo = igb_i2c_algo;
2658 adapter->i2c_algo.data = adapter;
2659 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
2660 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
2661 strlcpy(adapter->i2c_adap.name, "igb BB",
2662 sizeof(adapter->i2c_adap.name));
2663 status = i2c_bit_add_bus(&adapter->i2c_adap);
2664 return status;
2665}
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2679{
2680 struct net_device *netdev;
2681 struct igb_adapter *adapter;
2682 struct e1000_hw *hw;
2683 u16 eeprom_data = 0;
2684 s32 ret_val;
2685 static int global_quad_port_a;
2686 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
2687 int err, pci_using_dac;
2688 u8 part_str[E1000_PBANUM_LENGTH];
2689
2690
2691
2692
2693 if (pdev->is_virtfn) {
2694 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
2695 pci_name(pdev), pdev->vendor, pdev->device);
2696 return -EINVAL;
2697 }
2698
2699 err = pci_enable_device_mem(pdev);
2700 if (err)
2701 return err;
2702
2703 pci_using_dac = 0;
2704 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2705 if (!err) {
2706 pci_using_dac = 1;
2707 } else {
2708 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2709 if (err) {
2710 dev_err(&pdev->dev,
2711 "No usable DMA configuration, aborting\n");
2712 goto err_dma;
2713 }
2714 }
2715
2716 err = pci_request_mem_regions(pdev, igb_driver_name);
2717 if (err)
2718 goto err_pci_reg;
2719
2720 pci_enable_pcie_error_reporting(pdev);
2721
2722 pci_set_master(pdev);
2723 pci_save_state(pdev);
2724
2725 err = -ENOMEM;
2726 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
2727 IGB_MAX_TX_QUEUES);
2728 if (!netdev)
2729 goto err_alloc_etherdev;
2730
2731 SET_NETDEV_DEV(netdev, &pdev->dev);
2732
2733 pci_set_drvdata(pdev, netdev);
2734 adapter = netdev_priv(netdev);
2735 adapter->netdev = netdev;
2736 adapter->pdev = pdev;
2737 hw = &adapter->hw;
2738 hw->back = adapter;
2739 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2740
2741 err = -EIO;
2742 adapter->io_addr = pci_iomap(pdev, 0, 0);
2743 if (!adapter->io_addr)
2744 goto err_ioremap;
2745
2746 hw->hw_addr = adapter->io_addr;
2747
2748 netdev->netdev_ops = &igb_netdev_ops;
2749 igb_set_ethtool_ops(netdev);
2750 netdev->watchdog_timeo = 5 * HZ;
2751
2752 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2753
2754 netdev->mem_start = pci_resource_start(pdev, 0);
2755 netdev->mem_end = pci_resource_end(pdev, 0);
2756
2757
2758 hw->vendor_id = pdev->vendor;
2759 hw->device_id = pdev->device;
2760 hw->revision_id = pdev->revision;
2761 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2762 hw->subsystem_device_id = pdev->subsystem_device;
2763
2764
2765 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
2766 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
2767 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
2768
2769 err = ei->get_invariants(hw);
2770 if (err)
2771 goto err_sw_init;
2772
2773
2774 err = igb_sw_init(adapter);
2775 if (err)
2776 goto err_sw_init;
2777
2778 igb_get_bus_info_pcie(hw);
2779
2780 hw->phy.autoneg_wait_to_complete = false;
2781
2782
2783 if (hw->phy.media_type == e1000_media_type_copper) {
2784 hw->phy.mdix = AUTO_ALL_MODES;
2785 hw->phy.disable_polarity_correction = false;
2786 hw->phy.ms_type = e1000_ms_hw_default;
2787 }
2788
2789 if (igb_check_reset_block(hw))
2790 dev_info(&pdev->dev,
2791 "PHY reset is blocked due to SOL/IDER session.\n");
2792
2793
2794
2795
2796
2797 netdev->features |= NETIF_F_SG |
2798 NETIF_F_TSO |
2799 NETIF_F_TSO6 |
2800 NETIF_F_RXHASH |
2801 NETIF_F_RXCSUM |
2802 NETIF_F_HW_CSUM;
2803
2804 if (hw->mac.type >= e1000_82576)
2805 netdev->features |= NETIF_F_SCTP_CRC;
2806
2807#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
2808 NETIF_F_GSO_GRE_CSUM | \
2809 NETIF_F_GSO_IPXIP4 | \
2810 NETIF_F_GSO_IPXIP6 | \
2811 NETIF_F_GSO_UDP_TUNNEL | \
2812 NETIF_F_GSO_UDP_TUNNEL_CSUM)
2813
2814 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
2815 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
2816
2817
2818 netdev->hw_features |= netdev->features |
2819 NETIF_F_HW_VLAN_CTAG_RX |
2820 NETIF_F_HW_VLAN_CTAG_TX |
2821 NETIF_F_RXALL;
2822
2823 if (hw->mac.type >= e1000_i350)
2824 netdev->hw_features |= NETIF_F_NTUPLE;
2825
2826 if (pci_using_dac)
2827 netdev->features |= NETIF_F_HIGHDMA;
2828
2829 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
2830 netdev->mpls_features |= NETIF_F_HW_CSUM;
2831 netdev->hw_enc_features |= netdev->vlan_features;
2832
2833
2834 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
2835 NETIF_F_HW_VLAN_CTAG_RX |
2836 NETIF_F_HW_VLAN_CTAG_TX;
2837
2838 netdev->priv_flags |= IFF_SUPP_NOFCS;
2839
2840 netdev->priv_flags |= IFF_UNICAST_FLT;
2841
2842
2843 netdev->min_mtu = ETH_MIN_MTU;
2844 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
2845
2846 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
2847
2848
2849
2850
2851 hw->mac.ops.reset_hw(hw);
2852
2853
2854
2855
2856 switch (hw->mac.type) {
2857 case e1000_i210:
2858 case e1000_i211:
2859 if (igb_get_flash_presence_i210(hw)) {
2860 if (hw->nvm.ops.validate(hw) < 0) {
2861 dev_err(&pdev->dev,
2862 "The NVM Checksum Is Not Valid\n");
2863 err = -EIO;
2864 goto err_eeprom;
2865 }
2866 }
2867 break;
2868 default:
2869 if (hw->nvm.ops.validate(hw) < 0) {
2870 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2871 err = -EIO;
2872 goto err_eeprom;
2873 }
2874 break;
2875 }
2876
2877 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
2878
2879 if (hw->mac.ops.read_mac_addr(hw))
2880 dev_err(&pdev->dev, "NVM Read Error\n");
2881 }
2882
2883 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2884
2885 if (!is_valid_ether_addr(netdev->dev_addr)) {
2886 dev_err(&pdev->dev, "Invalid MAC Address\n");
2887 err = -EIO;
2888 goto err_eeprom;
2889 }
2890
2891 igb_set_default_mac_filter(adapter);
2892
2893
2894 igb_set_fw_version(adapter);
2895
2896
2897 if (hw->mac.type == e1000_i210) {
2898 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
2899 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
2900 }
2901
2902 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
2903 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
2904
2905 INIT_WORK(&adapter->reset_task, igb_reset_task);
2906 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2907
2908
2909 adapter->fc_autoneg = true;
2910 hw->mac.autoneg = true;
2911 hw->phy.autoneg_advertised = 0x2f;
2912
2913 hw->fc.requested_mode = e1000_fc_default;
2914 hw->fc.current_mode = e1000_fc_default;
2915
2916 igb_validate_mdi_setting(hw);
2917
2918
2919 if (hw->bus.func == 0)
2920 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2921
2922
2923 if (hw->mac.type >= e1000_82580)
2924 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2925 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2926 &eeprom_data);
2927 else if (hw->bus.func == 1)
2928 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
2929
2930 if (eeprom_data & IGB_EEPROM_APME)
2931 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2932
2933
2934
2935
2936
2937 switch (pdev->device) {
2938 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2939 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2940 break;
2941 case E1000_DEV_ID_82575EB_FIBER_SERDES:
2942 case E1000_DEV_ID_82576_FIBER:
2943 case E1000_DEV_ID_82576_SERDES:
2944
2945
2946
2947 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2948 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2949 break;
2950 case E1000_DEV_ID_82576_QUAD_COPPER:
2951 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
2952
2953 if (global_quad_port_a != 0)
2954 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2955 else
2956 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2957
2958 if (++global_quad_port_a == 4)
2959 global_quad_port_a = 0;
2960 break;
2961 default:
2962
2963 if (!device_can_wakeup(&adapter->pdev->dev))
2964 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2965 }
2966
2967
2968 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
2969 adapter->wol |= E1000_WUFC_MAG;
2970
2971
2972 if ((hw->mac.type == e1000_i350) &&
2973 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
2974 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2975 adapter->wol = 0;
2976 }
2977
2978
2979
2980
2981 if (((hw->mac.type == e1000_i350) ||
2982 (hw->mac.type == e1000_i354)) &&
2983 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
2984 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2985 adapter->wol = 0;
2986 }
2987 if (hw->mac.type == e1000_i350) {
2988 if (((pdev->subsystem_device == 0x5001) ||
2989 (pdev->subsystem_device == 0x5002)) &&
2990 (hw->bus.func == 0)) {
2991 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2992 adapter->wol = 0;
2993 }
2994 if (pdev->subsystem_device == 0x1F52)
2995 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2996 }
2997
2998 device_set_wakeup_enable(&adapter->pdev->dev,
2999 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
3000
3001
3002 igb_reset(adapter);
3003
3004
3005 err = igb_init_i2c(adapter);
3006 if (err) {
3007 dev_err(&pdev->dev, "failed to init i2c interface\n");
3008 goto err_eeprom;
3009 }
3010
3011
3012
3013
3014 igb_get_hw_control(adapter);
3015
3016 strcpy(netdev->name, "eth%d");
3017 err = register_netdev(netdev);
3018 if (err)
3019 goto err_register;
3020
3021
3022 netif_carrier_off(netdev);
3023
3024#ifdef CONFIG_IGB_DCA
3025 if (dca_add_requester(&pdev->dev) == 0) {
3026 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3027 dev_info(&pdev->dev, "DCA enabled\n");
3028 igb_setup_dca(adapter);
3029 }
3030
3031#endif
3032#ifdef CONFIG_IGB_HWMON
3033
3034 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3035 u16 ets_word;
3036
3037
3038
3039
3040 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3041 if (ets_word != 0x0000 && ets_word != 0xFFFF)
3042 adapter->ets = true;
3043 else
3044 adapter->ets = false;
3045 if (igb_sysfs_init(adapter))
3046 dev_err(&pdev->dev,
3047 "failed to allocate sysfs resources\n");
3048 } else {
3049 adapter->ets = false;
3050 }
3051#endif
3052
3053 adapter->ei = *ei;
3054 if (hw->dev_spec._82575.mas_capable)
3055 igb_init_mas(adapter);
3056
3057
3058 igb_ptp_init(adapter);
3059
3060 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3061
3062 if (hw->mac.type != e1000_i354) {
3063 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3064 netdev->name,
3065 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3066 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3067 "unknown"),
3068 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3069 "Width x4" :
3070 (hw->bus.width == e1000_bus_width_pcie_x2) ?
3071 "Width x2" :
3072 (hw->bus.width == e1000_bus_width_pcie_x1) ?
3073 "Width x1" : "unknown"), netdev->dev_addr);
3074 }
3075
3076 if ((hw->mac.type >= e1000_i210 ||
3077 igb_get_flash_presence_i210(hw))) {
3078 ret_val = igb_read_part_string(hw, part_str,
3079 E1000_PBANUM_LENGTH);
3080 } else {
3081 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3082 }
3083
3084 if (ret_val)
3085 strcpy(part_str, "Unknown");
3086 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3087 dev_info(&pdev->dev,
3088 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3089 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3090 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3091 adapter->num_rx_queues, adapter->num_tx_queues);
3092 if (hw->phy.media_type == e1000_media_type_copper) {
3093 switch (hw->mac.type) {
3094 case e1000_i350:
3095 case e1000_i210:
3096 case e1000_i211:
3097
3098 err = igb_set_eee_i350(hw, true, true);
3099 if ((!err) &&
3100 (!hw->dev_spec._82575.eee_disable)) {
3101 adapter->eee_advert =
3102 MDIO_EEE_100TX | MDIO_EEE_1000T;
3103 adapter->flags |= IGB_FLAG_EEE;
3104 }
3105 break;
3106 case e1000_i354:
3107 if ((rd32(E1000_CTRL_EXT) &
3108 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3109 err = igb_set_eee_i354(hw, true, true);
3110 if ((!err) &&
3111 (!hw->dev_spec._82575.eee_disable)) {
3112 adapter->eee_advert =
3113 MDIO_EEE_100TX | MDIO_EEE_1000T;
3114 adapter->flags |= IGB_FLAG_EEE;
3115 }
3116 }
3117 break;
3118 default:
3119 break;
3120 }
3121 }
3122 pm_runtime_put_noidle(&pdev->dev);
3123 return 0;
3124
3125err_register:
3126 igb_release_hw_control(adapter);
3127 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3128err_eeprom:
3129 if (!igb_check_reset_block(hw))
3130 igb_reset_phy(hw);
3131
3132 if (hw->flash_address)
3133 iounmap(hw->flash_address);
3134err_sw_init:
3135 kfree(adapter->mac_table);
3136 kfree(adapter->shadow_vfta);
3137 igb_clear_interrupt_scheme(adapter);
3138#ifdef CONFIG_PCI_IOV
3139 igb_disable_sriov(pdev);
3140#endif
3141 pci_iounmap(pdev, adapter->io_addr);
3142err_ioremap:
3143 free_netdev(netdev);
3144err_alloc_etherdev:
3145 pci_release_mem_regions(pdev);
3146err_pci_reg:
3147err_dma:
3148 pci_disable_device(pdev);
3149 return err;
3150}
3151
3152#ifdef CONFIG_PCI_IOV
3153static int igb_disable_sriov(struct pci_dev *pdev)
3154{
3155 struct net_device *netdev = pci_get_drvdata(pdev);
3156 struct igb_adapter *adapter = netdev_priv(netdev);
3157 struct e1000_hw *hw = &adapter->hw;
3158
3159
3160 if (adapter->vf_data) {
3161
3162 if (pci_vfs_assigned(pdev)) {
3163 dev_warn(&pdev->dev,
3164 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3165 return -EPERM;
3166 } else {
3167 pci_disable_sriov(pdev);
3168 msleep(500);
3169 }
3170
3171 kfree(adapter->vf_mac_list);
3172 adapter->vf_mac_list = NULL;
3173 kfree(adapter->vf_data);
3174 adapter->vf_data = NULL;
3175 adapter->vfs_allocated_count = 0;
3176 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3177 wrfl();
3178 msleep(100);
3179 dev_info(&pdev->dev, "IOV Disabled\n");
3180
3181
3182 adapter->flags |= IGB_FLAG_DMAC;
3183 }
3184
3185 return 0;
3186}
3187
3188static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
3189{
3190 struct net_device *netdev = pci_get_drvdata(pdev);
3191 struct igb_adapter *adapter = netdev_priv(netdev);
3192 int old_vfs = pci_num_vf(pdev);
3193 struct vf_mac_filter *mac_list;
3194 int err = 0;
3195 int num_vf_mac_filters, i;
3196
3197 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3198 err = -EPERM;
3199 goto out;
3200 }
3201 if (!num_vfs)
3202 goto out;
3203
3204 if (old_vfs) {
3205 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3206 old_vfs, max_vfs);
3207 adapter->vfs_allocated_count = old_vfs;
3208 } else
3209 adapter->vfs_allocated_count = num_vfs;
3210
3211 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3212 sizeof(struct vf_data_storage), GFP_KERNEL);
3213
3214
3215 if (!adapter->vf_data) {
3216 adapter->vfs_allocated_count = 0;
3217 err = -ENOMEM;
3218 goto out;
3219 }
3220
3221
3222
3223
3224
3225
3226 num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3227 (1 + IGB_PF_MAC_FILTERS_RESERVED +
3228 adapter->vfs_allocated_count);
3229
3230 adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3231 sizeof(struct vf_mac_filter),
3232 GFP_KERNEL);
3233
3234 mac_list = adapter->vf_mac_list;
3235 INIT_LIST_HEAD(&adapter->vf_macs.l);
3236
3237 if (adapter->vf_mac_list) {
3238
3239 for (i = 0; i < num_vf_mac_filters; i++) {
3240 mac_list->vf = -1;
3241 mac_list->free = true;
3242 list_add(&mac_list->l, &adapter->vf_macs.l);
3243 mac_list++;
3244 }
3245 } else {
3246
3247
3248
3249 dev_err(&pdev->dev,
3250 "Unable to allocate memory for VF MAC filter list\n");
3251 }
3252
3253
3254 if (!old_vfs) {
3255 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3256 if (err)
3257 goto err_out;
3258 }
3259 dev_info(&pdev->dev, "%d VFs allocated\n",
3260 adapter->vfs_allocated_count);
3261 for (i = 0; i < adapter->vfs_allocated_count; i++)
3262 igb_vf_configure(adapter, i);
3263
3264
3265 adapter->flags &= ~IGB_FLAG_DMAC;
3266 goto out;
3267
3268err_out:
3269 kfree(adapter->vf_mac_list);
3270 adapter->vf_mac_list = NULL;
3271 kfree(adapter->vf_data);
3272 adapter->vf_data = NULL;
3273 adapter->vfs_allocated_count = 0;
3274out:
3275 return err;
3276}
3277
3278#endif
3279
3280
3281
3282
3283static void igb_remove_i2c(struct igb_adapter *adapter)
3284{
3285
3286 i2c_del_adapter(&adapter->i2c_adap);
3287}
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298static void igb_remove(struct pci_dev *pdev)
3299{
3300 struct net_device *netdev = pci_get_drvdata(pdev);
3301 struct igb_adapter *adapter = netdev_priv(netdev);
3302 struct e1000_hw *hw = &adapter->hw;
3303
3304 pm_runtime_get_noresume(&pdev->dev);
3305#ifdef CONFIG_IGB_HWMON
3306 igb_sysfs_exit(adapter);
3307#endif
3308 igb_remove_i2c(adapter);
3309 igb_ptp_stop(adapter);
3310
3311
3312
3313 set_bit(__IGB_DOWN, &adapter->state);
3314 del_timer_sync(&adapter->watchdog_timer);
3315 del_timer_sync(&adapter->phy_info_timer);
3316
3317 cancel_work_sync(&adapter->reset_task);
3318 cancel_work_sync(&adapter->watchdog_task);
3319
3320#ifdef CONFIG_IGB_DCA
3321 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3322 dev_info(&pdev->dev, "DCA disabled\n");
3323 dca_remove_requester(&pdev->dev);
3324 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3325 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3326 }
3327#endif
3328
3329
3330
3331
3332 igb_release_hw_control(adapter);
3333
3334#ifdef CONFIG_PCI_IOV
3335 igb_disable_sriov(pdev);
3336#endif
3337
3338 unregister_netdev(netdev);
3339
3340 igb_clear_interrupt_scheme(adapter);
3341
3342 pci_iounmap(pdev, adapter->io_addr);
3343 if (hw->flash_address)
3344 iounmap(hw->flash_address);
3345 pci_release_mem_regions(pdev);
3346
3347 kfree(adapter->mac_table);
3348 kfree(adapter->shadow_vfta);
3349 free_netdev(netdev);
3350
3351 pci_disable_pcie_error_reporting(pdev);
3352
3353 pci_disable_device(pdev);
3354}
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365static void igb_probe_vfs(struct igb_adapter *adapter)
3366{
3367#ifdef CONFIG_PCI_IOV
3368 struct pci_dev *pdev = adapter->pdev;
3369 struct e1000_hw *hw = &adapter->hw;
3370
3371
3372 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
3373 return;
3374
3375
3376
3377
3378
3379 igb_set_interrupt_capability(adapter, true);
3380 igb_reset_interrupt_capability(adapter);
3381
3382 pci_sriov_set_totalvfs(pdev, 7);
3383 igb_enable_sriov(pdev, max_vfs);
3384
3385#endif
3386}
3387
3388unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3389{
3390 struct e1000_hw *hw = &adapter->hw;
3391 unsigned int max_rss_queues;
3392
3393
3394 switch (hw->mac.type) {
3395 case e1000_i211:
3396 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3397 break;
3398 case e1000_82575:
3399 case e1000_i210:
3400 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3401 break;
3402 case e1000_i350:
3403
3404 if (!!adapter->vfs_allocated_count) {
3405 max_rss_queues = 1;
3406 break;
3407 }
3408
3409 case e1000_82576:
3410 if (!!adapter->vfs_allocated_count) {
3411 max_rss_queues = 2;
3412 break;
3413 }
3414
3415 case e1000_82580:
3416 case e1000_i354:
3417 default:
3418 max_rss_queues = IGB_MAX_RX_QUEUES;
3419 break;
3420 }
3421
3422 return max_rss_queues;
3423}
3424
3425static void igb_init_queue_configuration(struct igb_adapter *adapter)
3426{
3427 u32 max_rss_queues;
3428
3429 max_rss_queues = igb_get_max_rss_queues(adapter);
3430 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3431
3432 igb_set_flag_queue_pairs(adapter, max_rss_queues);
3433}
3434
3435void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3436 const u32 max_rss_queues)
3437{
3438 struct e1000_hw *hw = &adapter->hw;
3439
3440
3441 switch (hw->mac.type) {
3442 case e1000_82575:
3443 case e1000_i211:
3444
3445 break;
3446 case e1000_82576:
3447 case e1000_82580:
3448 case e1000_i350:
3449 case e1000_i354:
3450 case e1000_i210:
3451 default:
3452
3453
3454
3455 if (adapter->rss_queues > (max_rss_queues / 2))
3456 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
3457 else
3458 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
3459 break;
3460 }
3461}
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471static int igb_sw_init(struct igb_adapter *adapter)
3472{
3473 struct e1000_hw *hw = &adapter->hw;
3474 struct net_device *netdev = adapter->netdev;
3475 struct pci_dev *pdev = adapter->pdev;
3476
3477 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3478
3479
3480 adapter->tx_ring_count = IGB_DEFAULT_TXD;
3481 adapter->rx_ring_count = IGB_DEFAULT_RXD;
3482
3483
3484 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
3485 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
3486
3487
3488 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3489
3490 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3491 VLAN_HLEN;
3492 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3493
3494 spin_lock_init(&adapter->nfc_lock);
3495 spin_lock_init(&adapter->stats64_lock);
3496#ifdef CONFIG_PCI_IOV
3497 switch (hw->mac.type) {
3498 case e1000_82576:
3499 case e1000_i350:
3500 if (max_vfs > 7) {
3501 dev_warn(&pdev->dev,
3502 "Maximum of 7 VFs per PF, using max\n");
3503 max_vfs = adapter->vfs_allocated_count = 7;
3504 } else
3505 adapter->vfs_allocated_count = max_vfs;
3506 if (adapter->vfs_allocated_count)
3507 dev_warn(&pdev->dev,
3508 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3509 break;
3510 default:
3511 break;
3512 }
3513#endif
3514
3515
3516 adapter->flags |= IGB_FLAG_HAS_MSIX;
3517
3518 adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) *
3519 hw->mac.rar_entry_count, GFP_ATOMIC);
3520 if (!adapter->mac_table)
3521 return -ENOMEM;
3522
3523 igb_probe_vfs(adapter);
3524
3525 igb_init_queue_configuration(adapter);
3526
3527
3528 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
3529 GFP_ATOMIC);
3530 if (!adapter->shadow_vfta)
3531 return -ENOMEM;
3532
3533
3534 if (igb_init_interrupt_scheme(adapter, true)) {
3535 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3536 return -ENOMEM;
3537 }
3538
3539
3540 igb_irq_disable(adapter);
3541
3542 if (hw->mac.type >= e1000_i350)
3543 adapter->flags &= ~IGB_FLAG_DMAC;
3544
3545 set_bit(__IGB_DOWN, &adapter->state);
3546 return 0;
3547}
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561static int __igb_open(struct net_device *netdev, bool resuming)
3562{
3563 struct igb_adapter *adapter = netdev_priv(netdev);
3564 struct e1000_hw *hw = &adapter->hw;
3565 struct pci_dev *pdev = adapter->pdev;
3566 int err;
3567 int i;
3568
3569
3570 if (test_bit(__IGB_TESTING, &adapter->state)) {
3571 WARN_ON(resuming);
3572 return -EBUSY;
3573 }
3574
3575 if (!resuming)
3576 pm_runtime_get_sync(&pdev->dev);
3577
3578 netif_carrier_off(netdev);
3579
3580
3581 err = igb_setup_all_tx_resources(adapter);
3582 if (err)
3583 goto err_setup_tx;
3584
3585
3586 err = igb_setup_all_rx_resources(adapter);
3587 if (err)
3588 goto err_setup_rx;
3589
3590 igb_power_up_link(adapter);
3591
3592
3593
3594
3595
3596
3597 igb_configure(adapter);
3598
3599 err = igb_request_irq(adapter);
3600 if (err)
3601 goto err_req_irq;
3602
3603
3604 err = netif_set_real_num_tx_queues(adapter->netdev,
3605 adapter->num_tx_queues);
3606 if (err)
3607 goto err_set_queues;
3608
3609 err = netif_set_real_num_rx_queues(adapter->netdev,
3610 adapter->num_rx_queues);
3611 if (err)
3612 goto err_set_queues;
3613
3614
3615 clear_bit(__IGB_DOWN, &adapter->state);
3616
3617 for (i = 0; i < adapter->num_q_vectors; i++)
3618 napi_enable(&(adapter->q_vector[i]->napi));
3619
3620
3621 rd32(E1000_ICR);
3622
3623 igb_irq_enable(adapter);
3624
3625
3626 if (adapter->vfs_allocated_count) {
3627 u32 reg_data = rd32(E1000_CTRL_EXT);
3628
3629 reg_data |= E1000_CTRL_EXT_PFRSTD;
3630 wr32(E1000_CTRL_EXT, reg_data);
3631 }
3632
3633 netif_tx_start_all_queues(netdev);
3634
3635 if (!resuming)
3636 pm_runtime_put(&pdev->dev);
3637
3638
3639 hw->mac.get_link_status = 1;
3640 schedule_work(&adapter->watchdog_task);
3641
3642 return 0;
3643
3644err_set_queues:
3645 igb_free_irq(adapter);
3646err_req_irq:
3647 igb_release_hw_control(adapter);
3648 igb_power_down_link(adapter);
3649 igb_free_all_rx_resources(adapter);
3650err_setup_rx:
3651 igb_free_all_tx_resources(adapter);
3652err_setup_tx:
3653 igb_reset(adapter);
3654 if (!resuming)
3655 pm_runtime_put(&pdev->dev);
3656
3657 return err;
3658}
3659
3660int igb_open(struct net_device *netdev)
3661{
3662 return __igb_open(netdev, false);
3663}
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676static int __igb_close(struct net_device *netdev, bool suspending)
3677{
3678 struct igb_adapter *adapter = netdev_priv(netdev);
3679 struct pci_dev *pdev = adapter->pdev;
3680
3681 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
3682
3683 if (!suspending)
3684 pm_runtime_get_sync(&pdev->dev);
3685
3686 igb_down(adapter);
3687 igb_free_irq(adapter);
3688
3689 igb_free_all_tx_resources(adapter);
3690 igb_free_all_rx_resources(adapter);
3691
3692 if (!suspending)
3693 pm_runtime_put_sync(&pdev->dev);
3694 return 0;
3695}
3696
3697int igb_close(struct net_device *netdev)
3698{
3699 if (netif_device_present(netdev) || netdev->dismantle)
3700 return __igb_close(netdev, false);
3701 return 0;
3702}
3703
3704
3705
3706
3707
3708
3709
3710int igb_setup_tx_resources(struct igb_ring *tx_ring)
3711{
3712 struct device *dev = tx_ring->dev;
3713 int size;
3714
3715 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3716
3717 tx_ring->tx_buffer_info = vmalloc(size);
3718 if (!tx_ring->tx_buffer_info)
3719 goto err;
3720
3721
3722 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
3723 tx_ring->size = ALIGN(tx_ring->size, 4096);
3724
3725 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
3726 &tx_ring->dma, GFP_KERNEL);
3727 if (!tx_ring->desc)
3728 goto err;
3729
3730 tx_ring->next_to_use = 0;
3731 tx_ring->next_to_clean = 0;
3732
3733 return 0;
3734
3735err:
3736 vfree(tx_ring->tx_buffer_info);
3737 tx_ring->tx_buffer_info = NULL;
3738 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
3739 return -ENOMEM;
3740}
3741
3742
3743
3744
3745
3746
3747
3748
3749static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
3750{
3751 struct pci_dev *pdev = adapter->pdev;
3752 int i, err = 0;
3753
3754 for (i = 0; i < adapter->num_tx_queues; i++) {
3755 err = igb_setup_tx_resources(adapter->tx_ring[i]);
3756 if (err) {
3757 dev_err(&pdev->dev,
3758 "Allocation for Tx Queue %u failed\n", i);
3759 for (i--; i >= 0; i--)
3760 igb_free_tx_resources(adapter->tx_ring[i]);
3761 break;
3762 }
3763 }
3764
3765 return err;
3766}
3767
3768
3769
3770
3771
3772void igb_setup_tctl(struct igb_adapter *adapter)
3773{
3774 struct e1000_hw *hw = &adapter->hw;
3775 u32 tctl;
3776
3777
3778 wr32(E1000_TXDCTL(0), 0);
3779
3780
3781 tctl = rd32(E1000_TCTL);
3782 tctl &= ~E1000_TCTL_CT;
3783 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
3784 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
3785
3786 igb_config_collision_dist(hw);
3787
3788
3789 tctl |= E1000_TCTL_EN;
3790
3791 wr32(E1000_TCTL, tctl);
3792}
3793
3794
3795
3796
3797
3798
3799
3800
3801void igb_configure_tx_ring(struct igb_adapter *adapter,
3802 struct igb_ring *ring)
3803{
3804 struct e1000_hw *hw = &adapter->hw;
3805 u32 txdctl = 0;
3806 u64 tdba = ring->dma;
3807 int reg_idx = ring->reg_idx;
3808
3809
3810 wr32(E1000_TXDCTL(reg_idx), 0);
3811 wrfl();
3812 mdelay(10);
3813
3814 wr32(E1000_TDLEN(reg_idx),
3815 ring->count * sizeof(union e1000_adv_tx_desc));
3816 wr32(E1000_TDBAL(reg_idx),
3817 tdba & 0x00000000ffffffffULL);
3818 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
3819
3820 ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
3821 wr32(E1000_TDH(reg_idx), 0);
3822 writel(0, ring->tail);
3823
3824 txdctl |= IGB_TX_PTHRESH;
3825 txdctl |= IGB_TX_HTHRESH << 8;
3826 txdctl |= IGB_TX_WTHRESH << 16;
3827
3828
3829 memset(ring->tx_buffer_info, 0,
3830 sizeof(struct igb_tx_buffer) * ring->count);
3831
3832 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
3833 wr32(E1000_TXDCTL(reg_idx), txdctl);
3834}
3835
3836
3837
3838
3839
3840
3841
3842static void igb_configure_tx(struct igb_adapter *adapter)
3843{
3844 int i;
3845
3846 for (i = 0; i < adapter->num_tx_queues; i++)
3847 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
3848}
3849
3850
3851
3852
3853
3854
3855
3856int igb_setup_rx_resources(struct igb_ring *rx_ring)
3857{
3858 struct device *dev = rx_ring->dev;
3859 int size;
3860
3861 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3862
3863 rx_ring->rx_buffer_info = vmalloc(size);
3864 if (!rx_ring->rx_buffer_info)
3865 goto err;
3866
3867
3868 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
3869 rx_ring->size = ALIGN(rx_ring->size, 4096);
3870
3871 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
3872 &rx_ring->dma, GFP_KERNEL);
3873 if (!rx_ring->desc)
3874 goto err;
3875
3876 rx_ring->next_to_alloc = 0;
3877 rx_ring->next_to_clean = 0;
3878 rx_ring->next_to_use = 0;
3879
3880 return 0;
3881
3882err:
3883 vfree(rx_ring->rx_buffer_info);
3884 rx_ring->rx_buffer_info = NULL;
3885 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
3886 return -ENOMEM;
3887}
3888
3889
3890
3891
3892
3893
3894
3895
3896static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3897{
3898 struct pci_dev *pdev = adapter->pdev;
3899 int i, err = 0;
3900
3901 for (i = 0; i < adapter->num_rx_queues; i++) {
3902 err = igb_setup_rx_resources(adapter->rx_ring[i]);
3903 if (err) {
3904 dev_err(&pdev->dev,
3905 "Allocation for Rx Queue %u failed\n", i);
3906 for (i--; i >= 0; i--)
3907 igb_free_rx_resources(adapter->rx_ring[i]);
3908 break;
3909 }
3910 }
3911
3912 return err;
3913}
3914
3915
3916
3917
3918
3919static void igb_setup_mrqc(struct igb_adapter *adapter)
3920{
3921 struct e1000_hw *hw = &adapter->hw;
3922 u32 mrqc, rxcsum;
3923 u32 j, num_rx_queues;
3924 u32 rss_key[10];
3925
3926 netdev_rss_key_fill(rss_key, sizeof(rss_key));
3927 for (j = 0; j < 10; j++)
3928 wr32(E1000_RSSRK(j), rss_key[j]);
3929
3930 num_rx_queues = adapter->rss_queues;
3931
3932 switch (hw->mac.type) {
3933 case e1000_82576:
3934
3935 if (adapter->vfs_allocated_count)
3936 num_rx_queues = 2;
3937 break;
3938 default:
3939 break;
3940 }
3941
3942 if (adapter->rss_indir_tbl_init != num_rx_queues) {
3943 for (j = 0; j < IGB_RETA_SIZE; j++)
3944 adapter->rss_indir_tbl[j] =
3945 (j * num_rx_queues) / IGB_RETA_SIZE;
3946 adapter->rss_indir_tbl_init = num_rx_queues;
3947 }
3948 igb_write_rss_indir_tbl(adapter);
3949
3950
3951
3952
3953
3954 rxcsum = rd32(E1000_RXCSUM);
3955 rxcsum |= E1000_RXCSUM_PCSD;
3956
3957 if (adapter->hw.mac.type >= e1000_82576)
3958
3959 rxcsum |= E1000_RXCSUM_CRCOFL;
3960
3961
3962 wr32(E1000_RXCSUM, rxcsum);
3963
3964
3965
3966
3967 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
3968 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3969 E1000_MRQC_RSS_FIELD_IPV6 |
3970 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3971 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
3972
3973 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
3974 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
3975 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
3976 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
3977
3978
3979
3980
3981
3982 if (adapter->vfs_allocated_count) {
3983 if (hw->mac.type > e1000_82575) {
3984
3985 u32 vtctl = rd32(E1000_VT_CTL);
3986
3987 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
3988 E1000_VT_CTL_DISABLE_DEF_POOL);
3989 vtctl |= adapter->vfs_allocated_count <<
3990 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
3991 wr32(E1000_VT_CTL, vtctl);
3992 }
3993 if (adapter->rss_queues > 1)
3994 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
3995 else
3996 mrqc |= E1000_MRQC_ENABLE_VMDQ;
3997 } else {
3998 if (hw->mac.type != e1000_i211)
3999 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4000 }
4001 igb_vmm_control(adapter);
4002
4003 wr32(E1000_MRQC, mrqc);
4004}
4005
4006
4007
4008
4009
4010void igb_setup_rctl(struct igb_adapter *adapter)
4011{
4012 struct e1000_hw *hw = &adapter->hw;
4013 u32 rctl;
4014
4015 rctl = rd32(E1000_RCTL);
4016
4017 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4018 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4019
4020 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4021 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4022
4023
4024
4025
4026
4027 rctl |= E1000_RCTL_SECRC;
4028
4029
4030 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
4031
4032
4033 rctl |= E1000_RCTL_LPE;
4034
4035
4036 wr32(E1000_RXDCTL(0), 0);
4037
4038
4039
4040
4041
4042 if (adapter->vfs_allocated_count) {
4043
4044 wr32(E1000_QDE, ALL_QUEUES);
4045 }
4046
4047
4048 if (adapter->netdev->features & NETIF_F_RXALL) {
4049
4050
4051
4052 rctl |= (E1000_RCTL_SBP |
4053 E1000_RCTL_BAM |
4054 E1000_RCTL_PMCF);
4055
4056 rctl &= ~(E1000_RCTL_DPF |
4057 E1000_RCTL_CFIEN);
4058
4059
4060
4061 }
4062
4063 wr32(E1000_RCTL, rctl);
4064}
4065
4066static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4067 int vfn)
4068{
4069 struct e1000_hw *hw = &adapter->hw;
4070 u32 vmolr;
4071
4072 if (size > MAX_JUMBO_FRAME_SIZE)
4073 size = MAX_JUMBO_FRAME_SIZE;
4074
4075 vmolr = rd32(E1000_VMOLR(vfn));
4076 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4077 vmolr |= size | E1000_VMOLR_LPE;
4078 wr32(E1000_VMOLR(vfn), vmolr);
4079
4080 return 0;
4081}
4082
4083static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4084 int vfn, bool enable)
4085{
4086 struct e1000_hw *hw = &adapter->hw;
4087 u32 val, reg;
4088
4089 if (hw->mac.type < e1000_82576)
4090 return;
4091
4092 if (hw->mac.type == e1000_i350)
4093 reg = E1000_DVMOLR(vfn);
4094 else
4095 reg = E1000_VMOLR(vfn);
4096
4097 val = rd32(reg);
4098 if (enable)
4099 val |= E1000_VMOLR_STRVLAN;
4100 else
4101 val &= ~(E1000_VMOLR_STRVLAN);
4102 wr32(reg, val);
4103}
4104
4105static inline void igb_set_vmolr(struct igb_adapter *adapter,
4106 int vfn, bool aupe)
4107{
4108 struct e1000_hw *hw = &adapter->hw;
4109 u32 vmolr;
4110
4111
4112
4113
4114 if (hw->mac.type < e1000_82576)
4115 return;
4116
4117 vmolr = rd32(E1000_VMOLR(vfn));
4118 if (aupe)
4119 vmolr |= E1000_VMOLR_AUPE;
4120 else
4121 vmolr &= ~(E1000_VMOLR_AUPE);
4122
4123
4124 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4125
4126 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
4127 vmolr |= E1000_VMOLR_RSSE;
4128
4129
4130
4131 if (vfn <= adapter->vfs_allocated_count)
4132 vmolr |= E1000_VMOLR_BAM;
4133
4134 wr32(E1000_VMOLR(vfn), vmolr);
4135}
4136
4137
4138
4139
4140
4141
4142
4143
4144void igb_configure_rx_ring(struct igb_adapter *adapter,
4145 struct igb_ring *ring)
4146{
4147 struct e1000_hw *hw = &adapter->hw;
4148 union e1000_adv_rx_desc *rx_desc;
4149 u64 rdba = ring->dma;
4150 int reg_idx = ring->reg_idx;
4151 u32 srrctl = 0, rxdctl = 0;
4152
4153
4154 wr32(E1000_RXDCTL(reg_idx), 0);
4155
4156
4157 wr32(E1000_RDBAL(reg_idx),
4158 rdba & 0x00000000ffffffffULL);
4159 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4160 wr32(E1000_RDLEN(reg_idx),
4161 ring->count * sizeof(union e1000_adv_rx_desc));
4162
4163
4164 ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
4165 wr32(E1000_RDH(reg_idx), 0);
4166 writel(0, ring->tail);
4167
4168
4169 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4170 if (ring_uses_large_buffer(ring))
4171 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4172 else
4173 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4174 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4175 if (hw->mac.type >= e1000_82580)
4176 srrctl |= E1000_SRRCTL_TIMESTAMP;
4177
4178 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
4179 srrctl |= E1000_SRRCTL_DROP_EN;
4180
4181 wr32(E1000_SRRCTL(reg_idx), srrctl);
4182
4183
4184 igb_set_vmolr(adapter, reg_idx & 0x7, true);
4185
4186 rxdctl |= IGB_RX_PTHRESH;
4187 rxdctl |= IGB_RX_HTHRESH << 8;
4188 rxdctl |= IGB_RX_WTHRESH << 16;
4189
4190
4191 memset(ring->rx_buffer_info, 0,
4192 sizeof(struct igb_rx_buffer) * ring->count);
4193
4194
4195 rx_desc = IGB_RX_DESC(ring, 0);
4196 rx_desc->wb.upper.length = 0;
4197
4198
4199 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4200 wr32(E1000_RXDCTL(reg_idx), rxdctl);
4201}
4202
4203static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4204 struct igb_ring *rx_ring)
4205{
4206
4207 clear_ring_build_skb_enabled(rx_ring);
4208 clear_ring_uses_large_buffer(rx_ring);
4209
4210 if (adapter->flags & IGB_FLAG_RX_LEGACY)
4211 return;
4212
4213 set_ring_build_skb_enabled(rx_ring);
4214
4215#if (PAGE_SIZE < 8192)
4216 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4217 return;
4218
4219 set_ring_uses_large_buffer(rx_ring);
4220#endif
4221}
4222
4223
4224
4225
4226
4227
4228
4229static void igb_configure_rx(struct igb_adapter *adapter)
4230{
4231 int i;
4232
4233
4234 igb_set_default_mac_filter(adapter);
4235
4236
4237
4238
4239 for (i = 0; i < adapter->num_rx_queues; i++) {
4240 struct igb_ring *rx_ring = adapter->rx_ring[i];
4241
4242 igb_set_rx_buffer_len(adapter, rx_ring);
4243 igb_configure_rx_ring(adapter, rx_ring);
4244 }
4245}
4246
4247
4248
4249
4250
4251
4252
4253void igb_free_tx_resources(struct igb_ring *tx_ring)
4254{
4255 igb_clean_tx_ring(tx_ring);
4256
4257 vfree(tx_ring->tx_buffer_info);
4258 tx_ring->tx_buffer_info = NULL;
4259
4260
4261 if (!tx_ring->desc)
4262 return;
4263
4264 dma_free_coherent(tx_ring->dev, tx_ring->size,
4265 tx_ring->desc, tx_ring->dma);
4266
4267 tx_ring->desc = NULL;
4268}
4269
4270
4271
4272
4273
4274
4275
4276static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4277{
4278 int i;
4279
4280 for (i = 0; i < adapter->num_tx_queues; i++)
4281 if (adapter->tx_ring[i])
4282 igb_free_tx_resources(adapter->tx_ring[i]);
4283}
4284
4285
4286
4287
4288
4289static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4290{
4291 u16 i = tx_ring->next_to_clean;
4292 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4293
4294 while (i != tx_ring->next_to_use) {
4295 union e1000_adv_tx_desc *eop_desc, *tx_desc;
4296
4297
4298 dev_kfree_skb_any(tx_buffer->skb);
4299
4300
4301 dma_unmap_single(tx_ring->dev,
4302 dma_unmap_addr(tx_buffer, dma),
4303 dma_unmap_len(tx_buffer, len),
4304 DMA_TO_DEVICE);
4305
4306
4307 eop_desc = tx_buffer->next_to_watch;
4308 tx_desc = IGB_TX_DESC(tx_ring, i);
4309
4310
4311 while (tx_desc != eop_desc) {
4312 tx_buffer++;
4313 tx_desc++;
4314 i++;
4315 if (unlikely(i == tx_ring->count)) {
4316 i = 0;
4317 tx_buffer = tx_ring->tx_buffer_info;
4318 tx_desc = IGB_TX_DESC(tx_ring, 0);
4319 }
4320
4321
4322 if (dma_unmap_len(tx_buffer, len))
4323 dma_unmap_page(tx_ring->dev,
4324 dma_unmap_addr(tx_buffer, dma),
4325 dma_unmap_len(tx_buffer, len),
4326 DMA_TO_DEVICE);
4327 }
4328
4329
4330 tx_buffer++;
4331 i++;
4332 if (unlikely(i == tx_ring->count)) {
4333 i = 0;
4334 tx_buffer = tx_ring->tx_buffer_info;
4335 }
4336 }
4337
4338
4339 netdev_tx_reset_queue(txring_txq(tx_ring));
4340
4341
4342 tx_ring->next_to_use = 0;
4343 tx_ring->next_to_clean = 0;
4344}
4345
4346
4347
4348
4349
4350static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4351{
4352 int i;
4353
4354 for (i = 0; i < adapter->num_tx_queues; i++)
4355 if (adapter->tx_ring[i])
4356 igb_clean_tx_ring(adapter->tx_ring[i]);
4357}
4358
4359
4360
4361
4362
4363
4364
4365void igb_free_rx_resources(struct igb_ring *rx_ring)
4366{
4367 igb_clean_rx_ring(rx_ring);
4368
4369 vfree(rx_ring->rx_buffer_info);
4370 rx_ring->rx_buffer_info = NULL;
4371
4372
4373 if (!rx_ring->desc)
4374 return;
4375
4376 dma_free_coherent(rx_ring->dev, rx_ring->size,
4377 rx_ring->desc, rx_ring->dma);
4378
4379 rx_ring->desc = NULL;
4380}
4381
4382
4383
4384
4385
4386
4387
4388static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4389{
4390 int i;
4391
4392 for (i = 0; i < adapter->num_rx_queues; i++)
4393 if (adapter->rx_ring[i])
4394 igb_free_rx_resources(adapter->rx_ring[i]);
4395}
4396
4397
4398
4399
4400
4401static void igb_clean_rx_ring(struct igb_ring *rx_ring)
4402{
4403 u16 i = rx_ring->next_to_clean;
4404
4405 if (rx_ring->skb)
4406 dev_kfree_skb(rx_ring->skb);
4407 rx_ring->skb = NULL;
4408
4409
4410 while (i != rx_ring->next_to_alloc) {
4411 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
4412
4413
4414
4415
4416 dma_sync_single_range_for_cpu(rx_ring->dev,
4417 buffer_info->dma,
4418 buffer_info->page_offset,
4419 igb_rx_bufsz(rx_ring),
4420 DMA_FROM_DEVICE);
4421
4422
4423 dma_unmap_page_attrs(rx_ring->dev,
4424 buffer_info->dma,
4425 igb_rx_pg_size(rx_ring),
4426 DMA_FROM_DEVICE,
4427 IGB_RX_DMA_ATTR);
4428 __page_frag_cache_drain(buffer_info->page,
4429 buffer_info->pagecnt_bias);
4430
4431 i++;
4432 if (i == rx_ring->count)
4433 i = 0;
4434 }
4435
4436 rx_ring->next_to_alloc = 0;
4437 rx_ring->next_to_clean = 0;
4438 rx_ring->next_to_use = 0;
4439}
4440
4441
4442
4443
4444
4445static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4446{
4447 int i;
4448
4449 for (i = 0; i < adapter->num_rx_queues; i++)
4450 if (adapter->rx_ring[i])
4451 igb_clean_rx_ring(adapter->rx_ring[i]);
4452}
4453
4454
4455
4456
4457
4458
4459
4460
4461static int igb_set_mac(struct net_device *netdev, void *p)
4462{
4463 struct igb_adapter *adapter = netdev_priv(netdev);
4464 struct e1000_hw *hw = &adapter->hw;
4465 struct sockaddr *addr = p;
4466
4467 if (!is_valid_ether_addr(addr->sa_data))
4468 return -EADDRNOTAVAIL;
4469
4470 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4471 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
4472
4473
4474 igb_set_default_mac_filter(adapter);
4475
4476 return 0;
4477}
4478
4479
4480
4481
4482
4483
4484
4485
4486
4487
4488static int igb_write_mc_addr_list(struct net_device *netdev)
4489{
4490 struct igb_adapter *adapter = netdev_priv(netdev);
4491 struct e1000_hw *hw = &adapter->hw;
4492 struct netdev_hw_addr *ha;
4493 u8 *mta_list;
4494 int i;
4495
4496 if (netdev_mc_empty(netdev)) {
4497
4498 igb_update_mc_addr_list(hw, NULL, 0);
4499 igb_restore_vf_multicasts(adapter);
4500 return 0;
4501 }
4502
4503 mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
4504 if (!mta_list)
4505 return -ENOMEM;
4506
4507
4508 i = 0;
4509 netdev_for_each_mc_addr(ha, netdev)
4510 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
4511
4512 igb_update_mc_addr_list(hw, mta_list, i);
4513 kfree(mta_list);
4514
4515 return netdev_mc_count(netdev);
4516}
4517
4518static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
4519{
4520 struct e1000_hw *hw = &adapter->hw;
4521 u32 i, pf_id;
4522
4523 switch (hw->mac.type) {
4524 case e1000_i210:
4525 case e1000_i211:
4526 case e1000_i350:
4527
4528 if (adapter->netdev->features & NETIF_F_NTUPLE)
4529 break;
4530
4531 case e1000_82576:
4532 case e1000_82580:
4533 case e1000_i354:
4534
4535 if (adapter->vfs_allocated_count)
4536 break;
4537
4538 default:
4539 return 1;
4540 }
4541
4542
4543 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
4544 return 0;
4545
4546 if (!adapter->vfs_allocated_count)
4547 goto set_vfta;
4548
4549
4550 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4551
4552 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4553 u32 vlvf = rd32(E1000_VLVF(i));
4554
4555 vlvf |= BIT(pf_id);
4556 wr32(E1000_VLVF(i), vlvf);
4557 }
4558
4559set_vfta:
4560
4561 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
4562 hw->mac.ops.write_vfta(hw, i, ~0U);
4563
4564
4565 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
4566
4567 return 0;
4568}
4569
4570#define VFTA_BLOCK_SIZE 8
4571static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
4572{
4573 struct e1000_hw *hw = &adapter->hw;
4574 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4575 u32 vid_start = vfta_offset * 32;
4576 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4577 u32 i, vid, word, bits, pf_id;
4578
4579
4580 vid = adapter->mng_vlan_id;
4581 if (vid >= vid_start && vid < vid_end)
4582 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4583
4584 if (!adapter->vfs_allocated_count)
4585 goto set_vfta;
4586
4587 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4588
4589 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4590 u32 vlvf = rd32(E1000_VLVF(i));
4591
4592
4593 vid = vlvf & VLAN_VID_MASK;
4594
4595
4596 if (vid < vid_start || vid >= vid_end)
4597 continue;
4598
4599 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
4600
4601 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4602
4603
4604 if (test_bit(vid, adapter->active_vlans))
4605 continue;
4606 }
4607
4608
4609 bits = ~BIT(pf_id);
4610 bits &= rd32(E1000_VLVF(i));
4611 wr32(E1000_VLVF(i), bits);
4612 }
4613
4614set_vfta:
4615
4616 for (i = VFTA_BLOCK_SIZE; i--;) {
4617 vid = (vfta_offset + i) * 32;
4618 word = vid / BITS_PER_LONG;
4619 bits = vid % BITS_PER_LONG;
4620
4621 vfta[i] |= adapter->active_vlans[word] >> bits;
4622
4623 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
4624 }
4625}
4626
4627static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
4628{
4629 u32 i;
4630
4631
4632 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
4633 return;
4634
4635
4636 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
4637
4638 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
4639 igb_scrub_vfta(adapter, i);
4640}
4641
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651static void igb_set_rx_mode(struct net_device *netdev)
4652{
4653 struct igb_adapter *adapter = netdev_priv(netdev);
4654 struct e1000_hw *hw = &adapter->hw;
4655 unsigned int vfn = adapter->vfs_allocated_count;
4656 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
4657 int count;
4658
4659
4660 if (netdev->flags & IFF_PROMISC) {
4661 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
4662 vmolr |= E1000_VMOLR_MPME;
4663
4664
4665 if (hw->mac.type == e1000_82576)
4666 vmolr |= E1000_VMOLR_ROPE;
4667 } else {
4668 if (netdev->flags & IFF_ALLMULTI) {
4669 rctl |= E1000_RCTL_MPE;
4670 vmolr |= E1000_VMOLR_MPME;
4671 } else {
4672
4673
4674
4675
4676 count = igb_write_mc_addr_list(netdev);
4677 if (count < 0) {
4678 rctl |= E1000_RCTL_MPE;
4679 vmolr |= E1000_VMOLR_MPME;
4680 } else if (count) {
4681 vmolr |= E1000_VMOLR_ROMPE;
4682 }
4683 }
4684 }
4685
4686
4687
4688
4689
4690 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
4691 rctl |= E1000_RCTL_UPE;
4692 vmolr |= E1000_VMOLR_ROPE;
4693 }
4694
4695
4696 rctl |= E1000_RCTL_VFE;
4697
4698
4699 if ((netdev->flags & IFF_PROMISC) ||
4700 (netdev->features & NETIF_F_RXALL)) {
4701
4702 if (igb_vlan_promisc_enable(adapter))
4703 rctl &= ~E1000_RCTL_VFE;
4704 } else {
4705 igb_vlan_promisc_disable(adapter);
4706 }
4707
4708
4709 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
4710 E1000_RCTL_VFE);
4711 wr32(E1000_RCTL, rctl);
4712
4713#if (PAGE_SIZE < 8192)
4714 if (!adapter->vfs_allocated_count) {
4715 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4716 rlpml = IGB_MAX_FRAME_BUILD_SKB;
4717 }
4718#endif
4719 wr32(E1000_RLPML, rlpml);
4720
4721
4722
4723
4724
4725
4726 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
4727 return;
4728
4729
4730 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
4731
4732 vmolr |= rd32(E1000_VMOLR(vfn)) &
4733 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
4734
4735
4736 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4737#if (PAGE_SIZE < 8192)
4738 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4739 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
4740 else
4741#endif
4742 vmolr |= MAX_JUMBO_FRAME_SIZE;
4743 vmolr |= E1000_VMOLR_LPE;
4744
4745 wr32(E1000_VMOLR(vfn), vmolr);
4746
4747 igb_restore_vf_multicasts(adapter);
4748}
4749
4750static void igb_check_wvbr(struct igb_adapter *adapter)
4751{
4752 struct e1000_hw *hw = &adapter->hw;
4753 u32 wvbr = 0;
4754
4755 switch (hw->mac.type) {
4756 case e1000_82576:
4757 case e1000_i350:
4758 wvbr = rd32(E1000_WVBR);
4759 if (!wvbr)
4760 return;
4761 break;
4762 default:
4763 break;
4764 }
4765
4766 adapter->wvbr |= wvbr;
4767}
4768
4769#define IGB_STAGGERED_QUEUE_OFFSET 8
4770
4771static void igb_spoof_check(struct igb_adapter *adapter)
4772{
4773 int j;
4774
4775 if (!adapter->wvbr)
4776 return;
4777
4778 for (j = 0; j < adapter->vfs_allocated_count; j++) {
4779 if (adapter->wvbr & BIT(j) ||
4780 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
4781 dev_warn(&adapter->pdev->dev,
4782 "Spoof event(s) detected on VF %d\n", j);
4783 adapter->wvbr &=
4784 ~(BIT(j) |
4785 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
4786 }
4787 }
4788}
4789
4790
4791
4792
4793static void igb_update_phy_info(struct timer_list *t)
4794{
4795 struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
4796 igb_get_phy_info(&adapter->hw);
4797}
4798
4799
4800
4801
4802
4803bool igb_has_link(struct igb_adapter *adapter)
4804{
4805 struct e1000_hw *hw = &adapter->hw;
4806 bool link_active = false;
4807
4808
4809
4810
4811
4812
4813 switch (hw->phy.media_type) {
4814 case e1000_media_type_copper:
4815 if (!hw->mac.get_link_status)
4816 return true;
4817 case e1000_media_type_internal_serdes:
4818 hw->mac.ops.check_for_link(hw);
4819 link_active = !hw->mac.get_link_status;
4820 break;
4821 default:
4822 case e1000_media_type_unknown:
4823 break;
4824 }
4825
4826 if (((hw->mac.type == e1000_i210) ||
4827 (hw->mac.type == e1000_i211)) &&
4828 (hw->phy.id == I210_I_PHY_ID)) {
4829 if (!netif_carrier_ok(adapter->netdev)) {
4830 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
4831 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
4832 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
4833 adapter->link_check_timeout = jiffies;
4834 }
4835 }
4836
4837 return link_active;
4838}
4839
4840static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
4841{
4842 bool ret = false;
4843 u32 ctrl_ext, thstat;
4844
4845
4846 if (hw->mac.type == e1000_i350) {
4847 thstat = rd32(E1000_THSTAT);
4848 ctrl_ext = rd32(E1000_CTRL_EXT);
4849
4850 if ((hw->phy.media_type == e1000_media_type_copper) &&
4851 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
4852 ret = !!(thstat & event);
4853 }
4854
4855 return ret;
4856}
4857
4858
4859
4860
4861
4862
4863static void igb_check_lvmmc(struct igb_adapter *adapter)
4864{
4865 struct e1000_hw *hw = &adapter->hw;
4866 u32 lvmmc;
4867
4868 lvmmc = rd32(E1000_LVMMC);
4869 if (lvmmc) {
4870 if (unlikely(net_ratelimit())) {
4871 netdev_warn(adapter->netdev,
4872 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
4873 lvmmc);
4874 }
4875 }
4876}
4877
4878
4879
4880
4881
4882static void igb_watchdog(struct timer_list *t)
4883{
4884 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
4885
4886 schedule_work(&adapter->watchdog_task);
4887}
4888
4889static void igb_watchdog_task(struct work_struct *work)
4890{
4891 struct igb_adapter *adapter = container_of(work,
4892 struct igb_adapter,
4893 watchdog_task);
4894 struct e1000_hw *hw = &adapter->hw;
4895 struct e1000_phy_info *phy = &hw->phy;
4896 struct net_device *netdev = adapter->netdev;
4897 u32 link;
4898 int i;
4899 u32 connsw;
4900 u16 phy_data, retry_count = 20;
4901
4902 link = igb_has_link(adapter);
4903
4904 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
4905 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
4906 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
4907 else
4908 link = false;
4909 }
4910
4911
4912 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
4913 if (hw->phy.media_type == e1000_media_type_copper) {
4914 connsw = rd32(E1000_CONNSW);
4915 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
4916 link = 0;
4917 }
4918 }
4919 if (link) {
4920
4921 if (hw->dev_spec._82575.media_changed) {
4922 hw->dev_spec._82575.media_changed = false;
4923 adapter->flags |= IGB_FLAG_MEDIA_RESET;
4924 igb_reset(adapter);
4925 }
4926
4927 pm_runtime_resume(netdev->dev.parent);
4928
4929 if (!netif_carrier_ok(netdev)) {
4930 u32 ctrl;
4931
4932 hw->mac.ops.get_speed_and_duplex(hw,
4933 &adapter->link_speed,
4934 &adapter->link_duplex);
4935
4936 ctrl = rd32(E1000_CTRL);
4937
4938 netdev_info(netdev,
4939 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4940 netdev->name,
4941 adapter->link_speed,
4942 adapter->link_duplex == FULL_DUPLEX ?
4943 "Full" : "Half",
4944 (ctrl & E1000_CTRL_TFCE) &&
4945 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
4946 (ctrl & E1000_CTRL_RFCE) ? "RX" :
4947 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
4948
4949
4950 if ((adapter->flags & IGB_FLAG_EEE) &&
4951 (adapter->link_duplex == HALF_DUPLEX)) {
4952 dev_info(&adapter->pdev->dev,
4953 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
4954 adapter->hw.dev_spec._82575.eee_disable = true;
4955 adapter->flags &= ~IGB_FLAG_EEE;
4956 }
4957
4958
4959 igb_check_downshift(hw);
4960 if (phy->speed_downgraded)
4961 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
4962
4963
4964 if (igb_thermal_sensor_event(hw,
4965 E1000_THSTAT_LINK_THROTTLE))
4966 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
4967
4968
4969 adapter->tx_timeout_factor = 1;
4970 switch (adapter->link_speed) {
4971 case SPEED_10:
4972 adapter->tx_timeout_factor = 14;
4973 break;
4974 case SPEED_100:
4975
4976 break;
4977 }
4978
4979 if (adapter->link_speed != SPEED_1000)
4980 goto no_wait;
4981
4982
4983retry_read_status:
4984 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
4985 &phy_data)) {
4986 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
4987 retry_count) {
4988 msleep(100);
4989 retry_count--;
4990 goto retry_read_status;
4991 } else if (!retry_count) {
4992 dev_err(&adapter->pdev->dev, "exceed max 2 second\n");
4993 }
4994 } else {
4995 dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n");
4996 }
4997no_wait:
4998 netif_carrier_on(netdev);
4999
5000 igb_ping_all_vfs(adapter);
5001 igb_check_vf_rate_limit(adapter);
5002
5003
5004 if (!test_bit(__IGB_DOWN, &adapter->state))
5005 mod_timer(&adapter->phy_info_timer,
5006 round_jiffies(jiffies + 2 * HZ));
5007 }
5008 } else {
5009 if (netif_carrier_ok(netdev)) {
5010 adapter->link_speed = 0;
5011 adapter->link_duplex = 0;
5012
5013
5014 if (igb_thermal_sensor_event(hw,
5015 E1000_THSTAT_PWR_DOWN)) {
5016 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5017 }
5018
5019
5020 netdev_info(netdev, "igb: %s NIC Link is Down\n",
5021 netdev->name);
5022 netif_carrier_off(netdev);
5023
5024 igb_ping_all_vfs(adapter);
5025
5026
5027 if (!test_bit(__IGB_DOWN, &adapter->state))
5028 mod_timer(&adapter->phy_info_timer,
5029 round_jiffies(jiffies + 2 * HZ));
5030
5031
5032 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5033 igb_check_swap_media(adapter);
5034 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5035 schedule_work(&adapter->reset_task);
5036
5037 return;
5038 }
5039 }
5040 pm_schedule_suspend(netdev->dev.parent,
5041 MSEC_PER_SEC * 5);
5042
5043
5044 } else if (!netif_carrier_ok(netdev) &&
5045 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5046 igb_check_swap_media(adapter);
5047 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5048 schedule_work(&adapter->reset_task);
5049
5050 return;
5051 }
5052 }
5053 }
5054
5055 spin_lock(&adapter->stats64_lock);
5056 igb_update_stats(adapter);
5057 spin_unlock(&adapter->stats64_lock);
5058
5059 for (i = 0; i < adapter->num_tx_queues; i++) {
5060 struct igb_ring *tx_ring = adapter->tx_ring[i];
5061 if (!netif_carrier_ok(netdev)) {
5062
5063
5064
5065
5066
5067 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5068 adapter->tx_timeout_count++;
5069 schedule_work(&adapter->reset_task);
5070
5071 return;
5072 }
5073 }
5074
5075
5076 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5077 }
5078
5079
5080 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5081 u32 eics = 0;
5082
5083 for (i = 0; i < adapter->num_q_vectors; i++)
5084 eics |= adapter->q_vector[i]->eims_value;
5085 wr32(E1000_EICS, eics);
5086 } else {
5087 wr32(E1000_ICS, E1000_ICS_RXDMT0);
5088 }
5089
5090 igb_spoof_check(adapter);
5091 igb_ptp_rx_hang(adapter);
5092 igb_ptp_tx_hang(adapter);
5093
5094
5095 if ((adapter->hw.mac.type == e1000_i350) ||
5096 (adapter->hw.mac.type == e1000_i354))
5097 igb_check_lvmmc(adapter);
5098
5099
5100 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5101 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5102 mod_timer(&adapter->watchdog_timer,
5103 round_jiffies(jiffies + HZ));
5104 else
5105 mod_timer(&adapter->watchdog_timer,
5106 round_jiffies(jiffies + 2 * HZ));
5107 }
5108}
5109
5110enum latency_range {
5111 lowest_latency = 0,
5112 low_latency = 1,
5113 bulk_latency = 2,
5114 latency_invalid = 255
5115};
5116
5117
5118
5119
5120
5121
5122
5123
5124
5125
5126
5127
5128
5129
5130
5131
5132static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5133{
5134 int new_val = q_vector->itr_val;
5135 int avg_wire_size = 0;
5136 struct igb_adapter *adapter = q_vector->adapter;
5137 unsigned int packets;
5138
5139
5140
5141
5142 if (adapter->link_speed != SPEED_1000) {
5143 new_val = IGB_4K_ITR;
5144 goto set_itr_val;
5145 }
5146
5147 packets = q_vector->rx.total_packets;
5148 if (packets)
5149 avg_wire_size = q_vector->rx.total_bytes / packets;
5150
5151 packets = q_vector->tx.total_packets;
5152 if (packets)
5153 avg_wire_size = max_t(u32, avg_wire_size,
5154 q_vector->tx.total_bytes / packets);
5155
5156
5157 if (!avg_wire_size)
5158 goto clear_counts;
5159
5160
5161 avg_wire_size += 24;
5162
5163
5164 avg_wire_size = min(avg_wire_size, 3000);
5165
5166
5167 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5168 new_val = avg_wire_size / 3;
5169 else
5170 new_val = avg_wire_size / 2;
5171
5172
5173 if (new_val < IGB_20K_ITR &&
5174 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5175 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5176 new_val = IGB_20K_ITR;
5177
5178set_itr_val:
5179 if (new_val != q_vector->itr_val) {
5180 q_vector->itr_val = new_val;
5181 q_vector->set_itr = 1;
5182 }
5183clear_counts:
5184 q_vector->rx.total_bytes = 0;
5185 q_vector->rx.total_packets = 0;
5186 q_vector->tx.total_bytes = 0;
5187 q_vector->tx.total_packets = 0;
5188}
5189
5190
5191
5192
5193
5194
5195
5196
5197
5198
5199
5200
5201
5202
5203
5204
5205
5206static void igb_update_itr(struct igb_q_vector *q_vector,
5207 struct igb_ring_container *ring_container)
5208{
5209 unsigned int packets = ring_container->total_packets;
5210 unsigned int bytes = ring_container->total_bytes;
5211 u8 itrval = ring_container->itr;
5212
5213
5214 if (packets == 0)
5215 return;
5216
5217 switch (itrval) {
5218 case lowest_latency:
5219
5220 if (bytes/packets > 8000)
5221 itrval = bulk_latency;
5222 else if ((packets < 5) && (bytes > 512))
5223 itrval = low_latency;
5224 break;
5225 case low_latency:
5226 if (bytes > 10000) {
5227
5228 if (bytes/packets > 8000)
5229 itrval = bulk_latency;
5230 else if ((packets < 10) || ((bytes/packets) > 1200))
5231 itrval = bulk_latency;
5232 else if ((packets > 35))
5233 itrval = lowest_latency;
5234 } else if (bytes/packets > 2000) {
5235 itrval = bulk_latency;
5236 } else if (packets <= 2 && bytes < 512) {
5237 itrval = lowest_latency;
5238 }
5239 break;
5240 case bulk_latency:
5241 if (bytes > 25000) {
5242 if (packets > 35)
5243 itrval = low_latency;
5244 } else if (bytes < 1500) {
5245 itrval = low_latency;
5246 }
5247 break;
5248 }
5249
5250
5251 ring_container->total_bytes = 0;
5252 ring_container->total_packets = 0;
5253
5254
5255 ring_container->itr = itrval;
5256}
5257
5258static void igb_set_itr(struct igb_q_vector *q_vector)
5259{
5260 struct igb_adapter *adapter = q_vector->adapter;
5261 u32 new_itr = q_vector->itr_val;
5262 u8 current_itr = 0;
5263
5264
5265 if (adapter->link_speed != SPEED_1000) {
5266 current_itr = 0;
5267 new_itr = IGB_4K_ITR;
5268 goto set_itr_now;
5269 }
5270
5271 igb_update_itr(q_vector, &q_vector->tx);
5272 igb_update_itr(q_vector, &q_vector->rx);
5273
5274 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5275
5276
5277 if (current_itr == lowest_latency &&
5278 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5279 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5280 current_itr = low_latency;
5281
5282 switch (current_itr) {
5283
5284 case lowest_latency:
5285 new_itr = IGB_70K_ITR;
5286 break;
5287 case low_latency:
5288 new_itr = IGB_20K_ITR;
5289 break;
5290 case bulk_latency:
5291 new_itr = IGB_4K_ITR;
5292 break;
5293 default:
5294 break;
5295 }
5296
5297set_itr_now:
5298 if (new_itr != q_vector->itr_val) {
5299
5300
5301
5302
5303 new_itr = new_itr > q_vector->itr_val ?
5304 max((new_itr * q_vector->itr_val) /
5305 (new_itr + (q_vector->itr_val >> 2)),
5306 new_itr) : new_itr;
5307
5308
5309
5310
5311
5312
5313 q_vector->itr_val = new_itr;
5314 q_vector->set_itr = 1;
5315 }
5316}
5317
5318static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
5319 u32 type_tucmd, u32 mss_l4len_idx)
5320{
5321 struct e1000_adv_tx_context_desc *context_desc;
5322 u16 i = tx_ring->next_to_use;
5323
5324 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5325
5326 i++;
5327 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5328
5329
5330 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5331
5332
5333 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5334 mss_l4len_idx |= tx_ring->reg_idx << 4;
5335
5336 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5337 context_desc->seqnum_seed = 0;
5338 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
5339 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5340}
5341
5342static int igb_tso(struct igb_ring *tx_ring,
5343 struct igb_tx_buffer *first,
5344 u8 *hdr_len)
5345{
5346 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5347 struct sk_buff *skb = first->skb;
5348 union {
5349 struct iphdr *v4;
5350 struct ipv6hdr *v6;
5351 unsigned char *hdr;
5352 } ip;
5353 union {
5354 struct tcphdr *tcp;
5355 unsigned char *hdr;
5356 } l4;
5357 u32 paylen, l4_offset;
5358 int err;
5359
5360 if (skb->ip_summed != CHECKSUM_PARTIAL)
5361 return 0;
5362
5363 if (!skb_is_gso(skb))
5364 return 0;
5365
5366 err = skb_cow_head(skb, 0);
5367 if (err < 0)
5368 return err;
5369
5370 ip.hdr = skb_network_header(skb);
5371 l4.hdr = skb_checksum_start(skb);
5372
5373
5374 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5375
5376
5377 if (ip.v4->version == 4) {
5378 unsigned char *csum_start = skb_checksum_start(skb);
5379 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
5380
5381
5382
5383
5384 ip.v4->check = csum_fold(csum_partial(trans_start,
5385 csum_start - trans_start,
5386 0));
5387 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
5388
5389 ip.v4->tot_len = 0;
5390 first->tx_flags |= IGB_TX_FLAGS_TSO |
5391 IGB_TX_FLAGS_CSUM |
5392 IGB_TX_FLAGS_IPV4;
5393 } else {
5394 ip.v6->payload_len = 0;
5395 first->tx_flags |= IGB_TX_FLAGS_TSO |
5396 IGB_TX_FLAGS_CSUM;
5397 }
5398
5399
5400 l4_offset = l4.hdr - skb->data;
5401
5402
5403 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
5404
5405
5406 paylen = skb->len - l4_offset;
5407 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
5408
5409
5410 first->gso_segs = skb_shinfo(skb)->gso_segs;
5411 first->bytecount += (first->gso_segs - 1) * *hdr_len;
5412
5413
5414 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
5415 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
5416
5417
5418 vlan_macip_lens = l4.hdr - ip.hdr;
5419 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
5420 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5421
5422 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
5423
5424 return 1;
5425}
5426
5427static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
5428{
5429 unsigned int offset = 0;
5430
5431 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
5432
5433 return offset == skb_checksum_start_offset(skb);
5434}
5435
5436static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5437{
5438 struct sk_buff *skb = first->skb;
5439 u32 vlan_macip_lens = 0;
5440 u32 type_tucmd = 0;
5441
5442 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5443csum_failed:
5444 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
5445 return;
5446 goto no_csum;
5447 }
5448
5449 switch (skb->csum_offset) {
5450 case offsetof(struct tcphdr, check):
5451 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5452
5453 case offsetof(struct udphdr, check):
5454 break;
5455 case offsetof(struct sctphdr, checksum):
5456
5457 if (((first->protocol == htons(ETH_P_IP)) &&
5458 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
5459 ((first->protocol == htons(ETH_P_IPV6)) &&
5460 igb_ipv6_csum_is_sctp(skb))) {
5461 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
5462 break;
5463 }
5464 default:
5465 skb_checksum_help(skb);
5466 goto csum_failed;
5467 }
5468
5469
5470 first->tx_flags |= IGB_TX_FLAGS_CSUM;
5471 vlan_macip_lens = skb_checksum_start_offset(skb) -
5472 skb_network_offset(skb);
5473no_csum:
5474 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
5475 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5476
5477 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
5478}
5479
5480#define IGB_SET_FLAG(_input, _flag, _result) \
5481 ((_flag <= _result) ? \
5482 ((u32)(_input & _flag) * (_result / _flag)) : \
5483 ((u32)(_input & _flag) / (_flag / _result)))
5484
5485static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
5486{
5487
5488 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
5489 E1000_ADVTXD_DCMD_DEXT |
5490 E1000_ADVTXD_DCMD_IFCS;
5491
5492
5493 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
5494 (E1000_ADVTXD_DCMD_VLE));
5495
5496
5497 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
5498 (E1000_ADVTXD_DCMD_TSE));
5499
5500
5501 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
5502 (E1000_ADVTXD_MAC_TSTAMP));
5503
5504
5505 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
5506
5507 return cmd_type;
5508}
5509
5510static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
5511 union e1000_adv_tx_desc *tx_desc,
5512 u32 tx_flags, unsigned int paylen)
5513{
5514 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
5515
5516
5517 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5518 olinfo_status |= tx_ring->reg_idx << 4;
5519
5520
5521 olinfo_status |= IGB_SET_FLAG(tx_flags,
5522 IGB_TX_FLAGS_CSUM,
5523 (E1000_TXD_POPTS_TXSM << 8));
5524
5525
5526 olinfo_status |= IGB_SET_FLAG(tx_flags,
5527 IGB_TX_FLAGS_IPV4,
5528 (E1000_TXD_POPTS_IXSM << 8));
5529
5530 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
5531}
5532
5533static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5534{
5535 struct net_device *netdev = tx_ring->netdev;
5536
5537 netif_stop_subqueue(netdev, tx_ring->queue_index);
5538
5539
5540
5541
5542
5543 smp_mb();
5544
5545
5546
5547
5548 if (igb_desc_unused(tx_ring) < size)
5549 return -EBUSY;
5550
5551
5552 netif_wake_subqueue(netdev, tx_ring->queue_index);
5553
5554 u64_stats_update_begin(&tx_ring->tx_syncp2);
5555 tx_ring->tx_stats.restart_queue2++;
5556 u64_stats_update_end(&tx_ring->tx_syncp2);
5557
5558 return 0;
5559}
5560
5561static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5562{
5563 if (igb_desc_unused(tx_ring) >= size)
5564 return 0;
5565 return __igb_maybe_stop_tx(tx_ring, size);
5566}
5567
5568static int igb_tx_map(struct igb_ring *tx_ring,
5569 struct igb_tx_buffer *first,
5570 const u8 hdr_len)
5571{
5572 struct sk_buff *skb = first->skb;
5573 struct igb_tx_buffer *tx_buffer;
5574 union e1000_adv_tx_desc *tx_desc;
5575 struct skb_frag_struct *frag;
5576 dma_addr_t dma;
5577 unsigned int data_len, size;
5578 u32 tx_flags = first->tx_flags;
5579 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
5580 u16 i = tx_ring->next_to_use;
5581
5582 tx_desc = IGB_TX_DESC(tx_ring, i);
5583
5584 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
5585
5586 size = skb_headlen(skb);
5587 data_len = skb->data_len;
5588
5589 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
5590
5591 tx_buffer = first;
5592
5593 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
5594 if (dma_mapping_error(tx_ring->dev, dma))
5595 goto dma_error;
5596
5597
5598 dma_unmap_len_set(tx_buffer, len, size);
5599 dma_unmap_addr_set(tx_buffer, dma, dma);
5600
5601 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5602
5603 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
5604 tx_desc->read.cmd_type_len =
5605 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
5606
5607 i++;
5608 tx_desc++;
5609 if (i == tx_ring->count) {
5610 tx_desc = IGB_TX_DESC(tx_ring, 0);
5611 i = 0;
5612 }
5613 tx_desc->read.olinfo_status = 0;
5614
5615 dma += IGB_MAX_DATA_PER_TXD;
5616 size -= IGB_MAX_DATA_PER_TXD;
5617
5618 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5619 }
5620
5621 if (likely(!data_len))
5622 break;
5623
5624 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
5625
5626 i++;
5627 tx_desc++;
5628 if (i == tx_ring->count) {
5629 tx_desc = IGB_TX_DESC(tx_ring, 0);
5630 i = 0;
5631 }
5632 tx_desc->read.olinfo_status = 0;
5633
5634 size = skb_frag_size(frag);
5635 data_len -= size;
5636
5637 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
5638 size, DMA_TO_DEVICE);
5639
5640 tx_buffer = &tx_ring->tx_buffer_info[i];
5641 }
5642
5643
5644 cmd_type |= size | IGB_TXD_DCMD;
5645 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
5646
5647 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
5648
5649
5650 first->time_stamp = jiffies;
5651
5652
5653
5654
5655
5656
5657
5658
5659 wmb();
5660
5661
5662 first->next_to_watch = tx_desc;
5663
5664 i++;
5665 if (i == tx_ring->count)
5666 i = 0;
5667
5668 tx_ring->next_to_use = i;
5669
5670
5671 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
5672
5673 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
5674 writel(i, tx_ring->tail);
5675
5676
5677
5678
5679 mmiowb();
5680 }
5681 return 0;
5682
5683dma_error:
5684 dev_err(tx_ring->dev, "TX DMA map failed\n");
5685 tx_buffer = &tx_ring->tx_buffer_info[i];
5686
5687
5688 while (tx_buffer != first) {
5689 if (dma_unmap_len(tx_buffer, len))
5690 dma_unmap_page(tx_ring->dev,
5691 dma_unmap_addr(tx_buffer, dma),
5692 dma_unmap_len(tx_buffer, len),
5693 DMA_TO_DEVICE);
5694 dma_unmap_len_set(tx_buffer, len, 0);
5695
5696 if (i-- == 0)
5697 i += tx_ring->count;
5698 tx_buffer = &tx_ring->tx_buffer_info[i];
5699 }
5700
5701 if (dma_unmap_len(tx_buffer, len))
5702 dma_unmap_single(tx_ring->dev,
5703 dma_unmap_addr(tx_buffer, dma),
5704 dma_unmap_len(tx_buffer, len),
5705 DMA_TO_DEVICE);
5706 dma_unmap_len_set(tx_buffer, len, 0);
5707
5708 dev_kfree_skb_any(tx_buffer->skb);
5709 tx_buffer->skb = NULL;
5710
5711 tx_ring->next_to_use = i;
5712
5713 return -1;
5714}
5715
5716netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
5717 struct igb_ring *tx_ring)
5718{
5719 struct igb_tx_buffer *first;
5720 int tso;
5721 u32 tx_flags = 0;
5722 unsigned short f;
5723 u16 count = TXD_USE_COUNT(skb_headlen(skb));
5724 __be16 protocol = vlan_get_protocol(skb);
5725 u8 hdr_len = 0;
5726
5727
5728
5729
5730
5731
5732
5733 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
5734 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
5735
5736 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
5737
5738 return NETDEV_TX_BUSY;
5739 }
5740
5741
5742 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
5743 first->skb = skb;
5744 first->bytecount = skb->len;
5745 first->gso_segs = 1;
5746
5747 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
5748 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
5749
5750 if (adapter->tstamp_config.tx_type & HWTSTAMP_TX_ON &&
5751 !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
5752 &adapter->state)) {
5753 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5754 tx_flags |= IGB_TX_FLAGS_TSTAMP;
5755
5756 adapter->ptp_tx_skb = skb_get(skb);
5757 adapter->ptp_tx_start = jiffies;
5758 if (adapter->hw.mac.type == e1000_82576)
5759 schedule_work(&adapter->ptp_tx_work);
5760 } else {
5761 adapter->tx_hwtstamp_skipped++;
5762 }
5763 }
5764
5765 skb_tx_timestamp(skb);
5766
5767 if (skb_vlan_tag_present(skb)) {
5768 tx_flags |= IGB_TX_FLAGS_VLAN;
5769 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
5770 }
5771
5772
5773 first->tx_flags = tx_flags;
5774 first->protocol = protocol;
5775
5776 tso = igb_tso(tx_ring, first, &hdr_len);
5777 if (tso < 0)
5778 goto out_drop;
5779 else if (!tso)
5780 igb_tx_csum(tx_ring, first);
5781
5782 if (igb_tx_map(tx_ring, first, hdr_len))
5783 goto cleanup_tx_tstamp;
5784
5785 return NETDEV_TX_OK;
5786
5787out_drop:
5788 dev_kfree_skb_any(first->skb);
5789 first->skb = NULL;
5790cleanup_tx_tstamp:
5791 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
5792 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
5793
5794 dev_kfree_skb_any(adapter->ptp_tx_skb);
5795 adapter->ptp_tx_skb = NULL;
5796 if (adapter->hw.mac.type == e1000_82576)
5797 cancel_work_sync(&adapter->ptp_tx_work);
5798 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
5799 }
5800
5801 return NETDEV_TX_OK;
5802}
5803
5804static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
5805 struct sk_buff *skb)
5806{
5807 unsigned int r_idx = skb->queue_mapping;
5808
5809 if (r_idx >= adapter->num_tx_queues)
5810 r_idx = r_idx % adapter->num_tx_queues;
5811
5812 return adapter->tx_ring[r_idx];
5813}
5814
5815static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
5816 struct net_device *netdev)
5817{
5818 struct igb_adapter *adapter = netdev_priv(netdev);
5819
5820
5821
5822
5823 if (skb_put_padto(skb, 17))
5824 return NETDEV_TX_OK;
5825
5826 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
5827}
5828
5829
5830
5831
5832
5833static void igb_tx_timeout(struct net_device *netdev)
5834{
5835 struct igb_adapter *adapter = netdev_priv(netdev);
5836 struct e1000_hw *hw = &adapter->hw;
5837
5838
5839 adapter->tx_timeout_count++;
5840
5841 if (hw->mac.type >= e1000_82580)
5842 hw->dev_spec._82575.global_device_reset = true;
5843
5844 schedule_work(&adapter->reset_task);
5845 wr32(E1000_EICS,
5846 (adapter->eims_enable_mask & ~adapter->eims_other));
5847}
5848
5849static void igb_reset_task(struct work_struct *work)
5850{
5851 struct igb_adapter *adapter;
5852 adapter = container_of(work, struct igb_adapter, reset_task);
5853
5854 igb_dump(adapter);
5855 netdev_err(adapter->netdev, "Reset adapter\n");
5856 igb_reinit_locked(adapter);
5857}
5858
5859
5860
5861
5862
5863
5864static void igb_get_stats64(struct net_device *netdev,
5865 struct rtnl_link_stats64 *stats)
5866{
5867 struct igb_adapter *adapter = netdev_priv(netdev);
5868
5869 spin_lock(&adapter->stats64_lock);
5870 igb_update_stats(adapter);
5871 memcpy(stats, &adapter->stats64, sizeof(*stats));
5872 spin_unlock(&adapter->stats64_lock);
5873}
5874
5875
5876
5877
5878
5879
5880
5881
5882static int igb_change_mtu(struct net_device *netdev, int new_mtu)
5883{
5884 struct igb_adapter *adapter = netdev_priv(netdev);
5885 struct pci_dev *pdev = adapter->pdev;
5886 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
5887
5888
5889 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
5890 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
5891
5892 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
5893 usleep_range(1000, 2000);
5894
5895
5896 adapter->max_frame_size = max_frame;
5897
5898 if (netif_running(netdev))
5899 igb_down(adapter);
5900
5901 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
5902 netdev->mtu, new_mtu);
5903 netdev->mtu = new_mtu;
5904
5905 if (netif_running(netdev))
5906 igb_up(adapter);
5907 else
5908 igb_reset(adapter);
5909
5910 clear_bit(__IGB_RESETTING, &adapter->state);
5911
5912 return 0;
5913}
5914
5915
5916
5917
5918
5919void igb_update_stats(struct igb_adapter *adapter)
5920{
5921 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
5922 struct e1000_hw *hw = &adapter->hw;
5923 struct pci_dev *pdev = adapter->pdev;
5924 u32 reg, mpc;
5925 int i;
5926 u64 bytes, packets;
5927 unsigned int start;
5928 u64 _bytes, _packets;
5929
5930
5931
5932
5933 if (adapter->link_speed == 0)
5934 return;
5935 if (pci_channel_offline(pdev))
5936 return;
5937
5938 bytes = 0;
5939 packets = 0;
5940
5941 rcu_read_lock();
5942 for (i = 0; i < adapter->num_rx_queues; i++) {
5943 struct igb_ring *ring = adapter->rx_ring[i];
5944 u32 rqdpc = rd32(E1000_RQDPC(i));
5945 if (hw->mac.type >= e1000_i210)
5946 wr32(E1000_RQDPC(i), 0);
5947
5948 if (rqdpc) {
5949 ring->rx_stats.drops += rqdpc;
5950 net_stats->rx_fifo_errors += rqdpc;
5951 }
5952
5953 do {
5954 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
5955 _bytes = ring->rx_stats.bytes;
5956 _packets = ring->rx_stats.packets;
5957 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
5958 bytes += _bytes;
5959 packets += _packets;
5960 }
5961
5962 net_stats->rx_bytes = bytes;
5963 net_stats->rx_packets = packets;
5964
5965 bytes = 0;
5966 packets = 0;
5967 for (i = 0; i < adapter->num_tx_queues; i++) {
5968 struct igb_ring *ring = adapter->tx_ring[i];
5969 do {
5970 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
5971 _bytes = ring->tx_stats.bytes;
5972 _packets = ring->tx_stats.packets;
5973 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
5974 bytes += _bytes;
5975 packets += _packets;
5976 }
5977 net_stats->tx_bytes = bytes;
5978 net_stats->tx_packets = packets;
5979 rcu_read_unlock();
5980
5981
5982 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
5983 adapter->stats.gprc += rd32(E1000_GPRC);
5984 adapter->stats.gorc += rd32(E1000_GORCL);
5985 rd32(E1000_GORCH);
5986 adapter->stats.bprc += rd32(E1000_BPRC);
5987 adapter->stats.mprc += rd32(E1000_MPRC);
5988 adapter->stats.roc += rd32(E1000_ROC);
5989
5990 adapter->stats.prc64 += rd32(E1000_PRC64);
5991 adapter->stats.prc127 += rd32(E1000_PRC127);
5992 adapter->stats.prc255 += rd32(E1000_PRC255);
5993 adapter->stats.prc511 += rd32(E1000_PRC511);
5994 adapter->stats.prc1023 += rd32(E1000_PRC1023);
5995 adapter->stats.prc1522 += rd32(E1000_PRC1522);
5996 adapter->stats.symerrs += rd32(E1000_SYMERRS);
5997 adapter->stats.sec += rd32(E1000_SEC);
5998
5999 mpc = rd32(E1000_MPC);
6000 adapter->stats.mpc += mpc;
6001 net_stats->rx_fifo_errors += mpc;
6002 adapter->stats.scc += rd32(E1000_SCC);
6003 adapter->stats.ecol += rd32(E1000_ECOL);
6004 adapter->stats.mcc += rd32(E1000_MCC);
6005 adapter->stats.latecol += rd32(E1000_LATECOL);
6006 adapter->stats.dc += rd32(E1000_DC);
6007 adapter->stats.rlec += rd32(E1000_RLEC);
6008 adapter->stats.xonrxc += rd32(E1000_XONRXC);
6009 adapter->stats.xontxc += rd32(E1000_XONTXC);
6010 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6011 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6012 adapter->stats.fcruc += rd32(E1000_FCRUC);
6013 adapter->stats.gptc += rd32(E1000_GPTC);
6014 adapter->stats.gotc += rd32(E1000_GOTCL);
6015 rd32(E1000_GOTCH);
6016 adapter->stats.rnbc += rd32(E1000_RNBC);
6017 adapter->stats.ruc += rd32(E1000_RUC);
6018 adapter->stats.rfc += rd32(E1000_RFC);
6019 adapter->stats.rjc += rd32(E1000_RJC);
6020 adapter->stats.tor += rd32(E1000_TORH);
6021 adapter->stats.tot += rd32(E1000_TOTH);
6022 adapter->stats.tpr += rd32(E1000_TPR);
6023
6024 adapter->stats.ptc64 += rd32(E1000_PTC64);
6025 adapter->stats.ptc127 += rd32(E1000_PTC127);
6026 adapter->stats.ptc255 += rd32(E1000_PTC255);
6027 adapter->stats.ptc511 += rd32(E1000_PTC511);
6028 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6029 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6030
6031 adapter->stats.mptc += rd32(E1000_MPTC);
6032 adapter->stats.bptc += rd32(E1000_BPTC);
6033
6034 adapter->stats.tpt += rd32(E1000_TPT);
6035 adapter->stats.colc += rd32(E1000_COLC);
6036
6037 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6038
6039 reg = rd32(E1000_CTRL_EXT);
6040 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6041 adapter->stats.rxerrc += rd32(E1000_RXERRC);
6042
6043
6044 if ((hw->mac.type != e1000_i210) &&
6045 (hw->mac.type != e1000_i211))
6046 adapter->stats.tncrs += rd32(E1000_TNCRS);
6047 }
6048
6049 adapter->stats.tsctc += rd32(E1000_TSCTC);
6050 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6051
6052 adapter->stats.iac += rd32(E1000_IAC);
6053 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6054 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6055 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6056 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6057 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6058 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6059 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
6060 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
6061
6062
6063 net_stats->multicast = adapter->stats.mprc;
6064 net_stats->collisions = adapter->stats.colc;
6065
6066
6067
6068
6069
6070
6071 net_stats->rx_errors = adapter->stats.rxerrc +
6072 adapter->stats.crcerrs + adapter->stats.algnerrc +
6073 adapter->stats.ruc + adapter->stats.roc +
6074 adapter->stats.cexterr;
6075 net_stats->rx_length_errors = adapter->stats.ruc +
6076 adapter->stats.roc;
6077 net_stats->rx_crc_errors = adapter->stats.crcerrs;
6078 net_stats->rx_frame_errors = adapter->stats.algnerrc;
6079 net_stats->rx_missed_errors = adapter->stats.mpc;
6080
6081
6082 net_stats->tx_errors = adapter->stats.ecol +
6083 adapter->stats.latecol;
6084 net_stats->tx_aborted_errors = adapter->stats.ecol;
6085 net_stats->tx_window_errors = adapter->stats.latecol;
6086 net_stats->tx_carrier_errors = adapter->stats.tncrs;
6087
6088
6089
6090
6091 adapter->stats.mgptc += rd32(E1000_MGTPTC);
6092 adapter->stats.mgprc += rd32(E1000_MGTPRC);
6093 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
6094
6095
6096 reg = rd32(E1000_MANC);
6097 if (reg & E1000_MANC_EN_BMC2OS) {
6098 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6099 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6100 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6101 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6102 }
6103}
6104
6105static void igb_tsync_interrupt(struct igb_adapter *adapter)
6106{
6107 struct e1000_hw *hw = &adapter->hw;
6108 struct ptp_clock_event event;
6109 struct timespec64 ts;
6110 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
6111
6112 if (tsicr & TSINTR_SYS_WRAP) {
6113 event.type = PTP_CLOCK_PPS;
6114 if (adapter->ptp_caps.pps)
6115 ptp_clock_event(adapter->ptp_clock, &event);
6116 ack |= TSINTR_SYS_WRAP;
6117 }
6118
6119 if (tsicr & E1000_TSICR_TXTS) {
6120
6121 schedule_work(&adapter->ptp_tx_work);
6122 ack |= E1000_TSICR_TXTS;
6123 }
6124
6125 if (tsicr & TSINTR_TT0) {
6126 spin_lock(&adapter->tmreg_lock);
6127 ts = timespec64_add(adapter->perout[0].start,
6128 adapter->perout[0].period);
6129
6130 wr32(E1000_TRGTTIML0, ts.tv_nsec);
6131 wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
6132 tsauxc = rd32(E1000_TSAUXC);
6133 tsauxc |= TSAUXC_EN_TT0;
6134 wr32(E1000_TSAUXC, tsauxc);
6135 adapter->perout[0].start = ts;
6136 spin_unlock(&adapter->tmreg_lock);
6137 ack |= TSINTR_TT0;
6138 }
6139
6140 if (tsicr & TSINTR_TT1) {
6141 spin_lock(&adapter->tmreg_lock);
6142 ts = timespec64_add(adapter->perout[1].start,
6143 adapter->perout[1].period);
6144 wr32(E1000_TRGTTIML1, ts.tv_nsec);
6145 wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
6146 tsauxc = rd32(E1000_TSAUXC);
6147 tsauxc |= TSAUXC_EN_TT1;
6148 wr32(E1000_TSAUXC, tsauxc);
6149 adapter->perout[1].start = ts;
6150 spin_unlock(&adapter->tmreg_lock);
6151 ack |= TSINTR_TT1;
6152 }
6153
6154 if (tsicr & TSINTR_AUTT0) {
6155 nsec = rd32(E1000_AUXSTMPL0);
6156 sec = rd32(E1000_AUXSTMPH0);
6157 event.type = PTP_CLOCK_EXTTS;
6158 event.index = 0;
6159 event.timestamp = sec * 1000000000ULL + nsec;
6160 ptp_clock_event(adapter->ptp_clock, &event);
6161 ack |= TSINTR_AUTT0;
6162 }
6163
6164 if (tsicr & TSINTR_AUTT1) {
6165 nsec = rd32(E1000_AUXSTMPL1);
6166 sec = rd32(E1000_AUXSTMPH1);
6167 event.type = PTP_CLOCK_EXTTS;
6168 event.index = 1;
6169 event.timestamp = sec * 1000000000ULL + nsec;
6170 ptp_clock_event(adapter->ptp_clock, &event);
6171 ack |= TSINTR_AUTT1;
6172 }
6173
6174
6175 wr32(E1000_TSICR, ack);
6176}
6177
6178static irqreturn_t igb_msix_other(int irq, void *data)
6179{
6180 struct igb_adapter *adapter = data;
6181 struct e1000_hw *hw = &adapter->hw;
6182 u32 icr = rd32(E1000_ICR);
6183
6184
6185 if (icr & E1000_ICR_DRSTA)
6186 schedule_work(&adapter->reset_task);
6187
6188 if (icr & E1000_ICR_DOUTSYNC) {
6189
6190 adapter->stats.doosync++;
6191
6192
6193
6194
6195 igb_check_wvbr(adapter);
6196 }
6197
6198
6199 if (icr & E1000_ICR_VMMB)
6200 igb_msg_task(adapter);
6201
6202 if (icr & E1000_ICR_LSC) {
6203 hw->mac.get_link_status = 1;
6204
6205 if (!test_bit(__IGB_DOWN, &adapter->state))
6206 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6207 }
6208
6209 if (icr & E1000_ICR_TS)
6210 igb_tsync_interrupt(adapter);
6211
6212 wr32(E1000_EIMS, adapter->eims_other);
6213
6214 return IRQ_HANDLED;
6215}
6216
6217static void igb_write_itr(struct igb_q_vector *q_vector)
6218{
6219 struct igb_adapter *adapter = q_vector->adapter;
6220 u32 itr_val = q_vector->itr_val & 0x7FFC;
6221
6222 if (!q_vector->set_itr)
6223 return;
6224
6225 if (!itr_val)
6226 itr_val = 0x4;
6227
6228 if (adapter->hw.mac.type == e1000_82575)
6229 itr_val |= itr_val << 16;
6230 else
6231 itr_val |= E1000_EITR_CNT_IGNR;
6232
6233 writel(itr_val, q_vector->itr_register);
6234 q_vector->set_itr = 0;
6235}
6236
6237static irqreturn_t igb_msix_ring(int irq, void *data)
6238{
6239 struct igb_q_vector *q_vector = data;
6240
6241
6242 igb_write_itr(q_vector);
6243
6244 napi_schedule(&q_vector->napi);
6245
6246 return IRQ_HANDLED;
6247}
6248
6249#ifdef CONFIG_IGB_DCA
6250static void igb_update_tx_dca(struct igb_adapter *adapter,
6251 struct igb_ring *tx_ring,
6252 int cpu)
6253{
6254 struct e1000_hw *hw = &adapter->hw;
6255 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
6256
6257 if (hw->mac.type != e1000_82575)
6258 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
6259
6260
6261
6262
6263
6264 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
6265 E1000_DCA_TXCTRL_DATA_RRO_EN |
6266 E1000_DCA_TXCTRL_DESC_DCA_EN;
6267
6268 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
6269}
6270
6271static void igb_update_rx_dca(struct igb_adapter *adapter,
6272 struct igb_ring *rx_ring,
6273 int cpu)
6274{
6275 struct e1000_hw *hw = &adapter->hw;
6276 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
6277
6278 if (hw->mac.type != e1000_82575)
6279 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
6280
6281
6282
6283
6284
6285 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
6286 E1000_DCA_RXCTRL_DESC_DCA_EN;
6287
6288 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
6289}
6290
6291static void igb_update_dca(struct igb_q_vector *q_vector)
6292{
6293 struct igb_adapter *adapter = q_vector->adapter;
6294 int cpu = get_cpu();
6295
6296 if (q_vector->cpu == cpu)
6297 goto out_no_update;
6298
6299 if (q_vector->tx.ring)
6300 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
6301
6302 if (q_vector->rx.ring)
6303 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
6304
6305 q_vector->cpu = cpu;
6306out_no_update:
6307 put_cpu();
6308}
6309
6310static void igb_setup_dca(struct igb_adapter *adapter)
6311{
6312 struct e1000_hw *hw = &adapter->hw;
6313 int i;
6314
6315 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
6316 return;
6317
6318
6319 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
6320
6321 for (i = 0; i < adapter->num_q_vectors; i++) {
6322 adapter->q_vector[i]->cpu = -1;
6323 igb_update_dca(adapter->q_vector[i]);
6324 }
6325}
6326
6327static int __igb_notify_dca(struct device *dev, void *data)
6328{
6329 struct net_device *netdev = dev_get_drvdata(dev);
6330 struct igb_adapter *adapter = netdev_priv(netdev);
6331 struct pci_dev *pdev = adapter->pdev;
6332 struct e1000_hw *hw = &adapter->hw;
6333 unsigned long event = *(unsigned long *)data;
6334
6335 switch (event) {
6336 case DCA_PROVIDER_ADD:
6337
6338 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
6339 break;
6340 if (dca_add_requester(dev) == 0) {
6341 adapter->flags |= IGB_FLAG_DCA_ENABLED;
6342 dev_info(&pdev->dev, "DCA enabled\n");
6343 igb_setup_dca(adapter);
6344 break;
6345 }
6346
6347 case DCA_PROVIDER_REMOVE:
6348 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
6349
6350
6351
6352 dca_remove_requester(dev);
6353 dev_info(&pdev->dev, "DCA disabled\n");
6354 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
6355 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
6356 }
6357 break;
6358 }
6359
6360 return 0;
6361}
6362
6363static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
6364 void *p)
6365{
6366 int ret_val;
6367
6368 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
6369 __igb_notify_dca);
6370
6371 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
6372}
6373#endif
6374
6375#ifdef CONFIG_PCI_IOV
6376static int igb_vf_configure(struct igb_adapter *adapter, int vf)
6377{
6378 unsigned char mac_addr[ETH_ALEN];
6379
6380 eth_zero_addr(mac_addr);
6381 igb_set_vf_mac(adapter, vf, mac_addr);
6382
6383
6384 adapter->vf_data[vf].spoofchk_enabled = true;
6385
6386 return 0;
6387}
6388
6389#endif
6390static void igb_ping_all_vfs(struct igb_adapter *adapter)
6391{
6392 struct e1000_hw *hw = &adapter->hw;
6393 u32 ping;
6394 int i;
6395
6396 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
6397 ping = E1000_PF_CONTROL_MSG;
6398 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
6399 ping |= E1000_VT_MSGTYPE_CTS;
6400 igb_write_mbx(hw, &ping, 1, i);
6401 }
6402}
6403
6404static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
6405{
6406 struct e1000_hw *hw = &adapter->hw;
6407 u32 vmolr = rd32(E1000_VMOLR(vf));
6408 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6409
6410 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
6411 IGB_VF_FLAG_MULTI_PROMISC);
6412 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6413
6414 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
6415 vmolr |= E1000_VMOLR_MPME;
6416 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
6417 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
6418 } else {
6419
6420
6421
6422
6423 if (vf_data->num_vf_mc_hashes > 30) {
6424 vmolr |= E1000_VMOLR_MPME;
6425 } else if (vf_data->num_vf_mc_hashes) {
6426 int j;
6427
6428 vmolr |= E1000_VMOLR_ROMPE;
6429 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6430 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6431 }
6432 }
6433
6434 wr32(E1000_VMOLR(vf), vmolr);
6435
6436
6437 if (*msgbuf & E1000_VT_MSGINFO_MASK)
6438 return -EINVAL;
6439
6440 return 0;
6441}
6442
6443static int igb_set_vf_multicasts(struct igb_adapter *adapter,
6444 u32 *msgbuf, u32 vf)
6445{
6446 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
6447 u16 *hash_list = (u16 *)&msgbuf[1];
6448 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6449 int i;
6450
6451
6452
6453
6454
6455 vf_data->num_vf_mc_hashes = n;
6456
6457
6458 if (n > 30)
6459 n = 30;
6460
6461
6462 for (i = 0; i < n; i++)
6463 vf_data->vf_mc_hashes[i] = hash_list[i];
6464
6465
6466 igb_set_rx_mode(adapter->netdev);
6467
6468 return 0;
6469}
6470
6471static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
6472{
6473 struct e1000_hw *hw = &adapter->hw;
6474 struct vf_data_storage *vf_data;
6475 int i, j;
6476
6477 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6478 u32 vmolr = rd32(E1000_VMOLR(i));
6479
6480 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6481
6482 vf_data = &adapter->vf_data[i];
6483
6484 if ((vf_data->num_vf_mc_hashes > 30) ||
6485 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
6486 vmolr |= E1000_VMOLR_MPME;
6487 } else if (vf_data->num_vf_mc_hashes) {
6488 vmolr |= E1000_VMOLR_ROMPE;
6489 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6490 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6491 }
6492 wr32(E1000_VMOLR(i), vmolr);
6493 }
6494}
6495
6496static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
6497{
6498 struct e1000_hw *hw = &adapter->hw;
6499 u32 pool_mask, vlvf_mask, i;
6500
6501
6502 pool_mask = E1000_VLVF_POOLSEL_MASK;
6503 vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
6504
6505
6506 pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
6507 adapter->vfs_allocated_count);
6508
6509
6510 for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
6511 u32 vlvf = rd32(E1000_VLVF(i));
6512 u32 vfta_mask, vid, vfta;
6513
6514
6515 if (!(vlvf & vlvf_mask))
6516 continue;
6517
6518
6519 vlvf ^= vlvf_mask;
6520
6521
6522 if (vlvf & pool_mask)
6523 goto update_vlvfb;
6524
6525
6526 if (vlvf & E1000_VLVF_POOLSEL_MASK)
6527 goto update_vlvf;
6528
6529 vid = vlvf & E1000_VLVF_VLANID_MASK;
6530 vfta_mask = BIT(vid % 32);
6531
6532
6533 vfta = adapter->shadow_vfta[vid / 32];
6534 if (vfta & vfta_mask)
6535 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
6536update_vlvf:
6537
6538 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6539 vlvf &= E1000_VLVF_POOLSEL_MASK;
6540 else
6541 vlvf = 0;
6542update_vlvfb:
6543
6544 wr32(E1000_VLVF(i), vlvf);
6545 }
6546}
6547
6548static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
6549{
6550 u32 vlvf;
6551 int idx;
6552
6553
6554 if (vlan == 0)
6555 return 0;
6556
6557
6558 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
6559 vlvf = rd32(E1000_VLVF(idx));
6560 if ((vlvf & VLAN_VID_MASK) == vlan)
6561 break;
6562 }
6563
6564 return idx;
6565}
6566
6567static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
6568{
6569 struct e1000_hw *hw = &adapter->hw;
6570 u32 bits, pf_id;
6571 int idx;
6572
6573 idx = igb_find_vlvf_entry(hw, vid);
6574 if (!idx)
6575 return;
6576
6577
6578
6579
6580 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
6581 bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
6582 bits &= rd32(E1000_VLVF(idx));
6583
6584
6585 if (!bits) {
6586 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6587 wr32(E1000_VLVF(idx), BIT(pf_id));
6588 else
6589 wr32(E1000_VLVF(idx), 0);
6590 }
6591}
6592
6593static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
6594 bool add, u32 vf)
6595{
6596 int pf_id = adapter->vfs_allocated_count;
6597 struct e1000_hw *hw = &adapter->hw;
6598 int err;
6599
6600
6601
6602
6603
6604
6605 if (add && test_bit(vid, adapter->active_vlans)) {
6606 err = igb_vfta_set(hw, vid, pf_id, true, false);
6607 if (err)
6608 return err;
6609 }
6610
6611 err = igb_vfta_set(hw, vid, vf, add, false);
6612
6613 if (add && !err)
6614 return err;
6615
6616
6617
6618
6619
6620 if (test_bit(vid, adapter->active_vlans) ||
6621 (adapter->flags & IGB_FLAG_VLAN_PROMISC))
6622 igb_update_pf_vlvf(adapter, vid);
6623
6624 return err;
6625}
6626
6627static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
6628{
6629 struct e1000_hw *hw = &adapter->hw;
6630
6631 if (vid)
6632 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
6633 else
6634 wr32(E1000_VMVIR(vf), 0);
6635}
6636
6637static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
6638 u16 vlan, u8 qos)
6639{
6640 int err;
6641
6642 err = igb_set_vf_vlan(adapter, vlan, true, vf);
6643 if (err)
6644 return err;
6645
6646 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
6647 igb_set_vmolr(adapter, vf, !vlan);
6648
6649
6650 if (vlan != adapter->vf_data[vf].pf_vlan)
6651 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
6652 false, vf);
6653
6654 adapter->vf_data[vf].pf_vlan = vlan;
6655 adapter->vf_data[vf].pf_qos = qos;
6656 igb_set_vf_vlan_strip(adapter, vf, true);
6657 dev_info(&adapter->pdev->dev,
6658 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
6659 if (test_bit(__IGB_DOWN, &adapter->state)) {
6660 dev_warn(&adapter->pdev->dev,
6661 "The VF VLAN has been set, but the PF device is not up.\n");
6662 dev_warn(&adapter->pdev->dev,
6663 "Bring the PF device up before attempting to use the VF device.\n");
6664 }
6665
6666 return err;
6667}
6668
6669static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
6670{
6671
6672 igb_set_vf_vlan(adapter, 0, true, vf);
6673
6674 igb_set_vmvir(adapter, 0, vf);
6675 igb_set_vmolr(adapter, vf, true);
6676
6677
6678 if (adapter->vf_data[vf].pf_vlan)
6679 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
6680 false, vf);
6681
6682 adapter->vf_data[vf].pf_vlan = 0;
6683 adapter->vf_data[vf].pf_qos = 0;
6684 igb_set_vf_vlan_strip(adapter, vf, false);
6685
6686 return 0;
6687}
6688
6689static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
6690 u16 vlan, u8 qos, __be16 vlan_proto)
6691{
6692 struct igb_adapter *adapter = netdev_priv(netdev);
6693
6694 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
6695 return -EINVAL;
6696
6697 if (vlan_proto != htons(ETH_P_8021Q))
6698 return -EPROTONOSUPPORT;
6699
6700 return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
6701 igb_disable_port_vlan(adapter, vf);
6702}
6703
6704static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
6705{
6706 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
6707 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
6708 int ret;
6709
6710 if (adapter->vf_data[vf].pf_vlan)
6711 return -1;
6712
6713
6714 if (!vid && !add)
6715 return 0;
6716
6717 ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
6718 if (!ret)
6719 igb_set_vf_vlan_strip(adapter, vf, !!vid);
6720 return ret;
6721}
6722
6723static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
6724{
6725 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6726
6727
6728 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
6729 vf_data->last_nack = jiffies;
6730
6731
6732 igb_clear_vf_vfta(adapter, vf);
6733 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
6734 igb_set_vmvir(adapter, vf_data->pf_vlan |
6735 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
6736 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
6737 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
6738
6739
6740 adapter->vf_data[vf].num_vf_mc_hashes = 0;
6741
6742
6743 igb_set_rx_mode(adapter->netdev);
6744}
6745
6746static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
6747{
6748 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
6749
6750
6751 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
6752 eth_zero_addr(vf_mac);
6753
6754
6755 igb_vf_reset(adapter, vf);
6756}
6757
6758static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
6759{
6760 struct e1000_hw *hw = &adapter->hw;
6761 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
6762 u32 reg, msgbuf[3];
6763 u8 *addr = (u8 *)(&msgbuf[1]);
6764
6765
6766 igb_vf_reset(adapter, vf);
6767
6768
6769 igb_set_vf_mac(adapter, vf, vf_mac);
6770
6771
6772 reg = rd32(E1000_VFTE);
6773 wr32(E1000_VFTE, reg | BIT(vf));
6774 reg = rd32(E1000_VFRE);
6775 wr32(E1000_VFRE, reg | BIT(vf));
6776
6777 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
6778
6779
6780 if (!is_zero_ether_addr(vf_mac)) {
6781 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
6782 memcpy(addr, vf_mac, ETH_ALEN);
6783 } else {
6784 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
6785 }
6786 igb_write_mbx(hw, msgbuf, 3, vf);
6787}
6788
6789static void igb_flush_mac_table(struct igb_adapter *adapter)
6790{
6791 struct e1000_hw *hw = &adapter->hw;
6792 int i;
6793
6794 for (i = 0; i < hw->mac.rar_entry_count; i++) {
6795 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
6796 memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
6797 adapter->mac_table[i].queue = 0;
6798 igb_rar_set_index(adapter, i);
6799 }
6800}
6801
6802static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
6803{
6804 struct e1000_hw *hw = &adapter->hw;
6805
6806 int rar_entries = hw->mac.rar_entry_count -
6807 adapter->vfs_allocated_count;
6808 int i, count = 0;
6809
6810 for (i = 0; i < rar_entries; i++) {
6811
6812 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
6813 continue;
6814
6815
6816 if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
6817 (adapter->mac_table[i].queue != queue))
6818 continue;
6819
6820 count++;
6821 }
6822
6823 return count;
6824}
6825
6826
6827static void igb_set_default_mac_filter(struct igb_adapter *adapter)
6828{
6829 struct igb_mac_addr *mac_table = &adapter->mac_table[0];
6830
6831 ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
6832 mac_table->queue = adapter->vfs_allocated_count;
6833 mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
6834
6835 igb_rar_set_index(adapter, 0);
6836}
6837
6838static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
6839 const u8 queue)
6840{
6841 struct e1000_hw *hw = &adapter->hw;
6842 int rar_entries = hw->mac.rar_entry_count -
6843 adapter->vfs_allocated_count;
6844 int i;
6845
6846 if (is_zero_ether_addr(addr))
6847 return -EINVAL;
6848
6849
6850
6851
6852
6853 for (i = 0; i < rar_entries; i++) {
6854 if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)
6855 continue;
6856
6857 ether_addr_copy(adapter->mac_table[i].addr, addr);
6858 adapter->mac_table[i].queue = queue;
6859 adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE;
6860
6861 igb_rar_set_index(adapter, i);
6862 return i;
6863 }
6864
6865 return -ENOSPC;
6866}
6867
6868static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
6869 const u8 queue)
6870{
6871 struct e1000_hw *hw = &adapter->hw;
6872 int rar_entries = hw->mac.rar_entry_count -
6873 adapter->vfs_allocated_count;
6874 int i;
6875
6876 if (is_zero_ether_addr(addr))
6877 return -EINVAL;
6878
6879
6880
6881
6882
6883 for (i = 0; i < rar_entries; i++) {
6884 if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
6885 continue;
6886 if (adapter->mac_table[i].queue != queue)
6887 continue;
6888 if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
6889 continue;
6890
6891 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
6892 memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
6893 adapter->mac_table[i].queue = 0;
6894
6895 igb_rar_set_index(adapter, i);
6896 return 0;
6897 }
6898
6899 return -ENOENT;
6900}
6901
6902static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
6903{
6904 struct igb_adapter *adapter = netdev_priv(netdev);
6905 int ret;
6906
6907 ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
6908
6909 return min_t(int, ret, 0);
6910}
6911
6912static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
6913{
6914 struct igb_adapter *adapter = netdev_priv(netdev);
6915
6916 igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
6917
6918 return 0;
6919}
6920
6921static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
6922 const u32 info, const u8 *addr)
6923{
6924 struct pci_dev *pdev = adapter->pdev;
6925 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6926 struct list_head *pos;
6927 struct vf_mac_filter *entry = NULL;
6928 int ret = 0;
6929
6930 switch (info) {
6931 case E1000_VF_MAC_FILTER_CLR:
6932
6933 list_for_each(pos, &adapter->vf_macs.l) {
6934 entry = list_entry(pos, struct vf_mac_filter, l);
6935 if (entry->vf == vf) {
6936 entry->vf = -1;
6937 entry->free = true;
6938 igb_del_mac_filter(adapter, entry->vf_mac, vf);
6939 }
6940 }
6941 break;
6942 case E1000_VF_MAC_FILTER_ADD:
6943 if (vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) {
6944 dev_warn(&pdev->dev,
6945 "VF %d requested MAC filter but is administratively denied\n",
6946 vf);
6947 return -EINVAL;
6948 }
6949
6950 if (!is_valid_ether_addr(addr)) {
6951 dev_warn(&pdev->dev,
6952 "VF %d attempted to set invalid MAC filter\n",
6953 vf);
6954 return -EINVAL;
6955 }
6956
6957
6958 list_for_each(pos, &adapter->vf_macs.l) {
6959 entry = list_entry(pos, struct vf_mac_filter, l);
6960 if (entry->free)
6961 break;
6962 }
6963
6964 if (entry && entry->free) {
6965 entry->free = false;
6966 entry->vf = vf;
6967 ether_addr_copy(entry->vf_mac, addr);
6968
6969 ret = igb_add_mac_filter(adapter, addr, vf);
6970 ret = min_t(int, ret, 0);
6971 } else {
6972 ret = -ENOSPC;
6973 }
6974
6975 if (ret == -ENOSPC)
6976 dev_warn(&pdev->dev,
6977 "VF %d has requested MAC filter but there is no space for it\n",
6978 vf);
6979 break;
6980 default:
6981 ret = -EINVAL;
6982 break;
6983 }
6984
6985 return ret;
6986}
6987
6988static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
6989{
6990 struct pci_dev *pdev = adapter->pdev;
6991 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6992 u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
6993
6994
6995
6996
6997 unsigned char *addr = (unsigned char *)&msg[1];
6998 int ret = 0;
6999
7000 if (!info) {
7001 if (vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) {
7002 dev_warn(&pdev->dev,
7003 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
7004 vf);
7005 return -EINVAL;
7006 }
7007
7008 if (!is_valid_ether_addr(addr)) {
7009 dev_warn(&pdev->dev,
7010 "VF %d attempted to set invalid MAC\n",
7011 vf);
7012 return -EINVAL;
7013 }
7014
7015 ret = igb_set_vf_mac(adapter, vf, addr);
7016 } else {
7017 ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
7018 }
7019
7020 return ret;
7021}
7022
7023static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
7024{
7025 struct e1000_hw *hw = &adapter->hw;
7026 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7027 u32 msg = E1000_VT_MSGTYPE_NACK;
7028
7029
7030 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
7031 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
7032 igb_write_mbx(hw, &msg, 1, vf);
7033 vf_data->last_nack = jiffies;
7034 }
7035}
7036
7037static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
7038{
7039 struct pci_dev *pdev = adapter->pdev;
7040 u32 msgbuf[E1000_VFMAILBOX_SIZE];
7041 struct e1000_hw *hw = &adapter->hw;
7042 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7043 s32 retval;
7044
7045 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);
7046
7047 if (retval) {
7048
7049 dev_err(&pdev->dev, "Error receiving message from VF\n");
7050 vf_data->flags &= ~IGB_VF_FLAG_CTS;
7051 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7052 goto unlock;
7053 goto out;
7054 }
7055
7056
7057 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
7058 goto unlock;
7059
7060
7061
7062
7063 if (msgbuf[0] == E1000_VF_RESET) {
7064
7065 igb_vf_reset_msg(adapter, vf);
7066 return;
7067 }
7068
7069 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
7070 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7071 goto unlock;
7072 retval = -1;
7073 goto out;
7074 }
7075
7076 switch ((msgbuf[0] & 0xFFFF)) {
7077 case E1000_VF_SET_MAC_ADDR:
7078 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
7079 break;
7080 case E1000_VF_SET_PROMISC:
7081 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
7082 break;
7083 case E1000_VF_SET_MULTICAST:
7084 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
7085 break;
7086 case E1000_VF_SET_LPE:
7087 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
7088 break;
7089 case E1000_VF_SET_VLAN:
7090 retval = -1;
7091 if (vf_data->pf_vlan)
7092 dev_warn(&pdev->dev,
7093 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
7094 vf);
7095 else
7096 retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
7097 break;
7098 default:
7099 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
7100 retval = -1;
7101 break;
7102 }
7103
7104 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
7105out:
7106
7107 if (retval)
7108 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
7109 else
7110 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
7111
7112
7113 igb_write_mbx(hw, msgbuf, 1, vf);
7114 return;
7115
7116unlock:
7117 igb_unlock_mbx(hw, vf);
7118}
7119
7120static void igb_msg_task(struct igb_adapter *adapter)
7121{
7122 struct e1000_hw *hw = &adapter->hw;
7123 u32 vf;
7124
7125 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
7126
7127 if (!igb_check_for_rst(hw, vf))
7128 igb_vf_reset_event(adapter, vf);
7129
7130
7131 if (!igb_check_for_msg(hw, vf))
7132 igb_rcv_msg_from_vf(adapter, vf);
7133
7134
7135 if (!igb_check_for_ack(hw, vf))
7136 igb_rcv_ack_from_vf(adapter, vf);
7137 }
7138}
7139
7140
7141
7142
7143
7144
7145
7146
7147
7148
7149
7150
7151static void igb_set_uta(struct igb_adapter *adapter, bool set)
7152{
7153 struct e1000_hw *hw = &adapter->hw;
7154 u32 uta = set ? ~0 : 0;
7155 int i;
7156
7157
7158 if (!adapter->vfs_allocated_count)
7159 return;
7160
7161 for (i = hw->mac.uta_reg_count; i--;)
7162 array_wr32(E1000_UTA, i, uta);
7163}
7164
7165
7166
7167
7168
7169
7170static irqreturn_t igb_intr_msi(int irq, void *data)
7171{
7172 struct igb_adapter *adapter = data;
7173 struct igb_q_vector *q_vector = adapter->q_vector[0];
7174 struct e1000_hw *hw = &adapter->hw;
7175
7176 u32 icr = rd32(E1000_ICR);
7177
7178 igb_write_itr(q_vector);
7179
7180 if (icr & E1000_ICR_DRSTA)
7181 schedule_work(&adapter->reset_task);
7182
7183 if (icr & E1000_ICR_DOUTSYNC) {
7184
7185 adapter->stats.doosync++;
7186 }
7187
7188 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7189 hw->mac.get_link_status = 1;
7190 if (!test_bit(__IGB_DOWN, &adapter->state))
7191 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7192 }
7193
7194 if (icr & E1000_ICR_TS)
7195 igb_tsync_interrupt(adapter);
7196
7197 napi_schedule(&q_vector->napi);
7198
7199 return IRQ_HANDLED;
7200}
7201
7202
7203
7204
7205
7206
7207static irqreturn_t igb_intr(int irq, void *data)
7208{
7209 struct igb_adapter *adapter = data;
7210 struct igb_q_vector *q_vector = adapter->q_vector[0];
7211 struct e1000_hw *hw = &adapter->hw;
7212
7213
7214
7215 u32 icr = rd32(E1000_ICR);
7216
7217
7218
7219
7220 if (!(icr & E1000_ICR_INT_ASSERTED))
7221 return IRQ_NONE;
7222
7223 igb_write_itr(q_vector);
7224
7225 if (icr & E1000_ICR_DRSTA)
7226 schedule_work(&adapter->reset_task);
7227
7228 if (icr & E1000_ICR_DOUTSYNC) {
7229
7230 adapter->stats.doosync++;
7231 }
7232
7233 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7234 hw->mac.get_link_status = 1;
7235
7236 if (!test_bit(__IGB_DOWN, &adapter->state))
7237 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7238 }
7239
7240 if (icr & E1000_ICR_TS)
7241 igb_tsync_interrupt(adapter);
7242
7243 napi_schedule(&q_vector->napi);
7244
7245 return IRQ_HANDLED;
7246}
7247
7248static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
7249{
7250 struct igb_adapter *adapter = q_vector->adapter;
7251 struct e1000_hw *hw = &adapter->hw;
7252
7253 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
7254 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
7255 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
7256 igb_set_itr(q_vector);
7257 else
7258 igb_update_ring_itr(q_vector);
7259 }
7260
7261 if (!test_bit(__IGB_DOWN, &adapter->state)) {
7262 if (adapter->flags & IGB_FLAG_HAS_MSIX)
7263 wr32(E1000_EIMS, q_vector->eims_value);
7264 else
7265 igb_irq_enable(adapter);
7266 }
7267}
7268
7269
7270
7271
7272
7273
7274static int igb_poll(struct napi_struct *napi, int budget)
7275{
7276 struct igb_q_vector *q_vector = container_of(napi,
7277 struct igb_q_vector,
7278 napi);
7279 bool clean_complete = true;
7280 int work_done = 0;
7281
7282#ifdef CONFIG_IGB_DCA
7283 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
7284 igb_update_dca(q_vector);
7285#endif
7286 if (q_vector->tx.ring)
7287 clean_complete = igb_clean_tx_irq(q_vector, budget);
7288
7289 if (q_vector->rx.ring) {
7290 int cleaned = igb_clean_rx_irq(q_vector, budget);
7291
7292 work_done += cleaned;
7293 if (cleaned >= budget)
7294 clean_complete = false;
7295 }
7296
7297
7298 if (!clean_complete)
7299 return budget;
7300
7301
7302 napi_complete_done(napi, work_done);
7303 igb_ring_irq_enable(q_vector);
7304
7305 return 0;
7306}
7307
7308
7309
7310
7311
7312
7313
7314
7315static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
7316{
7317 struct igb_adapter *adapter = q_vector->adapter;
7318 struct igb_ring *tx_ring = q_vector->tx.ring;
7319 struct igb_tx_buffer *tx_buffer;
7320 union e1000_adv_tx_desc *tx_desc;
7321 unsigned int total_bytes = 0, total_packets = 0;
7322 unsigned int budget = q_vector->tx.work_limit;
7323 unsigned int i = tx_ring->next_to_clean;
7324
7325 if (test_bit(__IGB_DOWN, &adapter->state))
7326 return true;
7327
7328 tx_buffer = &tx_ring->tx_buffer_info[i];
7329 tx_desc = IGB_TX_DESC(tx_ring, i);
7330 i -= tx_ring->count;
7331
7332 do {
7333 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
7334
7335
7336 if (!eop_desc)
7337 break;
7338
7339
7340 smp_rmb();
7341
7342
7343 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
7344 break;
7345
7346
7347 tx_buffer->next_to_watch = NULL;
7348
7349
7350 total_bytes += tx_buffer->bytecount;
7351 total_packets += tx_buffer->gso_segs;
7352
7353
7354 napi_consume_skb(tx_buffer->skb, napi_budget);
7355
7356
7357 dma_unmap_single(tx_ring->dev,
7358 dma_unmap_addr(tx_buffer, dma),
7359 dma_unmap_len(tx_buffer, len),
7360 DMA_TO_DEVICE);
7361
7362
7363 dma_unmap_len_set(tx_buffer, len, 0);
7364
7365
7366 while (tx_desc != eop_desc) {
7367 tx_buffer++;
7368 tx_desc++;
7369 i++;
7370 if (unlikely(!i)) {
7371 i -= tx_ring->count;
7372 tx_buffer = tx_ring->tx_buffer_info;
7373 tx_desc = IGB_TX_DESC(tx_ring, 0);
7374 }
7375
7376
7377 if (dma_unmap_len(tx_buffer, len)) {
7378 dma_unmap_page(tx_ring->dev,
7379 dma_unmap_addr(tx_buffer, dma),
7380 dma_unmap_len(tx_buffer, len),
7381 DMA_TO_DEVICE);
7382 dma_unmap_len_set(tx_buffer, len, 0);
7383 }
7384 }
7385
7386
7387 tx_buffer++;
7388 tx_desc++;
7389 i++;
7390 if (unlikely(!i)) {
7391 i -= tx_ring->count;
7392 tx_buffer = tx_ring->tx_buffer_info;
7393 tx_desc = IGB_TX_DESC(tx_ring, 0);
7394 }
7395
7396
7397 prefetch(tx_desc);
7398
7399
7400 budget--;
7401 } while (likely(budget));
7402
7403 netdev_tx_completed_queue(txring_txq(tx_ring),
7404 total_packets, total_bytes);
7405 i += tx_ring->count;
7406 tx_ring->next_to_clean = i;
7407 u64_stats_update_begin(&tx_ring->tx_syncp);
7408 tx_ring->tx_stats.bytes += total_bytes;
7409 tx_ring->tx_stats.packets += total_packets;
7410 u64_stats_update_end(&tx_ring->tx_syncp);
7411 q_vector->tx.total_bytes += total_bytes;
7412 q_vector->tx.total_packets += total_packets;
7413
7414 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
7415 struct e1000_hw *hw = &adapter->hw;
7416
7417
7418
7419
7420 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
7421 if (tx_buffer->next_to_watch &&
7422 time_after(jiffies, tx_buffer->time_stamp +
7423 (adapter->tx_timeout_factor * HZ)) &&
7424 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
7425
7426
7427 dev_err(tx_ring->dev,
7428 "Detected Tx Unit Hang\n"
7429 " Tx Queue <%d>\n"
7430 " TDH <%x>\n"
7431 " TDT <%x>\n"
7432 " next_to_use <%x>\n"
7433 " next_to_clean <%x>\n"
7434 "buffer_info[next_to_clean]\n"
7435 " time_stamp <%lx>\n"
7436 " next_to_watch <%p>\n"
7437 " jiffies <%lx>\n"
7438 " desc.status <%x>\n",
7439 tx_ring->queue_index,
7440 rd32(E1000_TDH(tx_ring->reg_idx)),
7441 readl(tx_ring->tail),
7442 tx_ring->next_to_use,
7443 tx_ring->next_to_clean,
7444 tx_buffer->time_stamp,
7445 tx_buffer->next_to_watch,
7446 jiffies,
7447 tx_buffer->next_to_watch->wb.status);
7448 netif_stop_subqueue(tx_ring->netdev,
7449 tx_ring->queue_index);
7450
7451
7452 return true;
7453 }
7454 }
7455
7456#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
7457 if (unlikely(total_packets &&
7458 netif_carrier_ok(tx_ring->netdev) &&
7459 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
7460
7461
7462
7463 smp_mb();
7464 if (__netif_subqueue_stopped(tx_ring->netdev,
7465 tx_ring->queue_index) &&
7466 !(test_bit(__IGB_DOWN, &adapter->state))) {
7467 netif_wake_subqueue(tx_ring->netdev,
7468 tx_ring->queue_index);
7469
7470 u64_stats_update_begin(&tx_ring->tx_syncp);
7471 tx_ring->tx_stats.restart_queue++;
7472 u64_stats_update_end(&tx_ring->tx_syncp);
7473 }
7474 }
7475
7476 return !!budget;
7477}
7478
7479
7480
7481
7482
7483
7484
7485
7486static void igb_reuse_rx_page(struct igb_ring *rx_ring,
7487 struct igb_rx_buffer *old_buff)
7488{
7489 struct igb_rx_buffer *new_buff;
7490 u16 nta = rx_ring->next_to_alloc;
7491
7492 new_buff = &rx_ring->rx_buffer_info[nta];
7493
7494
7495 nta++;
7496 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
7497
7498
7499
7500
7501
7502 new_buff->dma = old_buff->dma;
7503 new_buff->page = old_buff->page;
7504 new_buff->page_offset = old_buff->page_offset;
7505 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
7506}
7507
7508static inline bool igb_page_is_reserved(struct page *page)
7509{
7510 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
7511}
7512
7513static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
7514{
7515 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
7516 struct page *page = rx_buffer->page;
7517
7518
7519 if (unlikely(igb_page_is_reserved(page)))
7520 return false;
7521
7522#if (PAGE_SIZE < 8192)
7523
7524 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
7525 return false;
7526#else
7527#define IGB_LAST_OFFSET \
7528 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
7529
7530 if (rx_buffer->page_offset > IGB_LAST_OFFSET)
7531 return false;
7532#endif
7533
7534
7535
7536
7537
7538 if (unlikely(!pagecnt_bias)) {
7539 page_ref_add(page, USHRT_MAX);
7540 rx_buffer->pagecnt_bias = USHRT_MAX;
7541 }
7542
7543 return true;
7544}
7545
7546
7547
7548
7549
7550
7551
7552
7553
7554
7555static void igb_add_rx_frag(struct igb_ring *rx_ring,
7556 struct igb_rx_buffer *rx_buffer,
7557 struct sk_buff *skb,
7558 unsigned int size)
7559{
7560#if (PAGE_SIZE < 8192)
7561 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
7562#else
7563 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
7564 SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
7565 SKB_DATA_ALIGN(size);
7566#endif
7567 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
7568 rx_buffer->page_offset, size, truesize);
7569#if (PAGE_SIZE < 8192)
7570 rx_buffer->page_offset ^= truesize;
7571#else
7572 rx_buffer->page_offset += truesize;
7573#endif
7574}
7575
7576static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
7577 struct igb_rx_buffer *rx_buffer,
7578 union e1000_adv_rx_desc *rx_desc,
7579 unsigned int size)
7580{
7581 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
7582#if (PAGE_SIZE < 8192)
7583 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
7584#else
7585 unsigned int truesize = SKB_DATA_ALIGN(size);
7586#endif
7587 unsigned int headlen;
7588 struct sk_buff *skb;
7589
7590
7591 prefetch(va);
7592#if L1_CACHE_BYTES < 128
7593 prefetch(va + L1_CACHE_BYTES);
7594#endif
7595
7596
7597 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
7598 if (unlikely(!skb))
7599 return NULL;
7600
7601 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
7602 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
7603 va += IGB_TS_HDR_LEN;
7604 size -= IGB_TS_HDR_LEN;
7605 }
7606
7607
7608 headlen = size;
7609 if (headlen > IGB_RX_HDR_LEN)
7610 headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
7611
7612
7613 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
7614
7615
7616 size -= headlen;
7617 if (size) {
7618 skb_add_rx_frag(skb, 0, rx_buffer->page,
7619 (va + headlen) - page_address(rx_buffer->page),
7620 size, truesize);
7621#if (PAGE_SIZE < 8192)
7622 rx_buffer->page_offset ^= truesize;
7623#else
7624 rx_buffer->page_offset += truesize;
7625#endif
7626 } else {
7627 rx_buffer->pagecnt_bias++;
7628 }
7629
7630 return skb;
7631}
7632
7633static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
7634 struct igb_rx_buffer *rx_buffer,
7635 union e1000_adv_rx_desc *rx_desc,
7636 unsigned int size)
7637{
7638 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
7639#if (PAGE_SIZE < 8192)
7640 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
7641#else
7642 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
7643 SKB_DATA_ALIGN(IGB_SKB_PAD + size);
7644#endif
7645 struct sk_buff *skb;
7646
7647
7648 prefetch(va);
7649#if L1_CACHE_BYTES < 128
7650 prefetch(va + L1_CACHE_BYTES);
7651#endif
7652
7653
7654 skb = build_skb(va - IGB_SKB_PAD, truesize);
7655 if (unlikely(!skb))
7656 return NULL;
7657
7658
7659 skb_reserve(skb, IGB_SKB_PAD);
7660 __skb_put(skb, size);
7661
7662
7663 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
7664 igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
7665 __skb_pull(skb, IGB_TS_HDR_LEN);
7666 }
7667
7668
7669#if (PAGE_SIZE < 8192)
7670 rx_buffer->page_offset ^= truesize;
7671#else
7672 rx_buffer->page_offset += truesize;
7673#endif
7674
7675 return skb;
7676}
7677
7678static inline void igb_rx_checksum(struct igb_ring *ring,
7679 union e1000_adv_rx_desc *rx_desc,
7680 struct sk_buff *skb)
7681{
7682 skb_checksum_none_assert(skb);
7683
7684
7685 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
7686 return;
7687
7688
7689 if (!(ring->netdev->features & NETIF_F_RXCSUM))
7690 return;
7691
7692
7693 if (igb_test_staterr(rx_desc,
7694 E1000_RXDEXT_STATERR_TCPE |
7695 E1000_RXDEXT_STATERR_IPE)) {
7696
7697
7698
7699
7700 if (!((skb->len == 60) &&
7701 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
7702 u64_stats_update_begin(&ring->rx_syncp);
7703 ring->rx_stats.csum_err++;
7704 u64_stats_update_end(&ring->rx_syncp);
7705 }
7706
7707 return;
7708 }
7709
7710 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
7711 E1000_RXD_STAT_UDPCS))
7712 skb->ip_summed = CHECKSUM_UNNECESSARY;
7713
7714 dev_dbg(ring->dev, "cksum success: bits %08X\n",
7715 le32_to_cpu(rx_desc->wb.upper.status_error));
7716}
7717
7718static inline void igb_rx_hash(struct igb_ring *ring,
7719 union e1000_adv_rx_desc *rx_desc,
7720 struct sk_buff *skb)
7721{
7722 if (ring->netdev->features & NETIF_F_RXHASH)
7723 skb_set_hash(skb,
7724 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
7725 PKT_HASH_TYPE_L3);
7726}
7727
7728
7729
7730
7731
7732
7733
7734
7735
7736
7737
7738
7739static bool igb_is_non_eop(struct igb_ring *rx_ring,
7740 union e1000_adv_rx_desc *rx_desc)
7741{
7742 u32 ntc = rx_ring->next_to_clean + 1;
7743
7744
7745 ntc = (ntc < rx_ring->count) ? ntc : 0;
7746 rx_ring->next_to_clean = ntc;
7747
7748 prefetch(IGB_RX_DESC(rx_ring, ntc));
7749
7750 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
7751 return false;
7752
7753 return true;
7754}
7755
7756
7757
7758
7759
7760
7761
7762
7763
7764
7765
7766
7767
7768
7769
7770static bool igb_cleanup_headers(struct igb_ring *rx_ring,
7771 union e1000_adv_rx_desc *rx_desc,
7772 struct sk_buff *skb)
7773{
7774 if (unlikely((igb_test_staterr(rx_desc,
7775 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
7776 struct net_device *netdev = rx_ring->netdev;
7777 if (!(netdev->features & NETIF_F_RXALL)) {
7778 dev_kfree_skb_any(skb);
7779 return true;
7780 }
7781 }
7782
7783
7784 if (eth_skb_pad(skb))
7785 return true;
7786
7787 return false;
7788}
7789
7790
7791
7792
7793
7794
7795
7796
7797
7798
7799
7800static void igb_process_skb_fields(struct igb_ring *rx_ring,
7801 union e1000_adv_rx_desc *rx_desc,
7802 struct sk_buff *skb)
7803{
7804 struct net_device *dev = rx_ring->netdev;
7805
7806 igb_rx_hash(rx_ring, rx_desc, skb);
7807
7808 igb_rx_checksum(rx_ring, rx_desc, skb);
7809
7810 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
7811 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
7812 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
7813
7814 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
7815 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
7816 u16 vid;
7817
7818 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
7819 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
7820 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
7821 else
7822 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
7823
7824 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
7825 }
7826
7827 skb_record_rx_queue(skb, rx_ring->queue_index);
7828
7829 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
7830}
7831
7832static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
7833 const unsigned int size)
7834{
7835 struct igb_rx_buffer *rx_buffer;
7836
7837 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
7838 prefetchw(rx_buffer->page);
7839
7840
7841 dma_sync_single_range_for_cpu(rx_ring->dev,
7842 rx_buffer->dma,
7843 rx_buffer->page_offset,
7844 size,
7845 DMA_FROM_DEVICE);
7846
7847 rx_buffer->pagecnt_bias--;
7848
7849 return rx_buffer;
7850}
7851
7852static void igb_put_rx_buffer(struct igb_ring *rx_ring,
7853 struct igb_rx_buffer *rx_buffer)
7854{
7855 if (igb_can_reuse_rx_page(rx_buffer)) {
7856
7857 igb_reuse_rx_page(rx_ring, rx_buffer);
7858 } else {
7859
7860
7861
7862 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
7863 igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
7864 IGB_RX_DMA_ATTR);
7865 __page_frag_cache_drain(rx_buffer->page,
7866 rx_buffer->pagecnt_bias);
7867 }
7868
7869
7870 rx_buffer->page = NULL;
7871}
7872
7873static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
7874{
7875 struct igb_ring *rx_ring = q_vector->rx.ring;
7876 struct sk_buff *skb = rx_ring->skb;
7877 unsigned int total_bytes = 0, total_packets = 0;
7878 u16 cleaned_count = igb_desc_unused(rx_ring);
7879
7880 while (likely(total_packets < budget)) {
7881 union e1000_adv_rx_desc *rx_desc;
7882 struct igb_rx_buffer *rx_buffer;
7883 unsigned int size;
7884
7885
7886 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
7887 igb_alloc_rx_buffers(rx_ring, cleaned_count);
7888 cleaned_count = 0;
7889 }
7890
7891 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
7892 size = le16_to_cpu(rx_desc->wb.upper.length);
7893 if (!size)
7894 break;
7895
7896
7897
7898
7899
7900 dma_rmb();
7901
7902 rx_buffer = igb_get_rx_buffer(rx_ring, size);
7903
7904
7905 if (skb)
7906 igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
7907 else if (ring_uses_build_skb(rx_ring))
7908 skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
7909 else
7910 skb = igb_construct_skb(rx_ring, rx_buffer,
7911 rx_desc, size);
7912
7913
7914 if (!skb) {
7915 rx_ring->rx_stats.alloc_failed++;
7916 rx_buffer->pagecnt_bias++;
7917 break;
7918 }
7919
7920 igb_put_rx_buffer(rx_ring, rx_buffer);
7921 cleaned_count++;
7922
7923
7924 if (igb_is_non_eop(rx_ring, rx_desc))
7925 continue;
7926
7927
7928 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
7929 skb = NULL;
7930 continue;
7931 }
7932
7933
7934 total_bytes += skb->len;
7935
7936
7937 igb_process_skb_fields(rx_ring, rx_desc, skb);
7938
7939 napi_gro_receive(&q_vector->napi, skb);
7940
7941
7942 skb = NULL;
7943
7944
7945 total_packets++;
7946 }
7947
7948
7949 rx_ring->skb = skb;
7950
7951 u64_stats_update_begin(&rx_ring->rx_syncp);
7952 rx_ring->rx_stats.packets += total_packets;
7953 rx_ring->rx_stats.bytes += total_bytes;
7954 u64_stats_update_end(&rx_ring->rx_syncp);
7955 q_vector->rx.total_packets += total_packets;
7956 q_vector->rx.total_bytes += total_bytes;
7957
7958 if (cleaned_count)
7959 igb_alloc_rx_buffers(rx_ring, cleaned_count);
7960
7961 return total_packets;
7962}
7963
7964static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
7965{
7966 return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
7967}
7968
7969static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
7970 struct igb_rx_buffer *bi)
7971{
7972 struct page *page = bi->page;
7973 dma_addr_t dma;
7974
7975
7976 if (likely(page))
7977 return true;
7978
7979
7980 page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
7981 if (unlikely(!page)) {
7982 rx_ring->rx_stats.alloc_failed++;
7983 return false;
7984 }
7985
7986
7987 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
7988 igb_rx_pg_size(rx_ring),
7989 DMA_FROM_DEVICE,
7990 IGB_RX_DMA_ATTR);
7991
7992
7993
7994
7995 if (dma_mapping_error(rx_ring->dev, dma)) {
7996 __free_pages(page, igb_rx_pg_order(rx_ring));
7997
7998 rx_ring->rx_stats.alloc_failed++;
7999 return false;
8000 }
8001
8002 bi->dma = dma;
8003 bi->page = page;
8004 bi->page_offset = igb_rx_offset(rx_ring);
8005 bi->pagecnt_bias = 1;
8006
8007 return true;
8008}
8009
8010
8011
8012
8013
8014void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
8015{
8016 union e1000_adv_rx_desc *rx_desc;
8017 struct igb_rx_buffer *bi;
8018 u16 i = rx_ring->next_to_use;
8019 u16 bufsz;
8020
8021
8022 if (!cleaned_count)
8023 return;
8024
8025 rx_desc = IGB_RX_DESC(rx_ring, i);
8026 bi = &rx_ring->rx_buffer_info[i];
8027 i -= rx_ring->count;
8028
8029 bufsz = igb_rx_bufsz(rx_ring);
8030
8031 do {
8032 if (!igb_alloc_mapped_page(rx_ring, bi))
8033 break;
8034
8035
8036 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
8037 bi->page_offset, bufsz,
8038 DMA_FROM_DEVICE);
8039
8040
8041
8042
8043 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
8044
8045 rx_desc++;
8046 bi++;
8047 i++;
8048 if (unlikely(!i)) {
8049 rx_desc = IGB_RX_DESC(rx_ring, 0);
8050 bi = rx_ring->rx_buffer_info;
8051 i -= rx_ring->count;
8052 }
8053
8054
8055 rx_desc->wb.upper.length = 0;
8056
8057 cleaned_count--;
8058 } while (cleaned_count);
8059
8060 i += rx_ring->count;
8061
8062 if (rx_ring->next_to_use != i) {
8063
8064 rx_ring->next_to_use = i;
8065
8066
8067 rx_ring->next_to_alloc = i;
8068
8069
8070
8071
8072
8073
8074 wmb();
8075 writel(i, rx_ring->tail);
8076 }
8077}
8078
8079
8080
8081
8082
8083
8084
8085static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8086{
8087 struct igb_adapter *adapter = netdev_priv(netdev);
8088 struct mii_ioctl_data *data = if_mii(ifr);
8089
8090 if (adapter->hw.phy.media_type != e1000_media_type_copper)
8091 return -EOPNOTSUPP;
8092
8093 switch (cmd) {
8094 case SIOCGMIIPHY:
8095 data->phy_id = adapter->hw.phy.addr;
8096 break;
8097 case SIOCGMIIREG:
8098 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
8099 &data->val_out))
8100 return -EIO;
8101 break;
8102 case SIOCSMIIREG:
8103 default:
8104 return -EOPNOTSUPP;
8105 }
8106 return 0;
8107}
8108
8109
8110
8111
8112
8113
8114
8115static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8116{
8117 switch (cmd) {
8118 case SIOCGMIIPHY:
8119 case SIOCGMIIREG:
8120 case SIOCSMIIREG:
8121 return igb_mii_ioctl(netdev, ifr, cmd);
8122 case SIOCGHWTSTAMP:
8123 return igb_ptp_get_ts_config(netdev, ifr);
8124 case SIOCSHWTSTAMP:
8125 return igb_ptp_set_ts_config(netdev, ifr);
8126 default:
8127 return -EOPNOTSUPP;
8128 }
8129}
8130
8131void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8132{
8133 struct igb_adapter *adapter = hw->back;
8134
8135 pci_read_config_word(adapter->pdev, reg, value);
8136}
8137
8138void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8139{
8140 struct igb_adapter *adapter = hw->back;
8141
8142 pci_write_config_word(adapter->pdev, reg, *value);
8143}
8144
8145s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8146{
8147 struct igb_adapter *adapter = hw->back;
8148
8149 if (pcie_capability_read_word(adapter->pdev, reg, value))
8150 return -E1000_ERR_CONFIG;
8151
8152 return 0;
8153}
8154
8155s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8156{
8157 struct igb_adapter *adapter = hw->back;
8158
8159 if (pcie_capability_write_word(adapter->pdev, reg, *value))
8160 return -E1000_ERR_CONFIG;
8161
8162 return 0;
8163}
8164
8165static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
8166{
8167 struct igb_adapter *adapter = netdev_priv(netdev);
8168 struct e1000_hw *hw = &adapter->hw;
8169 u32 ctrl, rctl;
8170 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
8171
8172 if (enable) {
8173
8174 ctrl = rd32(E1000_CTRL);
8175 ctrl |= E1000_CTRL_VME;
8176 wr32(E1000_CTRL, ctrl);
8177
8178
8179 rctl = rd32(E1000_RCTL);
8180 rctl &= ~E1000_RCTL_CFIEN;
8181 wr32(E1000_RCTL, rctl);
8182 } else {
8183
8184 ctrl = rd32(E1000_CTRL);
8185 ctrl &= ~E1000_CTRL_VME;
8186 wr32(E1000_CTRL, ctrl);
8187 }
8188
8189 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
8190}
8191
8192static int igb_vlan_rx_add_vid(struct net_device *netdev,
8193 __be16 proto, u16 vid)
8194{
8195 struct igb_adapter *adapter = netdev_priv(netdev);
8196 struct e1000_hw *hw = &adapter->hw;
8197 int pf_id = adapter->vfs_allocated_count;
8198
8199
8200 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8201 igb_vfta_set(hw, vid, pf_id, true, !!vid);
8202
8203 set_bit(vid, adapter->active_vlans);
8204
8205 return 0;
8206}
8207
8208static int igb_vlan_rx_kill_vid(struct net_device *netdev,
8209 __be16 proto, u16 vid)
8210{
8211 struct igb_adapter *adapter = netdev_priv(netdev);
8212 int pf_id = adapter->vfs_allocated_count;
8213 struct e1000_hw *hw = &adapter->hw;
8214
8215
8216 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8217 igb_vfta_set(hw, vid, pf_id, false, true);
8218
8219 clear_bit(vid, adapter->active_vlans);
8220
8221 return 0;
8222}
8223
8224static void igb_restore_vlan(struct igb_adapter *adapter)
8225{
8226 u16 vid = 1;
8227
8228 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
8229 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
8230
8231 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
8232 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
8233}
8234
8235int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
8236{
8237 struct pci_dev *pdev = adapter->pdev;
8238 struct e1000_mac_info *mac = &adapter->hw.mac;
8239
8240 mac->autoneg = 0;
8241
8242
8243
8244
8245 if ((spd & 1) || (dplx & ~1))
8246 goto err_inval;
8247
8248
8249
8250
8251 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
8252 switch (spd + dplx) {
8253 case SPEED_10 + DUPLEX_HALF:
8254 case SPEED_10 + DUPLEX_FULL:
8255 case SPEED_100 + DUPLEX_HALF:
8256 goto err_inval;
8257 default:
8258 break;
8259 }
8260 }
8261
8262 switch (spd + dplx) {
8263 case SPEED_10 + DUPLEX_HALF:
8264 mac->forced_speed_duplex = ADVERTISE_10_HALF;
8265 break;
8266 case SPEED_10 + DUPLEX_FULL:
8267 mac->forced_speed_duplex = ADVERTISE_10_FULL;
8268 break;
8269 case SPEED_100 + DUPLEX_HALF:
8270 mac->forced_speed_duplex = ADVERTISE_100_HALF;
8271 break;
8272 case SPEED_100 + DUPLEX_FULL:
8273 mac->forced_speed_duplex = ADVERTISE_100_FULL;
8274 break;
8275 case SPEED_1000 + DUPLEX_FULL:
8276 mac->autoneg = 1;
8277 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
8278 break;
8279 case SPEED_1000 + DUPLEX_HALF:
8280 default:
8281 goto err_inval;
8282 }
8283
8284
8285 adapter->hw.phy.mdix = AUTO_ALL_MODES;
8286
8287 return 0;
8288
8289err_inval:
8290 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
8291 return -EINVAL;
8292}
8293
8294static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8295 bool runtime)
8296{
8297 struct net_device *netdev = pci_get_drvdata(pdev);
8298 struct igb_adapter *adapter = netdev_priv(netdev);
8299 struct e1000_hw *hw = &adapter->hw;
8300 u32 ctrl, rctl, status;
8301 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
8302#ifdef CONFIG_PM
8303 int retval = 0;
8304#endif
8305
8306 rtnl_lock();
8307 netif_device_detach(netdev);
8308
8309 if (netif_running(netdev))
8310 __igb_close(netdev, true);
8311
8312 igb_ptp_suspend(adapter);
8313
8314 igb_clear_interrupt_scheme(adapter);
8315 rtnl_unlock();
8316
8317#ifdef CONFIG_PM
8318 retval = pci_save_state(pdev);
8319 if (retval)
8320 return retval;
8321#endif
8322
8323 status = rd32(E1000_STATUS);
8324 if (status & E1000_STATUS_LU)
8325 wufc &= ~E1000_WUFC_LNKC;
8326
8327 if (wufc) {
8328 igb_setup_rctl(adapter);
8329 igb_set_rx_mode(netdev);
8330
8331
8332 if (wufc & E1000_WUFC_MC) {
8333 rctl = rd32(E1000_RCTL);
8334 rctl |= E1000_RCTL_MPE;
8335 wr32(E1000_RCTL, rctl);
8336 }
8337
8338 ctrl = rd32(E1000_CTRL);
8339
8340 #define E1000_CTRL_ADVD3WUC 0x00100000
8341
8342 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
8343 ctrl |= E1000_CTRL_ADVD3WUC;
8344 wr32(E1000_CTRL, ctrl);
8345
8346
8347 igb_disable_pcie_master(hw);
8348
8349 wr32(E1000_WUC, E1000_WUC_PME_EN);
8350 wr32(E1000_WUFC, wufc);
8351 } else {
8352 wr32(E1000_WUC, 0);
8353 wr32(E1000_WUFC, 0);
8354 }
8355
8356 *enable_wake = wufc || adapter->en_mng_pt;
8357 if (!*enable_wake)
8358 igb_power_down_link(adapter);
8359 else
8360 igb_power_up_link(adapter);
8361
8362
8363
8364
8365 igb_release_hw_control(adapter);
8366
8367 pci_disable_device(pdev);
8368
8369 return 0;
8370}
8371
8372static void igb_deliver_wake_packet(struct net_device *netdev)
8373{
8374 struct igb_adapter *adapter = netdev_priv(netdev);
8375 struct e1000_hw *hw = &adapter->hw;
8376 struct sk_buff *skb;
8377 u32 wupl;
8378
8379 wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
8380
8381
8382
8383
8384 if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
8385 return;
8386
8387 skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
8388 if (!skb)
8389 return;
8390
8391 skb_put(skb, wupl);
8392
8393
8394 wupl = roundup(wupl, 4);
8395
8396 memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
8397
8398 skb->protocol = eth_type_trans(skb, netdev);
8399 netif_rx(skb);
8400}
8401
8402static int __maybe_unused igb_suspend(struct device *dev)
8403{
8404 int retval;
8405 bool wake;
8406 struct pci_dev *pdev = to_pci_dev(dev);
8407
8408 retval = __igb_shutdown(pdev, &wake, 0);
8409 if (retval)
8410 return retval;
8411
8412 if (wake) {
8413 pci_prepare_to_sleep(pdev);
8414 } else {
8415 pci_wake_from_d3(pdev, false);
8416 pci_set_power_state(pdev, PCI_D3hot);
8417 }
8418
8419 return 0;
8420}
8421
8422static int __maybe_unused igb_resume(struct device *dev)
8423{
8424 struct pci_dev *pdev = to_pci_dev(dev);
8425 struct net_device *netdev = pci_get_drvdata(pdev);
8426 struct igb_adapter *adapter = netdev_priv(netdev);
8427 struct e1000_hw *hw = &adapter->hw;
8428 u32 err, val;
8429
8430 pci_set_power_state(pdev, PCI_D0);
8431 pci_restore_state(pdev);
8432 pci_save_state(pdev);
8433
8434 if (!pci_device_is_present(pdev))
8435 return -ENODEV;
8436 err = pci_enable_device_mem(pdev);
8437 if (err) {
8438 dev_err(&pdev->dev,
8439 "igb: Cannot enable PCI device from suspend\n");
8440 return err;
8441 }
8442 pci_set_master(pdev);
8443
8444 pci_enable_wake(pdev, PCI_D3hot, 0);
8445 pci_enable_wake(pdev, PCI_D3cold, 0);
8446
8447 if (igb_init_interrupt_scheme(adapter, true)) {
8448 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8449 return -ENOMEM;
8450 }
8451
8452 igb_reset(adapter);
8453
8454
8455
8456
8457 igb_get_hw_control(adapter);
8458
8459 val = rd32(E1000_WUS);
8460 if (val & WAKE_PKT_WUS)
8461 igb_deliver_wake_packet(netdev);
8462
8463 wr32(E1000_WUS, ~0);
8464
8465 rtnl_lock();
8466 if (!err && netif_running(netdev))
8467 err = __igb_open(netdev, true);
8468
8469 if (!err)
8470 netif_device_attach(netdev);
8471 rtnl_unlock();
8472
8473 return err;
8474}
8475
8476static int __maybe_unused igb_runtime_idle(struct device *dev)
8477{
8478 struct pci_dev *pdev = to_pci_dev(dev);
8479 struct net_device *netdev = pci_get_drvdata(pdev);
8480 struct igb_adapter *adapter = netdev_priv(netdev);
8481
8482 if (!igb_has_link(adapter))
8483 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
8484
8485 return -EBUSY;
8486}
8487
8488static int __maybe_unused igb_runtime_suspend(struct device *dev)
8489{
8490 struct pci_dev *pdev = to_pci_dev(dev);
8491 int retval;
8492 bool wake;
8493
8494 retval = __igb_shutdown(pdev, &wake, 1);
8495 if (retval)
8496 return retval;
8497
8498 if (wake) {
8499 pci_prepare_to_sleep(pdev);
8500 } else {
8501 pci_wake_from_d3(pdev, false);
8502 pci_set_power_state(pdev, PCI_D3hot);
8503 }
8504
8505 return 0;
8506}
8507
8508static int __maybe_unused igb_runtime_resume(struct device *dev)
8509{
8510 return igb_resume(dev);
8511}
8512
8513static void igb_shutdown(struct pci_dev *pdev)
8514{
8515 bool wake;
8516
8517 __igb_shutdown(pdev, &wake, 0);
8518
8519 if (system_state == SYSTEM_POWER_OFF) {
8520 pci_wake_from_d3(pdev, wake);
8521 pci_set_power_state(pdev, PCI_D3hot);
8522 }
8523}
8524
8525#ifdef CONFIG_PCI_IOV
8526static int igb_sriov_reinit(struct pci_dev *dev)
8527{
8528 struct net_device *netdev = pci_get_drvdata(dev);
8529 struct igb_adapter *adapter = netdev_priv(netdev);
8530 struct pci_dev *pdev = adapter->pdev;
8531
8532 rtnl_lock();
8533
8534 if (netif_running(netdev))
8535 igb_close(netdev);
8536 else
8537 igb_reset(adapter);
8538
8539 igb_clear_interrupt_scheme(adapter);
8540
8541 igb_init_queue_configuration(adapter);
8542
8543 if (igb_init_interrupt_scheme(adapter, true)) {
8544 rtnl_unlock();
8545 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8546 return -ENOMEM;
8547 }
8548
8549 if (netif_running(netdev))
8550 igb_open(netdev);
8551
8552 rtnl_unlock();
8553
8554 return 0;
8555}
8556
8557static int igb_pci_disable_sriov(struct pci_dev *dev)
8558{
8559 int err = igb_disable_sriov(dev);
8560
8561 if (!err)
8562 err = igb_sriov_reinit(dev);
8563
8564 return err;
8565}
8566
8567static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
8568{
8569 int err = igb_enable_sriov(dev, num_vfs);
8570
8571 if (err)
8572 goto out;
8573
8574 err = igb_sriov_reinit(dev);
8575 if (!err)
8576 return num_vfs;
8577
8578out:
8579 return err;
8580}
8581
8582#endif
8583static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
8584{
8585#ifdef CONFIG_PCI_IOV
8586 if (num_vfs == 0)
8587 return igb_pci_disable_sriov(dev);
8588 else
8589 return igb_pci_enable_sriov(dev, num_vfs);
8590#endif
8591 return 0;
8592}
8593
8594#ifdef CONFIG_NET_POLL_CONTROLLER
8595
8596
8597
8598
8599static void igb_netpoll(struct net_device *netdev)
8600{
8601 struct igb_adapter *adapter = netdev_priv(netdev);
8602 struct e1000_hw *hw = &adapter->hw;
8603 struct igb_q_vector *q_vector;
8604 int i;
8605
8606 for (i = 0; i < adapter->num_q_vectors; i++) {
8607 q_vector = adapter->q_vector[i];
8608 if (adapter->flags & IGB_FLAG_HAS_MSIX)
8609 wr32(E1000_EIMC, q_vector->eims_value);
8610 else
8611 igb_irq_disable(adapter);
8612 napi_schedule(&q_vector->napi);
8613 }
8614}
8615#endif
8616
8617
8618
8619
8620
8621
8622
8623
8624
8625static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
8626 pci_channel_state_t state)
8627{
8628 struct net_device *netdev = pci_get_drvdata(pdev);
8629 struct igb_adapter *adapter = netdev_priv(netdev);
8630
8631 netif_device_detach(netdev);
8632
8633 if (state == pci_channel_io_perm_failure)
8634 return PCI_ERS_RESULT_DISCONNECT;
8635
8636 if (netif_running(netdev))
8637 igb_down(adapter);
8638 pci_disable_device(pdev);
8639
8640
8641 return PCI_ERS_RESULT_NEED_RESET;
8642}
8643
8644
8645
8646
8647
8648
8649
8650
8651static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
8652{
8653 struct net_device *netdev = pci_get_drvdata(pdev);
8654 struct igb_adapter *adapter = netdev_priv(netdev);
8655 struct e1000_hw *hw = &adapter->hw;
8656 pci_ers_result_t result;
8657 int err;
8658
8659 if (pci_enable_device_mem(pdev)) {
8660 dev_err(&pdev->dev,
8661 "Cannot re-enable PCI device after reset.\n");
8662 result = PCI_ERS_RESULT_DISCONNECT;
8663 } else {
8664 pci_set_master(pdev);
8665 pci_restore_state(pdev);
8666 pci_save_state(pdev);
8667
8668 pci_enable_wake(pdev, PCI_D3hot, 0);
8669 pci_enable_wake(pdev, PCI_D3cold, 0);
8670
8671
8672
8673
8674 hw->hw_addr = adapter->io_addr;
8675
8676 igb_reset(adapter);
8677 wr32(E1000_WUS, ~0);
8678 result = PCI_ERS_RESULT_RECOVERED;
8679 }
8680
8681 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8682 if (err) {
8683 dev_err(&pdev->dev,
8684 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8685 err);
8686
8687 }
8688
8689 return result;
8690}
8691
8692
8693
8694
8695
8696
8697
8698
8699
8700static void igb_io_resume(struct pci_dev *pdev)
8701{
8702 struct net_device *netdev = pci_get_drvdata(pdev);
8703 struct igb_adapter *adapter = netdev_priv(netdev);
8704
8705 if (netif_running(netdev)) {
8706 if (igb_up(adapter)) {
8707 dev_err(&pdev->dev, "igb_up failed after reset\n");
8708 return;
8709 }
8710 }
8711
8712 netif_device_attach(netdev);
8713
8714
8715
8716
8717 igb_get_hw_control(adapter);
8718}
8719
8720
8721
8722
8723
8724
8725static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
8726{
8727 struct e1000_hw *hw = &adapter->hw;
8728 u32 rar_low, rar_high;
8729 u8 *addr = adapter->mac_table[index].addr;
8730
8731
8732
8733
8734
8735
8736 rar_low = le32_to_cpup((__le32 *)(addr));
8737 rar_high = le16_to_cpup((__le16 *)(addr + 4));
8738
8739
8740 if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
8741 if (is_valid_ether_addr(addr))
8742 rar_high |= E1000_RAH_AV;
8743
8744 if (hw->mac.type == e1000_82575)
8745 rar_high |= E1000_RAH_POOL_1 *
8746 adapter->mac_table[index].queue;
8747 else
8748 rar_high |= E1000_RAH_POOL_1 <<
8749 adapter->mac_table[index].queue;
8750 }
8751
8752 wr32(E1000_RAL(index), rar_low);
8753 wrfl();
8754 wr32(E1000_RAH(index), rar_high);
8755 wrfl();
8756}
8757
8758static int igb_set_vf_mac(struct igb_adapter *adapter,
8759 int vf, unsigned char *mac_addr)
8760{
8761 struct e1000_hw *hw = &adapter->hw;
8762
8763
8764
8765 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
8766 unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
8767
8768 ether_addr_copy(vf_mac_addr, mac_addr);
8769 ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
8770 adapter->mac_table[rar_entry].queue = vf;
8771 adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
8772 igb_rar_set_index(adapter, rar_entry);
8773
8774 return 0;
8775}
8776
8777static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
8778{
8779 struct igb_adapter *adapter = netdev_priv(netdev);
8780
8781 if (vf >= adapter->vfs_allocated_count)
8782 return -EINVAL;
8783
8784
8785
8786
8787
8788
8789
8790 if (is_zero_ether_addr(mac)) {
8791 adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
8792 dev_info(&adapter->pdev->dev,
8793 "remove administratively set MAC on VF %d\n",
8794 vf);
8795 } else if (is_valid_ether_addr(mac)) {
8796 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
8797 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
8798 mac, vf);
8799 dev_info(&adapter->pdev->dev,
8800 "Reload the VF driver to make this change effective.");
8801
8802 if (test_bit(__IGB_DOWN, &adapter->state)) {
8803 dev_warn(&adapter->pdev->dev,
8804 "The VF MAC address has been set, but the PF device is not up.\n");
8805 dev_warn(&adapter->pdev->dev,
8806 "Bring the PF device up before attempting to use the VF device.\n");
8807 }
8808 } else {
8809 return -EINVAL;
8810 }
8811 return igb_set_vf_mac(adapter, vf, mac);
8812}
8813
8814static int igb_link_mbps(int internal_link_speed)
8815{
8816 switch (internal_link_speed) {
8817 case SPEED_100:
8818 return 100;
8819 case SPEED_1000:
8820 return 1000;
8821 default:
8822 return 0;
8823 }
8824}
8825
8826static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
8827 int link_speed)
8828{
8829 int rf_dec, rf_int;
8830 u32 bcnrc_val;
8831
8832 if (tx_rate != 0) {
8833
8834 rf_int = link_speed / tx_rate;
8835 rf_dec = (link_speed - (rf_int * tx_rate));
8836 rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
8837 tx_rate;
8838
8839 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
8840 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
8841 E1000_RTTBCNRC_RF_INT_MASK);
8842 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
8843 } else {
8844 bcnrc_val = 0;
8845 }
8846
8847 wr32(E1000_RTTDQSEL, vf);
8848
8849
8850
8851 wr32(E1000_RTTBCNRM, 0x14);
8852 wr32(E1000_RTTBCNRC, bcnrc_val);
8853}
8854
8855static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
8856{
8857 int actual_link_speed, i;
8858 bool reset_rate = false;
8859
8860
8861 if ((adapter->vf_rate_link_speed == 0) ||
8862 (adapter->hw.mac.type != e1000_82576))
8863 return;
8864
8865 actual_link_speed = igb_link_mbps(adapter->link_speed);
8866 if (actual_link_speed != adapter->vf_rate_link_speed) {
8867 reset_rate = true;
8868 adapter->vf_rate_link_speed = 0;
8869 dev_info(&adapter->pdev->dev,
8870 "Link speed has been changed. VF Transmit rate is disabled\n");
8871 }
8872
8873 for (i = 0; i < adapter->vfs_allocated_count; i++) {
8874 if (reset_rate)
8875 adapter->vf_data[i].tx_rate = 0;
8876
8877 igb_set_vf_rate_limit(&adapter->hw, i,
8878 adapter->vf_data[i].tx_rate,
8879 actual_link_speed);
8880 }
8881}
8882
8883static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
8884 int min_tx_rate, int max_tx_rate)
8885{
8886 struct igb_adapter *adapter = netdev_priv(netdev);
8887 struct e1000_hw *hw = &adapter->hw;
8888 int actual_link_speed;
8889
8890 if (hw->mac.type != e1000_82576)
8891 return -EOPNOTSUPP;
8892
8893 if (min_tx_rate)
8894 return -EINVAL;
8895
8896 actual_link_speed = igb_link_mbps(adapter->link_speed);
8897 if ((vf >= adapter->vfs_allocated_count) ||
8898 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
8899 (max_tx_rate < 0) ||
8900 (max_tx_rate > actual_link_speed))
8901 return -EINVAL;
8902
8903 adapter->vf_rate_link_speed = actual_link_speed;
8904 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
8905 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
8906
8907 return 0;
8908}
8909
8910static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
8911 bool setting)
8912{
8913 struct igb_adapter *adapter = netdev_priv(netdev);
8914 struct e1000_hw *hw = &adapter->hw;
8915 u32 reg_val, reg_offset;
8916
8917 if (!adapter->vfs_allocated_count)
8918 return -EOPNOTSUPP;
8919
8920 if (vf >= adapter->vfs_allocated_count)
8921 return -EINVAL;
8922
8923 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
8924 reg_val = rd32(reg_offset);
8925 if (setting)
8926 reg_val |= (BIT(vf) |
8927 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
8928 else
8929 reg_val &= ~(BIT(vf) |
8930 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
8931 wr32(reg_offset, reg_val);
8932
8933 adapter->vf_data[vf].spoofchk_enabled = setting;
8934 return 0;
8935}
8936
8937static int igb_ndo_get_vf_config(struct net_device *netdev,
8938 int vf, struct ifla_vf_info *ivi)
8939{
8940 struct igb_adapter *adapter = netdev_priv(netdev);
8941 if (vf >= adapter->vfs_allocated_count)
8942 return -EINVAL;
8943 ivi->vf = vf;
8944 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
8945 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
8946 ivi->min_tx_rate = 0;
8947 ivi->vlan = adapter->vf_data[vf].pf_vlan;
8948 ivi->qos = adapter->vf_data[vf].pf_qos;
8949 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
8950 return 0;
8951}
8952
8953static void igb_vmm_control(struct igb_adapter *adapter)
8954{
8955 struct e1000_hw *hw = &adapter->hw;
8956 u32 reg;
8957
8958 switch (hw->mac.type) {
8959 case e1000_82575:
8960 case e1000_i210:
8961 case e1000_i211:
8962 case e1000_i354:
8963 default:
8964
8965 return;
8966 case e1000_82576:
8967
8968 reg = rd32(E1000_DTXCTL);
8969 reg |= E1000_DTXCTL_VLAN_ADDED;
8970 wr32(E1000_DTXCTL, reg);
8971
8972 case e1000_82580:
8973
8974 reg = rd32(E1000_RPLOLR);
8975 reg |= E1000_RPLOLR_STRVLAN;
8976 wr32(E1000_RPLOLR, reg);
8977
8978 case e1000_i350:
8979
8980 break;
8981 }
8982
8983 if (adapter->vfs_allocated_count) {
8984 igb_vmdq_set_loopback_pf(hw, true);
8985 igb_vmdq_set_replication_pf(hw, true);
8986 igb_vmdq_set_anti_spoofing_pf(hw, true,
8987 adapter->vfs_allocated_count);
8988 } else {
8989 igb_vmdq_set_loopback_pf(hw, false);
8990 igb_vmdq_set_replication_pf(hw, false);
8991 }
8992}
8993
8994static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
8995{
8996 struct e1000_hw *hw = &adapter->hw;
8997 u32 dmac_thr;
8998 u16 hwm;
8999
9000 if (hw->mac.type > e1000_82580) {
9001 if (adapter->flags & IGB_FLAG_DMAC) {
9002 u32 reg;
9003
9004
9005 wr32(E1000_DMCTXTH, 0);
9006
9007
9008
9009
9010
9011 hwm = 64 * (pba - 6);
9012 reg = rd32(E1000_FCRTC);
9013 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
9014 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
9015 & E1000_FCRTC_RTH_COAL_MASK);
9016 wr32(E1000_FCRTC, reg);
9017
9018
9019
9020
9021 dmac_thr = pba - 10;
9022 reg = rd32(E1000_DMACR);
9023 reg &= ~E1000_DMACR_DMACTHR_MASK;
9024 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
9025 & E1000_DMACR_DMACTHR_MASK);
9026
9027
9028 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
9029
9030
9031 reg |= (1000 >> 5);
9032
9033
9034 if (hw->mac.type != e1000_i354)
9035 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9036
9037 wr32(E1000_DMACR, reg);
9038
9039
9040
9041
9042 wr32(E1000_DMCRTRH, 0);
9043
9044 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
9045
9046 wr32(E1000_DMCTLX, reg);
9047
9048
9049
9050
9051 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
9052 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
9053
9054
9055
9056
9057 reg = rd32(E1000_PCIEMISC);
9058 reg &= ~E1000_PCIEMISC_LX_DECISION;
9059 wr32(E1000_PCIEMISC, reg);
9060 }
9061 } else if (hw->mac.type == e1000_82580) {
9062 u32 reg = rd32(E1000_PCIEMISC);
9063
9064 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
9065 wr32(E1000_DMACR, 0);
9066 }
9067}
9068
9069
9070
9071
9072
9073
9074
9075
9076
9077
9078
9079s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9080 u8 dev_addr, u8 *data)
9081{
9082 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9083 struct i2c_client *this_client = adapter->i2c_client;
9084 s32 status;
9085 u16 swfw_mask = 0;
9086
9087 if (!this_client)
9088 return E1000_ERR_I2C;
9089
9090 swfw_mask = E1000_SWFW_PHY0_SM;
9091
9092 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9093 return E1000_ERR_SWFW_SYNC;
9094
9095 status = i2c_smbus_read_byte_data(this_client, byte_offset);
9096 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9097
9098 if (status < 0)
9099 return E1000_ERR_I2C;
9100 else {
9101 *data = status;
9102 return 0;
9103 }
9104}
9105
9106
9107
9108
9109
9110
9111
9112
9113
9114
9115
9116s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9117 u8 dev_addr, u8 data)
9118{
9119 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9120 struct i2c_client *this_client = adapter->i2c_client;
9121 s32 status;
9122 u16 swfw_mask = E1000_SWFW_PHY0_SM;
9123
9124 if (!this_client)
9125 return E1000_ERR_I2C;
9126
9127 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9128 return E1000_ERR_SWFW_SYNC;
9129 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
9130 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9131
9132 if (status)
9133 return E1000_ERR_I2C;
9134 else
9135 return 0;
9136
9137}
9138
9139int igb_reinit_queues(struct igb_adapter *adapter)
9140{
9141 struct net_device *netdev = adapter->netdev;
9142 struct pci_dev *pdev = adapter->pdev;
9143 int err = 0;
9144
9145 if (netif_running(netdev))
9146 igb_close(netdev);
9147
9148 igb_reset_interrupt_capability(adapter);
9149
9150 if (igb_init_interrupt_scheme(adapter, true)) {
9151 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9152 return -ENOMEM;
9153 }
9154
9155 if (netif_running(netdev))
9156 err = igb_open(netdev);
9157
9158 return err;
9159}
9160
9161static void igb_nfc_filter_exit(struct igb_adapter *adapter)
9162{
9163 struct igb_nfc_filter *rule;
9164
9165 spin_lock(&adapter->nfc_lock);
9166
9167 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9168 igb_erase_filter(adapter, rule);
9169
9170 spin_unlock(&adapter->nfc_lock);
9171}
9172
9173static void igb_nfc_filter_restore(struct igb_adapter *adapter)
9174{
9175 struct igb_nfc_filter *rule;
9176
9177 spin_lock(&adapter->nfc_lock);
9178
9179 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9180 igb_add_filter(adapter, rule);
9181
9182 spin_unlock(&adapter->nfc_lock);
9183}
9184
9185