// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2014 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include <linux/i2c.h>
#include "igb.h"

#define MAJ 5
#define MIN 6
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"

enum queue_mode {
	QUEUE_MODE_STRICT_PRIORITY,
	QUEUE_MODE_STREAM_RESERVATION,
};

enum tx_queue_prio {
	TX_QUEUE_PRIO_HIGH,
	TX_QUEUE_PRIO_LOW,
};

char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
		"Copyright (c) 2007-2014 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static const struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },

	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static void igb_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter, bool set);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev,
			  netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_index(struct igb_adapter *, u32);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_flush_mac_table(struct igb_adapter *);
static int igb_available_rars(struct igb_adapter *, u8);
static void igb_set_default_mac_filter(struct igb_adapter *);
static int igb_uc_sync(struct net_device *, const unsigned char *);
static int igb_uc_unsync(struct net_device *, const unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos, __be16 vlan_proto);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting);
static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
				bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
static void igb_nfc_filter_exit(struct igb_adapter *adapter);
static void igb_nfc_filter_restore(struct igb_adapter *adapter);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
static int igb_disable_sriov(struct pci_dev *dev);
static int igb_pci_disable_sriov(struct pci_dev *dev);
#endif

static int igb_suspend(struct device *);
static int igb_resume(struct device *);
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			   igb_runtime_idle)
};
static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static const struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = igb_remove,
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.sriov_configure = igb_pci_sriov_configure,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};
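/* igb_regdump - register printout routine */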
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}
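/* igb_dump - Print registers, Tx-rings and Rx-rings */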
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start\n");
		pr_info("%-15s %016lX %016lX\n", netdev->name,
			netdev->state, dev_trans_start(netdev));
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(buffer_info, dma),
			dma_unmap_len(buffer_info, len),
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31   24             15       0
	 */
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] [bi->dma       ] leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX %04X  %p %016llX %p%s\n",
				i, le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(buffer_info, dma),
				dma_unmap_len(buffer_info, len),
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, buffer_info->skb->data,
					dma_unmap_len(buffer_info, len),
					true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Receive Descriptor Formats
	 *
	 * Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
					"RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
					"R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->page) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						page_address(buffer_info->page) +
							     buffer_info->page_offset,
						igb_rx_bufsz(rx_ring), true);
				}
			}
		}
	}

exit:
	return;
}
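/**
 *  igb_get_i2c_data - Reads the I2C SDA data bit
 *  @data: opaque pointer to our adapter structure
 *
 *  Returns the I2C data bit value
 **/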
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_DATA_IN);
}
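/**
 *  igb_set_i2c_data - Sets the I2C data bit
 *  @data: opaque pointer to our adapter structure
 *  @state: I2C data value (0/1) to set
 *
 *  Sets the I2C data bit
 **/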
static void igb_set_i2c_data(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state)
		i2cctl |= E1000_I2C_DATA_OUT;
	else
		i2cctl &= ~E1000_I2C_DATA_OUT;

	i2cctl &= ~E1000_I2C_DATA_OE_N;
	i2cctl |= E1000_I2C_CLK_OE_N;
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}
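/**
 *  igb_set_i2c_clk - Sets the I2C SCL clock
 *  @data: opaque pointer to our adapter structure
 *  @state: state to set clock
 *
 *  Sets the I2C clock line to state
 **/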
static void igb_set_i2c_clk(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	}
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}
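/**
 *  igb_get_i2c_clk - Gets the I2C SCL clock state
 *  @data: opaque pointer to our adapter structure
 *
 *  Gets the I2C clock state
 **/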
static int igb_get_i2c_clk(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_CLK_IN);
}

static const struct i2c_algo_bit_data igb_i2c_algo = {
	.setsda		= igb_set_i2c_data,
	.setscl		= igb_set_i2c_clk,
	.getsda		= igb_get_i2c_data,
	.getscl		= igb_get_i2c_clk,
	.udelay		= 5,
	.timeout	= 20,
};
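/**
 *  igb_get_hw_dev - return device
 *  @hw: pointer to hardware structure
 *
 *  used by hardware layer to print debugging information
 **/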
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}
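/**
 *  igb_init_module - Driver Registration Routine
 *
 *  igb_init_module is the first routine called when the driver is
 *  loaded. All it does is register with the PCI subsystem.
 **/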
static int __init igb_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);
	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);
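/**
 *  igb_exit_module - Driver Exit Cleanup Routine
 *
 *  igb_exit_module is called just before the driver is removed
 *  from memory.
 **/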
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

/* Maps a 82576 queue index to its hardware register index when SR-IOV is
 * enabled, e.g. 0->0, 1->8, 2->1, 3->9, ...
 */
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
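/**
 *  igb_cache_ring_register - Descriptor ring to register mapping
 *  @adapter: board private structure to initialize
 *
 *  Once we know the feature-set enabled for the device, we'll cache
 *  the register offset the descriptor ring is assigned to.
 **/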
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
		/* Fall through */
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (E1000_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);

	/* reads should not return all F's */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct net_device *netdev = igb->netdev;
		hw->hw_addr = NULL;
		netdev_err(netdev, "PCIe link lost\n");
		WARN(1, "igb: Failed to read reg 0x%x!\n", reg);
	}

	return value;
}
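/**
 *  igb_write_ivar - configure ivar for given MSI-X vector
 *  @hw: pointer to the HW structure
 *  @msix_vector: vector number we are allocating to a given ring
 *  @index: row index of IVAR register to write within IVAR table
 *  @offset: column offset of in IVAR, should be multiple of 8
 *
 *  This function is intended to handle the writing of the IVAR register
 *  for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 *  each containing an cause allocation for an Rx and Tx ring, and a
 *  variable number of rows depending on the number of queues supported.
 **/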
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}
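/**
 *  igb_configure_msix - Configure MSI-X hardware
 *  @adapter: board private structure to initialize
 *
 *  igb_configure_msix sets up the hardware to properly
 *  generate MSI-X interrupts.
 **/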
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug.
		 */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = BIT(vector);
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	}

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}
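/**
 *  igb_request_msix - Initialize MSI-X interrupts
 *  @adapter: board private structure to initialize
 *
 *  igb_request_msix allocates MSI-X vectors and requests interrupts from the
 *  kernel.
 **/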
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0, free_vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igb_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}

static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	adapter->q_vector[v_idx] = NULL;

	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	if (q_vector)
		kfree_rcu(q_vector, rcu);
}

static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	/* Coming from igb_set_interrupt_capability, the vectors are not yet
	 * allocated. So, q_vector is NULL so we should stop here.
	 */
	if (!q_vector)
		return;

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	netif_napi_del(&q_vector->napi);
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);

	while (v_idx--)
		igb_reset_q_vector(adapter, v_idx);
}

static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--) {
		igb_reset_q_vector(adapter, v_idx);
		igb_free_q_vector(adapter, v_idx);
	}
}

static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}
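/**
 *  igb_set_interrupt_capability - set MSI or MSI-X if supported
 *  @adapter: board private structure to initialize
 *  @msix: boolean value of MSIX capability
 *
 *  Attempt to configure interrupts using the best available
 *  capabilities of the hardware and kernel.
 **/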
static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
	int err;
	int numvecs, i;

	if (!msix)
		goto msi_only;
	adapter->flags |= IGB_FLAG_HAS_MSIX;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
	adapter->flags &= ~IGB_FLAG_HAS_MSIX;
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;

		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_mac_list);
		adapter->vf_mac_list = NULL;
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
}

static void igb_add_ring(struct igb_ring *ring,
			 struct igb_ring_container *head)
{
	head->ring = ring;
	head->count++;
}
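/**
 *  igb_alloc_q_vector - Allocate memory for a single interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_count: q_vectors allocated on adapter, used for ring interleaving
 *  @v_idx: index of vector in adapter struct
 *  @txr_count: total number of Tx rings to allocate
 *  @txr_idx: index of first Tx ring to allocate
 *  @rxr_count: total number of Rx rings to allocate
 *  @rxr_idx: index of first Rx ring to allocate
 *
 *  We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/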
static int igb_alloc_q_vector(struct igb_adapter *adapter,
			      int v_count, int v_idx,
			      int txr_count, int txr_idx,
			      int rxr_count, int rxr_idx)
{
	struct igb_q_vector *q_vector;
	struct igb_ring *ring;
	int ring_count;
	size_t size;

	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	ring_count = txr_count + rxr_count;
	size = struct_size(q_vector, ring, ring_count);

	/* allocate q_vector and rings */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector) {
		q_vector = kzalloc(size, GFP_KERNEL);
	} else if (size > ksize(q_vector)) {
		kfree_rcu(q_vector, rcu);
		q_vector = kzalloc(size, GFP_KERNEL);
	} else {
		memset(q_vector, 0, size);
	}
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       igb_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
	q_vector->itr_val = IGB_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igb_add_ring(ring, &q_vector->tx);

		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		ring->cbs_enable = false;
		ring->idleslope = 0;
		ring->sendslope = 0;
		ring->hicredit = 0;
		ring->locredit = 0;

		u64_stats_init(&ring->tx_syncp);
		u64_stats_init(&ring->tx_syncp2);

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igb_add_ring(ring, &q_vector->rx);

		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, i354, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		u64_stats_init(&ring->rx_syncp);

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}

static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igb_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

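/**
 *  igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *  @adapter: board private structure to initialize
 *  @msix: boolean value of MSIX capability
 *
 *  This function initializes the interrupts and allocates all of the queues.
 **/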
static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter, msix);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	igb_cache_ring_register(adapter);

	return 0;

err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}
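/**
 *  igb_request_irq - initialize interrupts
 *  @adapter: board private structure to initialize
 *
 *  Attempts to configure interrupts using the best available
 *  capabilities of the hardware and kernel.
 **/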
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);

		igb_clear_interrupt_scheme(adapter);
		err = igb_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;

		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
		igb_configure(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 *  igb_irq_disable - Mask off interrupt generation on the NIC
 *  @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 regval = rd32(E1000_EIAM);

		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int i;

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 *  igb_irq_enable - Enable default interrupt generation settings
 *  @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);

		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, pf_id, true, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, vid, pf_id, false, true);
	}
}

/**
 *  igb_release_hw_control - release control of the h/w to f/w
 *  @adapter: address of board private structure
 *
 *  igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 *  For ASF and Pass Through versions of f/w this means that the
 *  driver is no longer loaded.
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 *  igb_get_hw_control - get control of the h/w from f/w
 *  @adapter: address of board private structure
 *
 *  igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 *  For ASF and Pass Through versions of f/w this means that
 *  the driver is loaded.
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

static void enable_fqtss(struct igb_adapter *adapter, bool enable)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(hw->mac.type != e1000_i210);

	if (enable)
		adapter->flags |= IGB_FLAG_FQTSS;
	else
		adapter->flags &= ~IGB_FLAG_FQTSS;

	if (netif_running(netdev))
		schedule_work(&adapter->reset_task);
}

static bool is_fqtss_enabled(struct igb_adapter *adapter)
{
	return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
}

static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
				   enum tx_queue_prio prio)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 4);

	val = rd32(E1000_I210_TXDCTL(queue));

	if (prio == TX_QUEUE_PRIO_HIGH)
		val |= E1000_TXDCTL_PRIORITY;
	else
		val &= ~E1000_TXDCTL_PRIORITY;

	wr32(E1000_I210_TXDCTL(queue), val);
}

static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);

	val = rd32(E1000_I210_TQAVCC(queue));

	if (mode == QUEUE_MODE_STREAM_RESERVATION)
		val |= E1000_TQAVCC_QUEUEMODE;
	else
		val &= ~E1000_TQAVCC_QUEUEMODE;

	wr32(E1000_I210_TQAVCC(queue), val);
}

static bool is_any_cbs_enabled(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i]->cbs_enable)
			return true;
	}

	return false;
}

static bool is_any_txtime_enabled(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i]->launchtime_enable)
			return true;
	}

	return false;
}

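/**
 *  igb_config_tx_modes - Configure "Qav Tx mode" features on igb
 *  @adapter: pointer to adapter struct
 *  @queue: queue number
 *
 *  Configure CBS and Launchtime for a given hardware queue.
 *  Parameters are retrieved from the correct Tx ring, so
 *  igb_save_cbs_params() and igb_save_txtime_params() should be used
 *  to save the parameters before calling this function.
 **/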
static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
	struct igb_ring *ring = adapter->tx_ring[queue];
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tqavcc, tqavctrl;
	u16 value;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);

	/* If any of the Qav features is enabled, configure queues as SR and
	 * with HIGH PRIO. If none is, then configure them with LOW PRIO and
	 * as SP.
	 */
	if (ring->cbs_enable || ring->launchtime_enable) {
		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
		set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
	} else {
		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
		set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
	}

	/* If CBS is enabled, set DataTranARB and config its parameters. */
	if (ring->cbs_enable || queue == 0) {
		/* i210 does not allow the queue 0 to be in the Strict
		 * Priority mode while the Qav mode is enabled, so,
		 * instead of disabling strict priority mode, we give
		 * queue 0 the maximum of credits possible.
		 *
		 * See section 8.12.19 of the i210 datasheet, "Note:
		 * Queue0 QueueMode must be set to 1b when
		 * TransmitMode is set to Qav."
		 */
		if (queue == 0 && !ring->cbs_enable) {
			/* max "linkspeed" idleslope in kbps */
			ring->idleslope = 1000000;
			ring->hicredit = ETH_FRAME_LEN;
		}

		/* Always set data transfer arbitration to credit-based
		 * shaper algorithm on TQAVCTRL if CBS is enabled for any of
		 * the queues.
		 */
		tqavctrl = rd32(E1000_I210_TQAVCTRL);
		tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
		wr32(E1000_I210_TQAVCTRL, tqavctrl);

		/* According to i210 datasheet section 7.2.7.7, we should set
		 * the 'idleSlope' field from TQAVCC register following the
		 * equation:
		 *
		 *     value = idleSlope * 61034
		 *             -----------------          (E6)
		 *                  1000000
		 *
		 * NOTE that 'idleSlope' is the parameter received via
		 * ndo_setup_tc() and it always comes in kbps; 61034 folds the
		 * datasheet's per-link-speed equations (based on the 0x7735
		 * credit unit) into a single expression.
		 */
		value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);

		tqavcc = rd32(E1000_I210_TQAVCC(queue));
		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
		tqavcc |= value;
		wr32(E1000_I210_TQAVCC(queue), tqavcc);

		wr32(E1000_I210_TQAVHC(queue),
		     0x80000000 + ring->hicredit * 0x7735);
	} else {
		/* Set idleSlope to zero. */
		tqavcc = rd32(E1000_I210_TQAVCC(queue));
		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
		wr32(E1000_I210_TQAVCC(queue), tqavcc);

		/* Set hiCredit to zero. */
		wr32(E1000_I210_TQAVHC(queue), 0);

		/* If CBS is not enabled for any queues anymore, then return to
		 * the default state of Data Transmission Arbitration on
		 * TQAVCTRL.
		 */
		if (!is_any_cbs_enabled(adapter)) {
			tqavctrl = rd32(E1000_I210_TQAVCTRL);
			tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
			wr32(E1000_I210_TQAVCTRL, tqavctrl);
		}
	}

	/* If LaunchTime is enabled, set DataTranTIM. */
	if (ring->launchtime_enable) {
		/* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled
		 * for any of the SR queues, and configure fetchtime delta.
		 * XXX NOTE:
		 *     - LaunchTime will be enabled for all SR queues.
		 *     - A fixed offset can be added relative to the launch
		 *       time of all packets if configured at reg LAUNCH_OS0.
		 *       We are keeping it as 0 for now (default value).
		 */
		tqavctrl = rd32(E1000_I210_TQAVCTRL);
		tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
			    E1000_TQAVCTRL_FETCHTIME_DELTA;
		wr32(E1000_I210_TQAVCTRL, tqavctrl);
	} else {
		/* If Launchtime is not enabled for any SR queues anymore,
		 * then clear DataTranTIM on TQAVCTRL and clear fetchtime
		 * delta, effectively disabling Launchtime.
		 */
		if (!is_any_txtime_enabled(adapter)) {
			tqavctrl = rd32(E1000_I210_TQAVCTRL);
			tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
			tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
			wr32(E1000_I210_TQAVCTRL, tqavctrl);
		}
	}

	/* XXX: In i210 controller the sendSlope and loCredit parameters from
	 * CBS are not configurable by software so we don't do any 'controller
	 * configuration' in respect to these parameters.
	 */
	netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
		   ring->cbs_enable ? "enabled" : "disabled",
		   ring->launchtime_enable ? "enabled" : "disabled",
		   queue,
		   ring->idleslope, ring->sendslope,
		   ring->hicredit, ring->locredit);
}

static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
				  bool enable)
{
	struct igb_ring *ring;

	if (queue < 0 || queue >= adapter->num_tx_queues)
		return -EINVAL;

	ring = adapter->tx_ring[queue];
	ring->launchtime_enable = enable;

	return 0;
}

static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
			       bool enable, int idleslope, int sendslope,
			       int hicredit, int locredit)
{
	struct igb_ring *ring;

	if (queue < 0 || queue >= adapter->num_tx_queues)
		return -EINVAL;

	ring = adapter->tx_ring[queue];

	ring->cbs_enable = enable;
	ring->idleslope = idleslope;
	ring->sendslope = sendslope;
	ring->hicredit = hicredit;
	ring->locredit = locredit;

	return 0;
}
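/**
 *  igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
 *  @adapter: pointer to adapter struct
 *
 *  Configure TQAVCTRL register switching the controller's Tx mode
 *  if FQTSS mode is enabled or disabled. Additionally, will issue
 *  a call to igb_config_tx_modes() per queue so any previously saved
 *  Tx parameters are applied.
 **/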
static void igb_setup_tx_mode(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 val;

	/* Only i210 controller supports changing the transmission mode. */
	if (hw->mac.type != e1000_i210)
		return;

	if (is_fqtss_enabled(adapter)) {
		int i, max_queue;

		/* Configure TQAVCTRL register: set transmit mode to 'Qav',
		 * set data fetch arbitration to 'round robin', and set SP_WAIT_SR
		 * so SP queues wait for SR ones.
		 */
		val = rd32(E1000_I210_TQAVCTRL);
		val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
		val &= ~E1000_TQAVCTRL_DATAFETCHARB;
		wr32(E1000_I210_TQAVCTRL, val);

		/* Configure Tx and Rx packet buffers sizes as described in
		 * i210 datasheet section 7.2.7.7.
		 */
		val = rd32(E1000_TXPBS);
		val &= ~I210_TXPBSIZE_MASK;
		val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
			I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
		wr32(E1000_TXPBS, val);

		val = rd32(E1000_RXPBS);
		val &= ~I210_RXPBSIZE_MASK;
		val |= I210_RXPBSIZE_PB_30KB;
		wr32(E1000_RXPBS, val);

		/* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
		 * register should not exceed the buffer size programmed in
		 * TXPBS. The smallest buffer size programmed in TXPBS is 4kB
		 * so according to the datasheet we should set MAX_TPKT_SIZE to
		 * 4kB / 64.
		 *
		 * However, when we do so, no frame from queue 2 and 3 are
		 * transmitted.  It seems the MAX_TPKT_SIZE should not be great
		 * or _equal_ to the buffer size programmed in TXPBS. For this
		 * reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64.
		 */
		val = (4096 - 1) / 64;
		wr32(E1000_I210_DTXMXPKTSZ, val);

		/* Since FQTSS mode is enabled, apply any CBS configuration
		 * previously set. If no previous CBS configuration has been
		 * done, then the initial configuration is applied, which means
		 * CBS is disabled.
		 */
		max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
			    adapter->num_tx_queues : I210_SR_QUEUES_NUM;

		for (i = 0; i < max_queue; i++) {
			igb_config_tx_modes(adapter, i);
		}
	} else {
		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
		wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);

		val = rd32(E1000_I210_TQAVCTRL);
		/* According to Section 8.12.21, the other flags we've set when
		 * enabling FQTSS are not relevant when disabling FQTSS so we
		 * don't set they here.
		 */
		val &= ~E1000_TQAVCTRL_XMIT_MODE;
		wr32(E1000_I210_TQAVCTRL, val);
	}

	netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
		   "enabled" : "disabled");
}

/**
 *  igb_configure - configure the hardware for RX and TX
 *  @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);
	igb_setup_tx_mode(adapter);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_nfc_filter_restore(adapter);
	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 *  igb_power_up_link - Power up the phy/serdes link
 *  @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	igb_reset_phy(&adapter->hw);

	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);

	igb_setup_link(&adapter->hw);
}

/**
 *  igb_power_down_link - Power down the phy/serdes link
 *  @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 *  igb_check_swap_media -  Detect and switch function for Media Auto Sense
 *  @adapter: address of the board private structure
 **/
static void igb_check_swap_media(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext, connsw;
	bool swap_now = false;

	ctrl_ext = rd32(E1000_CTRL_EXT);
	connsw = rd32(E1000_CONNSW);

	/* need to live swap if current media is copper and we have fiber/serdes
	 * to go to.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) &&
	    (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
		swap_now = true;
	} else if (!(connsw & E1000_CONNSW_SERDESD)) {
		/* copper signal takes time to appear */
		if (adapter->copper_tries < 4) {
			adapter->copper_tries++;
			connsw |= E1000_CONNSW_AUTOSENSE_CONF;
			wr32(E1000_CONNSW, connsw);
			return;
		} else {
			adapter->copper_tries = 0;
			if ((connsw & E1000_CONNSW_PHYSD) &&
			    (!(connsw & E1000_CONNSW_PHY_PDN))) {
				swap_now = true;
				connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
				wr32(E1000_CONNSW, connsw);
			}
		}
	}

	if (!swap_now)
		return;

	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		netdev_info(adapter->netdev,
			    "MAS: changing media to fiber/serdes\n");
		ctrl_ext |=
			E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
		adapter->flags |= IGB_FLAG_MEDIA_RESET;
		adapter->copper_tries = 0;
		break;
	case e1000_media_type_internal_serdes:
	case e1000_media_type_fiber:
		netdev_info(adapter->netdev,
			    "MAS: changing media to copper\n");
		ctrl_ext &=
			~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
		adapter->flags |= IGB_FLAG_MEDIA_RESET;
		break;
	default:
		/* shouldn't get here during regular operation */
		netdev_err(adapter->netdev,
			   "AMS: Invalid media type found, returning\n");
		break;
	}
	wr32(E1000_CTRL_EXT, ctrl_ext);
}

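/**
 *  igb_up - Open the interface and prepare it to handle traffic
 *  @adapter: board private structure
 **/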
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_TSICR);
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);

		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	if ((adapter->flags & IGB_FLAG_EEE) &&
	    (!hw->dev_spec._82575.eee_disable))
		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	igb_nfc_filter_exit(adapter);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	usleep_range(10000, 11000);

	igb_irq_disable(adapter);

	adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		if (adapter->q_vector[i]) {
			napi_synchronize(&adapter->q_vector[i]->napi);
			napi_disable(&adapter->q_vector[i]->napi);
		}
	}

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);

	/* clear VLAN promisc flag so VFTA will be updated if necessary */
	adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;

	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

/**
 *  igb_enable_mas - Media Autosense re-enable after swap
 *  @adapter: adapter struct
 **/
static void igb_enable_mas(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 connsw = rd32(E1000_CONNSW);

	/* configure for SerDes media detect */
	if ((hw->phy.media_type == e1000_media_type_copper) &&
	    (!(connsw & E1000_CONNSW_SERDESD))) {
		connsw |= E1000_CONNSW_ENRGSRC;
		connsw |= E1000_CONNSW_AUTOSENSE_EN;
		wr32(E1000_CONNSW, connsw);
		wrfl();
	}
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_i354:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if (mac->type == e1000_82575) {
		u32 min_rx_space, min_tx_space, needed_tx_space;

		/* write Rx PBA so that hardware can report correct Tx PBA */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);

		/* The Tx FIFO also stores 16 bytes of information about the Tx
		 * but don't include Ethernet FCS because hardware appends it.
		 * We only need to round down to the nearest 512 byte block
		 * count since the value we care about is 2 frames, not 1.
		 */
		min_tx_space = adapter->max_frame_size;
		min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
		min_tx_space = DIV_ROUND_UP(min_tx_space, 512);

		/* upper 16 bits has Tx packet buffer allocation size in KB */
		needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation.
		 */
		if (needed_tx_space < pba) {
			pba -= needed_tx_space;

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}

		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);
	}

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame.  As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;

		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
		/* need to resetup here after media swap */
		adapter->ei.get_invariants(hw);
		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
	}
	if ((mac->type == e1000_82575) &&
	    (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
		igb_enable_mas(adapter);
	}
	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	/* RAR registers were cleared during init_hw, clear mac table */
	igb_flush_mac_table(adapter);
	__dev_uc_unsync(adapter->netdev, NULL);

	/* Recover default RAR entry */
	igb_set_default_mac_filter(adapter);

	/* Flow control settings reset on hardware reset, so guarantee flow
	 * control is off when forcing speed.
	 */
	if (!hw->mac.autoneg)
		igb_force_mac_fc(hw);

	igb_init_dmac(adapter, pba);
#ifdef CONFIG_IGB_HWMON
	/* Re-initialize the thermal sensor on i350 devices. */
	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (mac->type == e1000_i350 && hw->bus.func == 0) {
			/* If present, re-initialize the external thermal
			 * sensor interface.
			 */
			if (adapter->ets)
				mac->ops.init_thermal_sensor_thresh(hw);
		}
	}
#endif
	/* Re-establish EEE setting */
	if (hw->phy.media_type == e1000_media_type_copper) {
		switch (mac->type) {
		case e1000_i350:
		case e1000_i210:
		case e1000_i211:
			igb_set_eee_i350(hw, true, true);
			break;
		case e1000_i354:
			igb_set_eee_i354(hw, true, true);
			break;
		default:
			break;
		}
	}
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	/* Re-enable PTP, where applicable. */
	if (adapter->ptp_flags & IGB_PTP_ENABLED)
		igb_ptp_reset(adapter);

	igb_get_phy_info(hw);
}
2435
2436static netdev_features_t igb_fix_features(struct net_device *netdev,
2437 netdev_features_t features)
2438{
 /* Since there is no support for separate Rx/Tx vlan accel
 * enable/disable make sure Tx flag is always in same state as Rx.
 */
2442 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2443 features |= NETIF_F_HW_VLAN_CTAG_TX;
2444 else
2445 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2446
2447 return features;
2448}
2449
2450static int igb_set_features(struct net_device *netdev,
2451 netdev_features_t features)
2452{
2453 netdev_features_t changed = netdev->features ^ features;
2454 struct igb_adapter *adapter = netdev_priv(netdev);
2455
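 /* toggle hardware VLAN tag stripping to match the requested Rx state */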
2456 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2457 igb_vlan_mode(netdev, features);
2458
2459 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2460 return 0;
2461
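 /* when NTUPLE is turned off, flush any Rx flow filters that were
 * added through ethtool
 */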
2462 if (!(features & NETIF_F_NTUPLE)) {
2463 struct hlist_node *node2;
2464 struct igb_nfc_filter *rule;
2465
2466 spin_lock(&adapter->nfc_lock);
2467 hlist_for_each_entry_safe(rule, node2,
2468 &adapter->nfc_filter_list, nfc_node) {
2469 igb_erase_filter(adapter, rule);
2470 hlist_del(&rule->nfc_node);
2471 kfree(rule);
2472 }
2473 spin_unlock(&adapter->nfc_lock);
2474 adapter->nfc_filter_count = 0;
2475 }
2476
2477 netdev->features = features;
2478
2479 if (netif_running(netdev))
2480 igb_reinit_locked(adapter);
2481 else
2482 igb_reset(adapter);
2483
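 /* a positive return tells the stack that netdev->features has already
 * been updated by the driver
 */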
2484 return 1;
2485}
2486
2487static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2488 struct net_device *dev,
2489 const unsigned char *addr, u16 vid,
2490 u16 flags,
2491 struct netlink_ext_ack *extack)
2492{
 /* guarantee we can provide a unique filter for the unicast address */
2494 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2495 struct igb_adapter *adapter = netdev_priv(dev);
2496 int vfn = adapter->vfs_allocated_count;
2497
2498 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2499 return -ENOMEM;
2500 }
2501
2502 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2503}
2504
2505#define IGB_MAX_MAC_HDR_LEN 127
2506#define IGB_MAX_NETWORK_HDR_LEN 511
2507
2508static netdev_features_t
2509igb_features_check(struct sk_buff *skb, struct net_device *dev,
2510 netdev_features_t features)
2511{
2512 unsigned int network_hdr_len, mac_hdr_len;
2513
 /* Make certain the headers can be described by a context descriptor */
2515 mac_hdr_len = skb_network_header(skb) - skb->data;
2516 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2517 return features & ~(NETIF_F_HW_CSUM |
2518 NETIF_F_SCTP_CRC |
2519 NETIF_F_HW_VLAN_CTAG_TX |
2520 NETIF_F_TSO |
2521 NETIF_F_TSO6);
2522
2523 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2524 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2525 return features & ~(NETIF_F_HW_CSUM |
2526 NETIF_F_SCTP_CRC |
2527 NETIF_F_TSO |
2528 NETIF_F_TSO6);
2529

 /* We can only support IPv4 TSO in tunnels if we can mangle the
 * inner IP ID field, so strip TSO if MANGLEID is not supported.
 */
2533 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2534 features &= ~NETIF_F_TSO;
2535
2536 return features;
2537}
2538
2539static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
2540{
2541 if (!is_fqtss_enabled(adapter)) {
2542 enable_fqtss(adapter, true);
2543 return;
2544 }
2545
2546 igb_config_tx_modes(adapter, queue);
2547
2548 if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
2549 enable_fqtss(adapter, false);
2550}
2551
2552static int igb_offload_cbs(struct igb_adapter *adapter,
2553 struct tc_cbs_qopt_offload *qopt)
2554{
2555 struct e1000_hw *hw = &adapter->hw;
2556 int err;

 /* CBS offloading is only supported by i210 controller. */
 if (hw->mac.type != e1000_i210)
 return -EOPNOTSUPP;

 /* CBS offloading is only supported by queue 0 and queue 1. */
2563 if (qopt->queue < 0 || qopt->queue > 1)
2564 return -EINVAL;
2565
2566 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2567 qopt->idleslope, qopt->sendslope,
2568 qopt->hicredit, qopt->locredit);
2569 if (err)
2570 return err;
2571
2572 igb_offload_apply(adapter, qopt->queue);
2573
2574 return 0;
2575}
2576
2577#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
2578#define VLAN_PRIO_FULL_MASK (0x07)
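/* cls_flower matches are offloaded as exact matches only, hence the
 * full masks above.
 */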
2579
2580static int igb_parse_cls_flower(struct igb_adapter *adapter,
2581 struct flow_cls_offload *f,
2582 int traffic_class,
2583 struct igb_nfc_filter *input)
2584{
2585 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2586 struct flow_dissector *dissector = rule->match.dissector;
2587 struct netlink_ext_ack *extack = f->common.extack;
2588
2589 if (dissector->used_keys &
2590 ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
2591 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2592 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2593 BIT(FLOW_DISSECTOR_KEY_VLAN))) {
2594 NL_SET_ERR_MSG_MOD(extack,
2595 "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
2596 return -EOPNOTSUPP;
2597 }
2598
2599 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2600 struct flow_match_eth_addrs match;
2601
2602 flow_rule_match_eth_addrs(rule, &match);
2603 if (!is_zero_ether_addr(match.mask->dst)) {
2604 if (!is_broadcast_ether_addr(match.mask->dst)) {
2605 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
2606 return -EINVAL;
2607 }
2608
2609 input->filter.match_flags |=
2610 IGB_FILTER_FLAG_DST_MAC_ADDR;
2611 ether_addr_copy(input->filter.dst_addr, match.key->dst);
2612 }
2613
2614 if (!is_zero_ether_addr(match.mask->src)) {
2615 if (!is_broadcast_ether_addr(match.mask->src)) {
2616 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
2617 return -EINVAL;
2618 }
2619
2620 input->filter.match_flags |=
2621 IGB_FILTER_FLAG_SRC_MAC_ADDR;
2622 ether_addr_copy(input->filter.src_addr, match.key->src);
2623 }
2624 }
2625
2626 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2627 struct flow_match_basic match;
2628
2629 flow_rule_match_basic(rule, &match);
2630 if (match.mask->n_proto) {
2631 if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
2632 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
2633 return -EINVAL;
2634 }
2635
2636 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
2637 input->filter.etype = match.key->n_proto;
2638 }
2639 }
2640
2641 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2642 struct flow_match_vlan match;
2643
2644 flow_rule_match_vlan(rule, &match);
2645 if (match.mask->vlan_priority) {
2646 if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
2647 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
2648 return -EINVAL;
2649 }
2650
2651 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
2652 input->filter.vlan_tci = match.key->vlan_priority;
2653 }
2654 }
2655
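 /* record the destination traffic class and the cookie later used to
 * identify this filter on deletion
 */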
2656 input->action = traffic_class;
2657 input->cookie = f->cookie;
2658
2659 return 0;
2660}
2661
2662static int igb_configure_clsflower(struct igb_adapter *adapter,
2663 struct flow_cls_offload *cls_flower)
2664{
2665 struct netlink_ext_ack *extack = cls_flower->common.extack;
2666 struct igb_nfc_filter *filter, *f;
2667 int err, tc;
2668
2669 tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2670 if (tc < 0) {
2671 NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
2672 return -EINVAL;
2673 }
2674
2675 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2676 if (!filter)
2677 return -ENOMEM;
2678
2679 err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
2680 if (err < 0)
2681 goto err_parse;
2682
2683 spin_lock(&adapter->nfc_lock);
2684
2685 hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
2686 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2687 err = -EEXIST;
2688 NL_SET_ERR_MSG_MOD(extack,
2689 "This filter is already set in ethtool");
2690 goto err_locked;
2691 }
2692 }
2693
2694 hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
2695 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2696 err = -EEXIST;
2697 NL_SET_ERR_MSG_MOD(extack,
2698 "This filter is already set in cls_flower");
2699 goto err_locked;
2700 }
2701 }
2702
2703 err = igb_add_filter(adapter, filter);
2704 if (err < 0) {
2705 NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
2706 goto err_locked;
2707 }
2708
2709 hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
2710
2711 spin_unlock(&adapter->nfc_lock);
2712
2713 return 0;
2714
2715err_locked:
2716 spin_unlock(&adapter->nfc_lock);
2717
2718err_parse:
2719 kfree(filter);
2720
2721 return err;
2722}
2723
2724static int igb_delete_clsflower(struct igb_adapter *adapter,
2725 struct flow_cls_offload *cls_flower)
2726{
2727 struct igb_nfc_filter *filter;
2728 int err;
2729
2730 spin_lock(&adapter->nfc_lock);
2731
2732 hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
2733 if (filter->cookie == cls_flower->cookie)
2734 break;
2735
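 /* filter is NULL here if the list was exhausted without a cookie match */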
2736 if (!filter) {
2737 err = -ENOENT;
2738 goto out;
2739 }
2740
2741 err = igb_erase_filter(adapter, filter);
2742 if (err < 0)
2743 goto out;
2744
2745 hlist_del(&filter->nfc_node);
2746 kfree(filter);
2747
2748out:
2749 spin_unlock(&adapter->nfc_lock);
2750
2751 return err;
2752}
2753
2754static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
2755 struct flow_cls_offload *cls_flower)
2756{
2757 switch (cls_flower->command) {
2758 case FLOW_CLS_REPLACE:
2759 return igb_configure_clsflower(adapter, cls_flower);
2760 case FLOW_CLS_DESTROY:
2761 return igb_delete_clsflower(adapter, cls_flower);
2762 case FLOW_CLS_STATS:
2763 return -EOPNOTSUPP;
2764 default:
2765 return -EOPNOTSUPP;
2766 }
2767}
2768
2769static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2770 void *cb_priv)
2771{
2772 struct igb_adapter *adapter = cb_priv;
2773
2774 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
2775 return -EOPNOTSUPP;
2776
2777 switch (type) {
2778 case TC_SETUP_CLSFLOWER:
2779 return igb_setup_tc_cls_flower(adapter, type_data);
2780
2781 default:
2782 return -EOPNOTSUPP;
2783 }
2784}
2785
2786static int igb_offload_txtime(struct igb_adapter *adapter,
2787 struct tc_etf_qopt_offload *qopt)
2788{
2789 struct e1000_hw *hw = &adapter->hw;
2790 int err;

 /* Launchtime offloading is only supported by i210 controller. */
 if (hw->mac.type != e1000_i210)
 return -EOPNOTSUPP;

 /* Launchtime offloading is only supported by queues 0 and 1. */
2797 if (qopt->queue < 0 || qopt->queue > 1)
2798 return -EINVAL;
2799
2800 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
2801 if (err)
2802 return err;
2803
2804 igb_offload_apply(adapter, qopt->queue);
2805
2806 return 0;
2807}
2808
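/* flow block callbacks registered by this driver; shared across all igb
 * ports so that flow_block_cb_setup_simple() can detect duplicate bindings
 */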
2809static LIST_HEAD(igb_block_cb_list);
2810
2811static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2812 void *type_data)
2813{
2814 struct igb_adapter *adapter = netdev_priv(dev);
2815
2816 switch (type) {
2817 case TC_SETUP_QDISC_CBS:
2818 return igb_offload_cbs(adapter, type_data);
2819 case TC_SETUP_BLOCK:
2820 return flow_block_cb_setup_simple(type_data,
2821 &igb_block_cb_list,
2822 igb_setup_tc_block_cb,
2823 adapter, adapter, true);
2824
2825 case TC_SETUP_QDISC_ETF:
2826 return igb_offload_txtime(adapter, type_data);
2827
2828 default:
2829 return -EOPNOTSUPP;
2830 }
2831}
2832
2833static const struct net_device_ops igb_netdev_ops = {
2834 .ndo_open = igb_open,
2835 .ndo_stop = igb_close,
2836 .ndo_start_xmit = igb_xmit_frame,
2837 .ndo_get_stats64 = igb_get_stats64,
2838 .ndo_set_rx_mode = igb_set_rx_mode,
2839 .ndo_set_mac_address = igb_set_mac,
2840 .ndo_change_mtu = igb_change_mtu,
2841 .ndo_do_ioctl = igb_ioctl,
2842 .ndo_tx_timeout = igb_tx_timeout,
2843 .ndo_validate_addr = eth_validate_addr,
2844 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
2845 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
2846 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
2847 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
2848 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
2849 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
2850 .ndo_set_vf_trust = igb_ndo_set_vf_trust,
2851 .ndo_get_vf_config = igb_ndo_get_vf_config,
2852 .ndo_fix_features = igb_fix_features,
2853 .ndo_set_features = igb_set_features,
2854 .ndo_fdb_add = igb_ndo_fdb_add,
2855 .ndo_features_check = igb_features_check,
2856 .ndo_setup_tc = igb_setup_tc,
2857};
2858
/**
 * igb_set_fw_version - Configure version string for ethtool
 * @adapter: adapter struct
 **/
2863void igb_set_fw_version(struct igb_adapter *adapter)
2864{
2865 struct e1000_hw *hw = &adapter->hw;
2866 struct e1000_fw_version fw;
2867
2868 igb_get_fw_version(hw, &fw);
2869
2870 switch (hw->mac.type) {
2871 case e1000_i210:
2872 case e1000_i211:
2873 if (!(igb_get_flash_presence_i210(hw))) {
2874 snprintf(adapter->fw_version,
2875 sizeof(adapter->fw_version),
2876 "%2d.%2d-%d",
2877 fw.invm_major, fw.invm_minor,
2878 fw.invm_img_type);
2879 break;
2880 }
2881
2882 default:
 /* if option rom is valid, display its version too */
2884 if (fw.or_valid) {
2885 snprintf(adapter->fw_version,
2886 sizeof(adapter->fw_version),
2887 "%d.%d, 0x%08x, %d.%d.%d",
2888 fw.eep_major, fw.eep_minor, fw.etrack_id,
2889 fw.or_major, fw.or_build, fw.or_patch);
2890
 } else if (fw.etrack_id != 0x0000) {
2892 snprintf(adapter->fw_version,
2893 sizeof(adapter->fw_version),
2894 "%d.%d, 0x%08x",
2895 fw.eep_major, fw.eep_minor, fw.etrack_id);
2896 } else {
2897 snprintf(adapter->fw_version,
2898 sizeof(adapter->fw_version),
2899 "%d.%d.%d",
2900 fw.eep_major, fw.eep_minor, fw.eep_build);
2901 }
2902 break;
2903 }
2904}
2905
/**
 * igb_init_mas - init Media Autosense feature if enabled in the NVM
 * @adapter: adapter struct
 **/
2911static void igb_init_mas(struct igb_adapter *adapter)
2912{
2913 struct e1000_hw *hw = &adapter->hw;
2914 u16 eeprom_data;
2915
2916 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
2917 switch (hw->bus.func) {
2918 case E1000_FUNC_0:
2919 if (eeprom_data & IGB_MAS_ENABLE_0) {
2920 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2921 netdev_info(adapter->netdev,
2922 "MAS: Enabling Media Autosense for port %d\n",
2923 hw->bus.func);
2924 }
2925 break;
2926 case E1000_FUNC_1:
2927 if (eeprom_data & IGB_MAS_ENABLE_1) {
2928 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2929 netdev_info(adapter->netdev,
2930 "MAS: Enabling Media Autosense for port %d\n",
2931 hw->bus.func);
2932 }
2933 break;
2934 case E1000_FUNC_2:
2935 if (eeprom_data & IGB_MAS_ENABLE_2) {
2936 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2937 netdev_info(adapter->netdev,
2938 "MAS: Enabling Media Autosense for port %d\n",
2939 hw->bus.func);
2940 }
2941 break;
2942 case E1000_FUNC_3:
2943 if (eeprom_data & IGB_MAS_ENABLE_3) {
2944 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2945 netdev_info(adapter->netdev,
2946 "MAS: Enabling Media Autosense for port %d\n",
2947 hw->bus.func);
2948 }
2949 break;
2950 default:
 /* shouldn't get here */
2952 netdev_err(adapter->netdev,
2953 "MAS: Invalid port configuration, returning\n");
2954 break;
2955 }
2956}
2957
/**
 * igb_init_i2c - Init I2C interface
 * @adapter: pointer to adapter structure
 **/
2962static s32 igb_init_i2c(struct igb_adapter *adapter)
2963{
2964 s32 status = 0;
2965
 /* I2C interface supported on i350 devices */
 if (adapter->hw.mac.type != e1000_i350)
 return 0;

 /* Initialize the i2c bus which is controlled by the registers.
 * This bus will use the i2c_algo_bit structure that implements
 * the Protocol through the register accessor functions.
 */
2974 adapter->i2c_adap.owner = THIS_MODULE;
2975 adapter->i2c_algo = igb_i2c_algo;
2976 adapter->i2c_algo.data = adapter;
2977 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
2978 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
2979 strlcpy(adapter->i2c_adap.name, "igb BB",
2980 sizeof(adapter->i2c_adap.name));
2981 status = i2c_bit_add_bus(&adapter->i2c_adap);
2982 return status;
2983}
2984
/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
2996static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2997{
2998 struct net_device *netdev;
2999 struct igb_adapter *adapter;
3000 struct e1000_hw *hw;
3001 u16 eeprom_data = 0;
3002 s32 ret_val;
3003 static int global_quad_port_a;
3004 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
3005 int err, pci_using_dac;
3006 u8 part_str[E1000_PBANUM_LENGTH];
3007

 /* Catch broken hardware that put the wrong VF device ID in
 * the PCIe SR-IOV capability.
 */
3011 if (pdev->is_virtfn) {
3012 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
3013 pci_name(pdev), pdev->vendor, pdev->device);
3014 return -EINVAL;
3015 }
3016
3017 err = pci_enable_device_mem(pdev);
3018 if (err)
3019 return err;
3020
3021 pci_using_dac = 0;
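 /* prefer the 64-bit DMA mask and fall back to 32-bit */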
3022 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3023 if (!err) {
3024 pci_using_dac = 1;
3025 } else {
3026 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3027 if (err) {
3028 dev_err(&pdev->dev,
3029 "No usable DMA configuration, aborting\n");
3030 goto err_dma;
3031 }
3032 }
3033
3034 err = pci_request_mem_regions(pdev, igb_driver_name);
3035 if (err)
3036 goto err_pci_reg;
3037
3038 pci_enable_pcie_error_reporting(pdev);
3039
3040 pci_set_master(pdev);
3041 pci_save_state(pdev);
3042
3043 err = -ENOMEM;
3044 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
3045 IGB_MAX_TX_QUEUES);
3046 if (!netdev)
3047 goto err_alloc_etherdev;
3048
3049 SET_NETDEV_DEV(netdev, &pdev->dev);
3050
3051 pci_set_drvdata(pdev, netdev);
3052 adapter = netdev_priv(netdev);
3053 adapter->netdev = netdev;
3054 adapter->pdev = pdev;
3055 hw = &adapter->hw;
3056 hw->back = adapter;
3057 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3058
3059 err = -EIO;
3060 adapter->io_addr = pci_iomap(pdev, 0, 0);
3061 if (!adapter->io_addr)
3062 goto err_ioremap;
3063
3064 hw->hw_addr = adapter->io_addr;
3065
3066 netdev->netdev_ops = &igb_netdev_ops;
3067 igb_set_ethtool_ops(netdev);
3068 netdev->watchdog_timeo = 5 * HZ;
3069
3070 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
3071
3072 netdev->mem_start = pci_resource_start(pdev, 0);
3073 netdev->mem_end = pci_resource_end(pdev, 0);
3074
 /* PCI config space info */
3076 hw->vendor_id = pdev->vendor;
3077 hw->device_id = pdev->device;
3078 hw->revision_id = pdev->revision;
3079 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3080 hw->subsystem_device_id = pdev->subsystem_device;
3081
 /* Copy the default MAC, PHY and NVM function pointers */
3083 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3084 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3085 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3086
3087 err = ei->get_invariants(hw);
3088 if (err)
3089 goto err_sw_init;
3090
 /* setup the private structure */
3092 err = igb_sw_init(adapter);
3093 if (err)
3094 goto err_sw_init;
3095
3096 igb_get_bus_info_pcie(hw);
3097
3098 hw->phy.autoneg_wait_to_complete = false;
3099
 /* Copper options */
3101 if (hw->phy.media_type == e1000_media_type_copper) {
3102 hw->phy.mdix = AUTO_ALL_MODES;
3103 hw->phy.disable_polarity_correction = false;
3104 hw->phy.ms_type = e1000_ms_hw_default;
3105 }
3106
3107 if (igb_check_reset_block(hw))
3108 dev_info(&pdev->dev,
3109 "PHY reset is blocked due to SOL/IDER session.\n");
3110
 /* features is initialized to 0 in alloc_etherdev, it might have bits
 * set by igb_sw_init so we should use an or instead of an
 * assignment.
 */
3115 netdev->features |= NETIF_F_SG |
3116 NETIF_F_TSO |
3117 NETIF_F_TSO6 |
3118 NETIF_F_RXHASH |
3119 NETIF_F_RXCSUM |
3120 NETIF_F_HW_CSUM;
3121
3122 if (hw->mac.type >= e1000_82576)
3123 netdev->features |= NETIF_F_SCTP_CRC;
3124
3125 if (hw->mac.type >= e1000_i350)
3126 netdev->features |= NETIF_F_HW_TC;
3127
3128#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
3129 NETIF_F_GSO_GRE_CSUM | \
3130 NETIF_F_GSO_IPXIP4 | \
3131 NETIF_F_GSO_IPXIP6 | \
3132 NETIF_F_GSO_UDP_TUNNEL | \
3133 NETIF_F_GSO_UDP_TUNNEL_CSUM)
3134
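 /* segmentation for these tunnel types is only supported via GSO partial */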
3135 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
3136 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
3137
 /* copy netdev features into list of user selectable features */
3139 netdev->hw_features |= netdev->features |
3140 NETIF_F_HW_VLAN_CTAG_RX |
3141 NETIF_F_HW_VLAN_CTAG_TX |
3142 NETIF_F_RXALL;
3143
3144 if (hw->mac.type >= e1000_i350)
3145 netdev->hw_features |= NETIF_F_NTUPLE;
3146
3147 if (pci_using_dac)
3148 netdev->features |= NETIF_F_HIGHDMA;
3149
3150 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3151 netdev->mpls_features |= NETIF_F_HW_CSUM;
3152 netdev->hw_enc_features |= netdev->vlan_features;
3153
 /* set this bit last since it cannot be part of vlan_features */
3155 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3156 NETIF_F_HW_VLAN_CTAG_RX |
3157 NETIF_F_HW_VLAN_CTAG_TX;
3158
3159 netdev->priv_flags |= IFF_SUPP_NOFCS;
3160
3161 netdev->priv_flags |= IFF_UNICAST_FLT;
3162
 /* MTU range: 68 - 9216 */
3164 netdev->min_mtu = ETH_MIN_MTU;
3165 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3166
3167 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

 /* before reading the NVM, reset the controller to put the device
 * in a known good starting state
 */
 hw->mac.ops.reset_hw(hw);

 /* make sure the NVM is good */
3177 switch (hw->mac.type) {
3178 case e1000_i210:
3179 case e1000_i211:
3180 if (igb_get_flash_presence_i210(hw)) {
3181 if (hw->nvm.ops.validate(hw) < 0) {
3182 dev_err(&pdev->dev,
3183 "The NVM Checksum Is Not Valid\n");
3184 err = -EIO;
3185 goto err_eeprom;
3186 }
3187 }
3188 break;
3189 default:
3190 if (hw->nvm.ops.validate(hw) < 0) {
3191 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
3192 err = -EIO;
3193 goto err_eeprom;
3194 }
3195 break;
3196 }
3197
3198 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
 /* copy the MAC address out of the NVM */
3200 if (hw->mac.ops.read_mac_addr(hw))
3201 dev_err(&pdev->dev, "NVM Read Error\n");
3202 }
3203
3204 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
3205
3206 if (!is_valid_ether_addr(netdev->dev_addr)) {
3207 dev_err(&pdev->dev, "Invalid MAC Address\n");
3208 err = -EIO;
3209 goto err_eeprom;
3210 }
3211
3212 igb_set_default_mac_filter(adapter);

 /* get firmware version for ethtool -i */
 igb_set_fw_version(adapter);

 /* configure RXPBSIZE and TXPBSIZE */
3218 if (hw->mac.type == e1000_i210) {
3219 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
3220 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
3221 }
3222
3223 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
3224 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
3225
3226 INIT_WORK(&adapter->reset_task, igb_reset_task);
3227 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
3228
 /* Initialize link properties that are user-changeable */
3230 adapter->fc_autoneg = true;
3231 hw->mac.autoneg = true;
3232 hw->phy.autoneg_advertised = 0x2f;
3233
3234 hw->fc.requested_mode = e1000_fc_default;
3235 hw->fc.current_mode = e1000_fc_default;
3236
3237 igb_validate_mdi_setting(hw);

 /* By default, support wake on port A */
 if (hw->bus.func == 0)
 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;

 /* Check the NVM for wake support on non-port A ports */
3244 if (hw->mac.type >= e1000_82580)
3245 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3246 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3247 &eeprom_data);
3248 else if (hw->bus.func == 1)
3249 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3250
3251 if (eeprom_data & IGB_EEPROM_APME)
3252 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3253

 /* now that we have the eeprom settings, apply the special cases where
 * the eeprom may be wrong or the board simply won't support wake on
 * lan on a particular port
 */
3258 switch (pdev->device) {
3259 case E1000_DEV_ID_82575GB_QUAD_COPPER:
3260 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3261 break;
3262 case E1000_DEV_ID_82575EB_FIBER_SERDES:
3263 case E1000_DEV_ID_82576_FIBER:
3264 case E1000_DEV_ID_82576_SERDES:
 /* Wake events only supported on port A for dual fiber
 * regardless of eeprom setting
 */
3268 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
3269 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3270 break;
3271 case E1000_DEV_ID_82576_QUAD_COPPER:
3272 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
 /* if quad port adapter, disable WoL on all but port A */
3274 if (global_quad_port_a != 0)
3275 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3276 else
3277 adapter->flags |= IGB_FLAG_QUAD_PORT_A;

 /* Reset for multiple quad port adapters */
3279 if (++global_quad_port_a == 4)
3280 global_quad_port_a = 0;
3281 break;
3282 default:
 /* If the device can't wake, don't set software support */
3284 if (!device_can_wakeup(&adapter->pdev->dev))
3285 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3286 }

 /* initialize the wol settings based on the eeprom settings */
 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
 adapter->wol |= E1000_WUFC_MAG;

 /* Some vendors want WoL disabled by default, but still supported */
3293 if ((hw->mac.type == e1000_i350) &&
3294 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
3295 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3296 adapter->wol = 0;
3297 }
3298
 /* Some vendors want the ability to use the EEPROM setting as
 * enable/disable only, and not for capability
 */
3302 if (((hw->mac.type == e1000_i350) ||
3303 (hw->mac.type == e1000_i354)) &&
3304 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
3305 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3306 adapter->wol = 0;
3307 }
3308 if (hw->mac.type == e1000_i350) {
3309 if (((pdev->subsystem_device == 0x5001) ||
3310 (pdev->subsystem_device == 0x5002)) &&
3311 (hw->bus.func == 0)) {
3312 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3313 adapter->wol = 0;
3314 }
3315 if (pdev->subsystem_device == 0x1F52)
3316 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3317 }
3318
3319 device_set_wakeup_enable(&adapter->pdev->dev,
3320 adapter->flags & IGB_FLAG_WOL_SUPPORTED);

 /* reset the hardware with the new settings */
 igb_reset(adapter);

 /* Init the I2C interface */
3326 err = igb_init_i2c(adapter);
3327 if (err) {
3328 dev_err(&pdev->dev, "failed to init i2c interface\n");
3329 goto err_eeprom;
3330 }
3331

 /* let the f/w know that the h/w is now under the control of the
 * driver
 */
3335 igb_get_hw_control(adapter);
3336
3337 strcpy(netdev->name, "eth%d");
3338 err = register_netdev(netdev);
3339 if (err)
3340 goto err_register;
3341
 /* carrier off reporting is important to ethtool even BEFORE open */
3343 netif_carrier_off(netdev);
3344
3345#ifdef CONFIG_IGB_DCA
3346 if (dca_add_requester(&pdev->dev) == 0) {
3347 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3348 dev_info(&pdev->dev, "DCA enabled\n");
3349 igb_setup_dca(adapter);
3350 }
3351
3352#endif
3353#ifdef CONFIG_IGB_HWMON
 /* Initialize the thermal sensor on i350 devices. */
3355 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3356 u16 ets_word;
3357

 /* Read the NVM to determine if this i350 device supports an
 * external thermal sensor.
 */
3361 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3362 if (ets_word != 0x0000 && ets_word != 0xFFFF)
3363 adapter->ets = true;
3364 else
3365 adapter->ets = false;
3366 if (igb_sysfs_init(adapter))
3367 dev_err(&pdev->dev,
3368 "failed to allocate sysfs resources\n");
3369 } else {
3370 adapter->ets = false;
3371 }
3372#endif
3373
3374 adapter->ei = *ei;
3375 if (hw->dev_spec._82575.mas_capable)
3376 igb_init_mas(adapter);
3377
 /* do hw tstamp init after resetting */
3379 igb_ptp_init(adapter);
3380
3381 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3382
3383 if (hw->mac.type != e1000_i354) {
3384 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3385 netdev->name,
3386 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3387 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3388 "unknown"),
3389 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3390 "Width x4" :
3391 (hw->bus.width == e1000_bus_width_pcie_x2) ?
3392 "Width x2" :
3393 (hw->bus.width == e1000_bus_width_pcie_x1) ?
3394 "Width x1" : "unknown"), netdev->dev_addr);
3395 }
3396
3397 if ((hw->mac.type >= e1000_i210 ||
3398 igb_get_flash_presence_i210(hw))) {
3399 ret_val = igb_read_part_string(hw, part_str,
3400 E1000_PBANUM_LENGTH);
3401 } else {
3402 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3403 }
3404
3405 if (ret_val)
3406 strcpy(part_str, "Unknown");
3407 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3408 dev_info(&pdev->dev,
3409 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3410 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3411 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3412 adapter->num_rx_queues, adapter->num_tx_queues);
3413 if (hw->phy.media_type == e1000_media_type_copper) {
3414 switch (hw->mac.type) {
3415 case e1000_i350:
3416 case e1000_i210:
3417 case e1000_i211:
 /* Enable EEE for internal copper PHY devices */
3419 err = igb_set_eee_i350(hw, true, true);
3420 if ((!err) &&
3421 (!hw->dev_spec._82575.eee_disable)) {
3422 adapter->eee_advert =
3423 MDIO_EEE_100TX | MDIO_EEE_1000T;
3424 adapter->flags |= IGB_FLAG_EEE;
3425 }
3426 break;
3427 case e1000_i354:
3428 if ((rd32(E1000_CTRL_EXT) &
3429 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3430 err = igb_set_eee_i354(hw, true, true);
3431 if ((!err) &&
3432 (!hw->dev_spec._82575.eee_disable)) {
3433 adapter->eee_advert =
3434 MDIO_EEE_100TX | MDIO_EEE_1000T;
3435 adapter->flags |= IGB_FLAG_EEE;
3436 }
3437 }
3438 break;
3439 default:
3440 break;
3441 }
3442 }
3443
3444 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
3445
3446 pm_runtime_put_noidle(&pdev->dev);
3447 return 0;
3448
3449err_register:
3450 igb_release_hw_control(adapter);
3451 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3452err_eeprom:
3453 if (!igb_check_reset_block(hw))
3454 igb_reset_phy(hw);
3455
3456 if (hw->flash_address)
3457 iounmap(hw->flash_address);
3458err_sw_init:
3459 kfree(adapter->mac_table);
3460 kfree(adapter->shadow_vfta);
3461 igb_clear_interrupt_scheme(adapter);
3462#ifdef CONFIG_PCI_IOV
3463 igb_disable_sriov(pdev);
3464#endif
3465 pci_iounmap(pdev, adapter->io_addr);
3466err_ioremap:
3467 free_netdev(netdev);
3468err_alloc_etherdev:
3469 pci_release_mem_regions(pdev);
3470err_pci_reg:
3471err_dma:
3472 pci_disable_device(pdev);
3473 return err;
3474}
3475
3476#ifdef CONFIG_PCI_IOV
3477static int igb_disable_sriov(struct pci_dev *pdev)
3478{
3479 struct net_device *netdev = pci_get_drvdata(pdev);
3480 struct igb_adapter *adapter = netdev_priv(netdev);
3481 struct e1000_hw *hw = &adapter->hw;
3482
 /* reclaim resources allocated to VFs */
 if (adapter->vf_data) {
 /* disable iov and allow time for transactions to clear */
3486 if (pci_vfs_assigned(pdev)) {
3487 dev_warn(&pdev->dev,
3488 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3489 return -EPERM;
3490 } else {
3491 pci_disable_sriov(pdev);
3492 msleep(500);
3493 }
3494
3495 kfree(adapter->vf_mac_list);
3496 adapter->vf_mac_list = NULL;
3497 kfree(adapter->vf_data);
3498 adapter->vf_data = NULL;
3499 adapter->vfs_allocated_count = 0;
3500 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3501 wrfl();
3502 msleep(100);
3503 dev_info(&pdev->dev, "IOV Disabled\n");
3504
 /* Re-enable DMA Coalescing flag since IOV is turned off */
3506 adapter->flags |= IGB_FLAG_DMAC;
3507 }
3508
3509 return 0;
3510}
3511
3512static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
3513{
3514 struct net_device *netdev = pci_get_drvdata(pdev);
3515 struct igb_adapter *adapter = netdev_priv(netdev);
3516 int old_vfs = pci_num_vf(pdev);
3517 struct vf_mac_filter *mac_list;
3518 int err = 0;
3519 int num_vf_mac_filters, i;
3520
3521 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3522 err = -EPERM;
3523 goto out;
3524 }
3525 if (!num_vfs)
3526 goto out;
3527
3528 if (old_vfs) {
3529 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3530 old_vfs, max_vfs);
3531 adapter->vfs_allocated_count = old_vfs;
3532 } else
3533 adapter->vfs_allocated_count = num_vfs;
3534
3535 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3536 sizeof(struct vf_data_storage), GFP_KERNEL);

 /* if allocation failed then we do not support SR-IOV */
3539 if (!adapter->vf_data) {
3540 adapter->vfs_allocated_count = 0;
3541 err = -ENOMEM;
3542 goto out;
3543 }
3544
 /* Due to the limited number of RAR entries calculate potential
 * number of MAC filters available for the VFs. Reserve entries
 * for PF default MAC, PF MAC filters and at least one RAR entry
 * for each VF for VF MAC.
 */
3550 num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3551 (1 + IGB_PF_MAC_FILTERS_RESERVED +
3552 adapter->vfs_allocated_count);
3553
3554 adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3555 sizeof(struct vf_mac_filter),
3556 GFP_KERNEL);
3557
3558 mac_list = adapter->vf_mac_list;
3559 INIT_LIST_HEAD(&adapter->vf_macs.l);
3560
3561 if (adapter->vf_mac_list) {
 /* Initialize list of VF MAC filters */
3563 for (i = 0; i < num_vf_mac_filters; i++) {
3564 mac_list->vf = -1;
3565 mac_list->free = true;
3566 list_add(&mac_list->l, &adapter->vf_macs.l);
3567 mac_list++;
3568 }
3569 } else {
 /* If we could not allocate memory for the VF MAC filters
 * we can continue without this feature but warn user.
 */
3573 dev_err(&pdev->dev,
3574 "Unable to allocate memory for VF MAC filter list\n");
3575 }
3576
 /* only call pci_enable_sriov() if no VFs are allocated already */
3578 if (!old_vfs) {
3579 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3580 if (err)
3581 goto err_out;
3582 }
3583 dev_info(&pdev->dev, "%d VFs allocated\n",
3584 adapter->vfs_allocated_count);
3585 for (i = 0; i < adapter->vfs_allocated_count; i++)
3586 igb_vf_configure(adapter, i);
3587
 /* DMA Coalescing is not supported in IOV mode. */
3589 adapter->flags &= ~IGB_FLAG_DMAC;
3590 goto out;
3591
3592err_out:
3593 kfree(adapter->vf_mac_list);
3594 adapter->vf_mac_list = NULL;
3595 kfree(adapter->vf_data);
3596 adapter->vf_data = NULL;
3597 adapter->vfs_allocated_count = 0;
3598out:
3599 return err;
3600}
3601
3602#endif
3603
/**
 * igb_remove_i2c - Cleanup I2C interface
 * @adapter: pointer to adapter structure
 **/
3607static void igb_remove_i2c(struct igb_adapter *adapter)
3608{
 /* free the adapter bus structure */
3610 i2c_del_adapter(&adapter->i2c_adap);
3611}
3612
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
3622static void igb_remove(struct pci_dev *pdev)
3623{
3624 struct net_device *netdev = pci_get_drvdata(pdev);
3625 struct igb_adapter *adapter = netdev_priv(netdev);
3626 struct e1000_hw *hw = &adapter->hw;
3627
3628 pm_runtime_get_noresume(&pdev->dev);
3629#ifdef CONFIG_IGB_HWMON
3630 igb_sysfs_exit(adapter);
3631#endif
3632 igb_remove_i2c(adapter);
3633 igb_ptp_stop(adapter);
3634
 /* The watchdog timer may be rescheduled, so explicitly
 * disable watchdog from being rescheduled.
 */
3637 set_bit(__IGB_DOWN, &adapter->state);
3638 del_timer_sync(&adapter->watchdog_timer);
3639 del_timer_sync(&adapter->phy_info_timer);
3640
3641 cancel_work_sync(&adapter->reset_task);
3642 cancel_work_sync(&adapter->watchdog_task);
3643
3644#ifdef CONFIG_IGB_DCA
3645 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3646 dev_info(&pdev->dev, "DCA disabled\n");
3647 dca_remove_requester(&pdev->dev);
3648 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3649 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3650 }
3651#endif
3652
 /* Release control of h/w to f/w.  If f/w is AMT enabled, this
 * would have already happened in close and is redundant.
 */
3656 igb_release_hw_control(adapter);
3657
3658#ifdef CONFIG_PCI_IOV
3659 igb_disable_sriov(pdev);
3660#endif
3661
3662 unregister_netdev(netdev);
3663
3664 igb_clear_interrupt_scheme(adapter);
3665
3666 pci_iounmap(pdev, adapter->io_addr);
3667 if (hw->flash_address)
3668 iounmap(hw->flash_address);
3669 pci_release_mem_regions(pdev);
3670
3671 kfree(adapter->mac_table);
3672 kfree(adapter->shadow_vfta);
3673 free_netdev(netdev);
3674
3675 pci_disable_pcie_error_reporting(pdev);
3676
3677 pci_disable_device(pdev);
3678}
3679
/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 **/
3689static void igb_probe_vfs(struct igb_adapter *adapter)
3690{
3691#ifdef CONFIG_PCI_IOV
3692 struct pci_dev *pdev = adapter->pdev;
3693 struct e1000_hw *hw = &adapter->hw;
3694
 /* Virtualization features not supported on i210 and i211 family. */
3696 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
3697 return;
3698
 /* Of the below we really only want the effect of getting
 * IGB_FLAG_HAS_MSIX set (if available), without which
 * igb_enable_sriov() has no effect.
 */
3703 igb_set_interrupt_capability(adapter, true);
3704 igb_reset_interrupt_capability(adapter);
3705
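 /* hardware supports up to 7 VFs; the eighth pool is reserved for the PF */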
3706 pci_sriov_set_totalvfs(pdev, 7);
3707 igb_enable_sriov(pdev, max_vfs);
3708
3709#endif
3710}
3711
3712unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3713{
3714 struct e1000_hw *hw = &adapter->hw;
3715 unsigned int max_rss_queues;
3716
 /* Determine the maximum number of RSS queues supported. */
3718 switch (hw->mac.type) {
3719 case e1000_i211:
3720 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3721 break;
3722 case e1000_82575:
3723 case e1000_i210:
3724 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3725 break;
3726 case e1000_i350:
 /* I350 cannot do RSS and SR-IOV at the same time */
 if (!!adapter->vfs_allocated_count) {
 max_rss_queues = 1;
 break;
 }
 /* fall through */
 case e1000_82576:
 if (!!adapter->vfs_allocated_count) {
 max_rss_queues = 2;
 break;
 }
 /* fall through */
3739 case e1000_82580:
3740 case e1000_i354:
3741 default:
3742 max_rss_queues = IGB_MAX_RX_QUEUES;
3743 break;
3744 }
3745
3746 return max_rss_queues;
3747}
3748
3749static void igb_init_queue_configuration(struct igb_adapter *adapter)
3750{
3751 u32 max_rss_queues;
3752
3753 max_rss_queues = igb_get_max_rss_queues(adapter);
3754 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3755
3756 igb_set_flag_queue_pairs(adapter, max_rss_queues);
3757}
3758
3759void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3760 const u32 max_rss_queues)
3761{
3762 struct e1000_hw *hw = &adapter->hw;
3763
 /* Determine if we need to pair queues. */
3765 switch (hw->mac.type) {
3766 case e1000_82575:
3767 case e1000_i211:
 /* Device supports enough interrupts without queue pairing
 * to support MSI-X.  Disable queue pairing.
 */
3769 break;
3770 case e1000_82576:
3771 case e1000_82580:
3772 case e1000_i350:
3773 case e1000_i354:
3774 case e1000_i210:
3775 default:
 /* If rss_queues > half of max_rss_queues, pair the queues in
 * order to conserve interrupts due to limited supply.
 */
3779 if (adapter->rss_queues > (max_rss_queues / 2))
3780 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
3781 else
3782 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
3783 break;
3784 }
3785}
3786
/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
3795static int igb_sw_init(struct igb_adapter *adapter)
3796{
3797 struct e1000_hw *hw = &adapter->hw;
3798 struct net_device *netdev = adapter->netdev;
3799 struct pci_dev *pdev = adapter->pdev;
3800
3801 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3802
 /* set default ring sizes */
 adapter->tx_ring_count = IGB_DEFAULT_TXD;
 adapter->rx_ring_count = IGB_DEFAULT_RXD;

 /* set default ITR values */
 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
 adapter->tx_itr_setting = IGB_DEFAULT_ITR;

 /* set default work limits */
3812 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3813
3814 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3815 VLAN_HLEN;
3816 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3817
3818 spin_lock_init(&adapter->nfc_lock);
3819 spin_lock_init(&adapter->stats64_lock);
3820#ifdef CONFIG_PCI_IOV
3821 switch (hw->mac.type) {
3822 case e1000_82576:
3823 case e1000_i350:
3824 if (max_vfs > 7) {
3825 dev_warn(&pdev->dev,
3826 "Maximum of 7 VFs per PF, using max\n");
3827 max_vfs = adapter->vfs_allocated_count = 7;
3828 } else
3829 adapter->vfs_allocated_count = max_vfs;
3830 if (adapter->vfs_allocated_count)
3831 dev_warn(&pdev->dev,
3832 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3833 break;
3834 default:
3835 break;
3836 }
3837#endif
3838
 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
3840 adapter->flags |= IGB_FLAG_HAS_MSIX;
3841
3842 adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
3843 sizeof(struct igb_mac_addr),
3844 GFP_KERNEL);
3845 if (!adapter->mac_table)
3846 return -ENOMEM;
3847
3848 igb_probe_vfs(adapter);
3849
3850 igb_init_queue_configuration(adapter);
3851
 /* Setup and initialize a copy of the hw vlan table array */
3853 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
3854 GFP_KERNEL);
3855 if (!adapter->shadow_vfta)
3856 return -ENOMEM;
3857
 /* This call may decrease the number of queues */
3859 if (igb_init_interrupt_scheme(adapter, true)) {
3860 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3861 return -ENOMEM;
3862 }
3863
 /* Explicitly disable IRQ since the NIC can be in any state. */
3865 igb_irq_disable(adapter);
3866
3867 if (hw->mac.type >= e1000_i350)
3868 adapter->flags &= ~IGB_FLAG_DMAC;
3869
3870 set_bit(__IGB_DOWN, &adapter->state);
3871 return 0;
3872}
3873
/**
 * __igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 * @resuming: indicates whether we are in a resume call
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, an interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
3886static int __igb_open(struct net_device *netdev, bool resuming)
3887{
3888 struct igb_adapter *adapter = netdev_priv(netdev);
3889 struct e1000_hw *hw = &adapter->hw;
3890 struct pci_dev *pdev = adapter->pdev;
3891 int err;
3892 int i;
3893
 /* disallow open during test */
3895 if (test_bit(__IGB_TESTING, &adapter->state)) {
3896 WARN_ON(resuming);
3897 return -EBUSY;
3898 }
3899
3900 if (!resuming)
3901 pm_runtime_get_sync(&pdev->dev);
3902
3903 netif_carrier_off(netdev);
3904
 /* allocate transmit descriptors */
 err = igb_setup_all_tx_resources(adapter);
 if (err)
 goto err_setup_tx;

 /* allocate receive descriptors */
3911 err = igb_setup_all_rx_resources(adapter);
3912 if (err)
3913 goto err_setup_rx;
3914
3915 igb_power_up_link(adapter);

 /* before we allocate an interrupt, we must be ready to handle it.
 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 * as soon as we call pci_request_irq, so we have to setup our
 * clean_rx handler before we do so.
 */
3922 igb_configure(adapter);
3923
3924 err = igb_request_irq(adapter);
3925 if (err)
3926 goto err_req_irq;
3927
 /* Notify the stack of the actual queue counts. */
3929 err = netif_set_real_num_tx_queues(adapter->netdev,
3930 adapter->num_tx_queues);
3931 if (err)
3932 goto err_set_queues;
3933
3934 err = netif_set_real_num_rx_queues(adapter->netdev,
3935 adapter->num_rx_queues);
3936 if (err)
3937 goto err_set_queues;
3938
 /* From here on the code is the same as igb_up() */
3940 clear_bit(__IGB_DOWN, &adapter->state);
3941
3942 for (i = 0; i < adapter->num_q_vectors; i++)
3943 napi_enable(&(adapter->q_vector[i]->napi));
3944
 /* Clear any pending interrupts. */
3946 rd32(E1000_TSICR);
3947 rd32(E1000_ICR);
3948
3949 igb_irq_enable(adapter);
3950
 /* notify VFs that reset has been completed */
3952 if (adapter->vfs_allocated_count) {
3953 u32 reg_data = rd32(E1000_CTRL_EXT);
3954
3955 reg_data |= E1000_CTRL_EXT_PFRSTD;
3956 wr32(E1000_CTRL_EXT, reg_data);
3957 }
3958
3959 netif_tx_start_all_queues(netdev);
3960
3961 if (!resuming)
3962 pm_runtime_put(&pdev->dev);
3963
 /* start the watchdog */
3965 hw->mac.get_link_status = 1;
3966 schedule_work(&adapter->watchdog_task);
3967
3968 return 0;
3969
3970err_set_queues:
3971 igb_free_irq(adapter);
3972err_req_irq:
3973 igb_release_hw_control(adapter);
3974 igb_power_down_link(adapter);
3975 igb_free_all_rx_resources(adapter);
3976err_setup_rx:
3977 igb_free_all_tx_resources(adapter);
3978err_setup_tx:
3979 igb_reset(adapter);
3980 if (!resuming)
3981 pm_runtime_put(&pdev->dev);
3982
3983 return err;
3984}
3985
3986int igb_open(struct net_device *netdev)
3987{
3988 return __igb_open(netdev, false);
3989}
3990
/**
 * __igb_close - Disables a network interface
 * @netdev: network interface device structure
 * @suspending: indicates we are in a suspend call
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the control of the driver,
 * but needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
4002static int __igb_close(struct net_device *netdev, bool suspending)
4003{
4004 struct igb_adapter *adapter = netdev_priv(netdev);
4005 struct pci_dev *pdev = adapter->pdev;
4006
4007 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
4008
4009 if (!suspending)
4010 pm_runtime_get_sync(&pdev->dev);
4011
4012 igb_down(adapter);
4013 igb_free_irq(adapter);
4014
4015 igb_free_all_tx_resources(adapter);
4016 igb_free_all_rx_resources(adapter);
4017
4018 if (!suspending)
4019 pm_runtime_put_sync(&pdev->dev);
4020 return 0;
4021}
4022
4023int igb_close(struct net_device *netdev)
4024{
4025 if (netif_device_present(netdev) || netdev->dismantle)
4026 return __igb_close(netdev, false);
4027 return 0;
4028}
4029
/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
4036int igb_setup_tx_resources(struct igb_ring *tx_ring)
4037{
4038 struct device *dev = tx_ring->dev;
4039 int size;
4040
4041 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
4042
4043 tx_ring->tx_buffer_info = vmalloc(size);
4044 if (!tx_ring->tx_buffer_info)
4045 goto err;
4046
 /* round up to nearest 4K */
4048 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
4049 tx_ring->size = ALIGN(tx_ring->size, 4096);
4050
4051 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4052 &tx_ring->dma, GFP_KERNEL);
4053 if (!tx_ring->desc)
4054 goto err;
4055
4056 tx_ring->next_to_use = 0;
4057 tx_ring->next_to_clean = 0;
4058
4059 return 0;
4060
4061err:
4062 vfree(tx_ring->tx_buffer_info);
4063 tx_ring->tx_buffer_info = NULL;
4064 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4065 return -ENOMEM;
4066}
4067
/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
4075static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
4076{
4077 struct pci_dev *pdev = adapter->pdev;
4078 int i, err = 0;
4079
4080 for (i = 0; i < adapter->num_tx_queues; i++) {
4081 err = igb_setup_tx_resources(adapter->tx_ring[i]);
4082 if (err) {
4083 dev_err(&pdev->dev,
4084 "Allocation for Tx Queue %u failed\n", i);
4085 for (i--; i >= 0; i--)
4086 igb_free_tx_resources(adapter->tx_ring[i]);
4087 break;
4088 }
4089 }
4090
4091 return err;
4092}
4093
/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
4098void igb_setup_tctl(struct igb_adapter *adapter)
4099{
4100 struct e1000_hw *hw = &adapter->hw;
4101 u32 tctl;
4102
 /* disable queue 0 which is enabled by default on 82575 and 82576 */
 wr32(E1000_TXDCTL(0), 0);

 /* Program the Transmit Control Register */
4107 tctl = rd32(E1000_TCTL);
4108 tctl &= ~E1000_TCTL_CT;
4109 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
4110 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
4111
4112 igb_config_collision_dist(hw);
4113
 /* Enable transmits */
4115 tctl |= E1000_TCTL_EN;
4116
4117 wr32(E1000_TCTL, tctl);
4118}
4119
/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
4127void igb_configure_tx_ring(struct igb_adapter *adapter,
4128 struct igb_ring *ring)
4129{
4130 struct e1000_hw *hw = &adapter->hw;
4131 u32 txdctl = 0;
4132 u64 tdba = ring->dma;
4133 int reg_idx = ring->reg_idx;
4134
4135 wr32(E1000_TDLEN(reg_idx),
4136 ring->count * sizeof(union e1000_adv_tx_desc));
4137 wr32(E1000_TDBAL(reg_idx),
4138 tdba & 0x00000000ffffffffULL);
4139 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
4140
4141 ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
4142 wr32(E1000_TDH(reg_idx), 0);
4143 writel(0, ring->tail);
4144
4145 txdctl |= IGB_TX_PTHRESH;
4146 txdctl |= IGB_TX_HTHRESH << 8;
4147 txdctl |= IGB_TX_WTHRESH << 16;
4148
 /* reinitialize tx_buffer_info */
4150 memset(ring->tx_buffer_info, 0,
4151 sizeof(struct igb_tx_buffer) * ring->count);
4152
4153 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
4154 wr32(E1000_TXDCTL(reg_idx), txdctl);
4155}
4156
/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
4163static void igb_configure_tx(struct igb_adapter *adapter)
4164{
4165 struct e1000_hw *hw = &adapter->hw;
4166 int i;
4167
 /* disable the queues */
4169 for (i = 0; i < adapter->num_tx_queues; i++)
4170 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4171
4172 wrfl();
4173 usleep_range(10000, 20000);
4174
4175 for (i = 0; i < adapter->num_tx_queues; i++)
4176 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4177}
4178
/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
4185int igb_setup_rx_resources(struct igb_ring *rx_ring)
4186{
4187 struct device *dev = rx_ring->dev;
4188 int size;
4189
4190 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
4191
4192 rx_ring->rx_buffer_info = vmalloc(size);
4193 if (!rx_ring->rx_buffer_info)
4194 goto err;
4195
 /* Round up to nearest 4K */
4197 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
4198 rx_ring->size = ALIGN(rx_ring->size, 4096);
4199
4200 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4201 &rx_ring->dma, GFP_KERNEL);
4202 if (!rx_ring->desc)
4203 goto err;
4204
4205 rx_ring->next_to_alloc = 0;
4206 rx_ring->next_to_clean = 0;
4207 rx_ring->next_to_use = 0;
4208
4209 return 0;
4210
4211err:
4212 vfree(rx_ring->rx_buffer_info);
4213 rx_ring->rx_buffer_info = NULL;
4214 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4215 return -ENOMEM;
4216}
4217
/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
4225static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
4226{
4227 struct pci_dev *pdev = adapter->pdev;
4228 int i, err = 0;
4229
4230 for (i = 0; i < adapter->num_rx_queues; i++) {
4231 err = igb_setup_rx_resources(adapter->rx_ring[i]);
4232 if (err) {
4233 dev_err(&pdev->dev,
4234 "Allocation for Rx Queue %u failed\n", i);
4235 for (i--; i >= 0; i--)
4236 igb_free_rx_resources(adapter->rx_ring[i]);
4237 break;
4238 }
4239 }
4240
4241 return err;
4242}
4243
/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
4248static void igb_setup_mrqc(struct igb_adapter *adapter)
4249{
4250 struct e1000_hw *hw = &adapter->hw;
4251 u32 mrqc, rxcsum;
4252 u32 j, num_rx_queues;
4253 u32 rss_key[10];
4254
4255 netdev_rss_key_fill(rss_key, sizeof(rss_key));
4256 for (j = 0; j < 10; j++)
4257 wr32(E1000_RSSRK(j), rss_key[j]);
4258
4259 num_rx_queues = adapter->rss_queues;
4260
4261 switch (hw->mac.type) {
4262 case e1000_82576:
 /* 82576 supports 2 RSS queues when SR-IOV is enabled */
4264 if (adapter->vfs_allocated_count)
4265 num_rx_queues = 2;
4266 break;
4267 default:
4268 break;
4269 }
4270
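 /* spread the indirection table entries evenly across the Rx queues */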
4271 if (adapter->rss_indir_tbl_init != num_rx_queues) {
4272 for (j = 0; j < IGB_RETA_SIZE; j++)
4273 adapter->rss_indir_tbl[j] =
4274 (j * num_rx_queues) / IGB_RETA_SIZE;
4275 adapter->rss_indir_tbl_init = num_rx_queues;
4276 }
4277 igb_write_rss_indir_tbl(adapter);
4278
 /* Disable raw packet checksumming so that RSS hash is placed in
 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
 * offloads as they are enabled by default.
 */
 rxcsum = rd32(E1000_RXCSUM);
 rxcsum |= E1000_RXCSUM_PCSD;

 if (adapter->hw.mac.type >= e1000_82576)
 /* Enable Receive Checksum Offload for SCTP */
 rxcsum |= E1000_RXCSUM_CRCOFL;

 /* Don't need to set TUOFL or IPOFL, they default to 1 */
4291 wr32(E1000_RXCSUM, rxcsum);
4292
 /* Generate RSS hash based on packet types, TCP/UDP
 * port numbers and/or IPv4/v6 src and dst addresses
 */
4296 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
4297 E1000_MRQC_RSS_FIELD_IPV4_TCP |
4298 E1000_MRQC_RSS_FIELD_IPV6 |
4299 E1000_MRQC_RSS_FIELD_IPV6_TCP |
4300 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
4301
4302 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
4303 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
4304 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
4305 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
4306
 /* If VMDq is enabled then we set the appropriate mode for that, else
 * we default to RSS so that an RSS hash is calculated per packet even
 * if we are only using one queue
 */
4311 if (adapter->vfs_allocated_count) {
4312 if (hw->mac.type > e1000_82575) {
 /* Set the default pool for the PF's first queue */
4314 u32 vtctl = rd32(E1000_VT_CTL);
4315
4316 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
4317 E1000_VT_CTL_DISABLE_DEF_POOL);
4318 vtctl |= adapter->vfs_allocated_count <<
4319 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
4320 wr32(E1000_VT_CTL, vtctl);
4321 }
4322 if (adapter->rss_queues > 1)
4323 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
4324 else
4325 mrqc |= E1000_MRQC_ENABLE_VMDQ;
4326 } else {
4327 if (hw->mac.type != e1000_i211)
4328 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4329 }
4330 igb_vmm_control(adapter);
4331
4332 wr32(E1000_MRQC, mrqc);
4333}
4334
/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
4339void igb_setup_rctl(struct igb_adapter *adapter)
4340{
4341 struct e1000_hw *hw = &adapter->hw;
4342 u32 rctl;
4343
4344 rctl = rd32(E1000_RCTL);
4345
4346 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4347 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4348
4349 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4350 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4351
 /* enable stripping of CRC.  It's unlikely this will break BMC
 * redirection as it did with e1000.  Newer features require
 * that the HW strips the CRC.
 */
 rctl |= E1000_RCTL_SECRC;

 /* disable store bad packets and clear size bits. */
 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

 /* enable LPE to allow for reception of jumbo frames */
 rctl |= E1000_RCTL_LPE;

 /* disable queue 0 to prevent tail write w/o re-config */
4365 wr32(E1000_RXDCTL(0), 0);
4366
 /* Attention!!!  For SR-IOV PF driver operations you must enable
 * queue drop for all VF and PF queues to prevent head of line blocking
 * if an un-trusted VF does not provide descriptors to hardware.
 */
 if (adapter->vfs_allocated_count) {
 /* set all queue drop enable bits */
4373 wr32(E1000_QDE, ALL_QUEUES);
4374 }
4375
 /* This is useful for sniffing bad packets. */
 if (adapter->netdev->features & NETIF_F_RXALL) {
 /* UPE and MPE will be handled by normal PROMISC logic
 * in igb_set_rx_mode
 */
 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
 E1000_RCTL_BAM | /* RX All Bcast Pkts */
 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

 rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */
 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
 * and that breaks VLANs.
 */
4390 }
4391
4392 wr32(E1000_RCTL, rctl);
4393}
4394
4395static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4396 int vfn)
4397{
4398 struct e1000_hw *hw = &adapter->hw;
4399 u32 vmolr;
4400
4401 if (size > MAX_JUMBO_FRAME_SIZE)
4402 size = MAX_JUMBO_FRAME_SIZE;
4403
4404 vmolr = rd32(E1000_VMOLR(vfn));
4405 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4406 vmolr |= size | E1000_VMOLR_LPE;
4407 wr32(E1000_VMOLR(vfn), vmolr);
4408
4409 return 0;
4410}
4411
4412static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4413 int vfn, bool enable)
4414{
4415 struct e1000_hw *hw = &adapter->hw;
4416 u32 val, reg;
4417
4418 if (hw->mac.type < e1000_82576)
4419 return;
4420
4421 if (hw->mac.type == e1000_i350)
4422 reg = E1000_DVMOLR(vfn);
4423 else
4424 reg = E1000_VMOLR(vfn);
4425
4426 val = rd32(reg);
4427 if (enable)
4428 val |= E1000_VMOLR_STRVLAN;
4429 else
4430 val &= ~(E1000_VMOLR_STRVLAN);
4431 wr32(reg, val);
4432}
4433
4434static inline void igb_set_vmolr(struct igb_adapter *adapter,
4435 int vfn, bool aupe)
4436{
4437 struct e1000_hw *hw = &adapter->hw;
4438 u32 vmolr;
4439
 /* This register exists only on 82576 and newer so if we are older then
 * we should exit and do nothing
 */
4443 if (hw->mac.type < e1000_82576)
4444 return;
4445
4446 vmolr = rd32(E1000_VMOLR(vfn));
4447 if (aupe)
4448 vmolr |= E1000_VMOLR_AUPE;
4449 else
4450 vmolr &= ~(E1000_VMOLR_AUPE);
4451
 /* clear all bits that might not be set */
 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */

 /* for VMDq only allow the VFs and pool 0 to accept broadcast and
 * multicast packets
 */
4460 if (vfn <= adapter->vfs_allocated_count)
4461 vmolr |= E1000_VMOLR_BAM;
4462
4463 wr32(E1000_VMOLR(vfn), vmolr);
4464}
4465
/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
4473void igb_configure_rx_ring(struct igb_adapter *adapter,
4474 struct igb_ring *ring)
4475{
4476 struct e1000_hw *hw = &adapter->hw;
4477 union e1000_adv_rx_desc *rx_desc;
4478 u64 rdba = ring->dma;
4479 int reg_idx = ring->reg_idx;
4480 u32 srrctl = 0, rxdctl = 0;
4481
 /* disable the queue */
 wr32(E1000_RXDCTL(reg_idx), 0);

 /* Set DMA base address registers */
 wr32(E1000_RDBAL(reg_idx),
 rdba & 0x00000000ffffffffULL);
 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
 wr32(E1000_RDLEN(reg_idx),
 ring->count * sizeof(union e1000_adv_rx_desc));

 /* initialize head and tail */
 ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
 wr32(E1000_RDH(reg_idx), 0);
 writel(0, ring->tail);

 /* set descriptor configuration */
4498 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4499 if (ring_uses_large_buffer(ring))
4500 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4501 else
4502 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4503 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4504 if (hw->mac.type >= e1000_82580)
4505 srrctl |= E1000_SRRCTL_TIMESTAMP;
4506
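 /* Only set Drop Enable if VFs are allocated, or we are supporting
 * multiple Rx queues
 */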
4507 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
4508 srrctl |= E1000_SRRCTL_DROP_EN;
4509
4510 wr32(E1000_SRRCTL(reg_idx), srrctl);
4511
 /* set filtering for VMDQ pools */
4513 igb_set_vmolr(adapter, reg_idx & 0x7, true);
4514
4515 rxdctl |= IGB_RX_PTHRESH;
4516 rxdctl |= IGB_RX_HTHRESH << 8;
4517 rxdctl |= IGB_RX_WTHRESH << 16;
4518
 /* initialize rx_buffer_info */
 memset(ring->rx_buffer_info, 0,
 sizeof(struct igb_rx_buffer) * ring->count);

 /* initialize Rx descriptor 0 */
 rx_desc = IGB_RX_DESC(ring, 0);
 rx_desc->wb.upper.length = 0;

 /* enable receive descriptor fetching */
4528 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4529 wr32(E1000_RXDCTL(reg_idx), rxdctl);
4530}
4531
4532static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4533 struct igb_ring *rx_ring)
4534{
 /* set build_skb and buffer size flags */
4536 clear_ring_build_skb_enabled(rx_ring);
4537 clear_ring_uses_large_buffer(rx_ring);
4538
4539 if (adapter->flags & IGB_FLAG_RX_LEGACY)
4540 return;
4541
4542 set_ring_build_skb_enabled(rx_ring);
4543
4544#if (PAGE_SIZE < 8192)
4545 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4546 return;
4547
4548 set_ring_uses_large_buffer(rx_ring);
4549#endif
4550}
4551
/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
4558static void igb_configure_rx(struct igb_adapter *adapter)
4559{
4560 int i;
4561
 /* set the correct pool for the PF default MAC address in entry 0 */
 igb_set_default_mac_filter(adapter);

 /* Setup the HW Rx Head and Tail Descriptor Pointers and
 * the Base and Length of the Rx Descriptor Ring
 */
4568 for (i = 0; i < adapter->num_rx_queues; i++) {
4569 struct igb_ring *rx_ring = adapter->rx_ring[i];
4570
4571 igb_set_rx_buffer_len(adapter, rx_ring);
4572 igb_configure_rx_ring(adapter, rx_ring);
4573 }
4574}
4575
/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
4582void igb_free_tx_resources(struct igb_ring *tx_ring)
4583{
4584 igb_clean_tx_ring(tx_ring);
4585
4586 vfree(tx_ring->tx_buffer_info);
4587 tx_ring->tx_buffer_info = NULL;
4588
 /* if not set, then don't free */
4590 if (!tx_ring->desc)
4591 return;
4592
4593 dma_free_coherent(tx_ring->dev, tx_ring->size,
4594 tx_ring->desc, tx_ring->dma);
4595
4596 tx_ring->desc = NULL;
4597}
4598
/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
4605static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4606{
4607 int i;
4608
4609 for (i = 0; i < adapter->num_tx_queues; i++)
4610 if (adapter->tx_ring[i])
4611 igb_free_tx_resources(adapter->tx_ring[i]);
4612}
4613
/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
4618static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4619{
4620 u16 i = tx_ring->next_to_clean;
4621 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4622
4623 while (i != tx_ring->next_to_use) {
4624 union e1000_adv_tx_desc *eop_desc, *tx_desc;
4625
 /* Free all the Tx ring sk_buffs */
 dev_kfree_skb_any(tx_buffer->skb);

 /* unmap skb header data */
 dma_unmap_single(tx_ring->dev,
 dma_unmap_addr(tx_buffer, dma),
 dma_unmap_len(tx_buffer, len),
 DMA_TO_DEVICE);

 /* check for eop_desc to determine the end of the packet */
 eop_desc = tx_buffer->next_to_watch;
 tx_desc = IGB_TX_DESC(tx_ring, i);

 /* unmap remaining buffers */
4640 while (tx_desc != eop_desc) {
4641 tx_buffer++;
4642 tx_desc++;
4643 i++;
4644 if (unlikely(i == tx_ring->count)) {
4645 i = 0;
4646 tx_buffer = tx_ring->tx_buffer_info;
4647 tx_desc = IGB_TX_DESC(tx_ring, 0);
4648 }
4649
 /* unmap any remaining paged data */
4651 if (dma_unmap_len(tx_buffer, len))
4652 dma_unmap_page(tx_ring->dev,
4653 dma_unmap_addr(tx_buffer, dma),
4654 dma_unmap_len(tx_buffer, len),
4655 DMA_TO_DEVICE);
4656 }
4657
 /* move us one more past the eop_desc for start of next pkt */
4659 tx_buffer++;
4660 i++;
4661 if (unlikely(i == tx_ring->count)) {
4662 i = 0;
4663 tx_buffer = tx_ring->tx_buffer_info;
4664 }
4665 }
4666
 /* reset BQL for queue */
 netdev_tx_reset_queue(txring_txq(tx_ring));

 /* reset next_to_use and next_to_clean */
4671 tx_ring->next_to_use = 0;
4672 tx_ring->next_to_clean = 0;
4673}
4674
/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
4679static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4680{
4681 int i;
4682
4683 for (i = 0; i < adapter->num_tx_queues; i++)
4684 if (adapter->tx_ring[i])
4685 igb_clean_tx_ring(adapter->tx_ring[i]);
4686}
4687
/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
4694void igb_free_rx_resources(struct igb_ring *rx_ring)
4695{
4696 igb_clean_rx_ring(rx_ring);
4697
4698 vfree(rx_ring->rx_buffer_info);
4699 rx_ring->rx_buffer_info = NULL;
4700
 /* if not set, then don't free */
4702 if (!rx_ring->desc)
4703 return;
4704
4705 dma_free_coherent(rx_ring->dev, rx_ring->size,
4706 rx_ring->desc, rx_ring->dma);
4707
4708 rx_ring->desc = NULL;
4709}
4710
/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
4717static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4718{
4719 int i;
4720
4721 for (i = 0; i < adapter->num_rx_queues; i++)
4722 if (adapter->rx_ring[i])
4723 igb_free_rx_resources(adapter->rx_ring[i]);
4724}
4725
/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
4730static void igb_clean_rx_ring(struct igb_ring *rx_ring)
4731{
4732 u16 i = rx_ring->next_to_clean;
4733
4734 if (rx_ring->skb)
4735 dev_kfree_skb(rx_ring->skb);
4736 rx_ring->skb = NULL;
4737
 /* Free all the Rx ring sk_buffs */
4739 while (i != rx_ring->next_to_alloc) {
4740 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
4741
 /* Invalidate cache lines that may have been written to by
 * device so that we avoid corrupting memory.
 */
4745 dma_sync_single_range_for_cpu(rx_ring->dev,
4746 buffer_info->dma,
4747 buffer_info->page_offset,
4748 igb_rx_bufsz(rx_ring),
4749 DMA_FROM_DEVICE);
4750
 /* free resources associated with mapping */
4752 dma_unmap_page_attrs(rx_ring->dev,
4753 buffer_info->dma,
4754 igb_rx_pg_size(rx_ring),
4755 DMA_FROM_DEVICE,
4756 IGB_RX_DMA_ATTR);
4757 __page_frag_cache_drain(buffer_info->page,
4758 buffer_info->pagecnt_bias);
4759
4760 i++;
4761 if (i == rx_ring->count)
4762 i = 0;
4763 }
4764
4765 rx_ring->next_to_alloc = 0;
4766 rx_ring->next_to_clean = 0;
4767 rx_ring->next_to_use = 0;
4768}
4769
/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
4774static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4775{
4776 int i;
4777
4778 for (i = 0; i < adapter->num_rx_queues; i++)
4779 if (adapter->rx_ring[i])
4780 igb_clean_rx_ring(adapter->rx_ring[i]);
4781}
4782
/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
4790static int igb_set_mac(struct net_device *netdev, void *p)
4791{
4792 struct igb_adapter *adapter = netdev_priv(netdev);
4793 struct e1000_hw *hw = &adapter->hw;
4794 struct sockaddr *addr = p;
4795
4796 if (!is_valid_ether_addr(addr->sa_data))
4797 return -EADDRNOTAVAIL;
4798
4799 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4800 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
4801
 /* set the correct pool for the new PF MAC address in entry 0 */
4803 igb_set_default_mac_filter(adapter);
4804
4805 return 0;
4806}
4807
/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
4817static int igb_write_mc_addr_list(struct net_device *netdev)
4818{
4819 struct igb_adapter *adapter = netdev_priv(netdev);
4820 struct e1000_hw *hw = &adapter->hw;
4821 struct netdev_hw_addr *ha;
4822 u8 *mta_list;
4823 int i;
4824
4825 if (netdev_mc_empty(netdev)) {
 /* nothing to program, so clear mc list */
4827 igb_update_mc_addr_list(hw, NULL, 0);
4828 igb_restore_vf_multicasts(adapter);
4829 return 0;
4830 }
4831
 mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
4833 if (!mta_list)
4834 return -ENOMEM;
4835
 /* The shared function expects a packed array of only addresses. */
4837 i = 0;
4838 netdev_for_each_mc_addr(ha, netdev)
4839 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
4840
4841 igb_update_mc_addr_list(hw, mta_list, i);
4842 kfree(mta_list);
4843
4844 return netdev_mc_count(netdev);
4845}
4846
4847static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
4848{
4849 struct e1000_hw *hw = &adapter->hw;
4850 u32 i, pf_id;
4851
4852 switch (hw->mac.type) {
4853 case e1000_i210:
4854 case e1000_i211:
4855 case e1000_i350:
4856
4857 if (adapter->netdev->features & NETIF_F_NTUPLE)
4858 break;
4859
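		/* fall through */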
4860 case e1000_82576:
4861 case e1000_82580:
4862 case e1000_i354:
4863
4864 if (adapter->vfs_allocated_count)
4865 break;
4866
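		/* fall through */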
4867 default:
4868 return 1;
4869 }
4870
4871
4872 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
4873 return 0;
4874
4875 if (!adapter->vfs_allocated_count)
		goto set_vfta;

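	/* Add PF to all active pools */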
4879 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4880
4881 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4882 u32 vlvf = rd32(E1000_VLVF(i));
4883
4884 vlvf |= BIT(pf_id);
4885 wr32(E1000_VLVF(i), vlvf);
4886 }
4887
4888set_vfta:
4889
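	/* Set all bits in the VLAN filter table array */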
4890 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
4891 hw->mac.ops.write_vfta(hw, i, ~0U);
4892
4893
4894 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
4895
4896 return 0;
4897}
4898
4899#define VFTA_BLOCK_SIZE 8
4900static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
4901{
4902 struct e1000_hw *hw = &adapter->hw;
4903 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4904 u32 vid_start = vfta_offset * 32;
4905 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4906 u32 i, vid, word, bits, pf_id;
4907
4908
4909 vid = adapter->mng_vlan_id;
4910 if (vid >= vid_start && vid < vid_end)
4911 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4912
4913 if (!adapter->vfs_allocated_count)
4914 goto set_vfta;
4915
4916 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4917
4918 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4919 u32 vlvf = rd32(E1000_VLVF(i));
4920
4921
4922 vid = vlvf & VLAN_VID_MASK;
4923
4924
4925 if (vid < vid_start || vid >= vid_end)
4926 continue;
4927
4928 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
4929
4930 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4931
4932
4933 if (test_bit(vid, adapter->active_vlans))
4934 continue;
4935 }
4936
4937
4938 bits = ~BIT(pf_id);
4939 bits &= rd32(E1000_VLVF(i));
4940 wr32(E1000_VLVF(i), bits);
4941 }
4942
4943set_vfta:
4944
4945 for (i = VFTA_BLOCK_SIZE; i--;) {
4946 vid = (vfta_offset + i) * 32;
4947 word = vid / BITS_PER_LONG;
4948 bits = vid % BITS_PER_LONG;
4949
4950 vfta[i] |= adapter->active_vlans[word] >> bits;
4951
4952 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
4953 }
4954}
4955
4956static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
4957{
4958 u32 i;
4959
4960
4961 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
4962 return;
4963
4964
4965 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
4966
4967 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
4968 igb_scrub_vfta(adapter, i);
}

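/**
 *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 *  @netdev: network interface device structure
 *
 *  The set_rx_mode entry point is called whenever the unicast or multicast
 *  address lists or the network interface flags are updated.  This routine is
 *  responsible for configuring the hardware for proper unicast, multicast,
 *  promiscuous mode, and VLAN filtering.
 **/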
4980static void igb_set_rx_mode(struct net_device *netdev)
4981{
4982 struct igb_adapter *adapter = netdev_priv(netdev);
4983 struct e1000_hw *hw = &adapter->hw;
4984 unsigned int vfn = adapter->vfs_allocated_count;
4985 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
	int count;

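	/* Check for Promiscuous and All Multicast modes */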
4989 if (netdev->flags & IFF_PROMISC) {
4990 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
4991 vmolr |= E1000_VMOLR_MPME;
4992
4993
4994 if (hw->mac.type == e1000_82576)
4995 vmolr |= E1000_VMOLR_ROPE;
4996 } else {
4997 if (netdev->flags & IFF_ALLMULTI) {
4998 rctl |= E1000_RCTL_MPE;
4999 vmolr |= E1000_VMOLR_MPME;
5000 } else {
5001
5002
5003
5004
5005 count = igb_write_mc_addr_list(netdev);
5006 if (count < 0) {
5007 rctl |= E1000_RCTL_MPE;
5008 vmolr |= E1000_VMOLR_MPME;
5009 } else if (count) {
5010 vmolr |= E1000_VMOLR_ROMPE;
5011 }
5012 }
	}

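	/* Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */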
5019 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
5020 rctl |= E1000_RCTL_UPE;
5021 vmolr |= E1000_VMOLR_ROPE;
	}

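	/* enable VLAN filtering by default */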
	rctl |= E1000_RCTL_VFE;

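	/* disable VLAN filtering for modes that require it */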
5028 if ((netdev->flags & IFF_PROMISC) ||
5029 (netdev->features & NETIF_F_RXALL)) {
5030
5031 if (igb_vlan_promisc_enable(adapter))
5032 rctl &= ~E1000_RCTL_VFE;
5033 } else {
5034 igb_vlan_promisc_disable(adapter);
	}

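	/* update state of unicast, multicast, and VLAN filtering modes */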
5038 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
5039 E1000_RCTL_VFE);
5040 wr32(E1000_RCTL, rctl);
5041
5042#if (PAGE_SIZE < 8192)
5043 if (!adapter->vfs_allocated_count) {
5044 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5045 rlpml = IGB_MAX_FRAME_BUILD_SKB;
5046 }
5047#endif
	wr32(E1000_RLPML, rlpml);

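	/* In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */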
5055 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
5056 return;
5057
5058
5059 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
5060
5061 vmolr |= rd32(E1000_VMOLR(vfn)) &
5062 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
5063
5064
5065 vmolr &= ~E1000_VMOLR_RLPML_MASK;
5066#if (PAGE_SIZE < 8192)
5067 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5068 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
5069 else
5070#endif
5071 vmolr |= MAX_JUMBO_FRAME_SIZE;
5072 vmolr |= E1000_VMOLR_LPE;
5073
5074 wr32(E1000_VMOLR(vfn), vmolr);
5075
5076 igb_restore_vf_multicasts(adapter);
5077}
5078
5079static void igb_check_wvbr(struct igb_adapter *adapter)
5080{
5081 struct e1000_hw *hw = &adapter->hw;
5082 u32 wvbr = 0;
5083
5084 switch (hw->mac.type) {
5085 case e1000_82576:
5086 case e1000_i350:
5087 wvbr = rd32(E1000_WVBR);
5088 if (!wvbr)
5089 return;
5090 break;
5091 default:
5092 break;
5093 }
5094
5095 adapter->wvbr |= wvbr;
5096}
5097
5098#define IGB_STAGGERED_QUEUE_OFFSET 8
5099
5100static void igb_spoof_check(struct igb_adapter *adapter)
5101{
5102 int j;
5103
5104 if (!adapter->wvbr)
5105 return;
5106
5107 for (j = 0; j < adapter->vfs_allocated_count; j++) {
5108 if (adapter->wvbr & BIT(j) ||
5109 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
5110 dev_warn(&adapter->pdev->dev,
5111 "Spoof event(s) detected on VF %d\n", j);
5112 adapter->wvbr &=
5113 ~(BIT(j) |
5114 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
5115 }
5116 }
5117}
5118
5119
5120
5121
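/**
 *  igb_update_phy_info - timer call-back to update PHY info
 *  @t: pointer to timer_list containing our private info pointer
 **/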
5122static void igb_update_phy_info(struct timer_list *t)
5123{
	struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);

	igb_get_phy_info(&adapter->hw);
}

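/**
 *  igb_has_link - check shared code for link and determine up/down
 *  @adapter: pointer to driver private info
 **/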
5132bool igb_has_link(struct igb_adapter *adapter)
5133{
5134 struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;

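	/* get_link_status is set on LSC (link status) interrupt or
	 * rate change.
	 */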
5142 switch (hw->phy.media_type) {
5143 case e1000_media_type_copper:
5144 if (!hw->mac.get_link_status)
5145 return true;
5146
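		/* fall through */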
5147 case e1000_media_type_internal_serdes:
5148 hw->mac.ops.check_for_link(hw);
5149 link_active = !hw->mac.get_link_status;
5150 break;
5151 default:
5152 case e1000_media_type_unknown:
5153 break;
5154 }
5155
5156 if (((hw->mac.type == e1000_i210) ||
5157 (hw->mac.type == e1000_i211)) &&
5158 (hw->phy.id == I210_I_PHY_ID)) {
5159 if (!netif_carrier_ok(adapter->netdev)) {
5160 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5161 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
5162 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
5163 adapter->link_check_timeout = jiffies;
5164 }
5165 }
5166
5167 return link_active;
5168}
5169
5170static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
5171{
5172 bool ret = false;
5173 u32 ctrl_ext, thstat;
5174
5175
5176 if (hw->mac.type == e1000_i350) {
5177 thstat = rd32(E1000_THSTAT);
5178 ctrl_ext = rd32(E1000_CTRL_EXT);
5179
5180 if ((hw->phy.media_type == e1000_media_type_copper) &&
5181 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
5182 ret = !!(thstat & event);
5183 }
5184
5185 return ret;
}

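/**
 *  igb_check_lvmmc - check for malformed packets received
 *  and indicated in LVMMC register
 *  @adapter: pointer to adapter
 **/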
5193static void igb_check_lvmmc(struct igb_adapter *adapter)
5194{
5195 struct e1000_hw *hw = &adapter->hw;
5196 u32 lvmmc;
5197
5198 lvmmc = rd32(E1000_LVMMC);
5199 if (lvmmc) {
5200 if (unlikely(net_ratelimit())) {
5201 netdev_warn(adapter->netdev,
5202 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
5203 lvmmc);
5204 }
5205 }
}

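/**
 *  igb_watchdog - Timer Call-back
 *  @t: pointer to timer_list containing our private info pointer
 **/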
5212static void igb_watchdog(struct timer_list *t)
5213{
5214 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5215
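	/* Do the rest outside of interrupt context */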
5216 schedule_work(&adapter->watchdog_task);
5217}
5218
5219static void igb_watchdog_task(struct work_struct *work)
5220{
5221 struct igb_adapter *adapter = container_of(work,
5222 struct igb_adapter,
5223 watchdog_task);
5224 struct e1000_hw *hw = &adapter->hw;
5225 struct e1000_phy_info *phy = &hw->phy;
5226 struct net_device *netdev = adapter->netdev;
5227 u32 link;
5228 int i;
5229 u32 connsw;
5230 u16 phy_data, retry_count = 20;
5231
5232 link = igb_has_link(adapter);
5233
5234 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
5235 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5236 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5237 else
5238 link = false;
	}

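	/* Force link down if we have fiber to swap to */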
5242 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5243 if (hw->phy.media_type == e1000_media_type_copper) {
5244 connsw = rd32(E1000_CONNSW);
5245 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
5246 link = 0;
5247 }
5248 }
5249 if (link) {
5250
5251 if (hw->dev_spec._82575.media_changed) {
5252 hw->dev_spec._82575.media_changed = false;
5253 adapter->flags |= IGB_FLAG_MEDIA_RESET;
5254 igb_reset(adapter);
5255 }
5256
5257 pm_runtime_resume(netdev->dev.parent);
5258
5259 if (!netif_carrier_ok(netdev)) {
5260 u32 ctrl;
5261
5262 hw->mac.ops.get_speed_and_duplex(hw,
5263 &adapter->link_speed,
5264 &adapter->link_duplex);
5265
5266 ctrl = rd32(E1000_CTRL);
5267
5268 netdev_info(netdev,
5269 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5270 netdev->name,
5271 adapter->link_speed,
5272 adapter->link_duplex == FULL_DUPLEX ?
5273 "Full" : "Half",
5274 (ctrl & E1000_CTRL_TFCE) &&
5275 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
5276 (ctrl & E1000_CTRL_RFCE) ? "RX" :
5277 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
5278
5279
5280 if ((adapter->flags & IGB_FLAG_EEE) &&
5281 (adapter->link_duplex == HALF_DUPLEX)) {
5282 dev_info(&adapter->pdev->dev,
5283 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
5284 adapter->hw.dev_spec._82575.eee_disable = true;
5285 adapter->flags &= ~IGB_FLAG_EEE;
5286 }
5287
5288
5289 igb_check_downshift(hw);
5290 if (phy->speed_downgraded)
5291 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5292
5293
5294 if (igb_thermal_sensor_event(hw,
5295 E1000_THSTAT_LINK_THROTTLE))
5296 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
5297
5298
5299 adapter->tx_timeout_factor = 1;
5300 switch (adapter->link_speed) {
5301 case SPEED_10:
5302 adapter->tx_timeout_factor = 14;
5303 break;
5304 case SPEED_100:
5305
5306 break;
5307 }
5308
5309 if (adapter->link_speed != SPEED_1000)
5310 goto no_wait;
5311
5312
5313retry_read_status:
5314 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
5315 &phy_data)) {
5316 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5317 retry_count) {
5318 msleep(100);
5319 retry_count--;
5320 goto retry_read_status;
5321 } else if (!retry_count) {
					dev_err(&adapter->pdev->dev, "exceeded max 2 second wait for remote receiver status\n");
5323 }
5324 } else {
				dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status register\n");
5326 }
5327no_wait:
5328 netif_carrier_on(netdev);
5329
5330 igb_ping_all_vfs(adapter);
5331 igb_check_vf_rate_limit(adapter);
5332
5333
5334 if (!test_bit(__IGB_DOWN, &adapter->state))
5335 mod_timer(&adapter->phy_info_timer,
5336 round_jiffies(jiffies + 2 * HZ));
5337 }
5338 } else {
5339 if (netif_carrier_ok(netdev)) {
5340 adapter->link_speed = 0;
5341 adapter->link_duplex = 0;
5342
5343
5344 if (igb_thermal_sensor_event(hw,
5345 E1000_THSTAT_PWR_DOWN)) {
5346 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5347 }
5348
5349
5350 netdev_info(netdev, "igb: %s NIC Link is Down\n",
5351 netdev->name);
5352 netif_carrier_off(netdev);
5353
5354 igb_ping_all_vfs(adapter);
5355
5356
5357 if (!test_bit(__IGB_DOWN, &adapter->state))
5358 mod_timer(&adapter->phy_info_timer,
5359 round_jiffies(jiffies + 2 * HZ));
5360
5361
5362 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5363 igb_check_swap_media(adapter);
5364 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5365 schedule_work(&adapter->reset_task);
5366
5367 return;
5368 }
5369 }
5370 pm_schedule_suspend(netdev->dev.parent,
5371 MSEC_PER_SEC * 5);
5372
5373
5374 } else if (!netif_carrier_ok(netdev) &&
5375 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5376 igb_check_swap_media(adapter);
5377 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5378 schedule_work(&adapter->reset_task);
5379
5380 return;
5381 }
5382 }
5383 }
5384
5385 spin_lock(&adapter->stats64_lock);
5386 igb_update_stats(adapter);
5387 spin_unlock(&adapter->stats64_lock);
5388
5389 for (i = 0; i < adapter->num_tx_queues; i++) {
5390 struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
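			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */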
5397 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5398 adapter->tx_timeout_count++;
5399 schedule_work(&adapter->reset_task);
5400
5401 return;
5402 }
		}

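		/* Force detection of hung controller every watchdog period */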
5406 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

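	/* Cause software interrupt to ensure Rx ring is cleaned */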
5410 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5411 u32 eics = 0;
5412
5413 for (i = 0; i < adapter->num_q_vectors; i++)
5414 eics |= adapter->q_vector[i]->eims_value;
5415 wr32(E1000_EICS, eics);
5416 } else {
5417 wr32(E1000_ICS, E1000_ICS_RXDMT0);
5418 }
5419
5420 igb_spoof_check(adapter);
5421 igb_ptp_rx_hang(adapter);
5422 igb_ptp_tx_hang(adapter);
5423
5424
5425 if ((adapter->hw.mac.type == e1000_i350) ||
5426 (adapter->hw.mac.type == e1000_i354))
5427 igb_check_lvmmc(adapter);
5428
5429
5430 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5431 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5432 mod_timer(&adapter->watchdog_timer,
5433 round_jiffies(jiffies + HZ));
5434 else
5435 mod_timer(&adapter->watchdog_timer,
5436 round_jiffies(jiffies + 2 * HZ));
5437 }
5438}
5439
5440enum latency_range {
5441 lowest_latency = 0,
5442 low_latency = 1,
5443 bulk_latency = 2,
5444 latency_invalid = 255
};

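/**
 *  igb_update_ring_itr - update the dynamic ITR value based on packet size
 *  @q_vector: pointer to q_vector
 *
 *  Stores a new ITR value based strictly on packet size.  This
 *  algorithm is less sophisticated than that used in igb_update_itr,
 *  due to the difficulty of synchronizing statistics across multiple
 *  receive rings.  The divisors and thresholds used by this function
 *  were determined based on theoretical maximum wire speed and testing
 *  data, in order to minimize response time while increasing bulk
 *  throughput.
 *  This functionality is controlled by ethtool's coalescing settings.
 *  NOTE:  This function is called only when operating in a multiqueue
 *         receive environment.
 **/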
5462static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5463{
5464 int new_val = q_vector->itr_val;
5465 int avg_wire_size = 0;
5466 struct igb_adapter *adapter = q_vector->adapter;
	unsigned int packets;

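	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */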
5472 if (adapter->link_speed != SPEED_1000) {
5473 new_val = IGB_4K_ITR;
5474 goto set_itr_val;
5475 }
5476
5477 packets = q_vector->rx.total_packets;
5478 if (packets)
5479 avg_wire_size = q_vector->rx.total_bytes / packets;
5480
5481 packets = q_vector->tx.total_packets;
5482 if (packets)
5483 avg_wire_size = max_t(u32, avg_wire_size,
5484 q_vector->tx.total_bytes / packets);
5485
5486
5487 if (!avg_wire_size)
5488 goto clear_counts;
5489
5490
5491 avg_wire_size += 24;
5492
5493
5494 avg_wire_size = min(avg_wire_size, 3000);
5495
5496
5497 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5498 new_val = avg_wire_size / 3;
5499 else
5500 new_val = avg_wire_size / 2;
5501
5502
5503 if (new_val < IGB_20K_ITR &&
5504 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5505 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5506 new_val = IGB_20K_ITR;
5507
5508set_itr_val:
5509 if (new_val != q_vector->itr_val) {
5510 q_vector->itr_val = new_val;
5511 q_vector->set_itr = 1;
5512 }
5513clear_counts:
5514 q_vector->rx.total_bytes = 0;
5515 q_vector->rx.total_packets = 0;
5516 q_vector->tx.total_bytes = 0;
5517 q_vector->tx.total_packets = 0;
}

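/**
 *  igb_update_itr - update the dynamic ITR value based on statistics
 *  @q_vector: pointer to q_vector
 *  @ring_container: ring info to update the itr for
 *
 *  Stores a new ITR value based on packets and byte
 *  counts during the last interrupt.  The advantage of per interrupt
 *  computation is faster updates and more accurate ITR for the current
 *  traffic pattern.  Constants in this function were computed
 *  based on theoretical maximum wire speed and thresholds were set based
 *  on testing data as well as attempting to minimize response time
 *  while increasing bulk throughput.
 *  This functionality is controlled by ethtool's coalescing settings.
 *  NOTE:  These calculations are only valid when operating in a single-
 *         queue environment.
 **/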
5536static void igb_update_itr(struct igb_q_vector *q_vector,
5537 struct igb_ring_container *ring_container)
5538{
5539 unsigned int packets = ring_container->total_packets;
5540 unsigned int bytes = ring_container->total_bytes;
5541 u8 itrval = ring_container->itr;
5542
5543
5544 if (packets == 0)
5545 return;
5546
5547 switch (itrval) {
5548 case lowest_latency:
5549
5550 if (bytes/packets > 8000)
5551 itrval = bulk_latency;
5552 else if ((packets < 5) && (bytes > 512))
5553 itrval = low_latency;
5554 break;
5555 case low_latency:
5556 if (bytes > 10000) {
5557
5558 if (bytes/packets > 8000)
5559 itrval = bulk_latency;
5560 else if ((packets < 10) || ((bytes/packets) > 1200))
5561 itrval = bulk_latency;
			else if (packets > 35)
5563 itrval = lowest_latency;
5564 } else if (bytes/packets > 2000) {
5565 itrval = bulk_latency;
5566 } else if (packets <= 2 && bytes < 512) {
5567 itrval = lowest_latency;
5568 }
5569 break;
5570 case bulk_latency:
5571 if (bytes > 25000) {
5572 if (packets > 35)
5573 itrval = low_latency;
5574 } else if (bytes < 1500) {
5575 itrval = low_latency;
5576 }
5577 break;
5578 }
5579
5580
5581 ring_container->total_bytes = 0;
5582 ring_container->total_packets = 0;
5583
5584
5585 ring_container->itr = itrval;
5586}
5587
5588static void igb_set_itr(struct igb_q_vector *q_vector)
5589{
5590 struct igb_adapter *adapter = q_vector->adapter;
5591 u32 new_itr = q_vector->itr_val;
5592 u8 current_itr = 0;
5593
5594
5595 if (adapter->link_speed != SPEED_1000) {
5596 current_itr = 0;
5597 new_itr = IGB_4K_ITR;
5598 goto set_itr_now;
5599 }
5600
5601 igb_update_itr(q_vector, &q_vector->tx);
5602 igb_update_itr(q_vector, &q_vector->rx);
5603
5604 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5605
5606
5607 if (current_itr == lowest_latency &&
5608 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5609 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5610 current_itr = low_latency;
5611
5612 switch (current_itr) {
5613
5614 case lowest_latency:
5615 new_itr = IGB_70K_ITR;
5616 break;
5617 case low_latency:
5618 new_itr = IGB_20K_ITR;
5619 break;
5620 case bulk_latency:
5621 new_itr = IGB_4K_ITR;
5622 break;
5623 default:
5624 break;
5625 }
5626
5627set_itr_now:
5628 if (new_itr != q_vector->itr_val) {
5629
5630
5631
5632
5633 new_itr = new_itr > q_vector->itr_val ?
5634 max((new_itr * q_vector->itr_val) /
5635 (new_itr + (q_vector->itr_val >> 2)),
5636 new_itr) : new_itr;
5637
5638
5639
5640
5641
5642
5643 q_vector->itr_val = new_itr;
5644 q_vector->set_itr = 1;
5645 }
5646}
5647
5648static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5649 struct igb_tx_buffer *first,
5650 u32 vlan_macip_lens, u32 type_tucmd,
5651 u32 mss_l4len_idx)
5652{
5653 struct e1000_adv_tx_context_desc *context_desc;
5654 u16 i = tx_ring->next_to_use;
5655 struct timespec64 ts;
5656
5657 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5658
5659 i++;
5660 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5661
5662
5663 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5664
5665
5666 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5667 mss_l4len_idx |= tx_ring->reg_idx << 4;
5668
5669 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5670 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

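	/* We assume there is always a valid Tx time available. Invalid times
	 * should have been handled by the upper layers.
	 */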
5676 if (tx_ring->launchtime_enable) {
5677 ts = ns_to_timespec64(first->skb->tstamp);
5678 first->skb->tstamp = 0;
5679 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5680 } else {
5681 context_desc->seqnum_seed = 0;
5682 }
5683}
5684
5685static int igb_tso(struct igb_ring *tx_ring,
5686 struct igb_tx_buffer *first,
5687 u8 *hdr_len)
5688{
5689 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5690 struct sk_buff *skb = first->skb;
5691 union {
5692 struct iphdr *v4;
5693 struct ipv6hdr *v6;
5694 unsigned char *hdr;
5695 } ip;
5696 union {
5697 struct tcphdr *tcp;
5698 unsigned char *hdr;
5699 } l4;
5700 u32 paylen, l4_offset;
5701 int err;
5702
5703 if (skb->ip_summed != CHECKSUM_PARTIAL)
5704 return 0;
5705
5706 if (!skb_is_gso(skb))
5707 return 0;
5708
5709 err = skb_cow_head(skb, 0);
5710 if (err < 0)
5711 return err;
5712
5713 ip.hdr = skb_network_header(skb);
5714 l4.hdr = skb_checksum_start(skb);
5715
5716
5717 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5718
5719
5720 if (ip.v4->version == 4) {
5721 unsigned char *csum_start = skb_checksum_start(skb);
5722 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
5723
5724
5725
5726
5727 ip.v4->check = csum_fold(csum_partial(trans_start,
5728 csum_start - trans_start,
5729 0));
5730 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
5731
5732 ip.v4->tot_len = 0;
5733 first->tx_flags |= IGB_TX_FLAGS_TSO |
5734 IGB_TX_FLAGS_CSUM |
5735 IGB_TX_FLAGS_IPV4;
5736 } else {
5737 ip.v6->payload_len = 0;
5738 first->tx_flags |= IGB_TX_FLAGS_TSO |
5739 IGB_TX_FLAGS_CSUM;
5740 }
5741
5742
5743 l4_offset = l4.hdr - skb->data;
5744
5745
5746 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
5747
5748
5749 paylen = skb->len - l4_offset;
5750 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
5751
5752
5753 first->gso_segs = skb_shinfo(skb)->gso_segs;
5754 first->bytecount += (first->gso_segs - 1) * *hdr_len;
5755
5756
5757 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
5758 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
5759
5760
5761 vlan_macip_lens = l4.hdr - ip.hdr;
5762 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
5763 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5764
5765 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
5766 type_tucmd, mss_l4len_idx);
5767
5768 return 1;
5769}
5770
5771static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
5772{
5773 unsigned int offset = 0;
5774
5775 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
5776
5777 return offset == skb_checksum_start_offset(skb);
5778}
5779
5780static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5781{
5782 struct sk_buff *skb = first->skb;
5783 u32 vlan_macip_lens = 0;
5784 u32 type_tucmd = 0;
5785
5786 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5787csum_failed:
5788 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
5789 !tx_ring->launchtime_enable)
5790 return;
5791 goto no_csum;
5792 }
5793
5794 switch (skb->csum_offset) {
5795 case offsetof(struct tcphdr, check):
5796 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5797
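		/* fall through */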
5798 case offsetof(struct udphdr, check):
5799 break;
5800 case offsetof(struct sctphdr, checksum):
5801
5802 if (((first->protocol == htons(ETH_P_IP)) &&
5803 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
5804 ((first->protocol == htons(ETH_P_IPV6)) &&
5805 igb_ipv6_csum_is_sctp(skb))) {
5806 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
5807 break;
5808 }
5809
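		/* fall through */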
5810 default:
5811 skb_checksum_help(skb);
5812 goto csum_failed;
5813 }
5814
5815
5816 first->tx_flags |= IGB_TX_FLAGS_CSUM;
5817 vlan_macip_lens = skb_checksum_start_offset(skb) -
5818 skb_network_offset(skb);
5819no_csum:
5820 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
5821 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5822
5823 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
5824}
5825
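/* IGB_SET_FLAG() maps the _flag bit in _input onto the _result bit position
 * using only a constant multiply or divide, which works when both masks are
 * powers of two, e.g. IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
 * E1000_ADVTXD_DCMD_VLE) in igb_tx_cmd_type() below.
 */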
5826#define IGB_SET_FLAG(_input, _flag, _result) \
5827 ((_flag <= _result) ? \
5828 ((u32)(_input & _flag) * (_result / _flag)) : \
5829 ((u32)(_input & _flag) / (_flag / _result)))
5830
5831static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
5832{
5833
5834 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
5835 E1000_ADVTXD_DCMD_DEXT |
5836 E1000_ADVTXD_DCMD_IFCS;
5837
5838
5839 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
5840 (E1000_ADVTXD_DCMD_VLE));
5841
5842
5843 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
5844 (E1000_ADVTXD_DCMD_TSE));
5845
5846
5847 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
5848 (E1000_ADVTXD_MAC_TSTAMP));
5849
5850
5851 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
5852
5853 return cmd_type;
5854}
5855
5856static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
5857 union e1000_adv_tx_desc *tx_desc,
5858 u32 tx_flags, unsigned int paylen)
5859{
5860 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
5861
5862
5863 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5864 olinfo_status |= tx_ring->reg_idx << 4;
5865
5866
5867 olinfo_status |= IGB_SET_FLAG(tx_flags,
5868 IGB_TX_FLAGS_CSUM,
5869 (E1000_TXD_POPTS_TXSM << 8));
5870
5871
5872 olinfo_status |= IGB_SET_FLAG(tx_flags,
5873 IGB_TX_FLAGS_IPV4,
5874 (E1000_TXD_POPTS_IXSM << 8));
5875
5876 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
5877}
5878
5879static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5880{
5881 struct net_device *netdev = tx_ring->netdev;
5882
5883 netif_stop_subqueue(netdev, tx_ring->queue_index);
5884
5885
5886
5887
5888
5889 smp_mb();
5890
5891
5892
5893
5894 if (igb_desc_unused(tx_ring) < size)
5895 return -EBUSY;
5896
5897
5898 netif_wake_subqueue(netdev, tx_ring->queue_index);
5899
5900 u64_stats_update_begin(&tx_ring->tx_syncp2);
5901 tx_ring->tx_stats.restart_queue2++;
5902 u64_stats_update_end(&tx_ring->tx_syncp2);
5903
5904 return 0;
5905}
5906
5907static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5908{
5909 if (igb_desc_unused(tx_ring) >= size)
5910 return 0;
5911 return __igb_maybe_stop_tx(tx_ring, size);
5912}
5913
5914static int igb_tx_map(struct igb_ring *tx_ring,
5915 struct igb_tx_buffer *first,
5916 const u8 hdr_len)
5917{
5918 struct sk_buff *skb = first->skb;
5919 struct igb_tx_buffer *tx_buffer;
5920 union e1000_adv_tx_desc *tx_desc;
5921 struct skb_frag_struct *frag;
5922 dma_addr_t dma;
5923 unsigned int data_len, size;
5924 u32 tx_flags = first->tx_flags;
5925 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
5926 u16 i = tx_ring->next_to_use;
5927
5928 tx_desc = IGB_TX_DESC(tx_ring, i);
5929
5930 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
5931
5932 size = skb_headlen(skb);
5933 data_len = skb->data_len;
5934
5935 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
5936
5937 tx_buffer = first;
5938
5939 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
5940 if (dma_mapping_error(tx_ring->dev, dma))
5941 goto dma_error;
5942
5943
5944 dma_unmap_len_set(tx_buffer, len, size);
5945 dma_unmap_addr_set(tx_buffer, dma, dma);
5946
5947 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5948
5949 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
5950 tx_desc->read.cmd_type_len =
5951 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
5952
5953 i++;
5954 tx_desc++;
5955 if (i == tx_ring->count) {
5956 tx_desc = IGB_TX_DESC(tx_ring, 0);
5957 i = 0;
5958 }
5959 tx_desc->read.olinfo_status = 0;
5960
5961 dma += IGB_MAX_DATA_PER_TXD;
5962 size -= IGB_MAX_DATA_PER_TXD;
5963
5964 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5965 }
5966
5967 if (likely(!data_len))
5968 break;
5969
5970 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
5971
5972 i++;
5973 tx_desc++;
5974 if (i == tx_ring->count) {
5975 tx_desc = IGB_TX_DESC(tx_ring, 0);
5976 i = 0;
5977 }
5978 tx_desc->read.olinfo_status = 0;
5979
5980 size = skb_frag_size(frag);
5981 data_len -= size;
5982
5983 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
5984 size, DMA_TO_DEVICE);
5985
5986 tx_buffer = &tx_ring->tx_buffer_info[i];
5987 }
5988
5989
5990 cmd_type |= size | IGB_TXD_DCMD;
5991 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
5992
5993 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
5994
5995
5996 first->time_stamp = jiffies;
5997
	skb_tx_timestamp(skb);

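	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */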
6007 dma_wmb();
6008
6009
6010 first->next_to_watch = tx_desc;
6011
6012 i++;
6013 if (i == tx_ring->count)
6014 i = 0;
6015
6016 tx_ring->next_to_use = i;
6017
6018
6019 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6020
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);
6024 return 0;
6025
6026dma_error:
6027 dev_err(tx_ring->dev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

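	/* clear dma mappings for failed tx_buffer_info map */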
6031 while (tx_buffer != first) {
6032 if (dma_unmap_len(tx_buffer, len))
6033 dma_unmap_page(tx_ring->dev,
6034 dma_unmap_addr(tx_buffer, dma),
6035 dma_unmap_len(tx_buffer, len),
6036 DMA_TO_DEVICE);
6037 dma_unmap_len_set(tx_buffer, len, 0);
6038
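		/* i is a u16, so walking back past entry 0 relies on unsigned
		 * wraparound; adding tx_ring->count brings i back to the last
		 * descriptor in the ring.
		 */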
6039 if (i-- == 0)
6040 i += tx_ring->count;
6041 tx_buffer = &tx_ring->tx_buffer_info[i];
6042 }
6043
6044 if (dma_unmap_len(tx_buffer, len))
6045 dma_unmap_single(tx_ring->dev,
6046 dma_unmap_addr(tx_buffer, dma),
6047 dma_unmap_len(tx_buffer, len),
6048 DMA_TO_DEVICE);
6049 dma_unmap_len_set(tx_buffer, len, 0);
6050
6051 dev_kfree_skb_any(tx_buffer->skb);
6052 tx_buffer->skb = NULL;
6053
6054 tx_ring->next_to_use = i;
6055
6056 return -1;
6057}
6058
6059netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
6060 struct igb_ring *tx_ring)
6061{
6062 struct igb_tx_buffer *first;
6063 int tso;
6064 u32 tx_flags = 0;
6065 unsigned short f;
6066 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6067 __be16 protocol = vlan_get_protocol(skb);
	u8 hdr_len = 0;

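	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */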
6076 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6077 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6078
	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
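		/* this is a hard error */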
6081 return NETDEV_TX_BUSY;
6082 }
6083
6084
6085 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6086 first->skb = skb;
6087 first->bytecount = skb->len;
6088 first->gso_segs = 1;
6089
6090 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6091 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6092
6093 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
6094 !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
6095 &adapter->state)) {
6096 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6097 tx_flags |= IGB_TX_FLAGS_TSTAMP;
6098
6099 adapter->ptp_tx_skb = skb_get(skb);
6100 adapter->ptp_tx_start = jiffies;
6101 if (adapter->hw.mac.type == e1000_82576)
6102 schedule_work(&adapter->ptp_tx_work);
6103 } else {
6104 adapter->tx_hwtstamp_skipped++;
6105 }
6106 }
6107
6108 if (skb_vlan_tag_present(skb)) {
6109 tx_flags |= IGB_TX_FLAGS_VLAN;
6110 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
6111 }
6112
6113
6114 first->tx_flags = tx_flags;
6115 first->protocol = protocol;
6116
6117 tso = igb_tso(tx_ring, first, &hdr_len);
6118 if (tso < 0)
6119 goto out_drop;
6120 else if (!tso)
6121 igb_tx_csum(tx_ring, first);
6122
6123 if (igb_tx_map(tx_ring, first, hdr_len))
6124 goto cleanup_tx_tstamp;
6125
6126 return NETDEV_TX_OK;
6127
6128out_drop:
6129 dev_kfree_skb_any(first->skb);
6130 first->skb = NULL;
6131cleanup_tx_tstamp:
6132 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
6133 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6134
6135 dev_kfree_skb_any(adapter->ptp_tx_skb);
6136 adapter->ptp_tx_skb = NULL;
6137 if (adapter->hw.mac.type == e1000_82576)
6138 cancel_work_sync(&adapter->ptp_tx_work);
6139 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
6140 }
6141
6142 return NETDEV_TX_OK;
6143}
6144
6145static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
6146 struct sk_buff *skb)
6147{
6148 unsigned int r_idx = skb->queue_mapping;
6149
6150 if (r_idx >= adapter->num_tx_queues)
6151 r_idx = r_idx % adapter->num_tx_queues;
6152
6153 return adapter->tx_ring[r_idx];
6154}
6155
6156static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
6157 struct net_device *netdev)
6158{
	struct igb_adapter *adapter = netdev_priv(netdev);

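	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */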
6164 if (skb_put_padto(skb, 17))
6165 return NETDEV_TX_OK;
6166
6167 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

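/**
 *  igb_tx_timeout - Respond to a Tx Hang
 *  @netdev: network interface device structure
 **/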
6174static void igb_tx_timeout(struct net_device *netdev)
6175{
6176 struct igb_adapter *adapter = netdev_priv(netdev);
6177 struct e1000_hw *hw = &adapter->hw;
6178
6179
6180 adapter->tx_timeout_count++;
6181
6182 if (hw->mac.type >= e1000_82580)
6183 hw->dev_spec._82575.global_device_reset = true;
6184
6185 schedule_work(&adapter->reset_task);
6186 wr32(E1000_EICS,
6187 (adapter->eims_enable_mask & ~adapter->eims_other));
6188}
6189
6190static void igb_reset_task(struct work_struct *work)
6191{
	struct igb_adapter *adapter;

	adapter = container_of(work, struct igb_adapter, reset_task);
6194
6195 igb_dump(adapter);
6196 netdev_err(adapter->netdev, "Reset adapter\n");
6197 igb_reinit_locked(adapter);
}

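/**
 *  igb_get_stats64 - Get System Network Statistics
 *  @netdev: network interface device structure
 *  @stats: rtnl_link_stats64 pointer
 **/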
6205static void igb_get_stats64(struct net_device *netdev,
6206 struct rtnl_link_stats64 *stats)
6207{
6208 struct igb_adapter *adapter = netdev_priv(netdev);
6209
6210 spin_lock(&adapter->stats64_lock);
6211 igb_update_stats(adapter);
6212 memcpy(stats, &adapter->stats64, sizeof(*stats));
6213 spin_unlock(&adapter->stats64_lock);
}

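/**
 *  igb_change_mtu - Change the Maximum Transfer Unit
 *  @netdev: network interface device structure
 *  @new_mtu: new value for maximum frame size
 *
 *  Returns 0 on success, negative on failure
 **/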
6223static int igb_change_mtu(struct net_device *netdev, int new_mtu)
6224{
6225 struct igb_adapter *adapter = netdev_priv(netdev);
6226 struct pci_dev *pdev = adapter->pdev;
6227 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
6228
6229
6230 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
6231 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
6232
6233 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
6234 usleep_range(1000, 2000);
6235
6236
6237 adapter->max_frame_size = max_frame;
6238
6239 if (netif_running(netdev))
6240 igb_down(adapter);
6241
6242 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
6243 netdev->mtu, new_mtu);
6244 netdev->mtu = new_mtu;
6245
6246 if (netif_running(netdev))
6247 igb_up(adapter);
6248 else
6249 igb_reset(adapter);
6250
6251 clear_bit(__IGB_RESETTING, &adapter->state);
6252
6253 return 0;
}

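/**
 *  igb_update_stats - Update the board statistics counters
 *  @adapter: board private structure
 **/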
6260void igb_update_stats(struct igb_adapter *adapter)
6261{
6262 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
6263 struct e1000_hw *hw = &adapter->hw;
6264 struct pci_dev *pdev = adapter->pdev;
6265 u32 reg, mpc;
6266 int i;
6267 u64 bytes, packets;
6268 unsigned int start;
	u64 _bytes, _packets;

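	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */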
6274 if (adapter->link_speed == 0)
6275 return;
6276 if (pci_channel_offline(pdev))
6277 return;
6278
6279 bytes = 0;
6280 packets = 0;
6281
6282 rcu_read_lock();
6283 for (i = 0; i < adapter->num_rx_queues; i++) {
6284 struct igb_ring *ring = adapter->rx_ring[i];
6285 u32 rqdpc = rd32(E1000_RQDPC(i));
6286 if (hw->mac.type >= e1000_i210)
6287 wr32(E1000_RQDPC(i), 0);
6288
6289 if (rqdpc) {
6290 ring->rx_stats.drops += rqdpc;
6291 net_stats->rx_fifo_errors += rqdpc;
6292 }
6293
6294 do {
6295 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
6296 _bytes = ring->rx_stats.bytes;
6297 _packets = ring->rx_stats.packets;
6298 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
6299 bytes += _bytes;
6300 packets += _packets;
6301 }
6302
6303 net_stats->rx_bytes = bytes;
6304 net_stats->rx_packets = packets;
6305
6306 bytes = 0;
6307 packets = 0;
6308 for (i = 0; i < adapter->num_tx_queues; i++) {
6309 struct igb_ring *ring = adapter->tx_ring[i];
6310 do {
6311 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
6312 _bytes = ring->tx_stats.bytes;
6313 _packets = ring->tx_stats.packets;
6314 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
6315 bytes += _bytes;
6316 packets += _packets;
6317 }
6318 net_stats->tx_bytes = bytes;
6319 net_stats->tx_packets = packets;
	rcu_read_unlock();

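	/* read stats registers */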
6323 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
6324 adapter->stats.gprc += rd32(E1000_GPRC);
6325 adapter->stats.gorc += rd32(E1000_GORCL);
6326 rd32(E1000_GORCH);
6327 adapter->stats.bprc += rd32(E1000_BPRC);
6328 adapter->stats.mprc += rd32(E1000_MPRC);
6329 adapter->stats.roc += rd32(E1000_ROC);
6330
6331 adapter->stats.prc64 += rd32(E1000_PRC64);
6332 adapter->stats.prc127 += rd32(E1000_PRC127);
6333 adapter->stats.prc255 += rd32(E1000_PRC255);
6334 adapter->stats.prc511 += rd32(E1000_PRC511);
6335 adapter->stats.prc1023 += rd32(E1000_PRC1023);
6336 adapter->stats.prc1522 += rd32(E1000_PRC1522);
6337 adapter->stats.symerrs += rd32(E1000_SYMERRS);
6338 adapter->stats.sec += rd32(E1000_SEC);
6339
6340 mpc = rd32(E1000_MPC);
6341 adapter->stats.mpc += mpc;
6342 net_stats->rx_fifo_errors += mpc;
6343 adapter->stats.scc += rd32(E1000_SCC);
6344 adapter->stats.ecol += rd32(E1000_ECOL);
6345 adapter->stats.mcc += rd32(E1000_MCC);
6346 adapter->stats.latecol += rd32(E1000_LATECOL);
6347 adapter->stats.dc += rd32(E1000_DC);
6348 adapter->stats.rlec += rd32(E1000_RLEC);
6349 adapter->stats.xonrxc += rd32(E1000_XONRXC);
6350 adapter->stats.xontxc += rd32(E1000_XONTXC);
6351 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6352 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6353 adapter->stats.fcruc += rd32(E1000_FCRUC);
6354 adapter->stats.gptc += rd32(E1000_GPTC);
6355 adapter->stats.gotc += rd32(E1000_GOTCL);
6356 rd32(E1000_GOTCH);
6357 adapter->stats.rnbc += rd32(E1000_RNBC);
6358 adapter->stats.ruc += rd32(E1000_RUC);
6359 adapter->stats.rfc += rd32(E1000_RFC);
6360 adapter->stats.rjc += rd32(E1000_RJC);
6361 adapter->stats.tor += rd32(E1000_TORH);
6362 adapter->stats.tot += rd32(E1000_TOTH);
6363 adapter->stats.tpr += rd32(E1000_TPR);
6364
6365 adapter->stats.ptc64 += rd32(E1000_PTC64);
6366 adapter->stats.ptc127 += rd32(E1000_PTC127);
6367 adapter->stats.ptc255 += rd32(E1000_PTC255);
6368 adapter->stats.ptc511 += rd32(E1000_PTC511);
6369 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6370 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6371
6372 adapter->stats.mptc += rd32(E1000_MPTC);
6373 adapter->stats.bptc += rd32(E1000_BPTC);
6374
6375 adapter->stats.tpt += rd32(E1000_TPT);
6376 adapter->stats.colc += rd32(E1000_COLC);
6377
6378 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6379
6380 reg = rd32(E1000_CTRL_EXT);
6381 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6382 adapter->stats.rxerrc += rd32(E1000_RXERRC);
6383
6384
6385 if ((hw->mac.type != e1000_i210) &&
6386 (hw->mac.type != e1000_i211))
6387 adapter->stats.tncrs += rd32(E1000_TNCRS);
6388 }
6389
6390 adapter->stats.tsctc += rd32(E1000_TSCTC);
6391 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6392
6393 adapter->stats.iac += rd32(E1000_IAC);
6394 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6395 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6396 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6397 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6398 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6399 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6400 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

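	/* Fill out the OS statistics structure */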
6404 net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

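	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */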
6412 net_stats->rx_errors = adapter->stats.rxerrc +
6413 adapter->stats.crcerrs + adapter->stats.algnerrc +
6414 adapter->stats.ruc + adapter->stats.roc +
6415 adapter->stats.cexterr;
6416 net_stats->rx_length_errors = adapter->stats.ruc +
6417 adapter->stats.roc;
6418 net_stats->rx_crc_errors = adapter->stats.crcerrs;
6419 net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

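	/* Tx Errors */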
6423 net_stats->tx_errors = adapter->stats.ecol +
6424 adapter->stats.latecol;
6425 net_stats->tx_aborted_errors = adapter->stats.ecol;
6426 net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

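	/* Tx Dropped needs to be maintained elsewhere */

	/* Management Stats */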
6432 adapter->stats.mgptc += rd32(E1000_MGTPTC);
6433 adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

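	/* OS2BMC Stats */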
6437 reg = rd32(E1000_MANC);
6438 if (reg & E1000_MANC_EN_BMC2OS) {
6439 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6440 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6441 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6442 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6443 }
6444}
6445
6446static void igb_tsync_interrupt(struct igb_adapter *adapter)
6447{
6448 struct e1000_hw *hw = &adapter->hw;
6449 struct ptp_clock_event event;
6450 struct timespec64 ts;
6451 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
6452
6453 if (tsicr & TSINTR_SYS_WRAP) {
6454 event.type = PTP_CLOCK_PPS;
6455 if (adapter->ptp_caps.pps)
6456 ptp_clock_event(adapter->ptp_clock, &event);
6457 ack |= TSINTR_SYS_WRAP;
6458 }
6459
6460 if (tsicr & E1000_TSICR_TXTS) {
6461
6462 schedule_work(&adapter->ptp_tx_work);
6463 ack |= E1000_TSICR_TXTS;
6464 }
6465
6466 if (tsicr & TSINTR_TT0) {
6467 spin_lock(&adapter->tmreg_lock);
6468 ts = timespec64_add(adapter->perout[0].start,
6469 adapter->perout[0].period);
6470
6471 wr32(E1000_TRGTTIML0, ts.tv_nsec);
6472 wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
6473 tsauxc = rd32(E1000_TSAUXC);
6474 tsauxc |= TSAUXC_EN_TT0;
6475 wr32(E1000_TSAUXC, tsauxc);
6476 adapter->perout[0].start = ts;
6477 spin_unlock(&adapter->tmreg_lock);
6478 ack |= TSINTR_TT0;
6479 }
6480
6481 if (tsicr & TSINTR_TT1) {
6482 spin_lock(&adapter->tmreg_lock);
6483 ts = timespec64_add(adapter->perout[1].start,
6484 adapter->perout[1].period);
6485 wr32(E1000_TRGTTIML1, ts.tv_nsec);
6486 wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
6487 tsauxc = rd32(E1000_TSAUXC);
6488 tsauxc |= TSAUXC_EN_TT1;
6489 wr32(E1000_TSAUXC, tsauxc);
6490 adapter->perout[1].start = ts;
6491 spin_unlock(&adapter->tmreg_lock);
6492 ack |= TSINTR_TT1;
6493 }
6494
6495 if (tsicr & TSINTR_AUTT0) {
6496 nsec = rd32(E1000_AUXSTMPL0);
6497 sec = rd32(E1000_AUXSTMPH0);
6498 event.type = PTP_CLOCK_EXTTS;
6499 event.index = 0;
6500 event.timestamp = sec * 1000000000ULL + nsec;
6501 ptp_clock_event(adapter->ptp_clock, &event);
6502 ack |= TSINTR_AUTT0;
6503 }
6504
6505 if (tsicr & TSINTR_AUTT1) {
6506 nsec = rd32(E1000_AUXSTMPL1);
6507 sec = rd32(E1000_AUXSTMPH1);
6508 event.type = PTP_CLOCK_EXTTS;
6509 event.index = 1;
6510 event.timestamp = sec * 1000000000ULL + nsec;
6511 ptp_clock_event(adapter->ptp_clock, &event);
6512 ack |= TSINTR_AUTT1;
	}

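	/* acknowledge the interrupts */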
6516 wr32(E1000_TSICR, ack);
6517}
6518
6519static irqreturn_t igb_msix_other(int irq, void *data)
6520{
6521 struct igb_adapter *adapter = data;
6522 struct e1000_hw *hw = &adapter->hw;
6523 u32 icr = rd32(E1000_ICR);
6524
6525
6526 if (icr & E1000_ICR_DRSTA)
6527 schedule_work(&adapter->reset_task);
6528
6529 if (icr & E1000_ICR_DOUTSYNC) {
6530
		adapter->stats.doosync++;

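		/* The DMA Out of Sync is also indication of a spoof event in
		 * IOV mode. Check the Wrong VM Behavior register to see if it
		 * is really a spoof event.
		 */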
6536 igb_check_wvbr(adapter);
6537 }
6538
6539
6540 if (icr & E1000_ICR_VMMB)
6541 igb_msg_task(adapter);
6542
6543 if (icr & E1000_ICR_LSC) {
6544 hw->mac.get_link_status = 1;
6545
6546 if (!test_bit(__IGB_DOWN, &adapter->state))
6547 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6548 }
6549
6550 if (icr & E1000_ICR_TS)
6551 igb_tsync_interrupt(adapter);
6552
6553 wr32(E1000_EIMS, adapter->eims_other);
6554
6555 return IRQ_HANDLED;
6556}
6557
6558static void igb_write_itr(struct igb_q_vector *q_vector)
6559{
6560 struct igb_adapter *adapter = q_vector->adapter;
6561 u32 itr_val = q_vector->itr_val & 0x7FFC;
6562
6563 if (!q_vector->set_itr)
6564 return;
6565
6566 if (!itr_val)
6567 itr_val = 0x4;
6568
6569 if (adapter->hw.mac.type == e1000_82575)
6570 itr_val |= itr_val << 16;
6571 else
6572 itr_val |= E1000_EITR_CNT_IGNR;
6573
6574 writel(itr_val, q_vector->itr_register);
6575 q_vector->set_itr = 0;
6576}
6577
6578static irqreturn_t igb_msix_ring(int irq, void *data)
6579{
6580 struct igb_q_vector *q_vector = data;
6581
6582
6583 igb_write_itr(q_vector);
6584
6585 napi_schedule(&q_vector->napi);
6586
6587 return IRQ_HANDLED;
6588}
6589
6590#ifdef CONFIG_IGB_DCA
6591static void igb_update_tx_dca(struct igb_adapter *adapter,
6592 struct igb_ring *tx_ring,
6593 int cpu)
6594{
6595 struct e1000_hw *hw = &adapter->hw;
6596 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
6597
6598 if (hw->mac.type != e1000_82575)
		txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;

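	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */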
6605 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
6606 E1000_DCA_TXCTRL_DATA_RRO_EN |
6607 E1000_DCA_TXCTRL_DESC_DCA_EN;
6608
6609 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
6610}
6611
6612static void igb_update_rx_dca(struct igb_adapter *adapter,
6613 struct igb_ring *rx_ring,
6614 int cpu)
6615{
6616 struct e1000_hw *hw = &adapter->hw;
6617 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
6618
6619 if (hw->mac.type != e1000_82575)
6620 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
6621
6622
6623
6624
6625
6626 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
6627 E1000_DCA_RXCTRL_DESC_DCA_EN;
6628
6629 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
6630}
6631
6632static void igb_update_dca(struct igb_q_vector *q_vector)
6633{
6634 struct igb_adapter *adapter = q_vector->adapter;
6635 int cpu = get_cpu();
6636
6637 if (q_vector->cpu == cpu)
6638 goto out_no_update;
6639
6640 if (q_vector->tx.ring)
6641 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
6642
6643 if (q_vector->rx.ring)
6644 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
6645
6646 q_vector->cpu = cpu;
6647out_no_update:
6648 put_cpu();
6649}
6650
6651static void igb_setup_dca(struct igb_adapter *adapter)
6652{
6653 struct e1000_hw *hw = &adapter->hw;
6654 int i;
6655
6656 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
6657 return;
6658
6659
6660 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
6661
6662 for (i = 0; i < adapter->num_q_vectors; i++) {
6663 adapter->q_vector[i]->cpu = -1;
6664 igb_update_dca(adapter->q_vector[i]);
6665 }
6666}
6667
6668static int __igb_notify_dca(struct device *dev, void *data)
6669{
6670 struct net_device *netdev = dev_get_drvdata(dev);
6671 struct igb_adapter *adapter = netdev_priv(netdev);
6672 struct pci_dev *pdev = adapter->pdev;
6673 struct e1000_hw *hw = &adapter->hw;
6674 unsigned long event = *(unsigned long *)data;
6675
6676 switch (event) {
6677 case DCA_PROVIDER_ADD:
6678
6679 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
6680 break;
6681 if (dca_add_requester(dev) == 0) {
6682 adapter->flags |= IGB_FLAG_DCA_ENABLED;
6683 dev_info(&pdev->dev, "DCA enabled\n");
6684 igb_setup_dca(adapter);
6685 break;
6686 }
6687
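		/* Fall Through - since DCA is disabled. */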
6688 case DCA_PROVIDER_REMOVE:
6689 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
6690
6691
6692
6693 dca_remove_requester(dev);
6694 dev_info(&pdev->dev, "DCA disabled\n");
6695 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
6696 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
6697 }
6698 break;
6699 }
6700
6701 return 0;
6702}
6703
6704static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
6705 void *p)
6706{
6707 int ret_val;
6708
6709 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
6710 __igb_notify_dca);
6711
6712 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
6713}
6714#endif
6715
6716#ifdef CONFIG_PCI_IOV
6717static int igb_vf_configure(struct igb_adapter *adapter, int vf)
6718{
6719 unsigned char mac_addr[ETH_ALEN];
6720
6721 eth_zero_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

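	/* By default spoof check is enabled for all VFs */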
	adapter->vf_data[vf].spoofchk_enabled = true;

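	/* By default VFs are not trusted */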
6728 adapter->vf_data[vf].trusted = false;
6729
6730 return 0;
6731}
6732
6733#endif
6734static void igb_ping_all_vfs(struct igb_adapter *adapter)
6735{
6736 struct e1000_hw *hw = &adapter->hw;
6737 u32 ping;
6738 int i;
6739
6740 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
6741 ping = E1000_PF_CONTROL_MSG;
6742 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
6743 ping |= E1000_VT_MSGTYPE_CTS;
6744 igb_write_mbx(hw, &ping, 1, i);
6745 }
6746}
6747
6748static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
6749{
6750 struct e1000_hw *hw = &adapter->hw;
6751 u32 vmolr = rd32(E1000_VMOLR(vf));
6752 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6753
6754 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
6755 IGB_VF_FLAG_MULTI_PROMISC);
6756 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6757
6758 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
6759 vmolr |= E1000_VMOLR_MPME;
6760 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
6761 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
6762 } else {
6763
6764
6765
6766
6767 if (vf_data->num_vf_mc_hashes > 30) {
6768 vmolr |= E1000_VMOLR_MPME;
6769 } else if (vf_data->num_vf_mc_hashes) {
6770 int j;
6771
6772 vmolr |= E1000_VMOLR_ROMPE;
6773 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6774 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6775 }
6776 }
6777
6778 wr32(E1000_VMOLR(vf), vmolr);
6779
6780
6781 if (*msgbuf & E1000_VT_MSGINFO_MASK)
6782 return -EINVAL;
6783
6784 return 0;
6785}
6786
6787static int igb_set_vf_multicasts(struct igb_adapter *adapter,
6788 u32 *msgbuf, u32 vf)
6789{
6790 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
6791 u16 *hash_list = (u16 *)&msgbuf[1];
6792 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

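	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multi cast
	 * list changes
	 */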
	vf_data->num_vf_mc_hashes = n;

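	/* only up to 30 hash values supported */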
6802 if (n > 30)
6803 n = 30;
6804
6805
6806 for (i = 0; i < n; i++)
6807 vf_data->vf_mc_hashes[i] = hash_list[i];
6808
6809
6810 igb_set_rx_mode(adapter->netdev);
6811
6812 return 0;
6813}
6814
6815static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
6816{
6817 struct e1000_hw *hw = &adapter->hw;
6818 struct vf_data_storage *vf_data;
6819 int i, j;
6820
6821 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6822 u32 vmolr = rd32(E1000_VMOLR(i));
6823
6824 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6825
6826 vf_data = &adapter->vf_data[i];
6827
6828 if ((vf_data->num_vf_mc_hashes > 30) ||
6829 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
6830 vmolr |= E1000_VMOLR_MPME;
6831 } else if (vf_data->num_vf_mc_hashes) {
6832 vmolr |= E1000_VMOLR_ROMPE;
6833 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6834 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6835 }
6836 wr32(E1000_VMOLR(i), vmolr);
6837 }
6838}
6839
6840static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
6841{
6842 struct e1000_hw *hw = &adapter->hw;
6843 u32 pool_mask, vlvf_mask, i;
6844
6845
6846 pool_mask = E1000_VLVF_POOLSEL_MASK;
6847 vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
6848
6849
6850 pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
6851 adapter->vfs_allocated_count);
6852
6853
6854 for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
6855 u32 vlvf = rd32(E1000_VLVF(i));
6856 u32 vfta_mask, vid, vfta;
6857
6858
6859 if (!(vlvf & vlvf_mask))
6860 continue;
6861
6862
6863 vlvf ^= vlvf_mask;
6864
6865
6866 if (vlvf & pool_mask)
6867 goto update_vlvfb;
6868
6869
6870 if (vlvf & E1000_VLVF_POOLSEL_MASK)
6871 goto update_vlvf;
6872
6873 vid = vlvf & E1000_VLVF_VLANID_MASK;
6874 vfta_mask = BIT(vid % 32);
6875
6876
6877 vfta = adapter->shadow_vfta[vid / 32];
6878 if (vfta & vfta_mask)
6879 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
6880update_vlvf:
6881
6882 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6883 vlvf &= E1000_VLVF_POOLSEL_MASK;
6884 else
6885 vlvf = 0;
6886update_vlvfb:
6887
6888 wr32(E1000_VLVF(i), vlvf);
6889 }
6890}
6891
6892static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
6893{
6894 u32 vlvf;
6895 int idx;
6896
6897
6898 if (vlan == 0)
6899 return 0;
6900
6901
6902 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
6903 vlvf = rd32(E1000_VLVF(idx));
6904 if ((vlvf & VLAN_VID_MASK) == vlan)
6905 break;
6906 }
6907
6908 return idx;
6909}
6910
6911static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
6912{
6913 struct e1000_hw *hw = &adapter->hw;
6914 u32 bits, pf_id;
6915 int idx;
6916
6917 idx = igb_find_vlvf_entry(hw, vid);
6918 if (!idx)
6919 return;
6920
6921
6922
6923
6924 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
6925 bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
6926 bits &= rd32(E1000_VLVF(idx));
6927
6928
6929 if (!bits) {
6930 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6931 wr32(E1000_VLVF(idx), BIT(pf_id));
6932 else
6933 wr32(E1000_VLVF(idx), 0);
6934 }
6935}
6936
6937static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
6938 bool add, u32 vf)
6939{
6940 int pf_id = adapter->vfs_allocated_count;
6941 struct e1000_hw *hw = &adapter->hw;
6942 int err;
6943
6944
6945
6946
6947
6948
6949 if (add && test_bit(vid, adapter->active_vlans)) {
6950 err = igb_vfta_set(hw, vid, pf_id, true, false);
6951 if (err)
6952 return err;
6953 }
6954
6955 err = igb_vfta_set(hw, vid, vf, add, false);
6956
6957 if (add && !err)
6958 return err;
6959
6960
6961
6962
6963
6964 if (test_bit(vid, adapter->active_vlans) ||
6965 (adapter->flags & IGB_FLAG_VLAN_PROMISC))
6966 igb_update_pf_vlvf(adapter, vid);
6967
6968 return err;
6969}
6970
6971static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
6972{
6973 struct e1000_hw *hw = &adapter->hw;
6974
6975 if (vid)
6976 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
6977 else
6978 wr32(E1000_VMVIR(vf), 0);
6979}
6980
6981static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
6982 u16 vlan, u8 qos)
6983{
6984 int err;
6985
6986 err = igb_set_vf_vlan(adapter, vlan, true, vf);
6987 if (err)
6988 return err;
6989
6990 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
6991 igb_set_vmolr(adapter, vf, !vlan);
6992
6993
6994 if (vlan != adapter->vf_data[vf].pf_vlan)
6995 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
6996 false, vf);
6997
6998 adapter->vf_data[vf].pf_vlan = vlan;
6999 adapter->vf_data[vf].pf_qos = qos;
7000 igb_set_vf_vlan_strip(adapter, vf, true);
7001 dev_info(&adapter->pdev->dev,
7002 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
7003 if (test_bit(__IGB_DOWN, &adapter->state)) {
7004 dev_warn(&adapter->pdev->dev,
7005 "The VF VLAN has been set, but the PF device is not up.\n");
7006 dev_warn(&adapter->pdev->dev,
7007 "Bring the PF device up before attempting to use the VF device.\n");
7008 }
7009
7010 return err;
7011}
7012
7013static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
7014{
7015
7016 igb_set_vf_vlan(adapter, 0, true, vf);
7017
7018 igb_set_vmvir(adapter, 0, vf);
7019 igb_set_vmolr(adapter, vf, true);
7020
7021
7022 if (adapter->vf_data[vf].pf_vlan)
7023 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7024 false, vf);
7025
7026 adapter->vf_data[vf].pf_vlan = 0;
7027 adapter->vf_data[vf].pf_qos = 0;
7028 igb_set_vf_vlan_strip(adapter, vf, false);
7029
7030 return 0;
7031}
7032
7033static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
7034 u16 vlan, u8 qos, __be16 vlan_proto)
7035{
7036 struct igb_adapter *adapter = netdev_priv(netdev);
7037
7038 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
7039 return -EINVAL;
7040
7041 if (vlan_proto != htons(ETH_P_8021Q))
7042 return -EPROTONOSUPPORT;
7043
7044 return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
7045 igb_disable_port_vlan(adapter, vf);
7046}
7047
7048static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7049{
7050 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7051 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
7052 int ret;
7053
7054 if (adapter->vf_data[vf].pf_vlan)
7055 return -1;
7056
7057
7058 if (!vid && !add)
7059 return 0;
7060
7061 ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
7062 if (!ret)
7063 igb_set_vf_vlan_strip(adapter, vf, !!vid);
7064 return ret;
7065}
7066
7067static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
7068{
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

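	/* clear flags - except flag that indicates PF has set the MAC */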
7072 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
	vf_data->last_nack = jiffies;

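	/* reset vlans for device */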
7076 igb_clear_vf_vfta(adapter, vf);
7077 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
7078 igb_set_vmvir(adapter, vf_data->pf_vlan |
7079 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
7080 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
7081 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
7082
7083
7084 adapter->vf_data[vf].num_vf_mc_hashes = 0;
7085
7086
7087 igb_set_rx_mode(adapter->netdev);
7088}
7089
7090static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
7091{
7092 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7093
7094
7095 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7096 eth_zero_addr(vf_mac);
7097
7098
7099 igb_vf_reset(adapter, vf);
7100}
7101
7102static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
7103{
7104 struct e1000_hw *hw = &adapter->hw;
7105 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7106 u32 reg, msgbuf[3];
7107 u8 *addr = (u8 *)(&msgbuf[1]);
7108
7109
7110 igb_vf_reset(adapter, vf);
7111
7112
	igb_set_vf_mac(adapter, vf, vf_mac);

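	/* enable transmit and receive for vf */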
7116 reg = rd32(E1000_VFTE);
7117 wr32(E1000_VFTE, reg | BIT(vf));
7118 reg = rd32(E1000_VFRE);
7119 wr32(E1000_VFRE, reg | BIT(vf));
7120
7121 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
7122
7123
7124 if (!is_zero_ether_addr(vf_mac)) {
7125 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
7126 memcpy(addr, vf_mac, ETH_ALEN);
7127 } else {
7128 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
7129 }
7130 igb_write_mbx(hw, msgbuf, 3, vf);
7131}

static void igb_flush_mac_table(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < hw->mac.rar_entry_count; i++) {
		adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
		memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
		adapter->mac_table[i].queue = 0;
		igb_rar_set_index(adapter, i);
	}
}

static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
{
	struct e1000_hw *hw = &adapter->hw;
	/* do not count rar entries reserved for VFs MAC addresses */
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i, count = 0;

	for (i = 0; i < rar_entries; i++) {
		/* do not count default entries */
		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
			continue;

		/* do not count "in use" entries for different queues */
		if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
		    (adapter->mac_table[i].queue != queue))
			continue;

		count++;
	}

	return count;
}

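/* Set default MAC address for the PF in the first RAR entry */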
static void igb_set_default_mac_filter(struct igb_adapter *adapter)
{
	struct igb_mac_addr *mac_table = &adapter->mac_table[0];

	ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
	mac_table->queue = adapter->vfs_allocated_count;
	mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;

	igb_rar_set_index(adapter, 0);
}

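/* If the filter to be added and an already existing filter express
 * the same address and address type, it should be possible to only
 * override the other configurations, for example the queue to steer
 * traffic.
 */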
static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
				      const u8 *addr, const u8 flags)
{
	if (!(entry->state & IGB_MAC_STATE_IN_USE))
		return true;

	if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
	    (flags & IGB_MAC_STATE_SRC_ADDR))
		return false;

	if (!ether_addr_equal(addr, entry->addr))
		return false;

	return true;
}

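/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
 * 'flags' is used to indicate what kind of match is made, match is by
 * default for the destination address, if matching by source address
 * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used.
 */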
static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
				    const u8 *addr, const u8 queue,
				    const u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for the first empty entry in the MAC table.
	 * Do not touch entries at the end of the table reserved
	 * for the VF MAC addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
					       addr, flags))
			continue;

		ether_addr_copy(adapter->mac_table[i].addr, addr);
		adapter->mac_table[i].queue = queue;
		adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;

		igb_rar_set_index(adapter, i);
		return i;
	}

	return -ENOSPC;
}

static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
			      const u8 queue)
{
	return igb_add_mac_filter_flags(adapter, addr, queue, 0);
}

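/* Remove a MAC filter for 'addr' directing matching traffic to
 * 'queue', 'flags' is used to indicate what kind of match need to be
 * removed, match is by default for the destination address, if
 * matching by source address is desired the flag
 * IGB_MAC_STATE_SRC_ADDR can be used.
 */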
static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
				    const u8 *addr, const u8 queue,
				    const u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for matching entry in the MAC table based on given address
	 * and queue. Do not touch entries at the end of the table reserved
	 * for the VF MAC addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
			continue;
		if ((adapter->mac_table[i].state & flags) != flags)
			continue;
		if (adapter->mac_table[i].queue != queue)
			continue;
		if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
			continue;

		/* When a filter for the default address is "deleted",
		 * we return it to its initial configuration
		 */
		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
			adapter->mac_table[i].state =
				IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
			adapter->mac_table[i].queue =
				adapter->vfs_allocated_count;
		} else {
			adapter->mac_table[i].state = 0;
			adapter->mac_table[i].queue = 0;
			memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
		}

		igb_rar_set_index(adapter, i);
		return 0;
	}

	return -ENOENT;
}

static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
			      const u8 queue)
{
	return igb_del_mac_filter_flags(adapter, addr, queue, 0);
}

int igb_add_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;

	/* In theory, this should be supported on 82575 as well, but
	 * that part wasn't easily accessible during development.
	 */
	if (hw->mac.type != e1000_i210)
		return -EOPNOTSUPP;

	return igb_add_mac_filter_flags(adapter, addr, queue,
					IGB_MAC_STATE_QUEUE_STEERING | flags);
}

int igb_del_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags)
{
	return igb_del_mac_filter_flags(adapter, addr, queue,
					IGB_MAC_STATE_QUEUE_STEERING | flags);
}

static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int ret;

	ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);

	return min_t(int, ret, 0);
}

static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);

	return 0;
}

static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
				 const u32 info, const u8 *addr)
{
	struct pci_dev *pdev = adapter->pdev;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	struct list_head *pos;
	struct vf_mac_filter *entry = NULL;
	int ret = 0;

	switch (info) {
	case E1000_VF_MAC_FILTER_CLR:
		/* remove all unicast MAC filters related to the current VF */
		list_for_each(pos, &adapter->vf_macs.l) {
			entry = list_entry(pos, struct vf_mac_filter, l);
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				igb_del_mac_filter(adapter, entry->vf_mac, vf);
			}
		}
		break;
	case E1000_VF_MAC_FILTER_ADD:
		if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
		    !vf_data->trusted) {
			dev_warn(&pdev->dev,
				 "VF %d requested MAC filter but is administratively denied\n",
				 vf);
			return -EINVAL;
		}
		if (!is_valid_ether_addr(addr)) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to set invalid MAC filter\n",
				 vf);
			return -EINVAL;
		}

		/* try to find an empty slot in the list */
		list_for_each(pos, &adapter->vf_macs.l) {
			entry = list_entry(pos, struct vf_mac_filter, l);
			if (entry->free)
				break;
		}

		if (entry && entry->free) {
			entry->free = false;
			entry->vf = vf;
			ether_addr_copy(entry->vf_mac, addr);

			ret = igb_add_mac_filter(adapter, addr, vf);
			ret = min_t(int, ret, 0);
		} else {
			ret = -ENOSPC;
		}

		if (ret == -ENOSPC)
			dev_warn(&pdev->dev,
				 "VF %d has requested MAC filter but there is no space for it\n",
				 vf);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	struct pci_dev *pdev = adapter->pdev;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 info = msg[0] & E1000_VT_MSGINFO_MASK;

	/* The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (unsigned char *)&msg[1];
	int ret = 0;

	if (!info) {
		if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
		    !vf_data->trusted) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
				 vf);
			return -EINVAL;
		}

		if (!is_valid_ether_addr(addr)) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to set invalid MAC\n",
				 vf);
			return -EINVAL;
		}

		ret = igb_set_vf_mac(adapter, vf, addr);
	} else {
		ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
	}

	return ret;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			goto unlock;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		goto unlock;

	/* until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		/* unlocks mailbox */
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			goto unlock;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
				 vf);
		else
			retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	/* unlocks mailbox */
	igb_write_mbx(hw, msgbuf, 1, vf);
	return;

unlock:
	igb_unlock_mbx(hw, vf);
}

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}
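/**
 *  igb_set_uta - Set unicast filter table address
 *  @adapter: board private structure
 *  @set: boolean indicating if we are setting or clearing bits
 *
 *  The unicast table address is a register array of 32-bit registers.
 *  The table is meant to be used in a way similar to how the MTA is used
 *  however due to certain limitations in the hardware it is necessary to
 *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/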
static void igb_set_uta(struct igb_adapter *adapter, bool set)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 uta = set ? ~0 : 0;
	int i;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = hw->mac.uta_reg_count; i--;)
		array_wr32(E1000_UTA, i, uta);
}
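/**
 *  igb_intr_msi - Interrupt Handler
 *  @irq: interrupt number
 *  @data: pointer to a network interface device structure
 **/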
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
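/**
 *  igb_intr - Legacy Interrupt Handler
 *  @irq: interrupt number
 *  @data: pointer to a network interface device structure
 **/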
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR acts like we are in MSI
	 * mode. Also, if a bit was set in ICR, IMS is automatically cleared.
	 */
	u32 icr = rd32(E1000_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
			igb_set_itr(q_vector);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->flags & IGB_FLAG_HAS_MSIX)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}
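/**
 *  igb_poll - NAPI Rx polling callback
 *  @napi: napi polling structure
 *  @budget: count of how many packets we should handle
 **/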
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	bool clean_complete = true;
	int work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector, budget);

	if (q_vector->rx.ring) {
		int cleaned = igb_clean_rx_irq(q_vector, budget);

		work_done += cleaned;
		if (cleaned >= budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		igb_ring_irq_enable(q_vector);

	return min(work_done, budget - 1);
}
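/**
 *  igb_clean_tx_irq - Reclaim resources after transmit completes
 *  @q_vector: pointer to q_vector containing needed info
 *  @napi_budget: Used to determine if we are in netpoll
 *
 *  returns true if ring is completely cleaned
 **/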
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx.ring;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IGB_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGB_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);
	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct e1000_hw *hw = &adapter->hw;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (tx_buffer->next_to_watch &&
		    time_after(jiffies, tx_buffer->time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%p>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				rd32(E1000_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_buffer->time_stamp,
				tx_buffer->next_to_watch,
				jiffies,
				tx_buffer->next_to_watch->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}
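/**
 *  igb_reuse_rx_page - page flip buffer and store it back on the ring
 *  @rx_ring: rx descriptor ring to store buffers on
 *  @old_buff: donor buffer to have page reused
 *
 *  Synchronizes page for reuse by the adapter
 **/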
static void igb_reuse_rx_page(struct igb_ring *rx_ring,
			      struct igb_rx_buffer *old_buff)
{
	struct igb_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

static inline bool igb_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote pages */
	if (unlikely(igb_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IGB_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)

	if (rx_buffer->page_offset > IGB_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
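/**
 *  igb_add_rx_frag - Add contents of Rx buffer to sk_buff
 *  @rx_ring: rx descriptor ring to transact packets on
 *  @rx_buffer: buffer containing page to add
 *  @skb: sk_buff to place the data into
 *  @size: size of buffer to be added
 *
 *  This function will add the data contained in rx_buffer->page to the skb.
 **/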
static void igb_add_rx_frag(struct igb_ring *rx_ring,
			    struct igb_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
					 struct igb_rx_buffer *rx_buffer,
					 union e1000_adv_rx_desc *rx_desc,
					 unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
		va += IGB_TS_HDR_LEN;
		size -= IGB_TS_HDR_LEN;
	}

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGB_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
				     struct igb_rx_buffer *rx_buffer,
				     union e1000_adv_rx_desc *rx_desc,
				     unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(IGB_SKB_PAD + size);
#endif
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* build an skb around the page buffer */
	skb = build_skb(va - IGB_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, IGB_SKB_PAD);
	__skb_put(skb, size);

	/* pull timestamp out of packet data */
	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
		igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
		__skb_pull(skb, IGB_TS_HDR_LEN);
	}

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

static inline void igb_rx_checksum(struct igb_ring *ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igb_test_staterr(rx_desc,
			     E1000_RXDEXT_STATERR_TCPE |
			     E1000_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if (!((skb->len == 60) &&
		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
				      E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n",
		le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igb_rx_hash(struct igb_ring *ring,
			       union e1000_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}
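/**
 *  igb_is_non_eop - process handling of non-EOP buffers
 *  @rx_ring: Rx ring being processed
 *  @rx_desc: Rx descriptor for current buffer
 *
 *  This function updates next to clean.  If the buffer is an EOP buffer
 *  this function exits returning false, otherwise it will place the
 *  sk_buff in the next buffer to be chained and return true indicating
 *  that this is in fact a non-EOP buffer.
 **/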
static bool igb_is_non_eop(struct igb_ring *rx_ring,
			   union e1000_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IGB_RX_DESC(rx_ring, ntc));

	if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
		return false;

	return true;
}
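/**
 *  igb_cleanup_headers - Correct corrupted or empty headers
 *  @rx_ring: rx descriptor ring packet is being transacted on
 *  @rx_desc: pointer to the EOP Rx descriptor
 *  @skb: pointer to current skb being fixed
 *
 *  Address the case where we are pulling data in on pages only
 *  and as such no data is present in the skb header.
 *
 *  In addition if skb is not at least 60 bytes we need to pad it so that
 *  it is large enough to qualify as a valid Ethernet frame.
 *
 *  Returns true if an error was encountered and skb was freed.
 **/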
static bool igb_cleanup_headers(struct igb_ring *rx_ring,
				union e1000_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	if (unlikely((igb_test_staterr(rx_desc,
				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
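/**
 *  igb_process_skb_fields - Populate skb header fields from Rx descriptor
 *  @rx_ring: rx descriptor ring packet is being transacted on
 *  @rx_desc: pointer to the EOP Rx descriptor
 *  @skb: pointer to current skb being populated
 *
 *  This function checks the ring, descriptor, and packet information in
 *  order to populate the hash, checksum, VLAN, timestamp, protocol, and
 *  other fields within the skb.
 **/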
static void igb_process_skb_fields(struct igb_ring *rx_ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;

	igb_rx_hash(rx_ring, rx_desc, skb);

	igb_rx_checksum(rx_ring, rx_desc, skb);

	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
		u16 vid;

		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
					       const unsigned int size)
{
	struct igb_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void igb_put_rx_buffer(struct igb_ring *rx_ring,
			      struct igb_rx_buffer *rx_buffer)
{
	if (igb_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		igb_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGB_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
	struct igb_ring *rx_ring = q_vector->rx.ring;
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = igb_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union e1000_adv_rx_desc *rx_desc;
		struct igb_rx_buffer *rx_buffer;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		rx_buffer = igb_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (skb)
			igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
		else
			skb = igb_construct_skb(rx_ring, rx_buffer,
						rx_desc, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		igb_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (igb_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_bytes += skb->len;

		/* populate checksum, timestamp, VLAN, and protocol */
		igb_process_skb_fields(rx_ring, rx_desc, skb);

		napi_gro_receive(&q_vector->napi, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igb_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_packets;
}

static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
}

static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
				  struct igb_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igb_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGB_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, igb_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igb_rx_offset(rx_ring);
	bi->pagecnt_bias = 1;

	return true;
}
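/**
 *  igb_alloc_rx_buffers - Replace used receive buffers; packet split
 *  @rx_ring: rx descriptor ring to allocate new receive buffers
 *  @cleaned_count: number of buffers to allocate
 **/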
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGB_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	bufsz = igb_rx_bufsz(rx_ring);

	do {
		if (!igb_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGB_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		dma_wmb();
		writel(i, rx_ring->tail);
	}
}
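/**
 *  igb_mii_ioctl - handle MII register access ioctls
 *  @netdev: pointer to netdev struct
 *  @ifr: interface structure
 *  @cmd: ioctl command to execute
 **/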
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
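/**
 *  igb_ioctl - handle device specific ioctls
 *  @netdev: pointer to netdev struct
 *  @ifr: interface structure
 *  @cmd: ioctl command to execute
 **/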
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCGHWTSTAMP:
		return igb_ptp_get_ts_config(netdev, ifr);
	case SIOCSHWTSTAMP:
		return igb_ptp_set_ts_config(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	if (pcie_capability_read_word(adapter->pdev, reg, value))
		return -E1000_ERR_CONFIG;

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	if (pcie_capability_write_word(adapter->pdev, reg, *value))
		return -E1000_ERR_CONFIG;

	return 0;
}

static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;
	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
}

static int igb_vlan_rx_add_vid(struct net_device *netdev,
			       __be16 proto, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
		igb_vfta_set(hw, vid, pf_id, true, !!vid);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int igb_vlan_rx_kill_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int pf_id = adapter->vfs_allocated_count;
	struct e1000_hw *hw = &adapter->hw;

	/* remove VID from filter table */
	if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
		igb_vfta_set(hw, vid, pf_id, false, true);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid = 1;

	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
	igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NIC's only allow 1000 gbps Full duplex
	 * and 100Mbps Full duplex for 100baseFx sfp
	 */
	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		switch (spd + dplx) {
		case SPEED_10 + DUPLEX_HALF:
		case SPEED_10 + DUPLEX_FULL:
		case SPEED_100 + DUPLEX_HALF:
			goto err_inval;
		default:
			break;
		}
	}

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	adapter->hw.phy.mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
	bool wake;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igb_close(netdev, true);

	igb_ptp_suspend(adapter);

	igb_clear_interrupt_scheme(adapter);
	rtnl_unlock();

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	wake = wufc || adapter->en_mng_pt;
	if (!wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	if (enable_wake)
		*enable_wake = wake;

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

static void igb_deliver_wake_packet(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
		return;

	skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);

	memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}

static int __maybe_unused igb_suspend(struct device *dev)
{
	return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}

static int __maybe_unused igb_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err, val;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter, true)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);

	val = rd32(E1000_WUS);
	if (val & WAKE_PKT_WUS)
		igb_deliver_wake_packet(netdev);

	wr32(E1000_WUS, ~0);

	rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igb_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return err;
}

static int __maybe_unused igb_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!igb_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}

static int __maybe_unused igb_runtime_suspend(struct device *dev)
{
	return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}

static int __maybe_unused igb_runtime_resume(struct device *dev)
{
	return igb_resume(dev);
}

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PCI_IOV
static int igb_sriov_reinit(struct pci_dev *dev)
{
	struct net_device *netdev = pci_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	rtnl_lock();

	if (netif_running(netdev))
		igb_close(netdev);
	else
		igb_reset(adapter);

	igb_clear_interrupt_scheme(adapter);

	igb_init_queue_configuration(adapter);

	if (igb_init_interrupt_scheme(adapter, true)) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		igb_open(netdev);

	rtnl_unlock();

	return 0;
}

static int igb_pci_disable_sriov(struct pci_dev *dev)
{
	int err = igb_disable_sriov(dev);

	if (!err)
		err = igb_sriov_reinit(dev);

	return err;
}

static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
{
	int err = igb_enable_sriov(dev, num_vfs);

	if (err)
		goto out;

	err = igb_sriov_reinit(dev);
	if (!err)
		return num_vfs;

out:
	return err;
}

#endif
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	if (num_vfs == 0)
		return igb_pci_disable_sriov(dev);
	else
		return igb_pci_enable_sriov(dev, num_vfs);
#endif
	return 0;
}
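/**
 *  igb_io_error_detected - called when PCI error is detected
 *  @pdev: Pointer to PCI device
 *  @state: The current pci connection state
 *
 *  This function is called after a PCI bus error affecting
 *  this device has been detected.
 **/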
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
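/**
 *  igb_io_slot_reset - called after the pci bus has been reset.
 *  @pdev: Pointer to PCI device
 *
 *  Restart the card from scratch, as if from a cold-boot. Implementation
 *  resembles the first-half of the igb_resume routine.
 **/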
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		/* In case of PCI error, adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}
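/**
 *  igb_io_resume - called when traffic can start flowing again.
 *  @pdev: Pointer to PCI device
 *
 *  This callback is called when the error recovery driver tells us that
 *  its OK to resume normal operation. Implementation resembles the
 *  second-half of the igb_resume routine.
 **/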
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);
}
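/**
 *  igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
 *  @adapter: Pointer to adapter structure
 *  @index: Index of the RAR entry which need to be synced with MAC table
 **/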
static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rar_low, rar_high;
	u8 *addr = adapter->mac_table[index].addr;

	/* HW expects these to be in network order when they are plugged
	 * into the registers which are little endian.  In order to guarantee
	 * that ordering we need to do an leXX_to_cpup here in order to be
	 * ready for the byteswap that occurs with writel
	 */
	rar_low = le32_to_cpup((__le32 *)(addr));
	rar_high = le16_to_cpup((__le16 *)(addr + 4));

	/* Indicate to hardware the Address is Valid. */
	if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
		if (is_valid_ether_addr(addr))
			rar_high |= E1000_RAH_AV;

		if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
			rar_high |= E1000_RAH_ASEL_SRC_ADDR;

		switch (hw->mac.type) {
		case e1000_82575:
		case e1000_i210:
			if (adapter->mac_table[index].state &
			    IGB_MAC_STATE_QUEUE_STEERING)
				rar_high |= E1000_RAH_QSEL_ENABLE;

			rar_high |= E1000_RAH_POOL_1 *
				    adapter->mac_table[index].queue;
			break;
		default:
			rar_high |= E1000_RAH_POOL_1 <<
				    adapter->mac_table[index].queue;
			break;
		}
	}

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at end of receive addresses and moves
	 * towards the first, as a result a collision should not be possible
	 */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;

	ether_addr_copy(vf_mac_addr, mac_addr);
	ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
	adapter->mac_table[rar_entry].queue = vf;
	adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
	igb_rar_set_index(adapter, rar_entry);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;

	/* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
	 * flag and allows to overwrite the MAC via VF netdev.  This
	 * is necessary to allow libvirt a way to restore the original
	 * MAC after unbinding vfio-pci and reloading igbvf after shutting
	 * down a VM.
	 */
	if (is_zero_ether_addr(mac)) {
		adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
		dev_info(&adapter->pdev->dev,
			 "remove administratively set MAC on VF %d\n",
			 vf);
	} else if (is_valid_ether_addr(mac)) {
		adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
		dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
			 mac, vf);
		dev_info(&adapter->pdev->dev,
			 "Reload the VF driver to make this change effective.\n");
		/* Generate additional warning if PF is down */
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF MAC address has been set, but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before attempting to use the VF device.\n");
		}
	} else {
		return -EINVAL;
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
			 tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
			      E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	/* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
	 */
	wr32(E1000_RTTBCNRM, 0x14);
	wr32(E1000_RTTBCNRC, bcnrc_val);
}

static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	if (min_tx_rate)
		return -EINVAL;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (max_tx_rate < 0) ||
	    (max_tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
	igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);

	return 0;
}

static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 reg_val, reg_offset;

	if (!adapter->vfs_allocated_count)
		return -EOPNOTSUPP;

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;

	reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
	reg_val = rd32(reg_offset);
	if (setting)
		reg_val |= (BIT(vf) |
			    BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
	else
		reg_val &= ~(BIT(vf) |
			     BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
	wr32(reg_offset, reg_val);

	adapter->vf_data[vf].spoofchk_enabled = setting;
	return 0;
}

static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	if (adapter->vf_data[vf].trusted == setting)
		return 0;

	adapter->vf_data[vf].trusted = setting;

	dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
		 vf, setting ? "" : "not ");
	return 0;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
	ivi->trusted = adapter->vf_data[vf].trusted;
	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	case e1000_i354:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
		/* Fall through */
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		/* Fall through */
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
					      adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 dmac_thr;
	u16 hwm;

	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/* force threshold to 0. */
			wr32(E1000_DMCTXTH, 0);

			/* DMA Coalescing high water mark needs to be greater
			 * than the Rx threshold. Set hwm to PBA - max frame
			 * size in 16B units, capping it at PBA - 6KB.
			 */
			hwm = 64 * (pba - 6);
			reg = rd32(E1000_FCRTC);
			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
				& E1000_FCRTC_RTH_COAL_MASK);
			wr32(E1000_FCRTC, reg);

			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
			 * frame size, capping it at PBA - 10KB.
			 */
			dmac_thr = pba - 10;
			reg = rd32(E1000_DMACR);
			reg &= ~E1000_DMACR_DMACTHR_MASK;
			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
				& E1000_DMACR_DMACTHR_MASK);

			/* transition to L0x or L1 if available..*/
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer= +-1000 usec in 32usec intervals */
			reg |= (1000 >> 5);

			/* Disable BMC-to-OS Watchdog Enable */
			if (hw->mac.type != e1000_i354)
				reg &= ~E1000_DMACR_DC_BMC2OSW_EN;

			wr32(E1000_DMACR, reg);

			/* no lower threshold to disable
			 * coalescing(smart fifb)-UTRESH=0
			 */
			wr32(E1000_DMCRTRH, 0);

			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

			wr32(E1000_DMCTLX, reg);

			/* free space in tx packet buffer to wake from
			 * DMA coal
			 */
			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);

			/* make low power state decision controlled
			 * by DMA coal
			 */
			reg = rd32(E1000_PCIEMISC);
			reg &= ~E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* endif adapter->dmac is not disabled */
	} else if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);

		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
		wr32(E1000_DMACR, 0);
	}
}
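/**
 *  igb_read_i2c_byte - Reads 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to read
 *  @dev_addr: device address
 *  @data: value read
 *
 *  Performs byte read operation over I2C interface at
 *  a specified device address.
 **/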
s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		      u8 dev_addr, u8 *data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = 0;

	if (!this_client)
		return E1000_ERR_I2C;

	swfw_mask = E1000_SWFW_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return E1000_ERR_SWFW_SYNC;

	status = i2c_smbus_read_byte_data(this_client, byte_offset);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	if (status < 0)
		return E1000_ERR_I2C;

	*data = status;
	return 0;
}
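/**
 *  igb_write_i2c_byte - Writes 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to write
 *  @dev_addr: device address
 *  @data: value to write
 *
 *  Performs byte write operation over I2C interface at
 *  a specified device address.
 **/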
s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		       u8 dev_addr, u8 data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = E1000_SWFW_PHY0_SM;

	if (!this_client)
		return E1000_ERR_I2C;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return E1000_ERR_SWFW_SYNC;
	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	if (status)
		return E1000_ERR_I2C;

	return 0;
}

int igb_reinit_queues(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (netif_running(netdev))
		igb_close(netdev);

	igb_reset_interrupt_capability(adapter);

	if (igb_init_interrupt_scheme(adapter, true)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igb_open(netdev);

	return err;
}

static void igb_nfc_filter_exit(struct igb_adapter *adapter)
{
	struct igb_nfc_filter *rule;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
		igb_erase_filter(adapter, rule);

	hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
		igb_erase_filter(adapter, rule);

	spin_unlock(&adapter->nfc_lock);
}

static void igb_nfc_filter_restore(struct igb_adapter *adapter)
{
	struct igb_nfc_filter *rule;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
		igb_add_filter(adapter, rule);

	spin_unlock(&adapter->nfc_lock);
}