1
2
3
4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5
6#include <linux/module.h>
7#include <linux/types.h>
8#include <linux/init.h>
9#include <linux/bitops.h>
10#include <linux/vmalloc.h>
11#include <linux/pagemap.h>
12#include <linux/netdevice.h>
13#include <linux/ipv6.h>
14#include <linux/slab.h>
15#include <net/checksum.h>
16#include <net/ip6_checksum.h>
17#include <net/pkt_sched.h>
18#include <net/pkt_cls.h>
19#include <linux/net_tstamp.h>
20#include <linux/mii.h>
21#include <linux/ethtool.h>
22#include <linux/if.h>
23#include <linux/if_vlan.h>
24#include <linux/pci.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/ip.h>
28#include <linux/tcp.h>
29#include <linux/sctp.h>
30#include <linux/if_ether.h>
31#include <linux/aer.h>
32#include <linux/prefetch.h>
33#include <linux/pm_runtime.h>
34#include <linux/etherdevice.h>
35#ifdef CONFIG_IGB_DCA
36#include <linux/dca.h>
37#endif
38#include <linux/i2c.h>
39#include "igb.h"
40
41#define MAJ 5
42#define MIN 6
43#define BUILD 0
44#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
45__stringify(BUILD) "-k"
46
47enum queue_mode {
48 QUEUE_MODE_STRICT_PRIORITY,
49 QUEUE_MODE_STREAM_RESERVATION,
50};
51
52enum tx_queue_prio {
53 TX_QUEUE_PRIO_HIGH,
54 TX_QUEUE_PRIO_LOW,
55};
56
57char igb_driver_name[] = "igb";
58char igb_driver_version[] = DRV_VERSION;
59static const char igb_driver_string[] =
60 "Intel(R) Gigabit Ethernet Network Driver";
61static const char igb_copyright[] =
62 "Copyright (c) 2007-2014 Intel Corporation.";
63
64static const struct e1000_info *igb_info_tbl[] = {
65 [board_82575] = &e1000_82575_info,
66};
67
68static const struct pci_device_id igb_pci_tbl[] = {
69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
83 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
84 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
86 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
87 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
88 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
89 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
90 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
91 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
92 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
93 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
94 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
95 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
96 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
97 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
98 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
99 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
100 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
101 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
102 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
103 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
104
105 {0, }
106};
107
108MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
109
110static int igb_setup_all_tx_resources(struct igb_adapter *);
111static int igb_setup_all_rx_resources(struct igb_adapter *);
112static void igb_free_all_tx_resources(struct igb_adapter *);
113static void igb_free_all_rx_resources(struct igb_adapter *);
114static void igb_setup_mrqc(struct igb_adapter *);
115static int igb_probe(struct pci_dev *, const struct pci_device_id *);
116static void igb_remove(struct pci_dev *pdev);
117static int igb_sw_init(struct igb_adapter *);
118int igb_open(struct net_device *);
119int igb_close(struct net_device *);
120static void igb_configure(struct igb_adapter *);
121static void igb_configure_tx(struct igb_adapter *);
122static void igb_configure_rx(struct igb_adapter *);
123static void igb_clean_all_tx_rings(struct igb_adapter *);
124static void igb_clean_all_rx_rings(struct igb_adapter *);
125static void igb_clean_tx_ring(struct igb_ring *);
126static void igb_clean_rx_ring(struct igb_ring *);
127static void igb_set_rx_mode(struct net_device *);
128static void igb_update_phy_info(struct timer_list *);
129static void igb_watchdog(struct timer_list *);
130static void igb_watchdog_task(struct work_struct *);
131static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
132static void igb_get_stats64(struct net_device *dev,
133 struct rtnl_link_stats64 *stats);
134static int igb_change_mtu(struct net_device *, int);
135static int igb_set_mac(struct net_device *, void *);
136static void igb_set_uta(struct igb_adapter *adapter, bool set);
137static irqreturn_t igb_intr(int irq, void *);
138static irqreturn_t igb_intr_msi(int irq, void *);
139static irqreturn_t igb_msix_other(int irq, void *);
140static irqreturn_t igb_msix_ring(int irq, void *);
141#ifdef CONFIG_IGB_DCA
142static void igb_update_dca(struct igb_q_vector *);
143static void igb_setup_dca(struct igb_adapter *);
144#endif
145static int igb_poll(struct napi_struct *, int);
146static bool igb_clean_tx_irq(struct igb_q_vector *, int);
147static int igb_clean_rx_irq(struct igb_q_vector *, int);
148static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
149static void igb_tx_timeout(struct net_device *);
150static void igb_reset_task(struct work_struct *);
151static void igb_vlan_mode(struct net_device *netdev,
152 netdev_features_t features);
153static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
154static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
155static void igb_restore_vlan(struct igb_adapter *);
156static void igb_rar_set_index(struct igb_adapter *, u32);
157static void igb_ping_all_vfs(struct igb_adapter *);
158static void igb_msg_task(struct igb_adapter *);
159static void igb_vmm_control(struct igb_adapter *);
160static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
161static void igb_flush_mac_table(struct igb_adapter *);
162static int igb_available_rars(struct igb_adapter *, u8);
163static void igb_set_default_mac_filter(struct igb_adapter *);
164static int igb_uc_sync(struct net_device *, const unsigned char *);
165static int igb_uc_unsync(struct net_device *, const unsigned char *);
166static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
167static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
168static int igb_ndo_set_vf_vlan(struct net_device *netdev,
169 int vf, u16 vlan, u8 qos, __be16 vlan_proto);
170static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
171static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
172 bool setting);
173static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
174 bool setting);
175static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
176 struct ifla_vf_info *ivi);
177static void igb_check_vf_rate_limit(struct igb_adapter *);
178static void igb_nfc_filter_exit(struct igb_adapter *adapter);
179static void igb_nfc_filter_restore(struct igb_adapter *adapter);
180
181#ifdef CONFIG_PCI_IOV
182static int igb_vf_configure(struct igb_adapter *adapter, int vf);
183static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
184static int igb_disable_sriov(struct pci_dev *dev);
185static int igb_pci_disable_sriov(struct pci_dev *dev);
186#endif
187
188static int igb_suspend(struct device *);
189static int igb_resume(struct device *);
190static int igb_runtime_suspend(struct device *dev);
191static int igb_runtime_resume(struct device *dev);
192static int igb_runtime_idle(struct device *dev);
193static const struct dev_pm_ops igb_pm_ops = {
194 SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
195 SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
196 igb_runtime_idle)
197};
198static void igb_shutdown(struct pci_dev *);
199static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
200#ifdef CONFIG_IGB_DCA
201static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
202static struct notifier_block dca_notifier = {
203 .notifier_call = igb_notify_dca,
204 .next = NULL,
205 .priority = 0
206};
207#endif
208#ifdef CONFIG_PCI_IOV
209static unsigned int max_vfs;
210module_param(max_vfs, uint, 0);
211MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
212#endif
213
214static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
215 pci_channel_state_t);
216static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
217static void igb_io_resume(struct pci_dev *);
218
219static const struct pci_error_handlers igb_err_handler = {
220 .error_detected = igb_io_error_detected,
221 .slot_reset = igb_io_slot_reset,
222 .resume = igb_io_resume,
223};
224
225static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
226
227static struct pci_driver igb_driver = {
228 .name = igb_driver_name,
229 .id_table = igb_pci_tbl,
230 .probe = igb_probe,
231 .remove = igb_remove,
232#ifdef CONFIG_PM
233 .driver.pm = &igb_pm_ops,
234#endif
235 .shutdown = igb_shutdown,
236 .sriov_configure = igb_pci_sriov_configure,
237 .err_handler = &igb_err_handler
238};
239
240MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
241MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
242MODULE_LICENSE("GPL v2");
243MODULE_VERSION(DRV_VERSION);
244
245#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
246static int debug = -1;
247module_param(debug, int, 0);
248MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
249
250struct igb_reg_info {
251 u32 ofs;
252 char *name;
253};
254
255static const struct igb_reg_info igb_reg_info_tbl[] = {
256
257
258 {E1000_CTRL, "CTRL"},
259 {E1000_STATUS, "STATUS"},
260 {E1000_CTRL_EXT, "CTRL_EXT"},
261
262
263 {E1000_ICR, "ICR"},
264
265
266 {E1000_RCTL, "RCTL"},
267 {E1000_RDLEN(0), "RDLEN"},
268 {E1000_RDH(0), "RDH"},
269 {E1000_RDT(0), "RDT"},
270 {E1000_RXDCTL(0), "RXDCTL"},
271 {E1000_RDBAL(0), "RDBAL"},
272 {E1000_RDBAH(0), "RDBAH"},
273
274
275 {E1000_TCTL, "TCTL"},
276 {E1000_TDBAL(0), "TDBAL"},
277 {E1000_TDBAH(0), "TDBAH"},
278 {E1000_TDLEN(0), "TDLEN"},
279 {E1000_TDH(0), "TDH"},
280 {E1000_TDT(0), "TDT"},
281 {E1000_TXDCTL(0), "TXDCTL"},
282 {E1000_TDFH, "TDFH"},
283 {E1000_TDFT, "TDFT"},
284 {E1000_TDFHS, "TDFHS"},
285 {E1000_TDFPC, "TDFPC"},
286
287
288 {}
289};
290
291
292static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
293{
294 int n = 0;
295 char rname[16];
296 u32 regs[8];
297
298 switch (reginfo->ofs) {
299 case E1000_RDLEN(0):
300 for (n = 0; n < 4; n++)
301 regs[n] = rd32(E1000_RDLEN(n));
302 break;
303 case E1000_RDH(0):
304 for (n = 0; n < 4; n++)
305 regs[n] = rd32(E1000_RDH(n));
306 break;
307 case E1000_RDT(0):
308 for (n = 0; n < 4; n++)
309 regs[n] = rd32(E1000_RDT(n));
310 break;
311 case E1000_RXDCTL(0):
312 for (n = 0; n < 4; n++)
313 regs[n] = rd32(E1000_RXDCTL(n));
314 break;
315 case E1000_RDBAL(0):
316 for (n = 0; n < 4; n++)
317 regs[n] = rd32(E1000_RDBAL(n));
318 break;
319 case E1000_RDBAH(0):
320 for (n = 0; n < 4; n++)
321 regs[n] = rd32(E1000_RDBAH(n));
322 break;
323 case E1000_TDBAL(0):
324 for (n = 0; n < 4; n++)
325 regs[n] = rd32(E1000_RDBAL(n));
326 break;
327 case E1000_TDBAH(0):
328 for (n = 0; n < 4; n++)
329 regs[n] = rd32(E1000_TDBAH(n));
330 break;
331 case E1000_TDLEN(0):
332 for (n = 0; n < 4; n++)
333 regs[n] = rd32(E1000_TDLEN(n));
334 break;
335 case E1000_TDH(0):
336 for (n = 0; n < 4; n++)
337 regs[n] = rd32(E1000_TDH(n));
338 break;
339 case E1000_TDT(0):
340 for (n = 0; n < 4; n++)
341 regs[n] = rd32(E1000_TDT(n));
342 break;
343 case E1000_TXDCTL(0):
344 for (n = 0; n < 4; n++)
345 regs[n] = rd32(E1000_TXDCTL(n));
346 break;
347 default:
348 pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
349 return;
350 }
351
352 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
353 pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
354 regs[2], regs[3]);
355}
356
357
358static void igb_dump(struct igb_adapter *adapter)
359{
360 struct net_device *netdev = adapter->netdev;
361 struct e1000_hw *hw = &adapter->hw;
362 struct igb_reg_info *reginfo;
363 struct igb_ring *tx_ring;
364 union e1000_adv_tx_desc *tx_desc;
365 struct my_u0 { u64 a; u64 b; } *u0;
366 struct igb_ring *rx_ring;
367 union e1000_adv_rx_desc *rx_desc;
368 u32 staterr;
369 u16 i, n;
370
371 if (!netif_msg_hw(adapter))
372 return;
373
374
375 if (netdev) {
376 dev_info(&adapter->pdev->dev, "Net device Info\n");
377 pr_info("Device Name state trans_start\n");
378 pr_info("%-15s %016lX %016lX\n", netdev->name,
379 netdev->state, dev_trans_start(netdev));
380 }
381
382
383 dev_info(&adapter->pdev->dev, "Register Dump\n");
384 pr_info(" Register Name Value\n");
385 for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
386 reginfo->name; reginfo++) {
387 igb_regdump(hw, reginfo);
388 }
389
390
391 if (!netdev || !netif_running(netdev))
392 goto exit;
393
394 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
395 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
396 for (n = 0; n < adapter->num_tx_queues; n++) {
397 struct igb_tx_buffer *buffer_info;
398 tx_ring = adapter->tx_ring[n];
399 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
400 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
401 n, tx_ring->next_to_use, tx_ring->next_to_clean,
402 (u64)dma_unmap_addr(buffer_info, dma),
403 dma_unmap_len(buffer_info, len),
404 buffer_info->next_to_watch,
405 (u64)buffer_info->time_stamp);
406 }
407
408
409 if (!netif_msg_tx_done(adapter))
410 goto rx_ring_summary;
411
412 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
413
414
415
416
417
418
419
420
421
422
423
424
425 for (n = 0; n < adapter->num_tx_queues; n++) {
426 tx_ring = adapter->tx_ring[n];
427 pr_info("------------------------------------\n");
428 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
429 pr_info("------------------------------------\n");
430 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
431
432 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
433 const char *next_desc;
434 struct igb_tx_buffer *buffer_info;
435 tx_desc = IGB_TX_DESC(tx_ring, i);
436 buffer_info = &tx_ring->tx_buffer_info[i];
437 u0 = (struct my_u0 *)tx_desc;
438 if (i == tx_ring->next_to_use &&
439 i == tx_ring->next_to_clean)
440 next_desc = " NTC/U";
441 else if (i == tx_ring->next_to_use)
442 next_desc = " NTU";
443 else if (i == tx_ring->next_to_clean)
444 next_desc = " NTC";
445 else
446 next_desc = "";
447
448 pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
449 i, le64_to_cpu(u0->a),
450 le64_to_cpu(u0->b),
451 (u64)dma_unmap_addr(buffer_info, dma),
452 dma_unmap_len(buffer_info, len),
453 buffer_info->next_to_watch,
454 (u64)buffer_info->time_stamp,
455 buffer_info->skb, next_desc);
456
457 if (netif_msg_pktdata(adapter) && buffer_info->skb)
458 print_hex_dump(KERN_INFO, "",
459 DUMP_PREFIX_ADDRESS,
460 16, 1, buffer_info->skb->data,
461 dma_unmap_len(buffer_info, len),
462 true);
463 }
464 }
465
466
467rx_ring_summary:
468 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
469 pr_info("Queue [NTU] [NTC]\n");
470 for (n = 0; n < adapter->num_rx_queues; n++) {
471 rx_ring = adapter->rx_ring[n];
472 pr_info(" %5d %5X %5X\n",
473 n, rx_ring->next_to_use, rx_ring->next_to_clean);
474 }
475
476
477 if (!netif_msg_rx_status(adapter))
478 goto exit;
479
480 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503 for (n = 0; n < adapter->num_rx_queues; n++) {
504 rx_ring = adapter->rx_ring[n];
505 pr_info("------------------------------------\n");
506 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
507 pr_info("------------------------------------\n");
508 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
509 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
510
511 for (i = 0; i < rx_ring->count; i++) {
512 const char *next_desc;
513 struct igb_rx_buffer *buffer_info;
514 buffer_info = &rx_ring->rx_buffer_info[i];
515 rx_desc = IGB_RX_DESC(rx_ring, i);
516 u0 = (struct my_u0 *)rx_desc;
517 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
518
519 if (i == rx_ring->next_to_use)
520 next_desc = " NTU";
521 else if (i == rx_ring->next_to_clean)
522 next_desc = " NTC";
523 else
524 next_desc = "";
525
526 if (staterr & E1000_RXD_STAT_DD) {
527
528 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
529 "RWB", i,
530 le64_to_cpu(u0->a),
531 le64_to_cpu(u0->b),
532 next_desc);
533 } else {
534 pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
535 "R ", i,
536 le64_to_cpu(u0->a),
537 le64_to_cpu(u0->b),
538 (u64)buffer_info->dma,
539 next_desc);
540
541 if (netif_msg_pktdata(adapter) &&
542 buffer_info->dma && buffer_info->page) {
543 print_hex_dump(KERN_INFO, "",
544 DUMP_PREFIX_ADDRESS,
545 16, 1,
546 page_address(buffer_info->page) +
547 buffer_info->page_offset,
548 igb_rx_bufsz(rx_ring), true);
549 }
550 }
551 }
552 }
553
554exit:
555 return;
556}
557
558
559
560
561
562
563
564
565static int igb_get_i2c_data(void *data)
566{
567 struct igb_adapter *adapter = (struct igb_adapter *)data;
568 struct e1000_hw *hw = &adapter->hw;
569 s32 i2cctl = rd32(E1000_I2CPARAMS);
570
571 return !!(i2cctl & E1000_I2C_DATA_IN);
572}
573
574
575
576
577
578
579
580
581static void igb_set_i2c_data(void *data, int state)
582{
583 struct igb_adapter *adapter = (struct igb_adapter *)data;
584 struct e1000_hw *hw = &adapter->hw;
585 s32 i2cctl = rd32(E1000_I2CPARAMS);
586
587 if (state)
588 i2cctl |= E1000_I2C_DATA_OUT;
589 else
590 i2cctl &= ~E1000_I2C_DATA_OUT;
591
592 i2cctl &= ~E1000_I2C_DATA_OE_N;
593 i2cctl |= E1000_I2C_CLK_OE_N;
594 wr32(E1000_I2CPARAMS, i2cctl);
595 wrfl();
596
597}
598
599
600
601
602
603
604
605
606static void igb_set_i2c_clk(void *data, int state)
607{
608 struct igb_adapter *adapter = (struct igb_adapter *)data;
609 struct e1000_hw *hw = &adapter->hw;
610 s32 i2cctl = rd32(E1000_I2CPARAMS);
611
612 if (state) {
613 i2cctl |= E1000_I2C_CLK_OUT;
614 i2cctl &= ~E1000_I2C_CLK_OE_N;
615 } else {
616 i2cctl &= ~E1000_I2C_CLK_OUT;
617 i2cctl &= ~E1000_I2C_CLK_OE_N;
618 }
619 wr32(E1000_I2CPARAMS, i2cctl);
620 wrfl();
621}
622
623
624
625
626
627
628
629static int igb_get_i2c_clk(void *data)
630{
631 struct igb_adapter *adapter = (struct igb_adapter *)data;
632 struct e1000_hw *hw = &adapter->hw;
633 s32 i2cctl = rd32(E1000_I2CPARAMS);
634
635 return !!(i2cctl & E1000_I2C_CLK_IN);
636}
637
638static const struct i2c_algo_bit_data igb_i2c_algo = {
639 .setsda = igb_set_i2c_data,
640 .setscl = igb_set_i2c_clk,
641 .getsda = igb_get_i2c_data,
642 .getscl = igb_get_i2c_clk,
643 .udelay = 5,
644 .timeout = 20,
645};
646
647
648
649
650
651
652
653struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
654{
655 struct igb_adapter *adapter = hw->back;
656 return adapter->netdev;
657}
658
659
660
661
662
663
664
665static int __init igb_init_module(void)
666{
667 int ret;
668
669 pr_info("%s - version %s\n",
670 igb_driver_string, igb_driver_version);
671 pr_info("%s\n", igb_copyright);
672
673#ifdef CONFIG_IGB_DCA
674 dca_register_notify(&dca_notifier);
675#endif
676 ret = pci_register_driver(&igb_driver);
677 return ret;
678}
679
680module_init(igb_init_module);
681
682
683
684
685
686
687
688static void __exit igb_exit_module(void)
689{
690#ifdef CONFIG_IGB_DCA
691 dca_unregister_notify(&dca_notifier);
692#endif
693 pci_unregister_driver(&igb_driver);
694}
695
696module_exit(igb_exit_module);
697
698#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
699
700
701
702
703
704
705
706static void igb_cache_ring_register(struct igb_adapter *adapter)
707{
708 int i = 0, j = 0;
709 u32 rbase_offset = adapter->vfs_allocated_count;
710
711 switch (adapter->hw.mac.type) {
712 case e1000_82576:
713
714
715
716
717
718 if (adapter->vfs_allocated_count) {
719 for (; i < adapter->rss_queues; i++)
720 adapter->rx_ring[i]->reg_idx = rbase_offset +
721 Q_IDX_82576(i);
722 }
723
724 case e1000_82575:
725 case e1000_82580:
726 case e1000_i350:
727 case e1000_i354:
728 case e1000_i210:
729 case e1000_i211:
730
731 default:
732 for (; i < adapter->num_rx_queues; i++)
733 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
734 for (; j < adapter->num_tx_queues; j++)
735 adapter->tx_ring[j]->reg_idx = rbase_offset + j;
736 break;
737 }
738}
739
740u32 igb_rd32(struct e1000_hw *hw, u32 reg)
741{
742 struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
743 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
744 u32 value = 0;
745
746 if (E1000_REMOVED(hw_addr))
747 return ~value;
748
749 value = readl(&hw_addr[reg]);
750
751
752 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
753 struct net_device *netdev = igb->netdev;
754 hw->hw_addr = NULL;
755 netdev_err(netdev, "PCIe link lost\n");
756 }
757
758 return value;
759}
760
761
762
763
764
765
766
767
768
769
770
771
772
773static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
774 int index, int offset)
775{
776 u32 ivar = array_rd32(E1000_IVAR0, index);
777
778
779 ivar &= ~((u32)0xFF << offset);
780
781
782 ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
783
784 array_wr32(E1000_IVAR0, index, ivar);
785}
786
787#define IGB_N0_QUEUE -1
788static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
789{
790 struct igb_adapter *adapter = q_vector->adapter;
791 struct e1000_hw *hw = &adapter->hw;
792 int rx_queue = IGB_N0_QUEUE;
793 int tx_queue = IGB_N0_QUEUE;
794 u32 msixbm = 0;
795
796 if (q_vector->rx.ring)
797 rx_queue = q_vector->rx.ring->reg_idx;
798 if (q_vector->tx.ring)
799 tx_queue = q_vector->tx.ring->reg_idx;
800
801 switch (hw->mac.type) {
802 case e1000_82575:
803
804
805
806
807
808 if (rx_queue > IGB_N0_QUEUE)
809 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
810 if (tx_queue > IGB_N0_QUEUE)
811 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
812 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
813 msixbm |= E1000_EIMS_OTHER;
814 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
815 q_vector->eims_value = msixbm;
816 break;
817 case e1000_82576:
818
819
820
821
822
823 if (rx_queue > IGB_N0_QUEUE)
824 igb_write_ivar(hw, msix_vector,
825 rx_queue & 0x7,
826 (rx_queue & 0x8) << 1);
827 if (tx_queue > IGB_N0_QUEUE)
828 igb_write_ivar(hw, msix_vector,
829 tx_queue & 0x7,
830 ((tx_queue & 0x8) << 1) + 8);
831 q_vector->eims_value = BIT(msix_vector);
832 break;
833 case e1000_82580:
834 case e1000_i350:
835 case e1000_i354:
836 case e1000_i210:
837 case e1000_i211:
838
839
840
841
842
843
844 if (rx_queue > IGB_N0_QUEUE)
845 igb_write_ivar(hw, msix_vector,
846 rx_queue >> 1,
847 (rx_queue & 0x1) << 4);
848 if (tx_queue > IGB_N0_QUEUE)
849 igb_write_ivar(hw, msix_vector,
850 tx_queue >> 1,
851 ((tx_queue & 0x1) << 4) + 8);
852 q_vector->eims_value = BIT(msix_vector);
853 break;
854 default:
855 BUG();
856 break;
857 }
858
859
860 adapter->eims_enable_mask |= q_vector->eims_value;
861
862
863 q_vector->set_itr = 1;
864}
865
866
867
868
869
870
871
872
873static void igb_configure_msix(struct igb_adapter *adapter)
874{
875 u32 tmp;
876 int i, vector = 0;
877 struct e1000_hw *hw = &adapter->hw;
878
879 adapter->eims_enable_mask = 0;
880
881
882 switch (hw->mac.type) {
883 case e1000_82575:
884 tmp = rd32(E1000_CTRL_EXT);
885
886 tmp |= E1000_CTRL_EXT_PBA_CLR;
887
888
889 tmp |= E1000_CTRL_EXT_EIAME;
890 tmp |= E1000_CTRL_EXT_IRCA;
891
892 wr32(E1000_CTRL_EXT, tmp);
893
894
895 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
896 adapter->eims_other = E1000_EIMS_OTHER;
897
898 break;
899
900 case e1000_82576:
901 case e1000_82580:
902 case e1000_i350:
903 case e1000_i354:
904 case e1000_i210:
905 case e1000_i211:
906
907
908
909 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
910 E1000_GPIE_PBA | E1000_GPIE_EIAME |
911 E1000_GPIE_NSICR);
912
913
914 adapter->eims_other = BIT(vector);
915 tmp = (vector++ | E1000_IVAR_VALID) << 8;
916
917 wr32(E1000_IVAR_MISC, tmp);
918 break;
919 default:
920
921 break;
922 }
923
924 adapter->eims_enable_mask |= adapter->eims_other;
925
926 for (i = 0; i < adapter->num_q_vectors; i++)
927 igb_assign_vector(adapter->q_vector[i], vector++);
928
929 wrfl();
930}
931
932
933
934
935
936
937
938
939static int igb_request_msix(struct igb_adapter *adapter)
940{
941 struct net_device *netdev = adapter->netdev;
942 int i, err = 0, vector = 0, free_vector = 0;
943
944 err = request_irq(adapter->msix_entries[vector].vector,
945 igb_msix_other, 0, netdev->name, adapter);
946 if (err)
947 goto err_out;
948
949 for (i = 0; i < adapter->num_q_vectors; i++) {
950 struct igb_q_vector *q_vector = adapter->q_vector[i];
951
952 vector++;
953
954 q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
955
956 if (q_vector->rx.ring && q_vector->tx.ring)
957 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
958 q_vector->rx.ring->queue_index);
959 else if (q_vector->tx.ring)
960 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
961 q_vector->tx.ring->queue_index);
962 else if (q_vector->rx.ring)
963 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
964 q_vector->rx.ring->queue_index);
965 else
966 sprintf(q_vector->name, "%s-unused", netdev->name);
967
968 err = request_irq(adapter->msix_entries[vector].vector,
969 igb_msix_ring, 0, q_vector->name,
970 q_vector);
971 if (err)
972 goto err_free;
973 }
974
975 igb_configure_msix(adapter);
976 return 0;
977
978err_free:
979
980 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
981
982 vector--;
983 for (i = 0; i < vector; i++) {
984 free_irq(adapter->msix_entries[free_vector++].vector,
985 adapter->q_vector[i]);
986 }
987err_out:
988 return err;
989}
990
991
992
993
994
995
996
997
998static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
999{
1000 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1001
1002 adapter->q_vector[v_idx] = NULL;
1003
1004
1005
1006
1007 if (q_vector)
1008 kfree_rcu(q_vector, rcu);
1009}
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1020{
1021 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1022
1023
1024
1025
1026 if (!q_vector)
1027 return;
1028
1029 if (q_vector->tx.ring)
1030 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1031
1032 if (q_vector->rx.ring)
1033 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
1034
1035 netif_napi_del(&q_vector->napi);
1036
1037}
1038
1039static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1040{
1041 int v_idx = adapter->num_q_vectors;
1042
1043 if (adapter->flags & IGB_FLAG_HAS_MSIX)
1044 pci_disable_msix(adapter->pdev);
1045 else if (adapter->flags & IGB_FLAG_HAS_MSI)
1046 pci_disable_msi(adapter->pdev);
1047
1048 while (v_idx--)
1049 igb_reset_q_vector(adapter, v_idx);
1050}
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060static void igb_free_q_vectors(struct igb_adapter *adapter)
1061{
1062 int v_idx = adapter->num_q_vectors;
1063
1064 adapter->num_tx_queues = 0;
1065 adapter->num_rx_queues = 0;
1066 adapter->num_q_vectors = 0;
1067
1068 while (v_idx--) {
1069 igb_reset_q_vector(adapter, v_idx);
1070 igb_free_q_vector(adapter, v_idx);
1071 }
1072}
1073
1074
1075
1076
1077
1078
1079
1080
1081static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1082{
1083 igb_free_q_vectors(adapter);
1084 igb_reset_interrupt_capability(adapter);
1085}
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1096{
1097 int err;
1098 int numvecs, i;
1099
1100 if (!msix)
1101 goto msi_only;
1102 adapter->flags |= IGB_FLAG_HAS_MSIX;
1103
1104
1105 adapter->num_rx_queues = adapter->rss_queues;
1106 if (adapter->vfs_allocated_count)
1107 adapter->num_tx_queues = 1;
1108 else
1109 adapter->num_tx_queues = adapter->rss_queues;
1110
1111
1112 numvecs = adapter->num_rx_queues;
1113
1114
1115 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1116 numvecs += adapter->num_tx_queues;
1117
1118
1119 adapter->num_q_vectors = numvecs;
1120
1121
1122 numvecs++;
1123 for (i = 0; i < numvecs; i++)
1124 adapter->msix_entries[i].entry = i;
1125
1126 err = pci_enable_msix_range(adapter->pdev,
1127 adapter->msix_entries,
1128 numvecs,
1129 numvecs);
1130 if (err > 0)
1131 return;
1132
1133 igb_reset_interrupt_capability(adapter);
1134
1135
1136msi_only:
1137 adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1138#ifdef CONFIG_PCI_IOV
1139
1140 if (adapter->vf_data) {
1141 struct e1000_hw *hw = &adapter->hw;
1142
1143 pci_disable_sriov(adapter->pdev);
1144 msleep(500);
1145
1146 kfree(adapter->vf_mac_list);
1147 adapter->vf_mac_list = NULL;
1148 kfree(adapter->vf_data);
1149 adapter->vf_data = NULL;
1150 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1151 wrfl();
1152 msleep(100);
1153 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1154 }
1155#endif
1156 adapter->vfs_allocated_count = 0;
1157 adapter->rss_queues = 1;
1158 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1159 adapter->num_rx_queues = 1;
1160 adapter->num_tx_queues = 1;
1161 adapter->num_q_vectors = 1;
1162 if (!pci_enable_msi(adapter->pdev))
1163 adapter->flags |= IGB_FLAG_HAS_MSI;
1164}
1165
1166static void igb_add_ring(struct igb_ring *ring,
1167 struct igb_ring_container *head)
1168{
1169 head->ring = ring;
1170 head->count++;
1171}
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185static int igb_alloc_q_vector(struct igb_adapter *adapter,
1186 int v_count, int v_idx,
1187 int txr_count, int txr_idx,
1188 int rxr_count, int rxr_idx)
1189{
1190 struct igb_q_vector *q_vector;
1191 struct igb_ring *ring;
1192 int ring_count;
1193 size_t size;
1194
1195
1196 if (txr_count > 1 || rxr_count > 1)
1197 return -ENOMEM;
1198
1199 ring_count = txr_count + rxr_count;
1200 size = struct_size(q_vector, ring, ring_count);
1201
1202
1203 q_vector = adapter->q_vector[v_idx];
1204 if (!q_vector) {
1205 q_vector = kzalloc(size, GFP_KERNEL);
1206 } else if (size > ksize(q_vector)) {
1207 kfree_rcu(q_vector, rcu);
1208 q_vector = kzalloc(size, GFP_KERNEL);
1209 } else {
1210 memset(q_vector, 0, size);
1211 }
1212 if (!q_vector)
1213 return -ENOMEM;
1214
1215
1216 netif_napi_add(adapter->netdev, &q_vector->napi,
1217 igb_poll, 64);
1218
1219
1220 adapter->q_vector[v_idx] = q_vector;
1221 q_vector->adapter = adapter;
1222
1223
1224 q_vector->tx.work_limit = adapter->tx_work_limit;
1225
1226
1227 q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
1228 q_vector->itr_val = IGB_START_ITR;
1229
1230
1231 ring = q_vector->ring;
1232
1233
1234 if (rxr_count) {
1235
1236 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1237 q_vector->itr_val = adapter->rx_itr_setting;
1238 } else {
1239
1240 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1241 q_vector->itr_val = adapter->tx_itr_setting;
1242 }
1243
1244 if (txr_count) {
1245
1246 ring->dev = &adapter->pdev->dev;
1247 ring->netdev = adapter->netdev;
1248
1249
1250 ring->q_vector = q_vector;
1251
1252
1253 igb_add_ring(ring, &q_vector->tx);
1254
1255
1256 if (adapter->hw.mac.type == e1000_82575)
1257 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1258
1259
1260 ring->count = adapter->tx_ring_count;
1261 ring->queue_index = txr_idx;
1262
1263 ring->cbs_enable = false;
1264 ring->idleslope = 0;
1265 ring->sendslope = 0;
1266 ring->hicredit = 0;
1267 ring->locredit = 0;
1268
1269 u64_stats_init(&ring->tx_syncp);
1270 u64_stats_init(&ring->tx_syncp2);
1271
1272
1273 adapter->tx_ring[txr_idx] = ring;
1274
1275
1276 ring++;
1277 }
1278
1279 if (rxr_count) {
1280
1281 ring->dev = &adapter->pdev->dev;
1282 ring->netdev = adapter->netdev;
1283
1284
1285 ring->q_vector = q_vector;
1286
1287
1288 igb_add_ring(ring, &q_vector->rx);
1289
1290
1291 if (adapter->hw.mac.type >= e1000_82576)
1292 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1293
1294
1295
1296
1297 if (adapter->hw.mac.type >= e1000_i350)
1298 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1299
1300
1301 ring->count = adapter->rx_ring_count;
1302 ring->queue_index = rxr_idx;
1303
1304 u64_stats_init(&ring->rx_syncp);
1305
1306
1307 adapter->rx_ring[rxr_idx] = ring;
1308 }
1309
1310 return 0;
1311}
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1322{
1323 int q_vectors = adapter->num_q_vectors;
1324 int rxr_remaining = adapter->num_rx_queues;
1325 int txr_remaining = adapter->num_tx_queues;
1326 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1327 int err;
1328
1329 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1330 for (; rxr_remaining; v_idx++) {
1331 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1332 0, 0, 1, rxr_idx);
1333
1334 if (err)
1335 goto err_out;
1336
1337
1338 rxr_remaining--;
1339 rxr_idx++;
1340 }
1341 }
1342
1343 for (; v_idx < q_vectors; v_idx++) {
1344 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1345 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1346
1347 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1348 tqpv, txr_idx, rqpv, rxr_idx);
1349
1350 if (err)
1351 goto err_out;
1352
1353
1354 rxr_remaining -= rqpv;
1355 txr_remaining -= tqpv;
1356 rxr_idx++;
1357 txr_idx++;
1358 }
1359
1360 return 0;
1361
1362err_out:
1363 adapter->num_tx_queues = 0;
1364 adapter->num_rx_queues = 0;
1365 adapter->num_q_vectors = 0;
1366
1367 while (v_idx--)
1368 igb_free_q_vector(adapter, v_idx);
1369
1370 return -ENOMEM;
1371}
1372
1373
1374
1375
1376
1377
1378
1379
1380static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1381{
1382 struct pci_dev *pdev = adapter->pdev;
1383 int err;
1384
1385 igb_set_interrupt_capability(adapter, msix);
1386
1387 err = igb_alloc_q_vectors(adapter);
1388 if (err) {
1389 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1390 goto err_alloc_q_vectors;
1391 }
1392
1393 igb_cache_ring_register(adapter);
1394
1395 return 0;
1396
1397err_alloc_q_vectors:
1398 igb_reset_interrupt_capability(adapter);
1399 return err;
1400}
1401
1402
1403
1404
1405
1406
1407
1408
1409static int igb_request_irq(struct igb_adapter *adapter)
1410{
1411 struct net_device *netdev = adapter->netdev;
1412 struct pci_dev *pdev = adapter->pdev;
1413 int err = 0;
1414
1415 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1416 err = igb_request_msix(adapter);
1417 if (!err)
1418 goto request_done;
1419
1420 igb_free_all_tx_resources(adapter);
1421 igb_free_all_rx_resources(adapter);
1422
1423 igb_clear_interrupt_scheme(adapter);
1424 err = igb_init_interrupt_scheme(adapter, false);
1425 if (err)
1426 goto request_done;
1427
1428 igb_setup_all_tx_resources(adapter);
1429 igb_setup_all_rx_resources(adapter);
1430 igb_configure(adapter);
1431 }
1432
1433 igb_assign_vector(adapter->q_vector[0], 0);
1434
1435 if (adapter->flags & IGB_FLAG_HAS_MSI) {
1436 err = request_irq(pdev->irq, igb_intr_msi, 0,
1437 netdev->name, adapter);
1438 if (!err)
1439 goto request_done;
1440
1441
1442 igb_reset_interrupt_capability(adapter);
1443 adapter->flags &= ~IGB_FLAG_HAS_MSI;
1444 }
1445
1446 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
1447 netdev->name, adapter);
1448
1449 if (err)
1450 dev_err(&pdev->dev, "Error %d getting interrupt\n",
1451 err);
1452
1453request_done:
1454 return err;
1455}
1456
1457static void igb_free_irq(struct igb_adapter *adapter)
1458{
1459 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1460 int vector = 0, i;
1461
1462 free_irq(adapter->msix_entries[vector++].vector, adapter);
1463
1464 for (i = 0; i < adapter->num_q_vectors; i++)
1465 free_irq(adapter->msix_entries[vector++].vector,
1466 adapter->q_vector[i]);
1467 } else {
1468 free_irq(adapter->pdev->irq, adapter);
1469 }
1470}
1471
1472
1473
1474
1475
1476static void igb_irq_disable(struct igb_adapter *adapter)
1477{
1478 struct e1000_hw *hw = &adapter->hw;
1479
1480
1481
1482
1483
1484 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1485 u32 regval = rd32(E1000_EIAM);
1486
1487 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1488 wr32(E1000_EIMC, adapter->eims_enable_mask);
1489 regval = rd32(E1000_EIAC);
1490 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
1491 }
1492
1493 wr32(E1000_IAM, 0);
1494 wr32(E1000_IMC, ~0);
1495 wrfl();
1496 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1497 int i;
1498
1499 for (i = 0; i < adapter->num_q_vectors; i++)
1500 synchronize_irq(adapter->msix_entries[i].vector);
1501 } else {
1502 synchronize_irq(adapter->pdev->irq);
1503 }
1504}
1505
1506
1507
1508
1509
1510static void igb_irq_enable(struct igb_adapter *adapter)
1511{
1512 struct e1000_hw *hw = &adapter->hw;
1513
1514 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1515 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1516 u32 regval = rd32(E1000_EIAC);
1517
1518 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1519 regval = rd32(E1000_EIAM);
1520 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
1521 wr32(E1000_EIMS, adapter->eims_enable_mask);
1522 if (adapter->vfs_allocated_count) {
1523 wr32(E1000_MBVFIMR, 0xFF);
1524 ims |= E1000_IMS_VMMB;
1525 }
1526 wr32(E1000_IMS, ims);
1527 } else {
1528 wr32(E1000_IMS, IMS_ENABLE_MASK |
1529 E1000_IMS_DRSTA);
1530 wr32(E1000_IAM, IMS_ENABLE_MASK |
1531 E1000_IMS_DRSTA);
1532 }
1533}
1534
1535static void igb_update_mng_vlan(struct igb_adapter *adapter)
1536{
1537 struct e1000_hw *hw = &adapter->hw;
1538 u16 pf_id = adapter->vfs_allocated_count;
1539 u16 vid = adapter->hw.mng_cookie.vlan_id;
1540 u16 old_vid = adapter->mng_vlan_id;
1541
1542 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1543
1544 igb_vfta_set(hw, vid, pf_id, true, true);
1545 adapter->mng_vlan_id = vid;
1546 } else {
1547 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1548 }
1549
1550 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1551 (vid != old_vid) &&
1552 !test_bit(old_vid, adapter->active_vlans)) {
1553
1554 igb_vfta_set(hw, vid, pf_id, false, true);
1555 }
1556}
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566static void igb_release_hw_control(struct igb_adapter *adapter)
1567{
1568 struct e1000_hw *hw = &adapter->hw;
1569 u32 ctrl_ext;
1570
1571
1572 ctrl_ext = rd32(E1000_CTRL_EXT);
1573 wr32(E1000_CTRL_EXT,
1574 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1575}
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585static void igb_get_hw_control(struct igb_adapter *adapter)
1586{
1587 struct e1000_hw *hw = &adapter->hw;
1588 u32 ctrl_ext;
1589
1590
1591 ctrl_ext = rd32(E1000_CTRL_EXT);
1592 wr32(E1000_CTRL_EXT,
1593 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1594}
1595
1596static void enable_fqtss(struct igb_adapter *adapter, bool enable)
1597{
1598 struct net_device *netdev = adapter->netdev;
1599 struct e1000_hw *hw = &adapter->hw;
1600
1601 WARN_ON(hw->mac.type != e1000_i210);
1602
1603 if (enable)
1604 adapter->flags |= IGB_FLAG_FQTSS;
1605 else
1606 adapter->flags &= ~IGB_FLAG_FQTSS;
1607
1608 if (netif_running(netdev))
1609 schedule_work(&adapter->reset_task);
1610}
1611
1612static bool is_fqtss_enabled(struct igb_adapter *adapter)
1613{
1614 return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
1615}
1616
1617static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
1618 enum tx_queue_prio prio)
1619{
1620 u32 val;
1621
1622 WARN_ON(hw->mac.type != e1000_i210);
1623 WARN_ON(queue < 0 || queue > 4);
1624
1625 val = rd32(E1000_I210_TXDCTL(queue));
1626
1627 if (prio == TX_QUEUE_PRIO_HIGH)
1628 val |= E1000_TXDCTL_PRIORITY;
1629 else
1630 val &= ~E1000_TXDCTL_PRIORITY;
1631
1632 wr32(E1000_I210_TXDCTL(queue), val);
1633}
1634
1635static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
1636{
1637 u32 val;
1638
1639 WARN_ON(hw->mac.type != e1000_i210);
1640 WARN_ON(queue < 0 || queue > 1);
1641
1642 val = rd32(E1000_I210_TQAVCC(queue));
1643
1644 if (mode == QUEUE_MODE_STREAM_RESERVATION)
1645 val |= E1000_TQAVCC_QUEUEMODE;
1646 else
1647 val &= ~E1000_TQAVCC_QUEUEMODE;
1648
1649 wr32(E1000_I210_TQAVCC(queue), val);
1650}
1651
1652static bool is_any_cbs_enabled(struct igb_adapter *adapter)
1653{
1654 int i;
1655
1656 for (i = 0; i < adapter->num_tx_queues; i++) {
1657 if (adapter->tx_ring[i]->cbs_enable)
1658 return true;
1659 }
1660
1661 return false;
1662}
1663
1664static bool is_any_txtime_enabled(struct igb_adapter *adapter)
1665{
1666 int i;
1667
1668 for (i = 0; i < adapter->num_tx_queues; i++) {
1669 if (adapter->tx_ring[i]->launchtime_enable)
1670 return true;
1671 }
1672
1673 return false;
1674}
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
1687{
1688 struct igb_ring *ring = adapter->tx_ring[queue];
1689 struct net_device *netdev = adapter->netdev;
1690 struct e1000_hw *hw = &adapter->hw;
1691 u32 tqavcc, tqavctrl;
1692 u16 value;
1693
1694 WARN_ON(hw->mac.type != e1000_i210);
1695 WARN_ON(queue < 0 || queue > 1);
1696
1697
1698
1699
1700
1701 if (ring->cbs_enable || ring->launchtime_enable) {
1702 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1703 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1704 } else {
1705 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
1706 set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
1707 }
1708
1709
1710 if (ring->cbs_enable || queue == 0) {
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720 if (queue == 0 && !ring->cbs_enable) {
1721
1722 ring->idleslope = 1000000;
1723 ring->hicredit = ETH_FRAME_LEN;
1724 }
1725
1726
1727
1728
1729
1730 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1731 tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
1732 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791 value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
1792
1793 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1794 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1795 tqavcc |= value;
1796 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1797
1798 wr32(E1000_I210_TQAVHC(queue),
1799 0x80000000 + ring->hicredit * 0x7735);
1800 } else {
1801
1802
1803 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1804 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1805 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1806
1807
1808 wr32(E1000_I210_TQAVHC(queue), 0);
1809
1810
1811
1812
1813
1814 if (!is_any_cbs_enabled(adapter)) {
1815 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1816 tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
1817 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1818 }
1819 }
1820
1821
1822 if (ring->launchtime_enable) {
1823
1824
1825
1826
1827
1828
1829
1830
1831 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1832 tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
1833 E1000_TQAVCTRL_FETCHTIME_DELTA;
1834 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1835 } else {
1836
1837
1838
1839
1840 if (!is_any_txtime_enabled(adapter)) {
1841 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1842 tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
1843 tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
1844 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1845 }
1846 }
1847
1848
1849
1850
1851
1852
1853 netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
1854 ring->cbs_enable ? "enabled" : "disabled",
1855 ring->launchtime_enable ? "enabled" : "disabled",
1856 queue,
1857 ring->idleslope, ring->sendslope,
1858 ring->hicredit, ring->locredit);
1859}
1860
1861static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
1862 bool enable)
1863{
1864 struct igb_ring *ring;
1865
1866 if (queue < 0 || queue > adapter->num_tx_queues)
1867 return -EINVAL;
1868
1869 ring = adapter->tx_ring[queue];
1870 ring->launchtime_enable = enable;
1871
1872 return 0;
1873}
1874
1875static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1876 bool enable, int idleslope, int sendslope,
1877 int hicredit, int locredit)
1878{
1879 struct igb_ring *ring;
1880
1881 if (queue < 0 || queue > adapter->num_tx_queues)
1882 return -EINVAL;
1883
1884 ring = adapter->tx_ring[queue];
1885
1886 ring->cbs_enable = enable;
1887 ring->idleslope = idleslope;
1888 ring->sendslope = sendslope;
1889 ring->hicredit = hicredit;
1890 ring->locredit = locredit;
1891
1892 return 0;
1893}
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904static void igb_setup_tx_mode(struct igb_adapter *adapter)
1905{
1906 struct net_device *netdev = adapter->netdev;
1907 struct e1000_hw *hw = &adapter->hw;
1908 u32 val;
1909
1910
1911 if (hw->mac.type != e1000_i210)
1912 return;
1913
1914 if (is_fqtss_enabled(adapter)) {
1915 int i, max_queue;
1916
1917
1918
1919
1920
1921 val = rd32(E1000_I210_TQAVCTRL);
1922 val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
1923 val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1924 wr32(E1000_I210_TQAVCTRL, val);
1925
1926
1927
1928
1929 val = rd32(E1000_TXPBS);
1930 val &= ~I210_TXPBSIZE_MASK;
1931 val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
1932 I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
1933 wr32(E1000_TXPBS, val);
1934
1935 val = rd32(E1000_RXPBS);
1936 val &= ~I210_RXPBSIZE_MASK;
1937 val |= I210_RXPBSIZE_PB_30KB;
1938 wr32(E1000_RXPBS, val);
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951 val = (4096 - 1) / 64;
1952 wr32(E1000_I210_DTXMXPKTSZ, val);
1953
1954
1955
1956
1957
1958
1959 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1960 adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1961
1962 for (i = 0; i < max_queue; i++) {
1963 igb_config_tx_modes(adapter, i);
1964 }
1965 } else {
1966 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1967 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1968 wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1969
1970 val = rd32(E1000_I210_TQAVCTRL);
1971
1972
1973
1974
1975 val &= ~E1000_TQAVCTRL_XMIT_MODE;
1976 wr32(E1000_I210_TQAVCTRL, val);
1977 }
1978
1979 netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1980 "enabled" : "disabled");
1981}
1982
1983
1984
1985
1986
1987static void igb_configure(struct igb_adapter *adapter)
1988{
1989 struct net_device *netdev = adapter->netdev;
1990 int i;
1991
1992 igb_get_hw_control(adapter);
1993 igb_set_rx_mode(netdev);
1994 igb_setup_tx_mode(adapter);
1995
1996 igb_restore_vlan(adapter);
1997
1998 igb_setup_tctl(adapter);
1999 igb_setup_mrqc(adapter);
2000 igb_setup_rctl(adapter);
2001
2002 igb_nfc_filter_restore(adapter);
2003 igb_configure_tx(adapter);
2004 igb_configure_rx(adapter);
2005
2006 igb_rx_fifo_flush_82575(&adapter->hw);
2007
2008
2009
2010
2011
2012 for (i = 0; i < adapter->num_rx_queues; i++) {
2013 struct igb_ring *ring = adapter->rx_ring[i];
2014 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
2015 }
2016}
2017
2018
2019
2020
2021
2022void igb_power_up_link(struct igb_adapter *adapter)
2023{
2024 igb_reset_phy(&adapter->hw);
2025
2026 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2027 igb_power_up_phy_copper(&adapter->hw);
2028 else
2029 igb_power_up_serdes_link_82575(&adapter->hw);
2030
2031 igb_setup_link(&adapter->hw);
2032}
2033
2034
2035
2036
2037
2038static void igb_power_down_link(struct igb_adapter *adapter)
2039{
2040 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2041 igb_power_down_phy_copper_82575(&adapter->hw);
2042 else
2043 igb_shutdown_serdes_link_82575(&adapter->hw);
2044}
2045
2046
2047
2048
2049
2050static void igb_check_swap_media(struct igb_adapter *adapter)
2051{
2052 struct e1000_hw *hw = &adapter->hw;
2053 u32 ctrl_ext, connsw;
2054 bool swap_now = false;
2055
2056 ctrl_ext = rd32(E1000_CTRL_EXT);
2057 connsw = rd32(E1000_CONNSW);
2058
2059
2060
2061
2062
2063 if ((hw->phy.media_type == e1000_media_type_copper) &&
2064 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
2065 swap_now = true;
2066 } else if (!(connsw & E1000_CONNSW_SERDESD)) {
2067
2068 if (adapter->copper_tries < 4) {
2069 adapter->copper_tries++;
2070 connsw |= E1000_CONNSW_AUTOSENSE_CONF;
2071 wr32(E1000_CONNSW, connsw);
2072 return;
2073 } else {
2074 adapter->copper_tries = 0;
2075 if ((connsw & E1000_CONNSW_PHYSD) &&
2076 (!(connsw & E1000_CONNSW_PHY_PDN))) {
2077 swap_now = true;
2078 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
2079 wr32(E1000_CONNSW, connsw);
2080 }
2081 }
2082 }
2083
2084 if (!swap_now)
2085 return;
2086
2087 switch (hw->phy.media_type) {
2088 case e1000_media_type_copper:
2089 netdev_info(adapter->netdev,
2090 "MAS: changing media to fiber/serdes\n");
2091 ctrl_ext |=
2092 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2093 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2094 adapter->copper_tries = 0;
2095 break;
2096 case e1000_media_type_internal_serdes:
2097 case e1000_media_type_fiber:
2098 netdev_info(adapter->netdev,
2099 "MAS: changing media to copper\n");
2100 ctrl_ext &=
2101 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2102 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2103 break;
2104 default:
2105
2106 netdev_err(adapter->netdev,
2107 "AMS: Invalid media type found, returning\n");
2108 break;
2109 }
2110 wr32(E1000_CTRL_EXT, ctrl_ext);
2111}
2112
2113
2114
2115
2116
2117int igb_up(struct igb_adapter *adapter)
2118{
2119 struct e1000_hw *hw = &adapter->hw;
2120 int i;
2121
2122
2123 igb_configure(adapter);
2124
2125 clear_bit(__IGB_DOWN, &adapter->state);
2126
2127 for (i = 0; i < adapter->num_q_vectors; i++)
2128 napi_enable(&(adapter->q_vector[i]->napi));
2129
2130 if (adapter->flags & IGB_FLAG_HAS_MSIX)
2131 igb_configure_msix(adapter);
2132 else
2133 igb_assign_vector(adapter->q_vector[0], 0);
2134
2135
2136 rd32(E1000_TSICR);
2137 rd32(E1000_ICR);
2138 igb_irq_enable(adapter);
2139
2140
2141 if (adapter->vfs_allocated_count) {
2142 u32 reg_data = rd32(E1000_CTRL_EXT);
2143
2144 reg_data |= E1000_CTRL_EXT_PFRSTD;
2145 wr32(E1000_CTRL_EXT, reg_data);
2146 }
2147
2148 netif_tx_start_all_queues(adapter->netdev);
2149
2150
2151 hw->mac.get_link_status = 1;
2152 schedule_work(&adapter->watchdog_task);
2153
2154 if ((adapter->flags & IGB_FLAG_EEE) &&
2155 (!hw->dev_spec._82575.eee_disable))
2156 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2157
2158 return 0;
2159}
2160
2161void igb_down(struct igb_adapter *adapter)
2162{
2163 struct net_device *netdev = adapter->netdev;
2164 struct e1000_hw *hw = &adapter->hw;
2165 u32 tctl, rctl;
2166 int i;
2167
2168
2169
2170
2171 set_bit(__IGB_DOWN, &adapter->state);
2172
2173
2174 rctl = rd32(E1000_RCTL);
2175 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2176
2177
2178 igb_nfc_filter_exit(adapter);
2179
2180 netif_carrier_off(netdev);
2181 netif_tx_stop_all_queues(netdev);
2182
2183
2184 tctl = rd32(E1000_TCTL);
2185 tctl &= ~E1000_TCTL_EN;
2186 wr32(E1000_TCTL, tctl);
2187
2188 wrfl();
2189 usleep_range(10000, 11000);
2190
2191 igb_irq_disable(adapter);
2192
2193 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2194
2195 for (i = 0; i < adapter->num_q_vectors; i++) {
2196 if (adapter->q_vector[i]) {
2197 napi_synchronize(&adapter->q_vector[i]->napi);
2198 napi_disable(&adapter->q_vector[i]->napi);
2199 }
2200 }
2201
2202 del_timer_sync(&adapter->watchdog_timer);
2203 del_timer_sync(&adapter->phy_info_timer);
2204
2205
2206 spin_lock(&adapter->stats64_lock);
2207 igb_update_stats(adapter);
2208 spin_unlock(&adapter->stats64_lock);
2209
2210 adapter->link_speed = 0;
2211 adapter->link_duplex = 0;
2212
2213 if (!pci_channel_offline(adapter->pdev))
2214 igb_reset(adapter);
2215
2216
2217 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2218
2219 igb_clean_all_tx_rings(adapter);
2220 igb_clean_all_rx_rings(adapter);
2221#ifdef CONFIG_IGB_DCA
2222
2223
2224 igb_setup_dca(adapter);
2225#endif
2226}
2227
2228void igb_reinit_locked(struct igb_adapter *adapter)
2229{
2230 WARN_ON(in_interrupt());
2231 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2232 usleep_range(1000, 2000);
2233 igb_down(adapter);
2234 igb_up(adapter);
2235 clear_bit(__IGB_RESETTING, &adapter->state);
2236}
2237
2238
2239
2240
2241
2242static void igb_enable_mas(struct igb_adapter *adapter)
2243{
2244 struct e1000_hw *hw = &adapter->hw;
2245 u32 connsw = rd32(E1000_CONNSW);
2246
2247
2248 if ((hw->phy.media_type == e1000_media_type_copper) &&
2249 (!(connsw & E1000_CONNSW_SERDESD))) {
2250 connsw |= E1000_CONNSW_ENRGSRC;
2251 connsw |= E1000_CONNSW_AUTOSENSE_EN;
2252 wr32(E1000_CONNSW, connsw);
2253 wrfl();
2254 }
2255}
2256
2257void igb_reset(struct igb_adapter *adapter)
2258{
2259 struct pci_dev *pdev = adapter->pdev;
2260 struct e1000_hw *hw = &adapter->hw;
2261 struct e1000_mac_info *mac = &hw->mac;
2262 struct e1000_fc_info *fc = &hw->fc;
2263 u32 pba, hwm;
2264
2265
2266
2267
2268 switch (mac->type) {
2269 case e1000_i350:
2270 case e1000_i354:
2271 case e1000_82580:
2272 pba = rd32(E1000_RXPBS);
2273 pba = igb_rxpbs_adjust_82580(pba);
2274 break;
2275 case e1000_82576:
2276 pba = rd32(E1000_RXPBS);
2277 pba &= E1000_RXPBS_SIZE_MASK_82576;
2278 break;
2279 case e1000_82575:
2280 case e1000_i210:
2281 case e1000_i211:
2282 default:
2283 pba = E1000_PBA_34K;
2284 break;
2285 }
2286
2287 if (mac->type == e1000_82575) {
2288 u32 min_rx_space, min_tx_space, needed_tx_space;
2289
2290
2291 wr32(E1000_PBA, pba);
2292
2293
2294
2295
2296
2297
2298
2299
2300 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2301
2302
2303
2304
2305
2306
2307 min_tx_space = adapter->max_frame_size;
2308 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2309 min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
2310
2311
2312 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
2313
2314
2315
2316
2317
2318 if (needed_tx_space < pba) {
2319 pba -= needed_tx_space;
2320
2321
2322
2323
2324 if (pba < min_rx_space)
2325 pba = min_rx_space;
2326 }
2327
2328
2329 wr32(E1000_PBA, pba);
2330 }
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
2341
2342 fc->high_water = hwm & 0xFFFFFFF0;
2343 fc->low_water = fc->high_water - 16;
2344 fc->pause_time = 0xFFFF;
2345 fc->send_xon = 1;
2346 fc->current_mode = fc->requested_mode;
2347
2348
2349 if (adapter->vfs_allocated_count) {
2350 int i;
2351
2352 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
2353 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2354
2355
2356 igb_ping_all_vfs(adapter);
2357
2358
2359 wr32(E1000_VFRE, 0);
2360 wr32(E1000_VFTE, 0);
2361 }
2362
2363
2364 hw->mac.ops.reset_hw(hw);
2365 wr32(E1000_WUC, 0);
2366
2367 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2368
2369 adapter->ei.get_invariants(hw);
2370 adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2371 }
2372 if ((mac->type == e1000_82575) &&
2373 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2374 igb_enable_mas(adapter);
2375 }
2376 if (hw->mac.ops.init_hw(hw))
2377 dev_err(&pdev->dev, "Hardware Error\n");
2378
2379
2380 igb_flush_mac_table(adapter);
2381 __dev_uc_unsync(adapter->netdev, NULL);
2382
2383
2384 igb_set_default_mac_filter(adapter);
2385
2386
2387
2388
2389 if (!hw->mac.autoneg)
2390 igb_force_mac_fc(hw);
2391
2392 igb_init_dmac(adapter, pba);
2393#ifdef CONFIG_IGB_HWMON
2394
2395 if (!test_bit(__IGB_DOWN, &adapter->state)) {
2396 if (mac->type == e1000_i350 && hw->bus.func == 0) {
2397
2398
2399
2400 if (adapter->ets)
2401 mac->ops.init_thermal_sensor_thresh(hw);
2402 }
2403 }
2404#endif
2405
2406 if (hw->phy.media_type == e1000_media_type_copper) {
2407 switch (mac->type) {
2408 case e1000_i350:
2409 case e1000_i210:
2410 case e1000_i211:
2411 igb_set_eee_i350(hw, true, true);
2412 break;
2413 case e1000_i354:
2414 igb_set_eee_i354(hw, true, true);
2415 break;
2416 default:
2417 break;
2418 }
2419 }
2420 if (!netif_running(adapter->netdev))
2421 igb_power_down_link(adapter);
2422
2423 igb_update_mng_vlan(adapter);
2424
2425
2426 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2427
2428
2429 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2430 igb_ptp_reset(adapter);
2431
2432 igb_get_phy_info(hw);
2433}
2434
2435static netdev_features_t igb_fix_features(struct net_device *netdev,
2436 netdev_features_t features)
2437{
2438
2439
2440
2441 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2442 features |= NETIF_F_HW_VLAN_CTAG_TX;
2443 else
2444 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2445
2446 return features;
2447}
2448
2449static int igb_set_features(struct net_device *netdev,
2450 netdev_features_t features)
2451{
2452 netdev_features_t changed = netdev->features ^ features;
2453 struct igb_adapter *adapter = netdev_priv(netdev);
2454
2455 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2456 igb_vlan_mode(netdev, features);
2457
2458 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2459 return 0;
2460
2461 if (!(features & NETIF_F_NTUPLE)) {
2462 struct hlist_node *node2;
2463 struct igb_nfc_filter *rule;
2464
2465 spin_lock(&adapter->nfc_lock);
2466 hlist_for_each_entry_safe(rule, node2,
2467 &adapter->nfc_filter_list, nfc_node) {
2468 igb_erase_filter(adapter, rule);
2469 hlist_del(&rule->nfc_node);
2470 kfree(rule);
2471 }
2472 spin_unlock(&adapter->nfc_lock);
2473 adapter->nfc_filter_count = 0;
2474 }
2475
2476 netdev->features = features;
2477
2478 if (netif_running(netdev))
2479 igb_reinit_locked(adapter);
2480 else
2481 igb_reset(adapter);
2482
2483 return 1;
2484}
2485
2486static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2487 struct net_device *dev,
2488 const unsigned char *addr, u16 vid,
2489 u16 flags,
2490 struct netlink_ext_ack *extack)
2491{
2492
2493 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2494 struct igb_adapter *adapter = netdev_priv(dev);
2495 int vfn = adapter->vfs_allocated_count;
2496
2497 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2498 return -ENOMEM;
2499 }
2500
2501 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2502}
2503
2504#define IGB_MAX_MAC_HDR_LEN 127
2505#define IGB_MAX_NETWORK_HDR_LEN 511
2506
2507static netdev_features_t
2508igb_features_check(struct sk_buff *skb, struct net_device *dev,
2509 netdev_features_t features)
2510{
2511 unsigned int network_hdr_len, mac_hdr_len;
2512
2513
2514 mac_hdr_len = skb_network_header(skb) - skb->data;
2515 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2516 return features & ~(NETIF_F_HW_CSUM |
2517 NETIF_F_SCTP_CRC |
2518 NETIF_F_HW_VLAN_CTAG_TX |
2519 NETIF_F_TSO |
2520 NETIF_F_TSO6);
2521
2522 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2523 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2524 return features & ~(NETIF_F_HW_CSUM |
2525 NETIF_F_SCTP_CRC |
2526 NETIF_F_TSO |
2527 NETIF_F_TSO6);
2528
2529
2530
2531
2532 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2533 features &= ~NETIF_F_TSO;
2534
2535 return features;
2536}
2537
2538static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
2539{
2540 if (!is_fqtss_enabled(adapter)) {
2541 enable_fqtss(adapter, true);
2542 return;
2543 }
2544
2545 igb_config_tx_modes(adapter, queue);
2546
2547 if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
2548 enable_fqtss(adapter, false);
2549}
2550
2551static int igb_offload_cbs(struct igb_adapter *adapter,
2552 struct tc_cbs_qopt_offload *qopt)
2553{
2554 struct e1000_hw *hw = &adapter->hw;
2555 int err;
2556
2557
2558 if (hw->mac.type != e1000_i210)
2559 return -EOPNOTSUPP;
2560
2561
2562 if (qopt->queue < 0 || qopt->queue > 1)
2563 return -EINVAL;
2564
2565 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2566 qopt->idleslope, qopt->sendslope,
2567 qopt->hicredit, qopt->locredit);
2568 if (err)
2569 return err;
2570
2571 igb_offload_apply(adapter, qopt->queue);
2572
2573 return 0;
2574}
2575
2576#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
2577#define VLAN_PRIO_FULL_MASK (0x07)
2578
2579static int igb_parse_cls_flower(struct igb_adapter *adapter,
2580 struct tc_cls_flower_offload *f,
2581 int traffic_class,
2582 struct igb_nfc_filter *input)
2583{
2584 struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
2585 struct flow_dissector *dissector = rule->match.dissector;
2586 struct netlink_ext_ack *extack = f->common.extack;
2587
2588 if (dissector->used_keys &
2589 ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
2590 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2591 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2592 BIT(FLOW_DISSECTOR_KEY_VLAN))) {
2593 NL_SET_ERR_MSG_MOD(extack,
2594 "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
2595 return -EOPNOTSUPP;
2596 }
2597
2598 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2599 struct flow_match_eth_addrs match;
2600
2601 flow_rule_match_eth_addrs(rule, &match);
2602 if (!is_zero_ether_addr(match.mask->dst)) {
2603 if (!is_broadcast_ether_addr(match.mask->dst)) {
2604 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
2605 return -EINVAL;
2606 }
2607
2608 input->filter.match_flags |=
2609 IGB_FILTER_FLAG_DST_MAC_ADDR;
2610 ether_addr_copy(input->filter.dst_addr, match.key->dst);
2611 }
2612
2613 if (!is_zero_ether_addr(match.mask->src)) {
2614 if (!is_broadcast_ether_addr(match.mask->src)) {
2615 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
2616 return -EINVAL;
2617 }
2618
2619 input->filter.match_flags |=
2620 IGB_FILTER_FLAG_SRC_MAC_ADDR;
2621 ether_addr_copy(input->filter.src_addr, match.key->src);
2622 }
2623 }
2624
2625 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2626 struct flow_match_basic match;
2627
2628 flow_rule_match_basic(rule, &match);
2629 if (match.mask->n_proto) {
2630 if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
2631 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
2632 return -EINVAL;
2633 }
2634
2635 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
2636 input->filter.etype = match.key->n_proto;
2637 }
2638 }
2639
2640 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2641 struct flow_match_vlan match;
2642
2643 flow_rule_match_vlan(rule, &match);
2644 if (match.mask->vlan_priority) {
2645 if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
2646 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
2647 return -EINVAL;
2648 }
2649
2650 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
2651 input->filter.vlan_tci = match.key->vlan_priority;
2652 }
2653 }
2654
2655 input->action = traffic_class;
2656 input->cookie = f->cookie;
2657
2658 return 0;
2659}
2660
2661static int igb_configure_clsflower(struct igb_adapter *adapter,
2662 struct tc_cls_flower_offload *cls_flower)
2663{
2664 struct netlink_ext_ack *extack = cls_flower->common.extack;
2665 struct igb_nfc_filter *filter, *f;
2666 int err, tc;
2667
2668 tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2669 if (tc < 0) {
2670 NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
2671 return -EINVAL;
2672 }
2673
2674 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2675 if (!filter)
2676 return -ENOMEM;
2677
2678 err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
2679 if (err < 0)
2680 goto err_parse;
2681
2682 spin_lock(&adapter->nfc_lock);
2683
2684 hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
2685 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2686 err = -EEXIST;
2687 NL_SET_ERR_MSG_MOD(extack,
2688 "This filter is already set in ethtool");
2689 goto err_locked;
2690 }
2691 }
2692
2693 hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
2694 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2695 err = -EEXIST;
2696 NL_SET_ERR_MSG_MOD(extack,
2697 "This filter is already set in cls_flower");
2698 goto err_locked;
2699 }
2700 }
2701
2702 err = igb_add_filter(adapter, filter);
2703 if (err < 0) {
2704 NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
2705 goto err_locked;
2706 }
2707
2708 hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
2709
2710 spin_unlock(&adapter->nfc_lock);
2711
2712 return 0;
2713
2714err_locked:
2715 spin_unlock(&adapter->nfc_lock);
2716
2717err_parse:
2718 kfree(filter);
2719
2720 return err;
2721}
2722
2723static int igb_delete_clsflower(struct igb_adapter *adapter,
2724 struct tc_cls_flower_offload *cls_flower)
2725{
2726 struct igb_nfc_filter *filter;
2727 int err;
2728
2729 spin_lock(&adapter->nfc_lock);
2730
2731 hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
2732 if (filter->cookie == cls_flower->cookie)
2733 break;
2734
2735 if (!filter) {
2736 err = -ENOENT;
2737 goto out;
2738 }
2739
2740 err = igb_erase_filter(adapter, filter);
2741 if (err < 0)
2742 goto out;
2743
2744 hlist_del(&filter->nfc_node);
2745 kfree(filter);
2746
2747out:
2748 spin_unlock(&adapter->nfc_lock);
2749
2750 return err;
2751}
2752
2753static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
2754 struct tc_cls_flower_offload *cls_flower)
2755{
2756 switch (cls_flower->command) {
2757 case TC_CLSFLOWER_REPLACE:
2758 return igb_configure_clsflower(adapter, cls_flower);
2759 case TC_CLSFLOWER_DESTROY:
2760 return igb_delete_clsflower(adapter, cls_flower);
2761 case TC_CLSFLOWER_STATS:
2762 return -EOPNOTSUPP;
2763 default:
2764 return -EOPNOTSUPP;
2765 }
2766}
2767
2768static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2769 void *cb_priv)
2770{
2771 struct igb_adapter *adapter = cb_priv;
2772
2773 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
2774 return -EOPNOTSUPP;
2775
2776 switch (type) {
2777 case TC_SETUP_CLSFLOWER:
2778 return igb_setup_tc_cls_flower(adapter, type_data);
2779
2780 default:
2781 return -EOPNOTSUPP;
2782 }
2783}
2784
2785static int igb_setup_tc_block(struct igb_adapter *adapter,
2786 struct tc_block_offload *f)
2787{
2788 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
2789 return -EOPNOTSUPP;
2790
2791 switch (f->command) {
2792 case TC_BLOCK_BIND:
2793 return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
2794 adapter, adapter, f->extack);
2795 case TC_BLOCK_UNBIND:
2796 tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
2797 adapter);
2798 return 0;
2799 default:
2800 return -EOPNOTSUPP;
2801 }
2802}
2803
2804static int igb_offload_txtime(struct igb_adapter *adapter,
2805 struct tc_etf_qopt_offload *qopt)
2806{
2807 struct e1000_hw *hw = &adapter->hw;
2808 int err;
2809
2810
2811 if (hw->mac.type != e1000_i210)
2812 return -EOPNOTSUPP;
2813
2814
2815 if (qopt->queue < 0 || qopt->queue > 1)
2816 return -EINVAL;
2817
2818 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
2819 if (err)
2820 return err;
2821
2822 igb_offload_apply(adapter, qopt->queue);
2823
2824 return 0;
2825}
2826
2827static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2828 void *type_data)
2829{
2830 struct igb_adapter *adapter = netdev_priv(dev);
2831
2832 switch (type) {
2833 case TC_SETUP_QDISC_CBS:
2834 return igb_offload_cbs(adapter, type_data);
2835 case TC_SETUP_BLOCK:
2836 return igb_setup_tc_block(adapter, type_data);
2837 case TC_SETUP_QDISC_ETF:
2838 return igb_offload_txtime(adapter, type_data);
2839
2840 default:
2841 return -EOPNOTSUPP;
2842 }
2843}
2844
2845static const struct net_device_ops igb_netdev_ops = {
2846 .ndo_open = igb_open,
2847 .ndo_stop = igb_close,
2848 .ndo_start_xmit = igb_xmit_frame,
2849 .ndo_get_stats64 = igb_get_stats64,
2850 .ndo_set_rx_mode = igb_set_rx_mode,
2851 .ndo_set_mac_address = igb_set_mac,
2852 .ndo_change_mtu = igb_change_mtu,
2853 .ndo_do_ioctl = igb_ioctl,
2854 .ndo_tx_timeout = igb_tx_timeout,
2855 .ndo_validate_addr = eth_validate_addr,
2856 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
2857 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
2858 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
2859 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
2860 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
2861 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
2862 .ndo_set_vf_trust = igb_ndo_set_vf_trust,
2863 .ndo_get_vf_config = igb_ndo_get_vf_config,
2864 .ndo_fix_features = igb_fix_features,
2865 .ndo_set_features = igb_set_features,
2866 .ndo_fdb_add = igb_ndo_fdb_add,
2867 .ndo_features_check = igb_features_check,
2868 .ndo_setup_tc = igb_setup_tc,
2869};
2870
2871
2872
2873
2874
2875void igb_set_fw_version(struct igb_adapter *adapter)
2876{
2877 struct e1000_hw *hw = &adapter->hw;
2878 struct e1000_fw_version fw;
2879
2880 igb_get_fw_version(hw, &fw);
2881
2882 switch (hw->mac.type) {
2883 case e1000_i210:
2884 case e1000_i211:
2885 if (!(igb_get_flash_presence_i210(hw))) {
2886 snprintf(adapter->fw_version,
2887 sizeof(adapter->fw_version),
2888 "%2d.%2d-%d",
2889 fw.invm_major, fw.invm_minor,
2890 fw.invm_img_type);
2891 break;
2892 }
2893
2894 default:
2895
2896 if (fw.or_valid) {
2897 snprintf(adapter->fw_version,
2898 sizeof(adapter->fw_version),
2899 "%d.%d, 0x%08x, %d.%d.%d",
2900 fw.eep_major, fw.eep_minor, fw.etrack_id,
2901 fw.or_major, fw.or_build, fw.or_patch);
2902
2903 } else if (fw.etrack_id != 0X0000) {
2904 snprintf(adapter->fw_version,
2905 sizeof(adapter->fw_version),
2906 "%d.%d, 0x%08x",
2907 fw.eep_major, fw.eep_minor, fw.etrack_id);
2908 } else {
2909 snprintf(adapter->fw_version,
2910 sizeof(adapter->fw_version),
2911 "%d.%d.%d",
2912 fw.eep_major, fw.eep_minor, fw.eep_build);
2913 }
2914 break;
2915 }
2916}
2917
2918
2919
2920
2921
2922
2923static void igb_init_mas(struct igb_adapter *adapter)
2924{
2925 struct e1000_hw *hw = &adapter->hw;
2926 u16 eeprom_data;
2927
2928 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
2929 switch (hw->bus.func) {
2930 case E1000_FUNC_0:
2931 if (eeprom_data & IGB_MAS_ENABLE_0) {
2932 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2933 netdev_info(adapter->netdev,
2934 "MAS: Enabling Media Autosense for port %d\n",
2935 hw->bus.func);
2936 }
2937 break;
2938 case E1000_FUNC_1:
2939 if (eeprom_data & IGB_MAS_ENABLE_1) {
2940 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2941 netdev_info(adapter->netdev,
2942 "MAS: Enabling Media Autosense for port %d\n",
2943 hw->bus.func);
2944 }
2945 break;
2946 case E1000_FUNC_2:
2947 if (eeprom_data & IGB_MAS_ENABLE_2) {
2948 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2949 netdev_info(adapter->netdev,
2950 "MAS: Enabling Media Autosense for port %d\n",
2951 hw->bus.func);
2952 }
2953 break;
2954 case E1000_FUNC_3:
2955 if (eeprom_data & IGB_MAS_ENABLE_3) {
2956 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2957 netdev_info(adapter->netdev,
2958 "MAS: Enabling Media Autosense for port %d\n",
2959 hw->bus.func);
2960 }
2961 break;
2962 default:
2963
2964 netdev_err(adapter->netdev,
2965 "MAS: Invalid port configuration, returning\n");
2966 break;
2967 }
2968}
2969
2970
2971
2972
2973
2974static s32 igb_init_i2c(struct igb_adapter *adapter)
2975{
2976 s32 status = 0;
2977
2978
2979 if (adapter->hw.mac.type != e1000_i350)
2980 return 0;
2981
2982
2983
2984
2985
2986 adapter->i2c_adap.owner = THIS_MODULE;
2987 adapter->i2c_algo = igb_i2c_algo;
2988 adapter->i2c_algo.data = adapter;
2989 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
2990 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
2991 strlcpy(adapter->i2c_adap.name, "igb BB",
2992 sizeof(adapter->i2c_adap.name));
2993 status = i2c_bit_add_bus(&adapter->i2c_adap);
2994 return status;
2995}
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3009{
3010 struct net_device *netdev;
3011 struct igb_adapter *adapter;
3012 struct e1000_hw *hw;
3013 u16 eeprom_data = 0;
3014 s32 ret_val;
3015 static int global_quad_port_a;
3016 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
3017 int err, pci_using_dac;
3018 u8 part_str[E1000_PBANUM_LENGTH];
3019
3020
3021
3022
3023 if (pdev->is_virtfn) {
3024 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
3025 pci_name(pdev), pdev->vendor, pdev->device);
3026 return -EINVAL;
3027 }
3028
3029 err = pci_enable_device_mem(pdev);
3030 if (err)
3031 return err;
3032
3033 pci_using_dac = 0;
3034 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3035 if (!err) {
3036 pci_using_dac = 1;
3037 } else {
3038 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3039 if (err) {
3040 dev_err(&pdev->dev,
3041 "No usable DMA configuration, aborting\n");
3042 goto err_dma;
3043 }
3044 }
3045
3046 err = pci_request_mem_regions(pdev, igb_driver_name);
3047 if (err)
3048 goto err_pci_reg;
3049
3050 pci_enable_pcie_error_reporting(pdev);
3051
3052 pci_set_master(pdev);
3053 pci_save_state(pdev);
3054
3055 err = -ENOMEM;
3056 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
3057 IGB_MAX_TX_QUEUES);
3058 if (!netdev)
3059 goto err_alloc_etherdev;
3060
3061 SET_NETDEV_DEV(netdev, &pdev->dev);
3062
3063 pci_set_drvdata(pdev, netdev);
3064 adapter = netdev_priv(netdev);
3065 adapter->netdev = netdev;
3066 adapter->pdev = pdev;
3067 hw = &adapter->hw;
3068 hw->back = adapter;
3069 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3070
3071 err = -EIO;
3072 adapter->io_addr = pci_iomap(pdev, 0, 0);
3073 if (!adapter->io_addr)
3074 goto err_ioremap;
3075
3076 hw->hw_addr = adapter->io_addr;
3077
3078 netdev->netdev_ops = &igb_netdev_ops;
3079 igb_set_ethtool_ops(netdev);
3080 netdev->watchdog_timeo = 5 * HZ;
3081
3082 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
3083
3084 netdev->mem_start = pci_resource_start(pdev, 0);
3085 netdev->mem_end = pci_resource_end(pdev, 0);
3086
3087
3088 hw->vendor_id = pdev->vendor;
3089 hw->device_id = pdev->device;
3090 hw->revision_id = pdev->revision;
3091 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3092 hw->subsystem_device_id = pdev->subsystem_device;
3093
3094
3095 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3096 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3097 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3098
3099 err = ei->get_invariants(hw);
3100 if (err)
3101 goto err_sw_init;
3102
3103
3104 err = igb_sw_init(adapter);
3105 if (err)
3106 goto err_sw_init;
3107
3108 igb_get_bus_info_pcie(hw);
3109
3110 hw->phy.autoneg_wait_to_complete = false;
3111
3112
3113 if (hw->phy.media_type == e1000_media_type_copper) {
3114 hw->phy.mdix = AUTO_ALL_MODES;
3115 hw->phy.disable_polarity_correction = false;
3116 hw->phy.ms_type = e1000_ms_hw_default;
3117 }
3118
3119 if (igb_check_reset_block(hw))
3120 dev_info(&pdev->dev,
3121 "PHY reset is blocked due to SOL/IDER session.\n");
3122
3123
3124
3125
3126
3127 netdev->features |= NETIF_F_SG |
3128 NETIF_F_TSO |
3129 NETIF_F_TSO6 |
3130 NETIF_F_RXHASH |
3131 NETIF_F_RXCSUM |
3132 NETIF_F_HW_CSUM;
3133
3134 if (hw->mac.type >= e1000_82576)
3135 netdev->features |= NETIF_F_SCTP_CRC;
3136
3137 if (hw->mac.type >= e1000_i350)
3138 netdev->features |= NETIF_F_HW_TC;
3139
3140#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
3141 NETIF_F_GSO_GRE_CSUM | \
3142 NETIF_F_GSO_IPXIP4 | \
3143 NETIF_F_GSO_IPXIP6 | \
3144 NETIF_F_GSO_UDP_TUNNEL | \
3145 NETIF_F_GSO_UDP_TUNNEL_CSUM)
3146
3147 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
3148 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
3149
3150
3151 netdev->hw_features |= netdev->features |
3152 NETIF_F_HW_VLAN_CTAG_RX |
3153 NETIF_F_HW_VLAN_CTAG_TX |
3154 NETIF_F_RXALL;
3155
3156 if (hw->mac.type >= e1000_i350)
3157 netdev->hw_features |= NETIF_F_NTUPLE;
3158
3159 if (pci_using_dac)
3160 netdev->features |= NETIF_F_HIGHDMA;
3161
3162 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3163 netdev->mpls_features |= NETIF_F_HW_CSUM;
3164 netdev->hw_enc_features |= netdev->vlan_features;
3165
3166
3167 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3168 NETIF_F_HW_VLAN_CTAG_RX |
3169 NETIF_F_HW_VLAN_CTAG_TX;
3170
3171 netdev->priv_flags |= IFF_SUPP_NOFCS;
3172
3173 netdev->priv_flags |= IFF_UNICAST_FLT;
3174
3175
3176 netdev->min_mtu = ETH_MIN_MTU;
3177 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3178
3179 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
3180
3181
3182
3183
3184 hw->mac.ops.reset_hw(hw);
3185
3186
3187
3188
3189 switch (hw->mac.type) {
3190 case e1000_i210:
3191 case e1000_i211:
3192 if (igb_get_flash_presence_i210(hw)) {
3193 if (hw->nvm.ops.validate(hw) < 0) {
3194 dev_err(&pdev->dev,
3195 "The NVM Checksum Is Not Valid\n");
3196 err = -EIO;
3197 goto err_eeprom;
3198 }
3199 }
3200 break;
3201 default:
3202 if (hw->nvm.ops.validate(hw) < 0) {
3203 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
3204 err = -EIO;
3205 goto err_eeprom;
3206 }
3207 break;
3208 }
3209
3210 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
3211
3212 if (hw->mac.ops.read_mac_addr(hw))
3213 dev_err(&pdev->dev, "NVM Read Error\n");
3214 }
3215
3216 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
3217
3218 if (!is_valid_ether_addr(netdev->dev_addr)) {
3219 dev_err(&pdev->dev, "Invalid MAC Address\n");
3220 err = -EIO;
3221 goto err_eeprom;
3222 }
3223
3224 igb_set_default_mac_filter(adapter);
3225
3226
3227 igb_set_fw_version(adapter);
3228
3229
3230 if (hw->mac.type == e1000_i210) {
3231 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
3232 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
3233 }
3234
3235 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
3236 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
3237
3238 INIT_WORK(&adapter->reset_task, igb_reset_task);
3239 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
3240
3241
3242 adapter->fc_autoneg = true;
3243 hw->mac.autoneg = true;
3244 hw->phy.autoneg_advertised = 0x2f;
3245
3246 hw->fc.requested_mode = e1000_fc_default;
3247 hw->fc.current_mode = e1000_fc_default;
3248
3249 igb_validate_mdi_setting(hw);
3250
3251
3252 if (hw->bus.func == 0)
3253 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3254
3255
3256 if (hw->mac.type >= e1000_82580)
3257 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3258 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3259 &eeprom_data);
3260 else if (hw->bus.func == 1)
3261 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3262
3263 if (eeprom_data & IGB_EEPROM_APME)
3264 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3265
3266
3267
3268
3269
3270 switch (pdev->device) {
3271 case E1000_DEV_ID_82575GB_QUAD_COPPER:
3272 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3273 break;
3274 case E1000_DEV_ID_82575EB_FIBER_SERDES:
3275 case E1000_DEV_ID_82576_FIBER:
3276 case E1000_DEV_ID_82576_SERDES:
3277
3278
3279
3280 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
3281 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3282 break;
3283 case E1000_DEV_ID_82576_QUAD_COPPER:
3284 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
3285
3286 if (global_quad_port_a != 0)
3287 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3288 else
3289 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
3290
3291 if (++global_quad_port_a == 4)
3292 global_quad_port_a = 0;
3293 break;
3294 default:
3295
3296 if (!device_can_wakeup(&adapter->pdev->dev))
3297 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3298 }
3299
3300
3301 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
3302 adapter->wol |= E1000_WUFC_MAG;
3303
3304
3305 if ((hw->mac.type == e1000_i350) &&
3306 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
3307 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3308 adapter->wol = 0;
3309 }
3310
3311
3312
3313
3314 if (((hw->mac.type == e1000_i350) ||
3315 (hw->mac.type == e1000_i354)) &&
3316 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
3317 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3318 adapter->wol = 0;
3319 }
3320 if (hw->mac.type == e1000_i350) {
3321 if (((pdev->subsystem_device == 0x5001) ||
3322 (pdev->subsystem_device == 0x5002)) &&
3323 (hw->bus.func == 0)) {
3324 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3325 adapter->wol = 0;
3326 }
3327 if (pdev->subsystem_device == 0x1F52)
3328 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3329 }
3330
3331 device_set_wakeup_enable(&adapter->pdev->dev,
3332 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
3333
3334
3335 igb_reset(adapter);
3336
3337
3338 err = igb_init_i2c(adapter);
3339 if (err) {
3340 dev_err(&pdev->dev, "failed to init i2c interface\n");
3341 goto err_eeprom;
3342 }
3343
3344
3345
3346
3347 igb_get_hw_control(adapter);
3348
3349 strcpy(netdev->name, "eth%d");
3350 err = register_netdev(netdev);
3351 if (err)
3352 goto err_register;
3353
3354
3355 netif_carrier_off(netdev);
3356
3357#ifdef CONFIG_IGB_DCA
3358 if (dca_add_requester(&pdev->dev) == 0) {
3359 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3360 dev_info(&pdev->dev, "DCA enabled\n");
3361 igb_setup_dca(adapter);
3362 }
3363
3364#endif
3365#ifdef CONFIG_IGB_HWMON
3366
3367 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3368 u16 ets_word;
3369
3370
3371
3372
3373 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3374 if (ets_word != 0x0000 && ets_word != 0xFFFF)
3375 adapter->ets = true;
3376 else
3377 adapter->ets = false;
3378 if (igb_sysfs_init(adapter))
3379 dev_err(&pdev->dev,
3380 "failed to allocate sysfs resources\n");
3381 } else {
3382 adapter->ets = false;
3383 }
3384#endif
3385
3386 adapter->ei = *ei;
3387 if (hw->dev_spec._82575.mas_capable)
3388 igb_init_mas(adapter);
3389
3390
3391 igb_ptp_init(adapter);
3392
3393 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3394
3395 if (hw->mac.type != e1000_i354) {
3396 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3397 netdev->name,
3398 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3399 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3400 "unknown"),
3401 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3402 "Width x4" :
3403 (hw->bus.width == e1000_bus_width_pcie_x2) ?
3404 "Width x2" :
3405 (hw->bus.width == e1000_bus_width_pcie_x1) ?
3406 "Width x1" : "unknown"), netdev->dev_addr);
3407 }
3408
3409 if ((hw->mac.type >= e1000_i210 ||
3410 igb_get_flash_presence_i210(hw))) {
3411 ret_val = igb_read_part_string(hw, part_str,
3412 E1000_PBANUM_LENGTH);
3413 } else {
3414 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3415 }
3416
3417 if (ret_val)
3418 strcpy(part_str, "Unknown");
3419 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3420 dev_info(&pdev->dev,
3421 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3422 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3423 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3424 adapter->num_rx_queues, adapter->num_tx_queues);
3425 if (hw->phy.media_type == e1000_media_type_copper) {
3426 switch (hw->mac.type) {
3427 case e1000_i350:
3428 case e1000_i210:
3429 case e1000_i211:
3430
3431 err = igb_set_eee_i350(hw, true, true);
3432 if ((!err) &&
3433 (!hw->dev_spec._82575.eee_disable)) {
3434 adapter->eee_advert =
3435 MDIO_EEE_100TX | MDIO_EEE_1000T;
3436 adapter->flags |= IGB_FLAG_EEE;
3437 }
3438 break;
3439 case e1000_i354:
3440 if ((rd32(E1000_CTRL_EXT) &
3441 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3442 err = igb_set_eee_i354(hw, true, true);
3443 if ((!err) &&
3444 (!hw->dev_spec._82575.eee_disable)) {
3445 adapter->eee_advert =
3446 MDIO_EEE_100TX | MDIO_EEE_1000T;
3447 adapter->flags |= IGB_FLAG_EEE;
3448 }
3449 }
3450 break;
3451 default:
3452 break;
3453 }
3454 }
3455
3456 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
3457
3458 pm_runtime_put_noidle(&pdev->dev);
3459 return 0;
3460
3461err_register:
3462 igb_release_hw_control(adapter);
3463 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3464err_eeprom:
3465 if (!igb_check_reset_block(hw))
3466 igb_reset_phy(hw);
3467
3468 if (hw->flash_address)
3469 iounmap(hw->flash_address);
3470err_sw_init:
3471 kfree(adapter->mac_table);
3472 kfree(adapter->shadow_vfta);
3473 igb_clear_interrupt_scheme(adapter);
3474#ifdef CONFIG_PCI_IOV
3475 igb_disable_sriov(pdev);
3476#endif
3477 pci_iounmap(pdev, adapter->io_addr);
3478err_ioremap:
3479 free_netdev(netdev);
3480err_alloc_etherdev:
3481 pci_release_mem_regions(pdev);
3482err_pci_reg:
3483err_dma:
3484 pci_disable_device(pdev);
3485 return err;
3486}
3487
3488#ifdef CONFIG_PCI_IOV
3489static int igb_disable_sriov(struct pci_dev *pdev)
3490{
3491 struct net_device *netdev = pci_get_drvdata(pdev);
3492 struct igb_adapter *adapter = netdev_priv(netdev);
3493 struct e1000_hw *hw = &adapter->hw;
3494
3495
3496 if (adapter->vf_data) {
3497
3498 if (pci_vfs_assigned(pdev)) {
3499 dev_warn(&pdev->dev,
3500 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3501 return -EPERM;
3502 } else {
3503 pci_disable_sriov(pdev);
3504 msleep(500);
3505 }
3506
3507 kfree(adapter->vf_mac_list);
3508 adapter->vf_mac_list = NULL;
3509 kfree(adapter->vf_data);
3510 adapter->vf_data = NULL;
3511 adapter->vfs_allocated_count = 0;
3512 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3513 wrfl();
3514 msleep(100);
3515 dev_info(&pdev->dev, "IOV Disabled\n");
3516
3517
3518 adapter->flags |= IGB_FLAG_DMAC;
3519 }
3520
3521 return 0;
3522}
3523
3524static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
3525{
3526 struct net_device *netdev = pci_get_drvdata(pdev);
3527 struct igb_adapter *adapter = netdev_priv(netdev);
3528 int old_vfs = pci_num_vf(pdev);
3529 struct vf_mac_filter *mac_list;
3530 int err = 0;
3531 int num_vf_mac_filters, i;
3532
3533 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3534 err = -EPERM;
3535 goto out;
3536 }
3537 if (!num_vfs)
3538 goto out;
3539
3540 if (old_vfs) {
3541 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3542 old_vfs, max_vfs);
3543 adapter->vfs_allocated_count = old_vfs;
3544 } else
3545 adapter->vfs_allocated_count = num_vfs;
3546
3547 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3548 sizeof(struct vf_data_storage), GFP_KERNEL);
3549
3550
3551 if (!adapter->vf_data) {
3552 adapter->vfs_allocated_count = 0;
3553 err = -ENOMEM;
3554 goto out;
3555 }
3556
3557
3558
3559
3560
3561
3562 num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3563 (1 + IGB_PF_MAC_FILTERS_RESERVED +
3564 adapter->vfs_allocated_count);
3565
3566 adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3567 sizeof(struct vf_mac_filter),
3568 GFP_KERNEL);
3569
3570 mac_list = adapter->vf_mac_list;
3571 INIT_LIST_HEAD(&adapter->vf_macs.l);
3572
3573 if (adapter->vf_mac_list) {
3574
3575 for (i = 0; i < num_vf_mac_filters; i++) {
3576 mac_list->vf = -1;
3577 mac_list->free = true;
3578 list_add(&mac_list->l, &adapter->vf_macs.l);
3579 mac_list++;
3580 }
3581 } else {
3582
3583
3584
3585 dev_err(&pdev->dev,
3586 "Unable to allocate memory for VF MAC filter list\n");
3587 }
3588
3589
3590 if (!old_vfs) {
3591 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3592 if (err)
3593 goto err_out;
3594 }
3595 dev_info(&pdev->dev, "%d VFs allocated\n",
3596 adapter->vfs_allocated_count);
3597 for (i = 0; i < adapter->vfs_allocated_count; i++)
3598 igb_vf_configure(adapter, i);
3599
3600
3601 adapter->flags &= ~IGB_FLAG_DMAC;
3602 goto out;
3603
3604err_out:
3605 kfree(adapter->vf_mac_list);
3606 adapter->vf_mac_list = NULL;
3607 kfree(adapter->vf_data);
3608 adapter->vf_data = NULL;
3609 adapter->vfs_allocated_count = 0;
3610out:
3611 return err;
3612}
3613
3614#endif
3615
3616
3617
3618
3619static void igb_remove_i2c(struct igb_adapter *adapter)
3620{
3621
3622 i2c_del_adapter(&adapter->i2c_adap);
3623}
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634static void igb_remove(struct pci_dev *pdev)
3635{
3636 struct net_device *netdev = pci_get_drvdata(pdev);
3637 struct igb_adapter *adapter = netdev_priv(netdev);
3638 struct e1000_hw *hw = &adapter->hw;
3639
3640 pm_runtime_get_noresume(&pdev->dev);
3641#ifdef CONFIG_IGB_HWMON
3642 igb_sysfs_exit(adapter);
3643#endif
3644 igb_remove_i2c(adapter);
3645 igb_ptp_stop(adapter);
3646
3647
3648
3649 set_bit(__IGB_DOWN, &adapter->state);
3650 del_timer_sync(&adapter->watchdog_timer);
3651 del_timer_sync(&adapter->phy_info_timer);
3652
3653 cancel_work_sync(&adapter->reset_task);
3654 cancel_work_sync(&adapter->watchdog_task);
3655
3656#ifdef CONFIG_IGB_DCA
3657 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3658 dev_info(&pdev->dev, "DCA disabled\n");
3659 dca_remove_requester(&pdev->dev);
3660 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3661 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3662 }
3663#endif
3664
3665
3666
3667
3668 igb_release_hw_control(adapter);
3669
3670#ifdef CONFIG_PCI_IOV
3671 igb_disable_sriov(pdev);
3672#endif
3673
3674 unregister_netdev(netdev);
3675
3676 igb_clear_interrupt_scheme(adapter);
3677
3678 pci_iounmap(pdev, adapter->io_addr);
3679 if (hw->flash_address)
3680 iounmap(hw->flash_address);
3681 pci_release_mem_regions(pdev);
3682
3683 kfree(adapter->mac_table);
3684 kfree(adapter->shadow_vfta);
3685 free_netdev(netdev);
3686
3687 pci_disable_pcie_error_reporting(pdev);
3688
3689 pci_disable_device(pdev);
3690}
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701static void igb_probe_vfs(struct igb_adapter *adapter)
3702{
3703#ifdef CONFIG_PCI_IOV
3704 struct pci_dev *pdev = adapter->pdev;
3705 struct e1000_hw *hw = &adapter->hw;
3706
3707
3708 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
3709 return;
3710
3711
3712
3713
3714
3715 igb_set_interrupt_capability(adapter, true);
3716 igb_reset_interrupt_capability(adapter);
3717
3718 pci_sriov_set_totalvfs(pdev, 7);
3719 igb_enable_sriov(pdev, max_vfs);
3720
3721#endif
3722}
3723
3724unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3725{
3726 struct e1000_hw *hw = &adapter->hw;
3727 unsigned int max_rss_queues;
3728
3729
3730 switch (hw->mac.type) {
3731 case e1000_i211:
3732 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3733 break;
3734 case e1000_82575:
3735 case e1000_i210:
3736 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3737 break;
3738 case e1000_i350:
3739
3740 if (!!adapter->vfs_allocated_count) {
3741 max_rss_queues = 1;
3742 break;
3743 }
3744
3745 case e1000_82576:
3746 if (!!adapter->vfs_allocated_count) {
3747 max_rss_queues = 2;
3748 break;
3749 }
3750
3751 case e1000_82580:
3752 case e1000_i354:
3753 default:
3754 max_rss_queues = IGB_MAX_RX_QUEUES;
3755 break;
3756 }
3757
3758 return max_rss_queues;
3759}
3760
3761static void igb_init_queue_configuration(struct igb_adapter *adapter)
3762{
3763 u32 max_rss_queues;
3764
3765 max_rss_queues = igb_get_max_rss_queues(adapter);
3766 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3767
3768 igb_set_flag_queue_pairs(adapter, max_rss_queues);
3769}
3770
3771void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3772 const u32 max_rss_queues)
3773{
3774 struct e1000_hw *hw = &adapter->hw;
3775
3776
3777 switch (hw->mac.type) {
3778 case e1000_82575:
3779 case e1000_i211:
3780
3781 break;
3782 case e1000_82576:
3783 case e1000_82580:
3784 case e1000_i350:
3785 case e1000_i354:
3786 case e1000_i210:
3787 default:
3788
3789
3790
3791 if (adapter->rss_queues > (max_rss_queues / 2))
3792 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
3793 else
3794 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
3795 break;
3796 }
3797}
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807static int igb_sw_init(struct igb_adapter *adapter)
3808{
3809 struct e1000_hw *hw = &adapter->hw;
3810 struct net_device *netdev = adapter->netdev;
3811 struct pci_dev *pdev = adapter->pdev;
3812
3813 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3814
3815
3816 adapter->tx_ring_count = IGB_DEFAULT_TXD;
3817 adapter->rx_ring_count = IGB_DEFAULT_RXD;
3818
3819
3820 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
3821 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
3822
3823
3824 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3825
3826 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3827 VLAN_HLEN;
3828 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3829
3830 spin_lock_init(&adapter->nfc_lock);
3831 spin_lock_init(&adapter->stats64_lock);
3832#ifdef CONFIG_PCI_IOV
3833 switch (hw->mac.type) {
3834 case e1000_82576:
3835 case e1000_i350:
3836 if (max_vfs > 7) {
3837 dev_warn(&pdev->dev,
3838 "Maximum of 7 VFs per PF, using max\n");
3839 max_vfs = adapter->vfs_allocated_count = 7;
3840 } else
3841 adapter->vfs_allocated_count = max_vfs;
3842 if (adapter->vfs_allocated_count)
3843 dev_warn(&pdev->dev,
3844 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3845 break;
3846 default:
3847 break;
3848 }
3849#endif
3850
3851
3852 adapter->flags |= IGB_FLAG_HAS_MSIX;
3853
3854 adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
3855 sizeof(struct igb_mac_addr),
3856 GFP_KERNEL);
3857 if (!adapter->mac_table)
3858 return -ENOMEM;
3859
3860 igb_probe_vfs(adapter);
3861
3862 igb_init_queue_configuration(adapter);
3863
3864
3865 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
3866 GFP_KERNEL);
3867 if (!adapter->shadow_vfta)
3868 return -ENOMEM;
3869
3870
3871 if (igb_init_interrupt_scheme(adapter, true)) {
3872 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3873 return -ENOMEM;
3874 }
3875
3876
3877 igb_irq_disable(adapter);
3878
3879 if (hw->mac.type >= e1000_i350)
3880 adapter->flags &= ~IGB_FLAG_DMAC;
3881
3882 set_bit(__IGB_DOWN, &adapter->state);
3883 return 0;
3884}
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898static int __igb_open(struct net_device *netdev, bool resuming)
3899{
3900 struct igb_adapter *adapter = netdev_priv(netdev);
3901 struct e1000_hw *hw = &adapter->hw;
3902 struct pci_dev *pdev = adapter->pdev;
3903 int err;
3904 int i;
3905
3906
3907 if (test_bit(__IGB_TESTING, &adapter->state)) {
3908 WARN_ON(resuming);
3909 return -EBUSY;
3910 }
3911
3912 if (!resuming)
3913 pm_runtime_get_sync(&pdev->dev);
3914
3915 netif_carrier_off(netdev);
3916
3917
3918 err = igb_setup_all_tx_resources(adapter);
3919 if (err)
3920 goto err_setup_tx;
3921
3922
3923 err = igb_setup_all_rx_resources(adapter);
3924 if (err)
3925 goto err_setup_rx;
3926
3927 igb_power_up_link(adapter);
3928
3929
3930
3931
3932
3933
3934 igb_configure(adapter);
3935
3936 err = igb_request_irq(adapter);
3937 if (err)
3938 goto err_req_irq;
3939
3940
3941 err = netif_set_real_num_tx_queues(adapter->netdev,
3942 adapter->num_tx_queues);
3943 if (err)
3944 goto err_set_queues;
3945
3946 err = netif_set_real_num_rx_queues(adapter->netdev,
3947 adapter->num_rx_queues);
3948 if (err)
3949 goto err_set_queues;
3950
3951
3952 clear_bit(__IGB_DOWN, &adapter->state);
3953
3954 for (i = 0; i < adapter->num_q_vectors; i++)
3955 napi_enable(&(adapter->q_vector[i]->napi));
3956
3957
3958 rd32(E1000_TSICR);
3959 rd32(E1000_ICR);
3960
3961 igb_irq_enable(adapter);
3962
3963
3964 if (adapter->vfs_allocated_count) {
3965 u32 reg_data = rd32(E1000_CTRL_EXT);
3966
3967 reg_data |= E1000_CTRL_EXT_PFRSTD;
3968 wr32(E1000_CTRL_EXT, reg_data);
3969 }
3970
3971 netif_tx_start_all_queues(netdev);
3972
3973 if (!resuming)
3974 pm_runtime_put(&pdev->dev);
3975
3976
3977 hw->mac.get_link_status = 1;
3978 schedule_work(&adapter->watchdog_task);
3979
3980 return 0;
3981
3982err_set_queues:
3983 igb_free_irq(adapter);
3984err_req_irq:
3985 igb_release_hw_control(adapter);
3986 igb_power_down_link(adapter);
3987 igb_free_all_rx_resources(adapter);
3988err_setup_rx:
3989 igb_free_all_tx_resources(adapter);
3990err_setup_tx:
3991 igb_reset(adapter);
3992 if (!resuming)
3993 pm_runtime_put(&pdev->dev);
3994
3995 return err;
3996}
3997
3998int igb_open(struct net_device *netdev)
3999{
4000 return __igb_open(netdev, false);
4001}
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
4014static int __igb_close(struct net_device *netdev, bool suspending)
4015{
4016 struct igb_adapter *adapter = netdev_priv(netdev);
4017 struct pci_dev *pdev = adapter->pdev;
4018
4019 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
4020
4021 if (!suspending)
4022 pm_runtime_get_sync(&pdev->dev);
4023
4024 igb_down(adapter);
4025 igb_free_irq(adapter);
4026
4027 igb_free_all_tx_resources(adapter);
4028 igb_free_all_rx_resources(adapter);
4029
4030 if (!suspending)
4031 pm_runtime_put_sync(&pdev->dev);
4032 return 0;
4033}
4034
4035int igb_close(struct net_device *netdev)
4036{
4037 if (netif_device_present(netdev) || netdev->dismantle)
4038 return __igb_close(netdev, false);
4039 return 0;
4040}
4041
4042
4043
4044
4045
4046
4047
4048int igb_setup_tx_resources(struct igb_ring *tx_ring)
4049{
4050 struct device *dev = tx_ring->dev;
4051 int size;
4052
4053 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
4054
4055 tx_ring->tx_buffer_info = vmalloc(size);
4056 if (!tx_ring->tx_buffer_info)
4057 goto err;
4058
4059
4060 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
4061 tx_ring->size = ALIGN(tx_ring->size, 4096);
4062
4063 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4064 &tx_ring->dma, GFP_KERNEL);
4065 if (!tx_ring->desc)
4066 goto err;
4067
4068 tx_ring->next_to_use = 0;
4069 tx_ring->next_to_clean = 0;
4070
4071 return 0;
4072
4073err:
4074 vfree(tx_ring->tx_buffer_info);
4075 tx_ring->tx_buffer_info = NULL;
4076 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4077 return -ENOMEM;
4078}
4079
4080
4081
4082
4083
4084
4085
4086
4087static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
4088{
4089 struct pci_dev *pdev = adapter->pdev;
4090 int i, err = 0;
4091
4092 for (i = 0; i < adapter->num_tx_queues; i++) {
4093 err = igb_setup_tx_resources(adapter->tx_ring[i]);
4094 if (err) {
4095 dev_err(&pdev->dev,
4096 "Allocation for Tx Queue %u failed\n", i);
4097 for (i--; i >= 0; i--)
4098 igb_free_tx_resources(adapter->tx_ring[i]);
4099 break;
4100 }
4101 }
4102
4103 return err;
4104}
4105
4106
4107
4108
4109
4110void igb_setup_tctl(struct igb_adapter *adapter)
4111{
4112 struct e1000_hw *hw = &adapter->hw;
4113 u32 tctl;
4114
4115
4116 wr32(E1000_TXDCTL(0), 0);
4117
4118
4119 tctl = rd32(E1000_TCTL);
4120 tctl &= ~E1000_TCTL_CT;
4121 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
4122 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
4123
4124 igb_config_collision_dist(hw);
4125
4126
4127 tctl |= E1000_TCTL_EN;
4128
4129 wr32(E1000_TCTL, tctl);
4130}
4131
4132
4133
4134
4135
4136
4137
4138
4139void igb_configure_tx_ring(struct igb_adapter *adapter,
4140 struct igb_ring *ring)
4141{
4142 struct e1000_hw *hw = &adapter->hw;
4143 u32 txdctl = 0;
4144 u64 tdba = ring->dma;
4145 int reg_idx = ring->reg_idx;
4146
4147 wr32(E1000_TDLEN(reg_idx),
4148 ring->count * sizeof(union e1000_adv_tx_desc));
4149 wr32(E1000_TDBAL(reg_idx),
4150 tdba & 0x00000000ffffffffULL);
4151 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
4152
4153 ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
4154 wr32(E1000_TDH(reg_idx), 0);
4155 writel(0, ring->tail);
4156
4157 txdctl |= IGB_TX_PTHRESH;
4158 txdctl |= IGB_TX_HTHRESH << 8;
4159 txdctl |= IGB_TX_WTHRESH << 16;
4160
4161
4162 memset(ring->tx_buffer_info, 0,
4163 sizeof(struct igb_tx_buffer) * ring->count);
4164
4165 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
4166 wr32(E1000_TXDCTL(reg_idx), txdctl);
4167}
4168
4169
4170
4171
4172
4173
4174
4175static void igb_configure_tx(struct igb_adapter *adapter)
4176{
4177 struct e1000_hw *hw = &adapter->hw;
4178 int i;
4179
4180
4181 for (i = 0; i < adapter->num_tx_queues; i++)
4182 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4183
4184 wrfl();
4185 usleep_range(10000, 20000);
4186
4187 for (i = 0; i < adapter->num_tx_queues; i++)
4188 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4189}
4190
4191
4192
4193
4194
4195
4196
4197int igb_setup_rx_resources(struct igb_ring *rx_ring)
4198{
4199 struct device *dev = rx_ring->dev;
4200 int size;
4201
4202 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
4203
4204 rx_ring->rx_buffer_info = vmalloc(size);
4205 if (!rx_ring->rx_buffer_info)
4206 goto err;
4207
4208
4209 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
4210 rx_ring->size = ALIGN(rx_ring->size, 4096);
4211
4212 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4213 &rx_ring->dma, GFP_KERNEL);
4214 if (!rx_ring->desc)
4215 goto err;
4216
4217 rx_ring->next_to_alloc = 0;
4218 rx_ring->next_to_clean = 0;
4219 rx_ring->next_to_use = 0;
4220
4221 return 0;
4222
4223err:
4224 vfree(rx_ring->rx_buffer_info);
4225 rx_ring->rx_buffer_info = NULL;
4226 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4227 return -ENOMEM;
4228}
4229
4230
4231
4232
4233
4234
4235
4236
4237static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
4238{
4239 struct pci_dev *pdev = adapter->pdev;
4240 int i, err = 0;
4241
4242 for (i = 0; i < adapter->num_rx_queues; i++) {
4243 err = igb_setup_rx_resources(adapter->rx_ring[i]);
4244 if (err) {
4245 dev_err(&pdev->dev,
4246 "Allocation for Rx Queue %u failed\n", i);
4247 for (i--; i >= 0; i--)
4248 igb_free_rx_resources(adapter->rx_ring[i]);
4249 break;
4250 }
4251 }
4252
4253 return err;
4254}
4255
4256
4257
4258
4259
4260static void igb_setup_mrqc(struct igb_adapter *adapter)
4261{
4262 struct e1000_hw *hw = &adapter->hw;
4263 u32 mrqc, rxcsum;
4264 u32 j, num_rx_queues;
4265 u32 rss_key[10];
4266
4267 netdev_rss_key_fill(rss_key, sizeof(rss_key));
4268 for (j = 0; j < 10; j++)
4269 wr32(E1000_RSSRK(j), rss_key[j]);
4270
4271 num_rx_queues = adapter->rss_queues;
4272
4273 switch (hw->mac.type) {
4274 case e1000_82576:
4275
4276 if (adapter->vfs_allocated_count)
4277 num_rx_queues = 2;
4278 break;
4279 default:
4280 break;
4281 }
4282
4283 if (adapter->rss_indir_tbl_init != num_rx_queues) {
4284 for (j = 0; j < IGB_RETA_SIZE; j++)
4285 adapter->rss_indir_tbl[j] =
4286 (j * num_rx_queues) / IGB_RETA_SIZE;
4287 adapter->rss_indir_tbl_init = num_rx_queues;
4288 }
4289 igb_write_rss_indir_tbl(adapter);
4290
4291
4292
4293
4294
4295 rxcsum = rd32(E1000_RXCSUM);
4296 rxcsum |= E1000_RXCSUM_PCSD;
4297
4298 if (adapter->hw.mac.type >= e1000_82576)
4299
4300 rxcsum |= E1000_RXCSUM_CRCOFL;
4301
4302
4303 wr32(E1000_RXCSUM, rxcsum);
4304
4305
4306
4307
4308 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
4309 E1000_MRQC_RSS_FIELD_IPV4_TCP |
4310 E1000_MRQC_RSS_FIELD_IPV6 |
4311 E1000_MRQC_RSS_FIELD_IPV6_TCP |
4312 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
4313
4314 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
4315 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
4316 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
4317 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
4318
4319
4320
4321
4322
4323 if (adapter->vfs_allocated_count) {
4324 if (hw->mac.type > e1000_82575) {
4325
4326 u32 vtctl = rd32(E1000_VT_CTL);
4327
4328 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
4329 E1000_VT_CTL_DISABLE_DEF_POOL);
4330 vtctl |= adapter->vfs_allocated_count <<
4331 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
4332 wr32(E1000_VT_CTL, vtctl);
4333 }
4334 if (adapter->rss_queues > 1)
4335 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
4336 else
4337 mrqc |= E1000_MRQC_ENABLE_VMDQ;
4338 } else {
4339 if (hw->mac.type != e1000_i211)
4340 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4341 }
4342 igb_vmm_control(adapter);
4343
4344 wr32(E1000_MRQC, mrqc);
4345}
4346
4347
4348
4349
4350
4351void igb_setup_rctl(struct igb_adapter *adapter)
4352{
4353 struct e1000_hw *hw = &adapter->hw;
4354 u32 rctl;
4355
4356 rctl = rd32(E1000_RCTL);
4357
4358 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4359 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4360
4361 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4362 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4363
4364
4365
4366
4367
4368 rctl |= E1000_RCTL_SECRC;
4369
4370
4371 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
4372
4373
4374 rctl |= E1000_RCTL_LPE;
4375
4376
4377 wr32(E1000_RXDCTL(0), 0);
4378
4379
4380
4381
4382
4383 if (adapter->vfs_allocated_count) {
4384
4385 wr32(E1000_QDE, ALL_QUEUES);
4386 }
4387
4388
4389 if (adapter->netdev->features & NETIF_F_RXALL) {
4390
4391
4392
4393 rctl |= (E1000_RCTL_SBP |
4394 E1000_RCTL_BAM |
4395 E1000_RCTL_PMCF);
4396
4397 rctl &= ~(E1000_RCTL_DPF |
4398 E1000_RCTL_CFIEN);
4399
4400
4401
4402 }
4403
4404 wr32(E1000_RCTL, rctl);
4405}
4406
4407static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4408 int vfn)
4409{
4410 struct e1000_hw *hw = &adapter->hw;
4411 u32 vmolr;
4412
4413 if (size > MAX_JUMBO_FRAME_SIZE)
4414 size = MAX_JUMBO_FRAME_SIZE;
4415
4416 vmolr = rd32(E1000_VMOLR(vfn));
4417 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4418 vmolr |= size | E1000_VMOLR_LPE;
4419 wr32(E1000_VMOLR(vfn), vmolr);
4420
4421 return 0;
4422}
4423
4424static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4425 int vfn, bool enable)
4426{
4427 struct e1000_hw *hw = &adapter->hw;
4428 u32 val, reg;
4429
4430 if (hw->mac.type < e1000_82576)
4431 return;
4432
4433 if (hw->mac.type == e1000_i350)
4434 reg = E1000_DVMOLR(vfn);
4435 else
4436 reg = E1000_VMOLR(vfn);
4437
4438 val = rd32(reg);
4439 if (enable)
4440 val |= E1000_VMOLR_STRVLAN;
4441 else
4442 val &= ~(E1000_VMOLR_STRVLAN);
4443 wr32(reg, val);
4444}
4445
4446static inline void igb_set_vmolr(struct igb_adapter *adapter,
4447 int vfn, bool aupe)
4448{
4449 struct e1000_hw *hw = &adapter->hw;
4450 u32 vmolr;
4451
4452
4453
4454
4455 if (hw->mac.type < e1000_82576)
4456 return;
4457
4458 vmolr = rd32(E1000_VMOLR(vfn));
4459 if (aupe)
4460 vmolr |= E1000_VMOLR_AUPE;
4461 else
4462 vmolr &= ~(E1000_VMOLR_AUPE);
4463
4464
4465 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4466
4467 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
4468 vmolr |= E1000_VMOLR_RSSE;
4469
4470
4471
4472 if (vfn <= adapter->vfs_allocated_count)
4473 vmolr |= E1000_VMOLR_BAM;
4474
4475 wr32(E1000_VMOLR(vfn), vmolr);
4476}
4477
4478
4479
4480
4481
4482
4483
4484
4485void igb_configure_rx_ring(struct igb_adapter *adapter,
4486 struct igb_ring *ring)
4487{
4488 struct e1000_hw *hw = &adapter->hw;
4489 union e1000_adv_rx_desc *rx_desc;
4490 u64 rdba = ring->dma;
4491 int reg_idx = ring->reg_idx;
4492 u32 srrctl = 0, rxdctl = 0;
4493
4494
4495 wr32(E1000_RXDCTL(reg_idx), 0);
4496
4497
4498 wr32(E1000_RDBAL(reg_idx),
4499 rdba & 0x00000000ffffffffULL);
4500 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4501 wr32(E1000_RDLEN(reg_idx),
4502 ring->count * sizeof(union e1000_adv_rx_desc));
4503
4504
4505 ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
4506 wr32(E1000_RDH(reg_idx), 0);
4507 writel(0, ring->tail);
4508
4509
4510 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4511 if (ring_uses_large_buffer(ring))
4512 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4513 else
4514 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4515 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4516 if (hw->mac.type >= e1000_82580)
4517 srrctl |= E1000_SRRCTL_TIMESTAMP;
4518
4519 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
4520 srrctl |= E1000_SRRCTL_DROP_EN;
4521
4522 wr32(E1000_SRRCTL(reg_idx), srrctl);
4523
4524
4525 igb_set_vmolr(adapter, reg_idx & 0x7, true);
4526
4527 rxdctl |= IGB_RX_PTHRESH;
4528 rxdctl |= IGB_RX_HTHRESH << 8;
4529 rxdctl |= IGB_RX_WTHRESH << 16;
4530
4531
4532 memset(ring->rx_buffer_info, 0,
4533 sizeof(struct igb_rx_buffer) * ring->count);
4534
4535
4536 rx_desc = IGB_RX_DESC(ring, 0);
4537 rx_desc->wb.upper.length = 0;
4538
4539
4540 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4541 wr32(E1000_RXDCTL(reg_idx), rxdctl);
4542}
4543
4544static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4545 struct igb_ring *rx_ring)
4546{
4547
4548 clear_ring_build_skb_enabled(rx_ring);
4549 clear_ring_uses_large_buffer(rx_ring);
4550
4551 if (adapter->flags & IGB_FLAG_RX_LEGACY)
4552 return;
4553
4554 set_ring_build_skb_enabled(rx_ring);
4555
4556#if (PAGE_SIZE < 8192)
4557 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4558 return;
4559
4560 set_ring_uses_large_buffer(rx_ring);
4561#endif
4562}
4563
4564
4565
4566
4567
4568
4569
4570static void igb_configure_rx(struct igb_adapter *adapter)
4571{
4572 int i;
4573
4574
4575 igb_set_default_mac_filter(adapter);
4576
4577
4578
4579
4580 for (i = 0; i < adapter->num_rx_queues; i++) {
4581 struct igb_ring *rx_ring = adapter->rx_ring[i];
4582
4583 igb_set_rx_buffer_len(adapter, rx_ring);
4584 igb_configure_rx_ring(adapter, rx_ring);
4585 }
4586}
4587
4588
4589
4590
4591
4592
4593
4594void igb_free_tx_resources(struct igb_ring *tx_ring)
4595{
4596 igb_clean_tx_ring(tx_ring);
4597
4598 vfree(tx_ring->tx_buffer_info);
4599 tx_ring->tx_buffer_info = NULL;
4600
4601
4602 if (!tx_ring->desc)
4603 return;
4604
4605 dma_free_coherent(tx_ring->dev, tx_ring->size,
4606 tx_ring->desc, tx_ring->dma);
4607
4608 tx_ring->desc = NULL;
4609}
4610
4611
4612
4613
4614
4615
4616
4617static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4618{
4619 int i;
4620
4621 for (i = 0; i < adapter->num_tx_queues; i++)
4622 if (adapter->tx_ring[i])
4623 igb_free_tx_resources(adapter->tx_ring[i]);
4624}
4625
4626
4627
4628
4629
4630static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4631{
4632 u16 i = tx_ring->next_to_clean;
4633 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4634
4635 while (i != tx_ring->next_to_use) {
4636 union e1000_adv_tx_desc *eop_desc, *tx_desc;
4637
4638
4639 dev_kfree_skb_any(tx_buffer->skb);
4640
4641
4642 dma_unmap_single(tx_ring->dev,
4643 dma_unmap_addr(tx_buffer, dma),
4644 dma_unmap_len(tx_buffer, len),
4645 DMA_TO_DEVICE);
4646
4647
4648 eop_desc = tx_buffer->next_to_watch;
4649 tx_desc = IGB_TX_DESC(tx_ring, i);
4650
4651
4652 while (tx_desc != eop_desc) {
4653 tx_buffer++;
4654 tx_desc++;
4655 i++;
4656 if (unlikely(i == tx_ring->count)) {
4657 i = 0;
4658 tx_buffer = tx_ring->tx_buffer_info;
4659 tx_desc = IGB_TX_DESC(tx_ring, 0);
4660 }
4661
4662
4663 if (dma_unmap_len(tx_buffer, len))
4664 dma_unmap_page(tx_ring->dev,
4665 dma_unmap_addr(tx_buffer, dma),
4666 dma_unmap_len(tx_buffer, len),
4667 DMA_TO_DEVICE);
4668 }
4669
4670
4671 tx_buffer++;
4672 i++;
4673 if (unlikely(i == tx_ring->count)) {
4674 i = 0;
4675 tx_buffer = tx_ring->tx_buffer_info;
4676 }
4677 }
4678
4679
4680 netdev_tx_reset_queue(txring_txq(tx_ring));
4681
4682
4683 tx_ring->next_to_use = 0;
4684 tx_ring->next_to_clean = 0;
4685}
4686
4687
4688
4689
4690
4691static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4692{
4693 int i;
4694
4695 for (i = 0; i < adapter->num_tx_queues; i++)
4696 if (adapter->tx_ring[i])
4697 igb_clean_tx_ring(adapter->tx_ring[i]);
4698}
4699
4700
4701
4702
4703
4704
4705
4706void igb_free_rx_resources(struct igb_ring *rx_ring)
4707{
4708 igb_clean_rx_ring(rx_ring);
4709
4710 vfree(rx_ring->rx_buffer_info);
4711 rx_ring->rx_buffer_info = NULL;
4712
4713
4714 if (!rx_ring->desc)
4715 return;
4716
4717 dma_free_coherent(rx_ring->dev, rx_ring->size,
4718 rx_ring->desc, rx_ring->dma);
4719
4720 rx_ring->desc = NULL;
4721}
4722
4723
4724
4725
4726
4727
4728
4729static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4730{
4731 int i;
4732
4733 for (i = 0; i < adapter->num_rx_queues; i++)
4734 if (adapter->rx_ring[i])
4735 igb_free_rx_resources(adapter->rx_ring[i]);
4736}
4737
4738
4739
4740
4741
4742static void igb_clean_rx_ring(struct igb_ring *rx_ring)
4743{
4744 u16 i = rx_ring->next_to_clean;
4745
4746 if (rx_ring->skb)
4747 dev_kfree_skb(rx_ring->skb);
4748 rx_ring->skb = NULL;
4749
4750
4751 while (i != rx_ring->next_to_alloc) {
4752 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
4753
4754
4755
4756
4757 dma_sync_single_range_for_cpu(rx_ring->dev,
4758 buffer_info->dma,
4759 buffer_info->page_offset,
4760 igb_rx_bufsz(rx_ring),
4761 DMA_FROM_DEVICE);
4762
4763
4764 dma_unmap_page_attrs(rx_ring->dev,
4765 buffer_info->dma,
4766 igb_rx_pg_size(rx_ring),
4767 DMA_FROM_DEVICE,
4768 IGB_RX_DMA_ATTR);
4769 __page_frag_cache_drain(buffer_info->page,
4770 buffer_info->pagecnt_bias);
4771
4772 i++;
4773 if (i == rx_ring->count)
4774 i = 0;
4775 }
4776
4777 rx_ring->next_to_alloc = 0;
4778 rx_ring->next_to_clean = 0;
4779 rx_ring->next_to_use = 0;
4780}
4781
4782
4783
4784
4785
4786static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4787{
4788 int i;
4789
4790 for (i = 0; i < adapter->num_rx_queues; i++)
4791 if (adapter->rx_ring[i])
4792 igb_clean_rx_ring(adapter->rx_ring[i]);
4793}
4794
4795
4796
4797
4798
4799
4800
4801
4802static int igb_set_mac(struct net_device *netdev, void *p)
4803{
4804 struct igb_adapter *adapter = netdev_priv(netdev);
4805 struct e1000_hw *hw = &adapter->hw;
4806 struct sockaddr *addr = p;
4807
4808 if (!is_valid_ether_addr(addr->sa_data))
4809 return -EADDRNOTAVAIL;
4810
4811 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4812 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
4813
4814
4815 igb_set_default_mac_filter(adapter);
4816
4817 return 0;
4818}
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829static int igb_write_mc_addr_list(struct net_device *netdev)
4830{
4831 struct igb_adapter *adapter = netdev_priv(netdev);
4832 struct e1000_hw *hw = &adapter->hw;
4833 struct netdev_hw_addr *ha;
4834 u8 *mta_list;
4835 int i;
4836
4837 if (netdev_mc_empty(netdev)) {
4838
4839 igb_update_mc_addr_list(hw, NULL, 0);
4840 igb_restore_vf_multicasts(adapter);
4841 return 0;
4842 }
4843
4844 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
4845 if (!mta_list)
4846 return -ENOMEM;
4847
4848
4849 i = 0;
4850 netdev_for_each_mc_addr(ha, netdev)
4851 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
4852
4853 igb_update_mc_addr_list(hw, mta_list, i);
4854 kfree(mta_list);
4855
4856 return netdev_mc_count(netdev);
4857}
4858
4859static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
4860{
4861 struct e1000_hw *hw = &adapter->hw;
4862 u32 i, pf_id;
4863
4864 switch (hw->mac.type) {
4865 case e1000_i210:
4866 case e1000_i211:
4867 case e1000_i350:
4868
4869 if (adapter->netdev->features & NETIF_F_NTUPLE)
4870 break;
4871
4872 case e1000_82576:
4873 case e1000_82580:
4874 case e1000_i354:
4875
4876 if (adapter->vfs_allocated_count)
4877 break;
4878
4879 default:
4880 return 1;
4881 }
4882
4883
4884 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
4885 return 0;
4886
4887 if (!adapter->vfs_allocated_count)
4888 goto set_vfta;
4889
4890
4891 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4892
4893 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4894 u32 vlvf = rd32(E1000_VLVF(i));
4895
4896 vlvf |= BIT(pf_id);
4897 wr32(E1000_VLVF(i), vlvf);
4898 }
4899
4900set_vfta:
4901
4902 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
4903 hw->mac.ops.write_vfta(hw, i, ~0U);
4904
4905
4906 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
4907
4908 return 0;
4909}
4910
4911#define VFTA_BLOCK_SIZE 8
4912static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
4913{
4914 struct e1000_hw *hw = &adapter->hw;
4915 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4916 u32 vid_start = vfta_offset * 32;
4917 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4918 u32 i, vid, word, bits, pf_id;
4919
4920
4921 vid = adapter->mng_vlan_id;
4922 if (vid >= vid_start && vid < vid_end)
4923 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4924
4925 if (!adapter->vfs_allocated_count)
4926 goto set_vfta;
4927
4928 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4929
4930 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4931 u32 vlvf = rd32(E1000_VLVF(i));
4932
4933
4934 vid = vlvf & VLAN_VID_MASK;
4935
4936
4937 if (vid < vid_start || vid >= vid_end)
4938 continue;
4939
4940 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
4941
4942 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4943
4944
4945 if (test_bit(vid, adapter->active_vlans))
4946 continue;
4947 }
4948
4949
4950 bits = ~BIT(pf_id);
4951 bits &= rd32(E1000_VLVF(i));
4952 wr32(E1000_VLVF(i), bits);
4953 }
4954
4955set_vfta:
4956
4957 for (i = VFTA_BLOCK_SIZE; i--;) {
4958 vid = (vfta_offset + i) * 32;
4959 word = vid / BITS_PER_LONG;
4960 bits = vid % BITS_PER_LONG;
4961
4962 vfta[i] |= adapter->active_vlans[word] >> bits;
4963
4964 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
4965 }
4966}
4967
4968static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
4969{
4970 u32 i;
4971
4972
4973 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
4974 return;
4975
4976
4977 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
4978
4979 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
4980 igb_scrub_vfta(adapter, i);
4981}
4982
4983
4984
4985
4986
4987
4988
4989
4990
4991
4992static void igb_set_rx_mode(struct net_device *netdev)
4993{
4994 struct igb_adapter *adapter = netdev_priv(netdev);
4995 struct e1000_hw *hw = &adapter->hw;
4996 unsigned int vfn = adapter->vfs_allocated_count;
4997 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
4998 int count;
4999
5000
5001 if (netdev->flags & IFF_PROMISC) {
5002 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
5003 vmolr |= E1000_VMOLR_MPME;
5004
5005
5006 if (hw->mac.type == e1000_82576)
5007 vmolr |= E1000_VMOLR_ROPE;
5008 } else {
5009 if (netdev->flags & IFF_ALLMULTI) {
5010 rctl |= E1000_RCTL_MPE;
5011 vmolr |= E1000_VMOLR_MPME;
5012 } else {
5013
5014
5015
5016
5017 count = igb_write_mc_addr_list(netdev);
5018 if (count < 0) {
5019 rctl |= E1000_RCTL_MPE;
5020 vmolr |= E1000_VMOLR_MPME;
5021 } else if (count) {
5022 vmolr |= E1000_VMOLR_ROMPE;
5023 }
5024 }
5025 }
5026
5027
5028
5029
5030
5031 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
5032 rctl |= E1000_RCTL_UPE;
5033 vmolr |= E1000_VMOLR_ROPE;
5034 }
5035
5036
5037 rctl |= E1000_RCTL_VFE;
5038
5039
5040 if ((netdev->flags & IFF_PROMISC) ||
5041 (netdev->features & NETIF_F_RXALL)) {
5042
5043 if (igb_vlan_promisc_enable(adapter))
5044 rctl &= ~E1000_RCTL_VFE;
5045 } else {
5046 igb_vlan_promisc_disable(adapter);
5047 }
5048
5049
5050 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
5051 E1000_RCTL_VFE);
5052 wr32(E1000_RCTL, rctl);
5053
5054#if (PAGE_SIZE < 8192)
5055 if (!adapter->vfs_allocated_count) {
5056 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5057 rlpml = IGB_MAX_FRAME_BUILD_SKB;
5058 }
5059#endif
5060 wr32(E1000_RLPML, rlpml);
5061
5062
5063
5064
5065
5066
5067 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
5068 return;
5069
5070
5071 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
5072
5073 vmolr |= rd32(E1000_VMOLR(vfn)) &
5074 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
5075
5076
5077 vmolr &= ~E1000_VMOLR_RLPML_MASK;
5078#if (PAGE_SIZE < 8192)
5079 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5080 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
5081 else
5082#endif
5083 vmolr |= MAX_JUMBO_FRAME_SIZE;
5084 vmolr |= E1000_VMOLR_LPE;
5085
5086 wr32(E1000_VMOLR(vfn), vmolr);
5087
5088 igb_restore_vf_multicasts(adapter);
5089}
5090
5091static void igb_check_wvbr(struct igb_adapter *adapter)
5092{
5093 struct e1000_hw *hw = &adapter->hw;
5094 u32 wvbr = 0;
5095
5096 switch (hw->mac.type) {
5097 case e1000_82576:
5098 case e1000_i350:
5099 wvbr = rd32(E1000_WVBR);
5100 if (!wvbr)
5101 return;
5102 break;
5103 default:
5104 break;
5105 }
5106
5107 adapter->wvbr |= wvbr;
5108}
5109
5110#define IGB_STAGGERED_QUEUE_OFFSET 8
5111
5112static void igb_spoof_check(struct igb_adapter *adapter)
5113{
5114 int j;
5115
5116 if (!adapter->wvbr)
5117 return;
5118
5119 for (j = 0; j < adapter->vfs_allocated_count; j++) {
5120 if (adapter->wvbr & BIT(j) ||
5121 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
5122 dev_warn(&adapter->pdev->dev,
5123 "Spoof event(s) detected on VF %d\n", j);
5124 adapter->wvbr &=
5125 ~(BIT(j) |
5126 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
5127 }
5128 }
5129}
5130
5131
5132
5133
5134static void igb_update_phy_info(struct timer_list *t)
5135{
5136 struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5137 igb_get_phy_info(&adapter->hw);
5138}
5139
5140
5141
5142
5143
5144bool igb_has_link(struct igb_adapter *adapter)
5145{
5146 struct e1000_hw *hw = &adapter->hw;
5147 bool link_active = false;
5148
5149
5150
5151
5152
5153
5154 switch (hw->phy.media_type) {
5155 case e1000_media_type_copper:
5156 if (!hw->mac.get_link_status)
5157 return true;
5158
5159 case e1000_media_type_internal_serdes:
5160 hw->mac.ops.check_for_link(hw);
5161 link_active = !hw->mac.get_link_status;
5162 break;
5163 default:
5164 case e1000_media_type_unknown:
5165 break;
5166 }
5167
5168 if (((hw->mac.type == e1000_i210) ||
5169 (hw->mac.type == e1000_i211)) &&
5170 (hw->phy.id == I210_I_PHY_ID)) {
5171 if (!netif_carrier_ok(adapter->netdev)) {
5172 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5173 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
5174 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
5175 adapter->link_check_timeout = jiffies;
5176 }
5177 }
5178
5179 return link_active;
5180}
5181
5182static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
5183{
5184 bool ret = false;
5185 u32 ctrl_ext, thstat;
5186
5187
5188 if (hw->mac.type == e1000_i350) {
5189 thstat = rd32(E1000_THSTAT);
5190 ctrl_ext = rd32(E1000_CTRL_EXT);
5191
5192 if ((hw->phy.media_type == e1000_media_type_copper) &&
5193 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
5194 ret = !!(thstat & event);
5195 }
5196
5197 return ret;
5198}
5199
5200
5201
5202
5203
5204
5205static void igb_check_lvmmc(struct igb_adapter *adapter)
5206{
5207 struct e1000_hw *hw = &adapter->hw;
5208 u32 lvmmc;
5209
5210 lvmmc = rd32(E1000_LVMMC);
5211 if (lvmmc) {
5212 if (unlikely(net_ratelimit())) {
5213 netdev_warn(adapter->netdev,
5214 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
5215 lvmmc);
5216 }
5217 }
5218}
5219
5220
5221
5222
5223
5224static void igb_watchdog(struct timer_list *t)
5225{
5226 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5227
5228 schedule_work(&adapter->watchdog_task);
5229}
5230
5231static void igb_watchdog_task(struct work_struct *work)
5232{
5233 struct igb_adapter *adapter = container_of(work,
5234 struct igb_adapter,
5235 watchdog_task);
5236 struct e1000_hw *hw = &adapter->hw;
5237 struct e1000_phy_info *phy = &hw->phy;
5238 struct net_device *netdev = adapter->netdev;
5239 u32 link;
5240 int i;
5241 u32 connsw;
5242 u16 phy_data, retry_count = 20;
5243
5244 link = igb_has_link(adapter);
5245
5246 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
5247 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5248 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5249 else
5250 link = false;
5251 }
5252
5253
5254 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5255 if (hw->phy.media_type == e1000_media_type_copper) {
5256 connsw = rd32(E1000_CONNSW);
5257 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
5258 link = 0;
5259 }
5260 }
5261 if (link) {
5262
5263 if (hw->dev_spec._82575.media_changed) {
5264 hw->dev_spec._82575.media_changed = false;
5265 adapter->flags |= IGB_FLAG_MEDIA_RESET;
5266 igb_reset(adapter);
5267 }
5268
5269 pm_runtime_resume(netdev->dev.parent);
5270
5271 if (!netif_carrier_ok(netdev)) {
5272 u32 ctrl;
5273
5274 hw->mac.ops.get_speed_and_duplex(hw,
5275 &adapter->link_speed,
5276 &adapter->link_duplex);
5277
5278 ctrl = rd32(E1000_CTRL);
5279
5280 netdev_info(netdev,
5281 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5282 netdev->name,
5283 adapter->link_speed,
5284 adapter->link_duplex == FULL_DUPLEX ?
5285 "Full" : "Half",
5286 (ctrl & E1000_CTRL_TFCE) &&
5287 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
5288 (ctrl & E1000_CTRL_RFCE) ? "RX" :
5289 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
5290
5291
5292 if ((adapter->flags & IGB_FLAG_EEE) &&
5293 (adapter->link_duplex == HALF_DUPLEX)) {
5294 dev_info(&adapter->pdev->dev,
5295 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
5296 adapter->hw.dev_spec._82575.eee_disable = true;
5297 adapter->flags &= ~IGB_FLAG_EEE;
5298 }
5299
5300
5301 igb_check_downshift(hw);
5302 if (phy->speed_downgraded)
5303 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5304
5305
5306 if (igb_thermal_sensor_event(hw,
5307 E1000_THSTAT_LINK_THROTTLE))
5308 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
5309
5310
5311 adapter->tx_timeout_factor = 1;
5312 switch (adapter->link_speed) {
5313 case SPEED_10:
5314 adapter->tx_timeout_factor = 14;
5315 break;
5316 case SPEED_100:
5317
5318 break;
5319 }
5320
5321 if (adapter->link_speed != SPEED_1000)
5322 goto no_wait;
5323
5324
5325retry_read_status:
5326 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
5327 &phy_data)) {
5328 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5329 retry_count) {
5330 msleep(100);
5331 retry_count--;
5332 goto retry_read_status;
5333 } else if (!retry_count) {
5334 dev_err(&adapter->pdev->dev, "exceed max 2 second\n");
5335 }
5336 } else {
5337 dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n");
5338 }
5339no_wait:
5340 netif_carrier_on(netdev);
5341
5342 igb_ping_all_vfs(adapter);
5343 igb_check_vf_rate_limit(adapter);
5344
5345
5346 if (!test_bit(__IGB_DOWN, &adapter->state))
5347 mod_timer(&adapter->phy_info_timer,
5348 round_jiffies(jiffies + 2 * HZ));
5349 }
5350 } else {
5351 if (netif_carrier_ok(netdev)) {
5352 adapter->link_speed = 0;
5353 adapter->link_duplex = 0;
5354
5355
5356 if (igb_thermal_sensor_event(hw,
5357 E1000_THSTAT_PWR_DOWN)) {
5358 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5359 }
5360
5361
5362 netdev_info(netdev, "igb: %s NIC Link is Down\n",
5363 netdev->name);
5364 netif_carrier_off(netdev);
5365
5366 igb_ping_all_vfs(adapter);
5367
5368
5369 if (!test_bit(__IGB_DOWN, &adapter->state))
5370 mod_timer(&adapter->phy_info_timer,
5371 round_jiffies(jiffies + 2 * HZ));
5372
5373
5374 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5375 igb_check_swap_media(adapter);
5376 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5377 schedule_work(&adapter->reset_task);
5378
5379 return;
5380 }
5381 }
5382 pm_schedule_suspend(netdev->dev.parent,
5383 MSEC_PER_SEC * 5);
5384
5385
5386 } else if (!netif_carrier_ok(netdev) &&
5387 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5388 igb_check_swap_media(adapter);
5389 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5390 schedule_work(&adapter->reset_task);
5391
5392 return;
5393 }
5394 }
5395 }
5396
5397 spin_lock(&adapter->stats64_lock);
5398 igb_update_stats(adapter);
5399 spin_unlock(&adapter->stats64_lock);
5400
5401 for (i = 0; i < adapter->num_tx_queues; i++) {
5402 struct igb_ring *tx_ring = adapter->tx_ring[i];
5403 if (!netif_carrier_ok(netdev)) {
5404
5405
5406
5407
5408
5409 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5410 adapter->tx_timeout_count++;
5411 schedule_work(&adapter->reset_task);
5412
5413 return;
5414 }
5415 }
5416
5417
5418 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5419 }
5420
5421
5422 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5423 u32 eics = 0;
5424
5425 for (i = 0; i < adapter->num_q_vectors; i++)
5426 eics |= adapter->q_vector[i]->eims_value;
5427 wr32(E1000_EICS, eics);
5428 } else {
5429 wr32(E1000_ICS, E1000_ICS_RXDMT0);
5430 }
5431
5432 igb_spoof_check(adapter);
5433 igb_ptp_rx_hang(adapter);
5434 igb_ptp_tx_hang(adapter);
5435
5436
5437 if ((adapter->hw.mac.type == e1000_i350) ||
5438 (adapter->hw.mac.type == e1000_i354))
5439 igb_check_lvmmc(adapter);
5440
5441
5442 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5443 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5444 mod_timer(&adapter->watchdog_timer,
5445 round_jiffies(jiffies + HZ));
5446 else
5447 mod_timer(&adapter->watchdog_timer,
5448 round_jiffies(jiffies + 2 * HZ));
5449 }
5450}
5451
5452enum latency_range {
5453 lowest_latency = 0,
5454 low_latency = 1,
5455 bulk_latency = 2,
5456 latency_invalid = 255
5457};
5458
5459
5460
5461
5462
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5475{
5476 int new_val = q_vector->itr_val;
5477 int avg_wire_size = 0;
5478 struct igb_adapter *adapter = q_vector->adapter;
5479 unsigned int packets;
5480
5481
5482
5483
5484 if (adapter->link_speed != SPEED_1000) {
5485 new_val = IGB_4K_ITR;
5486 goto set_itr_val;
5487 }
5488
5489 packets = q_vector->rx.total_packets;
5490 if (packets)
5491 avg_wire_size = q_vector->rx.total_bytes / packets;
5492
5493 packets = q_vector->tx.total_packets;
5494 if (packets)
5495 avg_wire_size = max_t(u32, avg_wire_size,
5496 q_vector->tx.total_bytes / packets);
5497
5498
5499 if (!avg_wire_size)
5500 goto clear_counts;
5501
5502
5503 avg_wire_size += 24;
5504
5505
5506 avg_wire_size = min(avg_wire_size, 3000);
5507
5508
5509 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5510 new_val = avg_wire_size / 3;
5511 else
5512 new_val = avg_wire_size / 2;
5513
5514
5515 if (new_val < IGB_20K_ITR &&
5516 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5517 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5518 new_val = IGB_20K_ITR;
5519
5520set_itr_val:
5521 if (new_val != q_vector->itr_val) {
5522 q_vector->itr_val = new_val;
5523 q_vector->set_itr = 1;
5524 }
5525clear_counts:
5526 q_vector->rx.total_bytes = 0;
5527 q_vector->rx.total_packets = 0;
5528 q_vector->tx.total_bytes = 0;
5529 q_vector->tx.total_packets = 0;
5530}
5531
5532
5533
5534
5535
5536
5537
5538
5539
5540
5541
5542
5543
5544
5545
5546
5547
5548static void igb_update_itr(struct igb_q_vector *q_vector,
5549 struct igb_ring_container *ring_container)
5550{
5551 unsigned int packets = ring_container->total_packets;
5552 unsigned int bytes = ring_container->total_bytes;
5553 u8 itrval = ring_container->itr;
5554
5555
5556 if (packets == 0)
5557 return;
5558
5559 switch (itrval) {
5560 case lowest_latency:
5561
5562 if (bytes/packets > 8000)
5563 itrval = bulk_latency;
5564 else if ((packets < 5) && (bytes > 512))
5565 itrval = low_latency;
5566 break;
5567 case low_latency:
5568 if (bytes > 10000) {
5569
5570 if (bytes/packets > 8000)
5571 itrval = bulk_latency;
5572 else if ((packets < 10) || ((bytes/packets) > 1200))
5573 itrval = bulk_latency;
5574 else if ((packets > 35))
5575 itrval = lowest_latency;
5576 } else if (bytes/packets > 2000) {
5577 itrval = bulk_latency;
5578 } else if (packets <= 2 && bytes < 512) {
5579 itrval = lowest_latency;
5580 }
5581 break;
5582 case bulk_latency:
5583 if (bytes > 25000) {
5584 if (packets > 35)
5585 itrval = low_latency;
5586 } else if (bytes < 1500) {
5587 itrval = low_latency;
5588 }
5589 break;
5590 }
5591
5592
5593 ring_container->total_bytes = 0;
5594 ring_container->total_packets = 0;
5595
5596
5597 ring_container->itr = itrval;
5598}
5599
5600static void igb_set_itr(struct igb_q_vector *q_vector)
5601{
5602 struct igb_adapter *adapter = q_vector->adapter;
5603 u32 new_itr = q_vector->itr_val;
5604 u8 current_itr = 0;
5605
5606
5607 if (adapter->link_speed != SPEED_1000) {
5608 current_itr = 0;
5609 new_itr = IGB_4K_ITR;
5610 goto set_itr_now;
5611 }
5612
5613 igb_update_itr(q_vector, &q_vector->tx);
5614 igb_update_itr(q_vector, &q_vector->rx);
5615
5616 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5617
5618
5619 if (current_itr == lowest_latency &&
5620 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5621 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5622 current_itr = low_latency;
5623
5624 switch (current_itr) {
5625
5626 case lowest_latency:
5627 new_itr = IGB_70K_ITR;
5628 break;
5629 case low_latency:
5630 new_itr = IGB_20K_ITR;
5631 break;
5632 case bulk_latency:
5633 new_itr = IGB_4K_ITR;
5634 break;
5635 default:
5636 break;
5637 }
5638
5639set_itr_now:
5640 if (new_itr != q_vector->itr_val) {
5641
5642
5643
5644
5645 new_itr = new_itr > q_vector->itr_val ?
5646 max((new_itr * q_vector->itr_val) /
5647 (new_itr + (q_vector->itr_val >> 2)),
5648 new_itr) : new_itr;
5649
5650
5651
5652
5653
5654
5655 q_vector->itr_val = new_itr;
5656 q_vector->set_itr = 1;
5657 }
5658}
5659
5660static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5661 struct igb_tx_buffer *first,
5662 u32 vlan_macip_lens, u32 type_tucmd,
5663 u32 mss_l4len_idx)
5664{
5665 struct e1000_adv_tx_context_desc *context_desc;
5666 u16 i = tx_ring->next_to_use;
5667 struct timespec64 ts;
5668
5669 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5670
5671 i++;
5672 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5673
5674
5675 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5676
5677
5678 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5679 mss_l4len_idx |= tx_ring->reg_idx << 4;
5680
5681 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5682 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
5683 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5684
5685
5686
5687
5688 if (tx_ring->launchtime_enable) {
5689 ts = ns_to_timespec64(first->skb->tstamp);
5690 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5691 } else {
5692 context_desc->seqnum_seed = 0;
5693 }
5694}
5695
5696static int igb_tso(struct igb_ring *tx_ring,
5697 struct igb_tx_buffer *first,
5698 u8 *hdr_len)
5699{
5700 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5701 struct sk_buff *skb = first->skb;
5702 union {
5703 struct iphdr *v4;
5704 struct ipv6hdr *v6;
5705 unsigned char *hdr;
5706 } ip;
5707 union {
5708 struct tcphdr *tcp;
5709 unsigned char *hdr;
5710 } l4;
5711 u32 paylen, l4_offset;
5712 int err;
5713
5714 if (skb->ip_summed != CHECKSUM_PARTIAL)
5715 return 0;
5716
5717 if (!skb_is_gso(skb))
5718 return 0;
5719
5720 err = skb_cow_head(skb, 0);
5721 if (err < 0)
5722 return err;
5723
5724 ip.hdr = skb_network_header(skb);
5725 l4.hdr = skb_checksum_start(skb);
5726
5727
5728 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5729
5730
5731 if (ip.v4->version == 4) {
5732 unsigned char *csum_start = skb_checksum_start(skb);
5733 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
5734
5735
5736
5737
5738 ip.v4->check = csum_fold(csum_partial(trans_start,
5739 csum_start - trans_start,
5740 0));
5741 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
5742
5743 ip.v4->tot_len = 0;
5744 first->tx_flags |= IGB_TX_FLAGS_TSO |
5745 IGB_TX_FLAGS_CSUM |
5746 IGB_TX_FLAGS_IPV4;
5747 } else {
5748 ip.v6->payload_len = 0;
5749 first->tx_flags |= IGB_TX_FLAGS_TSO |
5750 IGB_TX_FLAGS_CSUM;
5751 }
5752
5753
5754 l4_offset = l4.hdr - skb->data;
5755
5756
5757 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
5758
5759
5760 paylen = skb->len - l4_offset;
5761 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
5762
5763
5764 first->gso_segs = skb_shinfo(skb)->gso_segs;
5765 first->bytecount += (first->gso_segs - 1) * *hdr_len;
5766
5767
5768 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
5769 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
5770
5771
5772 vlan_macip_lens = l4.hdr - ip.hdr;
5773 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
5774 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5775
5776 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
5777 type_tucmd, mss_l4len_idx);
5778
5779 return 1;
5780}
5781
5782static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
5783{
5784 unsigned int offset = 0;
5785
5786 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
5787
5788 return offset == skb_checksum_start_offset(skb);
5789}
5790
5791static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5792{
5793 struct sk_buff *skb = first->skb;
5794 u32 vlan_macip_lens = 0;
5795 u32 type_tucmd = 0;
5796
5797 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5798csum_failed:
5799 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
5800 !tx_ring->launchtime_enable)
5801 return;
5802 goto no_csum;
5803 }
5804
5805 switch (skb->csum_offset) {
5806 case offsetof(struct tcphdr, check):
5807 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5808
5809 case offsetof(struct udphdr, check):
5810 break;
5811 case offsetof(struct sctphdr, checksum):
5812
5813 if (((first->protocol == htons(ETH_P_IP)) &&
5814 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
5815 ((first->protocol == htons(ETH_P_IPV6)) &&
5816 igb_ipv6_csum_is_sctp(skb))) {
5817 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
5818 break;
5819 }
5820
5821 default:
5822 skb_checksum_help(skb);
5823 goto csum_failed;
5824 }
5825
5826
5827 first->tx_flags |= IGB_TX_FLAGS_CSUM;
5828 vlan_macip_lens = skb_checksum_start_offset(skb) -
5829 skb_network_offset(skb);
5830no_csum:
5831 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
5832 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5833
5834 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
5835}
5836
5837#define IGB_SET_FLAG(_input, _flag, _result) \
5838 ((_flag <= _result) ? \
5839 ((u32)(_input & _flag) * (_result / _flag)) : \
5840 ((u32)(_input & _flag) / (_flag / _result)))
5841
5842static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
5843{
5844
5845 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
5846 E1000_ADVTXD_DCMD_DEXT |
5847 E1000_ADVTXD_DCMD_IFCS;
5848
5849
5850 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
5851 (E1000_ADVTXD_DCMD_VLE));
5852
5853
5854 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
5855 (E1000_ADVTXD_DCMD_TSE));
5856
5857
5858 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
5859 (E1000_ADVTXD_MAC_TSTAMP));
5860
5861
5862 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
5863
5864 return cmd_type;
5865}
5866
5867static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
5868 union e1000_adv_tx_desc *tx_desc,
5869 u32 tx_flags, unsigned int paylen)
5870{
5871 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
5872
5873
5874 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5875 olinfo_status |= tx_ring->reg_idx << 4;
5876
5877
5878 olinfo_status |= IGB_SET_FLAG(tx_flags,
5879 IGB_TX_FLAGS_CSUM,
5880 (E1000_TXD_POPTS_TXSM << 8));
5881
5882
5883 olinfo_status |= IGB_SET_FLAG(tx_flags,
5884 IGB_TX_FLAGS_IPV4,
5885 (E1000_TXD_POPTS_IXSM << 8));
5886
5887 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
5888}
5889
5890static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5891{
5892 struct net_device *netdev = tx_ring->netdev;
5893
5894 netif_stop_subqueue(netdev, tx_ring->queue_index);
5895
5896
5897
5898
5899
5900 smp_mb();
5901
5902
5903
5904
5905 if (igb_desc_unused(tx_ring) < size)
5906 return -EBUSY;
5907
5908
5909 netif_wake_subqueue(netdev, tx_ring->queue_index);
5910
5911 u64_stats_update_begin(&tx_ring->tx_syncp2);
5912 tx_ring->tx_stats.restart_queue2++;
5913 u64_stats_update_end(&tx_ring->tx_syncp2);
5914
5915 return 0;
5916}
5917
5918static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5919{
5920 if (igb_desc_unused(tx_ring) >= size)
5921 return 0;
5922 return __igb_maybe_stop_tx(tx_ring, size);
5923}
5924
5925static int igb_tx_map(struct igb_ring *tx_ring,
5926 struct igb_tx_buffer *first,
5927 const u8 hdr_len)
5928{
5929 struct sk_buff *skb = first->skb;
5930 struct igb_tx_buffer *tx_buffer;
5931 union e1000_adv_tx_desc *tx_desc;
5932 struct skb_frag_struct *frag;
5933 dma_addr_t dma;
5934 unsigned int data_len, size;
5935 u32 tx_flags = first->tx_flags;
5936 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
5937 u16 i = tx_ring->next_to_use;
5938
5939 tx_desc = IGB_TX_DESC(tx_ring, i);
5940
5941 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
5942
5943 size = skb_headlen(skb);
5944 data_len = skb->data_len;
5945
5946 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
5947
5948 tx_buffer = first;
5949
5950 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
5951 if (dma_mapping_error(tx_ring->dev, dma))
5952 goto dma_error;
5953
5954
5955 dma_unmap_len_set(tx_buffer, len, size);
5956 dma_unmap_addr_set(tx_buffer, dma, dma);
5957
5958 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5959
5960 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
5961 tx_desc->read.cmd_type_len =
5962 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
5963
5964 i++;
5965 tx_desc++;
5966 if (i == tx_ring->count) {
5967 tx_desc = IGB_TX_DESC(tx_ring, 0);
5968 i = 0;
5969 }
5970 tx_desc->read.olinfo_status = 0;
5971
5972 dma += IGB_MAX_DATA_PER_TXD;
5973 size -= IGB_MAX_DATA_PER_TXD;
5974
5975 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5976 }
5977
5978 if (likely(!data_len))
5979 break;
5980
5981 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
5982
5983 i++;
5984 tx_desc++;
5985 if (i == tx_ring->count) {
5986 tx_desc = IGB_TX_DESC(tx_ring, 0);
5987 i = 0;
5988 }
5989 tx_desc->read.olinfo_status = 0;
5990
5991 size = skb_frag_size(frag);
5992 data_len -= size;
5993
5994 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
5995 size, DMA_TO_DEVICE);
5996
5997 tx_buffer = &tx_ring->tx_buffer_info[i];
5998 }
5999
6000
6001 cmd_type |= size | IGB_TXD_DCMD;
6002 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6003
6004 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6005
6006
6007 first->time_stamp = jiffies;
6008
6009 skb_tx_timestamp(skb);
6010
6011
6012
6013
6014
6015
6016
6017
6018 dma_wmb();
6019
6020
6021 first->next_to_watch = tx_desc;
6022
6023 i++;
6024 if (i == tx_ring->count)
6025 i = 0;
6026
6027 tx_ring->next_to_use = i;
6028
6029
6030 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6031
6032 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
6033 writel(i, tx_ring->tail);
6034 }
6035 return 0;
6036
6037dma_error:
6038 dev_err(tx_ring->dev, "TX DMA map failed\n");
6039 tx_buffer = &tx_ring->tx_buffer_info[i];
6040
6041
6042 while (tx_buffer != first) {
6043 if (dma_unmap_len(tx_buffer, len))
6044 dma_unmap_page(tx_ring->dev,
6045 dma_unmap_addr(tx_buffer, dma),
6046 dma_unmap_len(tx_buffer, len),
6047 DMA_TO_DEVICE);
6048 dma_unmap_len_set(tx_buffer, len, 0);
6049
6050 if (i-- == 0)
6051 i += tx_ring->count;
6052 tx_buffer = &tx_ring->tx_buffer_info[i];
6053 }
6054
6055 if (dma_unmap_len(tx_buffer, len))
6056 dma_unmap_single(tx_ring->dev,
6057 dma_unmap_addr(tx_buffer, dma),
6058 dma_unmap_len(tx_buffer, len),
6059 DMA_TO_DEVICE);
6060 dma_unmap_len_set(tx_buffer, len, 0);
6061
6062 dev_kfree_skb_any(tx_buffer->skb);
6063 tx_buffer->skb = NULL;
6064
6065 tx_ring->next_to_use = i;
6066
6067 return -1;
6068}
6069
6070netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
6071 struct igb_ring *tx_ring)
6072{
6073 struct igb_tx_buffer *first;
6074 int tso;
6075 u32 tx_flags = 0;
6076 unsigned short f;
6077 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6078 __be16 protocol = vlan_get_protocol(skb);
6079 u8 hdr_len = 0;
6080
6081
6082
6083
6084
6085
6086
6087 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6088 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6089
6090 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
6091
6092 return NETDEV_TX_BUSY;
6093 }
6094
6095
6096 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6097 first->skb = skb;
6098 first->bytecount = skb->len;
6099 first->gso_segs = 1;
6100
6101 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6102 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6103
6104 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
6105 !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
6106 &adapter->state)) {
6107 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6108 tx_flags |= IGB_TX_FLAGS_TSTAMP;
6109
6110 adapter->ptp_tx_skb = skb_get(skb);
6111 adapter->ptp_tx_start = jiffies;
6112 if (adapter->hw.mac.type == e1000_82576)
6113 schedule_work(&adapter->ptp_tx_work);
6114 } else {
6115 adapter->tx_hwtstamp_skipped++;
6116 }
6117 }
6118
6119 if (skb_vlan_tag_present(skb)) {
6120 tx_flags |= IGB_TX_FLAGS_VLAN;
6121 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
6122 }
6123
6124
6125 first->tx_flags = tx_flags;
6126 first->protocol = protocol;
6127
6128 tso = igb_tso(tx_ring, first, &hdr_len);
6129 if (tso < 0)
6130 goto out_drop;
6131 else if (!tso)
6132 igb_tx_csum(tx_ring, first);
6133
6134 if (igb_tx_map(tx_ring, first, hdr_len))
6135 goto cleanup_tx_tstamp;
6136
6137 return NETDEV_TX_OK;
6138
6139out_drop:
6140 dev_kfree_skb_any(first->skb);
6141 first->skb = NULL;
6142cleanup_tx_tstamp:
6143 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
6144 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6145
6146 dev_kfree_skb_any(adapter->ptp_tx_skb);
6147 adapter->ptp_tx_skb = NULL;
6148 if (adapter->hw.mac.type == e1000_82576)
6149 cancel_work_sync(&adapter->ptp_tx_work);
6150 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
6151 }
6152
6153 return NETDEV_TX_OK;
6154}
6155
6156static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
6157 struct sk_buff *skb)
6158{
6159 unsigned int r_idx = skb->queue_mapping;
6160
6161 if (r_idx >= adapter->num_tx_queues)
6162 r_idx = r_idx % adapter->num_tx_queues;
6163
6164 return adapter->tx_ring[r_idx];
6165}
6166
6167static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
6168 struct net_device *netdev)
6169{
6170 struct igb_adapter *adapter = netdev_priv(netdev);
6171
6172
6173
6174
6175 if (skb_put_padto(skb, 17))
6176 return NETDEV_TX_OK;
6177
6178 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
6179}
6180
6181
6182
6183
6184
6185static void igb_tx_timeout(struct net_device *netdev)
6186{
6187 struct igb_adapter *adapter = netdev_priv(netdev);
6188 struct e1000_hw *hw = &adapter->hw;
6189
6190
6191 adapter->tx_timeout_count++;
6192
6193 if (hw->mac.type >= e1000_82580)
6194 hw->dev_spec._82575.global_device_reset = true;
6195
6196 schedule_work(&adapter->reset_task);
6197 wr32(E1000_EICS,
6198 (adapter->eims_enable_mask & ~adapter->eims_other));
6199}
6200
6201static void igb_reset_task(struct work_struct *work)
6202{
6203 struct igb_adapter *adapter;
6204 adapter = container_of(work, struct igb_adapter, reset_task);
6205
6206 igb_dump(adapter);
6207 netdev_err(adapter->netdev, "Reset adapter\n");
6208 igb_reinit_locked(adapter);
6209}
6210
6211
6212
6213
6214
6215
6216static void igb_get_stats64(struct net_device *netdev,
6217 struct rtnl_link_stats64 *stats)
6218{
6219 struct igb_adapter *adapter = netdev_priv(netdev);
6220
6221 spin_lock(&adapter->stats64_lock);
6222 igb_update_stats(adapter);
6223 memcpy(stats, &adapter->stats64, sizeof(*stats));
6224 spin_unlock(&adapter->stats64_lock);
6225}
6226
6227
6228
6229
6230
6231
6232
6233
6234static int igb_change_mtu(struct net_device *netdev, int new_mtu)
6235{
6236 struct igb_adapter *adapter = netdev_priv(netdev);
6237 struct pci_dev *pdev = adapter->pdev;
6238 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
6239
6240
6241 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
6242 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
6243
6244 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
6245 usleep_range(1000, 2000);
6246
6247
6248 adapter->max_frame_size = max_frame;
6249
6250 if (netif_running(netdev))
6251 igb_down(adapter);
6252
6253 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
6254 netdev->mtu, new_mtu);
6255 netdev->mtu = new_mtu;
6256
6257 if (netif_running(netdev))
6258 igb_up(adapter);
6259 else
6260 igb_reset(adapter);
6261
6262 clear_bit(__IGB_RESETTING, &adapter->state);
6263
6264 return 0;
6265}
6266
6267
6268
6269
6270
6271void igb_update_stats(struct igb_adapter *adapter)
6272{
6273 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
6274 struct e1000_hw *hw = &adapter->hw;
6275 struct pci_dev *pdev = adapter->pdev;
6276 u32 reg, mpc;
6277 int i;
6278 u64 bytes, packets;
6279 unsigned int start;
6280 u64 _bytes, _packets;
6281
6282
6283
6284
6285 if (adapter->link_speed == 0)
6286 return;
6287 if (pci_channel_offline(pdev))
6288 return;
6289
6290 bytes = 0;
6291 packets = 0;
6292
6293 rcu_read_lock();
6294 for (i = 0; i < adapter->num_rx_queues; i++) {
6295 struct igb_ring *ring = adapter->rx_ring[i];
6296 u32 rqdpc = rd32(E1000_RQDPC(i));
6297 if (hw->mac.type >= e1000_i210)
6298 wr32(E1000_RQDPC(i), 0);
6299
6300 if (rqdpc) {
6301 ring->rx_stats.drops += rqdpc;
6302 net_stats->rx_fifo_errors += rqdpc;
6303 }
6304
6305 do {
6306 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
6307 _bytes = ring->rx_stats.bytes;
6308 _packets = ring->rx_stats.packets;
6309 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
6310 bytes += _bytes;
6311 packets += _packets;
6312 }
6313
6314 net_stats->rx_bytes = bytes;
6315 net_stats->rx_packets = packets;
6316
6317 bytes = 0;
6318 packets = 0;
6319 for (i = 0; i < adapter->num_tx_queues; i++) {
6320 struct igb_ring *ring = adapter->tx_ring[i];
6321 do {
6322 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
6323 _bytes = ring->tx_stats.bytes;
6324 _packets = ring->tx_stats.packets;
6325 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
6326 bytes += _bytes;
6327 packets += _packets;
6328 }
6329 net_stats->tx_bytes = bytes;
6330 net_stats->tx_packets = packets;
6331 rcu_read_unlock();
6332
6333
6334 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
6335 adapter->stats.gprc += rd32(E1000_GPRC);
6336 adapter->stats.gorc += rd32(E1000_GORCL);
6337 rd32(E1000_GORCH);
6338 adapter->stats.bprc += rd32(E1000_BPRC);
6339 adapter->stats.mprc += rd32(E1000_MPRC);
6340 adapter->stats.roc += rd32(E1000_ROC);
6341
6342 adapter->stats.prc64 += rd32(E1000_PRC64);
6343 adapter->stats.prc127 += rd32(E1000_PRC127);
6344 adapter->stats.prc255 += rd32(E1000_PRC255);
6345 adapter->stats.prc511 += rd32(E1000_PRC511);
6346 adapter->stats.prc1023 += rd32(E1000_PRC1023);
6347 adapter->stats.prc1522 += rd32(E1000_PRC1522);
6348 adapter->stats.symerrs += rd32(E1000_SYMERRS);
6349 adapter->stats.sec += rd32(E1000_SEC);
6350
6351 mpc = rd32(E1000_MPC);
6352 adapter->stats.mpc += mpc;
6353 net_stats->rx_fifo_errors += mpc;
6354 adapter->stats.scc += rd32(E1000_SCC);
6355 adapter->stats.ecol += rd32(E1000_ECOL);
6356 adapter->stats.mcc += rd32(E1000_MCC);
6357 adapter->stats.latecol += rd32(E1000_LATECOL);
6358 adapter->stats.dc += rd32(E1000_DC);
6359 adapter->stats.rlec += rd32(E1000_RLEC);
6360 adapter->stats.xonrxc += rd32(E1000_XONRXC);
6361 adapter->stats.xontxc += rd32(E1000_XONTXC);
6362 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6363 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6364 adapter->stats.fcruc += rd32(E1000_FCRUC);
6365 adapter->stats.gptc += rd32(E1000_GPTC);
6366 adapter->stats.gotc += rd32(E1000_GOTCL);
6367 rd32(E1000_GOTCH);
6368 adapter->stats.rnbc += rd32(E1000_RNBC);
6369 adapter->stats.ruc += rd32(E1000_RUC);
6370 adapter->stats.rfc += rd32(E1000_RFC);
6371 adapter->stats.rjc += rd32(E1000_RJC);
6372 adapter->stats.tor += rd32(E1000_TORH);
6373 adapter->stats.tot += rd32(E1000_TOTH);
6374 adapter->stats.tpr += rd32(E1000_TPR);
6375
6376 adapter->stats.ptc64 += rd32(E1000_PTC64);
6377 adapter->stats.ptc127 += rd32(E1000_PTC127);
6378 adapter->stats.ptc255 += rd32(E1000_PTC255);
6379 adapter->stats.ptc511 += rd32(E1000_PTC511);
6380 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6381 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6382
6383 adapter->stats.mptc += rd32(E1000_MPTC);
6384 adapter->stats.bptc += rd32(E1000_BPTC);
6385
6386 adapter->stats.tpt += rd32(E1000_TPT);
6387 adapter->stats.colc += rd32(E1000_COLC);
6388
6389 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6390
6391 reg = rd32(E1000_CTRL_EXT);
6392 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6393 adapter->stats.rxerrc += rd32(E1000_RXERRC);
6394
6395
6396 if ((hw->mac.type != e1000_i210) &&
6397 (hw->mac.type != e1000_i211))
6398 adapter->stats.tncrs += rd32(E1000_TNCRS);
6399 }
6400
6401 adapter->stats.tsctc += rd32(E1000_TSCTC);
6402 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6403
6404 adapter->stats.iac += rd32(E1000_IAC);
6405 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6406 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6407 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6408 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6409 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6410 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6411 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
6412 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
6413
6414
6415 net_stats->multicast = adapter->stats.mprc;
6416 net_stats->collisions = adapter->stats.colc;
6417
6418
6419
6420
6421
6422
6423 net_stats->rx_errors = adapter->stats.rxerrc +
6424 adapter->stats.crcerrs + adapter->stats.algnerrc +
6425 adapter->stats.ruc + adapter->stats.roc +
6426 adapter->stats.cexterr;
6427 net_stats->rx_length_errors = adapter->stats.ruc +
6428 adapter->stats.roc;
6429 net_stats->rx_crc_errors = adapter->stats.crcerrs;
6430 net_stats->rx_frame_errors = adapter->stats.algnerrc;
6431 net_stats->rx_missed_errors = adapter->stats.mpc;
6432
6433
6434 net_stats->tx_errors = adapter->stats.ecol +
6435 adapter->stats.latecol;
6436 net_stats->tx_aborted_errors = adapter->stats.ecol;
6437 net_stats->tx_window_errors = adapter->stats.latecol;
6438 net_stats->tx_carrier_errors = adapter->stats.tncrs;
6439
6440
6441
6442
6443 adapter->stats.mgptc += rd32(E1000_MGTPTC);
6444 adapter->stats.mgprc += rd32(E1000_MGTPRC);
6445 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
6446
6447
6448 reg = rd32(E1000_MANC);
6449 if (reg & E1000_MANC_EN_BMC2OS) {
6450 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6451 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6452 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6453 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6454 }
6455}
6456
6457static void igb_tsync_interrupt(struct igb_adapter *adapter)
6458{
6459 struct e1000_hw *hw = &adapter->hw;
6460 struct ptp_clock_event event;
6461 struct timespec64 ts;
6462 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
6463
6464 if (tsicr & TSINTR_SYS_WRAP) {
6465 event.type = PTP_CLOCK_PPS;
6466 if (adapter->ptp_caps.pps)
6467 ptp_clock_event(adapter->ptp_clock, &event);
6468 ack |= TSINTR_SYS_WRAP;
6469 }
6470
6471 if (tsicr & E1000_TSICR_TXTS) {
6472
6473 schedule_work(&adapter->ptp_tx_work);
6474 ack |= E1000_TSICR_TXTS;
6475 }
6476
6477 if (tsicr & TSINTR_TT0) {
6478 spin_lock(&adapter->tmreg_lock);
6479 ts = timespec64_add(adapter->perout[0].start,
6480 adapter->perout[0].period);
6481
6482 wr32(E1000_TRGTTIML0, ts.tv_nsec);
6483 wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
6484 tsauxc = rd32(E1000_TSAUXC);
6485 tsauxc |= TSAUXC_EN_TT0;
6486 wr32(E1000_TSAUXC, tsauxc);
6487 adapter->perout[0].start = ts;
6488 spin_unlock(&adapter->tmreg_lock);
6489 ack |= TSINTR_TT0;
6490 }
6491
6492 if (tsicr & TSINTR_TT1) {
6493 spin_lock(&adapter->tmreg_lock);
6494 ts = timespec64_add(adapter->perout[1].start,
6495 adapter->perout[1].period);
6496 wr32(E1000_TRGTTIML1, ts.tv_nsec);
6497 wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
6498 tsauxc = rd32(E1000_TSAUXC);
6499 tsauxc |= TSAUXC_EN_TT1;
6500 wr32(E1000_TSAUXC, tsauxc);
6501 adapter->perout[1].start = ts;
6502 spin_unlock(&adapter->tmreg_lock);
6503 ack |= TSINTR_TT1;
6504 }
6505
6506 if (tsicr & TSINTR_AUTT0) {
6507 nsec = rd32(E1000_AUXSTMPL0);
6508 sec = rd32(E1000_AUXSTMPH0);
6509 event.type = PTP_CLOCK_EXTTS;
6510 event.index = 0;
6511 event.timestamp = sec * 1000000000ULL + nsec;
6512 ptp_clock_event(adapter->ptp_clock, &event);
6513 ack |= TSINTR_AUTT0;
6514 }
6515
6516 if (tsicr & TSINTR_AUTT1) {
6517 nsec = rd32(E1000_AUXSTMPL1);
6518 sec = rd32(E1000_AUXSTMPH1);
6519 event.type = PTP_CLOCK_EXTTS;
6520 event.index = 1;
6521 event.timestamp = sec * 1000000000ULL + nsec;
6522 ptp_clock_event(adapter->ptp_clock, &event);
6523 ack |= TSINTR_AUTT1;
6524 }
6525
6526
6527 wr32(E1000_TSICR, ack);
6528}
6529
6530static irqreturn_t igb_msix_other(int irq, void *data)
6531{
6532 struct igb_adapter *adapter = data;
6533 struct e1000_hw *hw = &adapter->hw;
6534 u32 icr = rd32(E1000_ICR);
6535
6536
6537 if (icr & E1000_ICR_DRSTA)
6538 schedule_work(&adapter->reset_task);
6539
6540 if (icr & E1000_ICR_DOUTSYNC) {
6541
6542 adapter->stats.doosync++;
6543
6544
6545
6546
6547 igb_check_wvbr(adapter);
6548 }
6549
6550
6551 if (icr & E1000_ICR_VMMB)
6552 igb_msg_task(adapter);
6553
6554 if (icr & E1000_ICR_LSC) {
6555 hw->mac.get_link_status = 1;
6556
6557 if (!test_bit(__IGB_DOWN, &adapter->state))
6558 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6559 }
6560
6561 if (icr & E1000_ICR_TS)
6562 igb_tsync_interrupt(adapter);
6563
6564 wr32(E1000_EIMS, adapter->eims_other);
6565
6566 return IRQ_HANDLED;
6567}
6568
6569static void igb_write_itr(struct igb_q_vector *q_vector)
6570{
6571 struct igb_adapter *adapter = q_vector->adapter;
6572 u32 itr_val = q_vector->itr_val & 0x7FFC;
6573
6574 if (!q_vector->set_itr)
6575 return;
6576
6577 if (!itr_val)
6578 itr_val = 0x4;
6579
6580 if (adapter->hw.mac.type == e1000_82575)
6581 itr_val |= itr_val << 16;
6582 else
6583 itr_val |= E1000_EITR_CNT_IGNR;
6584
6585 writel(itr_val, q_vector->itr_register);
6586 q_vector->set_itr = 0;
6587}
6588
6589static irqreturn_t igb_msix_ring(int irq, void *data)
6590{
6591 struct igb_q_vector *q_vector = data;
6592
6593
6594 igb_write_itr(q_vector);
6595
6596 napi_schedule(&q_vector->napi);
6597
6598 return IRQ_HANDLED;
6599}
6600
6601#ifdef CONFIG_IGB_DCA
6602static void igb_update_tx_dca(struct igb_adapter *adapter,
6603 struct igb_ring *tx_ring,
6604 int cpu)
6605{
6606 struct e1000_hw *hw = &adapter->hw;
6607 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
6608
6609 if (hw->mac.type != e1000_82575)
6610 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
6611
6612
6613
6614
6615
6616 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
6617 E1000_DCA_TXCTRL_DATA_RRO_EN |
6618 E1000_DCA_TXCTRL_DESC_DCA_EN;
6619
6620 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
6621}
6622
6623static void igb_update_rx_dca(struct igb_adapter *adapter,
6624 struct igb_ring *rx_ring,
6625 int cpu)
6626{
6627 struct e1000_hw *hw = &adapter->hw;
6628 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
6629
6630 if (hw->mac.type != e1000_82575)
6631 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
6632
6633
6634
6635
6636
6637 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
6638 E1000_DCA_RXCTRL_DESC_DCA_EN;
6639
6640 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
6641}
6642
6643static void igb_update_dca(struct igb_q_vector *q_vector)
6644{
6645 struct igb_adapter *adapter = q_vector->adapter;
6646 int cpu = get_cpu();
6647
6648 if (q_vector->cpu == cpu)
6649 goto out_no_update;
6650
6651 if (q_vector->tx.ring)
6652 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
6653
6654 if (q_vector->rx.ring)
6655 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
6656
6657 q_vector->cpu = cpu;
6658out_no_update:
6659 put_cpu();
6660}
6661
6662static void igb_setup_dca(struct igb_adapter *adapter)
6663{
6664 struct e1000_hw *hw = &adapter->hw;
6665 int i;
6666
6667 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
6668 return;
6669
6670
6671 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
6672
6673 for (i = 0; i < adapter->num_q_vectors; i++) {
6674 adapter->q_vector[i]->cpu = -1;
6675 igb_update_dca(adapter->q_vector[i]);
6676 }
6677}
6678
6679static int __igb_notify_dca(struct device *dev, void *data)
6680{
6681 struct net_device *netdev = dev_get_drvdata(dev);
6682 struct igb_adapter *adapter = netdev_priv(netdev);
6683 struct pci_dev *pdev = adapter->pdev;
6684 struct e1000_hw *hw = &adapter->hw;
6685 unsigned long event = *(unsigned long *)data;
6686
6687 switch (event) {
6688 case DCA_PROVIDER_ADD:
6689
6690 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
6691 break;
6692 if (dca_add_requester(dev) == 0) {
6693 adapter->flags |= IGB_FLAG_DCA_ENABLED;
6694 dev_info(&pdev->dev, "DCA enabled\n");
6695 igb_setup_dca(adapter);
6696 break;
6697 }
6698
6699 case DCA_PROVIDER_REMOVE:
6700 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
6701
6702
6703
6704 dca_remove_requester(dev);
6705 dev_info(&pdev->dev, "DCA disabled\n");
6706 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
6707 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
6708 }
6709 break;
6710 }
6711
6712 return 0;
6713}
6714
6715static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
6716 void *p)
6717{
6718 int ret_val;
6719
6720 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
6721 __igb_notify_dca);
6722
6723 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
6724}
6725#endif
6726
6727#ifdef CONFIG_PCI_IOV
6728static int igb_vf_configure(struct igb_adapter *adapter, int vf)
6729{
6730 unsigned char mac_addr[ETH_ALEN];
6731
6732 eth_zero_addr(mac_addr);
6733 igb_set_vf_mac(adapter, vf, mac_addr);
6734
6735
6736 adapter->vf_data[vf].spoofchk_enabled = true;
6737
6738
6739 adapter->vf_data[vf].trusted = false;
6740
6741 return 0;
6742}
6743
6744#endif
6745static void igb_ping_all_vfs(struct igb_adapter *adapter)
6746{
6747 struct e1000_hw *hw = &adapter->hw;
6748 u32 ping;
6749 int i;
6750
6751 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
6752 ping = E1000_PF_CONTROL_MSG;
6753 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
6754 ping |= E1000_VT_MSGTYPE_CTS;
6755 igb_write_mbx(hw, &ping, 1, i);
6756 }
6757}
6758
6759static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
6760{
6761 struct e1000_hw *hw = &adapter->hw;
6762 u32 vmolr = rd32(E1000_VMOLR(vf));
6763 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6764
6765 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
6766 IGB_VF_FLAG_MULTI_PROMISC);
6767 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6768
6769 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
6770 vmolr |= E1000_VMOLR_MPME;
6771 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
6772 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
6773 } else {
6774
6775
6776
6777
6778 if (vf_data->num_vf_mc_hashes > 30) {
6779 vmolr |= E1000_VMOLR_MPME;
6780 } else if (vf_data->num_vf_mc_hashes) {
6781 int j;
6782
6783 vmolr |= E1000_VMOLR_ROMPE;
6784 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6785 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6786 }
6787 }
6788
6789 wr32(E1000_VMOLR(vf), vmolr);
6790
6791
6792 if (*msgbuf & E1000_VT_MSGINFO_MASK)
6793 return -EINVAL;
6794
6795 return 0;
6796}
6797
6798static int igb_set_vf_multicasts(struct igb_adapter *adapter,
6799 u32 *msgbuf, u32 vf)
6800{
6801 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
6802 u16 *hash_list = (u16 *)&msgbuf[1];
6803 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6804 int i;
6805
6806
6807
6808
6809
6810 vf_data->num_vf_mc_hashes = n;
6811
6812
6813 if (n > 30)
6814 n = 30;
6815
6816
6817 for (i = 0; i < n; i++)
6818 vf_data->vf_mc_hashes[i] = hash_list[i];
6819
6820
6821 igb_set_rx_mode(adapter->netdev);
6822
6823 return 0;
6824}
6825
6826static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
6827{
6828 struct e1000_hw *hw = &adapter->hw;
6829 struct vf_data_storage *vf_data;
6830 int i, j;
6831
6832 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6833 u32 vmolr = rd32(E1000_VMOLR(i));
6834
6835 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6836
6837 vf_data = &adapter->vf_data[i];
6838
6839 if ((vf_data->num_vf_mc_hashes > 30) ||
6840 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
6841 vmolr |= E1000_VMOLR_MPME;
6842 } else if (vf_data->num_vf_mc_hashes) {
6843 vmolr |= E1000_VMOLR_ROMPE;
6844 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6845 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6846 }
6847 wr32(E1000_VMOLR(i), vmolr);
6848 }
6849}
6850
6851static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
6852{
6853 struct e1000_hw *hw = &adapter->hw;
6854 u32 pool_mask, vlvf_mask, i;
6855
6856
6857 pool_mask = E1000_VLVF_POOLSEL_MASK;
6858 vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
6859
6860
6861 pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
6862 adapter->vfs_allocated_count);
6863
6864
6865 for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
6866 u32 vlvf = rd32(E1000_VLVF(i));
6867 u32 vfta_mask, vid, vfta;
6868
6869
6870 if (!(vlvf & vlvf_mask))
6871 continue;
6872
6873
6874 vlvf ^= vlvf_mask;
6875
6876
6877 if (vlvf & pool_mask)
6878 goto update_vlvfb;
6879
6880
6881 if (vlvf & E1000_VLVF_POOLSEL_MASK)
6882 goto update_vlvf;
6883
6884 vid = vlvf & E1000_VLVF_VLANID_MASK;
6885 vfta_mask = BIT(vid % 32);
6886
6887
6888 vfta = adapter->shadow_vfta[vid / 32];
6889 if (vfta & vfta_mask)
6890 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
6891update_vlvf:
6892
6893 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6894 vlvf &= E1000_VLVF_POOLSEL_MASK;
6895 else
6896 vlvf = 0;
6897update_vlvfb:
6898
6899 wr32(E1000_VLVF(i), vlvf);
6900 }
6901}
6902
6903static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
6904{
6905 u32 vlvf;
6906 int idx;
6907
6908
6909 if (vlan == 0)
6910 return 0;
6911
6912
6913 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
6914 vlvf = rd32(E1000_VLVF(idx));
6915 if ((vlvf & VLAN_VID_MASK) == vlan)
6916 break;
6917 }
6918
6919 return idx;
6920}
6921
6922static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
6923{
6924 struct e1000_hw *hw = &adapter->hw;
6925 u32 bits, pf_id;
6926 int idx;
6927
6928 idx = igb_find_vlvf_entry(hw, vid);
6929 if (!idx)
6930 return;
6931
6932
6933
6934
6935 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
6936 bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
6937 bits &= rd32(E1000_VLVF(idx));
6938
6939
6940 if (!bits) {
6941 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6942 wr32(E1000_VLVF(idx), BIT(pf_id));
6943 else
6944 wr32(E1000_VLVF(idx), 0);
6945 }
6946}
6947
6948static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
6949 bool add, u32 vf)
6950{
6951 int pf_id = adapter->vfs_allocated_count;
6952 struct e1000_hw *hw = &adapter->hw;
6953 int err;
6954
6955
6956
6957
6958
6959
6960 if (add && test_bit(vid, adapter->active_vlans)) {
6961 err = igb_vfta_set(hw, vid, pf_id, true, false);
6962 if (err)
6963 return err;
6964 }
6965
6966 err = igb_vfta_set(hw, vid, vf, add, false);
6967
6968 if (add && !err)
6969 return err;
6970
6971
6972
6973
6974
6975 if (test_bit(vid, adapter->active_vlans) ||
6976 (adapter->flags & IGB_FLAG_VLAN_PROMISC))
6977 igb_update_pf_vlvf(adapter, vid);
6978
6979 return err;
6980}
6981
6982static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
6983{
6984 struct e1000_hw *hw = &adapter->hw;
6985
6986 if (vid)
6987 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
6988 else
6989 wr32(E1000_VMVIR(vf), 0);
6990}
6991
6992static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
6993 u16 vlan, u8 qos)
6994{
6995 int err;
6996
6997 err = igb_set_vf_vlan(adapter, vlan, true, vf);
6998 if (err)
6999 return err;
7000
7001 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
7002 igb_set_vmolr(adapter, vf, !vlan);
7003
7004
7005 if (vlan != adapter->vf_data[vf].pf_vlan)
7006 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7007 false, vf);
7008
7009 adapter->vf_data[vf].pf_vlan = vlan;
7010 adapter->vf_data[vf].pf_qos = qos;
7011 igb_set_vf_vlan_strip(adapter, vf, true);
7012 dev_info(&adapter->pdev->dev,
7013 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
7014 if (test_bit(__IGB_DOWN, &adapter->state)) {
7015 dev_warn(&adapter->pdev->dev,
7016 "The VF VLAN has been set, but the PF device is not up.\n");
7017 dev_warn(&adapter->pdev->dev,
7018 "Bring the PF device up before attempting to use the VF device.\n");
7019 }
7020
7021 return err;
7022}
7023
7024static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
7025{
7026
7027 igb_set_vf_vlan(adapter, 0, true, vf);
7028
7029 igb_set_vmvir(adapter, 0, vf);
7030 igb_set_vmolr(adapter, vf, true);
7031
7032
7033 if (adapter->vf_data[vf].pf_vlan)
7034 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7035 false, vf);
7036
7037 adapter->vf_data[vf].pf_vlan = 0;
7038 adapter->vf_data[vf].pf_qos = 0;
7039 igb_set_vf_vlan_strip(adapter, vf, false);
7040
7041 return 0;
7042}
7043
7044static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
7045 u16 vlan, u8 qos, __be16 vlan_proto)
7046{
7047 struct igb_adapter *adapter = netdev_priv(netdev);
7048
7049 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
7050 return -EINVAL;
7051
7052 if (vlan_proto != htons(ETH_P_8021Q))
7053 return -EPROTONOSUPPORT;
7054
7055 return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
7056 igb_disable_port_vlan(adapter, vf);
7057}
7058
7059static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7060{
7061 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7062 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
7063 int ret;
7064
7065 if (adapter->vf_data[vf].pf_vlan)
7066 return -1;
7067
7068
7069 if (!vid && !add)
7070 return 0;
7071
7072 ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
7073 if (!ret)
7074 igb_set_vf_vlan_strip(adapter, vf, !!vid);
7075 return ret;
7076}
7077
7078static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
7079{
7080 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7081
7082
7083 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
7084 vf_data->last_nack = jiffies;
7085
7086
7087 igb_clear_vf_vfta(adapter, vf);
7088 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
7089 igb_set_vmvir(adapter, vf_data->pf_vlan |
7090 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
7091 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
7092 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
7093
7094
7095 adapter->vf_data[vf].num_vf_mc_hashes = 0;
7096
7097
7098 igb_set_rx_mode(adapter->netdev);
7099}
7100
7101static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
7102{
7103 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7104
7105
7106 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7107 eth_zero_addr(vf_mac);
7108
7109
7110 igb_vf_reset(adapter, vf);
7111}
7112
7113static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
7114{
7115 struct e1000_hw *hw = &adapter->hw;
7116 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7117 u32 reg, msgbuf[3];
7118 u8 *addr = (u8 *)(&msgbuf[1]);
7119
7120
7121 igb_vf_reset(adapter, vf);
7122
7123
7124 igb_set_vf_mac(adapter, vf, vf_mac);
7125
7126
7127 reg = rd32(E1000_VFTE);
7128 wr32(E1000_VFTE, reg | BIT(vf));
7129 reg = rd32(E1000_VFRE);
7130 wr32(E1000_VFRE, reg | BIT(vf));
7131
7132 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
7133
7134
7135 if (!is_zero_ether_addr(vf_mac)) {
7136 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
7137 memcpy(addr, vf_mac, ETH_ALEN);
7138 } else {
7139 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
7140 }
7141 igb_write_mbx(hw, msgbuf, 3, vf);
7142}
7143
7144static void igb_flush_mac_table(struct igb_adapter *adapter)
7145{
7146 struct e1000_hw *hw = &adapter->hw;
7147 int i;
7148
7149 for (i = 0; i < hw->mac.rar_entry_count; i++) {
7150 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
7151 memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
7152 adapter->mac_table[i].queue = 0;
7153 igb_rar_set_index(adapter, i);
7154 }
7155}
7156
7157static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
7158{
7159 struct e1000_hw *hw = &adapter->hw;
7160
7161 int rar_entries = hw->mac.rar_entry_count -
7162 adapter->vfs_allocated_count;
7163 int i, count = 0;
7164
7165 for (i = 0; i < rar_entries; i++) {
7166
7167 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
7168 continue;
7169
7170
7171 if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
7172 (adapter->mac_table[i].queue != queue))
7173 continue;
7174
7175 count++;
7176 }
7177
7178 return count;
7179}
7180
7181
7182static void igb_set_default_mac_filter(struct igb_adapter *adapter)
7183{
7184 struct igb_mac_addr *mac_table = &adapter->mac_table[0];
7185
7186 ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
7187 mac_table->queue = adapter->vfs_allocated_count;
7188 mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7189
7190 igb_rar_set_index(adapter, 0);
7191}
7192
7193
7194
7195
7196
7197
7198static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
7199 const u8 *addr, const u8 flags)
7200{
7201 if (!(entry->state & IGB_MAC_STATE_IN_USE))
7202 return true;
7203
7204 if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
7205 (flags & IGB_MAC_STATE_SRC_ADDR))
7206 return false;
7207
7208 if (!ether_addr_equal(addr, entry->addr))
7209 return false;
7210
7211 return true;
7212}
7213
7214
7215
7216
7217
7218
7219static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
7220 const u8 *addr, const u8 queue,
7221 const u8 flags)
7222{
7223 struct e1000_hw *hw = &adapter->hw;
7224 int rar_entries = hw->mac.rar_entry_count -
7225 adapter->vfs_allocated_count;
7226 int i;
7227
7228 if (is_zero_ether_addr(addr))
7229 return -EINVAL;
7230
7231
7232
7233
7234
7235 for (i = 0; i < rar_entries; i++) {
7236 if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
7237 addr, flags))
7238 continue;
7239
7240 ether_addr_copy(adapter->mac_table[i].addr, addr);
7241 adapter->mac_table[i].queue = queue;
7242 adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;
7243
7244 igb_rar_set_index(adapter, i);
7245 return i;
7246 }
7247
7248 return -ENOSPC;
7249}
7250
7251static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7252 const u8 queue)
7253{
7254 return igb_add_mac_filter_flags(adapter, addr, queue, 0);
7255}
7256
7257
7258
7259
7260
7261
7262
7263static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
7264 const u8 *addr, const u8 queue,
7265 const u8 flags)
7266{
7267 struct e1000_hw *hw = &adapter->hw;
7268 int rar_entries = hw->mac.rar_entry_count -
7269 adapter->vfs_allocated_count;
7270 int i;
7271
7272 if (is_zero_ether_addr(addr))
7273 return -EINVAL;
7274
7275
7276
7277
7278
7279 for (i = 0; i < rar_entries; i++) {
7280 if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
7281 continue;
7282 if ((adapter->mac_table[i].state & flags) != flags)
7283 continue;
7284 if (adapter->mac_table[i].queue != queue)
7285 continue;
7286 if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
7287 continue;
7288
7289
7290
7291
7292 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
7293 adapter->mac_table[i].state =
7294 IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7295 adapter->mac_table[i].queue =
7296 adapter->vfs_allocated_count;
7297 } else {
7298 adapter->mac_table[i].state = 0;
7299 adapter->mac_table[i].queue = 0;
7300 memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
7301 }
7302
7303 igb_rar_set_index(adapter, i);
7304 return 0;
7305 }
7306
7307 return -ENOENT;
7308}
7309
7310static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7311 const u8 queue)
7312{
7313 return igb_del_mac_filter_flags(adapter, addr, queue, 0);
7314}
7315
7316int igb_add_mac_steering_filter(struct igb_adapter *adapter,
7317 const u8 *addr, u8 queue, u8 flags)
7318{
7319 struct e1000_hw *hw = &adapter->hw;
7320
7321
7322
7323
7324 if (hw->mac.type != e1000_i210)
7325 return -EOPNOTSUPP;
7326
7327 return igb_add_mac_filter_flags(adapter, addr, queue,
7328 IGB_MAC_STATE_QUEUE_STEERING | flags);
7329}
7330
7331int igb_del_mac_steering_filter(struct igb_adapter *adapter,
7332 const u8 *addr, u8 queue, u8 flags)
7333{
7334 return igb_del_mac_filter_flags(adapter, addr, queue,
7335 IGB_MAC_STATE_QUEUE_STEERING | flags);
7336}
7337
7338static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
7339{
7340 struct igb_adapter *adapter = netdev_priv(netdev);
7341 int ret;
7342
7343 ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7344
7345 return min_t(int, ret, 0);
7346}
7347
7348static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
7349{
7350 struct igb_adapter *adapter = netdev_priv(netdev);
7351
7352 igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7353
7354 return 0;
7355}
7356
7357static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
7358 const u32 info, const u8 *addr)
7359{
7360 struct pci_dev *pdev = adapter->pdev;
7361 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7362 struct list_head *pos;
7363 struct vf_mac_filter *entry = NULL;
7364 int ret = 0;
7365
7366 switch (info) {
7367 case E1000_VF_MAC_FILTER_CLR:
7368
7369 list_for_each(pos, &adapter->vf_macs.l) {
7370 entry = list_entry(pos, struct vf_mac_filter, l);
7371 if (entry->vf == vf) {
7372 entry->vf = -1;
7373 entry->free = true;
7374 igb_del_mac_filter(adapter, entry->vf_mac, vf);
7375 }
7376 }
7377 break;
7378 case E1000_VF_MAC_FILTER_ADD:
7379 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7380 !vf_data->trusted) {
7381 dev_warn(&pdev->dev,
7382 "VF %d requested MAC filter but is administratively denied\n",
7383 vf);
7384 return -EINVAL;
7385 }
7386 if (!is_valid_ether_addr(addr)) {
7387 dev_warn(&pdev->dev,
7388 "VF %d attempted to set invalid MAC filter\n",
7389 vf);
7390 return -EINVAL;
7391 }
7392
7393
7394 list_for_each(pos, &adapter->vf_macs.l) {
7395 entry = list_entry(pos, struct vf_mac_filter, l);
7396 if (entry->free)
7397 break;
7398 }
7399
7400 if (entry && entry->free) {
7401 entry->free = false;
7402 entry->vf = vf;
7403 ether_addr_copy(entry->vf_mac, addr);
7404
7405 ret = igb_add_mac_filter(adapter, addr, vf);
7406 ret = min_t(int, ret, 0);
7407 } else {
7408 ret = -ENOSPC;
7409 }
7410
7411 if (ret == -ENOSPC)
7412 dev_warn(&pdev->dev,
7413 "VF %d has requested MAC filter but there is no space for it\n",
7414 vf);
7415 break;
7416 default:
7417 ret = -EINVAL;
7418 break;
7419 }
7420
7421 return ret;
7422}
7423
7424static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
7425{
7426 struct pci_dev *pdev = adapter->pdev;
7427 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7428 u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
7429
7430
7431
7432
7433 unsigned char *addr = (unsigned char *)&msg[1];
7434 int ret = 0;
7435
7436 if (!info) {
7437 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7438 !vf_data->trusted) {
7439 dev_warn(&pdev->dev,
7440 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
7441 vf);
7442 return -EINVAL;
7443 }
7444
7445 if (!is_valid_ether_addr(addr)) {
7446 dev_warn(&pdev->dev,
7447 "VF %d attempted to set invalid MAC\n",
7448 vf);
7449 return -EINVAL;
7450 }
7451
7452 ret = igb_set_vf_mac(adapter, vf, addr);
7453 } else {
7454 ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
7455 }
7456
7457 return ret;
7458}
7459
7460static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
7461{
7462 struct e1000_hw *hw = &adapter->hw;
7463 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7464 u32 msg = E1000_VT_MSGTYPE_NACK;
7465
7466
7467 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
7468 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
7469 igb_write_mbx(hw, &msg, 1, vf);
7470 vf_data->last_nack = jiffies;
7471 }
7472}
7473
7474static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
7475{
7476 struct pci_dev *pdev = adapter->pdev;
7477 u32 msgbuf[E1000_VFMAILBOX_SIZE];
7478 struct e1000_hw *hw = &adapter->hw;
7479 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7480 s32 retval;
7481
7482 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);
7483
7484 if (retval) {
7485
7486 dev_err(&pdev->dev, "Error receiving message from VF\n");
7487 vf_data->flags &= ~IGB_VF_FLAG_CTS;
7488 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7489 goto unlock;
7490 goto out;
7491 }
7492
7493
7494 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
7495 goto unlock;
7496
7497
7498
7499
7500 if (msgbuf[0] == E1000_VF_RESET) {
7501
7502 igb_vf_reset_msg(adapter, vf);
7503 return;
7504 }
7505
7506 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
7507 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7508 goto unlock;
7509 retval = -1;
7510 goto out;
7511 }
7512
7513 switch ((msgbuf[0] & 0xFFFF)) {
7514 case E1000_VF_SET_MAC_ADDR:
7515 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
7516 break;
7517 case E1000_VF_SET_PROMISC:
7518 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
7519 break;
7520 case E1000_VF_SET_MULTICAST:
7521 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
7522 break;
7523 case E1000_VF_SET_LPE:
7524 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
7525 break;
7526 case E1000_VF_SET_VLAN:
7527 retval = -1;
7528 if (vf_data->pf_vlan)
7529 dev_warn(&pdev->dev,
7530 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
7531 vf);
7532 else
7533 retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
7534 break;
7535 default:
7536 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
7537 retval = -1;
7538 break;
7539 }
7540
7541 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
7542out:
7543
7544 if (retval)
7545 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
7546 else
7547 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
7548
7549
7550 igb_write_mbx(hw, msgbuf, 1, vf);
7551 return;
7552
7553unlock:
7554 igb_unlock_mbx(hw, vf);
7555}
7556
7557static void igb_msg_task(struct igb_adapter *adapter)
7558{
7559 struct e1000_hw *hw = &adapter->hw;
7560 u32 vf;
7561
7562 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
7563
7564 if (!igb_check_for_rst(hw, vf))
7565 igb_vf_reset_event(adapter, vf);
7566
7567
7568 if (!igb_check_for_msg(hw, vf))
7569 igb_rcv_msg_from_vf(adapter, vf);
7570
7571
7572 if (!igb_check_for_ack(hw, vf))
7573 igb_rcv_ack_from_vf(adapter, vf);
7574 }
7575}
7576
7577
7578
7579
7580
7581
7582
7583
7584
7585
7586
7587
7588static void igb_set_uta(struct igb_adapter *adapter, bool set)
7589{
7590 struct e1000_hw *hw = &adapter->hw;
7591 u32 uta = set ? ~0 : 0;
7592 int i;
7593
7594
7595 if (!adapter->vfs_allocated_count)
7596 return;
7597
7598 for (i = hw->mac.uta_reg_count; i--;)
7599 array_wr32(E1000_UTA, i, uta);
7600}
7601
7602
7603
7604
7605
7606
7607static irqreturn_t igb_intr_msi(int irq, void *data)
7608{
7609 struct igb_adapter *adapter = data;
7610 struct igb_q_vector *q_vector = adapter->q_vector[0];
7611 struct e1000_hw *hw = &adapter->hw;
7612
7613 u32 icr = rd32(E1000_ICR);
7614
7615 igb_write_itr(q_vector);
7616
7617 if (icr & E1000_ICR_DRSTA)
7618 schedule_work(&adapter->reset_task);
7619
7620 if (icr & E1000_ICR_DOUTSYNC) {
7621
7622 adapter->stats.doosync++;
7623 }
7624
7625 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7626 hw->mac.get_link_status = 1;
7627 if (!test_bit(__IGB_DOWN, &adapter->state))
7628 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7629 }
7630
7631 if (icr & E1000_ICR_TS)
7632 igb_tsync_interrupt(adapter);
7633
7634 napi_schedule(&q_vector->napi);
7635
7636 return IRQ_HANDLED;
7637}
7638
7639
7640
7641
7642
7643
7644static irqreturn_t igb_intr(int irq, void *data)
7645{
7646 struct igb_adapter *adapter = data;
7647 struct igb_q_vector *q_vector = adapter->q_vector[0];
7648 struct e1000_hw *hw = &adapter->hw;
7649
7650
7651
7652 u32 icr = rd32(E1000_ICR);
7653
7654
7655
7656
7657 if (!(icr & E1000_ICR_INT_ASSERTED))
7658 return IRQ_NONE;
7659
7660 igb_write_itr(q_vector);
7661
7662 if (icr & E1000_ICR_DRSTA)
7663 schedule_work(&adapter->reset_task);
7664
7665 if (icr & E1000_ICR_DOUTSYNC) {
7666
7667 adapter->stats.doosync++;
7668 }
7669
7670 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7671 hw->mac.get_link_status = 1;
7672
7673 if (!test_bit(__IGB_DOWN, &adapter->state))
7674 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7675 }
7676
7677 if (icr & E1000_ICR_TS)
7678 igb_tsync_interrupt(adapter);
7679
7680 napi_schedule(&q_vector->napi);
7681
7682 return IRQ_HANDLED;
7683}
7684
7685static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
7686{
7687 struct igb_adapter *adapter = q_vector->adapter;
7688 struct e1000_hw *hw = &adapter->hw;
7689
7690 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
7691 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
7692 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
7693 igb_set_itr(q_vector);
7694 else
7695 igb_update_ring_itr(q_vector);
7696 }
7697
7698 if (!test_bit(__IGB_DOWN, &adapter->state)) {
7699 if (adapter->flags & IGB_FLAG_HAS_MSIX)
7700 wr32(E1000_EIMS, q_vector->eims_value);
7701 else
7702 igb_irq_enable(adapter);
7703 }
7704}
7705
7706
7707
7708
7709
7710
7711static int igb_poll(struct napi_struct *napi, int budget)
7712{
7713 struct igb_q_vector *q_vector = container_of(napi,
7714 struct igb_q_vector,
7715 napi);
7716 bool clean_complete = true;
7717 int work_done = 0;
7718
7719#ifdef CONFIG_IGB_DCA
7720 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
7721 igb_update_dca(q_vector);
7722#endif
7723 if (q_vector->tx.ring)
7724 clean_complete = igb_clean_tx_irq(q_vector, budget);
7725
7726 if (q_vector->rx.ring) {
7727 int cleaned = igb_clean_rx_irq(q_vector, budget);
7728
7729 work_done += cleaned;
7730 if (cleaned >= budget)
7731 clean_complete = false;
7732 }
7733
7734
7735 if (!clean_complete)
7736 return budget;
7737
7738
7739
7740
7741 if (likely(napi_complete_done(napi, work_done)))
7742 igb_ring_irq_enable(q_vector);
7743
7744 return min(work_done, budget - 1);
7745}
7746
7747
7748
7749
7750
7751
7752
7753
7754static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
7755{
7756 struct igb_adapter *adapter = q_vector->adapter;
7757 struct igb_ring *tx_ring = q_vector->tx.ring;
7758 struct igb_tx_buffer *tx_buffer;
7759 union e1000_adv_tx_desc *tx_desc;
7760 unsigned int total_bytes = 0, total_packets = 0;
7761 unsigned int budget = q_vector->tx.work_limit;
7762 unsigned int i = tx_ring->next_to_clean;
7763
7764 if (test_bit(__IGB_DOWN, &adapter->state))
7765 return true;
7766
7767 tx_buffer = &tx_ring->tx_buffer_info[i];
7768 tx_desc = IGB_TX_DESC(tx_ring, i);
7769 i -= tx_ring->count;
7770
7771 do {
7772 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
7773
7774
7775 if (!eop_desc)
7776 break;
7777
7778
7779 smp_rmb();
7780
7781
7782 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
7783 break;
7784
7785
7786 tx_buffer->next_to_watch = NULL;
7787
7788
7789 total_bytes += tx_buffer->bytecount;
7790 total_packets += tx_buffer->gso_segs;
7791
7792
7793 napi_consume_skb(tx_buffer->skb, napi_budget);
7794
7795
7796 dma_unmap_single(tx_ring->dev,
7797 dma_unmap_addr(tx_buffer, dma),
7798 dma_unmap_len(tx_buffer, len),
7799 DMA_TO_DEVICE);
7800
7801
7802 dma_unmap_len_set(tx_buffer, len, 0);
7803
7804
7805 while (tx_desc != eop_desc) {
7806 tx_buffer++;
7807 tx_desc++;
7808 i++;
7809 if (unlikely(!i)) {
7810 i -= tx_ring->count;
7811 tx_buffer = tx_ring->tx_buffer_info;
7812 tx_desc = IGB_TX_DESC(tx_ring, 0);
7813 }
7814
7815
7816 if (dma_unmap_len(tx_buffer, len)) {
7817 dma_unmap_page(tx_ring->dev,
7818 dma_unmap_addr(tx_buffer, dma),
7819 dma_unmap_len(tx_buffer, len),
7820 DMA_TO_DEVICE);
7821 dma_unmap_len_set(tx_buffer, len, 0);
7822 }
7823 }
7824
7825
7826 tx_buffer++;
7827 tx_desc++;
7828 i++;
7829 if (unlikely(!i)) {
7830 i -= tx_ring->count;
7831 tx_buffer = tx_ring->tx_buffer_info;
7832 tx_desc = IGB_TX_DESC(tx_ring, 0);
7833 }
7834
7835
7836 prefetch(tx_desc);
7837
7838
7839 budget--;
7840 } while (likely(budget));
7841
7842 netdev_tx_completed_queue(txring_txq(tx_ring),
7843 total_packets, total_bytes);
7844 i += tx_ring->count;
7845 tx_ring->next_to_clean = i;
7846 u64_stats_update_begin(&tx_ring->tx_syncp);
7847 tx_ring->tx_stats.bytes += total_bytes;
7848 tx_ring->tx_stats.packets += total_packets;
7849 u64_stats_update_end(&tx_ring->tx_syncp);
7850 q_vector->tx.total_bytes += total_bytes;
7851 q_vector->tx.total_packets += total_packets;
7852
7853 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
7854 struct e1000_hw *hw = &adapter->hw;
7855
7856
7857
7858
7859 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
7860 if (tx_buffer->next_to_watch &&
7861 time_after(jiffies, tx_buffer->time_stamp +
7862 (adapter->tx_timeout_factor * HZ)) &&
7863 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
7864
7865
7866 dev_err(tx_ring->dev,
7867 "Detected Tx Unit Hang\n"
7868 " Tx Queue <%d>\n"
7869 " TDH <%x>\n"
7870 " TDT <%x>\n"
7871 " next_to_use <%x>\n"
7872 " next_to_clean <%x>\n"
7873 "buffer_info[next_to_clean]\n"
7874 " time_stamp <%lx>\n"
7875 " next_to_watch <%p>\n"
7876 " jiffies <%lx>\n"
7877 " desc.status <%x>\n",
7878 tx_ring->queue_index,
7879 rd32(E1000_TDH(tx_ring->reg_idx)),
7880 readl(tx_ring->tail),
7881 tx_ring->next_to_use,
7882 tx_ring->next_to_clean,
7883 tx_buffer->time_stamp,
7884 tx_buffer->next_to_watch,
7885 jiffies,
7886 tx_buffer->next_to_watch->wb.status);
7887 netif_stop_subqueue(tx_ring->netdev,
7888 tx_ring->queue_index);
7889
7890
7891 return true;
7892 }
7893 }
7894
7895#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
7896 if (unlikely(total_packets &&
7897 netif_carrier_ok(tx_ring->netdev) &&
7898 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
7899
7900
7901
7902 smp_mb();
7903 if (__netif_subqueue_stopped(tx_ring->netdev,
7904 tx_ring->queue_index) &&
7905 !(test_bit(__IGB_DOWN, &adapter->state))) {
7906 netif_wake_subqueue(tx_ring->netdev,
7907 tx_ring->queue_index);
7908
7909 u64_stats_update_begin(&tx_ring->tx_syncp);
7910 tx_ring->tx_stats.restart_queue++;
7911 u64_stats_update_end(&tx_ring->tx_syncp);
7912 }
7913 }
7914
7915 return !!budget;
7916}
7917
7918
7919
7920
7921
7922
7923
7924
7925static void igb_reuse_rx_page(struct igb_ring *rx_ring,
7926 struct igb_rx_buffer *old_buff)
7927{
7928 struct igb_rx_buffer *new_buff;
7929 u16 nta = rx_ring->next_to_alloc;
7930
7931 new_buff = &rx_ring->rx_buffer_info[nta];
7932
7933
7934 nta++;
7935 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
7936
7937
7938
7939
7940
7941 new_buff->dma = old_buff->dma;
7942 new_buff->page = old_buff->page;
7943 new_buff->page_offset = old_buff->page_offset;
7944 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
7945}
7946
7947static inline bool igb_page_is_reserved(struct page *page)
7948{
7949 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
7950}
7951
7952static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
7953{
7954 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
7955 struct page *page = rx_buffer->page;
7956
7957
7958 if (unlikely(igb_page_is_reserved(page)))
7959 return false;
7960
7961#if (PAGE_SIZE < 8192)
7962
7963 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
7964 return false;
7965#else
7966#define IGB_LAST_OFFSET \
7967 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
7968
7969 if (rx_buffer->page_offset > IGB_LAST_OFFSET)
7970 return false;
7971#endif
7972
7973
7974
7975
7976
7977 if (unlikely(!pagecnt_bias)) {
7978 page_ref_add(page, USHRT_MAX);
7979 rx_buffer->pagecnt_bias = USHRT_MAX;
7980 }
7981
7982 return true;
7983}
7984
7985
7986
7987
7988
7989
7990
7991
7992
7993
7994static void igb_add_rx_frag(struct igb_ring *rx_ring,
7995 struct igb_rx_buffer *rx_buffer,
7996 struct sk_buff *skb,
7997 unsigned int size)
7998{
7999#if (PAGE_SIZE < 8192)
8000 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8001#else
8002 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
8003 SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
8004 SKB_DATA_ALIGN(size);
8005#endif
8006 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
8007 rx_buffer->page_offset, size, truesize);
8008#if (PAGE_SIZE < 8192)
8009 rx_buffer->page_offset ^= truesize;
8010#else
8011 rx_buffer->page_offset += truesize;
8012#endif
8013}
8014
8015static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
8016 struct igb_rx_buffer *rx_buffer,
8017 union e1000_adv_rx_desc *rx_desc,
8018 unsigned int size)
8019{
8020 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8021#if (PAGE_SIZE < 8192)
8022 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8023#else
8024 unsigned int truesize = SKB_DATA_ALIGN(size);
8025#endif
8026 unsigned int headlen;
8027 struct sk_buff *skb;
8028
8029
8030 prefetch(va);
8031#if L1_CACHE_BYTES < 128
8032 prefetch(va + L1_CACHE_BYTES);
8033#endif
8034
8035
8036 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
8037 if (unlikely(!skb))
8038 return NULL;
8039
8040 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
8041 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
8042 va += IGB_TS_HDR_LEN;
8043 size -= IGB_TS_HDR_LEN;
8044 }
8045
8046
8047 headlen = size;
8048 if (headlen > IGB_RX_HDR_LEN)
8049 headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);
8050
8051
8052 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
8053
8054
8055 size -= headlen;
8056 if (size) {
8057 skb_add_rx_frag(skb, 0, rx_buffer->page,
8058 (va + headlen) - page_address(rx_buffer->page),
8059 size, truesize);
8060#if (PAGE_SIZE < 8192)
8061 rx_buffer->page_offset ^= truesize;
8062#else
8063 rx_buffer->page_offset += truesize;
8064#endif
8065 } else {
8066 rx_buffer->pagecnt_bias++;
8067 }
8068
8069 return skb;
8070}
8071
8072static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
8073 struct igb_rx_buffer *rx_buffer,
8074 union e1000_adv_rx_desc *rx_desc,
8075 unsigned int size)
8076{
8077 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8078#if (PAGE_SIZE < 8192)
8079 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8080#else
8081 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
8082 SKB_DATA_ALIGN(IGB_SKB_PAD + size);
8083#endif
8084 struct sk_buff *skb;
8085
8086
8087 prefetch(va);
8088#if L1_CACHE_BYTES < 128
8089 prefetch(va + L1_CACHE_BYTES);
8090#endif
8091
8092
8093 skb = build_skb(va - IGB_SKB_PAD, truesize);
8094 if (unlikely(!skb))
8095 return NULL;
8096
8097
8098 skb_reserve(skb, IGB_SKB_PAD);
8099 __skb_put(skb, size);
8100
8101
8102 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
8103 igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
8104 __skb_pull(skb, IGB_TS_HDR_LEN);
8105 }
8106
8107
8108#if (PAGE_SIZE < 8192)
8109 rx_buffer->page_offset ^= truesize;
8110#else
8111 rx_buffer->page_offset += truesize;
8112#endif
8113
8114 return skb;
8115}
8116
8117static inline void igb_rx_checksum(struct igb_ring *ring,
8118 union e1000_adv_rx_desc *rx_desc,
8119 struct sk_buff *skb)
8120{
8121 skb_checksum_none_assert(skb);
8122
8123
8124 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
8125 return;
8126
8127
8128 if (!(ring->netdev->features & NETIF_F_RXCSUM))
8129 return;
8130
8131
8132 if (igb_test_staterr(rx_desc,
8133 E1000_RXDEXT_STATERR_TCPE |
8134 E1000_RXDEXT_STATERR_IPE)) {
8135
8136
8137
8138
8139 if (!((skb->len == 60) &&
8140 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
8141 u64_stats_update_begin(&ring->rx_syncp);
8142 ring->rx_stats.csum_err++;
8143 u64_stats_update_end(&ring->rx_syncp);
8144 }
8145
8146 return;
8147 }
8148
8149 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
8150 E1000_RXD_STAT_UDPCS))
8151 skb->ip_summed = CHECKSUM_UNNECESSARY;
8152
8153 dev_dbg(ring->dev, "cksum success: bits %08X\n",
8154 le32_to_cpu(rx_desc->wb.upper.status_error));
8155}
8156
8157static inline void igb_rx_hash(struct igb_ring *ring,
8158 union e1000_adv_rx_desc *rx_desc,
8159 struct sk_buff *skb)
8160{
8161 if (ring->netdev->features & NETIF_F_RXHASH)
8162 skb_set_hash(skb,
8163 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
8164 PKT_HASH_TYPE_L3);
8165}
8166
8167
8168
8169
8170
8171
8172
8173
8174
8175
8176
8177
8178static bool igb_is_non_eop(struct igb_ring *rx_ring,
8179 union e1000_adv_rx_desc *rx_desc)
8180{
8181 u32 ntc = rx_ring->next_to_clean + 1;
8182
8183
8184 ntc = (ntc < rx_ring->count) ? ntc : 0;
8185 rx_ring->next_to_clean = ntc;
8186
8187 prefetch(IGB_RX_DESC(rx_ring, ntc));
8188
8189 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
8190 return false;
8191
8192 return true;
8193}
8194
8195
8196
8197
8198
8199
8200
8201
8202
8203
8204
8205
8206
8207
8208
8209static bool igb_cleanup_headers(struct igb_ring *rx_ring,
8210 union e1000_adv_rx_desc *rx_desc,
8211 struct sk_buff *skb)
8212{
8213 if (unlikely((igb_test_staterr(rx_desc,
8214 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
8215 struct net_device *netdev = rx_ring->netdev;
8216 if (!(netdev->features & NETIF_F_RXALL)) {
8217 dev_kfree_skb_any(skb);
8218 return true;
8219 }
8220 }
8221
8222
8223 if (eth_skb_pad(skb))
8224 return true;
8225
8226 return false;
8227}
8228
8229
8230
8231
8232
8233
8234
8235
8236
8237
8238
8239static void igb_process_skb_fields(struct igb_ring *rx_ring,
8240 union e1000_adv_rx_desc *rx_desc,
8241 struct sk_buff *skb)
8242{
8243 struct net_device *dev = rx_ring->netdev;
8244
8245 igb_rx_hash(rx_ring, rx_desc, skb);
8246
8247 igb_rx_checksum(rx_ring, rx_desc, skb);
8248
8249 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
8250 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
8251 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
8252
8253 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
8254 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
8255 u16 vid;
8256
8257 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
8258 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
8259 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
8260 else
8261 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
8262
8263 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
8264 }
8265
8266 skb_record_rx_queue(skb, rx_ring->queue_index);
8267
8268 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
8269}
8270
8271static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
8272 const unsigned int size)
8273{
8274 struct igb_rx_buffer *rx_buffer;
8275
8276 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
8277 prefetchw(rx_buffer->page);
8278
8279
8280 dma_sync_single_range_for_cpu(rx_ring->dev,
8281 rx_buffer->dma,
8282 rx_buffer->page_offset,
8283 size,
8284 DMA_FROM_DEVICE);
8285
8286 rx_buffer->pagecnt_bias--;
8287
8288 return rx_buffer;
8289}
8290
8291static void igb_put_rx_buffer(struct igb_ring *rx_ring,
8292 struct igb_rx_buffer *rx_buffer)
8293{
8294 if (igb_can_reuse_rx_page(rx_buffer)) {
8295
8296 igb_reuse_rx_page(rx_ring, rx_buffer);
8297 } else {
8298
8299
8300
8301 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
8302 igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
8303 IGB_RX_DMA_ATTR);
8304 __page_frag_cache_drain(rx_buffer->page,
8305 rx_buffer->pagecnt_bias);
8306 }
8307
8308
8309 rx_buffer->page = NULL;
8310}
8311
8312static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
8313{
8314 struct igb_ring *rx_ring = q_vector->rx.ring;
8315 struct sk_buff *skb = rx_ring->skb;
8316 unsigned int total_bytes = 0, total_packets = 0;
8317 u16 cleaned_count = igb_desc_unused(rx_ring);
8318
8319 while (likely(total_packets < budget)) {
8320 union e1000_adv_rx_desc *rx_desc;
8321 struct igb_rx_buffer *rx_buffer;
8322 unsigned int size;
8323
8324
8325 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
8326 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8327 cleaned_count = 0;
8328 }
8329
8330 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
8331 size = le16_to_cpu(rx_desc->wb.upper.length);
8332 if (!size)
8333 break;
8334
8335
8336
8337
8338
8339 dma_rmb();
8340
8341 rx_buffer = igb_get_rx_buffer(rx_ring, size);
8342
8343
8344 if (skb)
8345 igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
8346 else if (ring_uses_build_skb(rx_ring))
8347 skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
8348 else
8349 skb = igb_construct_skb(rx_ring, rx_buffer,
8350 rx_desc, size);
8351
8352
8353 if (!skb) {
8354 rx_ring->rx_stats.alloc_failed++;
8355 rx_buffer->pagecnt_bias++;
8356 break;
8357 }
8358
8359 igb_put_rx_buffer(rx_ring, rx_buffer);
8360 cleaned_count++;
8361
8362
8363 if (igb_is_non_eop(rx_ring, rx_desc))
8364 continue;
8365
8366
8367 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
8368 skb = NULL;
8369 continue;
8370 }
8371
8372
8373 total_bytes += skb->len;
8374
8375
8376 igb_process_skb_fields(rx_ring, rx_desc, skb);
8377
8378 napi_gro_receive(&q_vector->napi, skb);
8379
8380
8381 skb = NULL;
8382
8383
8384 total_packets++;
8385 }
8386
8387
8388 rx_ring->skb = skb;
8389
8390 u64_stats_update_begin(&rx_ring->rx_syncp);
8391 rx_ring->rx_stats.packets += total_packets;
8392 rx_ring->rx_stats.bytes += total_bytes;
8393 u64_stats_update_end(&rx_ring->rx_syncp);
8394 q_vector->rx.total_packets += total_packets;
8395 q_vector->rx.total_bytes += total_bytes;
8396
8397 if (cleaned_count)
8398 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8399
8400 return total_packets;
8401}
8402
8403static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
8404{
8405 return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
8406}
8407
8408static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
8409 struct igb_rx_buffer *bi)
8410{
8411 struct page *page = bi->page;
8412 dma_addr_t dma;
8413
8414
8415 if (likely(page))
8416 return true;
8417
8418
8419 page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
8420 if (unlikely(!page)) {
8421 rx_ring->rx_stats.alloc_failed++;
8422 return false;
8423 }
8424
8425
8426 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
8427 igb_rx_pg_size(rx_ring),
8428 DMA_FROM_DEVICE,
8429 IGB_RX_DMA_ATTR);
8430
8431
8432
8433
8434 if (dma_mapping_error(rx_ring->dev, dma)) {
8435 __free_pages(page, igb_rx_pg_order(rx_ring));
8436
8437 rx_ring->rx_stats.alloc_failed++;
8438 return false;
8439 }
8440
8441 bi->dma = dma;
8442 bi->page = page;
8443 bi->page_offset = igb_rx_offset(rx_ring);
8444 bi->pagecnt_bias = 1;
8445
8446 return true;
8447}
8448
8449
8450
8451
8452
8453void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
8454{
8455 union e1000_adv_rx_desc *rx_desc;
8456 struct igb_rx_buffer *bi;
8457 u16 i = rx_ring->next_to_use;
8458 u16 bufsz;
8459
8460
8461 if (!cleaned_count)
8462 return;
8463
8464 rx_desc = IGB_RX_DESC(rx_ring, i);
8465 bi = &rx_ring->rx_buffer_info[i];
8466 i -= rx_ring->count;
8467
8468 bufsz = igb_rx_bufsz(rx_ring);
8469
8470 do {
8471 if (!igb_alloc_mapped_page(rx_ring, bi))
8472 break;
8473
8474
8475 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
8476 bi->page_offset, bufsz,
8477 DMA_FROM_DEVICE);
8478
8479
8480
8481
8482 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
8483
8484 rx_desc++;
8485 bi++;
8486 i++;
8487 if (unlikely(!i)) {
8488 rx_desc = IGB_RX_DESC(rx_ring, 0);
8489 bi = rx_ring->rx_buffer_info;
8490 i -= rx_ring->count;
8491 }
8492
8493
8494 rx_desc->wb.upper.length = 0;
8495
8496 cleaned_count--;
8497 } while (cleaned_count);
8498
8499 i += rx_ring->count;
8500
8501 if (rx_ring->next_to_use != i) {
8502
8503 rx_ring->next_to_use = i;
8504
8505
8506 rx_ring->next_to_alloc = i;
8507
8508
8509
8510
8511
8512
8513 dma_wmb();
8514 writel(i, rx_ring->tail);
8515 }
8516}
8517
8518
8519
8520
8521
8522
8523
8524static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8525{
8526 struct igb_adapter *adapter = netdev_priv(netdev);
8527 struct mii_ioctl_data *data = if_mii(ifr);
8528
8529 if (adapter->hw.phy.media_type != e1000_media_type_copper)
8530 return -EOPNOTSUPP;
8531
8532 switch (cmd) {
8533 case SIOCGMIIPHY:
8534 data->phy_id = adapter->hw.phy.addr;
8535 break;
8536 case SIOCGMIIREG:
8537 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
8538 &data->val_out))
8539 return -EIO;
8540 break;
8541 case SIOCSMIIREG:
8542 default:
8543 return -EOPNOTSUPP;
8544 }
8545 return 0;
8546}
8547
8548
8549
8550
8551
8552
8553
8554static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8555{
8556 switch (cmd) {
8557 case SIOCGMIIPHY:
8558 case SIOCGMIIREG:
8559 case SIOCSMIIREG:
8560 return igb_mii_ioctl(netdev, ifr, cmd);
8561 case SIOCGHWTSTAMP:
8562 return igb_ptp_get_ts_config(netdev, ifr);
8563 case SIOCSHWTSTAMP:
8564 return igb_ptp_set_ts_config(netdev, ifr);
8565 default:
8566 return -EOPNOTSUPP;
8567 }
8568}
8569
8570void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8571{
8572 struct igb_adapter *adapter = hw->back;
8573
8574 pci_read_config_word(adapter->pdev, reg, value);
8575}
8576
8577void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8578{
8579 struct igb_adapter *adapter = hw->back;
8580
8581 pci_write_config_word(adapter->pdev, reg, *value);
8582}
8583
8584s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8585{
8586 struct igb_adapter *adapter = hw->back;
8587
8588 if (pcie_capability_read_word(adapter->pdev, reg, value))
8589 return -E1000_ERR_CONFIG;
8590
8591 return 0;
8592}
8593
8594s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8595{
8596 struct igb_adapter *adapter = hw->back;
8597
8598 if (pcie_capability_write_word(adapter->pdev, reg, *value))
8599 return -E1000_ERR_CONFIG;
8600
8601 return 0;
8602}
8603
8604static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
8605{
8606 struct igb_adapter *adapter = netdev_priv(netdev);
8607 struct e1000_hw *hw = &adapter->hw;
8608 u32 ctrl, rctl;
8609 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
8610
8611 if (enable) {
8612
8613 ctrl = rd32(E1000_CTRL);
8614 ctrl |= E1000_CTRL_VME;
8615 wr32(E1000_CTRL, ctrl);
8616
8617
8618 rctl = rd32(E1000_RCTL);
8619 rctl &= ~E1000_RCTL_CFIEN;
8620 wr32(E1000_RCTL, rctl);
8621 } else {
8622
8623 ctrl = rd32(E1000_CTRL);
8624 ctrl &= ~E1000_CTRL_VME;
8625 wr32(E1000_CTRL, ctrl);
8626 }
8627
8628 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
8629}
8630
8631static int igb_vlan_rx_add_vid(struct net_device *netdev,
8632 __be16 proto, u16 vid)
8633{
8634 struct igb_adapter *adapter = netdev_priv(netdev);
8635 struct e1000_hw *hw = &adapter->hw;
8636 int pf_id = adapter->vfs_allocated_count;
8637
8638
8639 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8640 igb_vfta_set(hw, vid, pf_id, true, !!vid);
8641
8642 set_bit(vid, adapter->active_vlans);
8643
8644 return 0;
8645}
8646
8647static int igb_vlan_rx_kill_vid(struct net_device *netdev,
8648 __be16 proto, u16 vid)
8649{
8650 struct igb_adapter *adapter = netdev_priv(netdev);
8651 int pf_id = adapter->vfs_allocated_count;
8652 struct e1000_hw *hw = &adapter->hw;
8653
8654
8655 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8656 igb_vfta_set(hw, vid, pf_id, false, true);
8657
8658 clear_bit(vid, adapter->active_vlans);
8659
8660 return 0;
8661}
8662
8663static void igb_restore_vlan(struct igb_adapter *adapter)
8664{
8665 u16 vid = 1;
8666
8667 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
8668 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
8669
8670 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
8671 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
8672}
8673
8674int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
8675{
8676 struct pci_dev *pdev = adapter->pdev;
8677 struct e1000_mac_info *mac = &adapter->hw.mac;
8678
8679 mac->autoneg = 0;
8680
8681
8682
8683
8684 if ((spd & 1) || (dplx & ~1))
8685 goto err_inval;
8686
8687
8688
8689
8690 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
8691 switch (spd + dplx) {
8692 case SPEED_10 + DUPLEX_HALF:
8693 case SPEED_10 + DUPLEX_FULL:
8694 case SPEED_100 + DUPLEX_HALF:
8695 goto err_inval;
8696 default:
8697 break;
8698 }
8699 }
8700
8701 switch (spd + dplx) {
8702 case SPEED_10 + DUPLEX_HALF:
8703 mac->forced_speed_duplex = ADVERTISE_10_HALF;
8704 break;
8705 case SPEED_10 + DUPLEX_FULL:
8706 mac->forced_speed_duplex = ADVERTISE_10_FULL;
8707 break;
8708 case SPEED_100 + DUPLEX_HALF:
8709 mac->forced_speed_duplex = ADVERTISE_100_HALF;
8710 break;
8711 case SPEED_100 + DUPLEX_FULL:
8712 mac->forced_speed_duplex = ADVERTISE_100_FULL;
8713 break;
8714 case SPEED_1000 + DUPLEX_FULL:
8715 mac->autoneg = 1;
8716 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
8717 break;
8718 case SPEED_1000 + DUPLEX_HALF:
8719 default:
8720 goto err_inval;
8721 }
8722
8723
8724 adapter->hw.phy.mdix = AUTO_ALL_MODES;
8725
8726 return 0;
8727
8728err_inval:
8729 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
8730 return -EINVAL;
8731}
8732
8733static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8734 bool runtime)
8735{
8736 struct net_device *netdev = pci_get_drvdata(pdev);
8737 struct igb_adapter *adapter = netdev_priv(netdev);
8738 struct e1000_hw *hw = &adapter->hw;
8739 u32 ctrl, rctl, status;
8740 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
8741 bool wake;
8742
8743 rtnl_lock();
8744 netif_device_detach(netdev);
8745
8746 if (netif_running(netdev))
8747 __igb_close(netdev, true);
8748
8749 igb_ptp_suspend(adapter);
8750
8751 igb_clear_interrupt_scheme(adapter);
8752 rtnl_unlock();
8753
8754 status = rd32(E1000_STATUS);
8755 if (status & E1000_STATUS_LU)
8756 wufc &= ~E1000_WUFC_LNKC;
8757
8758 if (wufc) {
8759 igb_setup_rctl(adapter);
8760 igb_set_rx_mode(netdev);
8761
8762
8763 if (wufc & E1000_WUFC_MC) {
8764 rctl = rd32(E1000_RCTL);
8765 rctl |= E1000_RCTL_MPE;
8766 wr32(E1000_RCTL, rctl);
8767 }
8768
8769 ctrl = rd32(E1000_CTRL);
8770 ctrl |= E1000_CTRL_ADVD3WUC;
8771 wr32(E1000_CTRL, ctrl);
8772
8773
8774 igb_disable_pcie_master(hw);
8775
8776 wr32(E1000_WUC, E1000_WUC_PME_EN);
8777 wr32(E1000_WUFC, wufc);
8778 } else {
8779 wr32(E1000_WUC, 0);
8780 wr32(E1000_WUFC, 0);
8781 }
8782
8783 wake = wufc || adapter->en_mng_pt;
8784 if (!wake)
8785 igb_power_down_link(adapter);
8786 else
8787 igb_power_up_link(adapter);
8788
8789 if (enable_wake)
8790 *enable_wake = wake;
8791
8792
8793
8794
8795 igb_release_hw_control(adapter);
8796
8797 pci_disable_device(pdev);
8798
8799 return 0;
8800}
8801
8802static void igb_deliver_wake_packet(struct net_device *netdev)
8803{
8804 struct igb_adapter *adapter = netdev_priv(netdev);
8805 struct e1000_hw *hw = &adapter->hw;
8806 struct sk_buff *skb;
8807 u32 wupl;
8808
8809 wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
8810
8811
8812
8813
8814 if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
8815 return;
8816
8817 skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
8818 if (!skb)
8819 return;
8820
8821 skb_put(skb, wupl);
8822
8823
8824 wupl = roundup(wupl, 4);
8825
8826 memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
8827
8828 skb->protocol = eth_type_trans(skb, netdev);
8829 netif_rx(skb);
8830}
8831
8832static int __maybe_unused igb_suspend(struct device *dev)
8833{
8834 return __igb_shutdown(to_pci_dev(dev), NULL, 0);
8835}
8836
8837static int __maybe_unused igb_resume(struct device *dev)
8838{
8839 struct pci_dev *pdev = to_pci_dev(dev);
8840 struct net_device *netdev = pci_get_drvdata(pdev);
8841 struct igb_adapter *adapter = netdev_priv(netdev);
8842 struct e1000_hw *hw = &adapter->hw;
8843 u32 err, val;
8844
8845 pci_set_power_state(pdev, PCI_D0);
8846 pci_restore_state(pdev);
8847 pci_save_state(pdev);
8848
8849 if (!pci_device_is_present(pdev))
8850 return -ENODEV;
8851 err = pci_enable_device_mem(pdev);
8852 if (err) {
8853 dev_err(&pdev->dev,
8854 "igb: Cannot enable PCI device from suspend\n");
8855 return err;
8856 }
8857 pci_set_master(pdev);
8858
8859 pci_enable_wake(pdev, PCI_D3hot, 0);
8860 pci_enable_wake(pdev, PCI_D3cold, 0);
8861
8862 if (igb_init_interrupt_scheme(adapter, true)) {
8863 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8864 return -ENOMEM;
8865 }
8866
8867 igb_reset(adapter);
8868
8869
8870
8871
8872 igb_get_hw_control(adapter);
8873
8874 val = rd32(E1000_WUS);
8875 if (val & WAKE_PKT_WUS)
8876 igb_deliver_wake_packet(netdev);
8877
8878 wr32(E1000_WUS, ~0);
8879
8880 rtnl_lock();
8881 if (!err && netif_running(netdev))
8882 err = __igb_open(netdev, true);
8883
8884 if (!err)
8885 netif_device_attach(netdev);
8886 rtnl_unlock();
8887
8888 return err;
8889}
8890
8891static int __maybe_unused igb_runtime_idle(struct device *dev)
8892{
8893 struct pci_dev *pdev = to_pci_dev(dev);
8894 struct net_device *netdev = pci_get_drvdata(pdev);
8895 struct igb_adapter *adapter = netdev_priv(netdev);
8896
8897 if (!igb_has_link(adapter))
8898 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
8899
8900 return -EBUSY;
8901}
8902
8903static int __maybe_unused igb_runtime_suspend(struct device *dev)
8904{
8905 return __igb_shutdown(to_pci_dev(dev), NULL, 1);
8906}
8907
8908static int __maybe_unused igb_runtime_resume(struct device *dev)
8909{
8910 return igb_resume(dev);
8911}
8912
8913static void igb_shutdown(struct pci_dev *pdev)
8914{
8915 bool wake;
8916
8917 __igb_shutdown(pdev, &wake, 0);
8918
8919 if (system_state == SYSTEM_POWER_OFF) {
8920 pci_wake_from_d3(pdev, wake);
8921 pci_set_power_state(pdev, PCI_D3hot);
8922 }
8923}
8924
8925#ifdef CONFIG_PCI_IOV
8926static int igb_sriov_reinit(struct pci_dev *dev)
8927{
8928 struct net_device *netdev = pci_get_drvdata(dev);
8929 struct igb_adapter *adapter = netdev_priv(netdev);
8930 struct pci_dev *pdev = adapter->pdev;
8931
8932 rtnl_lock();
8933
8934 if (netif_running(netdev))
8935 igb_close(netdev);
8936 else
8937 igb_reset(adapter);
8938
8939 igb_clear_interrupt_scheme(adapter);
8940
8941 igb_init_queue_configuration(adapter);
8942
8943 if (igb_init_interrupt_scheme(adapter, true)) {
8944 rtnl_unlock();
8945 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8946 return -ENOMEM;
8947 }
8948
8949 if (netif_running(netdev))
8950 igb_open(netdev);
8951
8952 rtnl_unlock();
8953
8954 return 0;
8955}
8956
8957static int igb_pci_disable_sriov(struct pci_dev *dev)
8958{
8959 int err = igb_disable_sriov(dev);
8960
8961 if (!err)
8962 err = igb_sriov_reinit(dev);
8963
8964 return err;
8965}
8966
8967static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
8968{
8969 int err = igb_enable_sriov(dev, num_vfs);
8970
8971 if (err)
8972 goto out;
8973
8974 err = igb_sriov_reinit(dev);
8975 if (!err)
8976 return num_vfs;
8977
8978out:
8979 return err;
8980}
8981
8982#endif
8983static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
8984{
8985#ifdef CONFIG_PCI_IOV
8986 if (num_vfs == 0)
8987 return igb_pci_disable_sriov(dev);
8988 else
8989 return igb_pci_enable_sriov(dev, num_vfs);
8990#endif
8991 return 0;
8992}
8993
8994
8995
8996
8997
8998
8999
9000
9001
9002static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
9003 pci_channel_state_t state)
9004{
9005 struct net_device *netdev = pci_get_drvdata(pdev);
9006 struct igb_adapter *adapter = netdev_priv(netdev);
9007
9008 netif_device_detach(netdev);
9009
9010 if (state == pci_channel_io_perm_failure)
9011 return PCI_ERS_RESULT_DISCONNECT;
9012
9013 if (netif_running(netdev))
9014 igb_down(adapter);
9015 pci_disable_device(pdev);
9016
9017
9018 return PCI_ERS_RESULT_NEED_RESET;
9019}
9020
9021
9022
9023
9024
9025
9026
9027
9028static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
9029{
9030 struct net_device *netdev = pci_get_drvdata(pdev);
9031 struct igb_adapter *adapter = netdev_priv(netdev);
9032 struct e1000_hw *hw = &adapter->hw;
9033 pci_ers_result_t result;
9034
9035 if (pci_enable_device_mem(pdev)) {
9036 dev_err(&pdev->dev,
9037 "Cannot re-enable PCI device after reset.\n");
9038 result = PCI_ERS_RESULT_DISCONNECT;
9039 } else {
9040 pci_set_master(pdev);
9041 pci_restore_state(pdev);
9042 pci_save_state(pdev);
9043
9044 pci_enable_wake(pdev, PCI_D3hot, 0);
9045 pci_enable_wake(pdev, PCI_D3cold, 0);
9046
9047
9048
9049
9050 hw->hw_addr = adapter->io_addr;
9051
9052 igb_reset(adapter);
9053 wr32(E1000_WUS, ~0);
9054 result = PCI_ERS_RESULT_RECOVERED;
9055 }
9056
9057 return result;
9058}
9059
9060
9061
9062
9063
9064
9065
9066
9067
9068static void igb_io_resume(struct pci_dev *pdev)
9069{
9070 struct net_device *netdev = pci_get_drvdata(pdev);
9071 struct igb_adapter *adapter = netdev_priv(netdev);
9072
9073 if (netif_running(netdev)) {
9074 if (igb_up(adapter)) {
9075 dev_err(&pdev->dev, "igb_up failed after reset\n");
9076 return;
9077 }
9078 }
9079
9080 netif_device_attach(netdev);
9081
9082
9083
9084
9085 igb_get_hw_control(adapter);
9086}
9087
9088
9089
9090
9091
9092
9093static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
9094{
9095 struct e1000_hw *hw = &adapter->hw;
9096 u32 rar_low, rar_high;
9097 u8 *addr = adapter->mac_table[index].addr;
9098
9099
9100
9101
9102
9103
9104 rar_low = le32_to_cpup((__le32 *)(addr));
9105 rar_high = le16_to_cpup((__le16 *)(addr + 4));
9106
9107
9108 if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
9109 if (is_valid_ether_addr(addr))
9110 rar_high |= E1000_RAH_AV;
9111
9112 if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
9113 rar_high |= E1000_RAH_ASEL_SRC_ADDR;
9114
9115 switch (hw->mac.type) {
9116 case e1000_82575:
9117 case e1000_i210:
9118 if (adapter->mac_table[index].state &
9119 IGB_MAC_STATE_QUEUE_STEERING)
9120 rar_high |= E1000_RAH_QSEL_ENABLE;
9121
9122 rar_high |= E1000_RAH_POOL_1 *
9123 adapter->mac_table[index].queue;
9124 break;
9125 default:
9126 rar_high |= E1000_RAH_POOL_1 <<
9127 adapter->mac_table[index].queue;
9128 break;
9129 }
9130 }
9131
9132 wr32(E1000_RAL(index), rar_low);
9133 wrfl();
9134 wr32(E1000_RAH(index), rar_high);
9135 wrfl();
9136}
9137
9138static int igb_set_vf_mac(struct igb_adapter *adapter,
9139 int vf, unsigned char *mac_addr)
9140{
9141 struct e1000_hw *hw = &adapter->hw;
9142
9143
9144
9145 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
9146 unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
9147
9148 ether_addr_copy(vf_mac_addr, mac_addr);
9149 ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
9150 adapter->mac_table[rar_entry].queue = vf;
9151 adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
9152 igb_rar_set_index(adapter, rar_entry);
9153
9154 return 0;
9155}
9156
9157static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
9158{
9159 struct igb_adapter *adapter = netdev_priv(netdev);
9160
9161 if (vf >= adapter->vfs_allocated_count)
9162 return -EINVAL;
9163
9164
9165
9166
9167
9168
9169
9170 if (is_zero_ether_addr(mac)) {
9171 adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
9172 dev_info(&adapter->pdev->dev,
9173 "remove administratively set MAC on VF %d\n",
9174 vf);
9175 } else if (is_valid_ether_addr(mac)) {
9176 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
9177 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
9178 mac, vf);
9179 dev_info(&adapter->pdev->dev,
9180 "Reload the VF driver to make this change effective.");
9181
9182 if (test_bit(__IGB_DOWN, &adapter->state)) {
9183 dev_warn(&adapter->pdev->dev,
9184 "The VF MAC address has been set, but the PF device is not up.\n");
9185 dev_warn(&adapter->pdev->dev,
9186 "Bring the PF device up before attempting to use the VF device.\n");
9187 }
9188 } else {
9189 return -EINVAL;
9190 }
9191 return igb_set_vf_mac(adapter, vf, mac);
9192}
9193
9194static int igb_link_mbps(int internal_link_speed)
9195{
9196 switch (internal_link_speed) {
9197 case SPEED_100:
9198 return 100;
9199 case SPEED_1000:
9200 return 1000;
9201 default:
9202 return 0;
9203 }
9204}
9205
9206static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
9207 int link_speed)
9208{
9209 int rf_dec, rf_int;
9210 u32 bcnrc_val;
9211
9212 if (tx_rate != 0) {
9213
9214 rf_int = link_speed / tx_rate;
9215 rf_dec = (link_speed - (rf_int * tx_rate));
9216 rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
9217 tx_rate;
9218
9219 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
9220 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
9221 E1000_RTTBCNRC_RF_INT_MASK);
9222 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
9223 } else {
9224 bcnrc_val = 0;
9225 }
9226
9227 wr32(E1000_RTTDQSEL, vf);
9228
9229
9230
9231 wr32(E1000_RTTBCNRM, 0x14);
9232 wr32(E1000_RTTBCNRC, bcnrc_val);
9233}
9234
9235static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
9236{
9237 int actual_link_speed, i;
9238 bool reset_rate = false;
9239
9240
9241 if ((adapter->vf_rate_link_speed == 0) ||
9242 (adapter->hw.mac.type != e1000_82576))
9243 return;
9244
9245 actual_link_speed = igb_link_mbps(adapter->link_speed);
9246 if (actual_link_speed != adapter->vf_rate_link_speed) {
9247 reset_rate = true;
9248 adapter->vf_rate_link_speed = 0;
9249 dev_info(&adapter->pdev->dev,
9250 "Link speed has been changed. VF Transmit rate is disabled\n");
9251 }
9252
9253 for (i = 0; i < adapter->vfs_allocated_count; i++) {
9254 if (reset_rate)
9255 adapter->vf_data[i].tx_rate = 0;
9256
9257 igb_set_vf_rate_limit(&adapter->hw, i,
9258 adapter->vf_data[i].tx_rate,
9259 actual_link_speed);
9260 }
9261}
9262
9263static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
9264 int min_tx_rate, int max_tx_rate)
9265{
9266 struct igb_adapter *adapter = netdev_priv(netdev);
9267 struct e1000_hw *hw = &adapter->hw;
9268 int actual_link_speed;
9269
9270 if (hw->mac.type != e1000_82576)
9271 return -EOPNOTSUPP;
9272
9273 if (min_tx_rate)
9274 return -EINVAL;
9275
9276 actual_link_speed = igb_link_mbps(adapter->link_speed);
9277 if ((vf >= adapter->vfs_allocated_count) ||
9278 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
9279 (max_tx_rate < 0) ||
9280 (max_tx_rate > actual_link_speed))
9281 return -EINVAL;
9282
9283 adapter->vf_rate_link_speed = actual_link_speed;
9284 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
9285 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
9286
9287 return 0;
9288}
9289
9290static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
9291 bool setting)
9292{
9293 struct igb_adapter *adapter = netdev_priv(netdev);
9294 struct e1000_hw *hw = &adapter->hw;
9295 u32 reg_val, reg_offset;
9296
9297 if (!adapter->vfs_allocated_count)
9298 return -EOPNOTSUPP;
9299
9300 if (vf >= adapter->vfs_allocated_count)
9301 return -EINVAL;
9302
9303 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
9304 reg_val = rd32(reg_offset);
9305 if (setting)
9306 reg_val |= (BIT(vf) |
9307 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9308 else
9309 reg_val &= ~(BIT(vf) |
9310 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9311 wr32(reg_offset, reg_val);
9312
9313 adapter->vf_data[vf].spoofchk_enabled = setting;
9314 return 0;
9315}
9316
9317static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
9318{
9319 struct igb_adapter *adapter = netdev_priv(netdev);
9320
9321 if (vf >= adapter->vfs_allocated_count)
9322 return -EINVAL;
9323 if (adapter->vf_data[vf].trusted == setting)
9324 return 0;
9325
9326 adapter->vf_data[vf].trusted = setting;
9327
9328 dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
9329 vf, setting ? "" : "not ");
9330 return 0;
9331}
9332
9333static int igb_ndo_get_vf_config(struct net_device *netdev,
9334 int vf, struct ifla_vf_info *ivi)
9335{
9336 struct igb_adapter *adapter = netdev_priv(netdev);
9337 if (vf >= adapter->vfs_allocated_count)
9338 return -EINVAL;
9339 ivi->vf = vf;
9340 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
9341 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
9342 ivi->min_tx_rate = 0;
9343 ivi->vlan = adapter->vf_data[vf].pf_vlan;
9344 ivi->qos = adapter->vf_data[vf].pf_qos;
9345 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
9346 ivi->trusted = adapter->vf_data[vf].trusted;
9347 return 0;
9348}
9349
9350static void igb_vmm_control(struct igb_adapter *adapter)
9351{
9352 struct e1000_hw *hw = &adapter->hw;
9353 u32 reg;
9354
9355 switch (hw->mac.type) {
9356 case e1000_82575:
9357 case e1000_i210:
9358 case e1000_i211:
9359 case e1000_i354:
9360 default:
9361
9362 return;
9363 case e1000_82576:
9364
9365 reg = rd32(E1000_DTXCTL);
9366 reg |= E1000_DTXCTL_VLAN_ADDED;
9367 wr32(E1000_DTXCTL, reg);
9368
9369 case e1000_82580:
9370
9371 reg = rd32(E1000_RPLOLR);
9372 reg |= E1000_RPLOLR_STRVLAN;
9373 wr32(E1000_RPLOLR, reg);
9374
9375 case e1000_i350:
9376
9377 break;
9378 }
9379
9380 if (adapter->vfs_allocated_count) {
9381 igb_vmdq_set_loopback_pf(hw, true);
9382 igb_vmdq_set_replication_pf(hw, true);
9383 igb_vmdq_set_anti_spoofing_pf(hw, true,
9384 adapter->vfs_allocated_count);
9385 } else {
9386 igb_vmdq_set_loopback_pf(hw, false);
9387 igb_vmdq_set_replication_pf(hw, false);
9388 }
9389}
9390
9391static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
9392{
9393 struct e1000_hw *hw = &adapter->hw;
9394 u32 dmac_thr;
9395 u16 hwm;
9396
9397 if (hw->mac.type > e1000_82580) {
9398 if (adapter->flags & IGB_FLAG_DMAC) {
9399 u32 reg;
9400
9401
9402 wr32(E1000_DMCTXTH, 0);
9403
9404
9405
9406
9407
9408 hwm = 64 * (pba - 6);
9409 reg = rd32(E1000_FCRTC);
9410 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
9411 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
9412 & E1000_FCRTC_RTH_COAL_MASK);
9413 wr32(E1000_FCRTC, reg);
9414
9415
9416
9417
9418 dmac_thr = pba - 10;
9419 reg = rd32(E1000_DMACR);
9420 reg &= ~E1000_DMACR_DMACTHR_MASK;
9421 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
9422 & E1000_DMACR_DMACTHR_MASK);
9423
9424
9425 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
9426
9427
9428 reg |= (1000 >> 5);
9429
9430
9431 if (hw->mac.type != e1000_i354)
9432 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9433
9434 wr32(E1000_DMACR, reg);
9435
9436
9437
9438
9439 wr32(E1000_DMCRTRH, 0);
9440
9441 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
9442
9443 wr32(E1000_DMCTLX, reg);
9444
9445
9446
9447
9448 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
9449 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
9450
9451
9452
9453
9454 reg = rd32(E1000_PCIEMISC);
9455 reg &= ~E1000_PCIEMISC_LX_DECISION;
9456 wr32(E1000_PCIEMISC, reg);
9457 }
9458 } else if (hw->mac.type == e1000_82580) {
9459 u32 reg = rd32(E1000_PCIEMISC);
9460
9461 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
9462 wr32(E1000_DMACR, 0);
9463 }
9464}
9465
9466
9467
9468
9469
9470
9471
9472
9473
9474
9475
9476s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9477 u8 dev_addr, u8 *data)
9478{
9479 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9480 struct i2c_client *this_client = adapter->i2c_client;
9481 s32 status;
9482 u16 swfw_mask = 0;
9483
9484 if (!this_client)
9485 return E1000_ERR_I2C;
9486
9487 swfw_mask = E1000_SWFW_PHY0_SM;
9488
9489 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9490 return E1000_ERR_SWFW_SYNC;
9491
9492 status = i2c_smbus_read_byte_data(this_client, byte_offset);
9493 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9494
9495 if (status < 0)
9496 return E1000_ERR_I2C;
9497 else {
9498 *data = status;
9499 return 0;
9500 }
9501}
9502
9503
9504
9505
9506
9507
9508
9509
9510
9511
9512
9513s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9514 u8 dev_addr, u8 data)
9515{
9516 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9517 struct i2c_client *this_client = adapter->i2c_client;
9518 s32 status;
9519 u16 swfw_mask = E1000_SWFW_PHY0_SM;
9520
9521 if (!this_client)
9522 return E1000_ERR_I2C;
9523
9524 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9525 return E1000_ERR_SWFW_SYNC;
9526 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
9527 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9528
9529 if (status)
9530 return E1000_ERR_I2C;
9531 else
9532 return 0;
9533
9534}
9535
9536int igb_reinit_queues(struct igb_adapter *adapter)
9537{
9538 struct net_device *netdev = adapter->netdev;
9539 struct pci_dev *pdev = adapter->pdev;
9540 int err = 0;
9541
9542 if (netif_running(netdev))
9543 igb_close(netdev);
9544
9545 igb_reset_interrupt_capability(adapter);
9546
9547 if (igb_init_interrupt_scheme(adapter, true)) {
9548 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9549 return -ENOMEM;
9550 }
9551
9552 if (netif_running(netdev))
9553 err = igb_open(netdev);
9554
9555 return err;
9556}
9557
9558static void igb_nfc_filter_exit(struct igb_adapter *adapter)
9559{
9560 struct igb_nfc_filter *rule;
9561
9562 spin_lock(&adapter->nfc_lock);
9563
9564 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9565 igb_erase_filter(adapter, rule);
9566
9567 hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
9568 igb_erase_filter(adapter, rule);
9569
9570 spin_unlock(&adapter->nfc_lock);
9571}
9572
9573static void igb_nfc_filter_restore(struct igb_adapter *adapter)
9574{
9575 struct igb_nfc_filter *rule;
9576
9577 spin_lock(&adapter->nfc_lock);
9578
9579 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9580 igb_add_filter(adapter, rule);
9581
9582 spin_unlock(&adapter->nfc_lock);
9583}
9584
9585