// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2014 Intel Corporation. */

4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5
6#include <linux/module.h>
7#include <linux/types.h>
8#include <linux/init.h>
9#include <linux/bitops.h>
10#include <linux/vmalloc.h>
11#include <linux/pagemap.h>
12#include <linux/netdevice.h>
13#include <linux/ipv6.h>
14#include <linux/slab.h>
15#include <net/checksum.h>
16#include <net/ip6_checksum.h>
17#include <net/pkt_sched.h>
18#include <net/pkt_cls.h>
19#include <linux/net_tstamp.h>
20#include <linux/mii.h>
21#include <linux/ethtool.h>
22#include <linux/if.h>
23#include <linux/if_vlan.h>
24#include <linux/pci.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/ip.h>
28#include <linux/tcp.h>
29#include <linux/sctp.h>
30#include <linux/if_ether.h>
31#include <linux/aer.h>
32#include <linux/prefetch.h>
33#include <linux/pm_runtime.h>
34#include <linux/etherdevice.h>
35#ifdef CONFIG_IGB_DCA
36#include <linux/dca.h>
37#endif
38#include <linux/i2c.h>
39#include "igb.h"
40
41#define MAJ 5
42#define MIN 4
43#define BUILD 0
44#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
45__stringify(BUILD) "-k"
46
47enum queue_mode {
48 QUEUE_MODE_STRICT_PRIORITY,
49 QUEUE_MODE_STREAM_RESERVATION,
50};
51
52enum tx_queue_prio {
53 TX_QUEUE_PRIO_HIGH,
54 TX_QUEUE_PRIO_LOW,
55};
56
57char igb_driver_name[] = "igb";
58char igb_driver_version[] = DRV_VERSION;
59static const char igb_driver_string[] =
60 "Intel(R) Gigabit Ethernet Network Driver";
61static const char igb_copyright[] =
62 "Copyright (c) 2007-2014 Intel Corporation.";
63
64static const struct e1000_info *igb_info_tbl[] = {
65 [board_82575] = &e1000_82575_info,
66};
67
68static const struct pci_device_id igb_pci_tbl[] = {
69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
83 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
84 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
86 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
87 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
88 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
89 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
90 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
91 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
92 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
93 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
94 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
95 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
96 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
97 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
98 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
99 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
100 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
101 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
102 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
103 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
104
105 {0, }
106};
107
108MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
109
110static int igb_setup_all_tx_resources(struct igb_adapter *);
111static int igb_setup_all_rx_resources(struct igb_adapter *);
112static void igb_free_all_tx_resources(struct igb_adapter *);
113static void igb_free_all_rx_resources(struct igb_adapter *);
114static void igb_setup_mrqc(struct igb_adapter *);
115static int igb_probe(struct pci_dev *, const struct pci_device_id *);
116static void igb_remove(struct pci_dev *pdev);
117static int igb_sw_init(struct igb_adapter *);
118int igb_open(struct net_device *);
119int igb_close(struct net_device *);
120static void igb_configure(struct igb_adapter *);
121static void igb_configure_tx(struct igb_adapter *);
122static void igb_configure_rx(struct igb_adapter *);
123static void igb_clean_all_tx_rings(struct igb_adapter *);
124static void igb_clean_all_rx_rings(struct igb_adapter *);
125static void igb_clean_tx_ring(struct igb_ring *);
126static void igb_clean_rx_ring(struct igb_ring *);
127static void igb_set_rx_mode(struct net_device *);
128static void igb_update_phy_info(struct timer_list *);
129static void igb_watchdog(struct timer_list *);
130static void igb_watchdog_task(struct work_struct *);
131static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
132static void igb_get_stats64(struct net_device *dev,
133 struct rtnl_link_stats64 *stats);
134static int igb_change_mtu(struct net_device *, int);
135static int igb_set_mac(struct net_device *, void *);
136static void igb_set_uta(struct igb_adapter *adapter, bool set);
137static irqreturn_t igb_intr(int irq, void *);
138static irqreturn_t igb_intr_msi(int irq, void *);
139static irqreturn_t igb_msix_other(int irq, void *);
140static irqreturn_t igb_msix_ring(int irq, void *);
141#ifdef CONFIG_IGB_DCA
142static void igb_update_dca(struct igb_q_vector *);
143static void igb_setup_dca(struct igb_adapter *);
144#endif
145static int igb_poll(struct napi_struct *, int);
146static bool igb_clean_tx_irq(struct igb_q_vector *, int);
147static int igb_clean_rx_irq(struct igb_q_vector *, int);
148static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
149static void igb_tx_timeout(struct net_device *);
150static void igb_reset_task(struct work_struct *);
151static void igb_vlan_mode(struct net_device *netdev,
152 netdev_features_t features);
153static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
154static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
155static void igb_restore_vlan(struct igb_adapter *);
156static void igb_rar_set_index(struct igb_adapter *, u32);
157static void igb_ping_all_vfs(struct igb_adapter *);
158static void igb_msg_task(struct igb_adapter *);
159static void igb_vmm_control(struct igb_adapter *);
160static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
161static void igb_flush_mac_table(struct igb_adapter *);
162static int igb_available_rars(struct igb_adapter *, u8);
163static void igb_set_default_mac_filter(struct igb_adapter *);
164static int igb_uc_sync(struct net_device *, const unsigned char *);
165static int igb_uc_unsync(struct net_device *, const unsigned char *);
166static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
167static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
168static int igb_ndo_set_vf_vlan(struct net_device *netdev,
169 int vf, u16 vlan, u8 qos, __be16 vlan_proto);
170static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
171static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
172 bool setting);
173static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
174 bool setting);
175static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
176 struct ifla_vf_info *ivi);
177static void igb_check_vf_rate_limit(struct igb_adapter *);
178static void igb_nfc_filter_exit(struct igb_adapter *adapter);
179static void igb_nfc_filter_restore(struct igb_adapter *adapter);
180
181#ifdef CONFIG_PCI_IOV
182static int igb_vf_configure(struct igb_adapter *adapter, int vf);
183static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
184static int igb_disable_sriov(struct pci_dev *dev);
185static int igb_pci_disable_sriov(struct pci_dev *dev);
186#endif
187
188static int igb_suspend(struct device *);
189static int igb_resume(struct device *);
190static int igb_runtime_suspend(struct device *dev);
191static int igb_runtime_resume(struct device *dev);
192static int igb_runtime_idle(struct device *dev);
193static const struct dev_pm_ops igb_pm_ops = {
194 SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
195 SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
196 igb_runtime_idle)
197};
198static void igb_shutdown(struct pci_dev *);
199static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
200#ifdef CONFIG_IGB_DCA
201static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
202static struct notifier_block dca_notifier = {
203 .notifier_call = igb_notify_dca,
204 .next = NULL,
205 .priority = 0
206};
207#endif
208#ifdef CONFIG_PCI_IOV
209static unsigned int max_vfs;
210module_param(max_vfs, uint, 0);
211MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
212#endif
213
214static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
215 pci_channel_state_t);
216static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
217static void igb_io_resume(struct pci_dev *);
218
219static const struct pci_error_handlers igb_err_handler = {
220 .error_detected = igb_io_error_detected,
221 .slot_reset = igb_io_slot_reset,
222 .resume = igb_io_resume,
223};
224
225static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
226
227static struct pci_driver igb_driver = {
228 .name = igb_driver_name,
229 .id_table = igb_pci_tbl,
230 .probe = igb_probe,
231 .remove = igb_remove,
232#ifdef CONFIG_PM
233 .driver.pm = &igb_pm_ops,
234#endif
235 .shutdown = igb_shutdown,
236 .sriov_configure = igb_pci_sriov_configure,
237 .err_handler = &igb_err_handler
238};
239
240MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
241MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
242MODULE_LICENSE("GPL v2");
243MODULE_VERSION(DRV_VERSION);
244
245#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
246static int debug = -1;
247module_param(debug, int, 0);
248MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
249
250struct igb_reg_info {
251 u32 ofs;
252 char *name;
253};
254
255static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
258 {E1000_CTRL, "CTRL"},
259 {E1000_STATUS, "STATUS"},
260 {E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
263 {E1000_ICR, "ICR"},

	/* Rx Registers */
266 {E1000_RCTL, "RCTL"},
267 {E1000_RDLEN(0), "RDLEN"},
268 {E1000_RDH(0), "RDH"},
269 {E1000_RDT(0), "RDT"},
270 {E1000_RXDCTL(0), "RXDCTL"},
271 {E1000_RDBAL(0), "RDBAL"},
272 {E1000_RDBAH(0), "RDBAH"},

	/* Tx Registers */
275 {E1000_TCTL, "TCTL"},
276 {E1000_TDBAL(0), "TDBAL"},
277 {E1000_TDBAH(0), "TDBAH"},
278 {E1000_TDLEN(0), "TDLEN"},
279 {E1000_TDH(0), "TDH"},
280 {E1000_TDT(0), "TDT"},
281 {E1000_TXDCTL(0), "TXDCTL"},
282 {E1000_TDFH, "TDFH"},
283 {E1000_TDFT, "TDFT"},
284 {E1000_TDFHS, "TDFHS"},
285 {E1000_TDFPC, "TDFPC"},

	/* List Terminator */
288 {}
289};

/* igb_regdump - register printout routine */
292static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
293{
294 int n = 0;
295 char rname[16];
296 u32 regs[8];
297
298 switch (reginfo->ofs) {
299 case E1000_RDLEN(0):
300 for (n = 0; n < 4; n++)
301 regs[n] = rd32(E1000_RDLEN(n));
302 break;
303 case E1000_RDH(0):
304 for (n = 0; n < 4; n++)
305 regs[n] = rd32(E1000_RDH(n));
306 break;
307 case E1000_RDT(0):
308 for (n = 0; n < 4; n++)
309 regs[n] = rd32(E1000_RDT(n));
310 break;
311 case E1000_RXDCTL(0):
312 for (n = 0; n < 4; n++)
313 regs[n] = rd32(E1000_RXDCTL(n));
314 break;
315 case E1000_RDBAL(0):
316 for (n = 0; n < 4; n++)
317 regs[n] = rd32(E1000_RDBAL(n));
318 break;
319 case E1000_RDBAH(0):
320 for (n = 0; n < 4; n++)
321 regs[n] = rd32(E1000_RDBAH(n));
322 break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
327 case E1000_TDBAH(0):
328 for (n = 0; n < 4; n++)
329 regs[n] = rd32(E1000_TDBAH(n));
330 break;
331 case E1000_TDLEN(0):
332 for (n = 0; n < 4; n++)
333 regs[n] = rd32(E1000_TDLEN(n));
334 break;
335 case E1000_TDH(0):
336 for (n = 0; n < 4; n++)
337 regs[n] = rd32(E1000_TDH(n));
338 break;
339 case E1000_TDT(0):
340 for (n = 0; n < 4; n++)
341 regs[n] = rd32(E1000_TDT(n));
342 break;
343 case E1000_TXDCTL(0):
344 for (n = 0; n < 4; n++)
345 regs[n] = rd32(E1000_TXDCTL(n));
346 break;
347 default:
348 pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
349 return;
350 }
351
352 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
353 pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
354 regs[2], regs[3]);
355}

/* igb_dump - Print registers, Tx-rings and Rx-rings */
358static void igb_dump(struct igb_adapter *adapter)
359{
360 struct net_device *netdev = adapter->netdev;
361 struct e1000_hw *hw = &adapter->hw;
362 struct igb_reg_info *reginfo;
363 struct igb_ring *tx_ring;
364 union e1000_adv_tx_desc *tx_desc;
365 struct my_u0 { u64 a; u64 b; } *u0;
366 struct igb_ring *rx_ring;
367 union e1000_adv_rx_desc *rx_desc;
368 u32 staterr;
369 u16 i, n;
370
371 if (!netif_msg_hw(adapter))
372 return;

	/* Print netdevice Info */
375 if (netdev) {
376 dev_info(&adapter->pdev->dev, "Net device Info\n");
377 pr_info("Device Name state trans_start\n");
378 pr_info("%-15s %016lX %016lX\n", netdev->name,
379 netdev->state, dev_trans_start(netdev));
380 }

	/* Print Registers */
383 dev_info(&adapter->pdev->dev, "Register Dump\n");
384 pr_info(" Register Name Value\n");
385 for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
386 reginfo->name; reginfo++) {
387 igb_regdump(hw, reginfo);
388 }

	/* Print TX Ring Summary */
391 if (!netdev || !netif_running(netdev))
392 goto exit;
393
394 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
395 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
396 for (n = 0; n < adapter->num_tx_queues; n++) {
397 struct igb_tx_buffer *buffer_info;
398 tx_ring = adapter->tx_ring[n];
399 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
400 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
401 n, tx_ring->next_to_use, tx_ring->next_to_clean,
402 (u64)dma_unmap_addr(buffer_info, dma),
403 dma_unmap_len(buffer_info, len),
404 buffer_info->next_to_watch,
405 (u64)buffer_info->time_stamp);
406 }

	/* Print TX Rings */
409 if (!netif_msg_tx_done(adapter))
410 goto rx_ring_summary;
411
412 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor (16 bytes): the first quadword holds
	 * the buffer address and the second packs the PAYLEN, POPTS, CC, IDX,
	 * STA, DCMD, DTYP, MAC, RSV and DTALEN fields.  The dump below prints
	 * both quadwords together with the driver's buffer_info state.
	 */

425 for (n = 0; n < adapter->num_tx_queues; n++) {
426 tx_ring = adapter->tx_ring[n];
427 pr_info("------------------------------------\n");
428 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
429 pr_info("------------------------------------\n");
430 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
431
432 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
433 const char *next_desc;
434 struct igb_tx_buffer *buffer_info;
435 tx_desc = IGB_TX_DESC(tx_ring, i);
436 buffer_info = &tx_ring->tx_buffer_info[i];
437 u0 = (struct my_u0 *)tx_desc;
438 if (i == tx_ring->next_to_use &&
439 i == tx_ring->next_to_clean)
440 next_desc = " NTC/U";
441 else if (i == tx_ring->next_to_use)
442 next_desc = " NTU";
443 else if (i == tx_ring->next_to_clean)
444 next_desc = " NTC";
445 else
446 next_desc = "";
447
448 pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
449 i, le64_to_cpu(u0->a),
450 le64_to_cpu(u0->b),
451 (u64)dma_unmap_addr(buffer_info, dma),
452 dma_unmap_len(buffer_info, len),
453 buffer_info->next_to_watch,
454 (u64)buffer_info->time_stamp,
455 buffer_info->skb, next_desc);
456
457 if (netif_msg_pktdata(adapter) && buffer_info->skb)
458 print_hex_dump(KERN_INFO, "",
459 DUMP_PREFIX_ADDRESS,
460 16, 1, buffer_info->skb->data,
461 dma_unmap_len(buffer_info, len),
462 true);
463 }
464 }

	/* Print RX Rings Summary */
467rx_ring_summary:
468 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
469 pr_info("Queue [NTU] [NTC]\n");
470 for (n = 0; n < adapter->num_rx_queues; n++) {
471 rx_ring = adapter->rx_ring[n];
472 pr_info(" %5d %5X %5X\n",
473 n, rx_ring->next_to_use, rx_ring->next_to_clean);
474 }

	/* Print RX Rings */
477 if (!netif_msg_rx_status(adapter))
478 goto exit;
479
480 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
	/* Receive Descriptor Formats
	 *
	 * Advanced Receive Descriptor (Read) format: the two quadwords hold
	 * the packet buffer and header buffer addresses the hardware will
	 * DMA into.  Once the descriptor has been written back (DD set), it
	 * is shown in the Advanced Receive Descriptor (Write-Back) format,
	 * which carries the RSS hash/packet type, checksum, VLAN, error,
	 * status and length fields instead.
	 */

503 for (n = 0; n < adapter->num_rx_queues; n++) {
504 rx_ring = adapter->rx_ring[n];
505 pr_info("------------------------------------\n");
506 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
507 pr_info("------------------------------------\n");
508 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
509 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
510
511 for (i = 0; i < rx_ring->count; i++) {
512 const char *next_desc;
513 struct igb_rx_buffer *buffer_info;
514 buffer_info = &rx_ring->rx_buffer_info[i];
515 rx_desc = IGB_RX_DESC(rx_ring, i);
516 u0 = (struct my_u0 *)rx_desc;
517 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
518
519 if (i == rx_ring->next_to_use)
520 next_desc = " NTU";
521 else if (i == rx_ring->next_to_clean)
522 next_desc = " NTC";
523 else
524 next_desc = "";
525
526 if (staterr & E1000_RXD_STAT_DD) {
527
528 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
529 "RWB", i,
530 le64_to_cpu(u0->a),
531 le64_to_cpu(u0->b),
532 next_desc);
533 } else {
534 pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
535 "R ", i,
536 le64_to_cpu(u0->a),
537 le64_to_cpu(u0->b),
538 (u64)buffer_info->dma,
539 next_desc);
540
541 if (netif_msg_pktdata(adapter) &&
542 buffer_info->dma && buffer_info->page) {
543 print_hex_dump(KERN_INFO, "",
544 DUMP_PREFIX_ADDRESS,
545 16, 1,
546 page_address(buffer_info->page) +
547 buffer_info->page_offset,
548 igb_rx_bufsz(rx_ring), true);
549 }
550 }
551 }
552 }
553
554exit:
555 return;
556}
557
/**
 *  igb_get_i2c_data - Reads the I2C SDA data bit
 *  @data: opaque pointer to adapter struct
 *
 *  Returns the I2C data bit value
 **/
565static int igb_get_i2c_data(void *data)
566{
567 struct igb_adapter *adapter = (struct igb_adapter *)data;
568 struct e1000_hw *hw = &adapter->hw;
569 s32 i2cctl = rd32(E1000_I2CPARAMS);
570
571 return !!(i2cctl & E1000_I2C_DATA_IN);
572}
573
/**
 *  igb_set_i2c_data - Sets the I2C data bit
 *  @data: opaque pointer to adapter struct
 *  @state: I2C data value (0 or 1) to set
 *
 *  Sets the I2C data bit
 **/
581static void igb_set_i2c_data(void *data, int state)
582{
583 struct igb_adapter *adapter = (struct igb_adapter *)data;
584 struct e1000_hw *hw = &adapter->hw;
585 s32 i2cctl = rd32(E1000_I2CPARAMS);
586
587 if (state)
588 i2cctl |= E1000_I2C_DATA_OUT;
589 else
590 i2cctl &= ~E1000_I2C_DATA_OUT;
591
592 i2cctl &= ~E1000_I2C_DATA_OE_N;
593 i2cctl |= E1000_I2C_CLK_OE_N;
594 wr32(E1000_I2CPARAMS, i2cctl);
595 wrfl();
596
597}
598
/**
 *  igb_set_i2c_clk - Sets the I2C SCL clock
 *  @data: opaque pointer to adapter struct
 *  @state: state to set the I2C clock line to
 *
 *  Sets the I2C clock line to state
 **/
606static void igb_set_i2c_clk(void *data, int state)
607{
608 struct igb_adapter *adapter = (struct igb_adapter *)data;
609 struct e1000_hw *hw = &adapter->hw;
610 s32 i2cctl = rd32(E1000_I2CPARAMS);
611
612 if (state) {
613 i2cctl |= E1000_I2C_CLK_OUT;
614 i2cctl &= ~E1000_I2C_CLK_OE_N;
615 } else {
616 i2cctl &= ~E1000_I2C_CLK_OUT;
617 i2cctl &= ~E1000_I2C_CLK_OE_N;
618 }
619 wr32(E1000_I2CPARAMS, i2cctl);
620 wrfl();
621}
622
/**
 *  igb_get_i2c_clk - Gets the I2C SCL clock state
 *  @data: opaque pointer to adapter struct
 *
 *  Gets the I2C clock state
 **/
629static int igb_get_i2c_clk(void *data)
630{
631 struct igb_adapter *adapter = (struct igb_adapter *)data;
632 struct e1000_hw *hw = &adapter->hw;
633 s32 i2cctl = rd32(E1000_I2CPARAMS);
634
635 return !!(i2cctl & E1000_I2C_CLK_IN);
636}
637
638static const struct i2c_algo_bit_data igb_i2c_algo = {
639 .setsda = igb_set_i2c_data,
640 .setscl = igb_set_i2c_clk,
641 .getsda = igb_get_i2c_data,
642 .getscl = igb_get_i2c_clk,
643 .udelay = 5,
644 .timeout = 20,
645};
646
/**
 *  igb_get_hw_dev - return device
 *  @hw: pointer to hardware structure
 *
 *  used by hardware layer to print debugging information
 **/
653struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
654{
655 struct igb_adapter *adapter = hw->back;
656 return adapter->netdev;
657}
658
/**
 *  igb_init_module - Driver Registration Routine
 *
 *  igb_init_module is the first routine called when the driver is
 *  loaded.  All it does is register with the PCI subsystem.
 **/
665static int __init igb_init_module(void)
666{
667 int ret;
668
669 pr_info("%s - version %s\n",
670 igb_driver_string, igb_driver_version);
671 pr_info("%s\n", igb_copyright);
672
673#ifdef CONFIG_IGB_DCA
674 dca_register_notify(&dca_notifier);
675#endif
676 ret = pci_register_driver(&igb_driver);
677 return ret;
678}
679
680module_init(igb_init_module);
681
/**
 *  igb_exit_module - Driver Exit Cleanup Routine
 *
 *  igb_exit_module is called just before the driver is removed
 *  from memory.
 **/
688static void __exit igb_exit_module(void)
689{
690#ifdef CONFIG_IGB_DCA
691 dca_unregister_notify(&dca_notifier);
692#endif
693 pci_unregister_driver(&igb_driver);
694}
695
696module_exit(igb_exit_module);
697
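/* Map ring index i to its 82576 hardware queue when VFs are in use: even
 * rings land on queues 0-7 and odd rings on queues 8-15 (ring 0 -> queue 0,
 * ring 1 -> queue 8, ring 2 -> queue 1, ...), interleaving with the VF pairs.
 */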
698#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
699
/**
 *  igb_cache_ring_register - Descriptor ring to register mapping
 *  @adapter: board private structure to initialize
 *
 *  Once we know the feature-set enabled for the device, we'll cache
 *  the register offset the descriptor ring is assigned to.
 **/
706static void igb_cache_ring_register(struct igb_adapter *adapter)
707{
708 int i = 0, j = 0;
709 u32 rbase_offset = adapter->vfs_allocated_count;
710
711 switch (adapter->hw.mac.type) {
712 case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
718 if (adapter->vfs_allocated_count) {
719 for (; i < adapter->rss_queues; i++)
720 adapter->rx_ring[i]->reg_idx = rbase_offset +
721 Q_IDX_82576(i);
722 }
723
724 case e1000_82575:
725 case e1000_82580:
726 case e1000_i350:
727 case e1000_i354:
728 case e1000_i210:
729 case e1000_i211:
730
731 default:
732 for (; i < adapter->num_rx_queues; i++)
733 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
734 for (; j < adapter->num_tx_queues; j++)
735 adapter->tx_ring[j]->reg_idx = rbase_offset + j;
736 break;
737 }
738}
739
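/* igb_rd32 - read a device register, detecting surprise removal (all-ones
 * reads) and clearing hw->hw_addr so later accesses know the device is gone.
 */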
740u32 igb_rd32(struct e1000_hw *hw, u32 reg)
741{
742 struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
743 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
744 u32 value = 0;
745
746 if (E1000_REMOVED(hw_addr))
747 return ~value;
748
749 value = readl(&hw_addr[reg]);

	/* reads should not return all F's */
752 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
753 struct net_device *netdev = igb->netdev;
754 hw->hw_addr = NULL;
755 netdev_err(netdev, "PCIe link lost\n");
756 }
757
758 return value;
759}
760
/**
 *  igb_write_ivar - configure ivar for given MSI-X vector
 *  @hw: pointer to the HW structure
 *  @msix_vector: vector number we are allocating to a given ring
 *  @index: row index of IVAR register to write within IVAR table
 *  @offset: column offset within IVAR, should be multiple of 8
 *
 *  This function is intended to handle the writing of the IVAR register
 *  for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 *  each containing a cause allocation for an Rx and Tx ring, and a
 *  variable number of rows depending on the number of queues supported.
 **/
773static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
774 int index, int offset)
775{
776 u32 ivar = array_rd32(E1000_IVAR0, index);
777
778
779 ivar &= ~((u32)0xFF << offset);
780
781
782 ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
783
784 array_wr32(E1000_IVAR0, index, ivar);
785}
786
787#define IGB_N0_QUEUE -1
788static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
789{
790 struct igb_adapter *adapter = q_vector->adapter;
791 struct e1000_hw *hw = &adapter->hw;
792 int rx_queue = IGB_N0_QUEUE;
793 int tx_queue = IGB_N0_QUEUE;
794 u32 msixbm = 0;
795
796 if (q_vector->rx.ring)
797 rx_queue = q_vector->rx.ring->reg_idx;
798 if (q_vector->tx.ring)
799 tx_queue = q_vector->tx.ring->reg_idx;
800
801 switch (hw->mac.type) {
802 case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
808 if (rx_queue > IGB_N0_QUEUE)
809 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
810 if (tx_queue > IGB_N0_QUEUE)
811 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
812 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
813 msixbm |= E1000_EIMS_OTHER;
814 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
815 q_vector->eims_value = msixbm;
816 break;
817 case e1000_82576:
		/* 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits of the queue index as the row index, and the
		 * 4th bit as the column offset.
		 */
823 if (rx_queue > IGB_N0_QUEUE)
824 igb_write_ivar(hw, msix_vector,
825 rx_queue & 0x7,
826 (rx_queue & 0x8) << 1);
827 if (tx_queue > IGB_N0_QUEUE)
828 igb_write_ivar(hw, msix_vector,
829 tx_queue & 0x7,
830 ((tx_queue & 0x8) << 1) + 8);
831 q_vector->eims_value = BIT(msix_vector);
832 break;
833 case e1000_82580:
834 case e1000_i350:
835 case e1000_i354:
836 case e1000_i210:
837 case e1000_i211:
		/* On 82580 and newer adapters the scheme is similar to 82576
		 * but each IVAR row holds the entries for two queues:
		 * queue >> 1 selects the row while bit 0 of the queue index
		 * selects the column (an additional offset of 8 is used for
		 * the Tx entry).
		 */
844 if (rx_queue > IGB_N0_QUEUE)
845 igb_write_ivar(hw, msix_vector,
846 rx_queue >> 1,
847 (rx_queue & 0x1) << 4);
848 if (tx_queue > IGB_N0_QUEUE)
849 igb_write_ivar(hw, msix_vector,
850 tx_queue >> 1,
851 ((tx_queue & 0x1) << 4) + 8);
852 q_vector->eims_value = BIT(msix_vector);
853 break;
854 default:
855 BUG();
856 break;
857 }
858
859
860 adapter->eims_enable_mask |= q_vector->eims_value;
861
862
863 q_vector->set_itr = 1;
864}
865
/**
 *  igb_configure_msix - Configure MSI-X hardware
 *  @adapter: board private structure to initialize
 *
 *  igb_configure_msix sets up the hardware to properly
 *  generate MSI-X interrupts.
 **/
873static void igb_configure_msix(struct igb_adapter *adapter)
874{
875 u32 tmp;
876 int i, vector = 0;
877 struct e1000_hw *hw = &adapter->hw;
878
879 adapter->eims_enable_mask = 0;
880
881
882 switch (hw->mac.type) {
883 case e1000_82575:
884 tmp = rd32(E1000_CTRL_EXT);
885
886 tmp |= E1000_CTRL_EXT_PBA_CLR;
887
888
889 tmp |= E1000_CTRL_EXT_EIAME;
890 tmp |= E1000_CTRL_EXT_IRCA;
891
892 wr32(E1000_CTRL_EXT, tmp);
893
894
895 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
896 adapter->eims_other = E1000_EIMS_OTHER;
897
898 break;
899
900 case e1000_82576:
901 case e1000_82580:
902 case e1000_i350:
903 case e1000_i354:
904 case e1000_i210:
905 case e1000_i211:
906
907
908
909 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
910 E1000_GPIE_PBA | E1000_GPIE_EIAME |
911 E1000_GPIE_NSICR);
912
913
914 adapter->eims_other = BIT(vector);
915 tmp = (vector++ | E1000_IVAR_VALID) << 8;
916
917 wr32(E1000_IVAR_MISC, tmp);
918 break;
919 default:
920
921 break;
922 }
923
924 adapter->eims_enable_mask |= adapter->eims_other;
925
926 for (i = 0; i < adapter->num_q_vectors; i++)
927 igb_assign_vector(adapter->q_vector[i], vector++);
928
929 wrfl();
930}
931
/**
 *  igb_request_msix - Initialize MSI-X interrupts
 *  @adapter: board private structure to initialize
 *
 *  igb_request_msix allocates MSI-X vectors and requests interrupts from the
 *  kernel.
 **/
939static int igb_request_msix(struct igb_adapter *adapter)
940{
941 struct net_device *netdev = adapter->netdev;
942 int i, err = 0, vector = 0, free_vector = 0;
943
944 err = request_irq(adapter->msix_entries[vector].vector,
945 igb_msix_other, 0, netdev->name, adapter);
946 if (err)
947 goto err_out;
948
949 for (i = 0; i < adapter->num_q_vectors; i++) {
950 struct igb_q_vector *q_vector = adapter->q_vector[i];
951
952 vector++;
953
954 q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
955
956 if (q_vector->rx.ring && q_vector->tx.ring)
957 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
958 q_vector->rx.ring->queue_index);
959 else if (q_vector->tx.ring)
960 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
961 q_vector->tx.ring->queue_index);
962 else if (q_vector->rx.ring)
963 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
964 q_vector->rx.ring->queue_index);
965 else
966 sprintf(q_vector->name, "%s-unused", netdev->name);
967
968 err = request_irq(adapter->msix_entries[vector].vector,
969 igb_msix_ring, 0, q_vector->name,
970 q_vector);
971 if (err)
972 goto err_free;
973 }
974
975 igb_configure_msix(adapter);
976 return 0;
977
978err_free:
979
980 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
981
982 vector--;
983 for (i = 0; i < vector; i++) {
984 free_irq(adapter->msix_entries[free_vector++].vector,
985 adapter->q_vector[i]);
986 }
987err_out:
988 return err;
989}
990
/**
 *  igb_free_q_vector - Free memory allocated for specific interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_idx: Index of vector to be freed
 *
 *  This function frees the memory allocated to the q_vector.
 **/
998static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
999{
1000 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1001
1002 adapter->q_vector[v_idx] = NULL;
1003
	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
1007 if (q_vector)
1008 kfree_rcu(q_vector, rcu);
1009}
1010
/**
 *  igb_reset_q_vector - Reset config for interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_idx: Index of vector to be reset
 *
 *  If NAPI is enabled it will delete any references to the
 *  NAPI struct. This is preparation for igb_free_q_vector.
 **/
1019static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1020{
1021 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1022
1023
1024
1025
1026 if (!q_vector)
1027 return;
1028
1029 if (q_vector->tx.ring)
1030 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1031
1032 if (q_vector->rx.ring)
1033 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
1034
1035 netif_napi_del(&q_vector->napi);
1036
1037}
1038
1039static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1040{
1041 int v_idx = adapter->num_q_vectors;
1042
1043 if (adapter->flags & IGB_FLAG_HAS_MSIX)
1044 pci_disable_msix(adapter->pdev);
1045 else if (adapter->flags & IGB_FLAG_HAS_MSI)
1046 pci_disable_msi(adapter->pdev);
1047
1048 while (v_idx--)
1049 igb_reset_q_vector(adapter, v_idx);
1050}
1051
/**
 *  igb_free_q_vectors - Free memory allocated for interrupt vectors
 *  @adapter: board private structure to initialize
 *
 *  This function frees the memory allocated to the q_vectors.  In addition if
 *  NAPI is enabled it will delete any references to the NAPI struct prior
 *  to freeing the q_vector.
 **/
1060static void igb_free_q_vectors(struct igb_adapter *adapter)
1061{
1062 int v_idx = adapter->num_q_vectors;
1063
1064 adapter->num_tx_queues = 0;
1065 adapter->num_rx_queues = 0;
1066 adapter->num_q_vectors = 0;
1067
1068 while (v_idx--) {
1069 igb_reset_q_vector(adapter, v_idx);
1070 igb_free_q_vector(adapter, v_idx);
1071 }
1072}
1073
/**
 *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *  @adapter: board private structure to initialize
 *
 *  This function resets the device so that it has 0 Rx queues, Tx queues, and
 *  MSI-X interrupts allocated.
 **/
1081static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1082{
1083 igb_free_q_vectors(adapter);
1084 igb_reset_interrupt_capability(adapter);
1085}
1086
/**
 *  igb_set_interrupt_capability - set MSI or MSI-X if supported
 *  @adapter: board private structure to initialize
 *  @msix: boolean value of MSIX capability
 *
 *  Attempt to configure interrupts using the best available
 *  capabilities of the hardware and kernel.
 **/
1095static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1096{
1097 int err;
1098 int numvecs, i;
1099
1100 if (!msix)
1101 goto msi_only;
1102 adapter->flags |= IGB_FLAG_HAS_MSIX;
1103
1104
1105 adapter->num_rx_queues = adapter->rss_queues;
1106 if (adapter->vfs_allocated_count)
1107 adapter->num_tx_queues = 1;
1108 else
1109 adapter->num_tx_queues = adapter->rss_queues;
1110
1111
1112 numvecs = adapter->num_rx_queues;
1113
1114
1115 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1116 numvecs += adapter->num_tx_queues;
1117
1118
1119 adapter->num_q_vectors = numvecs;
1120
1121
1122 numvecs++;
1123 for (i = 0; i < numvecs; i++)
1124 adapter->msix_entries[i].entry = i;
1125
1126 err = pci_enable_msix_range(adapter->pdev,
1127 adapter->msix_entries,
1128 numvecs,
1129 numvecs);
1130 if (err > 0)
1131 return;
1132
1133 igb_reset_interrupt_capability(adapter);
1134
1135
1136msi_only:
1137 adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1138#ifdef CONFIG_PCI_IOV
1139
1140 if (adapter->vf_data) {
1141 struct e1000_hw *hw = &adapter->hw;
1142
1143 pci_disable_sriov(adapter->pdev);
1144 msleep(500);
1145
1146 kfree(adapter->vf_mac_list);
1147 adapter->vf_mac_list = NULL;
1148 kfree(adapter->vf_data);
1149 adapter->vf_data = NULL;
1150 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1151 wrfl();
1152 msleep(100);
1153 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1154 }
1155#endif
1156 adapter->vfs_allocated_count = 0;
1157 adapter->rss_queues = 1;
1158 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1159 adapter->num_rx_queues = 1;
1160 adapter->num_tx_queues = 1;
1161 adapter->num_q_vectors = 1;
1162 if (!pci_enable_msi(adapter->pdev))
1163 adapter->flags |= IGB_FLAG_HAS_MSI;
1164}
1165
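/* igb_add_ring - attach a ring to the Tx or Rx container of a q_vector */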
1166static void igb_add_ring(struct igb_ring *ring,
1167 struct igb_ring_container *head)
1168{
1169 head->ring = ring;
1170 head->count++;
1171}
1172
/**
 *  igb_alloc_q_vector - Allocate memory for a single interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_count: q_vectors allocated on adapter, used for ring interleaving
 *  @v_idx: index of vector in adapter struct
 *  @txr_count: total number of Tx rings to allocate
 *  @txr_idx: index of first Tx ring to allocate
 *  @rxr_count: total number of Rx rings to allocate
 *  @rxr_idx: index of first Rx ring to allocate
 *
 *  We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
1185static int igb_alloc_q_vector(struct igb_adapter *adapter,
1186 int v_count, int v_idx,
1187 int txr_count, int txr_idx,
1188 int rxr_count, int rxr_idx)
1189{
1190 struct igb_q_vector *q_vector;
1191 struct igb_ring *ring;
1192 int ring_count, size;
1193
	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
1195 if (txr_count > 1 || rxr_count > 1)
1196 return -ENOMEM;
1197
1198 ring_count = txr_count + rxr_count;
1199 size = sizeof(struct igb_q_vector) +
1200 (sizeof(struct igb_ring) * ring_count);
1201
1202
1203 q_vector = adapter->q_vector[v_idx];
1204 if (!q_vector) {
1205 q_vector = kzalloc(size, GFP_KERNEL);
1206 } else if (size > ksize(q_vector)) {
1207 kfree_rcu(q_vector, rcu);
1208 q_vector = kzalloc(size, GFP_KERNEL);
1209 } else {
1210 memset(q_vector, 0, size);
1211 }
1212 if (!q_vector)
1213 return -ENOMEM;
1214
1215
1216 netif_napi_add(adapter->netdev, &q_vector->napi,
1217 igb_poll, 64);
1218
1219
1220 adapter->q_vector[v_idx] = q_vector;
1221 q_vector->adapter = adapter;
1222
1223
1224 q_vector->tx.work_limit = adapter->tx_work_limit;
1225
1226
1227 q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
1228 q_vector->itr_val = IGB_START_ITR;
1229
1230
1231 ring = q_vector->ring;
1232
1233
1234 if (rxr_count) {
1235
1236 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1237 q_vector->itr_val = adapter->rx_itr_setting;
1238 } else {
1239
1240 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1241 q_vector->itr_val = adapter->tx_itr_setting;
1242 }
1243
1244 if (txr_count) {
1245
1246 ring->dev = &adapter->pdev->dev;
1247 ring->netdev = adapter->netdev;
1248
1249
1250 ring->q_vector = q_vector;
1251
1252
1253 igb_add_ring(ring, &q_vector->tx);
1254
1255
1256 if (adapter->hw.mac.type == e1000_82575)
1257 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1258
1259
1260 ring->count = adapter->tx_ring_count;
1261 ring->queue_index = txr_idx;
1262
1263 ring->cbs_enable = false;
1264 ring->idleslope = 0;
1265 ring->sendslope = 0;
1266 ring->hicredit = 0;
1267 ring->locredit = 0;
1268
1269 u64_stats_init(&ring->tx_syncp);
1270 u64_stats_init(&ring->tx_syncp2);
1271
1272
1273 adapter->tx_ring[txr_idx] = ring;
1274
1275
1276 ring++;
1277 }
1278
1279 if (rxr_count) {
1280
1281 ring->dev = &adapter->pdev->dev;
1282 ring->netdev = adapter->netdev;
1283
1284
1285 ring->q_vector = q_vector;
1286
1287
1288 igb_add_ring(ring, &q_vector->rx);
1289
1290
1291 if (adapter->hw.mac.type >= e1000_82576)
1292 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1293
1294
1295
1296
1297 if (adapter->hw.mac.type >= e1000_i350)
1298 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1299
1300
1301 ring->count = adapter->rx_ring_count;
1302 ring->queue_index = rxr_idx;
1303
1304 u64_stats_init(&ring->rx_syncp);
1305
1306
1307 adapter->rx_ring[rxr_idx] = ring;
1308 }
1309
1310 return 0;
1311}
1312
/**
 *  igb_alloc_q_vectors - Allocate memory for interrupt vectors
 *  @adapter: board private structure to initialize
 *
 *  We allocate one q_vector per queue interrupt.  If allocation fails we
 *  return -ENOMEM.
 **/
1321static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1322{
1323 int q_vectors = adapter->num_q_vectors;
1324 int rxr_remaining = adapter->num_rx_queues;
1325 int txr_remaining = adapter->num_tx_queues;
1326 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1327 int err;
1328
1329 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1330 for (; rxr_remaining; v_idx++) {
1331 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1332 0, 0, 1, rxr_idx);
1333
1334 if (err)
1335 goto err_out;
1336
1337
1338 rxr_remaining--;
1339 rxr_idx++;
1340 }
1341 }
1342
1343 for (; v_idx < q_vectors; v_idx++) {
1344 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1345 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1346
1347 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1348 tqpv, txr_idx, rqpv, rxr_idx);
1349
1350 if (err)
1351 goto err_out;
1352
1353
1354 rxr_remaining -= rqpv;
1355 txr_remaining -= tqpv;
1356 rxr_idx++;
1357 txr_idx++;
1358 }
1359
1360 return 0;
1361
1362err_out:
1363 adapter->num_tx_queues = 0;
1364 adapter->num_rx_queues = 0;
1365 adapter->num_q_vectors = 0;
1366
1367 while (v_idx--)
1368 igb_free_q_vector(adapter, v_idx);
1369
1370 return -ENOMEM;
1371}
1372
/**
 *  igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *  @adapter: board private structure to initialize
 *  @msix: boolean value of MSIX capability
 *
 *  This function initializes the interrupts and allocates all of the queues
 *  and vectors.
 **/
1380static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1381{
1382 struct pci_dev *pdev = adapter->pdev;
1383 int err;
1384
1385 igb_set_interrupt_capability(adapter, msix);
1386
1387 err = igb_alloc_q_vectors(adapter);
1388 if (err) {
1389 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1390 goto err_alloc_q_vectors;
1391 }
1392
1393 igb_cache_ring_register(adapter);
1394
1395 return 0;
1396
1397err_alloc_q_vectors:
1398 igb_reset_interrupt_capability(adapter);
1399 return err;
1400}
1401
/**
 *  igb_request_irq - initialize interrupts
 *  @adapter: board private structure to initialize
 *
 *  Attempts to configure interrupts using the best available
 *  capabilities of the hardware and kernel.
 **/
1409static int igb_request_irq(struct igb_adapter *adapter)
1410{
1411 struct net_device *netdev = adapter->netdev;
1412 struct pci_dev *pdev = adapter->pdev;
1413 int err = 0;
1414
1415 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1416 err = igb_request_msix(adapter);
1417 if (!err)
1418 goto request_done;
1419
1420 igb_free_all_tx_resources(adapter);
1421 igb_free_all_rx_resources(adapter);
1422
1423 igb_clear_interrupt_scheme(adapter);
1424 err = igb_init_interrupt_scheme(adapter, false);
1425 if (err)
1426 goto request_done;
1427
1428 igb_setup_all_tx_resources(adapter);
1429 igb_setup_all_rx_resources(adapter);
1430 igb_configure(adapter);
1431 }
1432
1433 igb_assign_vector(adapter->q_vector[0], 0);
1434
1435 if (adapter->flags & IGB_FLAG_HAS_MSI) {
1436 err = request_irq(pdev->irq, igb_intr_msi, 0,
1437 netdev->name, adapter);
1438 if (!err)
1439 goto request_done;
1440
1441
1442 igb_reset_interrupt_capability(adapter);
1443 adapter->flags &= ~IGB_FLAG_HAS_MSI;
1444 }
1445
1446 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
1447 netdev->name, adapter);
1448
1449 if (err)
1450 dev_err(&pdev->dev, "Error %d getting interrupt\n",
1451 err);
1452
1453request_done:
1454 return err;
1455}
1456
1457static void igb_free_irq(struct igb_adapter *adapter)
1458{
1459 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1460 int vector = 0, i;
1461
1462 free_irq(adapter->msix_entries[vector++].vector, adapter);
1463
1464 for (i = 0; i < adapter->num_q_vectors; i++)
1465 free_irq(adapter->msix_entries[vector++].vector,
1466 adapter->q_vector[i]);
1467 } else {
1468 free_irq(adapter->pdev->irq, adapter);
1469 }
1470}
1471
/**
 *  igb_irq_disable - Mask off interrupt generation on the NIC
 *  @adapter: board private structure
 **/
1476static void igb_irq_disable(struct igb_adapter *adapter)
1477{
1478 struct e1000_hw *hw = &adapter->hw;
1479
1480
1481
1482
1483
1484 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1485 u32 regval = rd32(E1000_EIAM);
1486
1487 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1488 wr32(E1000_EIMC, adapter->eims_enable_mask);
1489 regval = rd32(E1000_EIAC);
1490 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
1491 }
1492
1493 wr32(E1000_IAM, 0);
1494 wr32(E1000_IMC, ~0);
1495 wrfl();
1496 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1497 int i;
1498
1499 for (i = 0; i < adapter->num_q_vectors; i++)
1500 synchronize_irq(adapter->msix_entries[i].vector);
1501 } else {
1502 synchronize_irq(adapter->pdev->irq);
1503 }
1504}
1505
/**
 *  igb_irq_enable - Enable default interrupt generation settings
 *  @adapter: board private structure
 **/
1510static void igb_irq_enable(struct igb_adapter *adapter)
1511{
1512 struct e1000_hw *hw = &adapter->hw;
1513
1514 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1515 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1516 u32 regval = rd32(E1000_EIAC);
1517
1518 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1519 regval = rd32(E1000_EIAM);
1520 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
1521 wr32(E1000_EIMS, adapter->eims_enable_mask);
1522 if (adapter->vfs_allocated_count) {
1523 wr32(E1000_MBVFIMR, 0xFF);
1524 ims |= E1000_IMS_VMMB;
1525 }
1526 wr32(E1000_IMS, ims);
1527 } else {
1528 wr32(E1000_IMS, IMS_ENABLE_MASK |
1529 E1000_IMS_DRSTA);
1530 wr32(E1000_IAM, IMS_ENABLE_MASK |
1531 E1000_IMS_DRSTA);
1532 }
1533}
1534
1535static void igb_update_mng_vlan(struct igb_adapter *adapter)
1536{
1537 struct e1000_hw *hw = &adapter->hw;
1538 u16 pf_id = adapter->vfs_allocated_count;
1539 u16 vid = adapter->hw.mng_cookie.vlan_id;
1540 u16 old_vid = adapter->mng_vlan_id;
1541
1542 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1543
1544 igb_vfta_set(hw, vid, pf_id, true, true);
1545 adapter->mng_vlan_id = vid;
1546 } else {
1547 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1548 }
1549
1550 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1551 (vid != old_vid) &&
1552 !test_bit(old_vid, adapter->active_vlans)) {
1553
1554 igb_vfta_set(hw, vid, pf_id, false, true);
1555 }
1556}
1557
/**
 *  igb_release_hw_control - release control of the h/w to f/w
 *  @adapter: address of board private structure
 *
 *  igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 *  For ASF and Pass Through versions of f/w this means that the
 *  driver is no longer loaded.
 **/
1566static void igb_release_hw_control(struct igb_adapter *adapter)
1567{
1568 struct e1000_hw *hw = &adapter->hw;
1569 u32 ctrl_ext;
1570
1571
1572 ctrl_ext = rd32(E1000_CTRL_EXT);
1573 wr32(E1000_CTRL_EXT,
1574 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1575}
1576
/**
 *  igb_get_hw_control - get control of the h/w from f/w
 *  @adapter: address of board private structure
 *
 *  igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 *  For ASF and Pass Through versions of f/w this means that
 *  the driver is loaded.
 **/
1585static void igb_get_hw_control(struct igb_adapter *adapter)
1586{
1587 struct e1000_hw *hw = &adapter->hw;
1588 u32 ctrl_ext;
1589
1590
1591 ctrl_ext = rd32(E1000_CTRL_EXT);
1592 wr32(E1000_CTRL_EXT,
1593 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1594}
1595
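/* enable_fqtss - set or clear the i210 FQTSS (Qav) transmit mode flag.
 * If the interface is running, a reset is scheduled so that
 * igb_setup_tx_mode() reprograms the hardware accordingly.
 */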
1596static void enable_fqtss(struct igb_adapter *adapter, bool enable)
1597{
1598 struct net_device *netdev = adapter->netdev;
1599 struct e1000_hw *hw = &adapter->hw;
1600
1601 WARN_ON(hw->mac.type != e1000_i210);
1602
1603 if (enable)
1604 adapter->flags |= IGB_FLAG_FQTSS;
1605 else
1606 adapter->flags &= ~IGB_FLAG_FQTSS;
1607
1608 if (netif_running(netdev))
1609 schedule_work(&adapter->reset_task);
1610}
1611
1612static bool is_fqtss_enabled(struct igb_adapter *adapter)
1613{
1614 return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
1615}
1616
1617static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
1618 enum tx_queue_prio prio)
1619{
1620 u32 val;
1621
1622 WARN_ON(hw->mac.type != e1000_i210);
1623 WARN_ON(queue < 0 || queue > 4);
1624
1625 val = rd32(E1000_I210_TXDCTL(queue));
1626
1627 if (prio == TX_QUEUE_PRIO_HIGH)
1628 val |= E1000_TXDCTL_PRIORITY;
1629 else
1630 val &= ~E1000_TXDCTL_PRIORITY;
1631
1632 wr32(E1000_I210_TXDCTL(queue), val);
1633}
1634
1635static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
1636{
1637 u32 val;
1638
1639 WARN_ON(hw->mac.type != e1000_i210);
1640 WARN_ON(queue < 0 || queue > 1);
1641
1642 val = rd32(E1000_I210_TQAVCC(queue));
1643
1644 if (mode == QUEUE_MODE_STREAM_RESERVATION)
1645 val |= E1000_TQAVCC_QUEUEMODE;
1646 else
1647 val &= ~E1000_TQAVCC_QUEUEMODE;
1648
1649 wr32(E1000_I210_TQAVCC(queue), val);
1650}
1651
1652static bool is_any_cbs_enabled(struct igb_adapter *adapter)
1653{
1654 int i;
1655
1656 for (i = 0; i < adapter->num_tx_queues; i++) {
1657 if (adapter->tx_ring[i]->cbs_enable)
1658 return true;
1659 }
1660
1661 return false;
1662}
1663
1664static bool is_any_txtime_enabled(struct igb_adapter *adapter)
1665{
1666 int i;
1667
1668 for (i = 0; i < adapter->num_tx_queues; i++) {
1669 if (adapter->tx_ring[i]->launchtime_enable)
1670 return true;
1671 }
1672
1673 return false;
1674}
1675
/**
 *  igb_config_tx_modes - Configure "Qav Tx mode" features on igb
 *  @adapter: pointer to adapter struct
 *  @queue: queue number
 *
 *  Configure CBS and Launchtime for a given hardware queue.
 *  Parameters are retrieved from the correct Tx ring, so
 *  igb_save_cbs_params() and igb_save_txtime_params() should be
 *  used to save the parameters before calling this function.
 **/
1686static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
1687{
1688 struct igb_ring *ring = adapter->tx_ring[queue];
1689 struct net_device *netdev = adapter->netdev;
1690 struct e1000_hw *hw = &adapter->hw;
1691 u32 tqavcc, tqavctrl;
1692 u16 value;
1693
1694 WARN_ON(hw->mac.type != e1000_i210);
1695 WARN_ON(queue < 0 || queue > 1);
1696
	/* If any of the Qav features is enabled, configure queues as SR and
	 * with HIGH PRIO. If none is, then configure them with LOW PRIO and
	 * as SP.
	 */
1701 if (ring->cbs_enable || ring->launchtime_enable) {
1702 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1703 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1704 } else {
1705 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
1706 set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
1707 }
1708
1709
1710 if (ring->cbs_enable || queue == 0) {
		/* i210 does not allow the queue 0 to be in the Strict
		 * Priority mode while the Qav mode is enabled, so,
		 * instead of disabling strict priority mode, we give
		 * queue 0 the maximum of credits possible.
		 *
		 * See the i210 datasheet note: "Queue0 QueueMode must be
		 * set to 1b when TransmitMode is set to Qav."
		 */
1720 if (queue == 0 && !ring->cbs_enable) {
1721
1722 ring->idleslope = 1000000;
1723 ring->hicredit = ETH_FRAME_LEN;
1724 }
1725
		/* Always set data transfer arbitration to credit-based
		 * shaper algorithm on TQAVCTRL if CBS is enabled for any of
		 * the queues.
		 */
1730 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1731 tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
1732 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1733
		/* The idleslope is configured in kbps.  On the i210 the Qav
		 * credit-based shaper only operates at 1Gbps link speed
		 * (1000000 kbps), and the idleSlope field of TQAVCC expects
		 * the slope as a fraction of line rate scaled so that full
		 * line rate corresponds to 61034 (2 * 0x7735) credits:
		 *
		 *	value = idleslope * 61034 / 1000000
		 *
		 * See the i210 datasheet for the full derivation of this
		 * constant; the hiCredit programmed below uses the same
		 * 0x7735 scale factor.
		 */
1791 value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
1792
1793 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1794 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1795 tqavcc |= value;
1796 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1797
1798 wr32(E1000_I210_TQAVHC(queue),
1799 0x80000000 + ring->hicredit * 0x7735);
1800 } else {
1801
1802
1803 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1804 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1805 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1806
1807
1808 wr32(E1000_I210_TQAVHC(queue), 0);
1809
		/* If CBS is not enabled for any queues anymore, then return to
		 * the default state of Data Transmission Arbitration on
		 * TQAVCTRL.
		 */
1814 if (!is_any_cbs_enabled(adapter)) {
1815 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1816 tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
1817 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1818 }
1819 }
1820
1821
1822 if (ring->launchtime_enable) {
1823
1824
1825
1826
1827
1828
1829
1830
1831 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1832 tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
1833 E1000_TQAVCTRL_FETCHTIME_DELTA;
1834 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1835 } else {
1836
1837
1838
1839
1840 if (!is_any_txtime_enabled(adapter)) {
1841 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1842 tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
1843 tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
1844 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1845 }
1846 }
1847
	/* In the i210 controller the sendSlope and loCredit parameters from
	 * CBS are not configurable by software so we don't do any 'controller
	 * configuration' in respect to these parameters.
	 */

1853 netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
1854 ring->cbs_enable ? "enabled" : "disabled",
1855 ring->launchtime_enable ? "enabled" : "disabled",
1856 queue,
1857 ring->idleslope, ring->sendslope,
1858 ring->hicredit, ring->locredit);
1859}
1860
1861static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
1862 bool enable)
1863{
1864 struct igb_ring *ring;
1865
1866 if (queue < 0 || queue > adapter->num_tx_queues)
1867 return -EINVAL;
1868
1869 ring = adapter->tx_ring[queue];
1870 ring->launchtime_enable = enable;
1871
1872 return 0;
1873}
1874
1875static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1876 bool enable, int idleslope, int sendslope,
1877 int hicredit, int locredit)
1878{
1879 struct igb_ring *ring;
1880
1881 if (queue < 0 || queue > adapter->num_tx_queues)
1882 return -EINVAL;
1883
1884 ring = adapter->tx_ring[queue];
1885
1886 ring->cbs_enable = enable;
1887 ring->idleslope = idleslope;
1888 ring->sendslope = sendslope;
1889 ring->hicredit = hicredit;
1890 ring->locredit = locredit;
1891
1892 return 0;
1893}
1894
/**
 *  igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
 *  @adapter: pointer to adapter struct
 *
 *  Configure TQAVCTRL register switching the controller's Tx mode
 *  to/from Qav mode when applicable (if supported), and configure
 *  the transmit and receive packet buffer sizes accordingly.
 **/
1904static void igb_setup_tx_mode(struct igb_adapter *adapter)
1905{
1906 struct net_device *netdev = adapter->netdev;
1907 struct e1000_hw *hw = &adapter->hw;
1908 u32 val;
1909
1910
1911 if (hw->mac.type != e1000_i210)
1912 return;
1913
1914 if (is_fqtss_enabled(adapter)) {
1915 int i, max_queue;
1916
1917
1918
1919
1920
1921 val = rd32(E1000_I210_TQAVCTRL);
1922 val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
1923 val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1924 wr32(E1000_I210_TQAVCTRL, val);
1925
1926
1927
1928
1929 val = rd32(E1000_TXPBS);
1930 val &= ~I210_TXPBSIZE_MASK;
1931 val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
1932 I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
1933 wr32(E1000_TXPBS, val);
1934
1935 val = rd32(E1000_RXPBS);
1936 val &= ~I210_RXPBSIZE_MASK;
1937 val |= I210_RXPBSIZE_PB_30KB;
1938 wr32(E1000_RXPBS, val);
1939
		/* While in Qav mode, limit the maximum frame size by
		 * programming DTX_MAX_PKT_SZ.  The register takes the size in
		 * 64 byte units, hence the (4096 - 1) / 64 value below.
		 */
1951 val = (4096 - 1) / 64;
1952 wr32(E1000_I210_DTXMXPKTSZ, val);
1953
1954
1955
1956
1957
1958
1959 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1960 adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1961
1962 for (i = 0; i < max_queue; i++) {
1963 igb_config_tx_modes(adapter, i);
1964 }
1965 } else {
1966 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1967 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1968 wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1969
1970 val = rd32(E1000_I210_TQAVCTRL);
1971
1972
1973
1974
1975 val &= ~E1000_TQAVCTRL_XMIT_MODE;
1976 wr32(E1000_I210_TQAVCTRL, val);
1977 }
1978
1979 netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1980 "enabled" : "disabled");
1981}
1982
/**
 *  igb_configure - configure the hardware for RX and TX
 *  @adapter: private board structure
 **/
1987static void igb_configure(struct igb_adapter *adapter)
1988{
1989 struct net_device *netdev = adapter->netdev;
1990 int i;
1991
1992 igb_get_hw_control(adapter);
1993 igb_set_rx_mode(netdev);
1994 igb_setup_tx_mode(adapter);
1995
1996 igb_restore_vlan(adapter);
1997
1998 igb_setup_tctl(adapter);
1999 igb_setup_mrqc(adapter);
2000 igb_setup_rctl(adapter);
2001
2002 igb_nfc_filter_restore(adapter);
2003 igb_configure_tx(adapter);
2004 igb_configure_rx(adapter);
2005
2006 igb_rx_fifo_flush_82575(&adapter->hw);
2007
	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
2012 for (i = 0; i < adapter->num_rx_queues; i++) {
2013 struct igb_ring *ring = adapter->rx_ring[i];
2014 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
2015 }
2016}
2017
/**
 *  igb_power_up_link - Power up the phy/serdes link
 *  @adapter: address of board private structure
 **/
2022void igb_power_up_link(struct igb_adapter *adapter)
2023{
2024 igb_reset_phy(&adapter->hw);
2025
2026 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2027 igb_power_up_phy_copper(&adapter->hw);
2028 else
2029 igb_power_up_serdes_link_82575(&adapter->hw);
2030
2031 igb_setup_link(&adapter->hw);
2032}
2033
/**
 *  igb_power_down_link - Power down the phy/serdes link
 *  @adapter: address of board private structure
 **/
2038static void igb_power_down_link(struct igb_adapter *adapter)
2039{
2040 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2041 igb_power_down_phy_copper_82575(&adapter->hw);
2042 else
2043 igb_shutdown_serdes_link_82575(&adapter->hw);
2044}
2045
/**
 *  igb_check_swap_media - Detect and switch function for Media Auto Sense
 *  @adapter: address of the board private structure
 **/
2050static void igb_check_swap_media(struct igb_adapter *adapter)
2051{
2052 struct e1000_hw *hw = &adapter->hw;
2053 u32 ctrl_ext, connsw;
2054 bool swap_now = false;
2055
2056 ctrl_ext = rd32(E1000_CTRL_EXT);
2057 connsw = rd32(E1000_CONNSW);
2058
2059
2060
2061
2062
2063 if ((hw->phy.media_type == e1000_media_type_copper) &&
2064 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
2065 swap_now = true;
2066 } else if (!(connsw & E1000_CONNSW_SERDESD)) {
2067
2068 if (adapter->copper_tries < 4) {
2069 adapter->copper_tries++;
2070 connsw |= E1000_CONNSW_AUTOSENSE_CONF;
2071 wr32(E1000_CONNSW, connsw);
2072 return;
2073 } else {
2074 adapter->copper_tries = 0;
2075 if ((connsw & E1000_CONNSW_PHYSD) &&
2076 (!(connsw & E1000_CONNSW_PHY_PDN))) {
2077 swap_now = true;
2078 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
2079 wr32(E1000_CONNSW, connsw);
2080 }
2081 }
2082 }
2083
2084 if (!swap_now)
2085 return;
2086
2087 switch (hw->phy.media_type) {
2088 case e1000_media_type_copper:
2089 netdev_info(adapter->netdev,
2090 "MAS: changing media to fiber/serdes\n");
2091 ctrl_ext |=
2092 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2093 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2094 adapter->copper_tries = 0;
2095 break;
2096 case e1000_media_type_internal_serdes:
2097 case e1000_media_type_fiber:
2098 netdev_info(adapter->netdev,
2099 "MAS: changing media to copper\n");
2100 ctrl_ext &=
2101 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2102 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2103 break;
2104 default:
2105
2106 netdev_err(adapter->netdev,
2107 "AMS: Invalid media type found, returning\n");
2108 break;
2109 }
2110 wr32(E1000_CTRL_EXT, ctrl_ext);
2111}
2112
/**
 *  igb_up - Open the interface and prepare it to handle traffic
 *  @adapter: board private structure
 **/
2117int igb_up(struct igb_adapter *adapter)
2118{
2119 struct e1000_hw *hw = &adapter->hw;
2120 int i;
2121
2122
2123 igb_configure(adapter);
2124
2125 clear_bit(__IGB_DOWN, &adapter->state);
2126
2127 for (i = 0; i < adapter->num_q_vectors; i++)
2128 napi_enable(&(adapter->q_vector[i]->napi));
2129
2130 if (adapter->flags & IGB_FLAG_HAS_MSIX)
2131 igb_configure_msix(adapter);
2132 else
2133 igb_assign_vector(adapter->q_vector[0], 0);
2134
2135
2136 rd32(E1000_TSICR);
2137 rd32(E1000_ICR);
2138 igb_irq_enable(adapter);
2139
2140
2141 if (adapter->vfs_allocated_count) {
2142 u32 reg_data = rd32(E1000_CTRL_EXT);
2143
2144 reg_data |= E1000_CTRL_EXT_PFRSTD;
2145 wr32(E1000_CTRL_EXT, reg_data);
2146 }
2147
2148 netif_tx_start_all_queues(adapter->netdev);
2149
2150
2151 hw->mac.get_link_status = 1;
2152 schedule_work(&adapter->watchdog_task);
2153
2154 if ((adapter->flags & IGB_FLAG_EEE) &&
2155 (!hw->dev_spec._82575.eee_disable))
2156 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2157
2158 return 0;
2159}
2160
2161void igb_down(struct igb_adapter *adapter)
2162{
2163 struct net_device *netdev = adapter->netdev;
2164 struct e1000_hw *hw = &adapter->hw;
2165 u32 tctl, rctl;
2166 int i;
2167
2168
2169
2170
2171 set_bit(__IGB_DOWN, &adapter->state);
2172
2173
2174 rctl = rd32(E1000_RCTL);
2175 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2176
2177
2178 igb_nfc_filter_exit(adapter);
2179
2180 netif_carrier_off(netdev);
2181 netif_tx_stop_all_queues(netdev);
2182
2183
2184 tctl = rd32(E1000_TCTL);
2185 tctl &= ~E1000_TCTL_EN;
2186 wr32(E1000_TCTL, tctl);
2187
2188 wrfl();
2189 usleep_range(10000, 11000);
2190
2191 igb_irq_disable(adapter);
2192
2193 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2194
2195 for (i = 0; i < adapter->num_q_vectors; i++) {
2196 if (adapter->q_vector[i]) {
2197 napi_synchronize(&adapter->q_vector[i]->napi);
2198 napi_disable(&adapter->q_vector[i]->napi);
2199 }
2200 }
2201
2202 del_timer_sync(&adapter->watchdog_timer);
2203 del_timer_sync(&adapter->phy_info_timer);
2204
2205
2206 spin_lock(&adapter->stats64_lock);
2207 igb_update_stats(adapter);
2208 spin_unlock(&adapter->stats64_lock);
2209
2210 adapter->link_speed = 0;
2211 adapter->link_duplex = 0;
2212
2213 if (!pci_channel_offline(adapter->pdev))
2214 igb_reset(adapter);
2215
2216
2217 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2218
2219 igb_clean_all_tx_rings(adapter);
2220 igb_clean_all_rx_rings(adapter);
2221#ifdef CONFIG_IGB_DCA
2222
2223
2224 igb_setup_dca(adapter);
2225#endif
2226}
2227
2228void igb_reinit_locked(struct igb_adapter *adapter)
2229{
2230 WARN_ON(in_interrupt());
2231 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2232 usleep_range(1000, 2000);
2233 igb_down(adapter);
2234 igb_up(adapter);
2235 clear_bit(__IGB_RESETTING, &adapter->state);
2236}
2237
/**
 *  igb_enable_mas - Media Autosense re-enable after swap
 *  @adapter: adapter struct
 **/
2242static void igb_enable_mas(struct igb_adapter *adapter)
2243{
2244 struct e1000_hw *hw = &adapter->hw;
2245 u32 connsw = rd32(E1000_CONNSW);
2246
2247
2248 if ((hw->phy.media_type == e1000_media_type_copper) &&
2249 (!(connsw & E1000_CONNSW_SERDESD))) {
2250 connsw |= E1000_CONNSW_ENRGSRC;
2251 connsw |= E1000_CONNSW_AUTOSENSE_EN;
2252 wr32(E1000_CONNSW, connsw);
2253 wrfl();
2254 }
2255}
2256
2257void igb_reset(struct igb_adapter *adapter)
2258{
2259 struct pci_dev *pdev = adapter->pdev;
2260 struct e1000_hw *hw = &adapter->hw;
2261 struct e1000_mac_info *mac = &hw->mac;
2262 struct e1000_fc_info *fc = &hw->fc;
2263 u32 pba, hwm;
2264
	/* Repartition PBA for greater than 9k MTU.
	 * To take effect CTRL.RST is required.
	 */
2268 switch (mac->type) {
2269 case e1000_i350:
2270 case e1000_i354:
2271 case e1000_82580:
2272 pba = rd32(E1000_RXPBS);
2273 pba = igb_rxpbs_adjust_82580(pba);
2274 break;
2275 case e1000_82576:
2276 pba = rd32(E1000_RXPBS);
2277 pba &= E1000_RXPBS_SIZE_MASK_82576;
2278 break;
2279 case e1000_82575:
2280 case e1000_i210:
2281 case e1000_i211:
2282 default:
2283 pba = E1000_PBA_34K;
2284 break;
2285 }
2286
2287 if (mac->type == e1000_82575) {
2288 u32 min_rx_space, min_tx_space, needed_tx_space;
2289
2290
2291 wr32(E1000_PBA, pba);
2292
2293
2294
2295
2296
2297
2298
2299
2300 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2301
2302
2303
2304
2305
2306
2307 min_tx_space = adapter->max_frame_size;
2308 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2309 min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
2310
2311
2312 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
2313
2314
2315
2316
2317
2318 if (needed_tx_space < pba) {
2319 pba -= needed_tx_space;
2320
2321
2322
2323
2324 if (pba < min_rx_space)
2325 pba = min_rx_space;
2326 }
2327
2328
2329 wr32(E1000_PBA, pba);
2330 }
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
2341
2342 fc->high_water = hwm & 0xFFFFFFF0;
2343 fc->low_water = fc->high_water - 16;
2344 fc->pause_time = 0xFFFF;
2345 fc->send_xon = 1;
2346 fc->current_mode = fc->requested_mode;
2347
2348
2349 if (adapter->vfs_allocated_count) {
2350 int i;
2351
2352 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
2353 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2354
2355
2356 igb_ping_all_vfs(adapter);
2357
2358
2359 wr32(E1000_VFRE, 0);
2360 wr32(E1000_VFTE, 0);
2361 }
2362
2363
2364 hw->mac.ops.reset_hw(hw);
2365 wr32(E1000_WUC, 0);
2366
2367 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2368
2369 adapter->ei.get_invariants(hw);
2370 adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2371 }
2372 if ((mac->type == e1000_82575) &&
2373 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2374 igb_enable_mas(adapter);
2375 }
2376 if (hw->mac.ops.init_hw(hw))
2377 dev_err(&pdev->dev, "Hardware Error\n");
2378
2379
2380 igb_flush_mac_table(adapter);
2381 __dev_uc_unsync(adapter->netdev, NULL);
2382
2383
2384 igb_set_default_mac_filter(adapter);
2385
2386
2387
2388
2389 if (!hw->mac.autoneg)
2390 igb_force_mac_fc(hw);
2391
2392 igb_init_dmac(adapter, pba);
2393#ifdef CONFIG_IGB_HWMON
2394
2395 if (!test_bit(__IGB_DOWN, &adapter->state)) {
2396 if (mac->type == e1000_i350 && hw->bus.func == 0) {
2397
2398
2399
2400 if (adapter->ets)
2401 mac->ops.init_thermal_sensor_thresh(hw);
2402 }
2403 }
2404#endif
2405
2406 if (hw->phy.media_type == e1000_media_type_copper) {
2407 switch (mac->type) {
2408 case e1000_i350:
2409 case e1000_i210:
2410 case e1000_i211:
2411 igb_set_eee_i350(hw, true, true);
2412 break;
2413 case e1000_i354:
2414 igb_set_eee_i354(hw, true, true);
2415 break;
2416 default:
2417 break;
2418 }
2419 }
2420 if (!netif_running(adapter->netdev))
2421 igb_power_down_link(adapter);
2422
2423 igb_update_mng_vlan(adapter);
2424
2425
2426 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2427
2428
2429 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2430 igb_ptp_reset(adapter);
2431
2432 igb_get_phy_info(hw);
2433}
2434
2435static netdev_features_t igb_fix_features(struct net_device *netdev,
2436 netdev_features_t features)
2437{
2438 /* Since there is no support for separate Rx/Tx VLAN accel
2439  * enable/disable, make sure the Tx flag always mirrors the Rx flag.
2440  */
2441 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2442 features |= NETIF_F_HW_VLAN_CTAG_TX;
2443 else
2444 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2445
2446 return features;
2447}
2448
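/* igb_set_features - ndo_set_features callback: toggle VLAN stripping,
 * flush all ntuple filters when NETIF_F_NTUPLE is cleared, and reinitialize
 * (or reset) the adapter when the RXALL/NTUPLE flags change.
 */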
2449static int igb_set_features(struct net_device *netdev,
2450 netdev_features_t features)
2451{
2452 netdev_features_t changed = netdev->features ^ features;
2453 struct igb_adapter *adapter = netdev_priv(netdev);
2454
2455 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2456 igb_vlan_mode(netdev, features);
2457
2458 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2459 return 0;
2460
2461 if (!(features & NETIF_F_NTUPLE)) {
2462 struct hlist_node *node2;
2463 struct igb_nfc_filter *rule;
2464
2465 spin_lock(&adapter->nfc_lock);
2466 hlist_for_each_entry_safe(rule, node2,
2467 &adapter->nfc_filter_list, nfc_node) {
2468 igb_erase_filter(adapter, rule);
2469 hlist_del(&rule->nfc_node);
2470 kfree(rule);
2471 }
2472 spin_unlock(&adapter->nfc_lock);
2473 adapter->nfc_filter_count = 0;
2474 }
2475
2476 netdev->features = features;
2477
2478 if (netif_running(netdev))
2479 igb_reinit_locked(adapter);
2480 else
2481 igb_reset(adapter);
2482
2483 return 0;
2484}
2485
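/* igb_ndo_fdb_add - ndo_fdb_add callback: refuse new unicast entries once
 * the receive address registers are exhausted, then fall back to the
 * default software FDB handling.
 */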
2486static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2487 struct net_device *dev,
2488 const unsigned char *addr, u16 vid,
2489 u16 flags)
2490{
2491
2492 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2493 struct igb_adapter *adapter = netdev_priv(dev);
2494 int vfn = adapter->vfs_allocated_count;
2495
2496 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2497 return -ENOMEM;
2498 }
2499
2500 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2501}
2502
2503#define IGB_MAX_MAC_HDR_LEN 127
2504#define IGB_MAX_NETWORK_HDR_LEN 511
2505
2506static netdev_features_t
2507igb_features_check(struct sk_buff *skb, struct net_device *dev,
2508 netdev_features_t features)
2509{
2510 unsigned int network_hdr_len, mac_hdr_len;
2511
2512
2513 mac_hdr_len = skb_network_header(skb) - skb->data;
2514 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2515 return features & ~(NETIF_F_HW_CSUM |
2516 NETIF_F_SCTP_CRC |
2517 NETIF_F_HW_VLAN_CTAG_TX |
2518 NETIF_F_TSO |
2519 NETIF_F_TSO6);
2520
2521 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2522 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2523 return features & ~(NETIF_F_HW_CSUM |
2524 NETIF_F_SCTP_CRC |
2525 NETIF_F_TSO |
2526 NETIF_F_TSO6);
2527
2528 /* We can only support IPv4 TSO in tunnels if we can mangle the
2529  * inner IP ID field, so strip TSO if MANGLEID is not supported.
2530  */
2531 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2532 features &= ~NETIF_F_TSO;
2533
2534 return features;
2535}
2536
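/* igb_offload_apply - apply the saved CBS/launchtime settings for @queue,
 * switching the controller into FQTSS mode on first use and back to legacy
 * mode once no queue has an offload enabled.
 */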
2537static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
2538{
2539 if (!is_fqtss_enabled(adapter)) {
2540 enable_fqtss(adapter, true);
2541 return;
2542 }
2543
2544 igb_config_tx_modes(adapter, queue);
2545
2546 if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
2547 enable_fqtss(adapter, false);
2548}
2549
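/* igb_offload_cbs - offload a CBS (credit based shaper) qdisc; only the
 * i210 supports this, and only on Tx queues 0 and 1.
 */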
2550static int igb_offload_cbs(struct igb_adapter *adapter,
2551 struct tc_cbs_qopt_offload *qopt)
2552{
2553 struct e1000_hw *hw = &adapter->hw;
2554 int err;
2555
2556 /* CBS offloading is only supported by the i210 controller. */
2557 if (hw->mac.type != e1000_i210)
2558 return -EOPNOTSUPP;
2559
2560 /* CBS offloading is only supported on queue 0 and queue 1. */
2561 if (qopt->queue < 0 || qopt->queue > 1)
2562 return -EINVAL;
2563
2564 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2565 qopt->idleslope, qopt->sendslope,
2566 qopt->hicredit, qopt->locredit);
2567 if (err)
2568 return err;
2569
2570 igb_offload_apply(adapter, qopt->queue);
2571
2572 return 0;
2573}
2574
2575#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
2576#define VLAN_PRIO_FULL_MASK (0x07)
2577
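/* igb_parse_cls_flower - translate a cls_flower match (dst/src MAC,
 * EtherType, VLAN priority) into an igb_nfc_filter directing matching
 * frames to @traffic_class.  Only exact-match (full mask) keys are accepted.
 */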
2578static int igb_parse_cls_flower(struct igb_adapter *adapter,
2579 struct tc_cls_flower_offload *f,
2580 int traffic_class,
2581 struct igb_nfc_filter *input)
2582{
2583 struct netlink_ext_ack *extack = f->common.extack;
2584
2585 if (f->dissector->used_keys &
2586 ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
2587 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2588 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2589 BIT(FLOW_DISSECTOR_KEY_VLAN))) {
2590 NL_SET_ERR_MSG_MOD(extack,
2591 "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
2592 return -EOPNOTSUPP;
2593 }
2594
2595 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2596 struct flow_dissector_key_eth_addrs *key, *mask;
2597
2598 key = skb_flow_dissector_target(f->dissector,
2599 FLOW_DISSECTOR_KEY_ETH_ADDRS,
2600 f->key);
2601 mask = skb_flow_dissector_target(f->dissector,
2602 FLOW_DISSECTOR_KEY_ETH_ADDRS,
2603 f->mask);
2604
2605 if (!is_zero_ether_addr(mask->dst)) {
2606 if (!is_broadcast_ether_addr(mask->dst)) {
2607 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
2608 return -EINVAL;
2609 }
2610
2611 input->filter.match_flags |=
2612 IGB_FILTER_FLAG_DST_MAC_ADDR;
2613 ether_addr_copy(input->filter.dst_addr, key->dst);
2614 }
2615
2616 if (!is_zero_ether_addr(mask->src)) {
2617 if (!is_broadcast_ether_addr(mask->src)) {
2618 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
2619 return -EINVAL;
2620 }
2621
2622 input->filter.match_flags |=
2623 IGB_FILTER_FLAG_SRC_MAC_ADDR;
2624 ether_addr_copy(input->filter.src_addr, key->src);
2625 }
2626 }
2627
2628 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
2629 struct flow_dissector_key_basic *key, *mask;
2630
2631 key = skb_flow_dissector_target(f->dissector,
2632 FLOW_DISSECTOR_KEY_BASIC,
2633 f->key);
2634 mask = skb_flow_dissector_target(f->dissector,
2635 FLOW_DISSECTOR_KEY_BASIC,
2636 f->mask);
2637
2638 if (mask->n_proto) {
2639 if (mask->n_proto != ETHER_TYPE_FULL_MASK) {
2640 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
2641 return -EINVAL;
2642 }
2643
2644 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
2645 input->filter.etype = key->n_proto;
2646 }
2647 }
2648
2649 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
2650 struct flow_dissector_key_vlan *key, *mask;
2651
2652 key = skb_flow_dissector_target(f->dissector,
2653 FLOW_DISSECTOR_KEY_VLAN,
2654 f->key);
2655 mask = skb_flow_dissector_target(f->dissector,
2656 FLOW_DISSECTOR_KEY_VLAN,
2657 f->mask);
2658
2659 if (mask->vlan_priority) {
2660 if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
2661 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
2662 return -EINVAL;
2663 }
2664
2665 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
2666 input->filter.vlan_tci = key->vlan_priority;
2667 }
2668 }
2669
2670 input->action = traffic_class;
2671 input->cookie = f->cookie;
2672
2673 return 0;
2674}
2675
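/* igb_configure_clsflower - add a cls_flower rule: map the class id to a
 * traffic class, parse the match, reject duplicates of existing ethtool or
 * cls_flower filters, and program the filter into the hardware.
 */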
2676static int igb_configure_clsflower(struct igb_adapter *adapter,
2677 struct tc_cls_flower_offload *cls_flower)
2678{
2679 struct netlink_ext_ack *extack = cls_flower->common.extack;
2680 struct igb_nfc_filter *filter, *f;
2681 int err, tc;
2682
2683 tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2684 if (tc < 0) {
2685 NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
2686 return -EINVAL;
2687 }
2688
2689 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2690 if (!filter)
2691 return -ENOMEM;
2692
2693 err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
2694 if (err < 0)
2695 goto err_parse;
2696
2697 spin_lock(&adapter->nfc_lock);
2698
2699 hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
2700 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2701 err = -EEXIST;
2702 NL_SET_ERR_MSG_MOD(extack,
2703 "This filter is already set in ethtool");
2704 goto err_locked;
2705 }
2706 }
2707
2708 hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
2709 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2710 err = -EEXIST;
2711 NL_SET_ERR_MSG_MOD(extack,
2712 "This filter is already set in cls_flower");
2713 goto err_locked;
2714 }
2715 }
2716
2717 err = igb_add_filter(adapter, filter);
2718 if (err < 0) {
2719 NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
2720 goto err_locked;
2721 }
2722
2723 hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
2724
2725 spin_unlock(&adapter->nfc_lock);
2726
2727 return 0;
2728
2729err_locked:
2730 spin_unlock(&adapter->nfc_lock);
2731
2732err_parse:
2733 kfree(filter);
2734
2735 return err;
2736}
2737
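/* igb_delete_clsflower - remove a previously offloaded cls_flower rule,
 * looked up by its cookie.
 */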
2738static int igb_delete_clsflower(struct igb_adapter *adapter,
2739 struct tc_cls_flower_offload *cls_flower)
2740{
2741 struct igb_nfc_filter *filter;
2742 int err;
2743
2744 spin_lock(&adapter->nfc_lock);
2745
2746 hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
2747 if (filter->cookie == cls_flower->cookie)
2748 break;
2749
2750 if (!filter) {
2751 err = -ENOENT;
2752 goto out;
2753 }
2754
2755 err = igb_erase_filter(adapter, filter);
2756 if (err < 0)
2757 goto out;
2758
2759 hlist_del(&filter->nfc_node);
2760 kfree(filter);
2761
2762out:
2763 spin_unlock(&adapter->nfc_lock);
2764
2765 return err;
2766}
2767
2768static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
2769 struct tc_cls_flower_offload *cls_flower)
2770{
2771 switch (cls_flower->command) {
2772 case TC_CLSFLOWER_REPLACE:
2773 return igb_configure_clsflower(adapter, cls_flower);
2774 case TC_CLSFLOWER_DESTROY:
2775 return igb_delete_clsflower(adapter, cls_flower);
2776 case TC_CLSFLOWER_STATS:
2777 return -EOPNOTSUPP;
2778 default:
2779 return -EOPNOTSUPP;
2780 }
2781}
2782
2783static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2784 void *cb_priv)
2785{
2786 struct igb_adapter *adapter = cb_priv;
2787
2788 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
2789 return -EOPNOTSUPP;
2790
2791 switch (type) {
2792 case TC_SETUP_CLSFLOWER:
2793 return igb_setup_tc_cls_flower(adapter, type_data);
2794
2795 default:
2796 return -EOPNOTSUPP;
2797 }
2798}
2799
2800static int igb_setup_tc_block(struct igb_adapter *adapter,
2801 struct tc_block_offload *f)
2802{
2803 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
2804 return -EOPNOTSUPP;
2805
2806 switch (f->command) {
2807 case TC_BLOCK_BIND:
2808 return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
2809 adapter, adapter, f->extack);
2810 case TC_BLOCK_UNBIND:
2811 tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
2812 adapter);
2813 return 0;
2814 default:
2815 return -EOPNOTSUPP;
2816 }
2817}
2818
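/* igb_offload_txtime - offload an ETF (earliest txtime first) qdisc by
 * enabling per-packet launchtime; i210 only, Tx queues 0 and 1 only.
 */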
2819static int igb_offload_txtime(struct igb_adapter *adapter,
2820 struct tc_etf_qopt_offload *qopt)
2821{
2822 struct e1000_hw *hw = &adapter->hw;
2823 int err;
2824
2825
2826 if (hw->mac.type != e1000_i210)
2827 return -EOPNOTSUPP;
2828
2829
2830 if (qopt->queue < 0 || qopt->queue > 1)
2831 return -EINVAL;
2832
2833 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
2834 if (err)
2835 return err;
2836
2837 igb_offload_apply(adapter, qopt->queue);
2838
2839 return 0;
2840}
2841
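/* igb_setup_tc - ndo_setup_tc entry point dispatching CBS, ETF and
 * classifier block offload requests.
 */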
2842static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2843 void *type_data)
2844{
2845 struct igb_adapter *adapter = netdev_priv(dev);
2846
2847 switch (type) {
2848 case TC_SETUP_QDISC_CBS:
2849 return igb_offload_cbs(adapter, type_data);
2850 case TC_SETUP_BLOCK:
2851 return igb_setup_tc_block(adapter, type_data);
2852 case TC_SETUP_QDISC_ETF:
2853 return igb_offload_txtime(adapter, type_data);
2854
2855 default:
2856 return -EOPNOTSUPP;
2857 }
2858}
2859
2860static const struct net_device_ops igb_netdev_ops = {
2861 .ndo_open = igb_open,
2862 .ndo_stop = igb_close,
2863 .ndo_start_xmit = igb_xmit_frame,
2864 .ndo_get_stats64 = igb_get_stats64,
2865 .ndo_set_rx_mode = igb_set_rx_mode,
2866 .ndo_set_mac_address = igb_set_mac,
2867 .ndo_change_mtu = igb_change_mtu,
2868 .ndo_do_ioctl = igb_ioctl,
2869 .ndo_tx_timeout = igb_tx_timeout,
2870 .ndo_validate_addr = eth_validate_addr,
2871 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
2872 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
2873 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
2874 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
2875 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
2876 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
2877 .ndo_set_vf_trust = igb_ndo_set_vf_trust,
2878 .ndo_get_vf_config = igb_ndo_get_vf_config,
2879 .ndo_fix_features = igb_fix_features,
2880 .ndo_set_features = igb_set_features,
2881 .ndo_fdb_add = igb_ndo_fdb_add,
2882 .ndo_features_check = igb_features_check,
2883 .ndo_setup_tc = igb_setup_tc,
2884};
2885
2886/**
2887 *  igb_set_fw_version - build the firmware version string reported by ethtool
2888 *  @adapter: adapter struct
2889 **/
2890void igb_set_fw_version(struct igb_adapter *adapter)
2891{
2892 struct e1000_hw *hw = &adapter->hw;
2893 struct e1000_fw_version fw;
2894
2895 igb_get_fw_version(hw, &fw);
2896
2897 switch (hw->mac.type) {
2898 case e1000_i210:
2899 case e1000_i211:
2900 if (!(igb_get_flash_presence_i210(hw))) {
2901 snprintf(adapter->fw_version,
2902 sizeof(adapter->fw_version),
2903 "%2d.%2d-%d",
2904 fw.invm_major, fw.invm_minor,
2905 fw.invm_img_type);
2906 break;
2907 }
2908 /* fall through */
2909 default:
2910
2911 if (fw.or_valid) {
2912 snprintf(adapter->fw_version,
2913 sizeof(adapter->fw_version),
2914 "%d.%d, 0x%08x, %d.%d.%d",
2915 fw.eep_major, fw.eep_minor, fw.etrack_id,
2916 fw.or_major, fw.or_build, fw.or_patch);
2917
2918 } else if (fw.etrack_id != 0x0000) {
2919 snprintf(adapter->fw_version,
2920 sizeof(adapter->fw_version),
2921 "%d.%d, 0x%08x",
2922 fw.eep_major, fw.eep_minor, fw.etrack_id);
2923 } else {
2924 snprintf(adapter->fw_version,
2925 sizeof(adapter->fw_version),
2926 "%d.%d.%d",
2927 fw.eep_major, fw.eep_minor, fw.eep_build);
2928 }
2929 break;
2930 }
2931}
2932
2933/**
2934 *  igb_init_mas - init Media Autosense feature if enabled in the NVM
2935 *
2936 *  @adapter: adapter struct
2937 **/
2938static void igb_init_mas(struct igb_adapter *adapter)
2939{
2940 struct e1000_hw *hw = &adapter->hw;
2941 u16 eeprom_data;
2942
2943 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
2944 switch (hw->bus.func) {
2945 case E1000_FUNC_0:
2946 if (eeprom_data & IGB_MAS_ENABLE_0) {
2947 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2948 netdev_info(adapter->netdev,
2949 "MAS: Enabling Media Autosense for port %d\n",
2950 hw->bus.func);
2951 }
2952 break;
2953 case E1000_FUNC_1:
2954 if (eeprom_data & IGB_MAS_ENABLE_1) {
2955 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2956 netdev_info(adapter->netdev,
2957 "MAS: Enabling Media Autosense for port %d\n",
2958 hw->bus.func);
2959 }
2960 break;
2961 case E1000_FUNC_2:
2962 if (eeprom_data & IGB_MAS_ENABLE_2) {
2963 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2964 netdev_info(adapter->netdev,
2965 "MAS: Enabling Media Autosense for port %d\n",
2966 hw->bus.func);
2967 }
2968 break;
2969 case E1000_FUNC_3:
2970 if (eeprom_data & IGB_MAS_ENABLE_3) {
2971 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2972 netdev_info(adapter->netdev,
2973 "MAS: Enabling Media Autosense for port %d\n",
2974 hw->bus.func);
2975 }
2976 break;
2977 default:
2978
2979 netdev_err(adapter->netdev,
2980 "MAS: Invalid port configuration, returning\n");
2981 break;
2982 }
2983}
2984
2985/**
2986 *  igb_init_i2c - Init the I2C interface
2987 *  @adapter: pointer to adapter structure
2988 **/
2989static s32 igb_init_i2c(struct igb_adapter *adapter)
2990{
2991 s32 status = 0;
2992
2993
2994 if (adapter->hw.mac.type != e1000_i350)
2995 return 0;
2996
2997 /* Initialize the bit-banged i2c bus that is controlled through the
2998  * adapter registers; it uses the i2c_algo_bit structure, which
2999  * implements the protocol by toggling the bus bits in that register.
3000  */
3001 adapter->i2c_adap.owner = THIS_MODULE;
3002 adapter->i2c_algo = igb_i2c_algo;
3003 adapter->i2c_algo.data = adapter;
3004 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
3005 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
3006 strlcpy(adapter->i2c_adap.name, "igb BB",
3007 sizeof(adapter->i2c_adap.name));
3008 status = i2c_bit_add_bus(&adapter->i2c_adap);
3009 return status;
3010}
3011
3012/**
3013 *  igb_probe - Device Initialization Routine
3014 *  @pdev: PCI device information struct
3015 *  @ent: entry in igb_pci_tbl
3016 *
3017 *  Returns 0 on success, negative on failure
3018 *
3019 *  igb_probe initializes an adapter identified by a pci_dev structure.
3020 *  The OS initialization, configuring of the adapter private structure,
3021 *  and a hardware reset occur.
3022 **/
3023static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3024{
3025 struct net_device *netdev;
3026 struct igb_adapter *adapter;
3027 struct e1000_hw *hw;
3028 u16 eeprom_data = 0;
3029 s32 ret_val;
3030 static int global_quad_port_a;
3031 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
3032 int err, pci_using_dac;
3033 u8 part_str[E1000_PBANUM_LENGTH];
3034
3035 /* Catch broken hardware that puts the wrong VF device ID in
3036  * the PCIe SR-IOV capability.
3037  */
3038 if (pdev->is_virtfn) {
3039 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
3040 pci_name(pdev), pdev->vendor, pdev->device);
3041 return -EINVAL;
3042 }
3043
3044 err = pci_enable_device_mem(pdev);
3045 if (err)
3046 return err;
3047
3048 pci_using_dac = 0;
3049 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3050 if (!err) {
3051 pci_using_dac = 1;
3052 } else {
3053 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3054 if (err) {
3055 dev_err(&pdev->dev,
3056 "No usable DMA configuration, aborting\n");
3057 goto err_dma;
3058 }
3059 }
3060
3061 err = pci_request_mem_regions(pdev, igb_driver_name);
3062 if (err)
3063 goto err_pci_reg;
3064
3065 pci_enable_pcie_error_reporting(pdev);
3066
3067 pci_set_master(pdev);
3068 pci_save_state(pdev);
3069
3070 err = -ENOMEM;
3071 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
3072 IGB_MAX_TX_QUEUES);
3073 if (!netdev)
3074 goto err_alloc_etherdev;
3075
3076 SET_NETDEV_DEV(netdev, &pdev->dev);
3077
3078 pci_set_drvdata(pdev, netdev);
3079 adapter = netdev_priv(netdev);
3080 adapter->netdev = netdev;
3081 adapter->pdev = pdev;
3082 hw = &adapter->hw;
3083 hw->back = adapter;
3084 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3085
3086 err = -EIO;
3087 adapter->io_addr = pci_iomap(pdev, 0, 0);
3088 if (!adapter->io_addr)
3089 goto err_ioremap;
3090
3091 hw->hw_addr = adapter->io_addr;
3092
3093 netdev->netdev_ops = &igb_netdev_ops;
3094 igb_set_ethtool_ops(netdev);
3095 netdev->watchdog_timeo = 5 * HZ;
3096
3097 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
3098
3099 netdev->mem_start = pci_resource_start(pdev, 0);
3100 netdev->mem_end = pci_resource_end(pdev, 0);
3101
3102
3103 hw->vendor_id = pdev->vendor;
3104 hw->device_id = pdev->device;
3105 hw->revision_id = pdev->revision;
3106 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3107 hw->subsystem_device_id = pdev->subsystem_device;
3108
3109
3110 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3111 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3112 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3113
3114 err = ei->get_invariants(hw);
3115 if (err)
3116 goto err_sw_init;
3117
3118
3119 err = igb_sw_init(adapter);
3120 if (err)
3121 goto err_sw_init;
3122
3123 igb_get_bus_info_pcie(hw);
3124
3125 hw->phy.autoneg_wait_to_complete = false;
3126
3127
3128 if (hw->phy.media_type == e1000_media_type_copper) {
3129 hw->phy.mdix = AUTO_ALL_MODES;
3130 hw->phy.disable_polarity_correction = false;
3131 hw->phy.ms_type = e1000_ms_hw_default;
3132 }
3133
3134 if (igb_check_reset_block(hw))
3135 dev_info(&pdev->dev,
3136 "PHY reset is blocked due to SOL/IDER session.\n");
3137
3138 /* features is initialized to 0 in alloc_etherdev_mq(); it might
3139  * already have bits set by igb_sw_init(), so use an OR instead of
3140  * an assignment below.
3141  */
3142 netdev->features |= NETIF_F_SG |
3143 NETIF_F_TSO |
3144 NETIF_F_TSO6 |
3145 NETIF_F_RXHASH |
3146 NETIF_F_RXCSUM |
3147 NETIF_F_HW_CSUM;
3148
3149 if (hw->mac.type >= e1000_82576)
3150 netdev->features |= NETIF_F_SCTP_CRC;
3151
3152 if (hw->mac.type >= e1000_i350)
3153 netdev->features |= NETIF_F_HW_TC;
3154
3155#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
3156 NETIF_F_GSO_GRE_CSUM | \
3157 NETIF_F_GSO_IPXIP4 | \
3158 NETIF_F_GSO_IPXIP6 | \
3159 NETIF_F_GSO_UDP_TUNNEL | \
3160 NETIF_F_GSO_UDP_TUNNEL_CSUM)
3161
3162 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
3163 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
3164
3165
3166 netdev->hw_features |= netdev->features |
3167 NETIF_F_HW_VLAN_CTAG_RX |
3168 NETIF_F_HW_VLAN_CTAG_TX |
3169 NETIF_F_RXALL;
3170
3171 if (hw->mac.type >= e1000_i350)
3172 netdev->hw_features |= NETIF_F_NTUPLE;
3173
3174 if (pci_using_dac)
3175 netdev->features |= NETIF_F_HIGHDMA;
3176
3177 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3178 netdev->mpls_features |= NETIF_F_HW_CSUM;
3179 netdev->hw_enc_features |= netdev->vlan_features;
3180
3181
3182 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3183 NETIF_F_HW_VLAN_CTAG_RX |
3184 NETIF_F_HW_VLAN_CTAG_TX;
3185
3186 netdev->priv_flags |= IFF_SUPP_NOFCS;
3187
3188 netdev->priv_flags |= IFF_UNICAST_FLT;
3189
3190
3191 netdev->min_mtu = ETH_MIN_MTU;
3192 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3193
3194 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
3195
3196
3197
3198
3199 hw->mac.ops.reset_hw(hw);
3200
3201 /* make sure the NVM is good; i210/i211 parts can be flashless and
3202  * then have no NVM checksum to validate
3203  */
3204 switch (hw->mac.type) {
3205 case e1000_i210:
3206 case e1000_i211:
3207 if (igb_get_flash_presence_i210(hw)) {
3208 if (hw->nvm.ops.validate(hw) < 0) {
3209 dev_err(&pdev->dev,
3210 "The NVM Checksum Is Not Valid\n");
3211 err = -EIO;
3212 goto err_eeprom;
3213 }
3214 }
3215 break;
3216 default:
3217 if (hw->nvm.ops.validate(hw) < 0) {
3218 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
3219 err = -EIO;
3220 goto err_eeprom;
3221 }
3222 break;
3223 }
3224
3225 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
3226
3227 if (hw->mac.ops.read_mac_addr(hw))
3228 dev_err(&pdev->dev, "NVM Read Error\n");
3229 }
3230
3231 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
3232
3233 if (!is_valid_ether_addr(netdev->dev_addr)) {
3234 dev_err(&pdev->dev, "Invalid MAC Address\n");
3235 err = -EIO;
3236 goto err_eeprom;
3237 }
3238
3239 igb_set_default_mac_filter(adapter);
3240
3241
3242 igb_set_fw_version(adapter);
3243
3244
3245 if (hw->mac.type == e1000_i210) {
3246 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
3247 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
3248 }
3249
3250 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
3251 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
3252
3253 INIT_WORK(&adapter->reset_task, igb_reset_task);
3254 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
3255
3256
3257 adapter->fc_autoneg = true;
3258 hw->mac.autoneg = true;
3259 hw->phy.autoneg_advertised = 0x2f;
3260
3261 hw->fc.requested_mode = e1000_fc_default;
3262 hw->fc.current_mode = e1000_fc_default;
3263
3264 igb_validate_mdi_setting(hw);
3265
3266
3267 if (hw->bus.func == 0)
3268 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3269
3270
3271 if (hw->mac.type >= e1000_82580)
3272 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3273 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3274 &eeprom_data);
3275 else if (hw->bus.func == 1)
3276 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3277
3278 if (eeprom_data & IGB_EEPROM_APME)
3279 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3280
3281 /* now that we have the eeprom settings, apply the special cases
3282  * where the eeprom may be wrong or the board simply won't support
3283  * wake on lan on a particular port
3284  */
3285 switch (pdev->device) {
3286 case E1000_DEV_ID_82575GB_QUAD_COPPER:
3287 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3288 break;
3289 case E1000_DEV_ID_82575EB_FIBER_SERDES:
3290 case E1000_DEV_ID_82576_FIBER:
3291 case E1000_DEV_ID_82576_SERDES:
3292
3293
3294
3295 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
3296 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3297 break;
3298 case E1000_DEV_ID_82576_QUAD_COPPER:
3299 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
3300
3301 if (global_quad_port_a != 0)
3302 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3303 else
3304 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
3305
3306 if (++global_quad_port_a == 4)
3307 global_quad_port_a = 0;
3308 break;
3309 default:
3310
3311 if (!device_can_wakeup(&adapter->pdev->dev))
3312 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3313 }
3314
3315
3316 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
3317 adapter->wol |= E1000_WUFC_MAG;
3318
3319
3320 if ((hw->mac.type == e1000_i350) &&
3321 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
3322 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3323 adapter->wol = 0;
3324 }
3325
3326
3327
3328
3329 if (((hw->mac.type == e1000_i350) ||
3330 (hw->mac.type == e1000_i354)) &&
3331 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
3332 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3333 adapter->wol = 0;
3334 }
3335 if (hw->mac.type == e1000_i350) {
3336 if (((pdev->subsystem_device == 0x5001) ||
3337 (pdev->subsystem_device == 0x5002)) &&
3338 (hw->bus.func == 0)) {
3339 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3340 adapter->wol = 0;
3341 }
3342 if (pdev->subsystem_device == 0x1F52)
3343 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3344 }
3345
3346 device_set_wakeup_enable(&adapter->pdev->dev,
3347 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
3348
3349
3350 igb_reset(adapter);
3351
3352
3353 err = igb_init_i2c(adapter);
3354 if (err) {
3355 dev_err(&pdev->dev, "failed to init i2c interface\n");
3356 goto err_eeprom;
3357 }
3358
3359 /* let the f/w know that the h/w is now under the control of
3360  * the driver
3361  */
3362 igb_get_hw_control(adapter);
3363
3364 strcpy(netdev->name, "eth%d");
3365 err = register_netdev(netdev);
3366 if (err)
3367 goto err_register;
3368
3369
3370 netif_carrier_off(netdev);
3371
3372#ifdef CONFIG_IGB_DCA
3373 if (dca_add_requester(&pdev->dev) == 0) {
3374 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3375 dev_info(&pdev->dev, "DCA enabled\n");
3376 igb_setup_dca(adapter);
3377 }
3378
3379#endif
3380#ifdef CONFIG_IGB_HWMON
3381
3382 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3383 u16 ets_word;
3384
3385 /* Read the NVM to determine if this i350 device supports an
3386  * external thermal sensor.
3387  */
3388 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3389 if (ets_word != 0x0000 && ets_word != 0xFFFF)
3390 adapter->ets = true;
3391 else
3392 adapter->ets = false;
3393 if (igb_sysfs_init(adapter))
3394 dev_err(&pdev->dev,
3395 "failed to allocate sysfs resources\n");
3396 } else {
3397 adapter->ets = false;
3398 }
3399#endif
3400
3401 adapter->ei = *ei;
3402 if (hw->dev_spec._82575.mas_capable)
3403 igb_init_mas(adapter);
3404
3405
3406 igb_ptp_init(adapter);
3407
3408 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3409
3410 if (hw->mac.type != e1000_i354) {
3411 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3412 netdev->name,
3413 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3414 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3415 "unknown"),
3416 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3417 "Width x4" :
3418 (hw->bus.width == e1000_bus_width_pcie_x2) ?
3419 "Width x2" :
3420 (hw->bus.width == e1000_bus_width_pcie_x1) ?
3421 "Width x1" : "unknown"), netdev->dev_addr);
3422 }
3423
3424 if ((hw->mac.type >= e1000_i210 ||
3425 igb_get_flash_presence_i210(hw))) {
3426 ret_val = igb_read_part_string(hw, part_str,
3427 E1000_PBANUM_LENGTH);
3428 } else {
3429 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3430 }
3431
3432 if (ret_val)
3433 strcpy(part_str, "Unknown");
3434 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3435 dev_info(&pdev->dev,
3436 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3437 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3438 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3439 adapter->num_rx_queues, adapter->num_tx_queues);
3440 if (hw->phy.media_type == e1000_media_type_copper) {
3441 switch (hw->mac.type) {
3442 case e1000_i350:
3443 case e1000_i210:
3444 case e1000_i211:
3445
3446 err = igb_set_eee_i350(hw, true, true);
3447 if ((!err) &&
3448 (!hw->dev_spec._82575.eee_disable)) {
3449 adapter->eee_advert =
3450 MDIO_EEE_100TX | MDIO_EEE_1000T;
3451 adapter->flags |= IGB_FLAG_EEE;
3452 }
3453 break;
3454 case e1000_i354:
3455 if ((rd32(E1000_CTRL_EXT) &
3456 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3457 err = igb_set_eee_i354(hw, true, true);
3458 if ((!err) &&
3459 (!hw->dev_spec._82575.eee_disable)) {
3460 adapter->eee_advert =
3461 MDIO_EEE_100TX | MDIO_EEE_1000T;
3462 adapter->flags |= IGB_FLAG_EEE;
3463 }
3464 }
3465 break;
3466 default:
3467 break;
3468 }
3469 }
3470 pm_runtime_put_noidle(&pdev->dev);
3471 return 0;
3472
3473err_register:
3474 igb_release_hw_control(adapter);
3475 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3476err_eeprom:
3477 if (!igb_check_reset_block(hw))
3478 igb_reset_phy(hw);
3479
3480 if (hw->flash_address)
3481 iounmap(hw->flash_address);
3482err_sw_init:
3483 kfree(adapter->mac_table);
3484 kfree(adapter->shadow_vfta);
3485 igb_clear_interrupt_scheme(adapter);
3486#ifdef CONFIG_PCI_IOV
3487 igb_disable_sriov(pdev);
3488#endif
3489 pci_iounmap(pdev, adapter->io_addr);
3490err_ioremap:
3491 free_netdev(netdev);
3492err_alloc_etherdev:
3493 pci_release_mem_regions(pdev);
3494err_pci_reg:
3495err_dma:
3496 pci_disable_device(pdev);
3497 return err;
3498}
3499
3500#ifdef CONFIG_PCI_IOV
3501static int igb_disable_sriov(struct pci_dev *pdev)
3502{
3503 struct net_device *netdev = pci_get_drvdata(pdev);
3504 struct igb_adapter *adapter = netdev_priv(netdev);
3505 struct e1000_hw *hw = &adapter->hw;
3506
3507
3508 if (adapter->vf_data) {
3509
3510 if (pci_vfs_assigned(pdev)) {
3511 dev_warn(&pdev->dev,
3512 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3513 return -EPERM;
3514 } else {
3515 pci_disable_sriov(pdev);
3516 msleep(500);
3517 }
3518
3519 kfree(adapter->vf_mac_list);
3520 adapter->vf_mac_list = NULL;
3521 kfree(adapter->vf_data);
3522 adapter->vf_data = NULL;
3523 adapter->vfs_allocated_count = 0;
3524 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3525 wrfl();
3526 msleep(100);
3527 dev_info(&pdev->dev, "IOV Disabled\n");
3528
3529
3530 adapter->flags |= IGB_FLAG_DMAC;
3531 }
3532
3533 return 0;
3534}
3535
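/* igb_enable_sriov - allocate per-VF state and the VF MAC filter list, then
 * enable up to 7 VFs through the PCI core (reusing VFs that were already
 * provisioned), and disable DMA coalescing which is not supported with IOV.
 */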
3536static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
3537{
3538 struct net_device *netdev = pci_get_drvdata(pdev);
3539 struct igb_adapter *adapter = netdev_priv(netdev);
3540 int old_vfs = pci_num_vf(pdev);
3541 struct vf_mac_filter *mac_list;
3542 int err = 0;
3543 int num_vf_mac_filters, i;
3544
3545 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3546 err = -EPERM;
3547 goto out;
3548 }
3549 if (!num_vfs)
3550 goto out;
3551
3552 if (old_vfs) {
3553 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3554 old_vfs, max_vfs);
3555 adapter->vfs_allocated_count = old_vfs;
3556 } else
3557 adapter->vfs_allocated_count = num_vfs;
3558
3559 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3560 sizeof(struct vf_data_storage), GFP_KERNEL);
3561
3562
3563 if (!adapter->vf_data) {
3564 adapter->vfs_allocated_count = 0;
3565 err = -ENOMEM;
3566 goto out;
3567 }
3568
3569
3570 /* With a limited number of RAR entries, compute how many MAC
3571  * filters are left for the VFs after reserving the PF default MAC,
3572  * the PF MAC filters and one RAR entry per VF.
3573  */
3574 num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3575 (1 + IGB_PF_MAC_FILTERS_RESERVED +
3576 adapter->vfs_allocated_count);
3577
3578 adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3579 sizeof(struct vf_mac_filter),
3580 GFP_KERNEL);
3581
3582 mac_list = adapter->vf_mac_list;
3583 INIT_LIST_HEAD(&adapter->vf_macs.l);
3584
3585 if (adapter->vf_mac_list) {
3586
3587 for (i = 0; i < num_vf_mac_filters; i++) {
3588 mac_list->vf = -1;
3589 mac_list->free = true;
3590 list_add(&mac_list->l, &adapter->vf_macs.l);
3591 mac_list++;
3592 }
3593 } else {
3594 /* If we could not allocate memory for the VF MAC filter list,
3595  * we can continue without this feature but warn the user.
3596  */
3597 dev_err(&pdev->dev,
3598 "Unable to allocate memory for VF MAC filter list\n");
3599 }
3600
3601
3602 if (!old_vfs) {
3603 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3604 if (err)
3605 goto err_out;
3606 }
3607 dev_info(&pdev->dev, "%d VFs allocated\n",
3608 adapter->vfs_allocated_count);
3609 for (i = 0; i < adapter->vfs_allocated_count; i++)
3610 igb_vf_configure(adapter, i);
3611
3612
3613 adapter->flags &= ~IGB_FLAG_DMAC;
3614 goto out;
3615
3616err_out:
3617 kfree(adapter->vf_mac_list);
3618 adapter->vf_mac_list = NULL;
3619 kfree(adapter->vf_data);
3620 adapter->vf_data = NULL;
3621 adapter->vfs_allocated_count = 0;
3622out:
3623 return err;
3624}
3625
3626#endif
3627/**
3628 *  igb_remove_i2c - Cleanup the I2C interface
3629 *  @adapter: pointer to adapter structure
3630 **/
3631static void igb_remove_i2c(struct igb_adapter *adapter)
3632{
3633
3634 i2c_del_adapter(&adapter->i2c_adap);
3635}
3636
3637/**
3638 *  igb_remove - Device Removal Routine
3639 *  @pdev: PCI device information struct
3640 *
3641 *  igb_remove is called by the PCI subsystem to alert the driver
3642 *  that it should release a PCI device.  This could be caused by a
3643 *  Hot-Plug event, or because the driver is going to be removed
3644 *  from memory.
3645 **/
3646static void igb_remove(struct pci_dev *pdev)
3647{
3648 struct net_device *netdev = pci_get_drvdata(pdev);
3649 struct igb_adapter *adapter = netdev_priv(netdev);
3650 struct e1000_hw *hw = &adapter->hw;
3651
3652 pm_runtime_get_noresume(&pdev->dev);
3653#ifdef CONFIG_IGB_HWMON
3654 igb_sysfs_exit(adapter);
3655#endif
3656 igb_remove_i2c(adapter);
3657 igb_ptp_stop(adapter);
3658
3659
3660
3661 set_bit(__IGB_DOWN, &adapter->state);
3662 del_timer_sync(&adapter->watchdog_timer);
3663 del_timer_sync(&adapter->phy_info_timer);
3664
3665 cancel_work_sync(&adapter->reset_task);
3666 cancel_work_sync(&adapter->watchdog_task);
3667
3668#ifdef CONFIG_IGB_DCA
3669 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3670 dev_info(&pdev->dev, "DCA disabled\n");
3671 dca_remove_requester(&pdev->dev);
3672 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3673 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3674 }
3675#endif
3676
3677 /* Release control of the h/w to the f/w.  If the f/w is AMT enabled,
3678  * this would already have happened in close and is redundant.
3679  */
3680 igb_release_hw_control(adapter);
3681
3682#ifdef CONFIG_PCI_IOV
3683 igb_disable_sriov(pdev);
3684#endif
3685
3686 unregister_netdev(netdev);
3687
3688 igb_clear_interrupt_scheme(adapter);
3689
3690 pci_iounmap(pdev, adapter->io_addr);
3691 if (hw->flash_address)
3692 iounmap(hw->flash_address);
3693 pci_release_mem_regions(pdev);
3694
3695 kfree(adapter->mac_table);
3696 kfree(adapter->shadow_vfta);
3697 free_netdev(netdev);
3698
3699 pci_disable_pcie_error_reporting(pdev);
3700
3701 pci_disable_device(pdev);
3702}
3703
3704/**
3705 *  igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
3706 *  @adapter: board private structure to initialize
3707 *
3708 *  This function initializes the vf specific data storage and then attempts
3709 *  to allocate the VFs.  The reason for ordering it this way is because it
3710 *  is much more expensive time wise to disable SR-IOV than it is to allocate
3711 *  and free the resources for the VFs.
3712 **/
3713static void igb_probe_vfs(struct igb_adapter *adapter)
3714{
3715#ifdef CONFIG_PCI_IOV
3716 struct pci_dev *pdev = adapter->pdev;
3717 struct e1000_hw *hw = &adapter->hw;
3718
3719
3720 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
3721 return;
3722
3723 /* Of the below we really only want the effect of getting
3724  * IGB_FLAG_HAS_MSIX set (if available), without which
3725  * igb_enable_sriov() has no effect.
3726  */
3727 igb_set_interrupt_capability(adapter, true);
3728 igb_reset_interrupt_capability(adapter);
3729
3730 pci_sriov_set_totalvfs(pdev, 7);
3731 igb_enable_sriov(pdev, max_vfs);
3732
3733#endif
3734}
3735
3736unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3737{
3738 struct e1000_hw *hw = &adapter->hw;
3739 unsigned int max_rss_queues;
3740
3741
3742 switch (hw->mac.type) {
3743 case e1000_i211:
3744 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3745 break;
3746 case e1000_82575:
3747 case e1000_i210:
3748 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3749 break;
3750 case e1000_i350:
3751
3752 if (!!adapter->vfs_allocated_count) {
3753 max_rss_queues = 1;
3754 break;
3755 }
3756 /* fall through */
3757 case e1000_82576:
3758 if (!!adapter->vfs_allocated_count) {
3759 max_rss_queues = 2;
3760 break;
3761 }
3762 /* fall through */
3763 case e1000_82580:
3764 case e1000_i354:
3765 default:
3766 max_rss_queues = IGB_MAX_RX_QUEUES;
3767 break;
3768 }
3769
3770 return max_rss_queues;
3771}
3772
3773static void igb_init_queue_configuration(struct igb_adapter *adapter)
3774{
3775 u32 max_rss_queues;
3776
3777 max_rss_queues = igb_get_max_rss_queues(adapter);
3778 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3779
3780 igb_set_flag_queue_pairs(adapter, max_rss_queues);
3781}
3782
3783void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3784 const u32 max_rss_queues)
3785{
3786 struct e1000_hw *hw = &adapter->hw;
3787
3788
3789 switch (hw->mac.type) {
3790 case e1000_82575:
3791 case e1000_i211:
3792
3793 break;
3794 case e1000_82576:
3795 case e1000_82580:
3796 case e1000_i350:
3797 case e1000_i354:
3798 case e1000_i210:
3799 default:
3800 /* If rss_queues > half of max_rss_queues, pair the queues in
3801  * order to conserve interrupts due to a limited supply.
3802  */
3803 if (adapter->rss_queues > (max_rss_queues / 2))
3804 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
3805 else
3806 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
3807 break;
3808 }
3809}
3810
3811/**
3812 *  igb_sw_init - Initialize general software structures (struct igb_adapter)
3813 *  @adapter: board private structure to initialize
3814 *
3815 *  igb_sw_init initializes the Adapter private data structure.
3816 *  Fields are initialized based on PCI device information and
3817 *  OS network device settings (MTU size).
3818 **/
3819static int igb_sw_init(struct igb_adapter *adapter)
3820{
3821 struct e1000_hw *hw = &adapter->hw;
3822 struct net_device *netdev = adapter->netdev;
3823 struct pci_dev *pdev = adapter->pdev;
3824
3825 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3826
3827
3828 adapter->tx_ring_count = IGB_DEFAULT_TXD;
3829 adapter->rx_ring_count = IGB_DEFAULT_RXD;
3830
3831
3832 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
3833 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
3834
3835
3836 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3837
3838 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3839 VLAN_HLEN;
3840 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3841
3842 spin_lock_init(&adapter->nfc_lock);
3843 spin_lock_init(&adapter->stats64_lock);
3844#ifdef CONFIG_PCI_IOV
3845 switch (hw->mac.type) {
3846 case e1000_82576:
3847 case e1000_i350:
3848 if (max_vfs > 7) {
3849 dev_warn(&pdev->dev,
3850 "Maximum of 7 VFs per PF, using max\n");
3851 max_vfs = adapter->vfs_allocated_count = 7;
3852 } else
3853 adapter->vfs_allocated_count = max_vfs;
3854 if (adapter->vfs_allocated_count)
3855 dev_warn(&pdev->dev,
3856 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3857 break;
3858 default:
3859 break;
3860 }
3861#endif
3862
3863
3864 adapter->flags |= IGB_FLAG_HAS_MSIX;
3865
3866 adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
3867 sizeof(struct igb_mac_addr),
3868 GFP_KERNEL);
3869 if (!adapter->mac_table)
3870 return -ENOMEM;
3871
3872 igb_probe_vfs(adapter);
3873
3874 igb_init_queue_configuration(adapter);
3875
3876
3877 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
3878 GFP_KERNEL);
3879 if (!adapter->shadow_vfta)
3880 return -ENOMEM;
3881
3882
3883 if (igb_init_interrupt_scheme(adapter, true)) {
3884 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3885 return -ENOMEM;
3886 }
3887
3888
3889 igb_irq_disable(adapter);
3890
3891 if (hw->mac.type >= e1000_i350)
3892 adapter->flags &= ~IGB_FLAG_DMAC;
3893
3894 set_bit(__IGB_DOWN, &adapter->state);
3895 return 0;
3896}
3897
3898/**
3899 *  __igb_open - Called when a network interface is made active
3900 *  @netdev: network interface device structure
3901 *  @resuming: indicates whether we are in a resume call
3902 *
3903 *  Returns 0 on success, negative value on failure
3904 *
3905 *  The open entry point is called when a network interface is made
3906 *  active by the system (IFF_UP).  At this point all resources needed
3907 *  for transmit and receive operations are allocated, the interrupt
3908 *  handler is registered with the OS, and the watchdog is started.
3909 **/
3910static int __igb_open(struct net_device *netdev, bool resuming)
3911{
3912 struct igb_adapter *adapter = netdev_priv(netdev);
3913 struct e1000_hw *hw = &adapter->hw;
3914 struct pci_dev *pdev = adapter->pdev;
3915 int err;
3916 int i;
3917
3918
3919 if (test_bit(__IGB_TESTING, &adapter->state)) {
3920 WARN_ON(resuming);
3921 return -EBUSY;
3922 }
3923
3924 if (!resuming)
3925 pm_runtime_get_sync(&pdev->dev);
3926
3927 netif_carrier_off(netdev);
3928
3929
3930 err = igb_setup_all_tx_resources(adapter);
3931 if (err)
3932 goto err_setup_tx;
3933
3934
3935 err = igb_setup_all_rx_resources(adapter);
3936 if (err)
3937 goto err_setup_rx;
3938
3939 igb_power_up_link(adapter);
3940
3941 /* before we allocate an interrupt, we must be ready to handle it.
3942  * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3943  * as soon as we call pci_request_irq, so we have to setup our
3944  * clean_rx handler before we do so.
3945  */
3946 igb_configure(adapter);
3947
3948 err = igb_request_irq(adapter);
3949 if (err)
3950 goto err_req_irq;
3951
3952
3953 err = netif_set_real_num_tx_queues(adapter->netdev,
3954 adapter->num_tx_queues);
3955 if (err)
3956 goto err_set_queues;
3957
3958 err = netif_set_real_num_rx_queues(adapter->netdev,
3959 adapter->num_rx_queues);
3960 if (err)
3961 goto err_set_queues;
3962
3963
3964 clear_bit(__IGB_DOWN, &adapter->state);
3965
3966 for (i = 0; i < adapter->num_q_vectors; i++)
3967 napi_enable(&(adapter->q_vector[i]->napi));
3968
3969
3970 rd32(E1000_TSICR);
3971 rd32(E1000_ICR);
3972
3973 igb_irq_enable(adapter);
3974
3975
3976 if (adapter->vfs_allocated_count) {
3977 u32 reg_data = rd32(E1000_CTRL_EXT);
3978
3979 reg_data |= E1000_CTRL_EXT_PFRSTD;
3980 wr32(E1000_CTRL_EXT, reg_data);
3981 }
3982
3983 netif_tx_start_all_queues(netdev);
3984
3985 if (!resuming)
3986 pm_runtime_put(&pdev->dev);
3987
3988
3989 hw->mac.get_link_status = 1;
3990 schedule_work(&adapter->watchdog_task);
3991
3992 return 0;
3993
3994err_set_queues:
3995 igb_free_irq(adapter);
3996err_req_irq:
3997 igb_release_hw_control(adapter);
3998 igb_power_down_link(adapter);
3999 igb_free_all_rx_resources(adapter);
4000err_setup_rx:
4001 igb_free_all_tx_resources(adapter);
4002err_setup_tx:
4003 igb_reset(adapter);
4004 if (!resuming)
4005 pm_runtime_put(&pdev->dev);
4006
4007 return err;
4008}
4009
4010int igb_open(struct net_device *netdev)
4011{
4012 return __igb_open(netdev, false);
4013}
4014
4015/**
4016 *  __igb_close - Disables a network interface
4017 *  @netdev: network interface device structure
4018 *  @suspending: indicates we are in a suspend call
4019 *
4020 *  Returns 0, this is not allowed to fail
4021 *
4022 *  The close entry point is called when an interface is de-activated
4023 *  by the OS.  The hardware is still under the control of the driver,
4024 *  but needs to be disabled; all Tx and Rx resources are freed.
4025 **/
4026static int __igb_close(struct net_device *netdev, bool suspending)
4027{
4028 struct igb_adapter *adapter = netdev_priv(netdev);
4029 struct pci_dev *pdev = adapter->pdev;
4030
4031 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
4032
4033 if (!suspending)
4034 pm_runtime_get_sync(&pdev->dev);
4035
4036 igb_down(adapter);
4037 igb_free_irq(adapter);
4038
4039 igb_free_all_tx_resources(adapter);
4040 igb_free_all_rx_resources(adapter);
4041
4042 if (!suspending)
4043 pm_runtime_put_sync(&pdev->dev);
4044 return 0;
4045}
4046
4047int igb_close(struct net_device *netdev)
4048{
4049 if (netif_device_present(netdev) || netdev->dismantle)
4050 return __igb_close(netdev, false);
4051 return 0;
4052}
4053
4054/**
4055 *  igb_setup_tx_resources - allocate Tx resources (Descriptors)
4056 *  @tx_ring: tx descriptor ring (for a specific queue) to setup
4057 *
4058 *  Return 0 on success, negative on failure
4059 **/
4060int igb_setup_tx_resources(struct igb_ring *tx_ring)
4061{
4062 struct device *dev = tx_ring->dev;
4063 int size;
4064
4065 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
4066
4067 tx_ring->tx_buffer_info = vmalloc(size);
4068 if (!tx_ring->tx_buffer_info)
4069 goto err;
4070
4071
4072 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
4073 tx_ring->size = ALIGN(tx_ring->size, 4096);
4074
4075 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4076 &tx_ring->dma, GFP_KERNEL);
4077 if (!tx_ring->desc)
4078 goto err;
4079
4080 tx_ring->next_to_use = 0;
4081 tx_ring->next_to_clean = 0;
4082
4083 return 0;
4084
4085err:
4086 vfree(tx_ring->tx_buffer_info);
4087 tx_ring->tx_buffer_info = NULL;
4088 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4089 return -ENOMEM;
4090}
4091
4092/**
4093 *  igb_setup_all_tx_resources - allocate Tx resources (Descriptors)
4094 *  for all transmit queues
4095 *  @adapter: board private structure
4096 *
4097 *  Return 0 on success, negative on failure
4098 **/
4099static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
4100{
4101 struct pci_dev *pdev = adapter->pdev;
4102 int i, err = 0;
4103
4104 for (i = 0; i < adapter->num_tx_queues; i++) {
4105 err = igb_setup_tx_resources(adapter->tx_ring[i]);
4106 if (err) {
4107 dev_err(&pdev->dev,
4108 "Allocation for Tx Queue %u failed\n", i);
4109 for (i--; i >= 0; i--)
4110 igb_free_tx_resources(adapter->tx_ring[i]);
4111 break;
4112 }
4113 }
4114
4115 return err;
4116}
4117
4118/**
4119 *  igb_setup_tctl - configure the transmit control registers
4120 *  @adapter: Board private structure
4121 **/
4122void igb_setup_tctl(struct igb_adapter *adapter)
4123{
4124 struct e1000_hw *hw = &adapter->hw;
4125 u32 tctl;
4126
4127
4128 wr32(E1000_TXDCTL(0), 0);
4129
4130
4131 tctl = rd32(E1000_TCTL);
4132 tctl &= ~E1000_TCTL_CT;
4133 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
4134 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
4135
4136 igb_config_collision_dist(hw);
4137
4138
4139 tctl |= E1000_TCTL_EN;
4140
4141 wr32(E1000_TCTL, tctl);
4142}
4143
4144/**
4145 *  igb_configure_tx_ring - Configure transmit ring after Reset
4146 *  @adapter: board private structure
4147 *  @ring: tx ring to configure
4148 *
4149 *  Configure a transmit ring after a reset.
4150 **/
4151void igb_configure_tx_ring(struct igb_adapter *adapter,
4152 struct igb_ring *ring)
4153{
4154 struct e1000_hw *hw = &adapter->hw;
4155 u32 txdctl = 0;
4156 u64 tdba = ring->dma;
4157 int reg_idx = ring->reg_idx;
4158
4159 wr32(E1000_TDLEN(reg_idx),
4160 ring->count * sizeof(union e1000_adv_tx_desc));
4161 wr32(E1000_TDBAL(reg_idx),
4162 tdba & 0x00000000ffffffffULL);
4163 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
4164
4165 ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
4166 wr32(E1000_TDH(reg_idx), 0);
4167 writel(0, ring->tail);
4168
4169 txdctl |= IGB_TX_PTHRESH;
4170 txdctl |= IGB_TX_HTHRESH << 8;
4171 txdctl |= IGB_TX_WTHRESH << 16;
4172
4173
4174 memset(ring->tx_buffer_info, 0,
4175 sizeof(struct igb_tx_buffer) * ring->count);
4176
4177 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
4178 wr32(E1000_TXDCTL(reg_idx), txdctl);
4179}
4180
4181/**
4182 *  igb_configure_tx - Configure transmit Unit after Reset
4183 *  @adapter: board private structure
4184 *
4185 *  Configure the Tx unit of the MAC after a reset.
4186 **/
4187static void igb_configure_tx(struct igb_adapter *adapter)
4188{
4189 struct e1000_hw *hw = &adapter->hw;
4190 int i;
4191
4192
4193 for (i = 0; i < adapter->num_tx_queues; i++)
4194 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4195
4196 wrfl();
4197 usleep_range(10000, 20000);
4198
4199 for (i = 0; i < adapter->num_tx_queues; i++)
4200 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4201}
4202
4203/**
4204 *  igb_setup_rx_resources - allocate Rx resources (Descriptors)
4205 *  @rx_ring: Rx descriptor ring (for a specific queue) to setup
4206 *
4207 *  Returns 0 on success, negative on failure
4208 **/
4209int igb_setup_rx_resources(struct igb_ring *rx_ring)
4210{
4211 struct device *dev = rx_ring->dev;
4212 int size;
4213
4214 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
4215
4216 rx_ring->rx_buffer_info = vmalloc(size);
4217 if (!rx_ring->rx_buffer_info)
4218 goto err;
4219
4220
4221 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
4222 rx_ring->size = ALIGN(rx_ring->size, 4096);
4223
4224 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4225 &rx_ring->dma, GFP_KERNEL);
4226 if (!rx_ring->desc)
4227 goto err;
4228
4229 rx_ring->next_to_alloc = 0;
4230 rx_ring->next_to_clean = 0;
4231 rx_ring->next_to_use = 0;
4232
4233 return 0;
4234
4235err:
4236 vfree(rx_ring->rx_buffer_info);
4237 rx_ring->rx_buffer_info = NULL;
4238 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4239 return -ENOMEM;
4240}
4241
4242/**
4243 *  igb_setup_all_rx_resources - allocate Rx resources (Descriptors)
4244 *  for all receive queues
4245 *  @adapter: board private structure
4246 *
4247 *  Return 0 on success, negative on failure
4248 **/
4249static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
4250{
4251 struct pci_dev *pdev = adapter->pdev;
4252 int i, err = 0;
4253
4254 for (i = 0; i < adapter->num_rx_queues; i++) {
4255 err = igb_setup_rx_resources(adapter->rx_ring[i]);
4256 if (err) {
4257 dev_err(&pdev->dev,
4258 "Allocation for Rx Queue %u failed\n", i);
4259 for (i--; i >= 0; i--)
4260 igb_free_rx_resources(adapter->rx_ring[i]);
4261 break;
4262 }
4263 }
4264
4265 return err;
4266}
4267
4268/**
4269 *  igb_setup_mrqc - configure the multiple receive queue control registers
4270 *  @adapter: Board private structure
4271 **/
4272static void igb_setup_mrqc(struct igb_adapter *adapter)
4273{
4274 struct e1000_hw *hw = &adapter->hw;
4275 u32 mrqc, rxcsum;
4276 u32 j, num_rx_queues;
4277 u32 rss_key[10];
4278
4279 netdev_rss_key_fill(rss_key, sizeof(rss_key));
4280 for (j = 0; j < 10; j++)
4281 wr32(E1000_RSSRK(j), rss_key[j]);
4282
4283 num_rx_queues = adapter->rss_queues;
4284
4285 switch (hw->mac.type) {
4286 case e1000_82576:
4287
4288 if (adapter->vfs_allocated_count)
4289 num_rx_queues = 2;
4290 break;
4291 default:
4292 break;
4293 }
4294
4295 if (adapter->rss_indir_tbl_init != num_rx_queues) {
4296 for (j = 0; j < IGB_RETA_SIZE; j++)
4297 adapter->rss_indir_tbl[j] =
4298 (j * num_rx_queues) / IGB_RETA_SIZE;
4299 adapter->rss_indir_tbl_init = num_rx_queues;
4300 }
4301 igb_write_rss_indir_tbl(adapter);
4302
4303 /* Disable raw packet checksumming so that the RSS hash is placed
4304  * in the descriptor on writeback.  No need to enable TCP/UDP/IP
4305  * checksum offloads as they are enabled by default.
4306  */
4307 rxcsum = rd32(E1000_RXCSUM);
4308 rxcsum |= E1000_RXCSUM_PCSD;
4309
4310 if (adapter->hw.mac.type >= e1000_82576)
4311
4312 rxcsum |= E1000_RXCSUM_CRCOFL;
4313
4314
4315 wr32(E1000_RXCSUM, rxcsum);
4316
4317
4318
4319
4320 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
4321 E1000_MRQC_RSS_FIELD_IPV4_TCP |
4322 E1000_MRQC_RSS_FIELD_IPV6 |
4323 E1000_MRQC_RSS_FIELD_IPV6_TCP |
4324 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
4325
4326 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
4327 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
4328 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
4329 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
4330
4331 /* If VMDq is enabled then we set the appropriate mode for that,
4332  * else we default to RSS so that an RSS hash is calculated per
4333  * packet even if we are only using one queue.
4334  */
4335 if (adapter->vfs_allocated_count) {
4336 if (hw->mac.type > e1000_82575) {
4337
4338 u32 vtctl = rd32(E1000_VT_CTL);
4339
4340 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
4341 E1000_VT_CTL_DISABLE_DEF_POOL);
4342 vtctl |= adapter->vfs_allocated_count <<
4343 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
4344 wr32(E1000_VT_CTL, vtctl);
4345 }
4346 if (adapter->rss_queues > 1)
4347 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
4348 else
4349 mrqc |= E1000_MRQC_ENABLE_VMDQ;
4350 } else {
4351 if (hw->mac.type != e1000_i211)
4352 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4353 }
4354 igb_vmm_control(adapter);
4355
4356 wr32(E1000_MRQC, mrqc);
4357}
4358
4359/**
4360 *  igb_setup_rctl - configure the receive control registers
4361 *  @adapter: Board private structure
4362 **/
4363void igb_setup_rctl(struct igb_adapter *adapter)
4364{
4365 struct e1000_hw *hw = &adapter->hw;
4366 u32 rctl;
4367
4368 rctl = rd32(E1000_RCTL);
4369
4370 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4371 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4372
4373 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4374 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4375
4376 /* enable stripping of the CRC.  Newer features require that the
4377  * hardware strips the CRC, and it is unlikely this will break BMC
4378  * redirection as it did with e1000.
4379  */
4380 rctl |= E1000_RCTL_SECRC;
4381
4382
4383 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
4384
4385
4386 rctl |= E1000_RCTL_LPE;
4387
4388
4389 wr32(E1000_RXDCTL(0), 0);
4390
4391 /* For SR-IOV, queue drop must be enabled on all VF and PF queues
4392  * to prevent head of line blocking if an untrusted VF does not
4393  * provide descriptors to the hardware.
4394  */
4395 if (adapter->vfs_allocated_count) {
4396
4397 wr32(E1000_QDE, ALL_QUEUES);
4398 }
4399
4400
4401 if (adapter->netdev->features & NETIF_F_RXALL) {
4402
4403
4404
4405 rctl |= (E1000_RCTL_SBP |
4406 E1000_RCTL_BAM |
4407 E1000_RCTL_PMCF);
4408
4409 rctl &= ~(E1000_RCTL_DPF |
4410 E1000_RCTL_CFIEN);
4411
4412
4413
4414 }
4415
4416 wr32(E1000_RCTL, rctl);
4417}
4418
4419static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4420 int vfn)
4421{
4422 struct e1000_hw *hw = &adapter->hw;
4423 u32 vmolr;
4424
4425 if (size > MAX_JUMBO_FRAME_SIZE)
4426 size = MAX_JUMBO_FRAME_SIZE;
4427
4428 vmolr = rd32(E1000_VMOLR(vfn));
4429 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4430 vmolr |= size | E1000_VMOLR_LPE;
4431 wr32(E1000_VMOLR(vfn), vmolr);
4432
4433 return 0;
4434}
4435
4436static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4437 int vfn, bool enable)
4438{
4439 struct e1000_hw *hw = &adapter->hw;
4440 u32 val, reg;
4441
4442 if (hw->mac.type < e1000_82576)
4443 return;
4444
4445 if (hw->mac.type == e1000_i350)
4446 reg = E1000_DVMOLR(vfn);
4447 else
4448 reg = E1000_VMOLR(vfn);
4449
4450 val = rd32(reg);
4451 if (enable)
4452 val |= E1000_VMOLR_STRVLAN;
4453 else
4454 val &= ~(E1000_VMOLR_STRVLAN);
4455 wr32(reg, val);
4456}
4457
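/* igb_set_vmolr - program the VM offload register for one pool: optionally
 * accept untagged frames (@aupe), enable RSS only on the default PF pool,
 * and accept broadcasts on the PF and VF pools.
 */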
4458static inline void igb_set_vmolr(struct igb_adapter *adapter,
4459 int vfn, bool aupe)
4460{
4461 struct e1000_hw *hw = &adapter->hw;
4462 u32 vmolr;
4463
4464 /* The VMOLR register exists only on 82576 and newer, so exit and
4465  * do nothing on older devices
4466  */
4467 if (hw->mac.type < e1000_82576)
4468 return;
4469
4470 vmolr = rd32(E1000_VMOLR(vfn));
4471 if (aupe)
4472 vmolr |= E1000_VMOLR_AUPE;
4473 else
4474 vmolr &= ~(E1000_VMOLR_AUPE);
4475
4476
4477 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4478
4479 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
4480 vmolr |= E1000_VMOLR_RSSE;
4481
4482
4483
4484 if (vfn <= adapter->vfs_allocated_count)
4485 vmolr |= E1000_VMOLR_BAM;
4486
4487 wr32(E1000_VMOLR(vfn), vmolr);
4488}
4489
4490/**
4491 *  igb_configure_rx_ring - Configure a receive ring after Reset
4492 *  @adapter: board private structure
4493 *  @ring: receive ring to be configured
4494 *
4495 *  Configure the Rx unit of the MAC after a reset.
4496 **/
4497void igb_configure_rx_ring(struct igb_adapter *adapter,
4498 struct igb_ring *ring)
4499{
4500 struct e1000_hw *hw = &adapter->hw;
4501 union e1000_adv_rx_desc *rx_desc;
4502 u64 rdba = ring->dma;
4503 int reg_idx = ring->reg_idx;
4504 u32 srrctl = 0, rxdctl = 0;
4505
4506
4507 wr32(E1000_RXDCTL(reg_idx), 0);
4508
4509
4510 wr32(E1000_RDBAL(reg_idx),
4511 rdba & 0x00000000ffffffffULL);
4512 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4513 wr32(E1000_RDLEN(reg_idx),
4514 ring->count * sizeof(union e1000_adv_rx_desc));
4515
4516
4517 ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
4518 wr32(E1000_RDH(reg_idx), 0);
4519 writel(0, ring->tail);
4520
4521
4522 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4523 if (ring_uses_large_buffer(ring))
4524 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4525 else
4526 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4527 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4528 if (hw->mac.type >= e1000_82580)
4529 srrctl |= E1000_SRRCTL_TIMESTAMP;
4530
4531 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
4532 srrctl |= E1000_SRRCTL_DROP_EN;
4533
4534 wr32(E1000_SRRCTL(reg_idx), srrctl);
4535
4536
4537 igb_set_vmolr(adapter, reg_idx & 0x7, true);
4538
4539 rxdctl |= IGB_RX_PTHRESH;
4540 rxdctl |= IGB_RX_HTHRESH << 8;
4541 rxdctl |= IGB_RX_WTHRESH << 16;
4542
4543
4544 memset(ring->rx_buffer_info, 0,
4545 sizeof(struct igb_rx_buffer) * ring->count);
4546
4547
4548 rx_desc = IGB_RX_DESC(ring, 0);
4549 rx_desc->wb.upper.length = 0;
4550
4551
4552 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4553 wr32(E1000_RXDCTL(reg_idx), rxdctl);
4554}
4555
4556static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4557 struct igb_ring *rx_ring)
4558{
4559
4560 clear_ring_build_skb_enabled(rx_ring);
4561 clear_ring_uses_large_buffer(rx_ring);
4562
4563 if (adapter->flags & IGB_FLAG_RX_LEGACY)
4564 return;
4565
4566 set_ring_build_skb_enabled(rx_ring);
4567
4568#if (PAGE_SIZE < 8192)
4569 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4570 return;
4571
4572 set_ring_uses_large_buffer(rx_ring);
4573#endif
4574}
4575
4576/**
4577 *  igb_configure_rx - Configure receive Unit after Reset
4578 *  @adapter: board private structure
4579 *
4580 *  Configure the Rx unit of the MAC after a reset.
4581 **/
4582static void igb_configure_rx(struct igb_adapter *adapter)
4583{
4584 int i;
4585
4586
4587 igb_set_default_mac_filter(adapter);
4588
4589
4590
4591
4592 for (i = 0; i < adapter->num_rx_queues; i++) {
4593 struct igb_ring *rx_ring = adapter->rx_ring[i];
4594
4595 igb_set_rx_buffer_len(adapter, rx_ring);
4596 igb_configure_rx_ring(adapter, rx_ring);
4597 }
4598}
4599
4600/**
4601 *  igb_free_tx_resources - Free Tx Resources per Queue
4602 *  @tx_ring: Tx descriptor ring for a specific queue
4603 *
4604 *  Free all transmit software resources
4605 **/
4606void igb_free_tx_resources(struct igb_ring *tx_ring)
4607{
4608 igb_clean_tx_ring(tx_ring);
4609
4610 vfree(tx_ring->tx_buffer_info);
4611 tx_ring->tx_buffer_info = NULL;
4612
4613
4614 if (!tx_ring->desc)
4615 return;
4616
4617 dma_free_coherent(tx_ring->dev, tx_ring->size,
4618 tx_ring->desc, tx_ring->dma);
4619
4620 tx_ring->desc = NULL;
4621}
4622
4623/**
4624 *  igb_free_all_tx_resources - Free Tx Resources for All Queues
4625 *  @adapter: board private structure
4626 *
4627 *  Free all transmit software resources
4628 **/
4629static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4630{
4631 int i;
4632
4633 for (i = 0; i < adapter->num_tx_queues; i++)
4634 if (adapter->tx_ring[i])
4635 igb_free_tx_resources(adapter->tx_ring[i]);
4636}
4637
4638/**
4639 *  igb_clean_tx_ring - Free Tx Buffers
4640 *  @tx_ring: ring to be cleaned
4641 **/
4642static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4643{
4644 u16 i = tx_ring->next_to_clean;
4645 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4646
4647 while (i != tx_ring->next_to_use) {
4648 union e1000_adv_tx_desc *eop_desc, *tx_desc;
4649
4650
4651 dev_kfree_skb_any(tx_buffer->skb);
4652
4653
4654 dma_unmap_single(tx_ring->dev,
4655 dma_unmap_addr(tx_buffer, dma),
4656 dma_unmap_len(tx_buffer, len),
4657 DMA_TO_DEVICE);
4658
4659
4660 eop_desc = tx_buffer->next_to_watch;
4661 tx_desc = IGB_TX_DESC(tx_ring, i);
4662
4663
4664 while (tx_desc != eop_desc) {
4665 tx_buffer++;
4666 tx_desc++;
4667 i++;
4668 if (unlikely(i == tx_ring->count)) {
4669 i = 0;
4670 tx_buffer = tx_ring->tx_buffer_info;
4671 tx_desc = IGB_TX_DESC(tx_ring, 0);
4672 }
4673
4674
4675 if (dma_unmap_len(tx_buffer, len))
4676 dma_unmap_page(tx_ring->dev,
4677 dma_unmap_addr(tx_buffer, dma),
4678 dma_unmap_len(tx_buffer, len),
4679 DMA_TO_DEVICE);
4680 }
4681
4682
4683 tx_buffer++;
4684 i++;
4685 if (unlikely(i == tx_ring->count)) {
4686 i = 0;
4687 tx_buffer = tx_ring->tx_buffer_info;
4688 }
4689 }
4690
4691
4692 netdev_tx_reset_queue(txring_txq(tx_ring));
4693
4694
4695 tx_ring->next_to_use = 0;
4696 tx_ring->next_to_clean = 0;
4697}
4698
4699/**
4700 *  igb_clean_all_tx_rings - Free Tx Buffers for all queues
4701 *  @adapter: board private structure
4702 **/
4703static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4704{
4705 int i;
4706
4707 for (i = 0; i < adapter->num_tx_queues; i++)
4708 if (adapter->tx_ring[i])
4709 igb_clean_tx_ring(adapter->tx_ring[i]);
4710}
4711
4712/**
4713 *  igb_free_rx_resources - Free Rx Resources
4714 *  @rx_ring: ring to clean the resources from
4715 *
4716 *  Free all receive software resources
4717 **/
4718void igb_free_rx_resources(struct igb_ring *rx_ring)
4719{
4720 igb_clean_rx_ring(rx_ring);
4721
4722 vfree(rx_ring->rx_buffer_info);
4723 rx_ring->rx_buffer_info = NULL;
4724
4725
4726 if (!rx_ring->desc)
4727 return;
4728
4729 dma_free_coherent(rx_ring->dev, rx_ring->size,
4730 rx_ring->desc, rx_ring->dma);
4731
4732 rx_ring->desc = NULL;
4733}
4734
4735/**
4736 *  igb_free_all_rx_resources - Free Rx Resources for All Queues
4737 *  @adapter: board private structure
4738 *
4739 *  Free all receive software resources
4740 **/
4741static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4742{
4743 int i;
4744
4745 for (i = 0; i < adapter->num_rx_queues; i++)
4746 if (adapter->rx_ring[i])
4747 igb_free_rx_resources(adapter->rx_ring[i]);
4748}
4749
4750/**
4751 *  igb_clean_rx_ring - Free Rx Buffers per Queue
4752 *  @rx_ring: ring to free buffers from
4753 **/
4754static void igb_clean_rx_ring(struct igb_ring *rx_ring)
4755{
4756 u16 i = rx_ring->next_to_clean;
4757
4758 if (rx_ring->skb)
4759 dev_kfree_skb(rx_ring->skb);
4760 rx_ring->skb = NULL;
4761
4762
4763 while (i != rx_ring->next_to_alloc) {
4764 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
4765
4766 /* Invalidate cache lines that may have been written to by the
4767  * device so that we avoid corrupting memory.
4768  */
4769 dma_sync_single_range_for_cpu(rx_ring->dev,
4770 buffer_info->dma,
4771 buffer_info->page_offset,
4772 igb_rx_bufsz(rx_ring),
4773 DMA_FROM_DEVICE);
4774
4775
4776 dma_unmap_page_attrs(rx_ring->dev,
4777 buffer_info->dma,
4778 igb_rx_pg_size(rx_ring),
4779 DMA_FROM_DEVICE,
4780 IGB_RX_DMA_ATTR);
4781 __page_frag_cache_drain(buffer_info->page,
4782 buffer_info->pagecnt_bias);
4783
4784 i++;
4785 if (i == rx_ring->count)
4786 i = 0;
4787 }
4788
4789 rx_ring->next_to_alloc = 0;
4790 rx_ring->next_to_clean = 0;
4791 rx_ring->next_to_use = 0;
4792}
4793
4794/**
4795 *  igb_clean_all_rx_rings - Free Rx Buffers for all queues
4796 *  @adapter: board private structure
4797 **/
4798static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4799{
4800 int i;
4801
4802 for (i = 0; i < adapter->num_rx_queues; i++)
4803 if (adapter->rx_ring[i])
4804 igb_clean_rx_ring(adapter->rx_ring[i]);
4805}
4806
4807/**
4808 *  igb_set_mac - Change the Ethernet Address of the NIC
4809 *  @netdev: network interface device structure
4810 *  @p: pointer to an address structure
4811 *
4812 *  Returns 0 on success, negative on failure
4813 **/
4814static int igb_set_mac(struct net_device *netdev, void *p)
4815{
4816 struct igb_adapter *adapter = netdev_priv(netdev);
4817 struct e1000_hw *hw = &adapter->hw;
4818 struct sockaddr *addr = p;
4819
4820 if (!is_valid_ether_addr(addr->sa_data))
4821 return -EADDRNOTAVAIL;
4822
4823 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4824 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
4825
4826
4827 igb_set_default_mac_filter(adapter);
4828
4829 return 0;
4830}
4831
4832/**
4833 *  igb_write_mc_addr_list - write multicast addresses to MTA
4834 *  @netdev: network interface device structure
4835 *
4836 *  Writes multicast address list to the MTA hash table.
4837 *  Returns: -ENOMEM on failure
4838 *           0 on no addresses written
4839 *           X on writing X addresses to MTA
4840 **/
4841static int igb_write_mc_addr_list(struct net_device *netdev)
4842{
4843 struct igb_adapter *adapter = netdev_priv(netdev);
4844 struct e1000_hw *hw = &adapter->hw;
4845 struct netdev_hw_addr *ha;
4846 u8 *mta_list;
4847 int i;
4848
4849 if (netdev_mc_empty(netdev)) {
4850
4851 igb_update_mc_addr_list(hw, NULL, 0);
4852 igb_restore_vf_multicasts(adapter);
4853 return 0;
4854 }
4855
	mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
4857 if (!mta_list)
4858 return -ENOMEM;
4859
4860
4861 i = 0;
4862 netdev_for_each_mc_addr(ha, netdev)
4863 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
4864
4865 igb_update_mc_addr_list(hw, mta_list, i);
4866 kfree(mta_list);
4867
4868 return netdev_mc_count(netdev);
4869}
4870
4871static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
4872{
4873 struct e1000_hw *hw = &adapter->hw;
4874 u32 i, pf_id;
4875
4876 switch (hw->mac.type) {
4877 case e1000_i210:
4878 case e1000_i211:
4879 case e1000_i350:
4880
4881 if (adapter->netdev->features & NETIF_F_NTUPLE)
4882 break;
4883
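		/* fall through */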
4884 case e1000_82576:
4885 case e1000_82580:
4886 case e1000_i354:
4887
4888 if (adapter->vfs_allocated_count)
4889 break;
4890
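		/* fall through */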
4891 default:
4892 return 1;
4893 }
4894
4895
4896 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
4897 return 0;
4898
4899 if (!adapter->vfs_allocated_count)
4900 goto set_vfta;
4901
4902
4903 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4904
4905 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4906 u32 vlvf = rd32(E1000_VLVF(i));
4907
4908 vlvf |= BIT(pf_id);
4909 wr32(E1000_VLVF(i), vlvf);
4910 }
4911
4912set_vfta:
4913
4914 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
4915 hw->mac.ops.write_vfta(hw, i, ~0U);
4916
4917
4918 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
4919
4920 return 0;
4921}
4922
4923#define VFTA_BLOCK_SIZE 8
4924static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
4925{
4926 struct e1000_hw *hw = &adapter->hw;
4927 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4928 u32 vid_start = vfta_offset * 32;
4929 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4930 u32 i, vid, word, bits, pf_id;
4931
4932
4933 vid = adapter->mng_vlan_id;
4934 if (vid >= vid_start && vid < vid_end)
4935 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4936
4937 if (!adapter->vfs_allocated_count)
4938 goto set_vfta;
4939
4940 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4941
4942 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4943 u32 vlvf = rd32(E1000_VLVF(i));
4944
4945
4946 vid = vlvf & VLAN_VID_MASK;
4947
4948
4949 if (vid < vid_start || vid >= vid_end)
4950 continue;
4951
4952 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
4953
4954 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4955
4956
4957 if (test_bit(vid, adapter->active_vlans))
4958 continue;
4959 }
4960
4961
4962 bits = ~BIT(pf_id);
4963 bits &= rd32(E1000_VLVF(i));
4964 wr32(E1000_VLVF(i), bits);
4965 }
4966
4967set_vfta:
4968
4969 for (i = VFTA_BLOCK_SIZE; i--;) {
4970 vid = (vfta_offset + i) * 32;
4971 word = vid / BITS_PER_LONG;
4972 bits = vid % BITS_PER_LONG;
4973
4974 vfta[i] |= adapter->active_vlans[word] >> bits;
4975
4976 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
4977 }
4978}
4979
4980static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
4981{
4982 u32 i;
4983
4984
4985 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
4986 return;
4987
4988
4989 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
4990
4991 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
4992 igb_scrub_vfta(adapter, i);
4993}
5003
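/**
 *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 *  @netdev: network interface device structure
 *
 *  The set_rx_mode entry point is called whenever the unicast or multicast
 *  address lists or the network interface flags are updated.  This routine
 *  is responsible for configuring the hardware for proper unicast,
 *  multicast, promiscuous mode, and VLAN filtering.
 **/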
5004static void igb_set_rx_mode(struct net_device *netdev)
5005{
5006 struct igb_adapter *adapter = netdev_priv(netdev);
5007 struct e1000_hw *hw = &adapter->hw;
5008 unsigned int vfn = adapter->vfs_allocated_count;
5009 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
5010 int count;
5011
5012
5013 if (netdev->flags & IFF_PROMISC) {
5014 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
5015 vmolr |= E1000_VMOLR_MPME;
5016
5017
5018 if (hw->mac.type == e1000_82576)
5019 vmolr |= E1000_VMOLR_ROPE;
5020 } else {
5021 if (netdev->flags & IFF_ALLMULTI) {
5022 rctl |= E1000_RCTL_MPE;
5023 vmolr |= E1000_VMOLR_MPME;
5024 } else {
5025
5026
5027
5028
5029 count = igb_write_mc_addr_list(netdev);
5030 if (count < 0) {
5031 rctl |= E1000_RCTL_MPE;
5032 vmolr |= E1000_VMOLR_MPME;
5033 } else if (count) {
5034 vmolr |= E1000_VMOLR_ROMPE;
5035 }
5036 }
5037 }
5038
5039
5040
5041
5042
5043 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
5044 rctl |= E1000_RCTL_UPE;
5045 vmolr |= E1000_VMOLR_ROPE;
5046 }
5047
5048
5049 rctl |= E1000_RCTL_VFE;
5050
5051
5052 if ((netdev->flags & IFF_PROMISC) ||
5053 (netdev->features & NETIF_F_RXALL)) {
5054
5055 if (igb_vlan_promisc_enable(adapter))
5056 rctl &= ~E1000_RCTL_VFE;
5057 } else {
5058 igb_vlan_promisc_disable(adapter);
5059 }
5060
5061
5062 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
5063 E1000_RCTL_VFE);
5064 wr32(E1000_RCTL, rctl);
5065
5066#if (PAGE_SIZE < 8192)
5067 if (!adapter->vfs_allocated_count) {
5068 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5069 rlpml = IGB_MAX_FRAME_BUILD_SKB;
5070 }
5071#endif
5072 wr32(E1000_RLPML, rlpml);
5073
5074
5075
5076
5077
5078
5079 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
5080 return;
5081
5082
5083 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
5084
5085 vmolr |= rd32(E1000_VMOLR(vfn)) &
5086 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
5087
5088
5089 vmolr &= ~E1000_VMOLR_RLPML_MASK;
5090#if (PAGE_SIZE < 8192)
5091 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5092 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
5093 else
5094#endif
5095 vmolr |= MAX_JUMBO_FRAME_SIZE;
5096 vmolr |= E1000_VMOLR_LPE;
5097
5098 wr32(E1000_VMOLR(vfn), vmolr);
5099
5100 igb_restore_vf_multicasts(adapter);
5101}
5102
5103static void igb_check_wvbr(struct igb_adapter *adapter)
5104{
5105 struct e1000_hw *hw = &adapter->hw;
5106 u32 wvbr = 0;
5107
5108 switch (hw->mac.type) {
5109 case e1000_82576:
5110 case e1000_i350:
5111 wvbr = rd32(E1000_WVBR);
5112 if (!wvbr)
5113 return;
5114 break;
5115 default:
5116 break;
5117 }
5118
5119 adapter->wvbr |= wvbr;
5120}
5121
5122#define IGB_STAGGERED_QUEUE_OFFSET 8
5123
5124static void igb_spoof_check(struct igb_adapter *adapter)
5125{
5126 int j;
5127
5128 if (!adapter->wvbr)
5129 return;
5130
5131 for (j = 0; j < adapter->vfs_allocated_count; j++) {
5132 if (adapter->wvbr & BIT(j) ||
5133 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
5134 dev_warn(&adapter->pdev->dev,
5135 "Spoof event(s) detected on VF %d\n", j);
5136 adapter->wvbr &=
5137 ~(BIT(j) |
5138 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
5139 }
5140 }
5141}
5145
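/**
 *  igb_update_phy_info - timer callback to refresh cached PHY information
 *  @t: pointer to the timer_list embedded in our adapter structure
 **/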
5146static void igb_update_phy_info(struct timer_list *t)
5147{
5148 struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5149 igb_get_phy_info(&adapter->hw);
5150}
5155
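/**
 *  igb_has_link - check shared code for link and determine up/down
 *  @adapter: pointer to driver private info
 **/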
5156bool igb_has_link(struct igb_adapter *adapter)
5157{
5158 struct e1000_hw *hw = &adapter->hw;
5159 bool link_active = false;
5160
5161
5162
5163
5164
5165
5166 switch (hw->phy.media_type) {
5167 case e1000_media_type_copper:
5168 if (!hw->mac.get_link_status)
5169 return true;
5170
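		/* fall through */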
5171 case e1000_media_type_internal_serdes:
5172 hw->mac.ops.check_for_link(hw);
5173 link_active = !hw->mac.get_link_status;
5174 break;
5175 default:
5176 case e1000_media_type_unknown:
5177 break;
5178 }
5179
5180 if (((hw->mac.type == e1000_i210) ||
5181 (hw->mac.type == e1000_i211)) &&
5182 (hw->phy.id == I210_I_PHY_ID)) {
5183 if (!netif_carrier_ok(adapter->netdev)) {
5184 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5185 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
5186 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
5187 adapter->link_check_timeout = jiffies;
5188 }
5189 }
5190
5191 return link_active;
5192}
5193
5194static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
5195{
5196 bool ret = false;
5197 u32 ctrl_ext, thstat;
5198
5199
5200 if (hw->mac.type == e1000_i350) {
5201 thstat = rd32(E1000_THSTAT);
5202 ctrl_ext = rd32(E1000_CTRL_EXT);
5203
5204 if ((hw->phy.media_type == e1000_media_type_copper) &&
5205 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
5206 ret = !!(thstat & event);
5207 }
5208
5209 return ret;
5210}
5216
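/**
 *  igb_check_lvmmc - check for malformed Tx packets flagged in LVMMC
 *  @adapter: pointer to adapter
 **/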
5217static void igb_check_lvmmc(struct igb_adapter *adapter)
5218{
5219 struct e1000_hw *hw = &adapter->hw;
5220 u32 lvmmc;
5221
5222 lvmmc = rd32(E1000_LVMMC);
5223 if (lvmmc) {
5224 if (unlikely(net_ratelimit())) {
5225 netdev_warn(adapter->netdev,
5226 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
5227 lvmmc);
5228 }
5229 }
5230}
5235
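/**
 *  igb_watchdog - Timer Call-back
 *  @t: pointer to timer_list containing our private info pointer
 **/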
5236static void igb_watchdog(struct timer_list *t)
5237{
5238 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5239
5240 schedule_work(&adapter->watchdog_task);
5241}
5242
5243static void igb_watchdog_task(struct work_struct *work)
5244{
5245 struct igb_adapter *adapter = container_of(work,
5246 struct igb_adapter,
5247 watchdog_task);
5248 struct e1000_hw *hw = &adapter->hw;
5249 struct e1000_phy_info *phy = &hw->phy;
5250 struct net_device *netdev = adapter->netdev;
5251 u32 link;
5252 int i;
5253 u32 connsw;
5254 u16 phy_data, retry_count = 20;
5255
5256 link = igb_has_link(adapter);
5257
5258 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
5259 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5260 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5261 else
5262 link = false;
5263 }
5264
5265
5266 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5267 if (hw->phy.media_type == e1000_media_type_copper) {
5268 connsw = rd32(E1000_CONNSW);
5269 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
5270 link = 0;
5271 }
5272 }
5273 if (link) {
5274
5275 if (hw->dev_spec._82575.media_changed) {
5276 hw->dev_spec._82575.media_changed = false;
5277 adapter->flags |= IGB_FLAG_MEDIA_RESET;
5278 igb_reset(adapter);
5279 }
5280
5281 pm_runtime_resume(netdev->dev.parent);
5282
5283 if (!netif_carrier_ok(netdev)) {
5284 u32 ctrl;
5285
5286 hw->mac.ops.get_speed_and_duplex(hw,
5287 &adapter->link_speed,
5288 &adapter->link_duplex);
5289
5290 ctrl = rd32(E1000_CTRL);
5291
5292 netdev_info(netdev,
5293 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5294 netdev->name,
5295 adapter->link_speed,
5296 adapter->link_duplex == FULL_DUPLEX ?
5297 "Full" : "Half",
5298 (ctrl & E1000_CTRL_TFCE) &&
5299 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
5300 (ctrl & E1000_CTRL_RFCE) ? "RX" :
5301 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
5302
5303
5304 if ((adapter->flags & IGB_FLAG_EEE) &&
5305 (adapter->link_duplex == HALF_DUPLEX)) {
5306 dev_info(&adapter->pdev->dev,
5307 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
5308 adapter->hw.dev_spec._82575.eee_disable = true;
5309 adapter->flags &= ~IGB_FLAG_EEE;
5310 }
5311
5312
5313 igb_check_downshift(hw);
5314 if (phy->speed_downgraded)
5315 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5316
5317
5318 if (igb_thermal_sensor_event(hw,
5319 E1000_THSTAT_LINK_THROTTLE))
5320 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
5321
5322
5323 adapter->tx_timeout_factor = 1;
5324 switch (adapter->link_speed) {
5325 case SPEED_10:
5326 adapter->tx_timeout_factor = 14;
5327 break;
5328 case SPEED_100:
5329
5330 break;
5331 }
5332
5333 if (adapter->link_speed != SPEED_1000)
5334 goto no_wait;
5335
5336
5337retry_read_status:
5338 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
5339 &phy_data)) {
5340 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5341 retry_count) {
5342 msleep(100);
5343 retry_count--;
5344 goto retry_read_status;
5345 } else if (!retry_count) {
					dev_err(&adapter->pdev->dev, "exceeded 2 second wait for remote receiver status\n");
5347 }
5348 } else {
				dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status register\n");
5350 }
5351no_wait:
5352 netif_carrier_on(netdev);
5353
5354 igb_ping_all_vfs(adapter);
5355 igb_check_vf_rate_limit(adapter);
5356
5357
5358 if (!test_bit(__IGB_DOWN, &adapter->state))
5359 mod_timer(&adapter->phy_info_timer,
5360 round_jiffies(jiffies + 2 * HZ));
5361 }
5362 } else {
5363 if (netif_carrier_ok(netdev)) {
5364 adapter->link_speed = 0;
5365 adapter->link_duplex = 0;
5366
5367
5368 if (igb_thermal_sensor_event(hw,
5369 E1000_THSTAT_PWR_DOWN)) {
5370 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5371 }
5372
5373
5374 netdev_info(netdev, "igb: %s NIC Link is Down\n",
5375 netdev->name);
5376 netif_carrier_off(netdev);
5377
5378 igb_ping_all_vfs(adapter);
5379
5380
5381 if (!test_bit(__IGB_DOWN, &adapter->state))
5382 mod_timer(&adapter->phy_info_timer,
5383 round_jiffies(jiffies + 2 * HZ));
5384
5385
5386 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5387 igb_check_swap_media(adapter);
5388 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5389 schedule_work(&adapter->reset_task);
5390
5391 return;
5392 }
5393 }
5394 pm_schedule_suspend(netdev->dev.parent,
5395 MSEC_PER_SEC * 5);
5396
5397
5398 } else if (!netif_carrier_ok(netdev) &&
5399 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5400 igb_check_swap_media(adapter);
5401 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5402 schedule_work(&adapter->reset_task);
5403
5404 return;
5405 }
5406 }
5407 }
5408
5409 spin_lock(&adapter->stats64_lock);
5410 igb_update_stats(adapter);
5411 spin_unlock(&adapter->stats64_lock);
5412
5413 for (i = 0; i < adapter->num_tx_queues; i++) {
5414 struct igb_ring *tx_ring = adapter->tx_ring[i];
5415 if (!netif_carrier_ok(netdev)) {
5416
5417
5418
5419
5420
5421 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5422 adapter->tx_timeout_count++;
5423 schedule_work(&adapter->reset_task);
5424
5425 return;
5426 }
5427 }
5428
5429
5430 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5431 }
5432
5433
5434 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5435 u32 eics = 0;
5436
5437 for (i = 0; i < adapter->num_q_vectors; i++)
5438 eics |= adapter->q_vector[i]->eims_value;
5439 wr32(E1000_EICS, eics);
5440 } else {
5441 wr32(E1000_ICS, E1000_ICS_RXDMT0);
5442 }
5443
5444 igb_spoof_check(adapter);
5445 igb_ptp_rx_hang(adapter);
5446 igb_ptp_tx_hang(adapter);
5447
5448
5449 if ((adapter->hw.mac.type == e1000_i350) ||
5450 (adapter->hw.mac.type == e1000_i354))
5451 igb_check_lvmmc(adapter);
5452
5453
5454 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5455 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5456 mod_timer(&adapter->watchdog_timer,
5457 round_jiffies(jiffies + HZ));
5458 else
5459 mod_timer(&adapter->watchdog_timer,
5460 round_jiffies(jiffies + 2 * HZ));
5461 }
5462}
5463
5464enum latency_range {
5465 lowest_latency = 0,
5466 low_latency = 1,
5467 bulk_latency = 2,
5468 latency_invalid = 255
5469};
5485
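/**
 *  igb_update_ring_itr - update the dynamic ITR value based on packet size
 *  @q_vector: pointer to q_vector
 *
 *  Stores a new ITR value based strictly on packet size.  The average wire
 *  size is computed from the Rx and Tx byte/packet counters, then scaled
 *  and clamped into a new interrupt rate so that small packets get low
 *  latency while larger packets favor bulk throughput.  This functionality
 *  is controlled by ethtool's coalescing settings.
 **/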
5486static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5487{
5488 int new_val = q_vector->itr_val;
5489 int avg_wire_size = 0;
5490 struct igb_adapter *adapter = q_vector->adapter;
5491 unsigned int packets;
5492
5493
5494
5495
5496 if (adapter->link_speed != SPEED_1000) {
5497 new_val = IGB_4K_ITR;
5498 goto set_itr_val;
5499 }
5500
5501 packets = q_vector->rx.total_packets;
5502 if (packets)
5503 avg_wire_size = q_vector->rx.total_bytes / packets;
5504
5505 packets = q_vector->tx.total_packets;
5506 if (packets)
5507 avg_wire_size = max_t(u32, avg_wire_size,
5508 q_vector->tx.total_bytes / packets);
5509
5510
5511 if (!avg_wire_size)
5512 goto clear_counts;
5513
5514
5515 avg_wire_size += 24;
5516
5517
5518 avg_wire_size = min(avg_wire_size, 3000);
5519
5520
5521 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5522 new_val = avg_wire_size / 3;
5523 else
5524 new_val = avg_wire_size / 2;
5525
5526
5527 if (new_val < IGB_20K_ITR &&
5528 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5529 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5530 new_val = IGB_20K_ITR;
5531
5532set_itr_val:
5533 if (new_val != q_vector->itr_val) {
5534 q_vector->itr_val = new_val;
5535 q_vector->set_itr = 1;
5536 }
5537clear_counts:
5538 q_vector->rx.total_bytes = 0;
5539 q_vector->rx.total_packets = 0;
5540 q_vector->tx.total_bytes = 0;
5541 q_vector->tx.total_packets = 0;
5542}
5559
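/**
 *  igb_update_itr - update the dynamic ITR value based on statistics
 *  @q_vector: pointer to q_vector
 *  @ring_container: ring info to update the itr for
 *
 *  Stores a new ITR latency class (lowest, low or bulk) based on the bytes
 *  and packets seen since the last invocation, then clears the counters for
 *  the next interval.  The thresholds favor latency for small or sparse
 *  traffic and throughput for large transfers.
 **/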
5560static void igb_update_itr(struct igb_q_vector *q_vector,
5561 struct igb_ring_container *ring_container)
5562{
5563 unsigned int packets = ring_container->total_packets;
5564 unsigned int bytes = ring_container->total_bytes;
5565 u8 itrval = ring_container->itr;
5566
5567
5568 if (packets == 0)
5569 return;
5570
5571 switch (itrval) {
5572 case lowest_latency:
5573
5574 if (bytes/packets > 8000)
5575 itrval = bulk_latency;
5576 else if ((packets < 5) && (bytes > 512))
5577 itrval = low_latency;
5578 break;
5579 case low_latency:
5580 if (bytes > 10000) {
5581
5582 if (bytes/packets > 8000)
5583 itrval = bulk_latency;
5584 else if ((packets < 10) || ((bytes/packets) > 1200))
5585 itrval = bulk_latency;
5586 else if ((packets > 35))
5587 itrval = lowest_latency;
5588 } else if (bytes/packets > 2000) {
5589 itrval = bulk_latency;
5590 } else if (packets <= 2 && bytes < 512) {
5591 itrval = lowest_latency;
5592 }
5593 break;
5594 case bulk_latency:
5595 if (bytes > 25000) {
5596 if (packets > 35)
5597 itrval = low_latency;
5598 } else if (bytes < 1500) {
5599 itrval = low_latency;
5600 }
5601 break;
5602 }
5603
5604
5605 ring_container->total_bytes = 0;
5606 ring_container->total_packets = 0;
5607
5608
5609 ring_container->itr = itrval;
5610}
5611
5612static void igb_set_itr(struct igb_q_vector *q_vector)
5613{
5614 struct igb_adapter *adapter = q_vector->adapter;
5615 u32 new_itr = q_vector->itr_val;
5616 u8 current_itr = 0;
5617
5618
5619 if (adapter->link_speed != SPEED_1000) {
5620 current_itr = 0;
5621 new_itr = IGB_4K_ITR;
5622 goto set_itr_now;
5623 }
5624
5625 igb_update_itr(q_vector, &q_vector->tx);
5626 igb_update_itr(q_vector, &q_vector->rx);
5627
5628 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5629
5630
5631 if (current_itr == lowest_latency &&
5632 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5633 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5634 current_itr = low_latency;
5635
5636 switch (current_itr) {
5637
5638 case lowest_latency:
5639 new_itr = IGB_70K_ITR;
5640 break;
5641 case low_latency:
5642 new_itr = IGB_20K_ITR;
5643 break;
5644 case bulk_latency:
5645 new_itr = IGB_4K_ITR;
5646 break;
5647 default:
5648 break;
5649 }
5650
5651set_itr_now:
5652 if (new_itr != q_vector->itr_val) {
5653
5654
5655
5656
5657 new_itr = new_itr > q_vector->itr_val ?
5658 max((new_itr * q_vector->itr_val) /
5659 (new_itr + (q_vector->itr_val >> 2)),
5660 new_itr) : new_itr;
5661
5662
5663
5664
5665
5666
5667 q_vector->itr_val = new_itr;
5668 q_vector->set_itr = 1;
5669 }
5670}
5671
5672static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5673 struct igb_tx_buffer *first,
5674 u32 vlan_macip_lens, u32 type_tucmd,
5675 u32 mss_l4len_idx)
5676{
5677 struct e1000_adv_tx_context_desc *context_desc;
5678 u16 i = tx_ring->next_to_use;
5679 struct timespec64 ts;
5680
5681 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5682
5683 i++;
5684 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5685
5686
5687 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5688
5689
5690 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5691 mss_l4len_idx |= tx_ring->reg_idx << 4;
5692
5693 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5694 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
5695 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5696
5697
5698
5699
5700 if (tx_ring->launchtime_enable) {
5701 ts = ns_to_timespec64(first->skb->tstamp);
5702 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5703 } else {
5704 context_desc->seqnum_seed = 0;
5705 }
5706}
5707
5708static int igb_tso(struct igb_ring *tx_ring,
5709 struct igb_tx_buffer *first,
5710 u8 *hdr_len)
5711{
5712 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5713 struct sk_buff *skb = first->skb;
5714 union {
5715 struct iphdr *v4;
5716 struct ipv6hdr *v6;
5717 unsigned char *hdr;
5718 } ip;
5719 union {
5720 struct tcphdr *tcp;
5721 unsigned char *hdr;
5722 } l4;
5723 u32 paylen, l4_offset;
5724 int err;
5725
5726 if (skb->ip_summed != CHECKSUM_PARTIAL)
5727 return 0;
5728
5729 if (!skb_is_gso(skb))
5730 return 0;
5731
5732 err = skb_cow_head(skb, 0);
5733 if (err < 0)
5734 return err;
5735
5736 ip.hdr = skb_network_header(skb);
5737 l4.hdr = skb_checksum_start(skb);
5738
5739
5740 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5741
5742
5743 if (ip.v4->version == 4) {
5744 unsigned char *csum_start = skb_checksum_start(skb);
5745 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
5746
5747
5748
5749
5750 ip.v4->check = csum_fold(csum_partial(trans_start,
5751 csum_start - trans_start,
5752 0));
5753 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
5754
5755 ip.v4->tot_len = 0;
5756 first->tx_flags |= IGB_TX_FLAGS_TSO |
5757 IGB_TX_FLAGS_CSUM |
5758 IGB_TX_FLAGS_IPV4;
5759 } else {
5760 ip.v6->payload_len = 0;
5761 first->tx_flags |= IGB_TX_FLAGS_TSO |
5762 IGB_TX_FLAGS_CSUM;
5763 }
5764
5765
5766 l4_offset = l4.hdr - skb->data;
5767
5768
5769 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
5770
5771
5772 paylen = skb->len - l4_offset;
5773 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
5774
5775
5776 first->gso_segs = skb_shinfo(skb)->gso_segs;
5777 first->bytecount += (first->gso_segs - 1) * *hdr_len;
5778
5779
5780 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
5781 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
5782
5783
5784 vlan_macip_lens = l4.hdr - ip.hdr;
5785 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
5786 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5787
5788 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
5789 type_tucmd, mss_l4len_idx);
5790
5791 return 1;
5792}
5793
5794static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
5795{
5796 unsigned int offset = 0;
5797
5798 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
5799
5800 return offset == skb_checksum_start_offset(skb);
5801}
5802
5803static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5804{
5805 struct sk_buff *skb = first->skb;
5806 u32 vlan_macip_lens = 0;
5807 u32 type_tucmd = 0;
5808
5809 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5810csum_failed:
5811 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
5812 !tx_ring->launchtime_enable)
5813 return;
5814 goto no_csum;
5815 }
5816
5817 switch (skb->csum_offset) {
5818 case offsetof(struct tcphdr, check):
5819 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5820
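		/* fall through */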
5821 case offsetof(struct udphdr, check):
5822 break;
5823 case offsetof(struct sctphdr, checksum):
5824
5825 if (((first->protocol == htons(ETH_P_IP)) &&
5826 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
5827 ((first->protocol == htons(ETH_P_IPV6)) &&
5828 igb_ipv6_csum_is_sctp(skb))) {
5829 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
5830 break;
5831 }
5832
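		/* fall through */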
5833 default:
5834 skb_checksum_help(skb);
5835 goto csum_failed;
5836 }
5837
5838
5839 first->tx_flags |= IGB_TX_FLAGS_CSUM;
5840 vlan_macip_lens = skb_checksum_start_offset(skb) -
5841 skb_network_offset(skb);
5842no_csum:
5843 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
5844 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5845
5846 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
5847}
5848
5849#define IGB_SET_FLAG(_input, _flag, _result) \
5850 ((_flag <= _result) ? \
5851 ((u32)(_input & _flag) * (_result / _flag)) : \
5852 ((u32)(_input & _flag) / (_flag / _result)))
5853
5854static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
5855{
5856
5857 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
5858 E1000_ADVTXD_DCMD_DEXT |
5859 E1000_ADVTXD_DCMD_IFCS;
5860
5861
5862 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
5863 (E1000_ADVTXD_DCMD_VLE));
5864
5865
5866 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
5867 (E1000_ADVTXD_DCMD_TSE));
5868
5869
5870 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
5871 (E1000_ADVTXD_MAC_TSTAMP));
5872
5873
5874 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
5875
5876 return cmd_type;
5877}
5878
5879static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
5880 union e1000_adv_tx_desc *tx_desc,
5881 u32 tx_flags, unsigned int paylen)
5882{
5883 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
5884
5885
5886 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5887 olinfo_status |= tx_ring->reg_idx << 4;
5888
5889
5890 olinfo_status |= IGB_SET_FLAG(tx_flags,
5891 IGB_TX_FLAGS_CSUM,
5892 (E1000_TXD_POPTS_TXSM << 8));
5893
5894
5895 olinfo_status |= IGB_SET_FLAG(tx_flags,
5896 IGB_TX_FLAGS_IPV4,
5897 (E1000_TXD_POPTS_IXSM << 8));
5898
5899 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
5900}
5901
5902static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5903{
5904 struct net_device *netdev = tx_ring->netdev;
5905
5906 netif_stop_subqueue(netdev, tx_ring->queue_index);
5907
5908
5909
5910
5911
5912 smp_mb();
5913
5914
5915
5916
5917 if (igb_desc_unused(tx_ring) < size)
5918 return -EBUSY;
5919
5920
5921 netif_wake_subqueue(netdev, tx_ring->queue_index);
5922
5923 u64_stats_update_begin(&tx_ring->tx_syncp2);
5924 tx_ring->tx_stats.restart_queue2++;
5925 u64_stats_update_end(&tx_ring->tx_syncp2);
5926
5927 return 0;
5928}
5929
5930static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5931{
5932 if (igb_desc_unused(tx_ring) >= size)
5933 return 0;
5934 return __igb_maybe_stop_tx(tx_ring, size);
5935}
5936
5937static int igb_tx_map(struct igb_ring *tx_ring,
5938 struct igb_tx_buffer *first,
5939 const u8 hdr_len)
5940{
5941 struct sk_buff *skb = first->skb;
5942 struct igb_tx_buffer *tx_buffer;
5943 union e1000_adv_tx_desc *tx_desc;
5944 struct skb_frag_struct *frag;
5945 dma_addr_t dma;
5946 unsigned int data_len, size;
5947 u32 tx_flags = first->tx_flags;
5948 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
5949 u16 i = tx_ring->next_to_use;
5950
5951 tx_desc = IGB_TX_DESC(tx_ring, i);
5952
5953 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
5954
5955 size = skb_headlen(skb);
5956 data_len = skb->data_len;
5957
5958 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
5959
5960 tx_buffer = first;
5961
5962 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
5963 if (dma_mapping_error(tx_ring->dev, dma))
5964 goto dma_error;
5965
5966
5967 dma_unmap_len_set(tx_buffer, len, size);
5968 dma_unmap_addr_set(tx_buffer, dma, dma);
5969
5970 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5971
5972 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
5973 tx_desc->read.cmd_type_len =
5974 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
5975
5976 i++;
5977 tx_desc++;
5978 if (i == tx_ring->count) {
5979 tx_desc = IGB_TX_DESC(tx_ring, 0);
5980 i = 0;
5981 }
5982 tx_desc->read.olinfo_status = 0;
5983
5984 dma += IGB_MAX_DATA_PER_TXD;
5985 size -= IGB_MAX_DATA_PER_TXD;
5986
5987 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5988 }
5989
5990 if (likely(!data_len))
5991 break;
5992
5993 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
5994
5995 i++;
5996 tx_desc++;
5997 if (i == tx_ring->count) {
5998 tx_desc = IGB_TX_DESC(tx_ring, 0);
5999 i = 0;
6000 }
6001 tx_desc->read.olinfo_status = 0;
6002
6003 size = skb_frag_size(frag);
6004 data_len -= size;
6005
6006 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
6007 size, DMA_TO_DEVICE);
6008
6009 tx_buffer = &tx_ring->tx_buffer_info[i];
6010 }
6011
6012
6013 cmd_type |= size | IGB_TXD_DCMD;
6014 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6015
6016 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6017
6018
6019 first->time_stamp = jiffies;
6020
6021 skb_tx_timestamp(skb);
6029
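	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64.)
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */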
6030 dma_wmb();
6031
6032
6033 first->next_to_watch = tx_desc;
6034
6035 i++;
6036 if (i == tx_ring->count)
6037 i = 0;
6038
6039 tx_ring->next_to_use = i;
6040
6041
6042 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6043
6044 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
6045 writel(i, tx_ring->tail);
6046
6047
6048
6049
6050 mmiowb();
6051 }
6052 return 0;
6053
6054dma_error:
6055 dev_err(tx_ring->dev, "TX DMA map failed\n");
6056 tx_buffer = &tx_ring->tx_buffer_info[i];
6057
6058
6059 while (tx_buffer != first) {
6060 if (dma_unmap_len(tx_buffer, len))
6061 dma_unmap_page(tx_ring->dev,
6062 dma_unmap_addr(tx_buffer, dma),
6063 dma_unmap_len(tx_buffer, len),
6064 DMA_TO_DEVICE);
6065 dma_unmap_len_set(tx_buffer, len, 0);
6066
6067 if (i-- == 0)
6068 i += tx_ring->count;
6069 tx_buffer = &tx_ring->tx_buffer_info[i];
6070 }
6071
6072 if (dma_unmap_len(tx_buffer, len))
6073 dma_unmap_single(tx_ring->dev,
6074 dma_unmap_addr(tx_buffer, dma),
6075 dma_unmap_len(tx_buffer, len),
6076 DMA_TO_DEVICE);
6077 dma_unmap_len_set(tx_buffer, len, 0);
6078
6079 dev_kfree_skb_any(tx_buffer->skb);
6080 tx_buffer->skb = NULL;
6081
6082 tx_ring->next_to_use = i;
6083
6084 return -1;
6085}
6086
6087netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
6088 struct igb_ring *tx_ring)
6089{
6090 struct igb_tx_buffer *first;
6091 int tso;
6092 u32 tx_flags = 0;
6093 unsigned short f;
6094 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6095 __be16 protocol = vlan_get_protocol(skb);
6096 u8 hdr_len = 0;
6103
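	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */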
6104 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6105 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6106
6107 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
6108
6109 return NETDEV_TX_BUSY;
6110 }
6111
6112
6113 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6114 first->skb = skb;
6115 first->bytecount = skb->len;
6116 first->gso_segs = 1;
6117
6118 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6119 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6120
6121 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
6122 !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
6123 &adapter->state)) {
6124 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6125 tx_flags |= IGB_TX_FLAGS_TSTAMP;
6126
6127 adapter->ptp_tx_skb = skb_get(skb);
6128 adapter->ptp_tx_start = jiffies;
6129 if (adapter->hw.mac.type == e1000_82576)
6130 schedule_work(&adapter->ptp_tx_work);
6131 } else {
6132 adapter->tx_hwtstamp_skipped++;
6133 }
6134 }
6135
6136 if (skb_vlan_tag_present(skb)) {
6137 tx_flags |= IGB_TX_FLAGS_VLAN;
6138 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
6139 }
6140
6141
6142 first->tx_flags = tx_flags;
6143 first->protocol = protocol;
6144
6145 tso = igb_tso(tx_ring, first, &hdr_len);
6146 if (tso < 0)
6147 goto out_drop;
6148 else if (!tso)
6149 igb_tx_csum(tx_ring, first);
6150
6151 if (igb_tx_map(tx_ring, first, hdr_len))
6152 goto cleanup_tx_tstamp;
6153
6154 return NETDEV_TX_OK;
6155
6156out_drop:
6157 dev_kfree_skb_any(first->skb);
6158 first->skb = NULL;
6159cleanup_tx_tstamp:
6160 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
6161 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6162
6163 dev_kfree_skb_any(adapter->ptp_tx_skb);
6164 adapter->ptp_tx_skb = NULL;
6165 if (adapter->hw.mac.type == e1000_82576)
6166 cancel_work_sync(&adapter->ptp_tx_work);
6167 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
6168 }
6169
6170 return NETDEV_TX_OK;
6171}
6172
6173static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
6174 struct sk_buff *skb)
6175{
6176 unsigned int r_idx = skb->queue_mapping;
6177
6178 if (r_idx >= adapter->num_tx_queues)
6179 r_idx = r_idx % adapter->num_tx_queues;
6180
6181 return adapter->tx_ring[r_idx];
6182}
6183
6184static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
6185 struct net_device *netdev)
6186{
6187 struct igb_adapter *adapter = netdev_priv(netdev);
6188
6189
6190
6191
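	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */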
6192 if (skb_put_padto(skb, 17))
6193 return NETDEV_TX_OK;
6194
6195 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
6196}
6201
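/**
 *  igb_tx_timeout - Respond to a Tx Hang
 *  @netdev: network interface device structure
 **/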
6202static void igb_tx_timeout(struct net_device *netdev)
6203{
6204 struct igb_adapter *adapter = netdev_priv(netdev);
6205 struct e1000_hw *hw = &adapter->hw;
6206
6207
6208 adapter->tx_timeout_count++;
6209
6210 if (hw->mac.type >= e1000_82580)
6211 hw->dev_spec._82575.global_device_reset = true;
6212
6213 schedule_work(&adapter->reset_task);
6214 wr32(E1000_EICS,
6215 (adapter->eims_enable_mask & ~adapter->eims_other));
6216}
6217
6218static void igb_reset_task(struct work_struct *work)
6219{
6220 struct igb_adapter *adapter;
6221 adapter = container_of(work, struct igb_adapter, reset_task);
6222
6223 igb_dump(adapter);
6224 netdev_err(adapter->netdev, "Reset adapter\n");
6225 igb_reinit_locked(adapter);
6226}
6232
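/**
 *  igb_get_stats64 - Get System Network Statistics
 *  @netdev: network interface device structure
 *  @stats: rtnl_link_stats64 pointer
 **/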
6233static void igb_get_stats64(struct net_device *netdev,
6234 struct rtnl_link_stats64 *stats)
6235{
6236 struct igb_adapter *adapter = netdev_priv(netdev);
6237
6238 spin_lock(&adapter->stats64_lock);
6239 igb_update_stats(adapter);
6240 memcpy(stats, &adapter->stats64, sizeof(*stats));
6241 spin_unlock(&adapter->stats64_lock);
6242}
6250
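/**
 *  igb_change_mtu - Change the Maximum Transfer Unit
 *  @netdev: network interface device structure
 *  @new_mtu: new value for maximum frame size
 **/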
6251static int igb_change_mtu(struct net_device *netdev, int new_mtu)
6252{
6253 struct igb_adapter *adapter = netdev_priv(netdev);
6254 struct pci_dev *pdev = adapter->pdev;
6255 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
6256
6257
6258 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
6259 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
6260
6261 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
6262 usleep_range(1000, 2000);
6263
6264
6265 adapter->max_frame_size = max_frame;
6266
6267 if (netif_running(netdev))
6268 igb_down(adapter);
6269
6270 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
6271 netdev->mtu, new_mtu);
6272 netdev->mtu = new_mtu;
6273
6274 if (netif_running(netdev))
6275 igb_up(adapter);
6276 else
6277 igb_reset(adapter);
6278
6279 clear_bit(__IGB_RESETTING, &adapter->state);
6280
6281 return 0;
6282}
6287
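/**
 *  igb_update_stats - Update the board statistics counters
 *  @adapter: board private structure
 **/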
6288void igb_update_stats(struct igb_adapter *adapter)
6289{
6290 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
6291 struct e1000_hw *hw = &adapter->hw;
6292 struct pci_dev *pdev = adapter->pdev;
6293 u32 reg, mpc;
6294 int i;
6295 u64 bytes, packets;
6296 unsigned int start;
6297 u64 _bytes, _packets;
6298
6299
6300
6301
6302 if (adapter->link_speed == 0)
6303 return;
6304 if (pci_channel_offline(pdev))
6305 return;
6306
6307 bytes = 0;
6308 packets = 0;
6309
6310 rcu_read_lock();
6311 for (i = 0; i < adapter->num_rx_queues; i++) {
6312 struct igb_ring *ring = adapter->rx_ring[i];
6313 u32 rqdpc = rd32(E1000_RQDPC(i));
6314 if (hw->mac.type >= e1000_i210)
6315 wr32(E1000_RQDPC(i), 0);
6316
6317 if (rqdpc) {
6318 ring->rx_stats.drops += rqdpc;
6319 net_stats->rx_fifo_errors += rqdpc;
6320 }
6321
6322 do {
6323 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
6324 _bytes = ring->rx_stats.bytes;
6325 _packets = ring->rx_stats.packets;
6326 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
6327 bytes += _bytes;
6328 packets += _packets;
6329 }
6330
6331 net_stats->rx_bytes = bytes;
6332 net_stats->rx_packets = packets;
6333
6334 bytes = 0;
6335 packets = 0;
6336 for (i = 0; i < adapter->num_tx_queues; i++) {
6337 struct igb_ring *ring = adapter->tx_ring[i];
6338 do {
6339 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
6340 _bytes = ring->tx_stats.bytes;
6341 _packets = ring->tx_stats.packets;
6342 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
6343 bytes += _bytes;
6344 packets += _packets;
6345 }
6346 net_stats->tx_bytes = bytes;
6347 net_stats->tx_packets = packets;
6348 rcu_read_unlock();
6349
6350
6351 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
6352 adapter->stats.gprc += rd32(E1000_GPRC);
6353 adapter->stats.gorc += rd32(E1000_GORCL);
6354 rd32(E1000_GORCH);
6355 adapter->stats.bprc += rd32(E1000_BPRC);
6356 adapter->stats.mprc += rd32(E1000_MPRC);
6357 adapter->stats.roc += rd32(E1000_ROC);
6358
6359 adapter->stats.prc64 += rd32(E1000_PRC64);
6360 adapter->stats.prc127 += rd32(E1000_PRC127);
6361 adapter->stats.prc255 += rd32(E1000_PRC255);
6362 adapter->stats.prc511 += rd32(E1000_PRC511);
6363 adapter->stats.prc1023 += rd32(E1000_PRC1023);
6364 adapter->stats.prc1522 += rd32(E1000_PRC1522);
6365 adapter->stats.symerrs += rd32(E1000_SYMERRS);
6366 adapter->stats.sec += rd32(E1000_SEC);
6367
6368 mpc = rd32(E1000_MPC);
6369 adapter->stats.mpc += mpc;
6370 net_stats->rx_fifo_errors += mpc;
6371 adapter->stats.scc += rd32(E1000_SCC);
6372 adapter->stats.ecol += rd32(E1000_ECOL);
6373 adapter->stats.mcc += rd32(E1000_MCC);
6374 adapter->stats.latecol += rd32(E1000_LATECOL);
6375 adapter->stats.dc += rd32(E1000_DC);
6376 adapter->stats.rlec += rd32(E1000_RLEC);
6377 adapter->stats.xonrxc += rd32(E1000_XONRXC);
6378 adapter->stats.xontxc += rd32(E1000_XONTXC);
6379 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6380 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6381 adapter->stats.fcruc += rd32(E1000_FCRUC);
6382 adapter->stats.gptc += rd32(E1000_GPTC);
6383 adapter->stats.gotc += rd32(E1000_GOTCL);
6384 rd32(E1000_GOTCH);
6385 adapter->stats.rnbc += rd32(E1000_RNBC);
6386 adapter->stats.ruc += rd32(E1000_RUC);
6387 adapter->stats.rfc += rd32(E1000_RFC);
6388 adapter->stats.rjc += rd32(E1000_RJC);
6389 adapter->stats.tor += rd32(E1000_TORH);
6390 adapter->stats.tot += rd32(E1000_TOTH);
6391 adapter->stats.tpr += rd32(E1000_TPR);
6392
6393 adapter->stats.ptc64 += rd32(E1000_PTC64);
6394 adapter->stats.ptc127 += rd32(E1000_PTC127);
6395 adapter->stats.ptc255 += rd32(E1000_PTC255);
6396 adapter->stats.ptc511 += rd32(E1000_PTC511);
6397 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6398 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6399
6400 adapter->stats.mptc += rd32(E1000_MPTC);
6401 adapter->stats.bptc += rd32(E1000_BPTC);
6402
6403 adapter->stats.tpt += rd32(E1000_TPT);
6404 adapter->stats.colc += rd32(E1000_COLC);
6405
6406 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6407
6408 reg = rd32(E1000_CTRL_EXT);
6409 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6410 adapter->stats.rxerrc += rd32(E1000_RXERRC);
6411
6412
6413 if ((hw->mac.type != e1000_i210) &&
6414 (hw->mac.type != e1000_i211))
6415 adapter->stats.tncrs += rd32(E1000_TNCRS);
6416 }
6417
6418 adapter->stats.tsctc += rd32(E1000_TSCTC);
6419 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6420
6421 adapter->stats.iac += rd32(E1000_IAC);
6422 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6423 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6424 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6425 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6426 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6427 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6428 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
6429 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
6430
6431
6432 net_stats->multicast = adapter->stats.mprc;
6433 net_stats->collisions = adapter->stats.colc;
6434
6435
6436
6437
6438
6439
6440 net_stats->rx_errors = adapter->stats.rxerrc +
6441 adapter->stats.crcerrs + adapter->stats.algnerrc +
6442 adapter->stats.ruc + adapter->stats.roc +
6443 adapter->stats.cexterr;
6444 net_stats->rx_length_errors = adapter->stats.ruc +
6445 adapter->stats.roc;
6446 net_stats->rx_crc_errors = adapter->stats.crcerrs;
6447 net_stats->rx_frame_errors = adapter->stats.algnerrc;
6448 net_stats->rx_missed_errors = adapter->stats.mpc;
6449
6450
6451 net_stats->tx_errors = adapter->stats.ecol +
6452 adapter->stats.latecol;
6453 net_stats->tx_aborted_errors = adapter->stats.ecol;
6454 net_stats->tx_window_errors = adapter->stats.latecol;
6455 net_stats->tx_carrier_errors = adapter->stats.tncrs;
6456
6457
6458
6459
6460 adapter->stats.mgptc += rd32(E1000_MGTPTC);
6461 adapter->stats.mgprc += rd32(E1000_MGTPRC);
6462 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
6463
6464
6465 reg = rd32(E1000_MANC);
6466 if (reg & E1000_MANC_EN_BMC2OS) {
6467 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6468 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6469 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6470 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6471 }
6472}
6473
6474static void igb_tsync_interrupt(struct igb_adapter *adapter)
6475{
6476 struct e1000_hw *hw = &adapter->hw;
6477 struct ptp_clock_event event;
6478 struct timespec64 ts;
6479 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
6480
6481 if (tsicr & TSINTR_SYS_WRAP) {
6482 event.type = PTP_CLOCK_PPS;
6483 if (adapter->ptp_caps.pps)
6484 ptp_clock_event(adapter->ptp_clock, &event);
6485 ack |= TSINTR_SYS_WRAP;
6486 }
6487
6488 if (tsicr & E1000_TSICR_TXTS) {
6489
6490 schedule_work(&adapter->ptp_tx_work);
6491 ack |= E1000_TSICR_TXTS;
6492 }
6493
6494 if (tsicr & TSINTR_TT0) {
6495 spin_lock(&adapter->tmreg_lock);
6496 ts = timespec64_add(adapter->perout[0].start,
6497 adapter->perout[0].period);
6498
6499 wr32(E1000_TRGTTIML0, ts.tv_nsec);
6500 wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
6501 tsauxc = rd32(E1000_TSAUXC);
6502 tsauxc |= TSAUXC_EN_TT0;
6503 wr32(E1000_TSAUXC, tsauxc);
6504 adapter->perout[0].start = ts;
6505 spin_unlock(&adapter->tmreg_lock);
6506 ack |= TSINTR_TT0;
6507 }
6508
6509 if (tsicr & TSINTR_TT1) {
6510 spin_lock(&adapter->tmreg_lock);
6511 ts = timespec64_add(adapter->perout[1].start,
6512 adapter->perout[1].period);
6513 wr32(E1000_TRGTTIML1, ts.tv_nsec);
6514 wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
6515 tsauxc = rd32(E1000_TSAUXC);
6516 tsauxc |= TSAUXC_EN_TT1;
6517 wr32(E1000_TSAUXC, tsauxc);
6518 adapter->perout[1].start = ts;
6519 spin_unlock(&adapter->tmreg_lock);
6520 ack |= TSINTR_TT1;
6521 }
6522
6523 if (tsicr & TSINTR_AUTT0) {
6524 nsec = rd32(E1000_AUXSTMPL0);
6525 sec = rd32(E1000_AUXSTMPH0);
6526 event.type = PTP_CLOCK_EXTTS;
6527 event.index = 0;
6528 event.timestamp = sec * 1000000000ULL + nsec;
6529 ptp_clock_event(adapter->ptp_clock, &event);
6530 ack |= TSINTR_AUTT0;
6531 }
6532
6533 if (tsicr & TSINTR_AUTT1) {
6534 nsec = rd32(E1000_AUXSTMPL1);
6535 sec = rd32(E1000_AUXSTMPH1);
6536 event.type = PTP_CLOCK_EXTTS;
6537 event.index = 1;
6538 event.timestamp = sec * 1000000000ULL + nsec;
6539 ptp_clock_event(adapter->ptp_clock, &event);
6540 ack |= TSINTR_AUTT1;
6541 }
6542
6543
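	/* acknowledge the interrupts */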
6544 wr32(E1000_TSICR, ack);
6545}
6546
6547static irqreturn_t igb_msix_other(int irq, void *data)
6548{
6549 struct igb_adapter *adapter = data;
6550 struct e1000_hw *hw = &adapter->hw;
6551 u32 icr = rd32(E1000_ICR);
6552
6553
6554 if (icr & E1000_ICR_DRSTA)
6555 schedule_work(&adapter->reset_task);
6556
6557 if (icr & E1000_ICR_DOUTSYNC) {
6558
6559 adapter->stats.doosync++;
6560
6561
6562
6563
6564 igb_check_wvbr(adapter);
6565 }
6566
6567
6568 if (icr & E1000_ICR_VMMB)
6569 igb_msg_task(adapter);
6570
6571 if (icr & E1000_ICR_LSC) {
6572 hw->mac.get_link_status = 1;
6573
6574 if (!test_bit(__IGB_DOWN, &adapter->state))
6575 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6576 }
6577
6578 if (icr & E1000_ICR_TS)
6579 igb_tsync_interrupt(adapter);
6580
6581 wr32(E1000_EIMS, adapter->eims_other);
6582
6583 return IRQ_HANDLED;
6584}
6585
6586static void igb_write_itr(struct igb_q_vector *q_vector)
6587{
6588 struct igb_adapter *adapter = q_vector->adapter;
6589 u32 itr_val = q_vector->itr_val & 0x7FFC;
6590
6591 if (!q_vector->set_itr)
6592 return;
6593
6594 if (!itr_val)
6595 itr_val = 0x4;
6596
6597 if (adapter->hw.mac.type == e1000_82575)
6598 itr_val |= itr_val << 16;
6599 else
6600 itr_val |= E1000_EITR_CNT_IGNR;
6601
6602 writel(itr_val, q_vector->itr_register);
6603 q_vector->set_itr = 0;
6604}
6605
6606static irqreturn_t igb_msix_ring(int irq, void *data)
6607{
6608 struct igb_q_vector *q_vector = data;
6609
6610
6611 igb_write_itr(q_vector);
6612
6613 napi_schedule(&q_vector->napi);
6614
6615 return IRQ_HANDLED;
6616}
6617
6618#ifdef CONFIG_IGB_DCA
6619static void igb_update_tx_dca(struct igb_adapter *adapter,
6620 struct igb_ring *tx_ring,
6621 int cpu)
6622{
6623 struct e1000_hw *hw = &adapter->hw;
6624 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
6625
6626 if (hw->mac.type != e1000_82575)
6627 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
6628
6629
6630
6631
6632
6633 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
6634 E1000_DCA_TXCTRL_DATA_RRO_EN |
6635 E1000_DCA_TXCTRL_DESC_DCA_EN;
6636
6637 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
6638}
6639
6640static void igb_update_rx_dca(struct igb_adapter *adapter,
6641 struct igb_ring *rx_ring,
6642 int cpu)
6643{
6644 struct e1000_hw *hw = &adapter->hw;
6645 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
6646
6647 if (hw->mac.type != e1000_82575)
6648 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
6649
6650
6651
6652
6653
6654 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
6655 E1000_DCA_RXCTRL_DESC_DCA_EN;
6656
6657 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
6658}
6659
6660static void igb_update_dca(struct igb_q_vector *q_vector)
6661{
6662 struct igb_adapter *adapter = q_vector->adapter;
6663 int cpu = get_cpu();
6664
6665 if (q_vector->cpu == cpu)
6666 goto out_no_update;
6667
6668 if (q_vector->tx.ring)
6669 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
6670
6671 if (q_vector->rx.ring)
6672 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
6673
6674 q_vector->cpu = cpu;
6675out_no_update:
6676 put_cpu();
6677}
6678
6679static void igb_setup_dca(struct igb_adapter *adapter)
6680{
6681 struct e1000_hw *hw = &adapter->hw;
6682 int i;
6683
6684 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
6685 return;
6686
6687
6688 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
6689
6690 for (i = 0; i < adapter->num_q_vectors; i++) {
6691 adapter->q_vector[i]->cpu = -1;
6692 igb_update_dca(adapter->q_vector[i]);
6693 }
6694}
6695
6696static int __igb_notify_dca(struct device *dev, void *data)
6697{
6698 struct net_device *netdev = dev_get_drvdata(dev);
6699 struct igb_adapter *adapter = netdev_priv(netdev);
6700 struct pci_dev *pdev = adapter->pdev;
6701 struct e1000_hw *hw = &adapter->hw;
6702 unsigned long event = *(unsigned long *)data;
6703
6704 switch (event) {
6705 case DCA_PROVIDER_ADD:
6706
6707 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
6708 break;
6709 if (dca_add_requester(dev) == 0) {
6710 adapter->flags |= IGB_FLAG_DCA_ENABLED;
6711 dev_info(&pdev->dev, "DCA enabled\n");
6712 igb_setup_dca(adapter);
6713 break;
6714 }
6715
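		/* fall through - DCA was not enabled */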
6716 case DCA_PROVIDER_REMOVE:
6717 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
6718
6719
6720
6721 dca_remove_requester(dev);
6722 dev_info(&pdev->dev, "DCA disabled\n");
6723 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
6724 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
6725 }
6726 break;
6727 }
6728
6729 return 0;
6730}
6731
6732static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
6733 void *p)
6734{
6735 int ret_val;
6736
6737 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
6738 __igb_notify_dca);
6739
6740 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
6741}
6742#endif
6743
6744#ifdef CONFIG_PCI_IOV
6745static int igb_vf_configure(struct igb_adapter *adapter, int vf)
6746{
6747 unsigned char mac_addr[ETH_ALEN];
6748
6749 eth_zero_addr(mac_addr);
6750 igb_set_vf_mac(adapter, vf, mac_addr);
6751
6752
6753 adapter->vf_data[vf].spoofchk_enabled = true;
6754
6755
6756 adapter->vf_data[vf].trusted = false;
6757
6758 return 0;
6759}
6760
6761#endif
6762static void igb_ping_all_vfs(struct igb_adapter *adapter)
6763{
6764 struct e1000_hw *hw = &adapter->hw;
6765 u32 ping;
6766 int i;
6767
6768 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
6769 ping = E1000_PF_CONTROL_MSG;
6770 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
6771 ping |= E1000_VT_MSGTYPE_CTS;
6772 igb_write_mbx(hw, &ping, 1, i);
6773 }
6774}
6775
6776static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
6777{
6778 struct e1000_hw *hw = &adapter->hw;
6779 u32 vmolr = rd32(E1000_VMOLR(vf));
6780 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6781
6782 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
6783 IGB_VF_FLAG_MULTI_PROMISC);
6784 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6785
6786 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
6787 vmolr |= E1000_VMOLR_MPME;
6788 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
6789 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
6790 } else {
6791
6792
6793
6794
6795 if (vf_data->num_vf_mc_hashes > 30) {
6796 vmolr |= E1000_VMOLR_MPME;
6797 } else if (vf_data->num_vf_mc_hashes) {
6798 int j;
6799
6800 vmolr |= E1000_VMOLR_ROMPE;
6801 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6802 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6803 }
6804 }
6805
6806 wr32(E1000_VMOLR(vf), vmolr);
6807
6808
6809 if (*msgbuf & E1000_VT_MSGINFO_MASK)
6810 return -EINVAL;
6811
6812 return 0;
6813}
6814
6815static int igb_set_vf_multicasts(struct igb_adapter *adapter,
6816 u32 *msgbuf, u32 vf)
6817{
6818 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
6819 u16 *hash_list = (u16 *)&msgbuf[1];
6820 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6821 int i;
6822
6823
6824
6825
6826
6827 vf_data->num_vf_mc_hashes = n;
6828
6829
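	/* only up to 30 hash values supported */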
6830 if (n > 30)
6831 n = 30;
6832
6833
6834 for (i = 0; i < n; i++)
6835 vf_data->vf_mc_hashes[i] = hash_list[i];
6836
6837
6838 igb_set_rx_mode(adapter->netdev);
6839
6840 return 0;
6841}
6842
6843static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
6844{
6845 struct e1000_hw *hw = &adapter->hw;
6846 struct vf_data_storage *vf_data;
6847 int i, j;
6848
6849 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6850 u32 vmolr = rd32(E1000_VMOLR(i));
6851
6852 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6853
6854 vf_data = &adapter->vf_data[i];
6855
6856 if ((vf_data->num_vf_mc_hashes > 30) ||
6857 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
6858 vmolr |= E1000_VMOLR_MPME;
6859 } else if (vf_data->num_vf_mc_hashes) {
6860 vmolr |= E1000_VMOLR_ROMPE;
6861 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6862 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6863 }
6864 wr32(E1000_VMOLR(i), vmolr);
6865 }
6866}
6867
6868static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
6869{
6870 struct e1000_hw *hw = &adapter->hw;
6871 u32 pool_mask, vlvf_mask, i;
6872
6873
6874 pool_mask = E1000_VLVF_POOLSEL_MASK;
6875 vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
6876
6877
6878 pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
6879 adapter->vfs_allocated_count);
6880
6881
6882 for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
6883 u32 vlvf = rd32(E1000_VLVF(i));
6884 u32 vfta_mask, vid, vfta;
6885
6886
6887 if (!(vlvf & vlvf_mask))
6888 continue;
6889
6890
6891 vlvf ^= vlvf_mask;
6892
6893
6894 if (vlvf & pool_mask)
6895 goto update_vlvfb;
6896
6897
6898 if (vlvf & E1000_VLVF_POOLSEL_MASK)
6899 goto update_vlvf;
6900
6901 vid = vlvf & E1000_VLVF_VLANID_MASK;
6902 vfta_mask = BIT(vid % 32);
6903
6904
6905 vfta = adapter->shadow_vfta[vid / 32];
6906 if (vfta & vfta_mask)
6907 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
6908update_vlvf:
6909
6910 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6911 vlvf &= E1000_VLVF_POOLSEL_MASK;
6912 else
6913 vlvf = 0;
6914update_vlvfb:
6915
6916 wr32(E1000_VLVF(i), vlvf);
6917 }
6918}
6919
6920static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
6921{
6922 u32 vlvf;
6923 int idx;
6924
6925
6926 if (vlan == 0)
6927 return 0;
6928
6929
6930 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
6931 vlvf = rd32(E1000_VLVF(idx));
6932 if ((vlvf & VLAN_VID_MASK) == vlan)
6933 break;
6934 }
6935
6936 return idx;
6937}
6938
6939static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
6940{
6941 struct e1000_hw *hw = &adapter->hw;
6942 u32 bits, pf_id;
6943 int idx;
6944
6945 idx = igb_find_vlvf_entry(hw, vid);
6946 if (!idx)
6947 return;
6948
6949
6950
6951
6952 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
6953 bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
6954 bits &= rd32(E1000_VLVF(idx));
6955
6956
6957 if (!bits) {
6958 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6959 wr32(E1000_VLVF(idx), BIT(pf_id));
6960 else
6961 wr32(E1000_VLVF(idx), 0);
6962 }
6963}
6964
6965static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
6966 bool add, u32 vf)
6967{
6968 int pf_id = adapter->vfs_allocated_count;
6969 struct e1000_hw *hw = &adapter->hw;
6970 int err;
6971
6972
6973
6974
6975
6976
6977 if (add && test_bit(vid, adapter->active_vlans)) {
6978 err = igb_vfta_set(hw, vid, pf_id, true, false);
6979 if (err)
6980 return err;
6981 }
6982
6983 err = igb_vfta_set(hw, vid, vf, add, false);
6984
6985 if (add && !err)
6986 return err;
6987
6988
6989
6990
6991
6992 if (test_bit(vid, adapter->active_vlans) ||
6993 (adapter->flags & IGB_FLAG_VLAN_PROMISC))
6994 igb_update_pf_vlvf(adapter, vid);
6995
6996 return err;
6997}
6998
6999static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
7000{
7001 struct e1000_hw *hw = &adapter->hw;
7002
7003 if (vid)
7004 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
7005 else
7006 wr32(E1000_VMVIR(vf), 0);
7007}
7008
7009static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
7010 u16 vlan, u8 qos)
7011{
7012 int err;
7013
7014 err = igb_set_vf_vlan(adapter, vlan, true, vf);
7015 if (err)
7016 return err;
7017
7018 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
7019 igb_set_vmolr(adapter, vf, !vlan);
7020
7021
7022 if (vlan != adapter->vf_data[vf].pf_vlan)
7023 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7024 false, vf);
7025
7026 adapter->vf_data[vf].pf_vlan = vlan;
7027 adapter->vf_data[vf].pf_qos = qos;
7028 igb_set_vf_vlan_strip(adapter, vf, true);
7029 dev_info(&adapter->pdev->dev,
7030 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
7031 if (test_bit(__IGB_DOWN, &adapter->state)) {
7032 dev_warn(&adapter->pdev->dev,
7033 "The VF VLAN has been set, but the PF device is not up.\n");
7034 dev_warn(&adapter->pdev->dev,
7035 "Bring the PF device up before attempting to use the VF device.\n");
7036 }
7037
7038 return err;
7039}
7040
7041static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
7042{
7043
7044 igb_set_vf_vlan(adapter, 0, true, vf);
7045
7046 igb_set_vmvir(adapter, 0, vf);
7047 igb_set_vmolr(adapter, vf, true);
7048
7049
7050 if (adapter->vf_data[vf].pf_vlan)
7051 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7052 false, vf);
7053
7054 adapter->vf_data[vf].pf_vlan = 0;
7055 adapter->vf_data[vf].pf_qos = 0;
7056 igb_set_vf_vlan_strip(adapter, vf, false);
7057
7058 return 0;
7059}
7060
7061static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
7062 u16 vlan, u8 qos, __be16 vlan_proto)
7063{
7064 struct igb_adapter *adapter = netdev_priv(netdev);
7065
7066 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
7067 return -EINVAL;
7068
7069 if (vlan_proto != htons(ETH_P_8021Q))
7070 return -EPROTONOSUPPORT;
7071
7072 return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
7073 igb_disable_port_vlan(adapter, vf);
7074}
7075
7076static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7077{
7078 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7079 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
7080 int ret;
7081
7082 if (adapter->vf_data[vf].pf_vlan)
7083 return -1;
7084
7085
7086 if (!vid && !add)
7087 return 0;
7088
7089 ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
7090 if (!ret)
7091 igb_set_vf_vlan_strip(adapter, vf, !!vid);
7092 return ret;
7093}
7094
7095static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
7096{
7097 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7098
7099
7100 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
7101 vf_data->last_nack = jiffies;
7102
7103
7104 igb_clear_vf_vfta(adapter, vf);
7105 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
7106 igb_set_vmvir(adapter, vf_data->pf_vlan |
7107 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
7108 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
7109 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
7110
7111
7112 adapter->vf_data[vf].num_vf_mc_hashes = 0;
7113
7114
7115 igb_set_rx_mode(adapter->netdev);
7116}
7117
7118static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
7119{
7120 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7121
7122
7123 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7124 eth_zero_addr(vf_mac);
7125
7126
7127 igb_vf_reset(adapter, vf);
7128}
7129
7130static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
7131{
7132 struct e1000_hw *hw = &adapter->hw;
7133 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7134 u32 reg, msgbuf[3];
7135 u8 *addr = (u8 *)(&msgbuf[1]);
7136
7137
7138 igb_vf_reset(adapter, vf);
7139
7140
7141 igb_set_vf_mac(adapter, vf, vf_mac);
7142
7143
7144 reg = rd32(E1000_VFTE);
7145 wr32(E1000_VFTE, reg | BIT(vf));
7146 reg = rd32(E1000_VFRE);
7147 wr32(E1000_VFRE, reg | BIT(vf));
7148
7149 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
7150
7151
7152 if (!is_zero_ether_addr(vf_mac)) {
7153 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
7154 memcpy(addr, vf_mac, ETH_ALEN);
7155 } else {
7156 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
7157 }
7158 igb_write_mbx(hw, msgbuf, 3, vf);
7159}
7160
7161static void igb_flush_mac_table(struct igb_adapter *adapter)
7162{
7163 struct e1000_hw *hw = &adapter->hw;
7164 int i;
7165
7166 for (i = 0; i < hw->mac.rar_entry_count; i++) {
7167 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
7168 memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
7169 adapter->mac_table[i].queue = 0;
7170 igb_rar_set_index(adapter, i);
7171 }
7172}
7173
7174static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
7175{
7176 struct e1000_hw *hw = &adapter->hw;
7177
7178 int rar_entries = hw->mac.rar_entry_count -
7179 adapter->vfs_allocated_count;
7180 int i, count = 0;
7181
7182 for (i = 0; i < rar_entries; i++) {
7183
7184 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
7185 continue;
7186
7187
7188 if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
7189 (adapter->mac_table[i].queue != queue))
7190 continue;
7191
7192 count++;
7193 }
7194
7195 return count;
7196}
7197
7198
7199static void igb_set_default_mac_filter(struct igb_adapter *adapter)
7200{
7201 struct igb_mac_addr *mac_table = &adapter->mac_table[0];
7202
7203 ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
7204 mac_table->queue = adapter->vfs_allocated_count;
7205 mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7206
7207 igb_rar_set_index(adapter, 0);
7208}
7209
/* Check whether a MAC table entry can hold the requested filter.  A free
 * entry can always be used; an in-use entry can only be reused when both
 * the address and the source/destination match flag are identical.
 */
7215static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
7216 const u8 *addr, const u8 flags)
7217{
7218 if (!(entry->state & IGB_MAC_STATE_IN_USE))
7219 return true;
7220
7221 if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
7222 (flags & IGB_MAC_STATE_SRC_ADDR))
7223 return false;
7224
7225 if (!ether_addr_equal(addr, entry->addr))
7226 return false;
7227
7228 return true;
7229}
7230
/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
 * 'flags' is used to indicate what kind of match is made, match is by
 * default for the destination address, if matching by source address
 * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used.
 */
7236static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
7237 const u8 *addr, const u8 queue,
7238 const u8 flags)
7239{
7240 struct e1000_hw *hw = &adapter->hw;
7241 int rar_entries = hw->mac.rar_entry_count -
7242 adapter->vfs_allocated_count;
7243 int i;
7244
7245 if (is_zero_ether_addr(addr))
7246 return -EINVAL;
7247
7248
7249
7250
7251
7252 for (i = 0; i < rar_entries; i++) {
7253 if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
7254 addr, flags))
7255 continue;
7256
7257 ether_addr_copy(adapter->mac_table[i].addr, addr);
7258 adapter->mac_table[i].queue = queue;
7259 adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;
7260
7261 igb_rar_set_index(adapter, i);
7262 return i;
7263 }
7264
7265 return -ENOSPC;
7266}
7267
7268static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7269 const u8 queue)
7270{
7271 return igb_add_mac_filter_flags(adapter, addr, queue, 0);
7272}
7273
/* Remove a MAC filter for 'addr' directing matching traffic to
 * 'queue', 'flags' is used to indicate what kind of match needs to be
 * removed, match is by default for the destination address, if
 * matching by source address is to be removed the flag
 * IGB_MAC_STATE_SRC_ADDR can be used.
 */
7280static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
7281 const u8 *addr, const u8 queue,
7282 const u8 flags)
7283{
7284 struct e1000_hw *hw = &adapter->hw;
7285 int rar_entries = hw->mac.rar_entry_count -
7286 adapter->vfs_allocated_count;
7287 int i;
7288
7289 if (is_zero_ether_addr(addr))
7290 return -EINVAL;
7291
7292
7293
7294
7295
7296 for (i = 0; i < rar_entries; i++) {
7297 if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
7298 continue;
7299 if ((adapter->mac_table[i].state & flags) != flags)
7300 continue;
7301 if (adapter->mac_table[i].queue != queue)
7302 continue;
7303 if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
7304 continue;
7305
7306
7307
7308
7309 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
7310 adapter->mac_table[i].state =
7311 IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7312 adapter->mac_table[i].queue =
7313 adapter->vfs_allocated_count;
7314 } else {
7315 adapter->mac_table[i].state = 0;
7316 adapter->mac_table[i].queue = 0;
7317 memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
7318 }
7319
7320 igb_rar_set_index(adapter, i);
7321 return 0;
7322 }
7323
7324 return -ENOENT;
7325}
7326
7327static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7328 const u8 queue)
7329{
7330 return igb_del_mac_filter_flags(adapter, addr, queue, 0);
7331}
7332
7333int igb_add_mac_steering_filter(struct igb_adapter *adapter,
7334 const u8 *addr, u8 queue, u8 flags)
7335{
7336 struct e1000_hw *hw = &adapter->hw;
7337
7338
7339
7340
7341 if (hw->mac.type != e1000_i210)
7342 return -EOPNOTSUPP;
7343
7344 return igb_add_mac_filter_flags(adapter, addr, queue,
7345 IGB_MAC_STATE_QUEUE_STEERING | flags);
7346}
7347
7348int igb_del_mac_steering_filter(struct igb_adapter *adapter,
7349 const u8 *addr, u8 queue, u8 flags)
7350{
7351 return igb_del_mac_filter_flags(adapter, addr, queue,
7352 IGB_MAC_STATE_QUEUE_STEERING | flags);
7353}
7354
7355static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
7356{
7357 struct igb_adapter *adapter = netdev_priv(netdev);
7358 int ret;
7359
7360 ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7361
7362 return min_t(int, ret, 0);
7363}
7364
7365static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
7366{
7367 struct igb_adapter *adapter = netdev_priv(netdev);
7368
7369 igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7370
7371 return 0;
7372}
7373
7374static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
7375 const u32 info, const u8 *addr)
7376{
7377 struct pci_dev *pdev = adapter->pdev;
7378 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7379 struct list_head *pos;
7380 struct vf_mac_filter *entry = NULL;
7381 int ret = 0;
7382
7383 switch (info) {
7384 case E1000_VF_MAC_FILTER_CLR:
7385
7386 list_for_each(pos, &adapter->vf_macs.l) {
7387 entry = list_entry(pos, struct vf_mac_filter, l);
7388 if (entry->vf == vf) {
7389 entry->vf = -1;
7390 entry->free = true;
7391 igb_del_mac_filter(adapter, entry->vf_mac, vf);
7392 }
7393 }
7394 break;
7395 case E1000_VF_MAC_FILTER_ADD:
7396 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7397 !vf_data->trusted) {
7398 dev_warn(&pdev->dev,
7399 "VF %d requested MAC filter but is administratively denied\n",
7400 vf);
7401 return -EINVAL;
7402 }
7403 if (!is_valid_ether_addr(addr)) {
7404 dev_warn(&pdev->dev,
7405 "VF %d attempted to set invalid MAC filter\n",
7406 vf);
7407 return -EINVAL;
7408 }
7409
7410
7411 list_for_each(pos, &adapter->vf_macs.l) {
7412 entry = list_entry(pos, struct vf_mac_filter, l);
7413 if (entry->free)
7414 break;
7415 }
7416
7417 if (entry && entry->free) {
7418 entry->free = false;
7419 entry->vf = vf;
7420 ether_addr_copy(entry->vf_mac, addr);
7421
7422 ret = igb_add_mac_filter(adapter, addr, vf);
7423 ret = min_t(int, ret, 0);
7424 } else {
7425 ret = -ENOSPC;
7426 }
7427
7428 if (ret == -ENOSPC)
7429 dev_warn(&pdev->dev,
7430 "VF %d has requested MAC filter but there is no space for it\n",
7431 vf);
7432 break;
7433 default:
7434 ret = -EINVAL;
7435 break;
7436 }
7437
7438 return ret;
7439}
7440
7441static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
7442{
7443 struct pci_dev *pdev = adapter->pdev;
7444 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7445 u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
7446
7447
7448
7449
7450 unsigned char *addr = (unsigned char *)&msg[1];
7451 int ret = 0;
7452
7453 if (!info) {
7454 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7455 !vf_data->trusted) {
7456 dev_warn(&pdev->dev,
7457 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
7458 vf);
7459 return -EINVAL;
7460 }
7461
7462 if (!is_valid_ether_addr(addr)) {
7463 dev_warn(&pdev->dev,
7464 "VF %d attempted to set invalid MAC\n",
7465 vf);
7466 return -EINVAL;
7467 }
7468
7469 ret = igb_set_vf_mac(adapter, vf, addr);
7470 } else {
7471 ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
7472 }
7473
7474 return ret;
7475}
7476
7477static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
7478{
7479 struct e1000_hw *hw = &adapter->hw;
7480 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7481 u32 msg = E1000_VT_MSGTYPE_NACK;
7482
7483
7484 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
7485 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
7486 igb_write_mbx(hw, &msg, 1, vf);
7487 vf_data->last_nack = jiffies;
7488 }
7489}
7490
7491static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
7492{
7493 struct pci_dev *pdev = adapter->pdev;
7494 u32 msgbuf[E1000_VFMAILBOX_SIZE];
7495 struct e1000_hw *hw = &adapter->hw;
7496 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7497 s32 retval;
7498
7499 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);
7500
7501 if (retval) {
7502
7503 dev_err(&pdev->dev, "Error receiving message from VF\n");
7504 vf_data->flags &= ~IGB_VF_FLAG_CTS;
7505 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7506 goto unlock;
7507 goto out;
7508 }
7509
7510
7511 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
7512 goto unlock;
7513
7514
7515
7516
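	/* until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */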
7517 if (msgbuf[0] == E1000_VF_RESET) {
7518
7519 igb_vf_reset_msg(adapter, vf);
7520 return;
7521 }
7522
7523 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
7524 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7525 goto unlock;
7526 retval = -1;
7527 goto out;
7528 }
7529
7530 switch ((msgbuf[0] & 0xFFFF)) {
7531 case E1000_VF_SET_MAC_ADDR:
7532 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
7533 break;
7534 case E1000_VF_SET_PROMISC:
7535 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
7536 break;
7537 case E1000_VF_SET_MULTICAST:
7538 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
7539 break;
7540 case E1000_VF_SET_LPE:
7541 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
7542 break;
7543 case E1000_VF_SET_VLAN:
7544 retval = -1;
7545 if (vf_data->pf_vlan)
7546 dev_warn(&pdev->dev,
7547 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
7548 vf);
7549 else
7550 retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
7551 break;
7552 default:
7553 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
7554 retval = -1;
7555 break;
7556 }
7557
7558 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
7559out:
7560
7561 if (retval)
7562 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
7563 else
7564 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
7565
7566
7567 igb_write_mbx(hw, msgbuf, 1, vf);
7568 return;
7569
7570unlock:
7571 igb_unlock_mbx(hw, vf);
7572}
7573
7574static void igb_msg_task(struct igb_adapter *adapter)
7575{
7576 struct e1000_hw *hw = &adapter->hw;
7577 u32 vf;
7578
7579 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
7580
7581 if (!igb_check_for_rst(hw, vf))
7582 igb_vf_reset_event(adapter, vf);
7583
7584
7585 if (!igb_check_for_msg(hw, vf))
7586 igb_rcv_msg_from_vf(adapter, vf);
7587
7588
7589 if (!igb_check_for_ack(hw, vf))
7590 igb_rcv_ack_from_vf(adapter, vf);
7591 }
7592}
7593
/**
 *  igb_set_uta - Set unicast filter table address
 *  @adapter: board private structure
 *  @set: boolean indicating if we are setting or clearing bits
 *
 *  The unicast table address is a register array of 32-bit registers.
 *  The table is only touched when VFs are allocated; all hash bits are
 *  set to 1 (or cleared) so that the VMOLR ROPE bit can act as a
 *  substitute for specific unicast filtering.
 **/
7605static void igb_set_uta(struct igb_adapter *adapter, bool set)
7606{
7607 struct e1000_hw *hw = &adapter->hw;
7608 u32 uta = set ? ~0 : 0;
7609 int i;
7610
7611
7612 if (!adapter->vfs_allocated_count)
7613 return;
7614
7615 for (i = hw->mac.uta_reg_count; i--;)
7616 array_wr32(E1000_UTA, i, uta);
7617}
7618
/**
 *  igb_intr_msi - Interrupt Handler
 *  @irq: interrupt number
 *  @data: pointer to a network interface device structure
 **/
7624static irqreturn_t igb_intr_msi(int irq, void *data)
7625{
7626 struct igb_adapter *adapter = data;
7627 struct igb_q_vector *q_vector = adapter->q_vector[0];
7628 struct e1000_hw *hw = &adapter->hw;
7629
7630 u32 icr = rd32(E1000_ICR);
7631
7632 igb_write_itr(q_vector);
7633
7634 if (icr & E1000_ICR_DRSTA)
7635 schedule_work(&adapter->reset_task);
7636
7637 if (icr & E1000_ICR_DOUTSYNC) {
7638
7639 adapter->stats.doosync++;
7640 }
7641
7642 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7643 hw->mac.get_link_status = 1;
7644 if (!test_bit(__IGB_DOWN, &adapter->state))
7645 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7646 }
7647
7648 if (icr & E1000_ICR_TS)
7649 igb_tsync_interrupt(adapter);
7650
7651 napi_schedule(&q_vector->napi);
7652
7653 return IRQ_HANDLED;
7654}
7655
/**
 *  igb_intr - Legacy Interrupt Handler
 *  @irq: interrupt number
 *  @data: pointer to a network interface device structure
 **/
7661static irqreturn_t igb_intr(int irq, void *data)
7662{
7663 struct igb_adapter *adapter = data;
7664 struct igb_q_vector *q_vector = adapter->q_vector[0];
7665 struct e1000_hw *hw = &adapter->hw;
7666
7667
7668
7669 u32 icr = rd32(E1000_ICR);
7670
7671
7672
7673
7674 if (!(icr & E1000_ICR_INT_ASSERTED))
7675 return IRQ_NONE;
7676
7677 igb_write_itr(q_vector);
7678
7679 if (icr & E1000_ICR_DRSTA)
7680 schedule_work(&adapter->reset_task);
7681
7682 if (icr & E1000_ICR_DOUTSYNC) {
7683
7684 adapter->stats.doosync++;
7685 }
7686
7687 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7688 hw->mac.get_link_status = 1;
7689
7690 if (!test_bit(__IGB_DOWN, &adapter->state))
7691 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7692 }
7693
7694 if (icr & E1000_ICR_TS)
7695 igb_tsync_interrupt(adapter);
7696
7697 napi_schedule(&q_vector->napi);
7698
7699 return IRQ_HANDLED;
7700}
7701
7702static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
7703{
7704 struct igb_adapter *adapter = q_vector->adapter;
7705 struct e1000_hw *hw = &adapter->hw;
7706
7707 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
7708 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
7709 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
7710 igb_set_itr(q_vector);
7711 else
7712 igb_update_ring_itr(q_vector);
7713 }
7714
7715 if (!test_bit(__IGB_DOWN, &adapter->state)) {
7716 if (adapter->flags & IGB_FLAG_HAS_MSIX)
7717 wr32(E1000_EIMS, q_vector->eims_value);
7718 else
7719 igb_irq_enable(adapter);
7720 }
7721}
7722
/**
 *  igb_poll - NAPI Rx polling callback
 *  @napi: napi polling structure
 *  @budget: count of how many packets we should handle
 **/
7728static int igb_poll(struct napi_struct *napi, int budget)
7729{
7730 struct igb_q_vector *q_vector = container_of(napi,
7731 struct igb_q_vector,
7732 napi);
7733 bool clean_complete = true;
7734 int work_done = 0;
7735
7736#ifdef CONFIG_IGB_DCA
7737 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
7738 igb_update_dca(q_vector);
7739#endif
7740 if (q_vector->tx.ring)
7741 clean_complete = igb_clean_tx_irq(q_vector, budget);
7742
7743 if (q_vector->rx.ring) {
7744 int cleaned = igb_clean_rx_irq(q_vector, budget);
7745
7746 work_done += cleaned;
7747 if (cleaned >= budget)
7748 clean_complete = false;
7749 }
7750
7751
7752 if (!clean_complete)
7753 return budget;
7754
7755
7756
7757
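	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */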
7758 if (likely(napi_complete_done(napi, work_done)))
7759 igb_ring_irq_enable(q_vector);
7760
7761 return min(work_done, budget - 1);
7762}
7763
/**
 *  igb_clean_tx_irq - Reclaim resources after transmit completes
 *  @q_vector: pointer to q_vector containing needed info
 *  @napi_budget: Used to determine if we are in netpoll
 *
 *  returns true if ring is completely cleaned
 **/
7771static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
7772{
7773 struct igb_adapter *adapter = q_vector->adapter;
7774 struct igb_ring *tx_ring = q_vector->tx.ring;
7775 struct igb_tx_buffer *tx_buffer;
7776 union e1000_adv_tx_desc *tx_desc;
7777 unsigned int total_bytes = 0, total_packets = 0;
7778 unsigned int budget = q_vector->tx.work_limit;
7779 unsigned int i = tx_ring->next_to_clean;
7780
7781 if (test_bit(__IGB_DOWN, &adapter->state))
7782 return true;
7783
7784 tx_buffer = &tx_ring->tx_buffer_info[i];
7785 tx_desc = IGB_TX_DESC(tx_ring, i);
7786 i -= tx_ring->count;
7787
7788 do {
7789 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
7790
7791
7792 if (!eop_desc)
7793 break;
7794
7795
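		/* prevent any other reads prior to eop_desc */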
7796 smp_rmb();
7797
7798
7799 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
7800 break;
7801
7802
7803 tx_buffer->next_to_watch = NULL;
7804
7805
7806 total_bytes += tx_buffer->bytecount;
7807 total_packets += tx_buffer->gso_segs;
7808
7809
7810 napi_consume_skb(tx_buffer->skb, napi_budget);
7811
7812
7813 dma_unmap_single(tx_ring->dev,
7814 dma_unmap_addr(tx_buffer, dma),
7815 dma_unmap_len(tx_buffer, len),
7816 DMA_TO_DEVICE);
7817
7818
7819 dma_unmap_len_set(tx_buffer, len, 0);
7820
7821
7822 while (tx_desc != eop_desc) {
7823 tx_buffer++;
7824 tx_desc++;
7825 i++;
7826 if (unlikely(!i)) {
7827 i -= tx_ring->count;
7828 tx_buffer = tx_ring->tx_buffer_info;
7829 tx_desc = IGB_TX_DESC(tx_ring, 0);
7830 }
7831
7832
7833 if (dma_unmap_len(tx_buffer, len)) {
7834 dma_unmap_page(tx_ring->dev,
7835 dma_unmap_addr(tx_buffer, dma),
7836 dma_unmap_len(tx_buffer, len),
7837 DMA_TO_DEVICE);
7838 dma_unmap_len_set(tx_buffer, len, 0);
7839 }
7840 }
7841
7842
7843 tx_buffer++;
7844 tx_desc++;
7845 i++;
7846 if (unlikely(!i)) {
7847 i -= tx_ring->count;
7848 tx_buffer = tx_ring->tx_buffer_info;
7849 tx_desc = IGB_TX_DESC(tx_ring, 0);
7850 }
7851
7852
7853 prefetch(tx_desc);
7854
7855
7856 budget--;
7857 } while (likely(budget));
7858
7859 netdev_tx_completed_queue(txring_txq(tx_ring),
7860 total_packets, total_bytes);
7861 i += tx_ring->count;
7862 tx_ring->next_to_clean = i;
7863 u64_stats_update_begin(&tx_ring->tx_syncp);
7864 tx_ring->tx_stats.bytes += total_bytes;
7865 tx_ring->tx_stats.packets += total_packets;
7866 u64_stats_update_end(&tx_ring->tx_syncp);
7867 q_vector->tx.total_bytes += total_bytes;
7868 q_vector->tx.total_packets += total_packets;
7869
7870 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
7871 struct e1000_hw *hw = &adapter->hw;
7872
7873
7874
7875
7876 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
7877 if (tx_buffer->next_to_watch &&
7878 time_after(jiffies, tx_buffer->time_stamp +
7879 (adapter->tx_timeout_factor * HZ)) &&
7880 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
7881
7882
7883 dev_err(tx_ring->dev,
7884 "Detected Tx Unit Hang\n"
7885 " Tx Queue <%d>\n"
7886 " TDH <%x>\n"
7887 " TDT <%x>\n"
7888 " next_to_use <%x>\n"
7889 " next_to_clean <%x>\n"
7890 "buffer_info[next_to_clean]\n"
7891 " time_stamp <%lx>\n"
7892 " next_to_watch <%p>\n"
7893 " jiffies <%lx>\n"
7894 " desc.status <%x>\n",
7895 tx_ring->queue_index,
7896 rd32(E1000_TDH(tx_ring->reg_idx)),
7897 readl(tx_ring->tail),
7898 tx_ring->next_to_use,
7899 tx_ring->next_to_clean,
7900 tx_buffer->time_stamp,
7901 tx_buffer->next_to_watch,
7902 jiffies,
7903 tx_buffer->next_to_watch->wb.status);
7904 netif_stop_subqueue(tx_ring->netdev,
7905 tx_ring->queue_index);
7906
7907
7908 return true;
7909 }
7910 }
7911
7912#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
7913 if (unlikely(total_packets &&
7914 netif_carrier_ok(tx_ring->netdev) &&
7915 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
7916
7917
7918
7919 smp_mb();
7920 if (__netif_subqueue_stopped(tx_ring->netdev,
7921 tx_ring->queue_index) &&
7922 !(test_bit(__IGB_DOWN, &adapter->state))) {
7923 netif_wake_subqueue(tx_ring->netdev,
7924 tx_ring->queue_index);
7925
7926 u64_stats_update_begin(&tx_ring->tx_syncp);
7927 tx_ring->tx_stats.restart_queue++;
7928 u64_stats_update_end(&tx_ring->tx_syncp);
7929 }
7930 }
7931
7932 return !!budget;
7933}
7934
/**
 *  igb_reuse_rx_page - page flip buffer and store it back on the ring
 *  @rx_ring: rx descriptor ring to store buffers on
 *  @old_buff: donor buffer to have page reused
 *
 *  Synchronizes page for reuse by the adapter
 **/
7942static void igb_reuse_rx_page(struct igb_ring *rx_ring,
7943 struct igb_rx_buffer *old_buff)
7944{
7945 struct igb_rx_buffer *new_buff;
7946 u16 nta = rx_ring->next_to_alloc;
7947
7948 new_buff = &rx_ring->rx_buffer_info[nta];
7949
7950
7951 nta++;
7952 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
7953
7954
7955
7956
7957
7958 new_buff->dma = old_buff->dma;
7959 new_buff->page = old_buff->page;
7960 new_buff->page_offset = old_buff->page_offset;
7961 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
7962}
7963
7964static inline bool igb_page_is_reserved(struct page *page)
7965{
7966 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
7967}
7968
7969static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
7970{
7971 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
7972 struct page *page = rx_buffer->page;
7973
7974
7975 if (unlikely(igb_page_is_reserved(page)))
7976 return false;
7977
7978#if (PAGE_SIZE < 8192)
7979
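	/* if we are only owner of page we can reuse it */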
7980 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
7981 return false;
7982#else
7983#define IGB_LAST_OFFSET \
7984 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
7985
7986 if (rx_buffer->page_offset > IGB_LAST_OFFSET)
7987 return false;
7988#endif
7989
7990
7991
7992
7993
7994 if (unlikely(!pagecnt_bias)) {
7995 page_ref_add(page, USHRT_MAX);
7996 rx_buffer->pagecnt_bias = USHRT_MAX;
7997 }
7998
7999 return true;
8000}
8001
/**
 *  igb_add_rx_frag - Add contents of Rx buffer to sk_buff
 *  @rx_ring: rx descriptor ring to transact packets on
 *  @rx_buffer: buffer containing page to add
 *  @skb: sk_buff to place the data into
 *  @size: size of buffer to be added
 *
 *  This function will add the data contained in rx_buffer->page to the skb.
 **/
8011static void igb_add_rx_frag(struct igb_ring *rx_ring,
8012 struct igb_rx_buffer *rx_buffer,
8013 struct sk_buff *skb,
8014 unsigned int size)
8015{
8016#if (PAGE_SIZE < 8192)
8017 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8018#else
8019 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
8020 SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
8021 SKB_DATA_ALIGN(size);
8022#endif
8023 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
8024 rx_buffer->page_offset, size, truesize);
8025#if (PAGE_SIZE < 8192)
8026 rx_buffer->page_offset ^= truesize;
8027#else
8028 rx_buffer->page_offset += truesize;
8029#endif
8030}
8031
8032static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
8033 struct igb_rx_buffer *rx_buffer,
8034 union e1000_adv_rx_desc *rx_desc,
8035 unsigned int size)
8036{
8037 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8038#if (PAGE_SIZE < 8192)
8039 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8040#else
8041 unsigned int truesize = SKB_DATA_ALIGN(size);
8042#endif
8043 unsigned int headlen;
8044 struct sk_buff *skb;
8045
8046
8047 prefetch(va);
8048#if L1_CACHE_BYTES < 128
8049 prefetch(va + L1_CACHE_BYTES);
8050#endif
8051
8052
8053 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
8054 if (unlikely(!skb))
8055 return NULL;
8056
8057 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
8058 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
8059 va += IGB_TS_HDR_LEN;
8060 size -= IGB_TS_HDR_LEN;
8061 }
8062
8063
8064 headlen = size;
8065 if (headlen > IGB_RX_HDR_LEN)
8066 headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
8067
8068
8069 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
8070
8071
8072 size -= headlen;
8073 if (size) {
8074 skb_add_rx_frag(skb, 0, rx_buffer->page,
8075 (va + headlen) - page_address(rx_buffer->page),
8076 size, truesize);
8077#if (PAGE_SIZE < 8192)
8078 rx_buffer->page_offset ^= truesize;
8079#else
8080 rx_buffer->page_offset += truesize;
8081#endif
8082 } else {
8083 rx_buffer->pagecnt_bias++;
8084 }
8085
8086 return skb;
8087}
8088
8089static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
8090 struct igb_rx_buffer *rx_buffer,
8091 union e1000_adv_rx_desc *rx_desc,
8092 unsigned int size)
8093{
8094 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8095#if (PAGE_SIZE < 8192)
8096 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8097#else
8098 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
8099 SKB_DATA_ALIGN(IGB_SKB_PAD + size);
8100#endif
8101 struct sk_buff *skb;
8102
8103
8104 prefetch(va);
8105#if L1_CACHE_BYTES < 128
8106 prefetch(va + L1_CACHE_BYTES);
8107#endif
8108
8109
8110 skb = build_skb(va - IGB_SKB_PAD, truesize);
8111 if (unlikely(!skb))
8112 return NULL;
8113
8114
8115 skb_reserve(skb, IGB_SKB_PAD);
8116 __skb_put(skb, size);
8117
8118
8119 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
8120 igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
8121 __skb_pull(skb, IGB_TS_HDR_LEN);
8122 }
8123
8124
8125#if (PAGE_SIZE < 8192)
8126 rx_buffer->page_offset ^= truesize;
8127#else
8128 rx_buffer->page_offset += truesize;
8129#endif
8130
8131 return skb;
8132}
8133
8134static inline void igb_rx_checksum(struct igb_ring *ring,
8135 union e1000_adv_rx_desc *rx_desc,
8136 struct sk_buff *skb)
8137{
8138 skb_checksum_none_assert(skb);
8139
8140
8141 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
8142 return;
8143
8144
8145 if (!(ring->netdev->features & NETIF_F_RXCSUM))
8146 return;
8147
8148
8149 if (igb_test_staterr(rx_desc,
8150 E1000_RXDEXT_STATERR_TCPE |
8151 E1000_RXDEXT_STATERR_IPE)) {
8152
8153
8154
8155
8156 if (!((skb->len == 60) &&
8157 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
8158 u64_stats_update_begin(&ring->rx_syncp);
8159 ring->rx_stats.csum_err++;
8160 u64_stats_update_end(&ring->rx_syncp);
8161 }
8162
8163 return;
8164 }
8165
8166 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
8167 E1000_RXD_STAT_UDPCS))
8168 skb->ip_summed = CHECKSUM_UNNECESSARY;
8169
8170 dev_dbg(ring->dev, "cksum success: bits %08X\n",
8171 le32_to_cpu(rx_desc->wb.upper.status_error));
8172}
8173
8174static inline void igb_rx_hash(struct igb_ring *ring,
8175 union e1000_adv_rx_desc *rx_desc,
8176 struct sk_buff *skb)
8177{
8178 if (ring->netdev->features & NETIF_F_RXHASH)
8179 skb_set_hash(skb,
8180 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
8181 PKT_HASH_TYPE_L3);
8182}
8183
/**
 *  igb_is_non_eop - process handling of non-EOP buffers
 *  @rx_ring: Rx ring being processed
 *  @rx_desc: Rx descriptor for current buffer
 *
 *  This function updates next to clean and prefetches the next descriptor.
 *  It returns false if the buffer is an EOP buffer, true if the frame
 *  continues into the next buffer.
 **/
8195static bool igb_is_non_eop(struct igb_ring *rx_ring,
8196 union e1000_adv_rx_desc *rx_desc)
8197{
8198 u32 ntc = rx_ring->next_to_clean + 1;
8199
8200
8201 ntc = (ntc < rx_ring->count) ? ntc : 0;
8202 rx_ring->next_to_clean = ntc;
8203
8204 prefetch(IGB_RX_DESC(rx_ring, ntc));
8205
8206 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
8207 return false;
8208
8209 return true;
8210}
8211
/**
 *  igb_cleanup_headers - Correct corrupted or empty headers
 *  @rx_ring: rx descriptor ring packet is being transacted on
 *  @rx_desc: pointer to the EOP Rx descriptor
 *  @skb: pointer to current skb being fixed
 *
 *  Drops frames that the hardware flagged with an error (unless RXALL is
 *  enabled) and pads runt frames so they qualify as valid Ethernet frames.
 *
 *  Returns true if an error was encountered and skb was freed.
 **/
8226static bool igb_cleanup_headers(struct igb_ring *rx_ring,
8227 union e1000_adv_rx_desc *rx_desc,
8228 struct sk_buff *skb)
8229{
8230 if (unlikely((igb_test_staterr(rx_desc,
8231 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
8232 struct net_device *netdev = rx_ring->netdev;
8233 if (!(netdev->features & NETIF_F_RXALL)) {
8234 dev_kfree_skb_any(skb);
8235 return true;
8236 }
8237 }
8238
8239
8240 if (eth_skb_pad(skb))
8241 return true;
8242
8243 return false;
8244}
8245
/**
 *  igb_process_skb_fields - Populate skb header fields from Rx descriptor
 *  @rx_ring: rx descriptor ring packet is being transacted on
 *  @rx_desc: pointer to the EOP Rx descriptor
 *  @skb: pointer to current skb being populated
 *
 *  This function checks the ring, descriptor, and packet information in
 *  order to populate the hash, checksum, VLAN, timestamp, protocol, and
 *  other fields within the skb.
 **/
8256static void igb_process_skb_fields(struct igb_ring *rx_ring,
8257 union e1000_adv_rx_desc *rx_desc,
8258 struct sk_buff *skb)
8259{
8260 struct net_device *dev = rx_ring->netdev;
8261
8262 igb_rx_hash(rx_ring, rx_desc, skb);
8263
8264 igb_rx_checksum(rx_ring, rx_desc, skb);
8265
8266 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
8267 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
8268 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
8269
8270 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
8271 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
8272 u16 vid;
8273
8274 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
8275 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
8276 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
8277 else
8278 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
8279
8280 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
8281 }
8282
8283 skb_record_rx_queue(skb, rx_ring->queue_index);
8284
8285 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
8286}
8287
8288static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
8289 const unsigned int size)
8290{
8291 struct igb_rx_buffer *rx_buffer;
8292
8293 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
8294 prefetchw(rx_buffer->page);
8295
8296
8297 dma_sync_single_range_for_cpu(rx_ring->dev,
8298 rx_buffer->dma,
8299 rx_buffer->page_offset,
8300 size,
8301 DMA_FROM_DEVICE);
8302
8303 rx_buffer->pagecnt_bias--;
8304
8305 return rx_buffer;
8306}
8307
8308static void igb_put_rx_buffer(struct igb_ring *rx_ring,
8309 struct igb_rx_buffer *rx_buffer)
8310{
8311 if (igb_can_reuse_rx_page(rx_buffer)) {
8312
8313 igb_reuse_rx_page(rx_ring, rx_buffer);
8314 } else {
8315
8316
8317
8318 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
8319 igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
8320 IGB_RX_DMA_ATTR);
8321 __page_frag_cache_drain(rx_buffer->page,
8322 rx_buffer->pagecnt_bias);
8323 }
8324
8325
8326 rx_buffer->page = NULL;
8327}
8328
8329static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
8330{
8331 struct igb_ring *rx_ring = q_vector->rx.ring;
8332 struct sk_buff *skb = rx_ring->skb;
8333 unsigned int total_bytes = 0, total_packets = 0;
8334 u16 cleaned_count = igb_desc_unused(rx_ring);
8335
8336 while (likely(total_packets < budget)) {
8337 union e1000_adv_rx_desc *rx_desc;
8338 struct igb_rx_buffer *rx_buffer;
8339 unsigned int size;
8340
8341
8342 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
8343 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8344 cleaned_count = 0;
8345 }
8346
8347 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
8348 size = le16_to_cpu(rx_desc->wb.upper.length);
8349 if (!size)
8350 break;
8351
8352
8353
8354
8355
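		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */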
8356 dma_rmb();
8357
8358 rx_buffer = igb_get_rx_buffer(rx_ring, size);
8359
8360
8361 if (skb)
8362 igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
8363 else if (ring_uses_build_skb(rx_ring))
8364 skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
8365 else
8366 skb = igb_construct_skb(rx_ring, rx_buffer,
8367 rx_desc, size);
8368
8369
8370 if (!skb) {
8371 rx_ring->rx_stats.alloc_failed++;
8372 rx_buffer->pagecnt_bias++;
8373 break;
8374 }
8375
8376 igb_put_rx_buffer(rx_ring, rx_buffer);
8377 cleaned_count++;
8378
8379
8380 if (igb_is_non_eop(rx_ring, rx_desc))
8381 continue;
8382
8383
8384 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
8385 skb = NULL;
8386 continue;
8387 }
8388
8389
8390 total_bytes += skb->len;
8391
8392
8393 igb_process_skb_fields(rx_ring, rx_desc, skb);
8394
8395 napi_gro_receive(&q_vector->napi, skb);
8396
8397
8398 skb = NULL;
8399
8400
8401 total_packets++;
8402 }
8403
8404
8405 rx_ring->skb = skb;
8406
8407 u64_stats_update_begin(&rx_ring->rx_syncp);
8408 rx_ring->rx_stats.packets += total_packets;
8409 rx_ring->rx_stats.bytes += total_bytes;
8410 u64_stats_update_end(&rx_ring->rx_syncp);
8411 q_vector->rx.total_packets += total_packets;
8412 q_vector->rx.total_bytes += total_bytes;
8413
8414 if (cleaned_count)
8415 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8416
8417 return total_packets;
8418}
8419
8420static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
8421{
8422 return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
8423}
8424
8425static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
8426 struct igb_rx_buffer *bi)
8427{
8428 struct page *page = bi->page;
8429 dma_addr_t dma;
8430
8431
8432 if (likely(page))
8433 return true;
8434
8435
8436 page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
8437 if (unlikely(!page)) {
8438 rx_ring->rx_stats.alloc_failed++;
8439 return false;
8440 }
8441
8442
8443 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
8444 igb_rx_pg_size(rx_ring),
8445 DMA_FROM_DEVICE,
8446 IGB_RX_DMA_ATTR);
8447
8448
8449
8450
8451 if (dma_mapping_error(rx_ring->dev, dma)) {
8452 __free_pages(page, igb_rx_pg_order(rx_ring));
8453
8454 rx_ring->rx_stats.alloc_failed++;
8455 return false;
8456 }
8457
8458 bi->dma = dma;
8459 bi->page = page;
8460 bi->page_offset = igb_rx_offset(rx_ring);
8461 bi->pagecnt_bias = 1;
8462
8463 return true;
8464}
8465
/**
 *  igb_alloc_rx_buffers - Replace used receive buffers
 *  @rx_ring: rx descriptor ring to allocate new receive buffers
 *  @cleaned_count: count of buffers to allocate
 **/
8470void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
8471{
8472 union e1000_adv_rx_desc *rx_desc;
8473 struct igb_rx_buffer *bi;
8474 u16 i = rx_ring->next_to_use;
8475 u16 bufsz;
8476
8477
8478 if (!cleaned_count)
8479 return;
8480
8481 rx_desc = IGB_RX_DESC(rx_ring, i);
8482 bi = &rx_ring->rx_buffer_info[i];
8483 i -= rx_ring->count;
8484
8485 bufsz = igb_rx_bufsz(rx_ring);
8486
8487 do {
8488 if (!igb_alloc_mapped_page(rx_ring, bi))
8489 break;
8490
8491
8492 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
8493 bi->page_offset, bufsz,
8494 DMA_FROM_DEVICE);
8495
8496
8497
8498
8499 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
8500
8501 rx_desc++;
8502 bi++;
8503 i++;
8504 if (unlikely(!i)) {
8505 rx_desc = IGB_RX_DESC(rx_ring, 0);
8506 bi = rx_ring->rx_buffer_info;
8507 i -= rx_ring->count;
8508 }
8509
8510
8511 rx_desc->wb.upper.length = 0;
8512
8513 cleaned_count--;
8514 } while (cleaned_count);
8515
8516 i += rx_ring->count;
8517
8518 if (rx_ring->next_to_use != i) {
8519
8520 rx_ring->next_to_use = i;
8521
8522
8523 rx_ring->next_to_alloc = i;
8524
8525
8526
8527
8528
8529
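		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */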
8530 dma_wmb();
8531 writel(i, rx_ring->tail);
8532 }
8533}
8534
/**
 *  igb_mii_ioctl - handle MII ioctls for copper PHYs
 *  @netdev: pointer to netdev struct
 *  @ifr: interface structure
 *  @cmd: ioctl command to execute
 **/
8541static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8542{
8543 struct igb_adapter *adapter = netdev_priv(netdev);
8544 struct mii_ioctl_data *data = if_mii(ifr);
8545
8546 if (adapter->hw.phy.media_type != e1000_media_type_copper)
8547 return -EOPNOTSUPP;
8548
8549 switch (cmd) {
8550 case SIOCGMIIPHY:
8551 data->phy_id = adapter->hw.phy.addr;
8552 break;
8553 case SIOCGMIIREG:
8554 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
8555 &data->val_out))
8556 return -EIO;
8557 break;
8558 case SIOCSMIIREG:
8559 default:
8560 return -EOPNOTSUPP;
8561 }
8562 return 0;
8563}
8564
/**
 *  igb_ioctl - handle device-specific ioctls
 *  @netdev: pointer to netdev struct
 *  @ifr: interface structure
 *  @cmd: ioctl command to execute
 **/
8571static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8572{
8573 switch (cmd) {
8574 case SIOCGMIIPHY:
8575 case SIOCGMIIREG:
8576 case SIOCSMIIREG:
8577 return igb_mii_ioctl(netdev, ifr, cmd);
8578 case SIOCGHWTSTAMP:
8579 return igb_ptp_get_ts_config(netdev, ifr);
8580 case SIOCSHWTSTAMP:
8581 return igb_ptp_set_ts_config(netdev, ifr);
8582 default:
8583 return -EOPNOTSUPP;
8584 }
8585}
8586
8587void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8588{
8589 struct igb_adapter *adapter = hw->back;
8590
8591 pci_read_config_word(adapter->pdev, reg, value);
8592}
8593
8594void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8595{
8596 struct igb_adapter *adapter = hw->back;
8597
8598 pci_write_config_word(adapter->pdev, reg, *value);
8599}
8600
8601s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8602{
8603 struct igb_adapter *adapter = hw->back;
8604
8605 if (pcie_capability_read_word(adapter->pdev, reg, value))
8606 return -E1000_ERR_CONFIG;
8607
8608 return 0;
8609}
8610
8611s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8612{
8613 struct igb_adapter *adapter = hw->back;
8614
8615 if (pcie_capability_write_word(adapter->pdev, reg, *value))
8616 return -E1000_ERR_CONFIG;
8617
8618 return 0;
8619}
8620
8621static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
8622{
8623 struct igb_adapter *adapter = netdev_priv(netdev);
8624 struct e1000_hw *hw = &adapter->hw;
8625 u32 ctrl, rctl;
8626 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
8627
8628 if (enable) {
8629
8630 ctrl = rd32(E1000_CTRL);
8631 ctrl |= E1000_CTRL_VME;
8632 wr32(E1000_CTRL, ctrl);
8633
8634
8635 rctl = rd32(E1000_RCTL);
8636 rctl &= ~E1000_RCTL_CFIEN;
8637 wr32(E1000_RCTL, rctl);
8638 } else {
8639
8640 ctrl = rd32(E1000_CTRL);
8641 ctrl &= ~E1000_CTRL_VME;
8642 wr32(E1000_CTRL, ctrl);
8643 }
8644
8645 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
8646}
8647
8648static int igb_vlan_rx_add_vid(struct net_device *netdev,
8649 __be16 proto, u16 vid)
8650{
8651 struct igb_adapter *adapter = netdev_priv(netdev);
8652 struct e1000_hw *hw = &adapter->hw;
8653 int pf_id = adapter->vfs_allocated_count;
8654
8655
8656 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8657 igb_vfta_set(hw, vid, pf_id, true, !!vid);
8658
8659 set_bit(vid, adapter->active_vlans);
8660
8661 return 0;
8662}
8663
8664static int igb_vlan_rx_kill_vid(struct net_device *netdev,
8665 __be16 proto, u16 vid)
8666{
8667 struct igb_adapter *adapter = netdev_priv(netdev);
8668 int pf_id = adapter->vfs_allocated_count;
8669 struct e1000_hw *hw = &adapter->hw;
8670
8671
8672 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8673 igb_vfta_set(hw, vid, pf_id, false, true);
8674
8675 clear_bit(vid, adapter->active_vlans);
8676
8677 return 0;
8678}
8679
8680static void igb_restore_vlan(struct igb_adapter *adapter)
8681{
8682 u16 vid = 1;
8683
8684 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
8685 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
8686
8687 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
8688 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
8689}
8690
8691int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
8692{
8693 struct pci_dev *pdev = adapter->pdev;
8694 struct e1000_mac_info *mac = &adapter->hw.mac;
8695
8696 mac->autoneg = 0;
8697
8698
8699
8700
8701 if ((spd & 1) || (dplx & ~1))
8702 goto err_inval;
8703
8704
8705
8706
8707 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
8708 switch (spd + dplx) {
8709 case SPEED_10 + DUPLEX_HALF:
8710 case SPEED_10 + DUPLEX_FULL:
8711 case SPEED_100 + DUPLEX_HALF:
8712 goto err_inval;
8713 default:
8714 break;
8715 }
8716 }
8717
8718 switch (spd + dplx) {
8719 case SPEED_10 + DUPLEX_HALF:
8720 mac->forced_speed_duplex = ADVERTISE_10_HALF;
8721 break;
8722 case SPEED_10 + DUPLEX_FULL:
8723 mac->forced_speed_duplex = ADVERTISE_10_FULL;
8724 break;
8725 case SPEED_100 + DUPLEX_HALF:
8726 mac->forced_speed_duplex = ADVERTISE_100_HALF;
8727 break;
8728 case SPEED_100 + DUPLEX_FULL:
8729 mac->forced_speed_duplex = ADVERTISE_100_FULL;
8730 break;
8731 case SPEED_1000 + DUPLEX_FULL:
8732 mac->autoneg = 1;
8733 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
8734 break;
8735 case SPEED_1000 + DUPLEX_HALF:
8736 default:
8737 goto err_inval;
8738 }
8739
8740
8741 adapter->hw.phy.mdix = AUTO_ALL_MODES;
8742
8743 return 0;
8744
8745err_inval:
8746 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
8747 return -EINVAL;
8748}
8749
8750static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8751 bool runtime)
8752{
8753 struct net_device *netdev = pci_get_drvdata(pdev);
8754 struct igb_adapter *adapter = netdev_priv(netdev);
8755 struct e1000_hw *hw = &adapter->hw;
8756 u32 ctrl, rctl, status;
8757 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
8758#ifdef CONFIG_PM
8759 int retval = 0;
8760#endif
8761
8762 rtnl_lock();
8763 netif_device_detach(netdev);
8764
8765 if (netif_running(netdev))
8766 __igb_close(netdev, true);
8767
8768 igb_ptp_suspend(adapter);
8769
8770 igb_clear_interrupt_scheme(adapter);
8771 rtnl_unlock();
8772
8773#ifdef CONFIG_PM
8774 if (!runtime) {
8775 retval = pci_save_state(pdev);
8776 if (retval)
8777 return retval;
8778 }
8779#endif
8780
8781 status = rd32(E1000_STATUS);
8782 if (status & E1000_STATUS_LU)
8783 wufc &= ~E1000_WUFC_LNKC;
8784
8785 if (wufc) {
8786 igb_setup_rctl(adapter);
8787 igb_set_rx_mode(netdev);
8788
8789
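		/* turn on all-multi mode if wake on multicast is enabled */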
8790 if (wufc & E1000_WUFC_MC) {
8791 rctl = rd32(E1000_RCTL);
8792 rctl |= E1000_RCTL_MPE;
8793 wr32(E1000_RCTL, rctl);
8794 }
8795
8796 ctrl = rd32(E1000_CTRL);
8797
8798 #define E1000_CTRL_ADVD3WUC 0x00100000
8799
8800 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
8801 ctrl |= E1000_CTRL_ADVD3WUC;
8802 wr32(E1000_CTRL, ctrl);
8803
8804
8805 igb_disable_pcie_master(hw);
8806
8807 wr32(E1000_WUC, E1000_WUC_PME_EN);
8808 wr32(E1000_WUFC, wufc);
8809 } else {
8810 wr32(E1000_WUC, 0);
8811 wr32(E1000_WUFC, 0);
8812 }
8813
8814 *enable_wake = wufc || adapter->en_mng_pt;
8815 if (!*enable_wake)
8816 igb_power_down_link(adapter);
8817 else
8818 igb_power_up_link(adapter);
8819
8820
8821
8822
8823 igb_release_hw_control(adapter);
8824
8825 pci_disable_device(pdev);
8826
8827 return 0;
8828}
8829
8830static void igb_deliver_wake_packet(struct net_device *netdev)
8831{
8832 struct igb_adapter *adapter = netdev_priv(netdev);
8833 struct e1000_hw *hw = &adapter->hw;
8834 struct sk_buff *skb;
8835 u32 wupl;
8836
8837 wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
8838
8839
8840
8841
8842 if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
8843 return;
8844
8845 skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
8846 if (!skb)
8847 return;
8848
8849 skb_put(skb, wupl);
8850
8851
8852 wupl = roundup(wupl, 4);
8853
8854 memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
8855
8856 skb->protocol = eth_type_trans(skb, netdev);
8857 netif_rx(skb);
8858}
8859
8860static int __maybe_unused igb_suspend(struct device *dev)
8861{
8862 int retval;
8863 bool wake;
8864 struct pci_dev *pdev = to_pci_dev(dev);
8865
8866 retval = __igb_shutdown(pdev, &wake, 0);
8867 if (retval)
8868 return retval;
8869
8870 if (wake) {
8871 pci_prepare_to_sleep(pdev);
8872 } else {
8873 pci_wake_from_d3(pdev, false);
8874 pci_set_power_state(pdev, PCI_D3hot);
8875 }
8876
8877 return 0;
8878}
8879
8880static int __maybe_unused igb_resume(struct device *dev)
8881{
8882 struct pci_dev *pdev = to_pci_dev(dev);
8883 struct net_device *netdev = pci_get_drvdata(pdev);
8884 struct igb_adapter *adapter = netdev_priv(netdev);
8885 struct e1000_hw *hw = &adapter->hw;
8886 u32 err, val;
8887
8888 pci_set_power_state(pdev, PCI_D0);
8889 pci_restore_state(pdev);
8890 pci_save_state(pdev);
8891
8892 if (!pci_device_is_present(pdev))
8893 return -ENODEV;
8894 err = pci_enable_device_mem(pdev);
8895 if (err) {
8896 dev_err(&pdev->dev,
8897 "igb: Cannot enable PCI device from suspend\n");
8898 return err;
8899 }
8900 pci_set_master(pdev);
8901
8902 pci_enable_wake(pdev, PCI_D3hot, 0);
8903 pci_enable_wake(pdev, PCI_D3cold, 0);
8904
8905 if (igb_init_interrupt_scheme(adapter, true)) {
8906 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8907 return -ENOMEM;
8908 }
8909
8910 igb_reset(adapter);
8911
8912
8913
8914
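	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */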
8915 igb_get_hw_control(adapter);
8916
8917 val = rd32(E1000_WUS);
8918 if (val & WAKE_PKT_WUS)
8919 igb_deliver_wake_packet(netdev);
8920
8921 wr32(E1000_WUS, ~0);
8922
8923 rtnl_lock();
8924 if (!err && netif_running(netdev))
8925 err = __igb_open(netdev, true);
8926
8927 if (!err)
8928 netif_device_attach(netdev);
8929 rtnl_unlock();
8930
8931 return err;
8932}
8933
8934static int __maybe_unused igb_runtime_idle(struct device *dev)
8935{
8936 struct pci_dev *pdev = to_pci_dev(dev);
8937 struct net_device *netdev = pci_get_drvdata(pdev);
8938 struct igb_adapter *adapter = netdev_priv(netdev);
8939
8940 if (!igb_has_link(adapter))
8941 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
8942
8943 return -EBUSY;
8944}
8945
8946static int __maybe_unused igb_runtime_suspend(struct device *dev)
8947{
8948 struct pci_dev *pdev = to_pci_dev(dev);
8949 int retval;
8950 bool wake;
8951
8952 retval = __igb_shutdown(pdev, &wake, 1);
8953 if (retval)
8954 return retval;
8955
8956 if (wake) {
8957 pci_prepare_to_sleep(pdev);
8958 } else {
8959 pci_wake_from_d3(pdev, false);
8960 pci_set_power_state(pdev, PCI_D3hot);
8961 }
8962
8963 return 0;
8964}
8965
8966static int __maybe_unused igb_runtime_resume(struct device *dev)
8967{
8968 return igb_resume(dev);
8969}
8970
8971static void igb_shutdown(struct pci_dev *pdev)
8972{
8973 bool wake;
8974
8975 __igb_shutdown(pdev, &wake, 0);
8976
8977 if (system_state == SYSTEM_POWER_OFF) {
8978 pci_wake_from_d3(pdev, wake);
8979 pci_set_power_state(pdev, PCI_D3hot);
8980 }
8981}
8982
8983#ifdef CONFIG_PCI_IOV
8984static int igb_sriov_reinit(struct pci_dev *dev)
8985{
8986 struct net_device *netdev = pci_get_drvdata(dev);
8987 struct igb_adapter *adapter = netdev_priv(netdev);
8988 struct pci_dev *pdev = adapter->pdev;
8989
8990 rtnl_lock();
8991
8992 if (netif_running(netdev))
8993 igb_close(netdev);
8994 else
8995 igb_reset(adapter);
8996
8997 igb_clear_interrupt_scheme(adapter);
8998
8999 igb_init_queue_configuration(adapter);
9000
9001 if (igb_init_interrupt_scheme(adapter, true)) {
9002 rtnl_unlock();
9003 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9004 return -ENOMEM;
9005 }
9006
9007 if (netif_running(netdev))
9008 igb_open(netdev);
9009
9010 rtnl_unlock();
9011
9012 return 0;
9013}
9014
9015static int igb_pci_disable_sriov(struct pci_dev *dev)
9016{
9017 int err = igb_disable_sriov(dev);
9018
9019 if (!err)
9020 err = igb_sriov_reinit(dev);
9021
9022 return err;
9023}
9024
9025static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
9026{
9027 int err = igb_enable_sriov(dev, num_vfs);
9028
9029 if (err)
9030 goto out;
9031
9032 err = igb_sriov_reinit(dev);
9033 if (!err)
9034 return num_vfs;
9035
9036out:
9037 return err;
9038}
9039
9040#endif
9041static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
9042{
9043#ifdef CONFIG_PCI_IOV
9044 if (num_vfs == 0)
9045 return igb_pci_disable_sriov(dev);
9046 else
9047 return igb_pci_enable_sriov(dev, num_vfs);
9048#endif
9049 return 0;
9050}
9051
/**
 *  igb_io_error_detected - called when PCI error is detected
 *  @pdev: Pointer to PCI device
 *  @state: The current pci connection state
 *
 *  This function is called after a PCI bus error affecting
 *  this device has been detected.
 **/
9060static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
9061 pci_channel_state_t state)
9062{
9063 struct net_device *netdev = pci_get_drvdata(pdev);
9064 struct igb_adapter *adapter = netdev_priv(netdev);
9065
9066 netif_device_detach(netdev);
9067
9068 if (state == pci_channel_io_perm_failure)
9069 return PCI_ERS_RESULT_DISCONNECT;
9070
9071 if (netif_running(netdev))
9072 igb_down(adapter);
9073 pci_disable_device(pdev);
9074
9075
9076 return PCI_ERS_RESULT_NEED_RESET;
9077}
9078
/**
 *  igb_io_slot_reset - called after the pci bus has been reset
 *  @pdev: Pointer to PCI device
 *
 *  Restart the card from scratch, as if from a cold-boot.
 **/
9086static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
9087{
9088 struct net_device *netdev = pci_get_drvdata(pdev);
9089 struct igb_adapter *adapter = netdev_priv(netdev);
9090 struct e1000_hw *hw = &adapter->hw;
9091 pci_ers_result_t result;
9092
9093 if (pci_enable_device_mem(pdev)) {
9094 dev_err(&pdev->dev,
9095 "Cannot re-enable PCI device after reset.\n");
9096 result = PCI_ERS_RESULT_DISCONNECT;
9097 } else {
9098 pci_set_master(pdev);
9099 pci_restore_state(pdev);
9100 pci_save_state(pdev);
9101
9102 pci_enable_wake(pdev, PCI_D3hot, 0);
9103 pci_enable_wake(pdev, PCI_D3cold, 0);
9104
9105
9106
9107
9108 hw->hw_addr = adapter->io_addr;
9109
9110 igb_reset(adapter);
9111 wr32(E1000_WUS, ~0);
9112 result = PCI_ERS_RESULT_RECOVERED;
9113 }
9114
9115 return result;
9116}
9117
/**
 *  igb_io_resume - called when traffic can start flowing again
 *  @pdev: Pointer to PCI device
 *
 *  This callback is called when the error recovery driver tells us that
 *  it is OK to resume normal operation.
 **/
9126static void igb_io_resume(struct pci_dev *pdev)
9127{
9128 struct net_device *netdev = pci_get_drvdata(pdev);
9129 struct igb_adapter *adapter = netdev_priv(netdev);
9130
9131 if (netif_running(netdev)) {
9132 if (igb_up(adapter)) {
9133 dev_err(&pdev->dev, "igb_up failed after reset\n");
9134 return;
9135 }
9136 }
9137
9138 netif_device_attach(netdev);
9139
9140
9141
9142
9143 igb_get_hw_control(adapter);
9144}
9145
/**
 *  igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
 *  @adapter: Pointer to adapter structure
 *  @index: Index of the RAR entry which needs to be synced with MAC table
 **/
9151static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
9152{
9153 struct e1000_hw *hw = &adapter->hw;
9154 u32 rar_low, rar_high;
9155 u8 *addr = adapter->mac_table[index].addr;
9156
9157
9158
9159
9160
9161
9162 rar_low = le32_to_cpup((__le32 *)(addr));
9163 rar_high = le16_to_cpup((__le16 *)(addr + 4));
9164
9165
9166 if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
9167 if (is_valid_ether_addr(addr))
9168 rar_high |= E1000_RAH_AV;
9169
9170 if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
9171 rar_high |= E1000_RAH_ASEL_SRC_ADDR;
9172
9173 switch (hw->mac.type) {
9174 case e1000_82575:
9175 case e1000_i210:
9176 if (adapter->mac_table[index].state &
9177 IGB_MAC_STATE_QUEUE_STEERING)
9178 rar_high |= E1000_RAH_QSEL_ENABLE;
9179
9180 rar_high |= E1000_RAH_POOL_1 *
9181 adapter->mac_table[index].queue;
9182 break;
9183 default:
9184 rar_high |= E1000_RAH_POOL_1 <<
9185 adapter->mac_table[index].queue;
9186 break;
9187 }
9188 }
9189
9190 wr32(E1000_RAL(index), rar_low);
9191 wrfl();
9192 wr32(E1000_RAH(index), rar_high);
9193 wrfl();
9194}
9195
9196static int igb_set_vf_mac(struct igb_adapter *adapter,
9197 int vf, unsigned char *mac_addr)
9198{
9199 struct e1000_hw *hw = &adapter->hw;
9200
9201
9202
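	/* VF MAC addresses start at end of receive addresses and work
	 * backwards from there
	 */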
9203 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
9204 unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
9205
9206 ether_addr_copy(vf_mac_addr, mac_addr);
9207 ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
9208 adapter->mac_table[rar_entry].queue = vf;
9209 adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
9210 igb_rar_set_index(adapter, rar_entry);
9211
9212 return 0;
9213}
9214
9215static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
9216{
9217 struct igb_adapter *adapter = netdev_priv(netdev);
9218
9219 if (vf >= adapter->vfs_allocated_count)
9220 return -EINVAL;
9221
9222
9223
9224
9225
9226
9227
9228 if (is_zero_ether_addr(mac)) {
9229 adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
9230 dev_info(&adapter->pdev->dev,
9231 "remove administratively set MAC on VF %d\n",
9232 vf);
9233 } else if (is_valid_ether_addr(mac)) {
9234 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
9235 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
9236 mac, vf);
		dev_info(&adapter->pdev->dev,
			 "Reload the VF driver to make this change effective.\n");
9239
9240 if (test_bit(__IGB_DOWN, &adapter->state)) {
9241 dev_warn(&adapter->pdev->dev,
9242 "The VF MAC address has been set, but the PF device is not up.\n");
9243 dev_warn(&adapter->pdev->dev,
9244 "Bring the PF device up before attempting to use the VF device.\n");
9245 }
9246 } else {
9247 return -EINVAL;
9248 }
9249 return igb_set_vf_mac(adapter, vf, mac);
9250}
9251
9252static int igb_link_mbps(int internal_link_speed)
9253{
9254 switch (internal_link_speed) {
9255 case SPEED_100:
9256 return 100;
9257 case SPEED_1000:
9258 return 1000;
9259 default:
9260 return 0;
9261 }
9262}
9263
9264static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
9265 int link_speed)
9266{
9267 int rf_dec, rf_int;
9268 u32 bcnrc_val;
9269
9270 if (tx_rate != 0) {
9271
9272 rf_int = link_speed / tx_rate;
9273 rf_dec = (link_speed - (rf_int * tx_rate));
9274 rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
9275 tx_rate;
9276
9277 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
9278 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
9279 E1000_RTTBCNRC_RF_INT_MASK);
9280 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
9281 } else {
9282 bcnrc_val = 0;
9283 }
9284
9285 wr32(E1000_RTTDQSEL, vf);
9286
9287
9288
9289 wr32(E1000_RTTBCNRM, 0x14);
9290 wr32(E1000_RTTBCNRC, bcnrc_val);
9291}
9292
9293static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
9294{
9295 int actual_link_speed, i;
9296 bool reset_rate = false;
9297
9298
9299 if ((adapter->vf_rate_link_speed == 0) ||
9300 (adapter->hw.mac.type != e1000_82576))
9301 return;
9302
9303 actual_link_speed = igb_link_mbps(adapter->link_speed);
9304 if (actual_link_speed != adapter->vf_rate_link_speed) {
9305 reset_rate = true;
9306 adapter->vf_rate_link_speed = 0;
9307 dev_info(&adapter->pdev->dev,
9308 "Link speed has been changed. VF Transmit rate is disabled\n");
9309 }
9310
9311 for (i = 0; i < adapter->vfs_allocated_count; i++) {
9312 if (reset_rate)
9313 adapter->vf_data[i].tx_rate = 0;
9314
9315 igb_set_vf_rate_limit(&adapter->hw, i,
9316 adapter->vf_data[i].tx_rate,
9317 actual_link_speed);
9318 }
9319}
9320
9321static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
9322 int min_tx_rate, int max_tx_rate)
9323{
9324 struct igb_adapter *adapter = netdev_priv(netdev);
9325 struct e1000_hw *hw = &adapter->hw;
9326 int actual_link_speed;
9327
9328 if (hw->mac.type != e1000_82576)
9329 return -EOPNOTSUPP;
9330
9331 if (min_tx_rate)
9332 return -EINVAL;
9333
9334 actual_link_speed = igb_link_mbps(adapter->link_speed);
9335 if ((vf >= adapter->vfs_allocated_count) ||
9336 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
9337 (max_tx_rate < 0) ||
9338 (max_tx_rate > actual_link_speed))
9339 return -EINVAL;
9340
9341 adapter->vf_rate_link_speed = actual_link_speed;
9342 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
9343 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
9344
9345 return 0;
9346}
9347
9348static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
9349 bool setting)
9350{
9351 struct igb_adapter *adapter = netdev_priv(netdev);
9352 struct e1000_hw *hw = &adapter->hw;
9353 u32 reg_val, reg_offset;
9354
9355 if (!adapter->vfs_allocated_count)
9356 return -EOPNOTSUPP;
9357
9358 if (vf >= adapter->vfs_allocated_count)
9359 return -EINVAL;
9360
9361 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
9362 reg_val = rd32(reg_offset);
9363 if (setting)
9364 reg_val |= (BIT(vf) |
9365 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9366 else
9367 reg_val &= ~(BIT(vf) |
9368 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9369 wr32(reg_offset, reg_val);
9370
9371 adapter->vf_data[vf].spoofchk_enabled = setting;
9372 return 0;
9373}
9374
9375static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
9376{
9377 struct igb_adapter *adapter = netdev_priv(netdev);
9378
9379 if (vf >= adapter->vfs_allocated_count)
9380 return -EINVAL;
9381 if (adapter->vf_data[vf].trusted == setting)
9382 return 0;
9383
9384 adapter->vf_data[vf].trusted = setting;
9385
9386 dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
9387 vf, setting ? "" : "not ");
9388 return 0;
9389}
9390
9391static int igb_ndo_get_vf_config(struct net_device *netdev,
9392 int vf, struct ifla_vf_info *ivi)
9393{
9394 struct igb_adapter *adapter = netdev_priv(netdev);
9395 if (vf >= adapter->vfs_allocated_count)
9396 return -EINVAL;
9397 ivi->vf = vf;
9398 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
9399 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
9400 ivi->min_tx_rate = 0;
9401 ivi->vlan = adapter->vf_data[vf].pf_vlan;
9402 ivi->qos = adapter->vf_data[vf].pf_qos;
9403 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
9404 ivi->trusted = adapter->vf_data[vf].trusted;
9405 return 0;
9406}
9407
9408static void igb_vmm_control(struct igb_adapter *adapter)
9409{
9410 struct e1000_hw *hw = &adapter->hw;
9411 u32 reg;
9412
9413 switch (hw->mac.type) {
9414 case e1000_82575:
9415 case e1000_i210:
9416 case e1000_i211:
9417 case e1000_i354:
9418 default:
9419
9420 return;
9421 case e1000_82576:
9422
9423 reg = rd32(E1000_DTXCTL);
9424 reg |= E1000_DTXCTL_VLAN_ADDED;
9425 wr32(E1000_DTXCTL, reg);
9426
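		/* Fall through */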
9427 case e1000_82580:
9428
9429 reg = rd32(E1000_RPLOLR);
9430 reg |= E1000_RPLOLR_STRVLAN;
9431 wr32(E1000_RPLOLR, reg);
9432
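		/* Fall through */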
9433 case e1000_i350:
9434
9435 break;
9436 }
9437
9438 if (adapter->vfs_allocated_count) {
9439 igb_vmdq_set_loopback_pf(hw, true);
9440 igb_vmdq_set_replication_pf(hw, true);
9441 igb_vmdq_set_anti_spoofing_pf(hw, true,
9442 adapter->vfs_allocated_count);
9443 } else {
9444 igb_vmdq_set_loopback_pf(hw, false);
9445 igb_vmdq_set_replication_pf(hw, false);
9446 }
9447}
9448
9449static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
9450{
9451 struct e1000_hw *hw = &adapter->hw;
9452 u32 dmac_thr;
9453 u16 hwm;
9454
9455 if (hw->mac.type > e1000_82580) {
9456 if (adapter->flags & IGB_FLAG_DMAC) {
9457 u32 reg;
9458
9459
9460 wr32(E1000_DMCTXTH, 0);
9461
9462
9463
9464
9465
9466 hwm = 64 * (pba - 6);
9467 reg = rd32(E1000_FCRTC);
9468 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
9469 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
9470 & E1000_FCRTC_RTH_COAL_MASK);
9471 wr32(E1000_FCRTC, reg);
9472
9473
9474
9475
9476 dmac_thr = pba - 10;
9477 reg = rd32(E1000_DMACR);
9478 reg &= ~E1000_DMACR_DMACTHR_MASK;
9479 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
9480 & E1000_DMACR_DMACTHR_MASK);
9481
9482
9483 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
9484
9485
9486 reg |= (1000 >> 5);
9487
9488
9489 if (hw->mac.type != e1000_i354)
9490 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9491
9492 wr32(E1000_DMACR, reg);
9493
9494
9495
9496
9497 wr32(E1000_DMCRTRH, 0);
9498
9499 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
9500
9501 wr32(E1000_DMCTLX, reg);
9502
9503
9504
9505
9506 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
9507 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
9508
9509
9510
9511
9512 reg = rd32(E1000_PCIEMISC);
9513 reg &= ~E1000_PCIEMISC_LX_DECISION;
9514 wr32(E1000_PCIEMISC, reg);
9515 }
9516 } else if (hw->mac.type == e1000_82580) {
9517 u32 reg = rd32(E1000_PCIEMISC);
9518
9519 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
9520 wr32(E1000_DMACR, 0);
9521 }
9522}
9523
/**
 *  igb_read_i2c_byte - Reads 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to read
 *  @dev_addr: device address
 *  @data: value read
 *
 *  Performs byte read operation over I2C interface at
 *  a specified device address.
 **/
9534s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9535 u8 dev_addr, u8 *data)
9536{
9537 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9538 struct i2c_client *this_client = adapter->i2c_client;
9539 s32 status;
9540 u16 swfw_mask = 0;
9541
9542 if (!this_client)
9543 return E1000_ERR_I2C;
9544
9545 swfw_mask = E1000_SWFW_PHY0_SM;
9546
9547 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9548 return E1000_ERR_SWFW_SYNC;
9549
9550 status = i2c_smbus_read_byte_data(this_client, byte_offset);
9551 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9552
	if (status < 0)
		return E1000_ERR_I2C;

	*data = status;
	return 0;
9559}
9560
/**
 *  igb_write_i2c_byte - Writes 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to write
 *  @dev_addr: device address
 *  @data: value to write
 *
 *  Performs byte write operation over I2C interface at
 *  a specified device address.
 **/
9571s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9572 u8 dev_addr, u8 data)
9573{
9574 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9575 struct i2c_client *this_client = adapter->i2c_client;
9576 s32 status;
9577 u16 swfw_mask = E1000_SWFW_PHY0_SM;
9578
9579 if (!this_client)
9580 return E1000_ERR_I2C;
9581
9582 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9583 return E1000_ERR_SWFW_SYNC;
9584 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
9585 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9586
	if (status)
		return E1000_ERR_I2C;

	return 0;
9592}
9593
9594int igb_reinit_queues(struct igb_adapter *adapter)
9595{
9596 struct net_device *netdev = adapter->netdev;
9597 struct pci_dev *pdev = adapter->pdev;
9598 int err = 0;
9599
9600 if (netif_running(netdev))
9601 igb_close(netdev);
9602
9603 igb_reset_interrupt_capability(adapter);
9604
9605 if (igb_init_interrupt_scheme(adapter, true)) {
9606 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9607 return -ENOMEM;
9608 }
9609
9610 if (netif_running(netdev))
9611 err = igb_open(netdev);
9612
9613 return err;
9614}
9615
9616static void igb_nfc_filter_exit(struct igb_adapter *adapter)
9617{
9618 struct igb_nfc_filter *rule;
9619
9620 spin_lock(&adapter->nfc_lock);
9621
9622 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9623 igb_erase_filter(adapter, rule);
9624
9625 hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
9626 igb_erase_filter(adapter, rule);
9627
9628 spin_unlock(&adapter->nfc_lock);
9629}
9630
9631static void igb_nfc_filter_restore(struct igb_adapter *adapter)
9632{
9633 struct igb_nfc_filter *rule;
9634
9635 spin_lock(&adapter->nfc_lock);
9636
9637 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9638 igb_add_filter(adapter, rule);
9639
9640 spin_unlock(&adapter->nfc_lock);
9641}
9642
9643