// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2007-2014 Intel Corporation. */

4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5
6#include <linux/module.h>
7#include <linux/types.h>
8#include <linux/init.h>
9#include <linux/bitops.h>
10#include <linux/vmalloc.h>
11#include <linux/pagemap.h>
12#include <linux/netdevice.h>
13#include <linux/ipv6.h>
14#include <linux/slab.h>
15#include <net/checksum.h>
16#include <net/ip6_checksum.h>
17#include <net/pkt_sched.h>
18#include <net/pkt_cls.h>
19#include <linux/net_tstamp.h>
20#include <linux/mii.h>
21#include <linux/ethtool.h>
22#include <linux/if.h>
23#include <linux/if_vlan.h>
24#include <linux/pci.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/ip.h>
28#include <linux/tcp.h>
29#include <linux/sctp.h>
30#include <linux/if_ether.h>
31#include <linux/aer.h>
32#include <linux/prefetch.h>
33#include <linux/bpf.h>
34#include <linux/bpf_trace.h>
35#include <linux/pm_runtime.h>
36#include <linux/etherdevice.h>
37#ifdef CONFIG_IGB_DCA
38#include <linux/dca.h>
39#endif
40#include <linux/i2c.h>
41#include "igb.h"
42
enum queue_mode {
	QUEUE_MODE_STRICT_PRIORITY,
	QUEUE_MODE_STREAM_RESERVATION,
};

enum tx_queue_prio {
	TX_QUEUE_PRIO_HIGH,
	TX_QUEUE_PRIO_LOW,
};
52
53char igb_driver_name[] = "igb";
54static const char igb_driver_string[] =
55 "Intel(R) Gigabit Ethernet Network Driver";
56static const char igb_copyright[] =
57 "Copyright (c) 2007-2014 Intel Corporation.";
58
59static const struct e1000_info *igb_info_tbl[] = {
60 [board_82575] = &e1000_82575_info,
61};
62
63static const struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
83 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
84 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
86 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
87 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
88 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
89 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
90 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
91 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
92 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
93 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
94 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
95 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
96 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
97 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
98 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
99
100 {0, }
101};
102
103MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
104
105static int igb_setup_all_tx_resources(struct igb_adapter *);
106static int igb_setup_all_rx_resources(struct igb_adapter *);
107static void igb_free_all_tx_resources(struct igb_adapter *);
108static void igb_free_all_rx_resources(struct igb_adapter *);
109static void igb_setup_mrqc(struct igb_adapter *);
110static int igb_probe(struct pci_dev *, const struct pci_device_id *);
111static void igb_remove(struct pci_dev *pdev);
112static int igb_sw_init(struct igb_adapter *);
113int igb_open(struct net_device *);
114int igb_close(struct net_device *);
115static void igb_configure(struct igb_adapter *);
116static void igb_configure_tx(struct igb_adapter *);
117static void igb_configure_rx(struct igb_adapter *);
118static void igb_clean_all_tx_rings(struct igb_adapter *);
119static void igb_clean_all_rx_rings(struct igb_adapter *);
120static void igb_clean_tx_ring(struct igb_ring *);
121static void igb_clean_rx_ring(struct igb_ring *);
122static void igb_set_rx_mode(struct net_device *);
123static void igb_update_phy_info(struct timer_list *);
124static void igb_watchdog(struct timer_list *);
125static void igb_watchdog_task(struct work_struct *);
126static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
127static void igb_get_stats64(struct net_device *dev,
128 struct rtnl_link_stats64 *stats);
129static int igb_change_mtu(struct net_device *, int);
130static int igb_set_mac(struct net_device *, void *);
131static void igb_set_uta(struct igb_adapter *adapter, bool set);
132static irqreturn_t igb_intr(int irq, void *);
133static irqreturn_t igb_intr_msi(int irq, void *);
134static irqreturn_t igb_msix_other(int irq, void *);
135static irqreturn_t igb_msix_ring(int irq, void *);
136#ifdef CONFIG_IGB_DCA
137static void igb_update_dca(struct igb_q_vector *);
138static void igb_setup_dca(struct igb_adapter *);
139#endif
140static int igb_poll(struct napi_struct *, int);
141static bool igb_clean_tx_irq(struct igb_q_vector *, int);
142static int igb_clean_rx_irq(struct igb_q_vector *, int);
143static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
144static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
145static void igb_reset_task(struct work_struct *);
146static void igb_vlan_mode(struct net_device *netdev,
147 netdev_features_t features);
148static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
149static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
150static void igb_restore_vlan(struct igb_adapter *);
151static void igb_rar_set_index(struct igb_adapter *, u32);
152static void igb_ping_all_vfs(struct igb_adapter *);
153static void igb_msg_task(struct igb_adapter *);
154static void igb_vmm_control(struct igb_adapter *);
155static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
156static void igb_flush_mac_table(struct igb_adapter *);
157static int igb_available_rars(struct igb_adapter *, u8);
158static void igb_set_default_mac_filter(struct igb_adapter *);
159static int igb_uc_sync(struct net_device *, const unsigned char *);
160static int igb_uc_unsync(struct net_device *, const unsigned char *);
161static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
162static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
163static int igb_ndo_set_vf_vlan(struct net_device *netdev,
164 int vf, u16 vlan, u8 qos, __be16 vlan_proto);
165static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
166static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
167 bool setting);
168static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
169 bool setting);
170static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
171 struct ifla_vf_info *ivi);
172static void igb_check_vf_rate_limit(struct igb_adapter *);
173static void igb_nfc_filter_exit(struct igb_adapter *adapter);
174static void igb_nfc_filter_restore(struct igb_adapter *adapter);
175
176#ifdef CONFIG_PCI_IOV
177static int igb_vf_configure(struct igb_adapter *adapter, int vf);
178static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
179static int igb_disable_sriov(struct pci_dev *dev);
180static int igb_pci_disable_sriov(struct pci_dev *dev);
181#endif
182
183static int igb_suspend(struct device *);
184static int igb_resume(struct device *);
185static int igb_runtime_suspend(struct device *dev);
186static int igb_runtime_resume(struct device *dev);
187static int igb_runtime_idle(struct device *dev);
188static const struct dev_pm_ops igb_pm_ops = {
189 SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
190 SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
191 igb_runtime_idle)
192};
193static void igb_shutdown(struct pci_dev *);
194static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
195#ifdef CONFIG_IGB_DCA
196static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
197static struct notifier_block dca_notifier = {
198 .notifier_call = igb_notify_dca,
199 .next = NULL,
200 .priority = 0
201};
202#endif
203#ifdef CONFIG_PCI_IOV
204static unsigned int max_vfs;
205module_param(max_vfs, uint, 0);
206MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
207#endif
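
/* Illustrative usage (not part of the original source): loading the driver
 * with "modprobe igb max_vfs=7" asks each port to spawn seven VFs at probe
 * time.  The devices handled by this driver support at most 7 VFs per port,
 * so larger values are reduced later in the probe path (outside this
 * excerpt).
 */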
208
209static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
210 pci_channel_state_t);
211static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
212static void igb_io_resume(struct pci_dev *);
213
214static const struct pci_error_handlers igb_err_handler = {
215 .error_detected = igb_io_error_detected,
216 .slot_reset = igb_io_slot_reset,
217 .resume = igb_io_resume,
218};
219
220static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
221
222static struct pci_driver igb_driver = {
223 .name = igb_driver_name,
224 .id_table = igb_pci_tbl,
225 .probe = igb_probe,
226 .remove = igb_remove,
227#ifdef CONFIG_PM
228 .driver.pm = &igb_pm_ops,
229#endif
230 .shutdown = igb_shutdown,
231 .sriov_configure = igb_pci_sriov_configure,
232 .err_handler = &igb_err_handler
233};
234
235MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
236MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
237MODULE_LICENSE("GPL v2");
238
239#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
240static int debug = -1;
241module_param(debug, int, 0);
242MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
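
/* Example (illustrative): "modprobe igb debug=3" enables NETIF_MSG_DRV and
 * NETIF_MSG_PROBE output; the default of -1 makes netif_msg_init() fall back
 * to DEFAULT_MSG_ENABLE above (driver, probe and link messages).
 */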
243
244struct igb_reg_info {
245 u32 ofs;
246 char *name;
247};
248
249static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
252 {E1000_CTRL, "CTRL"},
253 {E1000_STATUS, "STATUS"},
254 {E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
257 {E1000_ICR, "ICR"},

	/* RX Registers */
260 {E1000_RCTL, "RCTL"},
261 {E1000_RDLEN(0), "RDLEN"},
262 {E1000_RDH(0), "RDH"},
263 {E1000_RDT(0), "RDT"},
264 {E1000_RXDCTL(0), "RXDCTL"},
265 {E1000_RDBAL(0), "RDBAL"},
266 {E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
269 {E1000_TCTL, "TCTL"},
270 {E1000_TDBAL(0), "TDBAL"},
271 {E1000_TDBAH(0), "TDBAH"},
272 {E1000_TDLEN(0), "TDLEN"},
273 {E1000_TDH(0), "TDH"},
274 {E1000_TDT(0), "TDT"},
275 {E1000_TXDCTL(0), "TXDCTL"},
276 {E1000_TDFH, "TDFH"},
277 {E1000_TDFT, "TDFT"},
278 {E1000_TDFHS, "TDFHS"},
279 {E1000_TDFPC, "TDFPC"},

	/* List Terminator */
282 {}
283};

/* igb_regdump - register printout routine */
286static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
287{
288 int n = 0;
289 char rname[16];
290 u32 regs[8];
291
292 switch (reginfo->ofs) {
293 case E1000_RDLEN(0):
294 for (n = 0; n < 4; n++)
295 regs[n] = rd32(E1000_RDLEN(n));
296 break;
297 case E1000_RDH(0):
298 for (n = 0; n < 4; n++)
299 regs[n] = rd32(E1000_RDH(n));
300 break;
301 case E1000_RDT(0):
302 for (n = 0; n < 4; n++)
303 regs[n] = rd32(E1000_RDT(n));
304 break;
305 case E1000_RXDCTL(0):
306 for (n = 0; n < 4; n++)
307 regs[n] = rd32(E1000_RXDCTL(n));
308 break;
309 case E1000_RDBAL(0):
310 for (n = 0; n < 4; n++)
311 regs[n] = rd32(E1000_RDBAL(n));
312 break;
313 case E1000_RDBAH(0):
314 for (n = 0; n < 4; n++)
315 regs[n] = rd32(E1000_RDBAH(n));
316 break;
317 case E1000_TDBAL(0):
318 for (n = 0; n < 4; n++)
319 regs[n] = rd32(E1000_TDBAL(n));
320 break;
321 case E1000_TDBAH(0):
322 for (n = 0; n < 4; n++)
323 regs[n] = rd32(E1000_TDBAH(n));
324 break;
325 case E1000_TDLEN(0):
326 for (n = 0; n < 4; n++)
327 regs[n] = rd32(E1000_TDLEN(n));
328 break;
329 case E1000_TDH(0):
330 for (n = 0; n < 4; n++)
331 regs[n] = rd32(E1000_TDH(n));
332 break;
333 case E1000_TDT(0):
334 for (n = 0; n < 4; n++)
335 regs[n] = rd32(E1000_TDT(n));
336 break;
337 case E1000_TXDCTL(0):
338 for (n = 0; n < 4; n++)
339 regs[n] = rd32(E1000_TXDCTL(n));
340 break;
341 default:
342 pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
343 return;
344 }
345
346 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
347 pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
348 regs[2], regs[3]);
349}

/* igb_dump - Print registers, Tx-rings and Rx-rings */
352static void igb_dump(struct igb_adapter *adapter)
353{
354 struct net_device *netdev = adapter->netdev;
355 struct e1000_hw *hw = &adapter->hw;
356 struct igb_reg_info *reginfo;
357 struct igb_ring *tx_ring;
358 union e1000_adv_tx_desc *tx_desc;
359 struct my_u0 { __le64 a; __le64 b; } *u0;
360 struct igb_ring *rx_ring;
361 union e1000_adv_rx_desc *rx_desc;
362 u32 staterr;
363 u16 i, n;
364
365 if (!netif_msg_hw(adapter))
366 return;
367
368
369 if (netdev) {
370 dev_info(&adapter->pdev->dev, "Net device Info\n");
371 pr_info("Device Name state trans_start\n");
372 pr_info("%-15s %016lX %016lX\n", netdev->name,
373 netdev->state, dev_trans_start(netdev));
374 }
375
376
377 dev_info(&adapter->pdev->dev, "Register Dump\n");
378 pr_info(" Register Name Value\n");
379 for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
380 reginfo->name; reginfo++) {
381 igb_regdump(hw, reginfo);
382 }
383
384
385 if (!netdev || !netif_running(netdev))
386 goto exit;
387
388 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
389 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
390 for (n = 0; n < adapter->num_tx_queues; n++) {
391 struct igb_tx_buffer *buffer_info;
392 tx_ring = adapter->tx_ring[n];
393 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
394 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
395 n, tx_ring->next_to_use, tx_ring->next_to_clean,
396 (u64)dma_unmap_addr(buffer_info, dma),
397 dma_unmap_len(buffer_info, len),
398 buffer_info->next_to_watch,
399 (u64)buffer_info->time_stamp);
400 }
401
402
403 if (!netif_msg_tx_done(adapter))
404 goto rx_ring_summary;
405
406 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* The "T" lines below dump each Advanced Transmit Descriptor as two
	 * 64-bit words (buffer address and cmd/status fields) followed by the
	 * driver's software state for that descriptor (DMA handle, length,
	 * next_to_watch, timestamp and skb pointer).
	 */

419 for (n = 0; n < adapter->num_tx_queues; n++) {
420 tx_ring = adapter->tx_ring[n];
421 pr_info("------------------------------------\n");
422 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
423 pr_info("------------------------------------\n");
424 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
425
426 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
427 const char *next_desc;
428 struct igb_tx_buffer *buffer_info;
429 tx_desc = IGB_TX_DESC(tx_ring, i);
430 buffer_info = &tx_ring->tx_buffer_info[i];
431 u0 = (struct my_u0 *)tx_desc;
432 if (i == tx_ring->next_to_use &&
433 i == tx_ring->next_to_clean)
434 next_desc = " NTC/U";
435 else if (i == tx_ring->next_to_use)
436 next_desc = " NTU";
437 else if (i == tx_ring->next_to_clean)
438 next_desc = " NTC";
439 else
440 next_desc = "";
441
442 pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
443 i, le64_to_cpu(u0->a),
444 le64_to_cpu(u0->b),
445 (u64)dma_unmap_addr(buffer_info, dma),
446 dma_unmap_len(buffer_info, len),
447 buffer_info->next_to_watch,
448 (u64)buffer_info->time_stamp,
449 buffer_info->skb, next_desc);
450
451 if (netif_msg_pktdata(adapter) && buffer_info->skb)
452 print_hex_dump(KERN_INFO, "",
453 DUMP_PREFIX_ADDRESS,
454 16, 1, buffer_info->skb->data,
455 dma_unmap_len(buffer_info, len),
456 true);
457 }
458 }
459
460
461rx_ring_summary:
462 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
463 pr_info("Queue [NTU] [NTC]\n");
464 for (n = 0; n < adapter->num_rx_queues; n++) {
465 rx_ring = adapter->rx_ring[n];
466 pr_info(" %5d %5X %5X\n",
467 n, rx_ring->next_to_use, rx_ring->next_to_clean);
468 }
469
470
471 if (!netif_msg_rx_status(adapter))
472 goto exit;
473
474 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Each Rx descriptor is dumped in one of two layouts: "R " lines show
	 * the Advanced Receive Descriptor read format (packet buffer and
	 * header buffer addresses plus the software DMA handle), while "RWB"
	 * lines show the write-back format reported by hardware once the
	 * Descriptor Done bit is set.
	 */

497 for (n = 0; n < adapter->num_rx_queues; n++) {
498 rx_ring = adapter->rx_ring[n];
499 pr_info("------------------------------------\n");
500 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
501 pr_info("------------------------------------\n");
502 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
503 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
504
505 for (i = 0; i < rx_ring->count; i++) {
506 const char *next_desc;
507 struct igb_rx_buffer *buffer_info;
508 buffer_info = &rx_ring->rx_buffer_info[i];
509 rx_desc = IGB_RX_DESC(rx_ring, i);
510 u0 = (struct my_u0 *)rx_desc;
511 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
512
513 if (i == rx_ring->next_to_use)
514 next_desc = " NTU";
515 else if (i == rx_ring->next_to_clean)
516 next_desc = " NTC";
517 else
518 next_desc = "";
519
520 if (staterr & E1000_RXD_STAT_DD) {
521
522 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
523 "RWB", i,
524 le64_to_cpu(u0->a),
525 le64_to_cpu(u0->b),
526 next_desc);
527 } else {
528 pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
529 "R ", i,
530 le64_to_cpu(u0->a),
531 le64_to_cpu(u0->b),
532 (u64)buffer_info->dma,
533 next_desc);
534
535 if (netif_msg_pktdata(adapter) &&
536 buffer_info->dma && buffer_info->page) {
537 print_hex_dump(KERN_INFO, "",
538 DUMP_PREFIX_ADDRESS,
539 16, 1,
540 page_address(buffer_info->page) +
541 buffer_info->page_offset,
542 igb_rx_bufsz(rx_ring), true);
543 }
544 }
545 }
546 }
547
548exit:
549 return;
550}

/**
 *  igb_get_i2c_data - Reads the I2C SDA data bit
 *  @data: opaque pointer to adapter struct
 *
 *  Returns the I2C data bit value
 **/
558static int igb_get_i2c_data(void *data)
559{
560 struct igb_adapter *adapter = (struct igb_adapter *)data;
561 struct e1000_hw *hw = &adapter->hw;
562 s32 i2cctl = rd32(E1000_I2CPARAMS);
563
564 return !!(i2cctl & E1000_I2C_DATA_IN);
565}

/**
 *  igb_set_i2c_data - Sets the I2C data bit
 *  @data: opaque pointer to adapter struct
 *  @state: I2C data value (0 or 1) to set
 *
 *  Sets the I2C data bit
 **/
574static void igb_set_i2c_data(void *data, int state)
575{
576 struct igb_adapter *adapter = (struct igb_adapter *)data;
577 struct e1000_hw *hw = &adapter->hw;
578 s32 i2cctl = rd32(E1000_I2CPARAMS);
579
580 if (state) {
581 i2cctl |= E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
582 } else {
583 i2cctl &= ~E1000_I2C_DATA_OE_N;
584 i2cctl &= ~E1000_I2C_DATA_OUT;
585 }
586
587 wr32(E1000_I2CPARAMS, i2cctl);
588 wrfl();
589}

/**
 *  igb_set_i2c_clk - Sets the I2C SCL clock
 *  @data: opaque pointer to adapter struct
 *  @state: state to set clock
 *
 *  Sets the I2C clock line to state
 **/
598static void igb_set_i2c_clk(void *data, int state)
599{
600 struct igb_adapter *adapter = (struct igb_adapter *)data;
601 struct e1000_hw *hw = &adapter->hw;
602 s32 i2cctl = rd32(E1000_I2CPARAMS);
603
604 if (state) {
605 i2cctl |= E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N;
606 } else {
607 i2cctl &= ~E1000_I2C_CLK_OUT;
608 i2cctl &= ~E1000_I2C_CLK_OE_N;
609 }
610 wr32(E1000_I2CPARAMS, i2cctl);
611 wrfl();
612}

/**
 *  igb_get_i2c_clk - Gets the I2C SCL clock state
 *  @data: opaque pointer to adapter struct
 *
 *  Gets the I2C clock state
 **/
620static int igb_get_i2c_clk(void *data)
621{
622 struct igb_adapter *adapter = (struct igb_adapter *)data;
623 struct e1000_hw *hw = &adapter->hw;
624 s32 i2cctl = rd32(E1000_I2CPARAMS);
625
626 return !!(i2cctl & E1000_I2C_CLK_IN);
627}
628
629static const struct i2c_algo_bit_data igb_i2c_algo = {
630 .setsda = igb_set_i2c_data,
631 .setscl = igb_set_i2c_clk,
632 .getsda = igb_get_i2c_data,
633 .getscl = igb_get_i2c_clk,
634 .udelay = 5,
635 .timeout = 20,
636};
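
/* The bit-banged algorithm above is attached to an i2c_adapter elsewhere in
 * this driver (igb_init_i2c(), outside this excerpt).  A rough sketch of that
 * registration, for illustration only:
 *
 *	adapter->i2c_algo = igb_i2c_algo;
 *	adapter->i2c_algo.data = adapter;
 *	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
 *	status = i2c_bit_add_bus(&adapter->i2c_adap);
 */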

/**
 *  igb_get_hw_dev - return device
 *  @hw: pointer to hardware structure
 *
 *  used by hardware layer to print debugging information
 **/
644struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
645{
646 struct igb_adapter *adapter = hw->back;
647 return adapter->netdev;
648}

/**
 *  igb_init_module - Driver Registration Routine
 *
 *  igb_init_module is the first routine called when the driver is
 *  loaded. All it does is register with the PCI subsystem.
 **/
656static int __init igb_init_module(void)
657{
658 int ret;
659
660 pr_info("%s\n", igb_driver_string);
661 pr_info("%s\n", igb_copyright);
662
663#ifdef CONFIG_IGB_DCA
664 dca_register_notify(&dca_notifier);
665#endif
666 ret = pci_register_driver(&igb_driver);
667 return ret;
668}
669
670module_init(igb_init_module);

/**
 *  igb_exit_module - Driver Exit Cleanup Routine
 *
 *  igb_exit_module is called just before the driver is removed
 *  from memory.
 **/
678static void __exit igb_exit_module(void)
679{
680#ifdef CONFIG_IGB_DCA
681 dca_unregister_notify(&dca_notifier);
682#endif
683 pci_unregister_driver(&igb_driver);
684}
685
686module_exit(igb_exit_module);
687
688#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
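
/* Worked example of the mapping above (illustrative): Q_IDX_82576(0) = 0,
 * Q_IDX_82576(1) = 8, Q_IDX_82576(2) = 1, Q_IDX_82576(3) = 9, i.e. successive
 * logical queues alternate between the low and high half of the 82576 queue
 * space so that the PF's RSS queues interleave with the per-VF queue pairs.
 */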

/**
 *  igb_cache_ring_register - Descriptor ring to register mapping
 *  @adapter: board private structure to initialize
 *
 *  Once we know the feature-set enabled for the device, we'll cache
 *  the register offset the descriptor ring is assigned to.
 **/
696static void igb_cache_ring_register(struct igb_adapter *adapter)
697{
698 int i = 0, j = 0;
699 u32 rbase_offset = adapter->vfs_allocated_count;
700
701 switch (adapter->hw.mac.type) {
702 case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
708 if (adapter->vfs_allocated_count) {
709 for (; i < adapter->rss_queues; i++)
710 adapter->rx_ring[i]->reg_idx = rbase_offset +
711 Q_IDX_82576(i);
712 }
713 fallthrough;
714 case e1000_82575:
715 case e1000_82580:
716 case e1000_i350:
717 case e1000_i354:
718 case e1000_i210:
719 case e1000_i211:
720 default:
721 for (; i < adapter->num_rx_queues; i++)
722 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
723 for (; j < adapter->num_tx_queues; j++)
724 adapter->tx_ring[j]->reg_idx = rbase_offset + j;
725 break;
726 }
727}
728
729u32 igb_rd32(struct e1000_hw *hw, u32 reg)
730{
731 struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
732 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
733 u32 value = 0;
734
735 if (E1000_REMOVED(hw_addr))
736 return ~value;
737
738 value = readl(&hw_addr[reg]);
739
740
741 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
742 struct net_device *netdev = igb->netdev;
743 hw->hw_addr = NULL;
744 netdev_err(netdev, "PCIe link lost\n");
745 WARN(pci_device_is_present(igb->pdev),
746 "igb: Failed to read reg 0x%x!\n", reg);
747 }
748
749 return value;
750}
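
/* Illustrative note: a surprise-removed adapter reads back all ones, so a
 * read such as
 *
 *	u32 status = rd32(E1000_STATUS);
 *
 * (rd32() being the wrapper around igb_rd32() used throughout this file)
 * returns 0xFFFFFFFF, and the check above then latches hw->hw_addr to NULL so
 * that later accesses fail fast instead of touching a dead BAR.
 */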

/**
 *  igb_write_ivar - configure ivar for given MSI-X vector
 *  @hw: pointer to the HW structure
 *  @msix_vector: vector number we are allocating to a given ring
 *  @index: row index of IVAR register to write within IVAR table
 *  @offset: column offset of in IVAR, should be multiple of 8
 *
 *  This function is intended to handle the writing of the IVAR register
 *  for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 *  each containing a cause allocation for an Rx and Tx ring, and a
 *  variable number of rows depending on the number of queues supported.
 **/
764static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
765 int index, int offset)
766{
767 u32 ivar = array_rd32(E1000_IVAR0, index);
768
769
770 ivar &= ~((u32)0xFF << offset);
771
772
773 ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
774
775 array_wr32(E1000_IVAR0, index, ivar);
776}
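
/* Worked example (illustrative): on the 82580/i350/i210 family,
 * igb_assign_vector() below maps Rx queue 5 to MSI-X vector v with
 *
 *	igb_write_ivar(hw, v, 5 >> 1, (5 & 0x1) << 4);
 *
 * i.e. row index 2, bit offset 16, so the vector number lands in bits 23:16
 * of IVAR register 2; the Tx entry for the same queue sits 8 bits higher.
 */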
777
778#define IGB_N0_QUEUE -1
779static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
780{
781 struct igb_adapter *adapter = q_vector->adapter;
782 struct e1000_hw *hw = &adapter->hw;
783 int rx_queue = IGB_N0_QUEUE;
784 int tx_queue = IGB_N0_QUEUE;
785 u32 msixbm = 0;
786
787 if (q_vector->rx.ring)
788 rx_queue = q_vector->rx.ring->reg_idx;
789 if (q_vector->tx.ring)
790 tx_queue = q_vector->tx.ring->reg_idx;
791
792 switch (hw->mac.type) {
793 case e1000_82575:
794
795
796
797
798
799 if (rx_queue > IGB_N0_QUEUE)
800 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
801 if (tx_queue > IGB_N0_QUEUE)
802 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
803 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
804 msixbm |= E1000_EIMS_OTHER;
805 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
806 q_vector->eims_value = msixbm;
807 break;
808 case e1000_82576:
		/* 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
814 if (rx_queue > IGB_N0_QUEUE)
815 igb_write_ivar(hw, msix_vector,
816 rx_queue & 0x7,
817 (rx_queue & 0x8) << 1);
818 if (tx_queue > IGB_N0_QUEUE)
819 igb_write_ivar(hw, msix_vector,
820 tx_queue & 0x7,
821 ((tx_queue & 0x8) << 1) + 8);
822 q_vector->eims_value = BIT(msix_vector);
823 break;
824 case e1000_82580:
825 case e1000_i350:
826 case e1000_i354:
827 case e1000_i210:
828 case e1000_i211:
		/* On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So instead of using the lower 3 bits
		 * as the row index we use bit 0 as the column offset and the
		 * remaining bits as the row index.
		 */
835 if (rx_queue > IGB_N0_QUEUE)
836 igb_write_ivar(hw, msix_vector,
837 rx_queue >> 1,
838 (rx_queue & 0x1) << 4);
839 if (tx_queue > IGB_N0_QUEUE)
840 igb_write_ivar(hw, msix_vector,
841 tx_queue >> 1,
842 ((tx_queue & 0x1) << 4) + 8);
843 q_vector->eims_value = BIT(msix_vector);
844 break;
845 default:
846 BUG();
847 break;
848 }
849
850
851 adapter->eims_enable_mask |= q_vector->eims_value;
852
853
854 q_vector->set_itr = 1;
855}

/**
 *  igb_configure_msix - Configure MSI-X hardware
 *  @adapter: board private structure to initialize
 *
 *  igb_configure_msix sets up the hardware to properly
 *  generate MSI-X interrupts.
 **/
864static void igb_configure_msix(struct igb_adapter *adapter)
865{
866 u32 tmp;
867 int i, vector = 0;
868 struct e1000_hw *hw = &adapter->hw;
869
870 adapter->eims_enable_mask = 0;
871
872
873 switch (hw->mac.type) {
874 case e1000_82575:
875 tmp = rd32(E1000_CTRL_EXT);
876
877 tmp |= E1000_CTRL_EXT_PBA_CLR;
878
879
880 tmp |= E1000_CTRL_EXT_EIAME;
881 tmp |= E1000_CTRL_EXT_IRCA;
882
883 wr32(E1000_CTRL_EXT, tmp);
884
885
886 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
887 adapter->eims_other = E1000_EIMS_OTHER;
888
889 break;
890
891 case e1000_82576:
892 case e1000_82580:
893 case e1000_i350:
894 case e1000_i354:
895 case e1000_i210:
896 case e1000_i211:
897
898
899
900 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
901 E1000_GPIE_PBA | E1000_GPIE_EIAME |
902 E1000_GPIE_NSICR);
903
904
905 adapter->eims_other = BIT(vector);
906 tmp = (vector++ | E1000_IVAR_VALID) << 8;
907
908 wr32(E1000_IVAR_MISC, tmp);
909 break;
910 default:
911
912 break;
913 }
914
915 adapter->eims_enable_mask |= adapter->eims_other;
916
917 for (i = 0; i < adapter->num_q_vectors; i++)
918 igb_assign_vector(adapter->q_vector[i], vector++);
919
920 wrfl();
921}
922

/**
 *  igb_request_msix - Initialize MSI-X interrupts
 *  @adapter: board private structure to initialize
 *
 *  igb_request_msix allocates MSI-X vectors and requests interrupts from the
 *  kernel.
 **/
930static int igb_request_msix(struct igb_adapter *adapter)
931{
932 unsigned int num_q_vectors = adapter->num_q_vectors;
933 struct net_device *netdev = adapter->netdev;
934 int i, err = 0, vector = 0, free_vector = 0;
935
936 err = request_irq(adapter->msix_entries[vector].vector,
937 igb_msix_other, 0, netdev->name, adapter);
938 if (err)
939 goto err_out;
940
941 if (num_q_vectors > MAX_Q_VECTORS) {
942 num_q_vectors = MAX_Q_VECTORS;
943 dev_warn(&adapter->pdev->dev,
944 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
945 adapter->num_q_vectors, MAX_Q_VECTORS);
946 }
947 for (i = 0; i < num_q_vectors; i++) {
948 struct igb_q_vector *q_vector = adapter->q_vector[i];
949
950 vector++;
951
952 q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
953
954 if (q_vector->rx.ring && q_vector->tx.ring)
955 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
956 q_vector->rx.ring->queue_index);
957 else if (q_vector->tx.ring)
958 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
959 q_vector->tx.ring->queue_index);
960 else if (q_vector->rx.ring)
961 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
962 q_vector->rx.ring->queue_index);
963 else
964 sprintf(q_vector->name, "%s-unused", netdev->name);
965
966 err = request_irq(adapter->msix_entries[vector].vector,
967 igb_msix_ring, 0, q_vector->name,
968 q_vector);
969 if (err)
970 goto err_free;
971 }
972
973 igb_configure_msix(adapter);
974 return 0;
975
976err_free:
977
978 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
979
980 vector--;
981 for (i = 0; i < vector; i++) {
982 free_irq(adapter->msix_entries[free_vector++].vector,
983 adapter->q_vector[i]);
984 }
985err_out:
986 return err;
987}
988

/**
 *  igb_free_q_vector - Free memory allocated for specific interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_idx: Index of vector to be freed
 *
 *  This function frees the memory allocated to the q_vector.
 **/
996static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
997{
998 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
999
1000 adapter->q_vector[v_idx] = NULL;

	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
1005 if (q_vector)
1006 kfree_rcu(q_vector, rcu);
1007}
1008

/**
 *  igb_reset_q_vector - Reset config for interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_idx: Index of vector to be reset
 *
 *  If NAPI is enabled it will delete any references to the
 *  NAPI struct. This is preparation for igb_free_q_vector.
 **/
1017static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1018{
1019 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1020
1021
1022
1023
1024 if (!q_vector)
1025 return;
1026
1027 if (q_vector->tx.ring)
1028 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1029
1030 if (q_vector->rx.ring)
1031 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
1032
1033 netif_napi_del(&q_vector->napi);
1034
1035}
1036
1037static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1038{
1039 int v_idx = adapter->num_q_vectors;
1040
1041 if (adapter->flags & IGB_FLAG_HAS_MSIX)
1042 pci_disable_msix(adapter->pdev);
1043 else if (adapter->flags & IGB_FLAG_HAS_MSI)
1044 pci_disable_msi(adapter->pdev);
1045
1046 while (v_idx--)
1047 igb_reset_q_vector(adapter, v_idx);
1048}
1049
/**
 *  igb_free_q_vectors - Free memory allocated for interrupt vectors
 *  @adapter: board private structure to initialize
 *
 *  This function frees the memory allocated to the q_vectors.  In addition if
 *  NAPI is enabled it will delete any references to the NAPI struct prior
 *  to freeing the q_vector.
 **/
1058static void igb_free_q_vectors(struct igb_adapter *adapter)
1059{
1060 int v_idx = adapter->num_q_vectors;
1061
1062 adapter->num_tx_queues = 0;
1063 adapter->num_rx_queues = 0;
1064 adapter->num_q_vectors = 0;
1065
1066 while (v_idx--) {
1067 igb_reset_q_vector(adapter, v_idx);
1068 igb_free_q_vector(adapter, v_idx);
1069 }
1070}
1071
/**
 *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *  @adapter: board private structure to initialize
 *
 *  This function resets the device so that it has 0 Rx queues, Tx queues, and
 *  MSI-X interrupts allocated.
 */
1079static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1080{
1081 igb_free_q_vectors(adapter);
1082 igb_reset_interrupt_capability(adapter);
1083}

/**
 *  igb_set_interrupt_capability - set MSI or MSI-X if supported
 *  @adapter: board private structure to initialize
 *  @msix: boolean value of MSIX capability
 *
 *  Attempt to configure interrupts using the best available
 *  capabilities of the hardware and kernel.
 **/
1093static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1094{
1095 int err;
1096 int numvecs, i;
1097
1098 if (!msix)
1099 goto msi_only;
1100 adapter->flags |= IGB_FLAG_HAS_MSIX;
1101
1102
1103 adapter->num_rx_queues = adapter->rss_queues;
1104 if (adapter->vfs_allocated_count)
1105 adapter->num_tx_queues = 1;
1106 else
1107 adapter->num_tx_queues = adapter->rss_queues;
1108
1109
1110 numvecs = adapter->num_rx_queues;
1111
1112
1113 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1114 numvecs += adapter->num_tx_queues;
1115
1116
1117 adapter->num_q_vectors = numvecs;
1118
1119
1120 numvecs++;
1121 for (i = 0; i < numvecs; i++)
1122 adapter->msix_entries[i].entry = i;
1123
1124 err = pci_enable_msix_range(adapter->pdev,
1125 adapter->msix_entries,
1126 numvecs,
1127 numvecs);
1128 if (err > 0)
1129 return;
1130
1131 igb_reset_interrupt_capability(adapter);
1132
1133
1134msi_only:
1135 adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1136#ifdef CONFIG_PCI_IOV
1137
1138 if (adapter->vf_data) {
1139 struct e1000_hw *hw = &adapter->hw;
1140
1141 pci_disable_sriov(adapter->pdev);
1142 msleep(500);
1143
1144 kfree(adapter->vf_mac_list);
1145 adapter->vf_mac_list = NULL;
1146 kfree(adapter->vf_data);
1147 adapter->vf_data = NULL;
1148 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1149 wrfl();
1150 msleep(100);
1151 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1152 }
1153#endif
1154 adapter->vfs_allocated_count = 0;
1155 adapter->rss_queues = 1;
1156 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1157 adapter->num_rx_queues = 1;
1158 adapter->num_tx_queues = 1;
1159 adapter->num_q_vectors = 1;
1160 if (!pci_enable_msi(adapter->pdev))
1161 adapter->flags |= IGB_FLAG_HAS_MSI;
1162}
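
/* Worked example (illustrative): with rss_queues = 4, no VFs and
 * IGB_FLAG_QUEUE_PAIRS set, the function above ends up requesting 4 queue
 * vectors plus one for link/other causes, i.e. pci_enable_msix_range() is
 * asked for exactly 5 vectors; with queue pairing disabled it would be
 * 4 Rx + 4 Tx + 1 = 9 vectors.
 */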
1163
1164static void igb_add_ring(struct igb_ring *ring,
1165 struct igb_ring_container *head)
1166{
1167 head->ring = ring;
1168 head->count++;
1169}

/**
 *  igb_alloc_q_vector - Allocate memory for a single interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_count: q_vectors allocated on adapter, used for ring interleaving
 *  @v_idx: index of vector in adapter struct
 *  @txr_count: total number of Tx rings to allocate
 *  @txr_idx: index of first Tx ring to allocate
 *  @rxr_count: total number of Rx rings to allocate
 *  @rxr_idx: index of first Rx ring to allocate
 *
 *  We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
1183static int igb_alloc_q_vector(struct igb_adapter *adapter,
1184 int v_count, int v_idx,
1185 int txr_count, int txr_idx,
1186 int rxr_count, int rxr_idx)
1187{
1188 struct igb_q_vector *q_vector;
1189 struct igb_ring *ring;
1190 int ring_count;
1191 size_t size;
1192
1193
1194 if (txr_count > 1 || rxr_count > 1)
1195 return -ENOMEM;
1196
1197 ring_count = txr_count + rxr_count;
1198 size = struct_size(q_vector, ring, ring_count);
1199
1200
1201 q_vector = adapter->q_vector[v_idx];
1202 if (!q_vector) {
1203 q_vector = kzalloc(size, GFP_KERNEL);
1204 } else if (size > ksize(q_vector)) {
1205 kfree_rcu(q_vector, rcu);
1206 q_vector = kzalloc(size, GFP_KERNEL);
1207 } else {
1208 memset(q_vector, 0, size);
1209 }
1210 if (!q_vector)
1211 return -ENOMEM;
1212
1213
1214 netif_napi_add(adapter->netdev, &q_vector->napi,
1215 igb_poll, 64);
1216
1217
1218 adapter->q_vector[v_idx] = q_vector;
1219 q_vector->adapter = adapter;
1220
1221
1222 q_vector->tx.work_limit = adapter->tx_work_limit;
1223
1224
1225 q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
1226 q_vector->itr_val = IGB_START_ITR;
1227
1228
1229 ring = q_vector->ring;
1230
1231
1232 if (rxr_count) {
1233
1234 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1235 q_vector->itr_val = adapter->rx_itr_setting;
1236 } else {
1237
1238 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1239 q_vector->itr_val = adapter->tx_itr_setting;
1240 }
1241
1242 if (txr_count) {
1243
1244 ring->dev = &adapter->pdev->dev;
1245 ring->netdev = adapter->netdev;
1246
1247
1248 ring->q_vector = q_vector;
1249
1250
1251 igb_add_ring(ring, &q_vector->tx);
1252
1253
1254 if (adapter->hw.mac.type == e1000_82575)
1255 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1256
1257
1258 ring->count = adapter->tx_ring_count;
1259 ring->queue_index = txr_idx;
1260
1261 ring->cbs_enable = false;
1262 ring->idleslope = 0;
1263 ring->sendslope = 0;
1264 ring->hicredit = 0;
1265 ring->locredit = 0;
1266
1267 u64_stats_init(&ring->tx_syncp);
1268 u64_stats_init(&ring->tx_syncp2);
1269
1270
1271 adapter->tx_ring[txr_idx] = ring;
1272
1273
1274 ring++;
1275 }
1276
1277 if (rxr_count) {
1278
1279 ring->dev = &adapter->pdev->dev;
1280 ring->netdev = adapter->netdev;
1281
1282
1283 ring->q_vector = q_vector;
1284
1285
1286 igb_add_ring(ring, &q_vector->rx);
1287
1288
1289 if (adapter->hw.mac.type >= e1000_82576)
1290 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1291
1292
1293
1294
1295 if (adapter->hw.mac.type >= e1000_i350)
1296 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1297
1298
1299 ring->count = adapter->rx_ring_count;
1300 ring->queue_index = rxr_idx;
1301
1302 u64_stats_init(&ring->rx_syncp);
1303
1304
1305 adapter->rx_ring[rxr_idx] = ring;
1306 }
1307
1308 return 0;
1309}
1310
/**
 *  igb_alloc_q_vectors - Allocate memory for interrupt vectors
 *  @adapter: board private structure to initialize
 *
 *  We allocate one q_vector per queue interrupt.  If allocation fails we
 *  return -ENOMEM.
 **/
1319static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1320{
1321 int q_vectors = adapter->num_q_vectors;
1322 int rxr_remaining = adapter->num_rx_queues;
1323 int txr_remaining = adapter->num_tx_queues;
1324 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1325 int err;
1326
1327 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1328 for (; rxr_remaining; v_idx++) {
1329 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1330 0, 0, 1, rxr_idx);
1331
1332 if (err)
1333 goto err_out;
1334
1335
1336 rxr_remaining--;
1337 rxr_idx++;
1338 }
1339 }
1340
1341 for (; v_idx < q_vectors; v_idx++) {
1342 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1343 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1344
1345 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1346 tqpv, txr_idx, rqpv, rxr_idx);
1347
1348 if (err)
1349 goto err_out;
1350
1351
1352 rxr_remaining -= rqpv;
1353 txr_remaining -= tqpv;
1354 rxr_idx++;
1355 txr_idx++;
1356 }
1357
1358 return 0;
1359
1360err_out:
1361 adapter->num_tx_queues = 0;
1362 adapter->num_rx_queues = 0;
1363 adapter->num_q_vectors = 0;
1364
1365 while (v_idx--)
1366 igb_free_q_vector(adapter, v_idx);
1367
1368 return -ENOMEM;
1369}
1370
/**
 *  igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *  @adapter: board private structure to initialize
 *  @msix: boolean value of MSIX capability
 *
 *  This function initializes the interrupts and allocates all of the queues.
 **/
1378static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1379{
1380 struct pci_dev *pdev = adapter->pdev;
1381 int err;
1382
1383 igb_set_interrupt_capability(adapter, msix);
1384
1385 err = igb_alloc_q_vectors(adapter);
1386 if (err) {
1387 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1388 goto err_alloc_q_vectors;
1389 }
1390
1391 igb_cache_ring_register(adapter);
1392
1393 return 0;
1394
1395err_alloc_q_vectors:
1396 igb_reset_interrupt_capability(adapter);
1397 return err;
1398}
1399
/**
 *  igb_request_irq - initialize interrupts
 *  @adapter: board private structure to initialize
 *
 *  Attempts to configure interrupts using the best available
 *  capabilities of the hardware and kernel.
 **/
1407static int igb_request_irq(struct igb_adapter *adapter)
1408{
1409 struct net_device *netdev = adapter->netdev;
1410 struct pci_dev *pdev = adapter->pdev;
1411 int err = 0;
1412
1413 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1414 err = igb_request_msix(adapter);
1415 if (!err)
1416 goto request_done;
1417
1418 igb_free_all_tx_resources(adapter);
1419 igb_free_all_rx_resources(adapter);
1420
1421 igb_clear_interrupt_scheme(adapter);
1422 err = igb_init_interrupt_scheme(adapter, false);
1423 if (err)
1424 goto request_done;
1425
1426 igb_setup_all_tx_resources(adapter);
1427 igb_setup_all_rx_resources(adapter);
1428 igb_configure(adapter);
1429 }
1430
1431 igb_assign_vector(adapter->q_vector[0], 0);
1432
1433 if (adapter->flags & IGB_FLAG_HAS_MSI) {
1434 err = request_irq(pdev->irq, igb_intr_msi, 0,
1435 netdev->name, adapter);
1436 if (!err)
1437 goto request_done;
1438
1439
1440 igb_reset_interrupt_capability(adapter);
1441 adapter->flags &= ~IGB_FLAG_HAS_MSI;
1442 }
1443
1444 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
1445 netdev->name, adapter);
1446
1447 if (err)
1448 dev_err(&pdev->dev, "Error %d getting interrupt\n",
1449 err);
1450
1451request_done:
1452 return err;
1453}
1454
1455static void igb_free_irq(struct igb_adapter *adapter)
1456{
1457 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1458 int vector = 0, i;
1459
1460 free_irq(adapter->msix_entries[vector++].vector, adapter);
1461
1462 for (i = 0; i < adapter->num_q_vectors; i++)
1463 free_irq(adapter->msix_entries[vector++].vector,
1464 adapter->q_vector[i]);
1465 } else {
1466 free_irq(adapter->pdev->irq, adapter);
1467 }
1468}
1469
/**
 *  igb_irq_disable - Mask off interrupt generation on the NIC
 *  @adapter: board private structure
 **/
1474static void igb_irq_disable(struct igb_adapter *adapter)
1475{
1476 struct e1000_hw *hw = &adapter->hw;
1477
	/* we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
1482 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1483 u32 regval = rd32(E1000_EIAM);
1484
1485 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1486 wr32(E1000_EIMC, adapter->eims_enable_mask);
1487 regval = rd32(E1000_EIAC);
1488 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
1489 }
1490
1491 wr32(E1000_IAM, 0);
1492 wr32(E1000_IMC, ~0);
1493 wrfl();
1494 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1495 int i;
1496
1497 for (i = 0; i < adapter->num_q_vectors; i++)
1498 synchronize_irq(adapter->msix_entries[i].vector);
1499 } else {
1500 synchronize_irq(adapter->pdev->irq);
1501 }
1502}
1503
/**
 *  igb_irq_enable - Enable default interrupt generation settings
 *  @adapter: board private structure
 **/
1508static void igb_irq_enable(struct igb_adapter *adapter)
1509{
1510 struct e1000_hw *hw = &adapter->hw;
1511
1512 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1513 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1514 u32 regval = rd32(E1000_EIAC);
1515
1516 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1517 regval = rd32(E1000_EIAM);
1518 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
1519 wr32(E1000_EIMS, adapter->eims_enable_mask);
1520 if (adapter->vfs_allocated_count) {
1521 wr32(E1000_MBVFIMR, 0xFF);
1522 ims |= E1000_IMS_VMMB;
1523 }
1524 wr32(E1000_IMS, ims);
1525 } else {
1526 wr32(E1000_IMS, IMS_ENABLE_MASK |
1527 E1000_IMS_DRSTA);
1528 wr32(E1000_IAM, IMS_ENABLE_MASK |
1529 E1000_IMS_DRSTA);
1530 }
1531}
1532
1533static void igb_update_mng_vlan(struct igb_adapter *adapter)
1534{
1535 struct e1000_hw *hw = &adapter->hw;
1536 u16 pf_id = adapter->vfs_allocated_count;
1537 u16 vid = adapter->hw.mng_cookie.vlan_id;
1538 u16 old_vid = adapter->mng_vlan_id;
1539
1540 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1541
1542 igb_vfta_set(hw, vid, pf_id, true, true);
1543 adapter->mng_vlan_id = vid;
1544 } else {
1545 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1546 }
1547
1548 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1549 (vid != old_vid) &&
1550 !test_bit(old_vid, adapter->active_vlans)) {
1551
1552 igb_vfta_set(hw, vid, pf_id, false, true);
1553 }
1554}
1555
/**
 *  igb_release_hw_control - release control of the h/w to f/w
 *  @adapter: address of board private structure
 *
 *  igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 *  For ASF and Pass Through versions of f/w this means that the
 *  driver is no longer loaded.
 **/
1564static void igb_release_hw_control(struct igb_adapter *adapter)
1565{
1566 struct e1000_hw *hw = &adapter->hw;
1567 u32 ctrl_ext;
1568
1569
1570 ctrl_ext = rd32(E1000_CTRL_EXT);
1571 wr32(E1000_CTRL_EXT,
1572 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1573}
1574
/**
 *  igb_get_hw_control - get control of the h/w from f/w
 *  @adapter: address of board private structure
 *
 *  igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 *  For ASF and Pass Through versions of f/w this means that
 *  the driver is loaded.
 **/
1583static void igb_get_hw_control(struct igb_adapter *adapter)
1584{
1585 struct e1000_hw *hw = &adapter->hw;
1586 u32 ctrl_ext;
1587
1588
1589 ctrl_ext = rd32(E1000_CTRL_EXT);
1590 wr32(E1000_CTRL_EXT,
1591 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1592}
1593
1594static void enable_fqtss(struct igb_adapter *adapter, bool enable)
1595{
1596 struct net_device *netdev = adapter->netdev;
1597 struct e1000_hw *hw = &adapter->hw;
1598
1599 WARN_ON(hw->mac.type != e1000_i210);
1600
1601 if (enable)
1602 adapter->flags |= IGB_FLAG_FQTSS;
1603 else
1604 adapter->flags &= ~IGB_FLAG_FQTSS;
1605
1606 if (netif_running(netdev))
1607 schedule_work(&adapter->reset_task);
1608}
1609
1610static bool is_fqtss_enabled(struct igb_adapter *adapter)
1611{
1612 return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
1613}
1614
1615static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
1616 enum tx_queue_prio prio)
1617{
1618 u32 val;
1619
1620 WARN_ON(hw->mac.type != e1000_i210);
1621 WARN_ON(queue < 0 || queue > 4);
1622
1623 val = rd32(E1000_I210_TXDCTL(queue));
1624
1625 if (prio == TX_QUEUE_PRIO_HIGH)
1626 val |= E1000_TXDCTL_PRIORITY;
1627 else
1628 val &= ~E1000_TXDCTL_PRIORITY;
1629
1630 wr32(E1000_I210_TXDCTL(queue), val);
1631}
1632
1633static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
1634{
1635 u32 val;
1636
1637 WARN_ON(hw->mac.type != e1000_i210);
1638 WARN_ON(queue < 0 || queue > 1);
1639
1640 val = rd32(E1000_I210_TQAVCC(queue));
1641
1642 if (mode == QUEUE_MODE_STREAM_RESERVATION)
1643 val |= E1000_TQAVCC_QUEUEMODE;
1644 else
1645 val &= ~E1000_TQAVCC_QUEUEMODE;
1646
1647 wr32(E1000_I210_TQAVCC(queue), val);
1648}
1649
1650static bool is_any_cbs_enabled(struct igb_adapter *adapter)
1651{
1652 int i;
1653
1654 for (i = 0; i < adapter->num_tx_queues; i++) {
1655 if (adapter->tx_ring[i]->cbs_enable)
1656 return true;
1657 }
1658
1659 return false;
1660}
1661
1662static bool is_any_txtime_enabled(struct igb_adapter *adapter)
1663{
1664 int i;
1665
1666 for (i = 0; i < adapter->num_tx_queues; i++) {
1667 if (adapter->tx_ring[i]->launchtime_enable)
1668 return true;
1669 }
1670
1671 return false;
1672}
1673
/**
 *  igb_config_tx_modes - Configure "Qav Tx mode" features on igb
 *  @adapter: pointer to adapter struct
 *  @queue: queue number
 *
 *  Configure CBS and Launchtime for a given hardware queue.
 *  Parameters are retrieved from the correct Tx ring, so
 *  igb_save_cbs_params() and igb_save_txtime_params() should have
 *  been previously called.
 **/
1684static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
1685{
1686 struct net_device *netdev = adapter->netdev;
1687 struct e1000_hw *hw = &adapter->hw;
1688 struct igb_ring *ring;
1689 u32 tqavcc, tqavctrl;
1690 u16 value;
1691
1692 WARN_ON(hw->mac.type != e1000_i210);
1693 WARN_ON(queue < 0 || queue > 1);
1694 ring = adapter->tx_ring[queue];
1695
	/* If any of the Qav features is enabled, configure queues as SR and
	 * with HIGH PRIO. If none is, then configure them with LOW PRIO and
	 * as SP.
	 */
1700 if (ring->cbs_enable || ring->launchtime_enable) {
1701 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1702 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1703 } else {
1704 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
1705 set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
1706 }
1707
1708
1709 if (ring->cbs_enable || queue == 0) {
		/* When Qav mode is in use, queue 0 always goes through the
		 * CBS configuration path.  If CBS has not been requested for
		 * it, queue 0 is still set up as an SR queue but with an
		 * idleSlope equal to the full link rate (1 Gbps) and a
		 * hiCredit of one full frame, so that it effectively behaves
		 * like a Strict Priority queue.
		 */
1719 if (queue == 0 && !ring->cbs_enable) {
1720
1721 ring->idleslope = 1000000;
1722 ring->hicredit = ETH_FRAME_LEN;
1723 }
1724
		/* Always set data transfer arbitration to credit-based
		 * shaper algorithm on TQAVCTRL if CBS is enabled for any of
		 * the queues.
		 */
1729 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1730 tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
1731 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1732
		/* According to the i210 datasheet (section 7.2.7.7), the
		 * idleSlope field of TQAVCC should be programmed as:
		 *
		 *   for a 100 Mbps link:  value = BW * 0x7735 * 0.2
		 *   for a 1000 Mbps link: value = BW * 0x7735 * 2
		 *
		 * where BW is the fraction of the link rate reserved for this
		 * queue.  The CBS offload hands us 'idleslope' in kbps, so
		 * BW = idleslope / link-speed (link-speed also in kbps), and
		 * both equations above collapse to the same link-speed
		 * independent expression:
		 *
		 *   value = idleslope * 61034 / 1000000
		 *
		 * which is what is computed (rounded up) below.
		 */
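
		/* Worked example (illustrative, not from the original source):
		 * an idleslope of 20000 kbps (a 20 Mbps reservation) yields
		 * DIV_ROUND_UP_ULL(20000 * 61034, 1000000) = 1221 credit
		 * units, roughly 2% of the 61034 units that represent the
		 * full 1 Gbps link.
		 */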
1790 value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
1791
1792 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1793 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1794 tqavcc |= value;
1795 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1796
1797 wr32(E1000_I210_TQAVHC(queue),
1798 0x80000000 + ring->hicredit * 0x7735);
1799 } else {
1800
1801
1802 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1803 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1804 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1805
1806
1807 wr32(E1000_I210_TQAVHC(queue), 0);
1808
1809
1810
1811
1812
1813 if (!is_any_cbs_enabled(adapter)) {
1814 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1815 tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
1816 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1817 }
1818 }
1819
1820
1821 if (ring->launchtime_enable) {
		/* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled
		 * for any of the SR queues, and configure fetchtime delta.
		 * XXX NOTE:
		 *     - LaunchTime will be enabled for all SR queues.
		 *     - A fixed offset can be added relative to the launch
		 *       time of all packets if configured at reg LAUNCH_OS0.
		 *       We are keeping it as 0 for now (default value).
		 */
1830 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1831 tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
1832 E1000_TQAVCTRL_FETCHTIME_DELTA;
1833 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1834 } else {
1835
1836
1837
1838
1839 if (!is_any_txtime_enabled(adapter)) {
1840 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1841 tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
1842 tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
1843 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1844 }
1845 }
1846
1847
1848
1849
1850
1851
1852 netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
1853 ring->cbs_enable ? "enabled" : "disabled",
1854 ring->launchtime_enable ? "enabled" : "disabled",
1855 queue,
1856 ring->idleslope, ring->sendslope,
1857 ring->hicredit, ring->locredit);
1858}
1859
1860static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
1861 bool enable)
1862{
1863 struct igb_ring *ring;
1864
1865 if (queue < 0 || queue > adapter->num_tx_queues)
1866 return -EINVAL;
1867
1868 ring = adapter->tx_ring[queue];
1869 ring->launchtime_enable = enable;
1870
1871 return 0;
1872}
1873
1874static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1875 bool enable, int idleslope, int sendslope,
1876 int hicredit, int locredit)
1877{
1878 struct igb_ring *ring;
1879
1880 if (queue < 0 || queue > adapter->num_tx_queues)
1881 return -EINVAL;
1882
1883 ring = adapter->tx_ring[queue];
1884
1885 ring->cbs_enable = enable;
1886 ring->idleslope = idleslope;
1887 ring->sendslope = sendslope;
1888 ring->hicredit = hicredit;
1889 ring->locredit = locredit;
1890
1891 return 0;
1892}
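
/* Illustrative only: these parameters normally arrive from user space via the
 * CBS qdisc offload (TC_SETUP_QDISC_CBS), e.g. something along the lines of
 *
 *	tc qdisc replace dev eth0 parent 100:1 cbs offload 1 \
 *		idleslope 20000 sendslope -980000 hicredit 30 locredit -1470
 *
 * which ends up in igb_offload_cbs() below with idleslope/sendslope in kbps
 * and hicredit/locredit in bytes.
 */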

/**
 *  igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
 *  @adapter: pointer to adapter struct
 *
 *  Configure the transmit mode (TQAVCTRL), the Tx/Rx packet buffer sizes and
 *  the maximum packet size when switching the i210 in or out of Qav mode,
 *  then apply the per-queue CBS/launchtime settings.
 **/
1903static void igb_setup_tx_mode(struct igb_adapter *adapter)
1904{
1905 struct net_device *netdev = adapter->netdev;
1906 struct e1000_hw *hw = &adapter->hw;
1907 u32 val;
1908
1909
1910 if (hw->mac.type != e1000_i210)
1911 return;
1912
1913 if (is_fqtss_enabled(adapter)) {
1914 int i, max_queue;
1915
1916
1917
1918
1919
1920 val = rd32(E1000_I210_TQAVCTRL);
1921 val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
1922 val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1923 wr32(E1000_I210_TQAVCTRL, val);
1924
1925
1926
1927
1928 val = rd32(E1000_TXPBS);
1929 val &= ~I210_TXPBSIZE_MASK;
1930 val |= I210_TXPBSIZE_PB0_6KB | I210_TXPBSIZE_PB1_6KB |
1931 I210_TXPBSIZE_PB2_6KB | I210_TXPBSIZE_PB3_6KB;
1932 wr32(E1000_TXPBS, val);
1933
1934 val = rd32(E1000_RXPBS);
1935 val &= ~I210_RXPBSIZE_MASK;
1936 val |= I210_RXPBSIZE_PB_30KB;
1937 wr32(E1000_RXPBS, val);
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
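		/* Illustrative arithmetic (not from the original source):
		 * (4096 - 1) / 64 = 63 = 0x3F, i.e. DTXMXPKTSZ is programmed
		 * in 64-byte units and, while the SR queues are in use,
		 * transmitted packets are capped at 4 KB.
		 */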
1950 val = (4096 - 1) / 64;
1951 wr32(E1000_I210_DTXMXPKTSZ, val);
1952
1953
1954
1955
1956
1957
1958 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1959 adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1960
1961 for (i = 0; i < max_queue; i++) {
1962 igb_config_tx_modes(adapter, i);
1963 }
1964 } else {
1965 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1966 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1967 wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1968
1969 val = rd32(E1000_I210_TQAVCTRL);
1970
1971
1972
1973
1974 val &= ~E1000_TQAVCTRL_XMIT_MODE;
1975 wr32(E1000_I210_TQAVCTRL, val);
1976 }
1977
1978 netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1979 "enabled" : "disabled");
1980}
1981
/**
 *  igb_configure - configure the hardware for RX and TX
 *  @adapter: private board structure
 **/
1986static void igb_configure(struct igb_adapter *adapter)
1987{
1988 struct net_device *netdev = adapter->netdev;
1989 int i;
1990
1991 igb_get_hw_control(adapter);
1992 igb_set_rx_mode(netdev);
1993 igb_setup_tx_mode(adapter);
1994
1995 igb_restore_vlan(adapter);
1996
1997 igb_setup_tctl(adapter);
1998 igb_setup_mrqc(adapter);
1999 igb_setup_rctl(adapter);
2000
2001 igb_nfc_filter_restore(adapter);
2002 igb_configure_tx(adapter);
2003 igb_configure_rx(adapter);
2004
2005 igb_rx_fifo_flush_82575(&adapter->hw);
2006
	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
2011 for (i = 0; i < adapter->num_rx_queues; i++) {
2012 struct igb_ring *ring = adapter->rx_ring[i];
2013 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
2014 }
2015}
2016
/**
 *  igb_power_up_link - Power up the phy/serdes link
 *  @adapter: address of board private structure
 **/
2021void igb_power_up_link(struct igb_adapter *adapter)
2022{
2023 igb_reset_phy(&adapter->hw);
2024
2025 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2026 igb_power_up_phy_copper(&adapter->hw);
2027 else
2028 igb_power_up_serdes_link_82575(&adapter->hw);
2029
2030 igb_setup_link(&adapter->hw);
2031}
2032
/**
 *  igb_power_down_link - Power down the phy/serdes link
 *  @adapter: address of board private structure
 **/
2037static void igb_power_down_link(struct igb_adapter *adapter)
2038{
2039 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2040 igb_power_down_phy_copper_82575(&adapter->hw);
2041 else
2042 igb_shutdown_serdes_link_82575(&adapter->hw);
2043}
2044
/**
 *  igb_check_swap_media - Detect and switch function for Media Auto Sense
 *  @adapter: address of the board private structure
 **/
2049static void igb_check_swap_media(struct igb_adapter *adapter)
2050{
2051 struct e1000_hw *hw = &adapter->hw;
2052 u32 ctrl_ext, connsw;
2053 bool swap_now = false;
2054
2055 ctrl_ext = rd32(E1000_CTRL_EXT);
2056 connsw = rd32(E1000_CONNSW);
2057
2058
2059
2060
2061
2062 if ((hw->phy.media_type == e1000_media_type_copper) &&
2063 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
2064 swap_now = true;
2065 } else if ((hw->phy.media_type != e1000_media_type_copper) &&
2066 !(connsw & E1000_CONNSW_SERDESD)) {
2067
2068 if (adapter->copper_tries < 4) {
2069 adapter->copper_tries++;
2070 connsw |= E1000_CONNSW_AUTOSENSE_CONF;
2071 wr32(E1000_CONNSW, connsw);
2072 return;
2073 } else {
2074 adapter->copper_tries = 0;
2075 if ((connsw & E1000_CONNSW_PHYSD) &&
2076 (!(connsw & E1000_CONNSW_PHY_PDN))) {
2077 swap_now = true;
2078 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
2079 wr32(E1000_CONNSW, connsw);
2080 }
2081 }
2082 }
2083
2084 if (!swap_now)
2085 return;
2086
2087 switch (hw->phy.media_type) {
2088 case e1000_media_type_copper:
2089 netdev_info(adapter->netdev,
2090 "MAS: changing media to fiber/serdes\n");
2091 ctrl_ext |=
2092 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2093 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2094 adapter->copper_tries = 0;
2095 break;
2096 case e1000_media_type_internal_serdes:
2097 case e1000_media_type_fiber:
2098 netdev_info(adapter->netdev,
2099 "MAS: changing media to copper\n");
2100 ctrl_ext &=
2101 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2102 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2103 break;
2104 default:
2105
2106 netdev_err(adapter->netdev,
2107 "AMS: Invalid media type found, returning\n");
2108 break;
2109 }
2110 wr32(E1000_CTRL_EXT, ctrl_ext);
2111}
2112
/**
 *  igb_up - Open the interface and prepare it to handle traffic
 *  @adapter: board private structure
 **/
2117int igb_up(struct igb_adapter *adapter)
2118{
2119 struct e1000_hw *hw = &adapter->hw;
2120 int i;
2121
2122
2123 igb_configure(adapter);
2124
2125 clear_bit(__IGB_DOWN, &adapter->state);
2126
2127 for (i = 0; i < adapter->num_q_vectors; i++)
2128 napi_enable(&(adapter->q_vector[i]->napi));
2129
2130 if (adapter->flags & IGB_FLAG_HAS_MSIX)
2131 igb_configure_msix(adapter);
2132 else
2133 igb_assign_vector(adapter->q_vector[0], 0);
2134
2135
2136 rd32(E1000_TSICR);
2137 rd32(E1000_ICR);
2138 igb_irq_enable(adapter);
2139
2140
2141 if (adapter->vfs_allocated_count) {
2142 u32 reg_data = rd32(E1000_CTRL_EXT);
2143
2144 reg_data |= E1000_CTRL_EXT_PFRSTD;
2145 wr32(E1000_CTRL_EXT, reg_data);
2146 }
2147
2148 netif_tx_start_all_queues(adapter->netdev);
2149
2150
2151 hw->mac.get_link_status = 1;
2152 schedule_work(&adapter->watchdog_task);
2153
2154 if ((adapter->flags & IGB_FLAG_EEE) &&
2155 (!hw->dev_spec._82575.eee_disable))
2156 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2157
2158 return 0;
2159}
2160
2161void igb_down(struct igb_adapter *adapter)
2162{
2163 struct net_device *netdev = adapter->netdev;
2164 struct e1000_hw *hw = &adapter->hw;
2165 u32 tctl, rctl;
2166 int i;
2167
2168
2169
2170
2171 set_bit(__IGB_DOWN, &adapter->state);
2172
2173
2174 rctl = rd32(E1000_RCTL);
2175 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2176
2177
2178 igb_nfc_filter_exit(adapter);
2179
2180 netif_carrier_off(netdev);
2181 netif_tx_stop_all_queues(netdev);
2182
2183
2184 tctl = rd32(E1000_TCTL);
2185 tctl &= ~E1000_TCTL_EN;
2186 wr32(E1000_TCTL, tctl);
2187
2188 wrfl();
2189 usleep_range(10000, 11000);
2190
2191 igb_irq_disable(adapter);
2192
2193 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2194
2195 for (i = 0; i < adapter->num_q_vectors; i++) {
2196 if (adapter->q_vector[i]) {
2197 napi_synchronize(&adapter->q_vector[i]->napi);
2198 napi_disable(&adapter->q_vector[i]->napi);
2199 }
2200 }
2201
2202 del_timer_sync(&adapter->watchdog_timer);
2203 del_timer_sync(&adapter->phy_info_timer);
2204
2205
2206 spin_lock(&adapter->stats64_lock);
2207 igb_update_stats(adapter);
2208 spin_unlock(&adapter->stats64_lock);
2209
2210 adapter->link_speed = 0;
2211 adapter->link_duplex = 0;
2212
2213 if (!pci_channel_offline(adapter->pdev))
2214 igb_reset(adapter);
2215
2216
2217 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2218
2219 igb_clean_all_tx_rings(adapter);
2220 igb_clean_all_rx_rings(adapter);
2221#ifdef CONFIG_IGB_DCA
2222
2223
2224 igb_setup_dca(adapter);
2225#endif
2226}
2227
2228void igb_reinit_locked(struct igb_adapter *adapter)
2229{
2230 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2231 usleep_range(1000, 2000);
2232 igb_down(adapter);
2233 igb_up(adapter);
2234 clear_bit(__IGB_RESETTING, &adapter->state);
2235}
2236
/**
 *  igb_enable_mas - Media Autosense re-enable after swap
 *  @adapter: adapter struct
 **/
2241static void igb_enable_mas(struct igb_adapter *adapter)
2242{
2243 struct e1000_hw *hw = &adapter->hw;
2244 u32 connsw = rd32(E1000_CONNSW);
2245
2246
2247 if ((hw->phy.media_type == e1000_media_type_copper) &&
2248 (!(connsw & E1000_CONNSW_SERDESD))) {
2249 connsw |= E1000_CONNSW_ENRGSRC;
2250 connsw |= E1000_CONNSW_AUTOSENSE_EN;
2251 wr32(E1000_CONNSW, connsw);
2252 wrfl();
2253 }
2254}
2255
2256void igb_reset(struct igb_adapter *adapter)
2257{
2258 struct pci_dev *pdev = adapter->pdev;
2259 struct e1000_hw *hw = &adapter->hw;
2260 struct e1000_mac_info *mac = &hw->mac;
2261 struct e1000_fc_info *fc = &hw->fc;
2262 u32 pba, hwm;
2263
2264
2265
2266
2267 switch (mac->type) {
2268 case e1000_i350:
2269 case e1000_i354:
2270 case e1000_82580:
2271 pba = rd32(E1000_RXPBS);
2272 pba = igb_rxpbs_adjust_82580(pba);
2273 break;
2274 case e1000_82576:
2275 pba = rd32(E1000_RXPBS);
2276 pba &= E1000_RXPBS_SIZE_MASK_82576;
2277 break;
2278 case e1000_82575:
2279 case e1000_i210:
2280 case e1000_i211:
2281 default:
2282 pba = E1000_PBA_34K;
2283 break;
2284 }
2285
2286 if (mac->type == e1000_82575) {
2287 u32 min_rx_space, min_tx_space, needed_tx_space;
2288
2289
2290 wr32(E1000_PBA, pba);
2291

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
2299 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2300
2301
2302
2303
2304
2305
2306 min_tx_space = adapter->max_frame_size;
2307 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2308 min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
2309
2310
2311 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
2312
2313
2314
2315
2316
2317 if (needed_tx_space < pba) {
2318 pba -= needed_tx_space;
2319
2320
2321
2322
2323 if (pba < min_rx_space)
2324 pba = min_rx_space;
2325 }
2326
2327
2328 wr32(E1000_PBA, pba);
2329 }
2330

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame.  As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to the full Rx FIFO size minus one full Tx plus one full
	 * Rx frame.
	 */
2339 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
2340
2341 fc->high_water = hwm & 0xFFFFFFF0;
2342 fc->low_water = fc->high_water - 16;
2343 fc->pause_time = 0xFFFF;
2344 fc->send_xon = 1;
2345 fc->current_mode = fc->requested_mode;
2346
2347
2348 if (adapter->vfs_allocated_count) {
2349 int i;
2350
2351 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
2352 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2353
2354
2355 igb_ping_all_vfs(adapter);
2356
2357
2358 wr32(E1000_VFRE, 0);
2359 wr32(E1000_VFTE, 0);
2360 }
2361
2362
2363 hw->mac.ops.reset_hw(hw);
2364 wr32(E1000_WUC, 0);
2365
2366 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2367
2368 adapter->ei.get_invariants(hw);
2369 adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2370 }
2371 if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
2372 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2373 igb_enable_mas(adapter);
2374 }
2375 if (hw->mac.ops.init_hw(hw))
2376 dev_err(&pdev->dev, "Hardware Error\n");
2377
2378
2379 igb_flush_mac_table(adapter);
2380 __dev_uc_unsync(adapter->netdev, NULL);
2381
2382
2383 igb_set_default_mac_filter(adapter);
2384
2385
2386
2387
2388 if (!hw->mac.autoneg)
2389 igb_force_mac_fc(hw);
2390
2391 igb_init_dmac(adapter, pba);
2392#ifdef CONFIG_IGB_HWMON
2393
2394 if (!test_bit(__IGB_DOWN, &adapter->state)) {
2395 if (mac->type == e1000_i350 && hw->bus.func == 0) {
2396
2397
2398
2399 if (adapter->ets)
2400 mac->ops.init_thermal_sensor_thresh(hw);
2401 }
2402 }
2403#endif
2404
2405 if (hw->phy.media_type == e1000_media_type_copper) {
2406 switch (mac->type) {
2407 case e1000_i350:
2408 case e1000_i210:
2409 case e1000_i211:
2410 igb_set_eee_i350(hw, true, true);
2411 break;
2412 case e1000_i354:
2413 igb_set_eee_i354(hw, true, true);
2414 break;
2415 default:
2416 break;
2417 }
2418 }
2419 if (!netif_running(adapter->netdev))
2420 igb_power_down_link(adapter);
2421
2422 igb_update_mng_vlan(adapter);
2423
2424
2425 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2426
2427
2428 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2429 igb_ptp_reset(adapter);
2430
2431 igb_get_phy_info(hw);
2432}
2433
2434static netdev_features_t igb_fix_features(struct net_device *netdev,
2435 netdev_features_t features)
2436{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
2440 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2441 features |= NETIF_F_HW_VLAN_CTAG_TX;
2442 else
2443 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2444
2445 return features;
2446}
2447
2448static int igb_set_features(struct net_device *netdev,
2449 netdev_features_t features)
2450{
2451 netdev_features_t changed = netdev->features ^ features;
2452 struct igb_adapter *adapter = netdev_priv(netdev);
2453
2454 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2455 igb_vlan_mode(netdev, features);
2456
2457 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2458 return 0;
2459
2460 if (!(features & NETIF_F_NTUPLE)) {
2461 struct hlist_node *node2;
2462 struct igb_nfc_filter *rule;
2463
2464 spin_lock(&adapter->nfc_lock);
2465 hlist_for_each_entry_safe(rule, node2,
2466 &adapter->nfc_filter_list, nfc_node) {
2467 igb_erase_filter(adapter, rule);
2468 hlist_del(&rule->nfc_node);
2469 kfree(rule);
2470 }
2471 spin_unlock(&adapter->nfc_lock);
2472 adapter->nfc_filter_count = 0;
2473 }
2474
2475 netdev->features = features;
2476
2477 if (netif_running(netdev))
2478 igb_reinit_locked(adapter);
2479 else
2480 igb_reset(adapter);
2481
2482 return 1;
2483}
2484
2485static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2486 struct net_device *dev,
2487 const unsigned char *addr, u16 vid,
2488 u16 flags,
2489 struct netlink_ext_ack *extack)
2490{
2491
2492 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2493 struct igb_adapter *adapter = netdev_priv(dev);
2494 int vfn = adapter->vfs_allocated_count;
2495
2496 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2497 return -ENOMEM;
2498 }
2499
2500 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2501}
2502
2503#define IGB_MAX_MAC_HDR_LEN 127
2504#define IGB_MAX_NETWORK_HDR_LEN 511
2505
2506static netdev_features_t
2507igb_features_check(struct sk_buff *skb, struct net_device *dev,
2508 netdev_features_t features)
2509{
2510 unsigned int network_hdr_len, mac_hdr_len;
2511
2512
2513 mac_hdr_len = skb_network_header(skb) - skb->data;
2514 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2515 return features & ~(NETIF_F_HW_CSUM |
2516 NETIF_F_SCTP_CRC |
2517 NETIF_F_GSO_UDP_L4 |
2518 NETIF_F_HW_VLAN_CTAG_TX |
2519 NETIF_F_TSO |
2520 NETIF_F_TSO6);
2521
2522 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2523 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2524 return features & ~(NETIF_F_HW_CSUM |
2525 NETIF_F_SCTP_CRC |
2526 NETIF_F_GSO_UDP_L4 |
2527 NETIF_F_TSO |
2528 NETIF_F_TSO6);
2529

	/* We can only support IPv4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
2533 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2534 features &= ~NETIF_F_TSO;
2535
2536 return features;
2537}
2538
2539static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
2540{
2541 if (!is_fqtss_enabled(adapter)) {
2542 enable_fqtss(adapter, true);
2543 return;
2544 }
2545
2546 igb_config_tx_modes(adapter, queue);
2547
2548 if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
2549 enable_fqtss(adapter, false);
2550}
2551
2552static int igb_offload_cbs(struct igb_adapter *adapter,
2553 struct tc_cbs_qopt_offload *qopt)
2554{
2555 struct e1000_hw *hw = &adapter->hw;
2556 int err;

	/* CBS offloading is only supported by i210 controller. */
2559 if (hw->mac.type != e1000_i210)
2560 return -EOPNOTSUPP;
2561
2562
2563 if (qopt->queue < 0 || qopt->queue > 1)
2564 return -EINVAL;
2565
2566 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2567 qopt->idleslope, qopt->sendslope,
2568 qopt->hicredit, qopt->locredit);
2569 if (err)
2570 return err;
2571
2572 igb_offload_apply(adapter, qopt->queue);
2573
2574 return 0;
2575}
2576
2577#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
2578#define VLAN_PRIO_FULL_MASK (0x07)
2579
2580static int igb_parse_cls_flower(struct igb_adapter *adapter,
2581 struct flow_cls_offload *f,
2582 int traffic_class,
2583 struct igb_nfc_filter *input)
2584{
2585 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2586 struct flow_dissector *dissector = rule->match.dissector;
2587 struct netlink_ext_ack *extack = f->common.extack;
2588
2589 if (dissector->used_keys &
2590 ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
2591 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2592 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2593 BIT(FLOW_DISSECTOR_KEY_VLAN))) {
2594 NL_SET_ERR_MSG_MOD(extack,
2595 "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
2596 return -EOPNOTSUPP;
2597 }
2598
2599 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2600 struct flow_match_eth_addrs match;
2601
2602 flow_rule_match_eth_addrs(rule, &match);
2603 if (!is_zero_ether_addr(match.mask->dst)) {
2604 if (!is_broadcast_ether_addr(match.mask->dst)) {
2605 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
2606 return -EINVAL;
2607 }
2608
2609 input->filter.match_flags |=
2610 IGB_FILTER_FLAG_DST_MAC_ADDR;
2611 ether_addr_copy(input->filter.dst_addr, match.key->dst);
2612 }
2613
2614 if (!is_zero_ether_addr(match.mask->src)) {
2615 if (!is_broadcast_ether_addr(match.mask->src)) {
2616 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
2617 return -EINVAL;
2618 }
2619
2620 input->filter.match_flags |=
2621 IGB_FILTER_FLAG_SRC_MAC_ADDR;
2622 ether_addr_copy(input->filter.src_addr, match.key->src);
2623 }
2624 }
2625
2626 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2627 struct flow_match_basic match;
2628
2629 flow_rule_match_basic(rule, &match);
2630 if (match.mask->n_proto) {
2631 if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
2632 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
2633 return -EINVAL;
2634 }
2635
2636 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
2637 input->filter.etype = match.key->n_proto;
2638 }
2639 }
2640
2641 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2642 struct flow_match_vlan match;
2643
2644 flow_rule_match_vlan(rule, &match);
2645 if (match.mask->vlan_priority) {
2646 if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
2647 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
2648 return -EINVAL;
2649 }
2650
2651 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
2652 input->filter.vlan_tci =
2653 (__force __be16)match.key->vlan_priority;
2654 }
2655 }
2656
2657 input->action = traffic_class;
2658 input->cookie = f->cookie;
2659
2660 return 0;
2661}
2662
2663static int igb_configure_clsflower(struct igb_adapter *adapter,
2664 struct flow_cls_offload *cls_flower)
2665{
2666 struct netlink_ext_ack *extack = cls_flower->common.extack;
2667 struct igb_nfc_filter *filter, *f;
2668 int err, tc;
2669
2670 tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2671 if (tc < 0) {
2672 NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
2673 return -EINVAL;
2674 }
2675
2676 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2677 if (!filter)
2678 return -ENOMEM;
2679
2680 err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
2681 if (err < 0)
2682 goto err_parse;
2683
2684 spin_lock(&adapter->nfc_lock);
2685
2686 hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
2687 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2688 err = -EEXIST;
2689 NL_SET_ERR_MSG_MOD(extack,
2690 "This filter is already set in ethtool");
2691 goto err_locked;
2692 }
2693 }
2694
2695 hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
2696 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2697 err = -EEXIST;
2698 NL_SET_ERR_MSG_MOD(extack,
2699 "This filter is already set in cls_flower");
2700 goto err_locked;
2701 }
2702 }
2703
2704 err = igb_add_filter(adapter, filter);
2705 if (err < 0) {
2706 NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
2707 goto err_locked;
2708 }
2709
2710 hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
2711
2712 spin_unlock(&adapter->nfc_lock);
2713
2714 return 0;
2715
2716err_locked:
2717 spin_unlock(&adapter->nfc_lock);
2718
2719err_parse:
2720 kfree(filter);
2721
2722 return err;
2723}
2724
2725static int igb_delete_clsflower(struct igb_adapter *adapter,
2726 struct flow_cls_offload *cls_flower)
2727{
2728 struct igb_nfc_filter *filter;
2729 int err;
2730
2731 spin_lock(&adapter->nfc_lock);
2732
2733 hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
2734 if (filter->cookie == cls_flower->cookie)
2735 break;
2736
2737 if (!filter) {
2738 err = -ENOENT;
2739 goto out;
2740 }
2741
2742 err = igb_erase_filter(adapter, filter);
2743 if (err < 0)
2744 goto out;
2745
2746 hlist_del(&filter->nfc_node);
2747 kfree(filter);
2748
2749out:
2750 spin_unlock(&adapter->nfc_lock);
2751
2752 return err;
2753}
2754
2755static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
2756 struct flow_cls_offload *cls_flower)
2757{
2758 switch (cls_flower->command) {
2759 case FLOW_CLS_REPLACE:
2760 return igb_configure_clsflower(adapter, cls_flower);
2761 case FLOW_CLS_DESTROY:
2762 return igb_delete_clsflower(adapter, cls_flower);
2763 case FLOW_CLS_STATS:
2764 return -EOPNOTSUPP;
2765 default:
2766 return -EOPNOTSUPP;
2767 }
2768}
2769
2770static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2771 void *cb_priv)
2772{
2773 struct igb_adapter *adapter = cb_priv;
2774
2775 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
2776 return -EOPNOTSUPP;
2777
2778 switch (type) {
2779 case TC_SETUP_CLSFLOWER:
2780 return igb_setup_tc_cls_flower(adapter, type_data);
2781
2782 default:
2783 return -EOPNOTSUPP;
2784 }
2785}
2786
2787static int igb_offload_txtime(struct igb_adapter *adapter,
2788 struct tc_etf_qopt_offload *qopt)
2789{
2790 struct e1000_hw *hw = &adapter->hw;
2791 int err;

	/* Launchtime offloading is only supported by i210 controller. */
2794 if (hw->mac.type != e1000_i210)
2795 return -EOPNOTSUPP;
2796
2797
2798 if (qopt->queue < 0 || qopt->queue > 1)
2799 return -EINVAL;
2800
2801 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
2802 if (err)
2803 return err;
2804
2805 igb_offload_apply(adapter, qopt->queue);
2806
2807 return 0;
2808}
2809
2810static LIST_HEAD(igb_block_cb_list);
2811
2812static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2813 void *type_data)
2814{
2815 struct igb_adapter *adapter = netdev_priv(dev);
2816
2817 switch (type) {
2818 case TC_SETUP_QDISC_CBS:
2819 return igb_offload_cbs(adapter, type_data);
2820 case TC_SETUP_BLOCK:
2821 return flow_block_cb_setup_simple(type_data,
2822 &igb_block_cb_list,
2823 igb_setup_tc_block_cb,
2824 adapter, adapter, true);
2825
2826 case TC_SETUP_QDISC_ETF:
2827 return igb_offload_txtime(adapter, type_data);
2828
2829 default:
2830 return -EOPNOTSUPP;
2831 }
2832}
2833
2834static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
2835{
2836 int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
2837 struct igb_adapter *adapter = netdev_priv(dev);
2838 struct bpf_prog *prog = bpf->prog, *old_prog;
2839 bool running = netif_running(dev);
2840 bool need_reset;

	/* verify igb ring attributes are sufficient for XDP */
2843 for (i = 0; i < adapter->num_rx_queues; i++) {
2844 struct igb_ring *ring = adapter->rx_ring[i];
2845
2846 if (frame_size > igb_rx_bufsz(ring)) {
2847 NL_SET_ERR_MSG_MOD(bpf->extack,
2848 "The RX buffer size is too small for the frame size");
2849 netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
2850 igb_rx_bufsz(ring), frame_size);
2851 return -EINVAL;
2852 }
2853 }
2854
2855 old_prog = xchg(&adapter->xdp_prog, prog);
2856 need_reset = (!!prog != !!old_prog);
2857
2858
2859 if (need_reset && running) {
2860 igb_close(dev);
2861 } else {
2862 for (i = 0; i < adapter->num_rx_queues; i++)
2863 (void)xchg(&adapter->rx_ring[i]->xdp_prog,
2864 adapter->xdp_prog);
2865 }
2866
2867 if (old_prog)
2868 bpf_prog_put(old_prog);
2869
2870
2871 if (!need_reset)
2872 return 0;
2873
2874 if (running)
2875 igb_open(dev);
2876
2877 return 0;
2878}
2879
2880static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2881{
2882 switch (xdp->command) {
2883 case XDP_SETUP_PROG:
2884 return igb_xdp_setup(dev, xdp);
2885 default:
2886 return -EINVAL;
2887 }
2888}
2889
2890static void igb_xdp_ring_update_tail(struct igb_ring *ring)
2891{
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 */
2895 wmb();
2896 writel(ring->next_to_use, ring->tail);
2897}
2898
2899static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
2900{
2901 unsigned int r_idx = smp_processor_id();
2902
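	/* When there are fewer Tx queues than CPUs, wrap the CPU id with a
	 * modulo so every CPU still maps onto a valid XDP Tx ring.
	 */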
2903 if (r_idx >= adapter->num_tx_queues)
2904 r_idx = r_idx % adapter->num_tx_queues;
2905
2906 return adapter->tx_ring[r_idx];
2907}
2908
2909static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
2910{
2911 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2912 int cpu = smp_processor_id();
2913 struct igb_ring *tx_ring;
2914 struct netdev_queue *nq;
2915 u32 ret;
2916
2917 if (unlikely(!xdpf))
2918 return IGB_XDP_CONSUMED;
2919
	/* During program transitions its possible adapter->xdp_prog is assigned
	 * but ring has not been configured yet. In this case simply abort xmit.
	 */
2923 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2924 if (unlikely(!tx_ring))
2925 return IGB_XDP_CONSUMED;
2926
2927 nq = txring_txq(tx_ring);
2928 __netif_tx_lock(nq, cpu);
2929
2930 txq_trans_cond_update(nq);
2931 ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2932 __netif_tx_unlock(nq);
2933
2934 return ret;
2935}
2936
2937static int igb_xdp_xmit(struct net_device *dev, int n,
2938 struct xdp_frame **frames, u32 flags)
2939{
2940 struct igb_adapter *adapter = netdev_priv(dev);
2941 int cpu = smp_processor_id();
2942 struct igb_ring *tx_ring;
2943 struct netdev_queue *nq;
2944 int nxmit = 0;
2945 int i;
2946
2947 if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
2948 return -ENETDOWN;
2949
2950 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2951 return -EINVAL;
2952
	/* During program transitions its possible adapter->xdp_prog is assigned
	 * but ring has not been configured yet. In this case simply abort xmit.
	 */
2956 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2957 if (unlikely(!tx_ring))
2958 return -ENXIO;
2959
2960 nq = txring_txq(tx_ring);
2961 __netif_tx_lock(nq, cpu);
2962
2963
2964 txq_trans_cond_update(nq);
2965
2966 for (i = 0; i < n; i++) {
2967 struct xdp_frame *xdpf = frames[i];
2968 int err;
2969
2970 err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2971 if (err != IGB_XDP_TX)
2972 break;
2973 nxmit++;
2974 }
2975
2976 __netif_tx_unlock(nq);
2977
2978 if (unlikely(flags & XDP_XMIT_FLUSH))
2979 igb_xdp_ring_update_tail(tx_ring);
2980
2981 return nxmit;
2982}
2983
2984static const struct net_device_ops igb_netdev_ops = {
2985 .ndo_open = igb_open,
2986 .ndo_stop = igb_close,
2987 .ndo_start_xmit = igb_xmit_frame,
2988 .ndo_get_stats64 = igb_get_stats64,
2989 .ndo_set_rx_mode = igb_set_rx_mode,
2990 .ndo_set_mac_address = igb_set_mac,
2991 .ndo_change_mtu = igb_change_mtu,
2992 .ndo_eth_ioctl = igb_ioctl,
2993 .ndo_tx_timeout = igb_tx_timeout,
2994 .ndo_validate_addr = eth_validate_addr,
2995 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
2996 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
2997 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
2998 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
2999 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
3000 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
3001 .ndo_set_vf_trust = igb_ndo_set_vf_trust,
3002 .ndo_get_vf_config = igb_ndo_get_vf_config,
3003 .ndo_fix_features = igb_fix_features,
3004 .ndo_set_features = igb_set_features,
3005 .ndo_fdb_add = igb_ndo_fdb_add,
3006 .ndo_features_check = igb_features_check,
3007 .ndo_setup_tc = igb_setup_tc,
3008 .ndo_bpf = igb_xdp,
3009 .ndo_xdp_xmit = igb_xdp_xmit,
3010};
3011
3012
3013
3014
3015
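/**
 *  igb_set_fw_version - Configure version string for ethtool
 *  @adapter: adapter struct
 **/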
3016void igb_set_fw_version(struct igb_adapter *adapter)
3017{
3018 struct e1000_hw *hw = &adapter->hw;
3019 struct e1000_fw_version fw;
3020
3021 igb_get_fw_version(hw, &fw);
3022
3023 switch (hw->mac.type) {
3024 case e1000_i210:
3025 case e1000_i211:
3026 if (!(igb_get_flash_presence_i210(hw))) {
3027 snprintf(adapter->fw_version,
3028 sizeof(adapter->fw_version),
3029 "%2d.%2d-%d",
3030 fw.invm_major, fw.invm_minor,
3031 fw.invm_img_type);
3032 break;
3033 }
3034 fallthrough;
3035 default:
3036
3037 if (fw.or_valid) {
3038 snprintf(adapter->fw_version,
3039 sizeof(adapter->fw_version),
3040 "%d.%d, 0x%08x, %d.%d.%d",
3041 fw.eep_major, fw.eep_minor, fw.etrack_id,
3042 fw.or_major, fw.or_build, fw.or_patch);
3043
3044 } else if (fw.etrack_id != 0X0000) {
3045 snprintf(adapter->fw_version,
3046 sizeof(adapter->fw_version),
3047 "%d.%d, 0x%08x",
3048 fw.eep_major, fw.eep_minor, fw.etrack_id);
3049 } else {
3050 snprintf(adapter->fw_version,
3051 sizeof(adapter->fw_version),
3052 "%d.%d.%d",
3053 fw.eep_major, fw.eep_minor, fw.eep_build);
3054 }
3055 break;
3056 }
3057}
3058
3059
3060
3061
3062
3063
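/**
 *  igb_init_mas - init Media Autosense feature if enabled in the NVM
 *  @adapter: adapter struct
 **/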
3064static void igb_init_mas(struct igb_adapter *adapter)
3065{
3066 struct e1000_hw *hw = &adapter->hw;
3067 u16 eeprom_data;
3068
3069 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
3070 switch (hw->bus.func) {
3071 case E1000_FUNC_0:
3072 if (eeprom_data & IGB_MAS_ENABLE_0) {
3073 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3074 netdev_info(adapter->netdev,
3075 "MAS: Enabling Media Autosense for port %d\n",
3076 hw->bus.func);
3077 }
3078 break;
3079 case E1000_FUNC_1:
3080 if (eeprom_data & IGB_MAS_ENABLE_1) {
3081 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3082 netdev_info(adapter->netdev,
3083 "MAS: Enabling Media Autosense for port %d\n",
3084 hw->bus.func);
3085 }
3086 break;
3087 case E1000_FUNC_2:
3088 if (eeprom_data & IGB_MAS_ENABLE_2) {
3089 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3090 netdev_info(adapter->netdev,
3091 "MAS: Enabling Media Autosense for port %d\n",
3092 hw->bus.func);
3093 }
3094 break;
3095 case E1000_FUNC_3:
3096 if (eeprom_data & IGB_MAS_ENABLE_3) {
3097 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3098 netdev_info(adapter->netdev,
3099 "MAS: Enabling Media Autosense for port %d\n",
3100 hw->bus.func);
3101 }
3102 break;
3103 default:
3104
3105 netdev_err(adapter->netdev,
3106 "MAS: Invalid port configuration, returning\n");
3107 break;
3108 }
3109}
3110
3111
3112
3113
3114
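/**
 *  igb_init_i2c - Init I2C interface
 *  @adapter: pointer to adapter structure
 **/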
3115static s32 igb_init_i2c(struct igb_adapter *adapter)
3116{
3117 struct e1000_hw *hw = &adapter->hw;
3118 s32 status = 0;
3119 s32 i2cctl;
3120
3121
3122 if (adapter->hw.mac.type != e1000_i350)
3123 return 0;
3124
3125 i2cctl = rd32(E1000_I2CPARAMS);
3126 i2cctl |= E1000_I2CBB_EN
3127 | E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N
3128 | E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
3129 wr32(E1000_I2CPARAMS, i2cctl);
3130 wrfl();
3131
3132
3133
3134
3135
3136 adapter->i2c_adap.owner = THIS_MODULE;
3137 adapter->i2c_algo = igb_i2c_algo;
3138 adapter->i2c_algo.data = adapter;
3139 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
3140 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
3141 strlcpy(adapter->i2c_adap.name, "igb BB",
3142 sizeof(adapter->i2c_adap.name));
3143 status = i2c_bit_add_bus(&adapter->i2c_adap);
3144 return status;
3145}
3146
3157
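/**
 *  igb_probe - Device Initialization Routine
 *  @pdev: PCI device information struct
 *  @ent: entry in igb_pci_tbl
 *
 *  Returns 0 on success, negative on failure
 *
 *  igb_probe initializes an adapter identified by a pci_dev structure.
 *  The OS initialization, configuring of the adapter private structure,
 *  and a hardware reset occur.
 **/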
3158static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3159{
3160 struct net_device *netdev;
3161 struct igb_adapter *adapter;
3162 struct e1000_hw *hw;
3163 u16 eeprom_data = 0;
3164 s32 ret_val;
3165 static int global_quad_port_a;
3166 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
3167 u8 part_str[E1000_PBANUM_LENGTH];
3168 int err;
3169
	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
3173 if (pdev->is_virtfn) {
3174 WARN(1, KERN_ERR "%s (%x:%x) should not be a VF!\n",
3175 pci_name(pdev), pdev->vendor, pdev->device);
3176 return -EINVAL;
3177 }
3178
3179 err = pci_enable_device_mem(pdev);
3180 if (err)
3181 return err;
3182
3183 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3184 if (err) {
3185 dev_err(&pdev->dev,
3186 "No usable DMA configuration, aborting\n");
3187 goto err_dma;
3188 }
3189
3190 err = pci_request_mem_regions(pdev, igb_driver_name);
3191 if (err)
3192 goto err_pci_reg;
3193
3194 pci_enable_pcie_error_reporting(pdev);
3195
3196 pci_set_master(pdev);
3197 pci_save_state(pdev);
3198
3199 err = -ENOMEM;
3200 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
3201 IGB_MAX_TX_QUEUES);
3202 if (!netdev)
3203 goto err_alloc_etherdev;
3204
3205 SET_NETDEV_DEV(netdev, &pdev->dev);
3206
3207 pci_set_drvdata(pdev, netdev);
3208 adapter = netdev_priv(netdev);
3209 adapter->netdev = netdev;
3210 adapter->pdev = pdev;
3211 hw = &adapter->hw;
3212 hw->back = adapter;
3213 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3214
3215 err = -EIO;
3216 adapter->io_addr = pci_iomap(pdev, 0, 0);
3217 if (!adapter->io_addr)
3218 goto err_ioremap;
3219
3220 hw->hw_addr = adapter->io_addr;
3221
3222 netdev->netdev_ops = &igb_netdev_ops;
3223 igb_set_ethtool_ops(netdev);
3224 netdev->watchdog_timeo = 5 * HZ;
3225
3226 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
3227
3228 netdev->mem_start = pci_resource_start(pdev, 0);
3229 netdev->mem_end = pci_resource_end(pdev, 0);
3230
3231
3232 hw->vendor_id = pdev->vendor;
3233 hw->device_id = pdev->device;
3234 hw->revision_id = pdev->revision;
3235 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3236 hw->subsystem_device_id = pdev->subsystem_device;
3237
3238
3239 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3240 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3241 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3242
3243 err = ei->get_invariants(hw);
3244 if (err)
3245 goto err_sw_init;
3246
3247
3248 err = igb_sw_init(adapter);
3249 if (err)
3250 goto err_sw_init;
3251
3252 igb_get_bus_info_pcie(hw);
3253
3254 hw->phy.autoneg_wait_to_complete = false;
3255
3256
3257 if (hw->phy.media_type == e1000_media_type_copper) {
3258 hw->phy.mdix = AUTO_ALL_MODES;
3259 hw->phy.disable_polarity_correction = false;
3260 hw->phy.ms_type = e1000_ms_hw_default;
3261 }
3262
3263 if (igb_check_reset_block(hw))
3264 dev_info(&pdev->dev,
3265 "PHY reset is blocked due to SOL/IDER session.\n");
3266
	/* features is initialized to 0 in allocation, it might have bits
	 * set by igb_sw_init so we should use an or instead of an
	 * assignment.
	 */
3271 netdev->features |= NETIF_F_SG |
3272 NETIF_F_TSO |
3273 NETIF_F_TSO6 |
3274 NETIF_F_RXHASH |
3275 NETIF_F_RXCSUM |
3276 NETIF_F_HW_CSUM;
3277
3278 if (hw->mac.type >= e1000_82576)
3279 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
3280
3281 if (hw->mac.type >= e1000_i350)
3282 netdev->features |= NETIF_F_HW_TC;
3283
3284#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
3285 NETIF_F_GSO_GRE_CSUM | \
3286 NETIF_F_GSO_IPXIP4 | \
3287 NETIF_F_GSO_IPXIP6 | \
3288 NETIF_F_GSO_UDP_TUNNEL | \
3289 NETIF_F_GSO_UDP_TUNNEL_CSUM)
3290
3291 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
3292 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
3293
3294
3295 netdev->hw_features |= netdev->features |
3296 NETIF_F_HW_VLAN_CTAG_RX |
3297 NETIF_F_HW_VLAN_CTAG_TX |
3298 NETIF_F_RXALL;
3299
3300 if (hw->mac.type >= e1000_i350)
3301 netdev->hw_features |= NETIF_F_NTUPLE;
3302
3303 netdev->features |= NETIF_F_HIGHDMA;
3304
3305 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3306 netdev->mpls_features |= NETIF_F_HW_CSUM;
3307 netdev->hw_enc_features |= netdev->vlan_features;
3308
3309
3310 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3311 NETIF_F_HW_VLAN_CTAG_RX |
3312 NETIF_F_HW_VLAN_CTAG_TX;
3313
3314 netdev->priv_flags |= IFF_SUPP_NOFCS;
3315
3316 netdev->priv_flags |= IFF_UNICAST_FLT;
3317
3318
3319 netdev->min_mtu = ETH_MIN_MTU;
3320 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3321
3322 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
3323
	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state
	 */
3327 hw->mac.ops.reset_hw(hw);
3328
	/* make sure the NVM is good, i211 parts have special NVM that
	 * doesn't contain a checksum
	 */
3332 switch (hw->mac.type) {
3333 case e1000_i210:
3334 case e1000_i211:
3335 if (igb_get_flash_presence_i210(hw)) {
3336 if (hw->nvm.ops.validate(hw) < 0) {
3337 dev_err(&pdev->dev,
3338 "The NVM Checksum Is Not Valid\n");
3339 err = -EIO;
3340 goto err_eeprom;
3341 }
3342 }
3343 break;
3344 default:
3345 if (hw->nvm.ops.validate(hw) < 0) {
3346 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
3347 err = -EIO;
3348 goto err_eeprom;
3349 }
3350 break;
3351 }
3352
3353 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
3354
3355 if (hw->mac.ops.read_mac_addr(hw))
3356 dev_err(&pdev->dev, "NVM Read Error\n");
3357 }
3358
3359 eth_hw_addr_set(netdev, hw->mac.addr);
3360
3361 if (!is_valid_ether_addr(netdev->dev_addr)) {
3362 dev_err(&pdev->dev, "Invalid MAC Address\n");
3363 err = -EIO;
3364 goto err_eeprom;
3365 }
3366
3367 igb_set_default_mac_filter(adapter);
3368
3369
3370 igb_set_fw_version(adapter);
3371
3372
3373 if (hw->mac.type == e1000_i210) {
3374 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
3375 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
3376 }
3377
3378 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
3379 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
3380
3381 INIT_WORK(&adapter->reset_task, igb_reset_task);
3382 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
3383
3384
3385 adapter->fc_autoneg = true;
3386 hw->mac.autoneg = true;
3387 hw->phy.autoneg_advertised = 0x2f;
3388
3389 hw->fc.requested_mode = e1000_fc_default;
3390 hw->fc.current_mode = e1000_fc_default;
3391
3392 igb_validate_mdi_setting(hw);
3393
3394
3395 if (hw->bus.func == 0)
3396 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3397
3398
3399 if (hw->mac.type >= e1000_82580)
3400 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3401 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3402 &eeprom_data);
3403 else if (hw->bus.func == 1)
3404 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3405
3406 if (eeprom_data & IGB_EEPROM_APME)
3407 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3408
	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port
	 */
3413 switch (pdev->device) {
3414 case E1000_DEV_ID_82575GB_QUAD_COPPER:
3415 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3416 break;
3417 case E1000_DEV_ID_82575EB_FIBER_SERDES:
3418 case E1000_DEV_ID_82576_FIBER:
3419 case E1000_DEV_ID_82576_SERDES:
3420
3421
3422
3423 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
3424 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3425 break;
3426 case E1000_DEV_ID_82576_QUAD_COPPER:
3427 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
3428
3429 if (global_quad_port_a != 0)
3430 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3431 else
3432 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
3433
3434 if (++global_quad_port_a == 4)
3435 global_quad_port_a = 0;
3436 break;
3437 default:
3438
3439 if (!device_can_wakeup(&adapter->pdev->dev))
3440 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3441 }
3442
3443
3444 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
3445 adapter->wol |= E1000_WUFC_MAG;
3446
3447
3448 if ((hw->mac.type == e1000_i350) &&
3449 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
3450 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3451 adapter->wol = 0;
3452 }
3453
3454
3455
3456
3457 if (((hw->mac.type == e1000_i350) ||
3458 (hw->mac.type == e1000_i354)) &&
3459 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
3460 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3461 adapter->wol = 0;
3462 }
3463 if (hw->mac.type == e1000_i350) {
3464 if (((pdev->subsystem_device == 0x5001) ||
3465 (pdev->subsystem_device == 0x5002)) &&
3466 (hw->bus.func == 0)) {
3467 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3468 adapter->wol = 0;
3469 }
3470 if (pdev->subsystem_device == 0x1F52)
3471 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3472 }
3473
3474 device_set_wakeup_enable(&adapter->pdev->dev,
3475 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
3476
3477
3478 igb_reset(adapter);
3479
3480
3481 err = igb_init_i2c(adapter);
3482 if (err) {
3483 dev_err(&pdev->dev, "failed to init i2c interface\n");
3484 goto err_eeprom;
3485 }
3486
	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
3490 igb_get_hw_control(adapter);
3491
3492 strcpy(netdev->name, "eth%d");
3493 err = register_netdev(netdev);
3494 if (err)
3495 goto err_register;
3496
3497
3498 netif_carrier_off(netdev);
3499
3500#ifdef CONFIG_IGB_DCA
3501 if (dca_add_requester(&pdev->dev) == 0) {
3502 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3503 dev_info(&pdev->dev, "DCA enabled\n");
3504 igb_setup_dca(adapter);
3505 }
3506
3507#endif
3508#ifdef CONFIG_IGB_HWMON
3509
3510 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3511 u16 ets_word;
3512
3513
3514
3515
3516 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3517 if (ets_word != 0x0000 && ets_word != 0xFFFF)
3518 adapter->ets = true;
3519 else
3520 adapter->ets = false;
3521 if (igb_sysfs_init(adapter))
3522 dev_err(&pdev->dev,
3523 "failed to allocate sysfs resources\n");
3524 } else {
3525 adapter->ets = false;
3526 }
3527#endif
3528
3529 adapter->ei = *ei;
3530 if (hw->dev_spec._82575.mas_capable)
3531 igb_init_mas(adapter);
3532
3533
3534 igb_ptp_init(adapter);
3535
3536 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3537
3538 if (hw->mac.type != e1000_i354) {
3539 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3540 netdev->name,
3541 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3542 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3543 "unknown"),
3544 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3545 "Width x4" :
3546 (hw->bus.width == e1000_bus_width_pcie_x2) ?
3547 "Width x2" :
3548 (hw->bus.width == e1000_bus_width_pcie_x1) ?
3549 "Width x1" : "unknown"), netdev->dev_addr);
3550 }
3551
3552 if ((hw->mac.type == e1000_82576 &&
3553 rd32(E1000_EECD) & E1000_EECD_PRES) ||
3554 (hw->mac.type >= e1000_i210 ||
3555 igb_get_flash_presence_i210(hw))) {
3556 ret_val = igb_read_part_string(hw, part_str,
3557 E1000_PBANUM_LENGTH);
3558 } else {
3559 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3560 }
3561
3562 if (ret_val)
3563 strcpy(part_str, "Unknown");
3564 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3565 dev_info(&pdev->dev,
3566 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3567 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3568 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3569 adapter->num_rx_queues, adapter->num_tx_queues);
3570 if (hw->phy.media_type == e1000_media_type_copper) {
3571 switch (hw->mac.type) {
3572 case e1000_i350:
3573 case e1000_i210:
3574 case e1000_i211:
3575
3576 err = igb_set_eee_i350(hw, true, true);
3577 if ((!err) &&
3578 (!hw->dev_spec._82575.eee_disable)) {
3579 adapter->eee_advert =
3580 MDIO_EEE_100TX | MDIO_EEE_1000T;
3581 adapter->flags |= IGB_FLAG_EEE;
3582 }
3583 break;
3584 case e1000_i354:
3585 if ((rd32(E1000_CTRL_EXT) &
3586 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3587 err = igb_set_eee_i354(hw, true, true);
3588 if ((!err) &&
3589 (!hw->dev_spec._82575.eee_disable)) {
3590 adapter->eee_advert =
3591 MDIO_EEE_100TX | MDIO_EEE_1000T;
3592 adapter->flags |= IGB_FLAG_EEE;
3593 }
3594 }
3595 break;
3596 default:
3597 break;
3598 }
3599 }
3600
3601 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
3602
3603 pm_runtime_put_noidle(&pdev->dev);
3604 return 0;
3605
3606err_register:
3607 igb_release_hw_control(adapter);
3608 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3609err_eeprom:
3610 if (!igb_check_reset_block(hw))
3611 igb_reset_phy(hw);
3612
3613 if (hw->flash_address)
3614 iounmap(hw->flash_address);
3615err_sw_init:
3616 kfree(adapter->mac_table);
3617 kfree(adapter->shadow_vfta);
3618 igb_clear_interrupt_scheme(adapter);
3619#ifdef CONFIG_PCI_IOV
3620 igb_disable_sriov(pdev);
3621#endif
3622 pci_iounmap(pdev, adapter->io_addr);
3623err_ioremap:
3624 free_netdev(netdev);
3625err_alloc_etherdev:
3626 pci_disable_pcie_error_reporting(pdev);
3627 pci_release_mem_regions(pdev);
3628err_pci_reg:
3629err_dma:
3630 pci_disable_device(pdev);
3631 return err;
3632}
3633
3634#ifdef CONFIG_PCI_IOV
3635static int igb_disable_sriov(struct pci_dev *pdev)
3636{
3637 struct net_device *netdev = pci_get_drvdata(pdev);
3638 struct igb_adapter *adapter = netdev_priv(netdev);
3639 struct e1000_hw *hw = &adapter->hw;
3640
3641
3642 if (adapter->vf_data) {
3643
3644 if (pci_vfs_assigned(pdev)) {
3645 dev_warn(&pdev->dev,
3646 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3647 return -EPERM;
3648 } else {
3649 pci_disable_sriov(pdev);
3650 msleep(500);
3651 }
3652
3653 kfree(adapter->vf_mac_list);
3654 adapter->vf_mac_list = NULL;
3655 kfree(adapter->vf_data);
3656 adapter->vf_data = NULL;
3657 adapter->vfs_allocated_count = 0;
3658 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3659 wrfl();
3660 msleep(100);
3661 dev_info(&pdev->dev, "IOV Disabled\n");
3662
3663
3664 adapter->flags |= IGB_FLAG_DMAC;
3665 }
3666
3667 return 0;
3668}
3669
3670static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
3671{
3672 struct net_device *netdev = pci_get_drvdata(pdev);
3673 struct igb_adapter *adapter = netdev_priv(netdev);
3674 int old_vfs = pci_num_vf(pdev);
3675 struct vf_mac_filter *mac_list;
3676 int err = 0;
3677 int num_vf_mac_filters, i;
3678
3679 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3680 err = -EPERM;
3681 goto out;
3682 }
3683 if (!num_vfs)
3684 goto out;
3685
3686 if (old_vfs) {
3687 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3688 old_vfs, max_vfs);
3689 adapter->vfs_allocated_count = old_vfs;
3690 } else
3691 adapter->vfs_allocated_count = num_vfs;
3692
3693 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3694 sizeof(struct vf_data_storage), GFP_KERNEL);
3695
3696
3697 if (!adapter->vf_data) {
3698 adapter->vfs_allocated_count = 0;
3699 err = -ENOMEM;
3700 goto out;
3701 }
3702
	/* Due to the limited number of RAR entries calculate potential
	 * number of MAC filters available for the VFs. Reserve entries
	 * for PF default MAC, PF MAC filters and at least one RAR entry
	 * for each VF for VF MAC.
	 */
3708 num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3709 (1 + IGB_PF_MAC_FILTERS_RESERVED +
3710 adapter->vfs_allocated_count);
3711
3712 adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3713 sizeof(struct vf_mac_filter),
3714 GFP_KERNEL);
3715
3716 mac_list = adapter->vf_mac_list;
3717 INIT_LIST_HEAD(&adapter->vf_macs.l);
3718
3719 if (adapter->vf_mac_list) {
3720
3721 for (i = 0; i < num_vf_mac_filters; i++) {
3722 mac_list->vf = -1;
3723 mac_list->free = true;
3724 list_add(&mac_list->l, &adapter->vf_macs.l);
3725 mac_list++;
3726 }
3727 } else {
3728
3729
3730
3731 dev_err(&pdev->dev,
3732 "Unable to allocate memory for VF MAC filter list\n");
3733 }
3734
3735
3736 if (!old_vfs) {
3737 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3738 if (err)
3739 goto err_out;
3740 }
3741 dev_info(&pdev->dev, "%d VFs allocated\n",
3742 adapter->vfs_allocated_count);
3743 for (i = 0; i < adapter->vfs_allocated_count; i++)
3744 igb_vf_configure(adapter, i);
3745
3746
3747 adapter->flags &= ~IGB_FLAG_DMAC;
3748 goto out;
3749
3750err_out:
3751 kfree(adapter->vf_mac_list);
3752 adapter->vf_mac_list = NULL;
3753 kfree(adapter->vf_data);
3754 adapter->vf_data = NULL;
3755 adapter->vfs_allocated_count = 0;
3756out:
3757 return err;
3758}
3759
3760#endif
3761
3762
3763
3764
3765static void igb_remove_i2c(struct igb_adapter *adapter)
3766{
3767
3768 i2c_del_adapter(&adapter->i2c_adap);
3769}
3770
3779
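/**
 *  igb_remove - Device Removal Routine
 *  @pdev: PCI device information struct
 *
 *  igb_remove is called by the PCI subsystem to alert the driver
 *  that it should release a PCI device.  This could be caused by a
 *  Hot-Plug event, or because the driver is going to be removed from
 *  memory.
 **/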
3780static void igb_remove(struct pci_dev *pdev)
3781{
3782 struct net_device *netdev = pci_get_drvdata(pdev);
3783 struct igb_adapter *adapter = netdev_priv(netdev);
3784 struct e1000_hw *hw = &adapter->hw;
3785
3786 pm_runtime_get_noresume(&pdev->dev);
3787#ifdef CONFIG_IGB_HWMON
3788 igb_sysfs_exit(adapter);
3789#endif
3790 igb_remove_i2c(adapter);
3791 igb_ptp_stop(adapter);

	/* The watchdog timer may be rescheduled, so explicitly
	 * disable watchdog from being rescheduled.
	 */
3795 set_bit(__IGB_DOWN, &adapter->state);
3796 del_timer_sync(&adapter->watchdog_timer);
3797 del_timer_sync(&adapter->phy_info_timer);
3798
3799 cancel_work_sync(&adapter->reset_task);
3800 cancel_work_sync(&adapter->watchdog_task);
3801
3802#ifdef CONFIG_IGB_DCA
3803 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3804 dev_info(&pdev->dev, "DCA disabled\n");
3805 dca_remove_requester(&pdev->dev);
3806 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3807 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3808 }
3809#endif
3810
	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
3814 igb_release_hw_control(adapter);
3815
3816#ifdef CONFIG_PCI_IOV
3817 igb_disable_sriov(pdev);
3818#endif
3819
3820 unregister_netdev(netdev);
3821
3822 igb_clear_interrupt_scheme(adapter);
3823
3824 pci_iounmap(pdev, adapter->io_addr);
3825 if (hw->flash_address)
3826 iounmap(hw->flash_address);
3827 pci_release_mem_regions(pdev);
3828
3829 kfree(adapter->mac_table);
3830 kfree(adapter->shadow_vfta);
3831 free_netdev(netdev);
3832
3833 pci_disable_pcie_error_reporting(pdev);
3834
3835 pci_disable_device(pdev);
3836}
3837
3846
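/**
 *  igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 *  @adapter: board private structure to initialize
 *
 *  This function initializes the vf specific data storage and then attempts to
 *  allocate the VFs.  The reason for ordering it this way is because it is much
 *  more difficult to get the VFs allocated before the PF has been moved in place.
 **/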
3847static void igb_probe_vfs(struct igb_adapter *adapter)
3848{
3849#ifdef CONFIG_PCI_IOV
3850 struct pci_dev *pdev = adapter->pdev;
3851 struct e1000_hw *hw = &adapter->hw;
3852
	/* Virtualization features not supported on i210 family. */
3854 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
3855 return;
3856
	/* Of the below we really only want the effect of getting
	 * IGB_FLAG_HAS_MSIX set (if available), without which
	 * igb_enable_sriov() has no effect.
	 */
3861 igb_set_interrupt_capability(adapter, true);
3862 igb_reset_interrupt_capability(adapter);
3863
3864 pci_sriov_set_totalvfs(pdev, 7);
3865 igb_enable_sriov(pdev, max_vfs);
3866
3867#endif
3868}
3869
3870unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3871{
3872 struct e1000_hw *hw = &adapter->hw;
3873 unsigned int max_rss_queues;
3874
3875
3876 switch (hw->mac.type) {
3877 case e1000_i211:
3878 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3879 break;
3880 case e1000_82575:
3881 case e1000_i210:
3882 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3883 break;
3884 case e1000_i350:
3885
3886 if (!!adapter->vfs_allocated_count) {
3887 max_rss_queues = 1;
3888 break;
3889 }
3890 fallthrough;
3891 case e1000_82576:
3892 if (!!adapter->vfs_allocated_count) {
3893 max_rss_queues = 2;
3894 break;
3895 }
3896 fallthrough;
3897 case e1000_82580:
3898 case e1000_i354:
3899 default:
3900 max_rss_queues = IGB_MAX_RX_QUEUES;
3901 break;
3902 }
3903
3904 return max_rss_queues;
3905}
3906
3907static void igb_init_queue_configuration(struct igb_adapter *adapter)
3908{
3909 u32 max_rss_queues;
3910
3911 max_rss_queues = igb_get_max_rss_queues(adapter);
3912 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3913
3914 igb_set_flag_queue_pairs(adapter, max_rss_queues);
3915}
3916
3917void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3918 const u32 max_rss_queues)
3919{
3920 struct e1000_hw *hw = &adapter->hw;
3921
3922
3923 switch (hw->mac.type) {
3924 case e1000_82575:
3925 case e1000_i211:
3926
3927 break;
3928 case e1000_82576:
3929 case e1000_82580:
3930 case e1000_i350:
3931 case e1000_i354:
3932 case e1000_i210:
3933 default:
3934
3935
3936
3937 if (adapter->rss_queues > (max_rss_queues / 2))
3938 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
3939 else
3940 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
3941 break;
3942 }
3943}
3944
3952
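/**
 *  igb_sw_init - Initialize general software structures (struct igb_adapter)
 *  @adapter: board private structure to initialize
 *
 *  igb_sw_init initializes the Adapter private data structure.
 *  Fields are initialized based on PCI device information and
 *  OS network device settings (MTU size).
 **/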
3953static int igb_sw_init(struct igb_adapter *adapter)
3954{
3955 struct e1000_hw *hw = &adapter->hw;
3956 struct net_device *netdev = adapter->netdev;
3957 struct pci_dev *pdev = adapter->pdev;
3958
3959 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3960
3961
3962 adapter->tx_ring_count = IGB_DEFAULT_TXD;
3963 adapter->rx_ring_count = IGB_DEFAULT_RXD;
3964
3965
3966 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
3967 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
3968
3969
3970 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3971
3972 adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
3973 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3974
3975 spin_lock_init(&adapter->nfc_lock);
3976 spin_lock_init(&adapter->stats64_lock);
3977#ifdef CONFIG_PCI_IOV
3978 switch (hw->mac.type) {
3979 case e1000_82576:
3980 case e1000_i350:
3981 if (max_vfs > 7) {
3982 dev_warn(&pdev->dev,
3983 "Maximum of 7 VFs per PF, using max\n");
3984 max_vfs = adapter->vfs_allocated_count = 7;
3985 } else
3986 adapter->vfs_allocated_count = max_vfs;
3987 if (adapter->vfs_allocated_count)
3988 dev_warn(&pdev->dev,
3989 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3990 break;
3991 default:
3992 break;
3993 }
3994#endif
3995
3996
3997 adapter->flags |= IGB_FLAG_HAS_MSIX;
3998
3999 adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
4000 sizeof(struct igb_mac_addr),
4001 GFP_KERNEL);
4002 if (!adapter->mac_table)
4003 return -ENOMEM;
4004
4005 igb_probe_vfs(adapter);
4006
4007 igb_init_queue_configuration(adapter);
4008
4009
4010 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
4011 GFP_KERNEL);
4012 if (!adapter->shadow_vfta)
4013 return -ENOMEM;
4014
4015
4016 if (igb_init_interrupt_scheme(adapter, true)) {
4017 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
4018 return -ENOMEM;
4019 }
4020
4021
4022 igb_irq_disable(adapter);
4023
4024 if (hw->mac.type >= e1000_i350)
4025 adapter->flags &= ~IGB_FLAG_DMAC;
4026
4027 set_bit(__IGB_DOWN, &adapter->state);
4028 return 0;
4029}
4030
4031
4043
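/**
 *  __igb_open - Called when a network interface is made active
 *  @netdev: network interface device structure
 *  @resuming: indicates whether we are in a resume call
 *
 *  Returns 0 on success, negative value on failure
 *
 *  The open entry point is called when a network interface is made
 *  active by the system (IFF_UP).  At this point all resources needed
 *  for transmit and receive operations are allocated, the interrupt
 *  handler is registered with the OS, the watchdog timer is started,
 *  and the stack is notified that the interface is ready.
 **/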
4044static int __igb_open(struct net_device *netdev, bool resuming)
4045{
4046 struct igb_adapter *adapter = netdev_priv(netdev);
4047 struct e1000_hw *hw = &adapter->hw;
4048 struct pci_dev *pdev = adapter->pdev;
4049 int err;
4050 int i;
4051
4052
4053 if (test_bit(__IGB_TESTING, &adapter->state)) {
4054 WARN_ON(resuming);
4055 return -EBUSY;
4056 }
4057
4058 if (!resuming)
4059 pm_runtime_get_sync(&pdev->dev);
4060
4061 netif_carrier_off(netdev);
4062
4063
4064 err = igb_setup_all_tx_resources(adapter);
4065 if (err)
4066 goto err_setup_tx;
4067
4068
4069 err = igb_setup_all_rx_resources(adapter);
4070 if (err)
4071 goto err_setup_rx;
4072
4073 igb_power_up_link(adapter);
4074
	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
4080 igb_configure(adapter);
4081
4082 err = igb_request_irq(adapter);
4083 if (err)
4084 goto err_req_irq;
4085
4086
4087 err = netif_set_real_num_tx_queues(adapter->netdev,
4088 adapter->num_tx_queues);
4089 if (err)
4090 goto err_set_queues;
4091
4092 err = netif_set_real_num_rx_queues(adapter->netdev,
4093 adapter->num_rx_queues);
4094 if (err)
4095 goto err_set_queues;
4096
4097
4098 clear_bit(__IGB_DOWN, &adapter->state);
4099
4100 for (i = 0; i < adapter->num_q_vectors; i++)
4101 napi_enable(&(adapter->q_vector[i]->napi));
4102
4103
4104 rd32(E1000_TSICR);
4105 rd32(E1000_ICR);
4106
4107 igb_irq_enable(adapter);
4108
4109
4110 if (adapter->vfs_allocated_count) {
4111 u32 reg_data = rd32(E1000_CTRL_EXT);
4112
4113 reg_data |= E1000_CTRL_EXT_PFRSTD;
4114 wr32(E1000_CTRL_EXT, reg_data);
4115 }
4116
4117 netif_tx_start_all_queues(netdev);
4118
4119 if (!resuming)
4120 pm_runtime_put(&pdev->dev);
4121
4122
4123 hw->mac.get_link_status = 1;
4124 schedule_work(&adapter->watchdog_task);
4125
4126 return 0;
4127
4128err_set_queues:
4129 igb_free_irq(adapter);
4130err_req_irq:
4131 igb_release_hw_control(adapter);
4132 igb_power_down_link(adapter);
4133 igb_free_all_rx_resources(adapter);
4134err_setup_rx:
4135 igb_free_all_tx_resources(adapter);
4136err_setup_tx:
4137 igb_reset(adapter);
4138 if (!resuming)
4139 pm_runtime_put(&pdev->dev);
4140
4141 return err;
4142}
4143
4144int igb_open(struct net_device *netdev)
4145{
4146 return __igb_open(netdev, false);
4147}
4148
4160
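/**
 *  __igb_close - Disables a network interface
 *  @netdev: network interface device structure
 *  @suspending: indicates we are in a suspend call
 *
 *  Returns 0, this is not allowed to fail
 *
 *  The close entry point is called when an interface is de-activated
 *  by the OS.  The hardware is still under the driver's control, but
 *  needs to be disabled.  A global MAC reset is issued to stop the
 *  hardware, and all transmit and receive resources are freed.
 **/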
4161static int __igb_close(struct net_device *netdev, bool suspending)
4162{
4163 struct igb_adapter *adapter = netdev_priv(netdev);
4164 struct pci_dev *pdev = adapter->pdev;
4165
4166 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
4167
4168 if (!suspending)
4169 pm_runtime_get_sync(&pdev->dev);
4170
4171 igb_down(adapter);
4172 igb_free_irq(adapter);
4173
4174 igb_free_all_tx_resources(adapter);
4175 igb_free_all_rx_resources(adapter);
4176
4177 if (!suspending)
4178 pm_runtime_put_sync(&pdev->dev);
4179 return 0;
4180}
4181
4182int igb_close(struct net_device *netdev)
4183{
4184 if (netif_device_present(netdev) || netdev->dismantle)
4185 return __igb_close(netdev, false);
4186 return 0;
4187}
4188
4189
4190
4191
4192
4193
4194
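/**
 *  igb_setup_tx_resources - allocate Tx resources (Descriptors)
 *  @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 *  Return 0 on success, negative on failure
 **/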
4195int igb_setup_tx_resources(struct igb_ring *tx_ring)
4196{
4197 struct device *dev = tx_ring->dev;
4198 int size;
4199
4200 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
4201
4202 tx_ring->tx_buffer_info = vmalloc(size);
4203 if (!tx_ring->tx_buffer_info)
4204 goto err;
4205
4206
4207 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
4208 tx_ring->size = ALIGN(tx_ring->size, 4096);
4209
4210 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4211 &tx_ring->dma, GFP_KERNEL);
4212 if (!tx_ring->desc)
4213 goto err;
4214
4215 tx_ring->next_to_use = 0;
4216 tx_ring->next_to_clean = 0;
4217
4218 return 0;
4219
4220err:
4221 vfree(tx_ring->tx_buffer_info);
4222 tx_ring->tx_buffer_info = NULL;
4223 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4224 return -ENOMEM;
4225}
4226
4227
4228
4229
4230
4231
4232
4233
4234static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
4235{
4236 struct pci_dev *pdev = adapter->pdev;
4237 int i, err = 0;
4238
4239 for (i = 0; i < adapter->num_tx_queues; i++) {
4240 err = igb_setup_tx_resources(adapter->tx_ring[i]);
4241 if (err) {
4242 dev_err(&pdev->dev,
4243 "Allocation for Tx Queue %u failed\n", i);
4244 for (i--; i >= 0; i--)
4245 igb_free_tx_resources(adapter->tx_ring[i]);
4246 break;
4247 }
4248 }
4249
4250 return err;
4251}
4252
4253
4254
4255
4256
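/**
 *  igb_setup_tctl - configure the transmit control registers
 *  @adapter: Board private structure
 **/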
4257void igb_setup_tctl(struct igb_adapter *adapter)
4258{
4259 struct e1000_hw *hw = &adapter->hw;
4260 u32 tctl;
4261
4262
4263 wr32(E1000_TXDCTL(0), 0);
4264
4265
4266 tctl = rd32(E1000_TCTL);
4267 tctl &= ~E1000_TCTL_CT;
4268 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
4269 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
4270
4271 igb_config_collision_dist(hw);
4272
4273
4274 tctl |= E1000_TCTL_EN;
4275
4276 wr32(E1000_TCTL, tctl);
4277}
4278
4279
4280
4281
4282
4283
4284
4285
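/**
 *  igb_configure_tx_ring - Configure transmit ring after Reset
 *  @adapter: board private structure
 *  @ring: tx ring to configure
 *
 *  Configure a transmit ring after a reset.
 **/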
4286void igb_configure_tx_ring(struct igb_adapter *adapter,
4287 struct igb_ring *ring)
4288{
4289 struct e1000_hw *hw = &adapter->hw;
4290 u32 txdctl = 0;
4291 u64 tdba = ring->dma;
4292 int reg_idx = ring->reg_idx;
4293
4294 wr32(E1000_TDLEN(reg_idx),
4295 ring->count * sizeof(union e1000_adv_tx_desc));
4296 wr32(E1000_TDBAL(reg_idx),
4297 tdba & 0x00000000ffffffffULL);
4298 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
4299
4300 ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
4301 wr32(E1000_TDH(reg_idx), 0);
4302 writel(0, ring->tail);
4303
4304 txdctl |= IGB_TX_PTHRESH;
4305 txdctl |= IGB_TX_HTHRESH << 8;
4306 txdctl |= IGB_TX_WTHRESH << 16;
4307
4308
4309 memset(ring->tx_buffer_info, 0,
4310 sizeof(struct igb_tx_buffer) * ring->count);
4311
4312 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
4313 wr32(E1000_TXDCTL(reg_idx), txdctl);
4314}
4315
4316
4317
4318
4319
4320
4321
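/**
 *  igb_configure_tx - Configure transmit Unit after Reset
 *  @adapter: board private structure
 *
 *  Configure the Tx unit of the MAC after a reset.
 **/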
4322static void igb_configure_tx(struct igb_adapter *adapter)
4323{
4324 struct e1000_hw *hw = &adapter->hw;
4325 int i;
4326
4327
4328 for (i = 0; i < adapter->num_tx_queues; i++)
4329 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4330
4331 wrfl();
4332 usleep_range(10000, 20000);
4333
4334 for (i = 0; i < adapter->num_tx_queues; i++)
4335 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4336}
4337
4338
4339
4340
4341
4342
4343
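/**
 *  igb_setup_rx_resources - allocate Rx resources (Descriptors)
 *  @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 *  Returns 0 on success, negative on failure
 **/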
4344int igb_setup_rx_resources(struct igb_ring *rx_ring)
4345{
4346 struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
4347 struct device *dev = rx_ring->dev;
4348 int size, res;
4349
4350
4351 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
4352 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4353 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
4354 rx_ring->queue_index, 0);
4355 if (res < 0) {
4356 dev_err(dev, "Failed to register xdp_rxq index %u\n",
4357 rx_ring->queue_index);
4358 return res;
4359 }
4360
4361 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
4362
4363 rx_ring->rx_buffer_info = vmalloc(size);
4364 if (!rx_ring->rx_buffer_info)
4365 goto err;
4366
4367
4368 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
4369 rx_ring->size = ALIGN(rx_ring->size, 4096);
4370
4371 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4372 &rx_ring->dma, GFP_KERNEL);
4373 if (!rx_ring->desc)
4374 goto err;
4375
4376 rx_ring->next_to_alloc = 0;
4377 rx_ring->next_to_clean = 0;
4378 rx_ring->next_to_use = 0;
4379
4380 rx_ring->xdp_prog = adapter->xdp_prog;
4381
4382 return 0;
4383
4384err:
4385 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4386 vfree(rx_ring->rx_buffer_info);
4387 rx_ring->rx_buffer_info = NULL;
4388 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4389 return -ENOMEM;
4390}
4391
4392
4393
4394
4395
4396
4397
4398
4399static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
4400{
4401 struct pci_dev *pdev = adapter->pdev;
4402 int i, err = 0;
4403
4404 for (i = 0; i < adapter->num_rx_queues; i++) {
4405 err = igb_setup_rx_resources(adapter->rx_ring[i]);
4406 if (err) {
4407 dev_err(&pdev->dev,
4408 "Allocation for Rx Queue %u failed\n", i);
4409 for (i--; i >= 0; i--)
4410 igb_free_rx_resources(adapter->rx_ring[i]);
4411 break;
4412 }
4413 }
4414
4415 return err;
4416}
4417
4418
4419
4420
4421
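/**
 *  igb_setup_mrqc - configure the multiple receive queue control registers
 *  @adapter: Board private structure
 **/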
4422static void igb_setup_mrqc(struct igb_adapter *adapter)
4423{
4424 struct e1000_hw *hw = &adapter->hw;
4425 u32 mrqc, rxcsum;
4426 u32 j, num_rx_queues;
4427 u32 rss_key[10];
4428
4429 netdev_rss_key_fill(rss_key, sizeof(rss_key));
4430 for (j = 0; j < 10; j++)
4431 wr32(E1000_RSSRK(j), rss_key[j]);
4432
4433 num_rx_queues = adapter->rss_queues;
4434
4435 switch (hw->mac.type) {
4436 case e1000_82576:
4437
4438 if (adapter->vfs_allocated_count)
4439 num_rx_queues = 2;
4440 break;
4441 default:
4442 break;
4443 }
4444
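	/* Fill the redirection table with an even spread of the Rx queues.
	 * Illustrative example (assuming IGB_RETA_SIZE of 128 as defined in
	 * igb.h): with 4 RSS queues, entries 0-31 map to queue 0, entries
	 * 32-63 to queue 1, and so on.
	 */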
4445 if (adapter->rss_indir_tbl_init != num_rx_queues) {
4446 for (j = 0; j < IGB_RETA_SIZE; j++)
4447 adapter->rss_indir_tbl[j] =
4448 (j * num_rx_queues) / IGB_RETA_SIZE;
4449 adapter->rss_indir_tbl_init = num_rx_queues;
4450 }
4451 igb_write_rss_indir_tbl(adapter);
4452
	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
4457 rxcsum = rd32(E1000_RXCSUM);
4458 rxcsum |= E1000_RXCSUM_PCSD;
4459
4460 if (adapter->hw.mac.type >= e1000_82576)
4461
4462 rxcsum |= E1000_RXCSUM_CRCOFL;
4463
4464
4465 wr32(E1000_RXCSUM, rxcsum);
4466
4467
4468
4469
4470 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
4471 E1000_MRQC_RSS_FIELD_IPV4_TCP |
4472 E1000_MRQC_RSS_FIELD_IPV6 |
4473 E1000_MRQC_RSS_FIELD_IPV6_TCP |
4474 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
4475
4476 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
4477 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
4478 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
4479 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
4480
4481
4482
4483
4484
4485 if (adapter->vfs_allocated_count) {
4486 if (hw->mac.type > e1000_82575) {
4487
4488 u32 vtctl = rd32(E1000_VT_CTL);
4489
4490 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
4491 E1000_VT_CTL_DISABLE_DEF_POOL);
4492 vtctl |= adapter->vfs_allocated_count <<
4493 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
4494 wr32(E1000_VT_CTL, vtctl);
4495 }
4496 if (adapter->rss_queues > 1)
4497 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
4498 else
4499 mrqc |= E1000_MRQC_ENABLE_VMDQ;
4500 } else {
4501 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4502 }
4503 igb_vmm_control(adapter);
4504
4505 wr32(E1000_MRQC, mrqc);
4506}
4507
4508
4509
4510
4511
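/**
 *  igb_setup_rctl - configure the receive control registers
 *  @adapter: Board private structure
 **/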
4512void igb_setup_rctl(struct igb_adapter *adapter)
4513{
4514 struct e1000_hw *hw = &adapter->hw;
4515 u32 rctl;
4516
4517 rctl = rd32(E1000_RCTL);
4518
4519 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4520 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4521
4522 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4523 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4524
4525
4526
4527
4528
4529 rctl |= E1000_RCTL_SECRC;
4530
4531
4532 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
4533
4534
4535 rctl |= E1000_RCTL_LPE;
4536
4537
4538 wr32(E1000_RXDCTL(0), 0);
4539
	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
4544 if (adapter->vfs_allocated_count) {
4545
4546 wr32(E1000_QDE, ALL_QUEUES);
4547 }
4548
4549
4550 if (adapter->netdev->features & NETIF_F_RXALL) {
4551
4552
4553
4554 rctl |= (E1000_RCTL_SBP |
4555 E1000_RCTL_BAM |
4556 E1000_RCTL_PMCF);
4557
4558 rctl &= ~(E1000_RCTL_DPF |
4559 E1000_RCTL_CFIEN);
4560
4561
4562
4563 }
4564
4565 wr32(E1000_RCTL, rctl);
4566}
4567
4568static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4569 int vfn)
4570{
4571 struct e1000_hw *hw = &adapter->hw;
4572 u32 vmolr;
4573
4574 if (size > MAX_JUMBO_FRAME_SIZE)
4575 size = MAX_JUMBO_FRAME_SIZE;
4576
4577 vmolr = rd32(E1000_VMOLR(vfn));
4578 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4579 vmolr |= size | E1000_VMOLR_LPE;
4580 wr32(E1000_VMOLR(vfn), vmolr);
4581
4582 return 0;
4583}
4584
4585static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4586 int vfn, bool enable)
4587{
4588 struct e1000_hw *hw = &adapter->hw;
4589 u32 val, reg;
4590
4591 if (hw->mac.type < e1000_82576)
4592 return;
4593
4594 if (hw->mac.type == e1000_i350)
4595 reg = E1000_DVMOLR(vfn);
4596 else
4597 reg = E1000_VMOLR(vfn);
4598
4599 val = rd32(reg);
4600 if (enable)
4601 val |= E1000_VMOLR_STRVLAN;
4602 else
4603 val &= ~(E1000_VMOLR_STRVLAN);
4604 wr32(reg, val);
4605}
4606
4607static inline void igb_set_vmolr(struct igb_adapter *adapter,
4608 int vfn, bool aupe)
4609{
4610 struct e1000_hw *hw = &adapter->hw;
4611 u32 vmolr;
4612
	/* This register exists only on 82576 and newer so if we are older then
	 * we should exit and do nothing
	 */
4616 if (hw->mac.type < e1000_82576)
4617 return;
4618
4619 vmolr = rd32(E1000_VMOLR(vfn));
4620 if (aupe)
4621 vmolr |= E1000_VMOLR_AUPE;
4622 else
4623 vmolr &= ~(E1000_VMOLR_AUPE);
4624
4625
4626 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4627
4628 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
4629 vmolr |= E1000_VMOLR_RSSE;
4630
4631
4632
4633 if (vfn <= adapter->vfs_allocated_count)
4634 vmolr |= E1000_VMOLR_BAM;
4635
4636 wr32(E1000_VMOLR(vfn), vmolr);
4637}
4638
4639
4640
4641
4642
4643
4644
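/**
 *  igb_setup_srrctl - configure the split and replication receive control
 *                     registers
 *  @adapter: Board private structure
 *  @ring: receive ring to be configured
 **/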
4645void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
4646{
4647 struct e1000_hw *hw = &adapter->hw;
4648 int reg_idx = ring->reg_idx;
4649 u32 srrctl = 0;
4650
4651 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4652 if (ring_uses_large_buffer(ring))
4653 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4654 else
4655 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4656 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4657 if (hw->mac.type >= e1000_82580)
4658 srrctl |= E1000_SRRCTL_TIMESTAMP;
4659
	/* Only set Drop Enable if VFs allocated, or we are supporting multiple
	 * rx queues and flow control is disabled
	 */
4662 if (adapter->vfs_allocated_count ||
4663 (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
4664 adapter->num_rx_queues > 1))
4665 srrctl |= E1000_SRRCTL_DROP_EN;
4666
4667 wr32(E1000_SRRCTL(reg_idx), srrctl);
4668}
4669
4670
4671
4672
4673
4674
4675
4676
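/**
 *  igb_configure_rx_ring - Configure a receive ring after Reset
 *  @adapter: board private structure
 *  @ring: receive ring to be configured
 *
 *  Configure the Rx unit of the MAC after a reset.
 **/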
4677void igb_configure_rx_ring(struct igb_adapter *adapter,
4678 struct igb_ring *ring)
4679{
4680 struct e1000_hw *hw = &adapter->hw;
4681 union e1000_adv_rx_desc *rx_desc;
4682 u64 rdba = ring->dma;
4683 int reg_idx = ring->reg_idx;
4684 u32 rxdctl = 0;
4685
4686 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4687 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4688 MEM_TYPE_PAGE_SHARED, NULL));
4689
4690
4691 wr32(E1000_RXDCTL(reg_idx), 0);
4692
4693
4694 wr32(E1000_RDBAL(reg_idx),
4695 rdba & 0x00000000ffffffffULL);
4696 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4697 wr32(E1000_RDLEN(reg_idx),
4698 ring->count * sizeof(union e1000_adv_rx_desc));
4699
4700
4701 ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
4702 wr32(E1000_RDH(reg_idx), 0);
4703 writel(0, ring->tail);
4704
4705
4706 igb_setup_srrctl(adapter, ring);
4707
4708
4709 igb_set_vmolr(adapter, reg_idx & 0x7, true);
4710
4711 rxdctl |= IGB_RX_PTHRESH;
4712 rxdctl |= IGB_RX_HTHRESH << 8;
4713 rxdctl |= IGB_RX_WTHRESH << 16;
4714
4715
4716 memset(ring->rx_buffer_info, 0,
4717 sizeof(struct igb_rx_buffer) * ring->count);
4718
4719
4720 rx_desc = IGB_RX_DESC(ring, 0);
4721 rx_desc->wb.upper.length = 0;
4722
4723
4724 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4725 wr32(E1000_RXDCTL(reg_idx), rxdctl);
4726}
4727
4728static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4729 struct igb_ring *rx_ring)
4730{
4731
4732 clear_ring_build_skb_enabled(rx_ring);
4733 clear_ring_uses_large_buffer(rx_ring);
4734
4735 if (adapter->flags & IGB_FLAG_RX_LEGACY)
4736 return;
4737
4738 set_ring_build_skb_enabled(rx_ring);
4739
4740#if (PAGE_SIZE < 8192)
4741 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4742 return;
4743
4744 set_ring_uses_large_buffer(rx_ring);
4745#endif
4746}
4747
4748
4749
4750
4751
4752
4753
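/**
 *  igb_configure_rx - Configure receive Unit after Reset
 *  @adapter: board private structure
 *
 *  Configure the Rx unit of the MAC after a reset.
 **/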
4754static void igb_configure_rx(struct igb_adapter *adapter)
4755{
4756 int i;
4757
4758
4759 igb_set_default_mac_filter(adapter);
4760
4761
4762
4763
4764 for (i = 0; i < adapter->num_rx_queues; i++) {
4765 struct igb_ring *rx_ring = adapter->rx_ring[i];
4766
4767 igb_set_rx_buffer_len(adapter, rx_ring);
4768 igb_configure_rx_ring(adapter, rx_ring);
4769 }
4770}
4771
4772
4773
4774
4775
4776
4777
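/**
 *  igb_free_tx_resources - Free Tx Resources per Queue
 *  @tx_ring: Tx descriptor ring for a specific queue
 *
 *  Free all transmit software resources
 **/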
4778void igb_free_tx_resources(struct igb_ring *tx_ring)
4779{
4780 igb_clean_tx_ring(tx_ring);
4781
4782 vfree(tx_ring->tx_buffer_info);
4783 tx_ring->tx_buffer_info = NULL;
4784
4785
4786 if (!tx_ring->desc)
4787 return;
4788
4789 dma_free_coherent(tx_ring->dev, tx_ring->size,
4790 tx_ring->desc, tx_ring->dma);
4791
4792 tx_ring->desc = NULL;
4793}
4794
/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
4801static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4802{
4803 int i;
4804
4805 for (i = 0; i < adapter->num_tx_queues; i++)
4806 if (adapter->tx_ring[i])
4807 igb_free_tx_resources(adapter->tx_ring[i]);
4808}
4809
/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
4814static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4815{
4816 u16 i = tx_ring->next_to_clean;
4817 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4818
4819 while (i != tx_ring->next_to_use) {
4820 union e1000_adv_tx_desc *eop_desc, *tx_desc;
4821
4822
4823 if (tx_buffer->type == IGB_TYPE_SKB)
4824 dev_kfree_skb_any(tx_buffer->skb);
4825 else
4826 xdp_return_frame(tx_buffer->xdpf);
4827
4828
4829 dma_unmap_single(tx_ring->dev,
4830 dma_unmap_addr(tx_buffer, dma),
4831 dma_unmap_len(tx_buffer, len),
4832 DMA_TO_DEVICE);
4833
4834
4835 eop_desc = tx_buffer->next_to_watch;
4836 tx_desc = IGB_TX_DESC(tx_ring, i);
4837
4838
4839 while (tx_desc != eop_desc) {
4840 tx_buffer++;
4841 tx_desc++;
4842 i++;
4843 if (unlikely(i == tx_ring->count)) {
4844 i = 0;
4845 tx_buffer = tx_ring->tx_buffer_info;
4846 tx_desc = IGB_TX_DESC(tx_ring, 0);
4847 }
4848
4849
4850 if (dma_unmap_len(tx_buffer, len))
4851 dma_unmap_page(tx_ring->dev,
4852 dma_unmap_addr(tx_buffer, dma),
4853 dma_unmap_len(tx_buffer, len),
4854 DMA_TO_DEVICE);
4855 }
4856
4857 tx_buffer->next_to_watch = NULL;
4858
4859
4860 tx_buffer++;
4861 i++;
4862 if (unlikely(i == tx_ring->count)) {
4863 i = 0;
4864 tx_buffer = tx_ring->tx_buffer_info;
4865 }
4866 }
4867
4868
4869 netdev_tx_reset_queue(txring_txq(tx_ring));
4870
4871
4872 tx_ring->next_to_use = 0;
4873 tx_ring->next_to_clean = 0;
4874}
4875
/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
4880static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4881{
4882 int i;
4883
4884 for (i = 0; i < adapter->num_tx_queues; i++)
4885 if (adapter->tx_ring[i])
4886 igb_clean_tx_ring(adapter->tx_ring[i]);
4887}
4888
/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
4895void igb_free_rx_resources(struct igb_ring *rx_ring)
4896{
4897 igb_clean_rx_ring(rx_ring);
4898
4899 rx_ring->xdp_prog = NULL;
4900 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4901 vfree(rx_ring->rx_buffer_info);
4902 rx_ring->rx_buffer_info = NULL;
4903
4904
4905 if (!rx_ring->desc)
4906 return;
4907
4908 dma_free_coherent(rx_ring->dev, rx_ring->size,
4909 rx_ring->desc, rx_ring->dma);
4910
4911 rx_ring->desc = NULL;
4912}
4913
/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
4920static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4921{
4922 int i;
4923
4924 for (i = 0; i < adapter->num_rx_queues; i++)
4925 if (adapter->rx_ring[i])
4926 igb_free_rx_resources(adapter->rx_ring[i]);
4927}
4928
/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
4933static void igb_clean_rx_ring(struct igb_ring *rx_ring)
4934{
4935 u16 i = rx_ring->next_to_clean;
4936
4937 dev_kfree_skb(rx_ring->skb);
4938 rx_ring->skb = NULL;
4939
4940
4941 while (i != rx_ring->next_to_alloc) {
4942 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
4943
4944
4945
4946
4947 dma_sync_single_range_for_cpu(rx_ring->dev,
4948 buffer_info->dma,
4949 buffer_info->page_offset,
4950 igb_rx_bufsz(rx_ring),
4951 DMA_FROM_DEVICE);
4952
4953
4954 dma_unmap_page_attrs(rx_ring->dev,
4955 buffer_info->dma,
4956 igb_rx_pg_size(rx_ring),
4957 DMA_FROM_DEVICE,
4958 IGB_RX_DMA_ATTR);
4959 __page_frag_cache_drain(buffer_info->page,
4960 buffer_info->pagecnt_bias);
4961
4962 i++;
4963 if (i == rx_ring->count)
4964 i = 0;
4965 }
4966
4967 rx_ring->next_to_alloc = 0;
4968 rx_ring->next_to_clean = 0;
4969 rx_ring->next_to_use = 0;
4970}
4971
/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
4976static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4977{
4978 int i;
4979
4980 for (i = 0; i < adapter->num_rx_queues; i++)
4981 if (adapter->rx_ring[i])
4982 igb_clean_rx_ring(adapter->rx_ring[i]);
4983}
4984
/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
4992static int igb_set_mac(struct net_device *netdev, void *p)
4993{
4994 struct igb_adapter *adapter = netdev_priv(netdev);
4995 struct e1000_hw *hw = &adapter->hw;
4996 struct sockaddr *addr = p;
4997
4998 if (!is_valid_ether_addr(addr->sa_data))
4999 return -EADDRNOTAVAIL;
5000
5001 eth_hw_addr_set(netdev, addr->sa_data);
5002 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
5003
5004
5005 igb_set_default_mac_filter(adapter);
5006
5007 return 0;
5008}
5009
/**
 * igb_write_mc_addr_list - write multicast addresses to the MTA hash table
 * @netdev: network interface device structure
 *
 * Returns: -ENOMEM on failure
 *          0 when no addresses are written
 *          X when writing X addresses to the MTA
 **/
5019static int igb_write_mc_addr_list(struct net_device *netdev)
5020{
5021 struct igb_adapter *adapter = netdev_priv(netdev);
5022 struct e1000_hw *hw = &adapter->hw;
5023 struct netdev_hw_addr *ha;
5024 u8 *mta_list;
5025 int i;
5026
5027 if (netdev_mc_empty(netdev)) {
5028
5029 igb_update_mc_addr_list(hw, NULL, 0);
5030 igb_restore_vf_multicasts(adapter);
5031 return 0;
5032 }
5033
 mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
5035 if (!mta_list)
5036 return -ENOMEM;
5037
5038
5039 i = 0;
5040 netdev_for_each_mc_addr(ha, netdev)
5041 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
5042
5043 igb_update_mc_addr_list(hw, mta_list, i);
5044 kfree(mta_list);
5045
5046 return netdev_mc_count(netdev);
5047}
5048
5049static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
5050{
5051 struct e1000_hw *hw = &adapter->hw;
5052 u32 i, pf_id;
5053
5054 switch (hw->mac.type) {
5055 case e1000_i210:
5056 case e1000_i211:
5057 case e1000_i350:
5058
5059 if (adapter->netdev->features & NETIF_F_NTUPLE)
5060 break;
5061 fallthrough;
5062 case e1000_82576:
5063 case e1000_82580:
5064 case e1000_i354:
5065
5066 if (adapter->vfs_allocated_count)
5067 break;
5068 fallthrough;
5069 default:
5070 return 1;
5071 }
5072
5073
5074 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
5075 return 0;
5076
5077 if (!adapter->vfs_allocated_count)
5078 goto set_vfta;
5079
5080
5081 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
5082
5083 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
5084 u32 vlvf = rd32(E1000_VLVF(i));
5085
5086 vlvf |= BIT(pf_id);
5087 wr32(E1000_VLVF(i), vlvf);
5088 }
5089
5090set_vfta:
5091
5092 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
5093 hw->mac.ops.write_vfta(hw, i, ~0U);
5094
5095
5096 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
5097
5098 return 0;
5099}
5100
5101#define VFTA_BLOCK_SIZE 8
5102static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
5103{
5104 struct e1000_hw *hw = &adapter->hw;
5105 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
5106 u32 vid_start = vfta_offset * 32;
5107 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
5108 u32 i, vid, word, bits, pf_id;
5109
5110
5111 vid = adapter->mng_vlan_id;
5112 if (vid >= vid_start && vid < vid_end)
5113 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
5114
5115 if (!adapter->vfs_allocated_count)
5116 goto set_vfta;
5117
5118 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
5119
5120 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
5121 u32 vlvf = rd32(E1000_VLVF(i));
5122
5123
5124 vid = vlvf & VLAN_VID_MASK;
5125
5126
5127 if (vid < vid_start || vid >= vid_end)
5128 continue;
5129
5130 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
5131
5132 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
5133
5134
5135 if (test_bit(vid, adapter->active_vlans))
5136 continue;
5137 }
5138
5139
5140 bits = ~BIT(pf_id);
5141 bits &= rd32(E1000_VLVF(i));
5142 wr32(E1000_VLVF(i), bits);
5143 }
5144
5145set_vfta:
5146
5147 for (i = VFTA_BLOCK_SIZE; i--;) {
5148 vid = (vfta_offset + i) * 32;
5149 word = vid / BITS_PER_LONG;
5150 bits = vid % BITS_PER_LONG;
5151
5152 vfta[i] |= adapter->active_vlans[word] >> bits;
5153
5154 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
5155 }
5156}
5157
5158static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
5159{
5160 u32 i;
5161
5162
5163 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
5164 return;
5165
5166
5167 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
5168
5169 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
5170 igb_scrub_vfta(adapter, i);
5171}
5172
/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine
 * is responsible for configuring the hardware for proper unicast,
 * multicast, promiscuous mode, and VLAN filtering.
 **/
5182static void igb_set_rx_mode(struct net_device *netdev)
5183{
5184 struct igb_adapter *adapter = netdev_priv(netdev);
5185 struct e1000_hw *hw = &adapter->hw;
5186 unsigned int vfn = adapter->vfs_allocated_count;
5187 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
5188 int count;
5189
5190
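	/* Check for Promiscuous and All Multicast modes */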
5191 if (netdev->flags & IFF_PROMISC) {
5192 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
5193 vmolr |= E1000_VMOLR_MPME;
5194
5195
5196 if (hw->mac.type == e1000_82576)
5197 vmolr |= E1000_VMOLR_ROPE;
5198 } else {
5199 if (netdev->flags & IFF_ALLMULTI) {
5200 rctl |= E1000_RCTL_MPE;
5201 vmolr |= E1000_VMOLR_MPME;
5202 } else {
5203
5204
5205
5206
5207 count = igb_write_mc_addr_list(netdev);
5208 if (count < 0) {
5209 rctl |= E1000_RCTL_MPE;
5210 vmolr |= E1000_VMOLR_MPME;
5211 } else if (count) {
5212 vmolr |= E1000_VMOLR_ROMPE;
5213 }
5214 }
5215 }
5216
5217
5218
5219
5220
5221 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
5222 rctl |= E1000_RCTL_UPE;
5223 vmolr |= E1000_VMOLR_ROPE;
5224 }
5225
5226
5227 rctl |= E1000_RCTL_VFE;
5228
5229
5230 if ((netdev->flags & IFF_PROMISC) ||
5231 (netdev->features & NETIF_F_RXALL)) {
5232
5233 if (igb_vlan_promisc_enable(adapter))
5234 rctl &= ~E1000_RCTL_VFE;
5235 } else {
5236 igb_vlan_promisc_disable(adapter);
5237 }
5238
5239
5240 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
5241 E1000_RCTL_VFE);
5242 wr32(E1000_RCTL, rctl);
5243
5244#if (PAGE_SIZE < 8192)
5245 if (!adapter->vfs_allocated_count) {
5246 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5247 rlpml = IGB_MAX_FRAME_BUILD_SKB;
5248 }
5249#endif
5250 wr32(E1000_RLPML, rlpml);
5251
5252
5253
5254
5255
5256
5257 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
5258 return;
5259
5260
5261 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
5262
5263 vmolr |= rd32(E1000_VMOLR(vfn)) &
5264 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
5265
5266
5267 vmolr &= ~E1000_VMOLR_RLPML_MASK;
5268#if (PAGE_SIZE < 8192)
5269 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5270 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
5271 else
5272#endif
5273 vmolr |= MAX_JUMBO_FRAME_SIZE;
5274 vmolr |= E1000_VMOLR_LPE;
5275
5276 wr32(E1000_VMOLR(vfn), vmolr);
5277
5278 igb_restore_vf_multicasts(adapter);
5279}
5280
5281static void igb_check_wvbr(struct igb_adapter *adapter)
5282{
5283 struct e1000_hw *hw = &adapter->hw;
5284 u32 wvbr = 0;
5285
5286 switch (hw->mac.type) {
5287 case e1000_82576:
5288 case e1000_i350:
5289 wvbr = rd32(E1000_WVBR);
5290 if (!wvbr)
5291 return;
5292 break;
5293 default:
5294 break;
5295 }
5296
5297 adapter->wvbr |= wvbr;
5298}
5299
5300#define IGB_STAGGERED_QUEUE_OFFSET 8
5301
5302static void igb_spoof_check(struct igb_adapter *adapter)
5303{
5304 int j;
5305
5306 if (!adapter->wvbr)
5307 return;
5308
5309 for (j = 0; j < adapter->vfs_allocated_count; j++) {
5310 if (adapter->wvbr & BIT(j) ||
5311 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
5312 dev_warn(&adapter->pdev->dev,
5313 "Spoof event(s) detected on VF %d\n", j);
5314 adapter->wvbr &=
5315 ~(BIT(j) |
5316 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
5317 }
5318 }
5319}
5320
/**
 * igb_update_phy_info - Timer Call-back
 * @t: pointer to timer_list containing our private info pointer
 **/
5324static void igb_update_phy_info(struct timer_list *t)
5325{
5326 struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5327 igb_get_phy_info(&adapter->hw);
5328}
5329
/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
5334bool igb_has_link(struct igb_adapter *adapter)
5335{
5336 struct e1000_hw *hw = &adapter->hw;
5337 bool link_active = false;
5338
5339
5340
5341
5342
5343
5344 switch (hw->phy.media_type) {
5345 case e1000_media_type_copper:
5346 if (!hw->mac.get_link_status)
5347 return true;
5348 fallthrough;
5349 case e1000_media_type_internal_serdes:
5350 hw->mac.ops.check_for_link(hw);
5351 link_active = !hw->mac.get_link_status;
5352 break;
5353 default:
5354 case e1000_media_type_unknown:
5355 break;
5356 }
5357
5358 if (((hw->mac.type == e1000_i210) ||
5359 (hw->mac.type == e1000_i211)) &&
5360 (hw->phy.id == I210_I_PHY_ID)) {
5361 if (!netif_carrier_ok(adapter->netdev)) {
5362 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5363 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
5364 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
5365 adapter->link_check_timeout = jiffies;
5366 }
5367 }
5368
5369 return link_active;
5370}
5371
5372static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
5373{
5374 bool ret = false;
5375 u32 ctrl_ext, thstat;
5376
5377
5378 if (hw->mac.type == e1000_i350) {
5379 thstat = rd32(E1000_THSTAT);
5380 ctrl_ext = rd32(E1000_CTRL_EXT);
5381
5382 if ((hw->phy.media_type == e1000_media_type_copper) &&
5383 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
5384 ret = !!(thstat & event);
5385 }
5386
5387 return ret;
5388}
5389
/**
 * igb_check_lvmmc - check for malformed packets flagged
 * in the LVMMC register
 * @adapter: board private structure
 **/
5395static void igb_check_lvmmc(struct igb_adapter *adapter)
5396{
5397 struct e1000_hw *hw = &adapter->hw;
5398 u32 lvmmc;
5399
5400 lvmmc = rd32(E1000_LVMMC);
5401 if (lvmmc) {
5402 if (unlikely(net_ratelimit())) {
5403 netdev_warn(adapter->netdev,
5404 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
5405 lvmmc);
5406 }
5407 }
5408}
5409
/**
 * igb_watchdog - Timer Call-back
 * @t: pointer to timer_list containing our private info pointer
 **/
5414static void igb_watchdog(struct timer_list *t)
5415{
5416 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5417
5418 schedule_work(&adapter->watchdog_task);
5419}
5420
5421static void igb_watchdog_task(struct work_struct *work)
5422{
5423 struct igb_adapter *adapter = container_of(work,
5424 struct igb_adapter,
5425 watchdog_task);
5426 struct e1000_hw *hw = &adapter->hw;
5427 struct e1000_phy_info *phy = &hw->phy;
5428 struct net_device *netdev = adapter->netdev;
5429 u32 link;
5430 int i;
5431 u32 connsw;
5432 u16 phy_data, retry_count = 20;
5433
5434 link = igb_has_link(adapter);
5435
5436 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
5437 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5438 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5439 else
5440 link = false;
5441 }
5442
5443
5444 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5445 if (hw->phy.media_type == e1000_media_type_copper) {
5446 connsw = rd32(E1000_CONNSW);
5447 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
5448 link = 0;
5449 }
5450 }
5451 if (link) {
5452
5453 if (hw->dev_spec._82575.media_changed) {
5454 hw->dev_spec._82575.media_changed = false;
5455 adapter->flags |= IGB_FLAG_MEDIA_RESET;
5456 igb_reset(adapter);
5457 }
5458
5459 pm_runtime_resume(netdev->dev.parent);
5460
5461 if (!netif_carrier_ok(netdev)) {
5462 u32 ctrl;
5463
5464 hw->mac.ops.get_speed_and_duplex(hw,
5465 &adapter->link_speed,
5466 &adapter->link_duplex);
5467
5468 ctrl = rd32(E1000_CTRL);
5469
5470 netdev_info(netdev,
5471 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5472 netdev->name,
5473 adapter->link_speed,
5474 adapter->link_duplex == FULL_DUPLEX ?
5475 "Full" : "Half",
5476 (ctrl & E1000_CTRL_TFCE) &&
5477 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
5478 (ctrl & E1000_CTRL_RFCE) ? "RX" :
5479 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
5480
5481
5482 if ((adapter->flags & IGB_FLAG_EEE) &&
5483 (adapter->link_duplex == HALF_DUPLEX)) {
5484 dev_info(&adapter->pdev->dev,
5485 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
5486 adapter->hw.dev_spec._82575.eee_disable = true;
5487 adapter->flags &= ~IGB_FLAG_EEE;
5488 }
5489
5490
5491 igb_check_downshift(hw);
5492 if (phy->speed_downgraded)
5493 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5494
5495
5496 if (igb_thermal_sensor_event(hw,
5497 E1000_THSTAT_LINK_THROTTLE))
5498 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
5499
5500
5501 adapter->tx_timeout_factor = 1;
5502 switch (adapter->link_speed) {
5503 case SPEED_10:
5504 adapter->tx_timeout_factor = 14;
5505 break;
5506 case SPEED_100:
5507
5508 break;
5509 }
5510
5511 if (adapter->link_speed != SPEED_1000 ||
5512 !hw->phy.ops.read_reg)
5513 goto no_wait;
5514
5515
5516retry_read_status:
5517 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
5518 &phy_data)) {
5519 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5520 retry_count) {
5521 msleep(100);
5522 retry_count--;
5523 goto retry_read_status;
5524 } else if (!retry_count) {
5525 dev_err(&adapter->pdev->dev, "exceed max 2 second\n");
5526 }
5527 } else {
5528 dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n");
5529 }
5530no_wait:
5531 netif_carrier_on(netdev);
5532
5533 igb_ping_all_vfs(adapter);
5534 igb_check_vf_rate_limit(adapter);
5535
5536
5537 if (!test_bit(__IGB_DOWN, &adapter->state))
5538 mod_timer(&adapter->phy_info_timer,
5539 round_jiffies(jiffies + 2 * HZ));
5540 }
5541 } else {
5542 if (netif_carrier_ok(netdev)) {
5543 adapter->link_speed = 0;
5544 adapter->link_duplex = 0;
5545
5546
5547 if (igb_thermal_sensor_event(hw,
5548 E1000_THSTAT_PWR_DOWN)) {
5549 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5550 }
5551
5552
5553 netdev_info(netdev, "igb: %s NIC Link is Down\n",
5554 netdev->name);
5555 netif_carrier_off(netdev);
5556
5557 igb_ping_all_vfs(adapter);
5558
5559
5560 if (!test_bit(__IGB_DOWN, &adapter->state))
5561 mod_timer(&adapter->phy_info_timer,
5562 round_jiffies(jiffies + 2 * HZ));
5563
5564
5565 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5566 igb_check_swap_media(adapter);
5567 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5568 schedule_work(&adapter->reset_task);
5569
5570 return;
5571 }
5572 }
5573 pm_schedule_suspend(netdev->dev.parent,
5574 MSEC_PER_SEC * 5);
5575
5576
5577 } else if (!netif_carrier_ok(netdev) &&
5578 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5579 igb_check_swap_media(adapter);
5580 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5581 schedule_work(&adapter->reset_task);
5582
5583 return;
5584 }
5585 }
5586 }
5587
5588 spin_lock(&adapter->stats64_lock);
5589 igb_update_stats(adapter);
5590 spin_unlock(&adapter->stats64_lock);
5591
5592 for (i = 0; i < adapter->num_tx_queues; i++) {
5593 struct igb_ring *tx_ring = adapter->tx_ring[i];
5594 if (!netif_carrier_ok(netdev)) {
5595
5596
5597
5598
5599
5600 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5601 adapter->tx_timeout_count++;
5602 schedule_work(&adapter->reset_task);
5603
5604 return;
5605 }
5606 }
5607
5608
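	/* Force detection of hung controller every watchdog period */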
5609 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5610 }
5611
5612
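	/* Cause a software interrupt to ensure the Rx rings are cleaned */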
5613 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5614 u32 eics = 0;
5615
5616 for (i = 0; i < adapter->num_q_vectors; i++)
5617 eics |= adapter->q_vector[i]->eims_value;
5618 wr32(E1000_EICS, eics);
5619 } else {
5620 wr32(E1000_ICS, E1000_ICS_RXDMT0);
5621 }
5622
5623 igb_spoof_check(adapter);
5624 igb_ptp_rx_hang(adapter);
5625 igb_ptp_tx_hang(adapter);
5626
5627
5628 if ((adapter->hw.mac.type == e1000_i350) ||
5629 (adapter->hw.mac.type == e1000_i354))
5630 igb_check_lvmmc(adapter);
5631
5632
5633 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5634 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5635 mod_timer(&adapter->watchdog_timer,
5636 round_jiffies(jiffies + HZ));
5637 else
5638 mod_timer(&adapter->watchdog_timer,
5639 round_jiffies(jiffies + 2 * HZ));
5640 }
5641}
5642
5643enum latency_range {
5644 lowest_latency = 0,
5645 low_latency = 1,
5646 bulk_latency = 2,
5647 latency_invalid = 255
5648};
5649
/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based strictly on packet size.  This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by ethtool's coalescing settings.
 * NOTE: This function is called only when operating in a multiqueue
 *       receive environment.
 **/
5665static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5666{
5667 int new_val = q_vector->itr_val;
5668 int avg_wire_size = 0;
5669 struct igb_adapter *adapter = q_vector->adapter;
5670 unsigned int packets;
5671
5672
5673
5674
5675 if (adapter->link_speed != SPEED_1000) {
5676 new_val = IGB_4K_ITR;
5677 goto set_itr_val;
5678 }
5679
5680 packets = q_vector->rx.total_packets;
5681 if (packets)
5682 avg_wire_size = q_vector->rx.total_bytes / packets;
5683
5684 packets = q_vector->tx.total_packets;
5685 if (packets)
5686 avg_wire_size = max_t(u32, avg_wire_size,
5687 q_vector->tx.total_bytes / packets);
5688
5689
5690 if (!avg_wire_size)
5691 goto clear_counts;
5692
5693
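	/* Add 24 bytes to size to account for CRC, preamble, and gap */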
5694 avg_wire_size += 24;
5695
5696
5697 avg_wire_size = min(avg_wire_size, 3000);
5698
5699
5700 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5701 new_val = avg_wire_size / 3;
5702 else
5703 new_val = avg_wire_size / 2;
5704
5705
5706 if (new_val < IGB_20K_ITR &&
5707 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5708 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5709 new_val = IGB_20K_ITR;
5710
5711set_itr_val:
5712 if (new_val != q_vector->itr_val) {
5713 q_vector->itr_val = new_val;
5714 q_vector->set_itr = 1;
5715 }
5716clear_counts:
5717 q_vector->rx.total_bytes = 0;
5718 q_vector->rx.total_packets = 0;
5719 q_vector->tx.total_bytes = 0;
5720 q_vector->tx.total_packets = 0;
5721}
5722
/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by ethtool's coalescing settings.
 * NOTE: These calculations are only valid when operating in a single-
 *       queue environment.
 **/
5739static void igb_update_itr(struct igb_q_vector *q_vector,
5740 struct igb_ring_container *ring_container)
5741{
5742 unsigned int packets = ring_container->total_packets;
5743 unsigned int bytes = ring_container->total_bytes;
5744 u8 itrval = ring_container->itr;
5745
5746
5747 if (packets == 0)
5748 return;
5749
5750 switch (itrval) {
5751 case lowest_latency:
5752
5753 if (bytes/packets > 8000)
5754 itrval = bulk_latency;
5755 else if ((packets < 5) && (bytes > 512))
5756 itrval = low_latency;
5757 break;
5758 case low_latency:
5759 if (bytes > 10000) {
5760
5761 if (bytes/packets > 8000)
5762 itrval = bulk_latency;
5763 else if ((packets < 10) || ((bytes/packets) > 1200))
5764 itrval = bulk_latency;
5765 else if ((packets > 35))
5766 itrval = lowest_latency;
5767 } else if (bytes/packets > 2000) {
5768 itrval = bulk_latency;
5769 } else if (packets <= 2 && bytes < 512) {
5770 itrval = lowest_latency;
5771 }
5772 break;
5773 case bulk_latency:
5774 if (bytes > 25000) {
5775 if (packets > 35)
5776 itrval = low_latency;
5777 } else if (bytes < 1500) {
5778 itrval = low_latency;
5779 }
5780 break;
5781 }
5782
5783
5784 ring_container->total_bytes = 0;
5785 ring_container->total_packets = 0;
5786
5787
5788 ring_container->itr = itrval;
5789}
5790
5791static void igb_set_itr(struct igb_q_vector *q_vector)
5792{
5793 struct igb_adapter *adapter = q_vector->adapter;
5794 u32 new_itr = q_vector->itr_val;
5795 u8 current_itr = 0;
5796
5797
5798 if (adapter->link_speed != SPEED_1000) {
5799 current_itr = 0;
5800 new_itr = IGB_4K_ITR;
5801 goto set_itr_now;
5802 }
5803
5804 igb_update_itr(q_vector, &q_vector->tx);
5805 igb_update_itr(q_vector, &q_vector->rx);
5806
5807 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5808
5809
5810 if (current_itr == lowest_latency &&
5811 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5812 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5813 current_itr = low_latency;
5814
5815 switch (current_itr) {
5816
5817 case lowest_latency:
5818 new_itr = IGB_70K_ITR;
5819 break;
5820 case low_latency:
5821 new_itr = IGB_20K_ITR;
5822 break;
5823 case bulk_latency:
5824 new_itr = IGB_4K_ITR;
5825 break;
5826 default:
5827 break;
5828 }
5829
5830set_itr_now:
5831 if (new_itr != q_vector->itr_val) {
5832
5833
5834
5835
5836 new_itr = new_itr > q_vector->itr_val ?
5837 max((new_itr * q_vector->itr_val) /
5838 (new_itr + (q_vector->itr_val >> 2)),
5839 new_itr) : new_itr;
5840
5841
5842
5843
5844
5845
5846 q_vector->itr_val = new_itr;
5847 q_vector->set_itr = 1;
5848 }
5849}
5850
5851static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5852 struct igb_tx_buffer *first,
5853 u32 vlan_macip_lens, u32 type_tucmd,
5854 u32 mss_l4len_idx)
5855{
5856 struct e1000_adv_tx_context_desc *context_desc;
5857 u16 i = tx_ring->next_to_use;
5858 struct timespec64 ts;
5859
5860 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5861
5862 i++;
5863 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5864
5865
5866 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5867
5868
5869 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5870 mss_l4len_idx |= tx_ring->reg_idx << 4;
5871
5872 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5873 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
5874 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5875
5876
5877
5878
5879 if (tx_ring->launchtime_enable) {
5880 ts = ktime_to_timespec64(first->skb->tstamp);
5881 skb_txtime_consumed(first->skb);
5882 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5883 } else {
5884 context_desc->seqnum_seed = 0;
5885 }
5886}
5887
5888static int igb_tso(struct igb_ring *tx_ring,
5889 struct igb_tx_buffer *first,
5890 u8 *hdr_len)
5891{
5892 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5893 struct sk_buff *skb = first->skb;
5894 union {
5895 struct iphdr *v4;
5896 struct ipv6hdr *v6;
5897 unsigned char *hdr;
5898 } ip;
5899 union {
5900 struct tcphdr *tcp;
5901 struct udphdr *udp;
5902 unsigned char *hdr;
5903 } l4;
5904 u32 paylen, l4_offset;
5905 int err;
5906
5907 if (skb->ip_summed != CHECKSUM_PARTIAL)
5908 return 0;
5909
5910 if (!skb_is_gso(skb))
5911 return 0;
5912
5913 err = skb_cow_head(skb, 0);
5914 if (err < 0)
5915 return err;
5916
5917 ip.hdr = skb_network_header(skb);
5918 l4.hdr = skb_checksum_start(skb);
5919
5920
5921 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
5922 E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
5923
5924
5925 if (ip.v4->version == 4) {
5926 unsigned char *csum_start = skb_checksum_start(skb);
5927 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
5928
5929
5930
5931
5932 ip.v4->check = csum_fold(csum_partial(trans_start,
5933 csum_start - trans_start,
5934 0));
5935 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
5936
5937 ip.v4->tot_len = 0;
5938 first->tx_flags |= IGB_TX_FLAGS_TSO |
5939 IGB_TX_FLAGS_CSUM |
5940 IGB_TX_FLAGS_IPV4;
5941 } else {
5942 ip.v6->payload_len = 0;
5943 first->tx_flags |= IGB_TX_FLAGS_TSO |
5944 IGB_TX_FLAGS_CSUM;
5945 }
5946
5947
5948 l4_offset = l4.hdr - skb->data;
5949
5950
5951 paylen = skb->len - l4_offset;
5952 if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
5953
5954 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
5955 csum_replace_by_diff(&l4.tcp->check,
5956 (__force __wsum)htonl(paylen));
5957 } else {
5958
5959 *hdr_len = sizeof(*l4.udp) + l4_offset;
5960 csum_replace_by_diff(&l4.udp->check,
5961 (__force __wsum)htonl(paylen));
5962 }
5963
5964
5965 first->gso_segs = skb_shinfo(skb)->gso_segs;
5966 first->bytecount += (first->gso_segs - 1) * *hdr_len;
5967
5968
5969 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
5970 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
5971
5972
5973 vlan_macip_lens = l4.hdr - ip.hdr;
5974 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
5975 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5976
5977 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
5978 type_tucmd, mss_l4len_idx);
5979
5980 return 1;
5981}
5982
5983static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5984{
5985 struct sk_buff *skb = first->skb;
5986 u32 vlan_macip_lens = 0;
5987 u32 type_tucmd = 0;
5988
5989 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5990csum_failed:
5991 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
5992 !tx_ring->launchtime_enable)
5993 return;
5994 goto no_csum;
5995 }
5996
5997 switch (skb->csum_offset) {
5998 case offsetof(struct tcphdr, check):
5999 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
6000 fallthrough;
6001 case offsetof(struct udphdr, check):
6002 break;
6003 case offsetof(struct sctphdr, checksum):
6004
6005 if (skb_csum_is_sctp(skb)) {
6006 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
6007 break;
6008 }
6009 fallthrough;
6010 default:
6011 skb_checksum_help(skb);
6012 goto csum_failed;
6013 }
6014
6015
6016 first->tx_flags |= IGB_TX_FLAGS_CSUM;
6017 vlan_macip_lens = skb_checksum_start_offset(skb) -
6018 skb_network_offset(skb);
6019no_csum:
6020 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
6021 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
6022
6023 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
6024}
6025
6026#define IGB_SET_FLAG(_input, _flag, _result) \
6027 ((_flag <= _result) ? \
6028 ((u32)(_input & _flag) * (_result / _flag)) : \
6029 ((u32)(_input & _flag) / (_flag / _result)))
6030
6031static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
6032{
6033
6034 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
6035 E1000_ADVTXD_DCMD_DEXT |
6036 E1000_ADVTXD_DCMD_IFCS;
6037
6038
6039 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
6040 (E1000_ADVTXD_DCMD_VLE));
6041
6042
6043 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
6044 (E1000_ADVTXD_DCMD_TSE));
6045
6046
6047 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
6048 (E1000_ADVTXD_MAC_TSTAMP));
6049
6050
6051 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
6052
6053 return cmd_type;
6054}
6055
6056static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
6057 union e1000_adv_tx_desc *tx_desc,
6058 u32 tx_flags, unsigned int paylen)
6059{
6060 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
6061
6062
6063 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6064 olinfo_status |= tx_ring->reg_idx << 4;
6065
6066
6067 olinfo_status |= IGB_SET_FLAG(tx_flags,
6068 IGB_TX_FLAGS_CSUM,
6069 (E1000_TXD_POPTS_TXSM << 8));
6070
6071
6072 olinfo_status |= IGB_SET_FLAG(tx_flags,
6073 IGB_TX_FLAGS_IPV4,
6074 (E1000_TXD_POPTS_IXSM << 8));
6075
6076 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6077}
6078
6079static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6080{
6081 struct net_device *netdev = tx_ring->netdev;
6082
6083 netif_stop_subqueue(netdev, tx_ring->queue_index);
6084
6085
6086
6087
6088
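	/* Memory barrier: make the stopped queue state visible before
	 * re-checking the free descriptor count below
	 */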
6089 smp_mb();
6090
6091
6092
6093
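	/* We need to check again in case another CPU has just
	 * made room available
	 */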
6094 if (igb_desc_unused(tx_ring) < size)
6095 return -EBUSY;
6096
6097
6098 netif_wake_subqueue(netdev, tx_ring->queue_index);
6099
6100 u64_stats_update_begin(&tx_ring->tx_syncp2);
6101 tx_ring->tx_stats.restart_queue2++;
6102 u64_stats_update_end(&tx_ring->tx_syncp2);
6103
6104 return 0;
6105}
6106
6107static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6108{
6109 if (igb_desc_unused(tx_ring) >= size)
6110 return 0;
6111 return __igb_maybe_stop_tx(tx_ring, size);
6112}
6113
6114static int igb_tx_map(struct igb_ring *tx_ring,
6115 struct igb_tx_buffer *first,
6116 const u8 hdr_len)
6117{
6118 struct sk_buff *skb = first->skb;
6119 struct igb_tx_buffer *tx_buffer;
6120 union e1000_adv_tx_desc *tx_desc;
6121 skb_frag_t *frag;
6122 dma_addr_t dma;
6123 unsigned int data_len, size;
6124 u32 tx_flags = first->tx_flags;
6125 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
6126 u16 i = tx_ring->next_to_use;
6127
6128 tx_desc = IGB_TX_DESC(tx_ring, i);
6129
6130 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
6131
6132 size = skb_headlen(skb);
6133 data_len = skb->data_len;
6134
6135 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
6136
6137 tx_buffer = first;
6138
6139 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
6140 if (dma_mapping_error(tx_ring->dev, dma))
6141 goto dma_error;
6142
6143
6144 dma_unmap_len_set(tx_buffer, len, size);
6145 dma_unmap_addr_set(tx_buffer, dma, dma);
6146
6147 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6148
6149 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
6150 tx_desc->read.cmd_type_len =
6151 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
6152
6153 i++;
6154 tx_desc++;
6155 if (i == tx_ring->count) {
6156 tx_desc = IGB_TX_DESC(tx_ring, 0);
6157 i = 0;
6158 }
6159 tx_desc->read.olinfo_status = 0;
6160
6161 dma += IGB_MAX_DATA_PER_TXD;
6162 size -= IGB_MAX_DATA_PER_TXD;
6163
6164 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6165 }
6166
6167 if (likely(!data_len))
6168 break;
6169
6170 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
6171
6172 i++;
6173 tx_desc++;
6174 if (i == tx_ring->count) {
6175 tx_desc = IGB_TX_DESC(tx_ring, 0);
6176 i = 0;
6177 }
6178 tx_desc->read.olinfo_status = 0;
6179
6180 size = skb_frag_size(frag);
6181 data_len -= size;
6182
6183 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
6184 size, DMA_TO_DEVICE);
6185
6186 tx_buffer = &tx_ring->tx_buffer_info[i];
6187 }
6188
6189
6190 cmd_type |= size | IGB_TXD_DCMD;
6191 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6192
6193 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6194
6195
6196 first->time_stamp = jiffies;
6197
6198 skb_tx_timestamp(skb);
6199
6200
6201
6202
6203
6204
6205
6206
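	/* Force memory writes to complete before letting h/w know
	 * there are new descriptors to fetch
	 */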
6207 dma_wmb();
6208
6209
6210 first->next_to_watch = tx_desc;
6211
6212 i++;
6213 if (i == tx_ring->count)
6214 i = 0;
6215
6216 tx_ring->next_to_use = i;
6217
6218
6219 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6220
6221 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
6222 writel(i, tx_ring->tail);
6223 }
6224 return 0;
6225
6226dma_error:
6227 dev_err(tx_ring->dev, "TX DMA map failed\n");
6228 tx_buffer = &tx_ring->tx_buffer_info[i];
6229
6230
6231 while (tx_buffer != first) {
6232 if (dma_unmap_len(tx_buffer, len))
6233 dma_unmap_page(tx_ring->dev,
6234 dma_unmap_addr(tx_buffer, dma),
6235 dma_unmap_len(tx_buffer, len),
6236 DMA_TO_DEVICE);
6237 dma_unmap_len_set(tx_buffer, len, 0);
6238
6239 if (i-- == 0)
6240 i += tx_ring->count;
6241 tx_buffer = &tx_ring->tx_buffer_info[i];
6242 }
6243
6244 if (dma_unmap_len(tx_buffer, len))
6245 dma_unmap_single(tx_ring->dev,
6246 dma_unmap_addr(tx_buffer, dma),
6247 dma_unmap_len(tx_buffer, len),
6248 DMA_TO_DEVICE);
6249 dma_unmap_len_set(tx_buffer, len, 0);
6250
6251 dev_kfree_skb_any(tx_buffer->skb);
6252 tx_buffer->skb = NULL;
6253
6254 tx_ring->next_to_use = i;
6255
6256 return -1;
6257}
6258
6259int igb_xmit_xdp_ring(struct igb_adapter *adapter,
6260 struct igb_ring *tx_ring,
6261 struct xdp_frame *xdpf)
6262{
6263 union e1000_adv_tx_desc *tx_desc;
6264 u32 len, cmd_type, olinfo_status;
6265 struct igb_tx_buffer *tx_buffer;
6266 dma_addr_t dma;
6267 u16 i;
6268
6269 len = xdpf->len;
6270
6271 if (unlikely(!igb_desc_unused(tx_ring)))
6272 return IGB_XDP_CONSUMED;
6273
6274 dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);
6275 if (dma_mapping_error(tx_ring->dev, dma))
6276 return IGB_XDP_CONSUMED;
6277
6278
6279 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6280 tx_buffer->bytecount = len;
6281 tx_buffer->gso_segs = 1;
6282 tx_buffer->protocol = 0;
6283
6284 i = tx_ring->next_to_use;
6285 tx_desc = IGB_TX_DESC(tx_ring, i);
6286
6287 dma_unmap_len_set(tx_buffer, len, len);
6288 dma_unmap_addr_set(tx_buffer, dma, dma);
6289 tx_buffer->type = IGB_TYPE_XDP;
6290 tx_buffer->xdpf = xdpf;
6291
6292 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6293
6294
6295 cmd_type = E1000_ADVTXD_DTYP_DATA |
6296 E1000_ADVTXD_DCMD_DEXT |
6297 E1000_ADVTXD_DCMD_IFCS;
6298 cmd_type |= len | IGB_TXD_DCMD;
6299 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6300
6301 olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
6302
6303 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6304 olinfo_status |= tx_ring->reg_idx << 4;
6305
6306 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6307
6308 netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
6309
6310
6311 tx_buffer->time_stamp = jiffies;
6312
6313
6314 smp_wmb();
6315
6316
6317 i++;
6318 if (i == tx_ring->count)
6319 i = 0;
6320
6321 tx_buffer->next_to_watch = tx_desc;
6322 tx_ring->next_to_use = i;
6323
6324
6325 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6326
6327 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
6328 writel(i, tx_ring->tail);
6329
6330 return IGB_XDP_TX;
6331}
6332
6333netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
6334 struct igb_ring *tx_ring)
6335{
6336 struct igb_tx_buffer *first;
6337 int tso;
6338 u32 tx_flags = 0;
6339 unsigned short f;
6340 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6341 __be16 protocol = vlan_get_protocol(skb);
6342 u8 hdr_len = 0;
6343
6344
6345
6346
6347
6348
6349
6350 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6351 count += TXD_USE_COUNT(skb_frag_size(
6352 &skb_shinfo(skb)->frags[f]));
6353
6354 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
6355
6356 return NETDEV_TX_BUSY;
6357 }
6358
6359
6360 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6361 first->type = IGB_TYPE_SKB;
6362 first->skb = skb;
6363 first->bytecount = skb->len;
6364 first->gso_segs = 1;
6365
6366 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6367 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6368
6369 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
6370 !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
6371 &adapter->state)) {
6372 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6373 tx_flags |= IGB_TX_FLAGS_TSTAMP;
6374
6375 adapter->ptp_tx_skb = skb_get(skb);
6376 adapter->ptp_tx_start = jiffies;
6377 if (adapter->hw.mac.type == e1000_82576)
6378 schedule_work(&adapter->ptp_tx_work);
6379 } else {
6380 adapter->tx_hwtstamp_skipped++;
6381 }
6382 }
6383
6384 if (skb_vlan_tag_present(skb)) {
6385 tx_flags |= IGB_TX_FLAGS_VLAN;
6386 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
6387 }
6388
6389
6390 first->tx_flags = tx_flags;
6391 first->protocol = protocol;
6392
6393 tso = igb_tso(tx_ring, first, &hdr_len);
6394 if (tso < 0)
6395 goto out_drop;
6396 else if (!tso)
6397 igb_tx_csum(tx_ring, first);
6398
6399 if (igb_tx_map(tx_ring, first, hdr_len))
6400 goto cleanup_tx_tstamp;
6401
6402 return NETDEV_TX_OK;
6403
6404out_drop:
6405 dev_kfree_skb_any(first->skb);
6406 first->skb = NULL;
6407cleanup_tx_tstamp:
6408 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
6409 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6410
6411 dev_kfree_skb_any(adapter->ptp_tx_skb);
6412 adapter->ptp_tx_skb = NULL;
6413 if (adapter->hw.mac.type == e1000_82576)
6414 cancel_work_sync(&adapter->ptp_tx_work);
6415 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
6416 }
6417
6418 return NETDEV_TX_OK;
6419}
6420
6421static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
6422 struct sk_buff *skb)
6423{
6424 unsigned int r_idx = skb->queue_mapping;
6425
6426 if (r_idx >= adapter->num_tx_queues)
6427 r_idx = r_idx % adapter->num_tx_queues;
6428
6429 return adapter->tx_ring[r_idx];
6430}
6431
6432static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
6433 struct net_device *netdev)
6434{
6435 struct igb_adapter *adapter = netdev_priv(netdev);
6436
6437
6438
6439
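	/* The minimum packet size with TCTL.PSP set is 17 bytes, so pad
	 * the skb in order to meet this minimum size requirement
	 */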
6440 if (skb_put_padto(skb, 17))
6441 return NETDEV_TX_OK;
6442
6443 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
6444}
6445
/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: number of the Tx queue that hung (unused)
 **/
6451static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
6452{
6453 struct igb_adapter *adapter = netdev_priv(netdev);
6454 struct e1000_hw *hw = &adapter->hw;
6455
6456
6457 adapter->tx_timeout_count++;
6458
6459 if (hw->mac.type >= e1000_82580)
6460 hw->dev_spec._82575.global_device_reset = true;
6461
6462 schedule_work(&adapter->reset_task);
6463 wr32(E1000_EICS,
6464 (adapter->eims_enable_mask & ~adapter->eims_other));
6465}
6466
6467static void igb_reset_task(struct work_struct *work)
6468{
6469 struct igb_adapter *adapter;
6470 adapter = container_of(work, struct igb_adapter, reset_task);
6471
6472 rtnl_lock();
6473
6474 if (test_bit(__IGB_DOWN, &adapter->state) ||
6475 test_bit(__IGB_RESETTING, &adapter->state)) {
6476 rtnl_unlock();
6477 return;
6478 }
6479
6480 igb_dump(adapter);
6481 netdev_err(adapter->netdev, "Reset adapter\n");
6482 igb_reinit_locked(adapter);
6483 rtnl_unlock();
6484}
6485
/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 **/
6491static void igb_get_stats64(struct net_device *netdev,
6492 struct rtnl_link_stats64 *stats)
6493{
6494 struct igb_adapter *adapter = netdev_priv(netdev);
6495
6496 spin_lock(&adapter->stats64_lock);
6497 igb_update_stats(adapter);
6498 memcpy(stats, &adapter->stats64, sizeof(*stats));
6499 spin_unlock(&adapter->stats64_lock);
6500}
6501
/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
6509static int igb_change_mtu(struct net_device *netdev, int new_mtu)
6510{
6511 struct igb_adapter *adapter = netdev_priv(netdev);
6512 int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
6513
6514 if (adapter->xdp_prog) {
6515 int i;
6516
6517 for (i = 0; i < adapter->num_rx_queues; i++) {
6518 struct igb_ring *ring = adapter->rx_ring[i];
6519
6520 if (max_frame > igb_rx_bufsz(ring)) {
6521 netdev_warn(adapter->netdev,
6522 "Requested MTU size is not supported with XDP. Max frame size is %d\n",
6523 max_frame);
6524 return -EINVAL;
6525 }
6526 }
6527 }
6528
6529
6530 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
6531 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
6532
6533 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
6534 usleep_range(1000, 2000);
6535
6536
6537 adapter->max_frame_size = max_frame;
6538
6539 if (netif_running(netdev))
6540 igb_down(adapter);
6541
6542 netdev_dbg(netdev, "changing MTU from %d to %d\n",
6543 netdev->mtu, new_mtu);
6544 netdev->mtu = new_mtu;
6545
6546 if (netif_running(netdev))
6547 igb_up(adapter);
6548 else
6549 igb_reset(adapter);
6550
6551 clear_bit(__IGB_RESETTING, &adapter->state);
6552
6553 return 0;
6554}
6555
/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
6560void igb_update_stats(struct igb_adapter *adapter)
6561{
6562 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
6563 struct e1000_hw *hw = &adapter->hw;
6564 struct pci_dev *pdev = adapter->pdev;
6565 u32 reg, mpc;
6566 int i;
6567 u64 bytes, packets;
6568 unsigned int start;
6569 u64 _bytes, _packets;
6570
6571
6572
6573
6574 if (adapter->link_speed == 0)
6575 return;
6576 if (pci_channel_offline(pdev))
6577 return;
6578
6579 bytes = 0;
6580 packets = 0;
6581
6582 rcu_read_lock();
6583 for (i = 0; i < adapter->num_rx_queues; i++) {
6584 struct igb_ring *ring = adapter->rx_ring[i];
6585 u32 rqdpc = rd32(E1000_RQDPC(i));
6586 if (hw->mac.type >= e1000_i210)
6587 wr32(E1000_RQDPC(i), 0);
6588
6589 if (rqdpc) {
6590 ring->rx_stats.drops += rqdpc;
6591 net_stats->rx_fifo_errors += rqdpc;
6592 }
6593
6594 do {
6595 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
6596 _bytes = ring->rx_stats.bytes;
6597 _packets = ring->rx_stats.packets;
6598 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
6599 bytes += _bytes;
6600 packets += _packets;
6601 }
6602
6603 net_stats->rx_bytes = bytes;
6604 net_stats->rx_packets = packets;
6605
6606 bytes = 0;
6607 packets = 0;
6608 for (i = 0; i < adapter->num_tx_queues; i++) {
6609 struct igb_ring *ring = adapter->tx_ring[i];
6610 do {
6611 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
6612 _bytes = ring->tx_stats.bytes;
6613 _packets = ring->tx_stats.packets;
6614 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
6615 bytes += _bytes;
6616 packets += _packets;
6617 }
6618 net_stats->tx_bytes = bytes;
6619 net_stats->tx_packets = packets;
6620 rcu_read_unlock();
6621
6622
6623 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
6624 adapter->stats.gprc += rd32(E1000_GPRC);
6625 adapter->stats.gorc += rd32(E1000_GORCL);
6626 rd32(E1000_GORCH);
6627 adapter->stats.bprc += rd32(E1000_BPRC);
6628 adapter->stats.mprc += rd32(E1000_MPRC);
6629 adapter->stats.roc += rd32(E1000_ROC);
6630
6631 adapter->stats.prc64 += rd32(E1000_PRC64);
6632 adapter->stats.prc127 += rd32(E1000_PRC127);
6633 adapter->stats.prc255 += rd32(E1000_PRC255);
6634 adapter->stats.prc511 += rd32(E1000_PRC511);
6635 adapter->stats.prc1023 += rd32(E1000_PRC1023);
6636 adapter->stats.prc1522 += rd32(E1000_PRC1522);
6637 adapter->stats.symerrs += rd32(E1000_SYMERRS);
6638 adapter->stats.sec += rd32(E1000_SEC);
6639
6640 mpc = rd32(E1000_MPC);
6641 adapter->stats.mpc += mpc;
6642 net_stats->rx_fifo_errors += mpc;
6643 adapter->stats.scc += rd32(E1000_SCC);
6644 adapter->stats.ecol += rd32(E1000_ECOL);
6645 adapter->stats.mcc += rd32(E1000_MCC);
6646 adapter->stats.latecol += rd32(E1000_LATECOL);
6647 adapter->stats.dc += rd32(E1000_DC);
6648 adapter->stats.rlec += rd32(E1000_RLEC);
6649 adapter->stats.xonrxc += rd32(E1000_XONRXC);
6650 adapter->stats.xontxc += rd32(E1000_XONTXC);
6651 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6652 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6653 adapter->stats.fcruc += rd32(E1000_FCRUC);
6654 adapter->stats.gptc += rd32(E1000_GPTC);
6655 adapter->stats.gotc += rd32(E1000_GOTCL);
6656 rd32(E1000_GOTCH);
6657 adapter->stats.rnbc += rd32(E1000_RNBC);
6658 adapter->stats.ruc += rd32(E1000_RUC);
6659 adapter->stats.rfc += rd32(E1000_RFC);
6660 adapter->stats.rjc += rd32(E1000_RJC);
6661 adapter->stats.tor += rd32(E1000_TORH);
6662 adapter->stats.tot += rd32(E1000_TOTH);
6663 adapter->stats.tpr += rd32(E1000_TPR);
6664
6665 adapter->stats.ptc64 += rd32(E1000_PTC64);
6666 adapter->stats.ptc127 += rd32(E1000_PTC127);
6667 adapter->stats.ptc255 += rd32(E1000_PTC255);
6668 adapter->stats.ptc511 += rd32(E1000_PTC511);
6669 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6670 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6671
6672 adapter->stats.mptc += rd32(E1000_MPTC);
6673 adapter->stats.bptc += rd32(E1000_BPTC);
6674
6675 adapter->stats.tpt += rd32(E1000_TPT);
6676 adapter->stats.colc += rd32(E1000_COLC);
6677
6678 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6679
6680 reg = rd32(E1000_CTRL_EXT);
6681 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6682 adapter->stats.rxerrc += rd32(E1000_RXERRC);
6683
6684
6685 if ((hw->mac.type != e1000_i210) &&
6686 (hw->mac.type != e1000_i211))
6687 adapter->stats.tncrs += rd32(E1000_TNCRS);
6688 }
6689
6690 adapter->stats.tsctc += rd32(E1000_TSCTC);
6691 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6692
6693 adapter->stats.iac += rd32(E1000_IAC);
6694 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6695 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6696 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6697 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6698 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6699 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6700 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
6701 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
6702
6703
6704 net_stats->multicast = adapter->stats.mprc;
6705 net_stats->collisions = adapter->stats.colc;
6706
6707
6708
6709
6710
6711
6712 net_stats->rx_errors = adapter->stats.rxerrc +
6713 adapter->stats.crcerrs + adapter->stats.algnerrc +
6714 adapter->stats.ruc + adapter->stats.roc +
6715 adapter->stats.cexterr;
6716 net_stats->rx_length_errors = adapter->stats.ruc +
6717 adapter->stats.roc;
6718 net_stats->rx_crc_errors = adapter->stats.crcerrs;
6719 net_stats->rx_frame_errors = adapter->stats.algnerrc;
6720 net_stats->rx_missed_errors = adapter->stats.mpc;
6721
6722
6723 net_stats->tx_errors = adapter->stats.ecol +
6724 adapter->stats.latecol;
6725 net_stats->tx_aborted_errors = adapter->stats.ecol;
6726 net_stats->tx_window_errors = adapter->stats.latecol;
6727 net_stats->tx_carrier_errors = adapter->stats.tncrs;
6728
6729
6730
6731
6732 adapter->stats.mgptc += rd32(E1000_MGTPTC);
6733 adapter->stats.mgprc += rd32(E1000_MGTPRC);
6734 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
6735
6736
6737 reg = rd32(E1000_MANC);
6738 if (reg & E1000_MANC_EN_BMC2OS) {
6739 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6740 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6741 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6742 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6743 }
6744}
6745
6746static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
6747{
6748 int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt);
6749 struct e1000_hw *hw = &adapter->hw;
6750 struct timespec64 ts;
6751 u32 tsauxc;
6752
6753 if (pin < 0 || pin >= IGB_N_PEROUT)
6754 return;
6755
6756 spin_lock(&adapter->tmreg_lock);
6757
6758 if (hw->mac.type == e1000_82580 ||
6759 hw->mac.type == e1000_i354 ||
6760 hw->mac.type == e1000_i350) {
6761 s64 ns = timespec64_to_ns(&adapter->perout[pin].period);
6762 u32 systiml, systimh, level_mask, level, rem;
6763 u64 systim, now;
6764
6765
6766 rd32(E1000_SYSTIMR);
6767 systiml = rd32(E1000_SYSTIML);
6768 systimh = rd32(E1000_SYSTIMH);
6769 systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml);
6770 now = timecounter_cyc2time(&adapter->tc, systim);
6771
6772 if (pin < 2) {
6773 level_mask = (tsintr_tt == 1) ? 0x80000 : 0x40000;
6774 level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0;
6775 } else {
6776 level_mask = (tsintr_tt == 1) ? 0x80 : 0x40;
6777 level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0;
6778 }
6779
6780 div_u64_rem(now, ns, &rem);
6781 systim = systim + (ns - rem);
6782
6783
6784 div_u64_rem(now, ns << 1, &rem);
6785 if (rem < ns) {
6786
6787 if (level == 0) {
6788
6789 systim += ns;
6790 pr_notice("igb: periodic output on %s missed falling edge\n",
6791 adapter->sdp_config[pin].name);
6792 }
6793 } else {
6794
6795 if (level == 1) {
6796
6797 systim += ns;
6798 pr_notice("igb: periodic output on %s missed rising edge\n",
6799 adapter->sdp_config[pin].name);
6800 }
6801 }
6802
6803
6804
6805
6806 ts.tv_nsec = (u32)systim;
6807 ts.tv_sec = ((u32)(systim >> 32)) & 0xFF;
6808 } else {
6809 ts = timespec64_add(adapter->perout[pin].start,
6810 adapter->perout[pin].period);
6811 }
6812
6813
6814 wr32((tsintr_tt == 1) ? E1000_TRGTTIML1 : E1000_TRGTTIML0, ts.tv_nsec);
6815 wr32((tsintr_tt == 1) ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)ts.tv_sec);
6816 tsauxc = rd32(E1000_TSAUXC);
6817 tsauxc |= TSAUXC_EN_TT0;
6818 wr32(E1000_TSAUXC, tsauxc);
6819 adapter->perout[pin].start = ts;
6820
6821 spin_unlock(&adapter->tmreg_lock);
6822}
6823
6824static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
6825{
6826 int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt);
6827 int auxstmpl = (tsintr_tt == 1) ? E1000_AUXSTMPL1 : E1000_AUXSTMPL0;
6828 int auxstmph = (tsintr_tt == 1) ? E1000_AUXSTMPH1 : E1000_AUXSTMPH0;
6829 struct e1000_hw *hw = &adapter->hw;
6830 struct ptp_clock_event event;
6831 struct timespec64 ts;
6832
6833 if (pin < 0 || pin >= IGB_N_EXTTS)
6834 return;
6835
6836 if (hw->mac.type == e1000_82580 ||
6837 hw->mac.type == e1000_i354 ||
6838 hw->mac.type == e1000_i350) {
6839 s64 ns = rd32(auxstmpl);
6840
6841 ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32;
6842 ts = ns_to_timespec64(ns);
6843 } else {
6844 ts.tv_nsec = rd32(auxstmpl);
6845 ts.tv_sec = rd32(auxstmph);
6846 }
6847
6848 event.type = PTP_CLOCK_EXTTS;
6849 event.index = tsintr_tt;
6850 event.timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
6851 ptp_clock_event(adapter->ptp_clock, &event);
6852}
6853
6854static void igb_tsync_interrupt(struct igb_adapter *adapter)
6855{
6856 struct e1000_hw *hw = &adapter->hw;
6857 u32 ack = 0, tsicr = rd32(E1000_TSICR);
6858 struct ptp_clock_event event;
6859
6860 if (tsicr & TSINTR_SYS_WRAP) {
6861 event.type = PTP_CLOCK_PPS;
6862 if (adapter->ptp_caps.pps)
6863 ptp_clock_event(adapter->ptp_clock, &event);
6864 ack |= TSINTR_SYS_WRAP;
6865 }
6866
6867 if (tsicr & E1000_TSICR_TXTS) {
6868
6869 schedule_work(&adapter->ptp_tx_work);
6870 ack |= E1000_TSICR_TXTS;
6871 }
6872
6873 if (tsicr & TSINTR_TT0) {
6874 igb_perout(adapter, 0);
6875 ack |= TSINTR_TT0;
6876 }
6877
6878 if (tsicr & TSINTR_TT1) {
6879 igb_perout(adapter, 1);
6880 ack |= TSINTR_TT1;
6881 }
6882
6883 if (tsicr & TSINTR_AUTT0) {
6884 igb_extts(adapter, 0);
6885 ack |= TSINTR_AUTT0;
6886 }
6887
6888 if (tsicr & TSINTR_AUTT1) {
6889 igb_extts(adapter, 1);
6890 ack |= TSINTR_AUTT1;
6891 }
6892
6893
6894 wr32(E1000_TSICR, ack);
6895}
6896
6897static irqreturn_t igb_msix_other(int irq, void *data)
6898{
6899 struct igb_adapter *adapter = data;
6900 struct e1000_hw *hw = &adapter->hw;
6901 u32 icr = rd32(E1000_ICR);
6902
6903
6904 if (icr & E1000_ICR_DRSTA)
6905 schedule_work(&adapter->reset_task);
6906
6907 if (icr & E1000_ICR_DOUTSYNC) {
6908
6909 adapter->stats.doosync++;
6910
6911
6912
6913
6914 igb_check_wvbr(adapter);
6915 }
6916
6917
6918 if (icr & E1000_ICR_VMMB)
6919 igb_msg_task(adapter);
6920
6921 if (icr & E1000_ICR_LSC) {
6922 hw->mac.get_link_status = 1;
6923
6924 if (!test_bit(__IGB_DOWN, &adapter->state))
6925 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6926 }
6927
6928 if (icr & E1000_ICR_TS)
6929 igb_tsync_interrupt(adapter);
6930
6931 wr32(E1000_EIMS, adapter->eims_other);
6932
6933 return IRQ_HANDLED;
6934}
6935
6936static void igb_write_itr(struct igb_q_vector *q_vector)
6937{
6938 struct igb_adapter *adapter = q_vector->adapter;
6939 u32 itr_val = q_vector->itr_val & 0x7FFC;
6940
6941 if (!q_vector->set_itr)
6942 return;
6943
6944 if (!itr_val)
6945 itr_val = 0x4;
6946
6947 if (adapter->hw.mac.type == e1000_82575)
6948 itr_val |= itr_val << 16;
6949 else
6950 itr_val |= E1000_EITR_CNT_IGNR;
6951
6952 writel(itr_val, q_vector->itr_register);
6953 q_vector->set_itr = 0;
6954}
6955
6956static irqreturn_t igb_msix_ring(int irq, void *data)
6957{
6958 struct igb_q_vector *q_vector = data;
6959
6960
6961 igb_write_itr(q_vector);
6962
6963 napi_schedule(&q_vector->napi);
6964
6965 return IRQ_HANDLED;
6966}
6967
6968#ifdef CONFIG_IGB_DCA
6969static void igb_update_tx_dca(struct igb_adapter *adapter,
6970 struct igb_ring *tx_ring,
6971 int cpu)
6972{
6973 struct e1000_hw *hw = &adapter->hw;
6974 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
6975
6976 if (hw->mac.type != e1000_82575)
6977 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
6978
6979
6980
6981
6982
6983 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
6984 E1000_DCA_TXCTRL_DATA_RRO_EN |
6985 E1000_DCA_TXCTRL_DESC_DCA_EN;
6986
6987 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
6988}
6989
6990static void igb_update_rx_dca(struct igb_adapter *adapter,
6991 struct igb_ring *rx_ring,
6992 int cpu)
6993{
6994 struct e1000_hw *hw = &adapter->hw;
6995 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
6996
6997 if (hw->mac.type != e1000_82575)
6998 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
6999
7000
7001
7002
7003
7004 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
7005 E1000_DCA_RXCTRL_DESC_DCA_EN;
7006
7007 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
7008}
7009
7010static void igb_update_dca(struct igb_q_vector *q_vector)
7011{
7012 struct igb_adapter *adapter = q_vector->adapter;
7013 int cpu = get_cpu();
7014
7015 if (q_vector->cpu == cpu)
7016 goto out_no_update;
7017
7018 if (q_vector->tx.ring)
7019 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
7020
7021 if (q_vector->rx.ring)
7022 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
7023
7024 q_vector->cpu = cpu;
7025out_no_update:
7026 put_cpu();
7027}
7028
7029static void igb_setup_dca(struct igb_adapter *adapter)
7030{
7031 struct e1000_hw *hw = &adapter->hw;
7032 int i;
7033
7034 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
7035 return;
7036
7037
7038 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
7039
7040 for (i = 0; i < adapter->num_q_vectors; i++) {
7041 adapter->q_vector[i]->cpu = -1;
7042 igb_update_dca(adapter->q_vector[i]);
7043 }
7044}
7045
7046static int __igb_notify_dca(struct device *dev, void *data)
7047{
7048 struct net_device *netdev = dev_get_drvdata(dev);
7049 struct igb_adapter *adapter = netdev_priv(netdev);
7050 struct pci_dev *pdev = adapter->pdev;
7051 struct e1000_hw *hw = &adapter->hw;
7052 unsigned long event = *(unsigned long *)data;
7053
7054 switch (event) {
7055 case DCA_PROVIDER_ADD:
7056
7057 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
7058 break;
7059 if (dca_add_requester(dev) == 0) {
7060 adapter->flags |= IGB_FLAG_DCA_ENABLED;
7061 dev_info(&pdev->dev, "DCA enabled\n");
7062 igb_setup_dca(adapter);
7063 break;
7064 }
7065 fallthrough;
7066 case DCA_PROVIDER_REMOVE:
7067 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
7068
7069
7070
7071 dca_remove_requester(dev);
7072 dev_info(&pdev->dev, "DCA disabled\n");
7073 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
7074 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
7075 }
7076 break;
7077 }
7078
7079 return 0;
7080}
7081
7082static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
7083 void *p)
7084{
7085 int ret_val;
7086
7087 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
7088 __igb_notify_dca);
7089
7090 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
7091}
7092#endif
7093
7094#ifdef CONFIG_PCI_IOV
7095static int igb_vf_configure(struct igb_adapter *adapter, int vf)
7096{
7097 unsigned char mac_addr[ETH_ALEN];
7098
7099 eth_zero_addr(mac_addr);
7100 igb_set_vf_mac(adapter, vf, mac_addr);
7101
7102
7103 adapter->vf_data[vf].spoofchk_enabled = true;
7104
7105
7106 adapter->vf_data[vf].trusted = false;
7107
7108 return 0;
7109}
7110
7111#endif
7112static void igb_ping_all_vfs(struct igb_adapter *adapter)
7113{
7114 struct e1000_hw *hw = &adapter->hw;
7115 u32 ping;
7116 int i;
7117
7118 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
7119 ping = E1000_PF_CONTROL_MSG;
7120 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
7121 ping |= E1000_VT_MSGTYPE_CTS;
7122 igb_write_mbx(hw, &ping, 1, i);
7123 }
7124}
7125
7126static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7127{
7128 struct e1000_hw *hw = &adapter->hw;
7129 u32 vmolr = rd32(E1000_VMOLR(vf));
7130 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7131
7132 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
7133 IGB_VF_FLAG_MULTI_PROMISC);
7134 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
7135
7136 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
7137 vmolr |= E1000_VMOLR_MPME;
7138 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
7139 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
7140 } else {
		/* multicast promiscuous was not requested; if the VF has
		 * more than 30 multicast hashes keep MPME set, otherwise
		 * accept only the hashes programmed into the MTA below
		 */
7145 if (vf_data->num_vf_mc_hashes > 30) {
7146 vmolr |= E1000_VMOLR_MPME;
7147 } else if (vf_data->num_vf_mc_hashes) {
7148 int j;
7149
7150 vmolr |= E1000_VMOLR_ROMPE;
7151 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
7152 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
7153 }
7154 }
7155
7156 wr32(E1000_VMOLR(vf), vmolr);
7157
7158
7159 if (*msgbuf & E1000_VT_MSGINFO_MASK)
7160 return -EINVAL;
7161
7162 return 0;
7163}
7164
7165static int igb_set_vf_multicasts(struct igb_adapter *adapter,
7166 u32 *msgbuf, u32 vf)
7167{
7168 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7169 u16 *hash_list = (u16 *)&msgbuf[1];
7170 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7171 int i;
7172
	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
7177 vf_data->num_vf_mc_hashes = n;
7178
7179
7180 if (n > 30)
7181 n = 30;
7182
7183
7184 for (i = 0; i < n; i++)
7185 vf_data->vf_mc_hashes[i] = hash_list[i];
7186
7187
7188 igb_set_rx_mode(adapter->netdev);
7189
7190 return 0;
7191}
7192
7193static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
7194{
7195 struct e1000_hw *hw = &adapter->hw;
7196 struct vf_data_storage *vf_data;
7197 int i, j;
7198
7199 for (i = 0; i < adapter->vfs_allocated_count; i++) {
7200 u32 vmolr = rd32(E1000_VMOLR(i));
7201
7202 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
7203
7204 vf_data = &adapter->vf_data[i];
7205
7206 if ((vf_data->num_vf_mc_hashes > 30) ||
7207 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
7208 vmolr |= E1000_VMOLR_MPME;
7209 } else if (vf_data->num_vf_mc_hashes) {
7210 vmolr |= E1000_VMOLR_ROMPE;
7211 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
7212 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
7213 }
7214 wr32(E1000_VMOLR(i), vmolr);
7215 }
7216}
7217
7218static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
7219{
7220 struct e1000_hw *hw = &adapter->hw;
7221 u32 pool_mask, vlvf_mask, i;
7222
7223
7224 pool_mask = E1000_VLVF_POOLSEL_MASK;
7225 vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
7226
7227
7228 pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
7229 adapter->vfs_allocated_count);
7230
7231
7232 for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
7233 u32 vlvf = rd32(E1000_VLVF(i));
7234 u32 vfta_mask, vid, vfta;
7235
7236
7237 if (!(vlvf & vlvf_mask))
7238 continue;
7239
7240
7241 vlvf ^= vlvf_mask;
7242
7243
7244 if (vlvf & pool_mask)
7245 goto update_vlvfb;
7246
7247
7248 if (vlvf & E1000_VLVF_POOLSEL_MASK)
7249 goto update_vlvf;
7250
7251 vid = vlvf & E1000_VLVF_VLANID_MASK;
7252 vfta_mask = BIT(vid % 32);
7253
7254
7255 vfta = adapter->shadow_vfta[vid / 32];
7256 if (vfta & vfta_mask)
7257 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
7258update_vlvf:
7259
7260 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
7261 vlvf &= E1000_VLVF_POOLSEL_MASK;
7262 else
7263 vlvf = 0;
7264update_vlvfb:
7265
7266 wr32(E1000_VLVF(i), vlvf);
7267 }
7268}
7269
7270static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
7271{
7272 u32 vlvf;
7273 int idx;
7274
7275
7276 if (vlan == 0)
7277 return 0;
7278
7279
7280 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
7281 vlvf = rd32(E1000_VLVF(idx));
7282 if ((vlvf & VLAN_VID_MASK) == vlan)
7283 break;
7284 }
7285
7286 return idx;
7287}
7288
7289static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
7290{
7291 struct e1000_hw *hw = &adapter->hw;
7292 u32 bits, pf_id;
7293 int idx;
7294
7295 idx = igb_find_vlvf_entry(hw, vid);
7296 if (!idx)
7297 return;
7298
7299
7300
7301
7302 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
7303 bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
7304 bits &= rd32(E1000_VLVF(idx));
7305
7306
7307 if (!bits) {
7308 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
7309 wr32(E1000_VLVF(idx), BIT(pf_id));
7310 else
7311 wr32(E1000_VLVF(idx), 0);
7312 }
7313}
7314
7315static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
7316 bool add, u32 vf)
7317{
7318 int pf_id = adapter->vfs_allocated_count;
7319 struct e1000_hw *hw = &adapter->hw;
7320 int err;
7321
7322
7323
7324
7325
7326
7327 if (add && test_bit(vid, adapter->active_vlans)) {
7328 err = igb_vfta_set(hw, vid, pf_id, true, false);
7329 if (err)
7330 return err;
7331 }
7332
7333 err = igb_vfta_set(hw, vid, vf, add, false);
7334
7335 if (add && !err)
7336 return err;
7337
7338
7339
7340
7341
7342 if (test_bit(vid, adapter->active_vlans) ||
7343 (adapter->flags & IGB_FLAG_VLAN_PROMISC))
7344 igb_update_pf_vlvf(adapter, vid);
7345
7346 return err;
7347}
7348
7349static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
7350{
7351 struct e1000_hw *hw = &adapter->hw;
7352
7353 if (vid)
7354 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
7355 else
7356 wr32(E1000_VMVIR(vf), 0);
7357}
7358
7359static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
7360 u16 vlan, u8 qos)
7361{
7362 int err;
7363
7364 err = igb_set_vf_vlan(adapter, vlan, true, vf);
7365 if (err)
7366 return err;
7367
7368 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
7369 igb_set_vmolr(adapter, vf, !vlan);
7370
7371
7372 if (vlan != adapter->vf_data[vf].pf_vlan)
7373 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7374 false, vf);
7375
7376 adapter->vf_data[vf].pf_vlan = vlan;
7377 adapter->vf_data[vf].pf_qos = qos;
7378 igb_set_vf_vlan_strip(adapter, vf, true);
7379 dev_info(&adapter->pdev->dev,
7380 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
7381 if (test_bit(__IGB_DOWN, &adapter->state)) {
7382 dev_warn(&adapter->pdev->dev,
7383 "The VF VLAN has been set, but the PF device is not up.\n");
7384 dev_warn(&adapter->pdev->dev,
7385 "Bring the PF device up before attempting to use the VF device.\n");
7386 }
7387
7388 return err;
7389}
7390
7391static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
7392{
7393
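	/* Restore untagged (VLAN 0) access for this VF */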
7394 igb_set_vf_vlan(adapter, 0, true, vf);
7395
7396 igb_set_vmvir(adapter, 0, vf);
7397 igb_set_vmolr(adapter, vf, true);
7398
7399
7400 if (adapter->vf_data[vf].pf_vlan)
7401 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7402 false, vf);
7403
7404 adapter->vf_data[vf].pf_vlan = 0;
7405 adapter->vf_data[vf].pf_qos = 0;
7406 igb_set_vf_vlan_strip(adapter, vf, false);
7407
7408 return 0;
7409}
7410
7411static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
7412 u16 vlan, u8 qos, __be16 vlan_proto)
7413{
7414 struct igb_adapter *adapter = netdev_priv(netdev);
7415
7416 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
7417 return -EINVAL;
7418
7419 if (vlan_proto != htons(ETH_P_8021Q))
7420 return -EPROTONOSUPPORT;
7421
7422 return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
7423 igb_disable_port_vlan(adapter, vf);
7424}
7425
7426static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7427{
7428 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7429 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
7430 int ret;
7431
7432 if (adapter->vf_data[vf].pf_vlan)
7433 return -1;
7434
7435
7436 if (!vid && !add)
7437 return 0;
7438
7439 ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
7440 if (!ret)
7441 igb_set_vf_vlan_strip(adapter, vf, !!vid);
7442 return ret;
7443}
7444
7445static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
7446{
7447 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7448
7449
7450 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
7451 vf_data->last_nack = jiffies;
7452
7453
7454 igb_clear_vf_vfta(adapter, vf);
7455 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
7456 igb_set_vmvir(adapter, vf_data->pf_vlan |
7457 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
7458 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
7459 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
7460
7461
7462 adapter->vf_data[vf].num_vf_mc_hashes = 0;
7463
7464
7465 igb_set_rx_mode(adapter->netdev);
7466}
7467
7468static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
7469{
7470 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7471
7472
7473 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7474 eth_zero_addr(vf_mac);
7475
7476
7477 igb_vf_reset(adapter, vf);
7478}
7479
7480static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
7481{
7482 struct e1000_hw *hw = &adapter->hw;
7483 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7484 u32 reg, msgbuf[3];
7485 u8 *addr = (u8 *)(&msgbuf[1]);
7486
7487
7488 igb_vf_reset(adapter, vf);
7489
7490
7491 igb_set_vf_mac(adapter, vf, vf_mac);
7492
7493
7494 reg = rd32(E1000_VFTE);
7495 wr32(E1000_VFTE, reg | BIT(vf));
7496 reg = rd32(E1000_VFRE);
7497 wr32(E1000_VFRE, reg | BIT(vf));
7498
7499 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
7500
7501
7502 if (!is_zero_ether_addr(vf_mac)) {
7503 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
7504 memcpy(addr, vf_mac, ETH_ALEN);
7505 } else {
7506 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
7507 }
7508 igb_write_mbx(hw, msgbuf, 3, vf);
7509}
7510
7511static void igb_flush_mac_table(struct igb_adapter *adapter)
7512{
7513 struct e1000_hw *hw = &adapter->hw;
7514 int i;
7515
7516 for (i = 0; i < hw->mac.rar_entry_count; i++) {
7517 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
7518 eth_zero_addr(adapter->mac_table[i].addr);
7519 adapter->mac_table[i].queue = 0;
7520 igb_rar_set_index(adapter, i);
7521 }
7522}
7523
7524static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
7525{
7526 struct e1000_hw *hw = &adapter->hw;
7527
7528 int rar_entries = hw->mac.rar_entry_count -
7529 adapter->vfs_allocated_count;
7530 int i, count = 0;
7531
7532 for (i = 0; i < rar_entries; i++) {
7533
7534 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
7535 continue;
7536
7537
7538 if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
7539 (adapter->mac_table[i].queue != queue))
7540 continue;
7541
7542 count++;
7543 }
7544
7545 return count;
7546}
7547
/* Set the default MAC address for the PF in the first RAR entry */
7549static void igb_set_default_mac_filter(struct igb_adapter *adapter)
7550{
7551 struct igb_mac_addr *mac_table = &adapter->mac_table[0];
7552
7553 ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
7554 mac_table->queue = adapter->vfs_allocated_count;
7555 mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7556
7557 igb_rar_set_index(adapter, 0);
7558}
7559
/* If the filter to be added and an already existing filter express
 * the same address and address type, it should be possible to only
 * override the other configurations, for example the queue to steer
 * traffic.
 */
7565static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
7566 const u8 *addr, const u8 flags)
7567{
7568 if (!(entry->state & IGB_MAC_STATE_IN_USE))
7569 return true;
7570
7571 if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
7572 (flags & IGB_MAC_STATE_SRC_ADDR))
7573 return false;
7574
7575 if (!ether_addr_equal(addr, entry->addr))
7576 return false;
7577
7578 return true;
7579}
7580
/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
 * 'flags' is used to indicate what kind of match is made, match is by
 * default for the destination address, if matching by source address
 * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used.
 */
7586static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
7587 const u8 *addr, const u8 queue,
7588 const u8 flags)
7589{
7590 struct e1000_hw *hw = &adapter->hw;
7591 int rar_entries = hw->mac.rar_entry_count -
7592 adapter->vfs_allocated_count;
7593 int i;
7594
7595 if (is_zero_ether_addr(addr))
7596 return -EINVAL;
7597
7598
7599
7600
7601
7602 for (i = 0; i < rar_entries; i++) {
7603 if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
7604 addr, flags))
7605 continue;
7606
7607 ether_addr_copy(adapter->mac_table[i].addr, addr);
7608 adapter->mac_table[i].queue = queue;
7609 adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;
7610
7611 igb_rar_set_index(adapter, i);
7612 return i;
7613 }
7614
7615 return -ENOSPC;
7616}
7617
7618static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7619 const u8 queue)
7620{
7621 return igb_add_mac_filter_flags(adapter, addr, queue, 0);
7622}
7623
/* Remove a MAC filter for 'addr' directing matching traffic to
 * 'queue', 'flags' is used to indicate what kind of match needs to be
 * removed, match is by default for the destination address, if
 * matching by source address is desired the flag
 * IGB_MAC_STATE_SRC_ADDR can be used.
 */
7630static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
7631 const u8 *addr, const u8 queue,
7632 const u8 flags)
7633{
7634 struct e1000_hw *hw = &adapter->hw;
7635 int rar_entries = hw->mac.rar_entry_count -
7636 adapter->vfs_allocated_count;
7637 int i;
7638
7639 if (is_zero_ether_addr(addr))
7640 return -EINVAL;
7641
7642
7643
7644
7645
7646 for (i = 0; i < rar_entries; i++) {
7647 if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
7648 continue;
7649 if ((adapter->mac_table[i].state & flags) != flags)
7650 continue;
7651 if (adapter->mac_table[i].queue != queue)
7652 continue;
7653 if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
7654 continue;
7655
7656
7657
7658
7659 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
7660 adapter->mac_table[i].state =
7661 IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7662 adapter->mac_table[i].queue =
7663 adapter->vfs_allocated_count;
7664 } else {
7665 adapter->mac_table[i].state = 0;
7666 adapter->mac_table[i].queue = 0;
7667 eth_zero_addr(adapter->mac_table[i].addr);
7668 }
7669
7670 igb_rar_set_index(adapter, i);
7671 return 0;
7672 }
7673
7674 return -ENOENT;
7675}
7676
7677static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7678 const u8 queue)
7679{
7680 return igb_del_mac_filter_flags(adapter, addr, queue, 0);
7681}
7682
7683int igb_add_mac_steering_filter(struct igb_adapter *adapter,
7684 const u8 *addr, u8 queue, u8 flags)
7685{
7686 struct e1000_hw *hw = &adapter->hw;
7687
	/* Queue steering via the RAH registers is only wired up for
	 * i210 parts here, so reject other MAC types.
	 */
7691 if (hw->mac.type != e1000_i210)
7692 return -EOPNOTSUPP;
7693
7694 return igb_add_mac_filter_flags(adapter, addr, queue,
7695 IGB_MAC_STATE_QUEUE_STEERING | flags);
7696}
7697
7698int igb_del_mac_steering_filter(struct igb_adapter *adapter,
7699 const u8 *addr, u8 queue, u8 flags)
7700{
7701 return igb_del_mac_filter_flags(adapter, addr, queue,
7702 IGB_MAC_STATE_QUEUE_STEERING | flags);
7703}
7704
7705static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
7706{
7707 struct igb_adapter *adapter = netdev_priv(netdev);
7708 int ret;
7709
7710 ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7711
7712 return min_t(int, ret, 0);
7713}
7714
7715static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
7716{
7717 struct igb_adapter *adapter = netdev_priv(netdev);
7718
7719 igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7720
7721 return 0;
7722}
7723
7724static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
7725 const u32 info, const u8 *addr)
7726{
7727 struct pci_dev *pdev = adapter->pdev;
7728 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7729 struct list_head *pos;
7730 struct vf_mac_filter *entry = NULL;
7731 int ret = 0;
7732
7733 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7734 !vf_data->trusted) {
7735 dev_warn(&pdev->dev,
7736 "VF %d requested MAC filter but is administratively denied\n",
7737 vf);
7738 return -EINVAL;
7739 }
7740 if (!is_valid_ether_addr(addr)) {
7741 dev_warn(&pdev->dev,
7742 "VF %d attempted to set invalid MAC filter\n",
7743 vf);
7744 return -EINVAL;
7745 }
7746
7747 switch (info) {
7748 case E1000_VF_MAC_FILTER_CLR:
7749
7750 list_for_each(pos, &adapter->vf_macs.l) {
7751 entry = list_entry(pos, struct vf_mac_filter, l);
7752 if (entry->vf == vf) {
7753 entry->vf = -1;
7754 entry->free = true;
7755 igb_del_mac_filter(adapter, entry->vf_mac, vf);
7756 }
7757 }
7758 break;
7759 case E1000_VF_MAC_FILTER_ADD:
7760
7761 list_for_each(pos, &adapter->vf_macs.l) {
7762 entry = list_entry(pos, struct vf_mac_filter, l);
7763 if (entry->free)
7764 break;
7765 }
7766
7767 if (entry && entry->free) {
7768 entry->free = false;
7769 entry->vf = vf;
7770 ether_addr_copy(entry->vf_mac, addr);
7771
7772 ret = igb_add_mac_filter(adapter, addr, vf);
7773 ret = min_t(int, ret, 0);
7774 } else {
7775 ret = -ENOSPC;
7776 }
7777
7778 if (ret == -ENOSPC)
7779 dev_warn(&pdev->dev,
7780 "VF %d has requested MAC filter but there is no space for it\n",
7781 vf);
7782 break;
7783 default:
7784 ret = -EINVAL;
7785 break;
7786 }
7787
7788 return ret;
7789}
7790
7791static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
7792{
7793 struct pci_dev *pdev = adapter->pdev;
7794 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7795 u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
7796
7797
7798
7799
7800 unsigned char *addr = (unsigned char *)&msg[1];
7801 int ret = 0;
7802
7803 if (!info) {
7804 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7805 !vf_data->trusted) {
7806 dev_warn(&pdev->dev,
7807 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
7808 vf);
7809 return -EINVAL;
7810 }
7811
7812 if (!is_valid_ether_addr(addr)) {
7813 dev_warn(&pdev->dev,
7814 "VF %d attempted to set invalid MAC\n",
7815 vf);
7816 return -EINVAL;
7817 }
7818
7819 ret = igb_set_vf_mac(adapter, vf, addr);
7820 } else {
7821 ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
7822 }
7823
7824 return ret;
7825}
7826
7827static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
7828{
7829 struct e1000_hw *hw = &adapter->hw;
7830 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7831 u32 msg = E1000_VT_MSGTYPE_NACK;
7832
7833
7834 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
7835 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
7836 igb_write_mbx(hw, &msg, 1, vf);
7837 vf_data->last_nack = jiffies;
7838 }
7839}
7840
7841static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
7842{
7843 struct pci_dev *pdev = adapter->pdev;
7844 u32 msgbuf[E1000_VFMAILBOX_SIZE];
7845 struct e1000_hw *hw = &adapter->hw;
7846 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7847 s32 retval;
7848
7849 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);
7850
7851 if (retval) {
7852
7853 dev_err(&pdev->dev, "Error receiving message from VF\n");
7854 vf_data->flags &= ~IGB_VF_FLAG_CTS;
7855 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7856 goto unlock;
7857 goto out;
7858 }
7859
7860
7861 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
7862 goto unlock;
7863
7864
7865
7866
7867 if (msgbuf[0] == E1000_VF_RESET) {
7868
7869 igb_vf_reset_msg(adapter, vf);
7870 return;
7871 }
7872
7873 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
7874 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7875 goto unlock;
7876 retval = -1;
7877 goto out;
7878 }
7879
7880 switch ((msgbuf[0] & 0xFFFF)) {
7881 case E1000_VF_SET_MAC_ADDR:
7882 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
7883 break;
7884 case E1000_VF_SET_PROMISC:
7885 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
7886 break;
7887 case E1000_VF_SET_MULTICAST:
7888 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
7889 break;
7890 case E1000_VF_SET_LPE:
7891 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
7892 break;
7893 case E1000_VF_SET_VLAN:
7894 retval = -1;
7895 if (vf_data->pf_vlan)
7896 dev_warn(&pdev->dev,
7897 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
7898 vf);
7899 else
7900 retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
7901 break;
7902 default:
7903 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
7904 retval = -1;
7905 break;
7906 }
7907
7908 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
7909out:
7910
7911 if (retval)
7912 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
7913 else
7914 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
7915
7916
7917 igb_write_mbx(hw, msgbuf, 1, vf);
7918 return;
7919
7920unlock:
7921 igb_unlock_mbx(hw, vf);
7922}
7923
7924static void igb_msg_task(struct igb_adapter *adapter)
7925{
7926 struct e1000_hw *hw = &adapter->hw;
7927 u32 vf;
7928
7929 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
7930
7931 if (!igb_check_for_rst(hw, vf))
7932 igb_vf_reset_event(adapter, vf);
7933
7934
7935 if (!igb_check_for_msg(hw, vf))
7936 igb_rcv_msg_from_vf(adapter, vf);
7937
7938
7939 if (!igb_check_for_ack(hw, vf))
7940 igb_rcv_ack_from_vf(adapter, vf);
7941 }
7942}
7943
/**
 *  igb_set_uta - Set unicast filter table address
 *  @adapter: board private structure
 *  @set: boolean indicating if we are setting or clearing bits
 *
 *  The unicast table address is a register array of 32-bit registers.
 *  The table is meant to be used in a way similar to how the MTA is used
 *  however due to certain limitations in the hardware it is necessary to
 *  set all the hash bits to 1 and use the VMOLR ROPE bit as a substitute
 *  for promiscuous settings.
 **/
7955static void igb_set_uta(struct igb_adapter *adapter, bool set)
7956{
7957 struct e1000_hw *hw = &adapter->hw;
7958 u32 uta = set ? ~0 : 0;
7959 int i;
7960
7961
7962 if (!adapter->vfs_allocated_count)
7963 return;
7964
7965 for (i = hw->mac.uta_reg_count; i--;)
7966 array_wr32(E1000_UTA, i, uta);
7967}
7968
/**
 *  igb_intr_msi - Interrupt Handler
 *  @irq: interrupt number
 *  @data: pointer to a network interface device structure
 **/
7974static irqreturn_t igb_intr_msi(int irq, void *data)
7975{
7976 struct igb_adapter *adapter = data;
7977 struct igb_q_vector *q_vector = adapter->q_vector[0];
7978 struct e1000_hw *hw = &adapter->hw;
7979
7980 u32 icr = rd32(E1000_ICR);
7981
7982 igb_write_itr(q_vector);
7983
7984 if (icr & E1000_ICR_DRSTA)
7985 schedule_work(&adapter->reset_task);
7986
7987 if (icr & E1000_ICR_DOUTSYNC) {
7988
7989 adapter->stats.doosync++;
7990 }
7991
7992 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7993 hw->mac.get_link_status = 1;
7994 if (!test_bit(__IGB_DOWN, &adapter->state))
7995 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7996 }
7997
7998 if (icr & E1000_ICR_TS)
7999 igb_tsync_interrupt(adapter);
8000
8001 napi_schedule(&q_vector->napi);
8002
8003 return IRQ_HANDLED;
8004}
8005
/**
 *  igb_intr - Legacy Interrupt Handler
 *  @irq: interrupt number
 *  @data: pointer to a network interface device structure
 **/
8011static irqreturn_t igb_intr(int irq, void *data)
8012{
8013 struct igb_adapter *adapter = data;
8014 struct igb_q_vector *q_vector = adapter->q_vector[0];
8015 struct e1000_hw *hw = &adapter->hw;
8016
8017
8018
8019 u32 icr = rd32(E1000_ICR);
8020
8021
8022
8023
8024 if (!(icr & E1000_ICR_INT_ASSERTED))
8025 return IRQ_NONE;
8026
8027 igb_write_itr(q_vector);
8028
8029 if (icr & E1000_ICR_DRSTA)
8030 schedule_work(&adapter->reset_task);
8031
8032 if (icr & E1000_ICR_DOUTSYNC) {
8033
8034 adapter->stats.doosync++;
8035 }
8036
8037 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
8038 hw->mac.get_link_status = 1;
8039
8040 if (!test_bit(__IGB_DOWN, &adapter->state))
8041 mod_timer(&adapter->watchdog_timer, jiffies + 1);
8042 }
8043
8044 if (icr & E1000_ICR_TS)
8045 igb_tsync_interrupt(adapter);
8046
8047 napi_schedule(&q_vector->napi);
8048
8049 return IRQ_HANDLED;
8050}
8051
8052static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
8053{
8054 struct igb_adapter *adapter = q_vector->adapter;
8055 struct e1000_hw *hw = &adapter->hw;
8056
8057 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
8058 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
8059 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
8060 igb_set_itr(q_vector);
8061 else
8062 igb_update_ring_itr(q_vector);
8063 }
8064
8065 if (!test_bit(__IGB_DOWN, &adapter->state)) {
8066 if (adapter->flags & IGB_FLAG_HAS_MSIX)
8067 wr32(E1000_EIMS, q_vector->eims_value);
8068 else
8069 igb_irq_enable(adapter);
8070 }
8071}
8072
/**
 *  igb_poll - NAPI Rx polling callback
 *  @napi: napi polling structure
 *  @budget: count of how much work to do
 **/
8078static int igb_poll(struct napi_struct *napi, int budget)
8079{
8080 struct igb_q_vector *q_vector = container_of(napi,
8081 struct igb_q_vector,
8082 napi);
8083 bool clean_complete = true;
8084 int work_done = 0;
8085
8086#ifdef CONFIG_IGB_DCA
8087 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
8088 igb_update_dca(q_vector);
8089#endif
8090 if (q_vector->tx.ring)
8091 clean_complete = igb_clean_tx_irq(q_vector, budget);
8092
8093 if (q_vector->rx.ring) {
8094 int cleaned = igb_clean_rx_irq(q_vector, budget);
8095
8096 work_done += cleaned;
8097 if (cleaned >= budget)
8098 clean_complete = false;
8099 }
8100
8101
8102 if (!clean_complete)
8103 return budget;
8104
8105
8106
8107
8108 if (likely(napi_complete_done(napi, work_done)))
8109 igb_ring_irq_enable(q_vector);
8110
8111 return work_done;
8112}
8113
/**
 *  igb_clean_tx_irq - Reclaim resources after transmit completes
 *  @q_vector: pointer to q_vector containing needed info
 *  @napi_budget: Used to determine if we are in netpoll
 *
 *  returns true if ring is completely cleaned
 **/
8121static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
8122{
8123 struct igb_adapter *adapter = q_vector->adapter;
8124 struct igb_ring *tx_ring = q_vector->tx.ring;
8125 struct igb_tx_buffer *tx_buffer;
8126 union e1000_adv_tx_desc *tx_desc;
8127 unsigned int total_bytes = 0, total_packets = 0;
8128 unsigned int budget = q_vector->tx.work_limit;
8129 unsigned int i = tx_ring->next_to_clean;
8130
8131 if (test_bit(__IGB_DOWN, &adapter->state))
8132 return true;
8133
8134 tx_buffer = &tx_ring->tx_buffer_info[i];
8135 tx_desc = IGB_TX_DESC(tx_ring, i);
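	/* i is biased negative by the ring size so that reaching zero
	 * below signals a wrap back to the start of the ring
	 */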
8136 i -= tx_ring->count;
8137
8138 do {
8139 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
8140
8141
8142 if (!eop_desc)
8143 break;
8144
8145
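		/* prevent any other reads prior to the eop_desc status check */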
8146 smp_rmb();
8147
8148
8149 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
8150 break;
8151
8152
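		/* clear next_to_watch to prevent false hangs */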
8153 tx_buffer->next_to_watch = NULL;
8154
8155
8156 total_bytes += tx_buffer->bytecount;
8157 total_packets += tx_buffer->gso_segs;
8158
8159
8160 if (tx_buffer->type == IGB_TYPE_SKB)
8161 napi_consume_skb(tx_buffer->skb, napi_budget);
8162 else
8163 xdp_return_frame(tx_buffer->xdpf);
8164
8165
8166 dma_unmap_single(tx_ring->dev,
8167 dma_unmap_addr(tx_buffer, dma),
8168 dma_unmap_len(tx_buffer, len),
8169 DMA_TO_DEVICE);
8170
8171
8172 dma_unmap_len_set(tx_buffer, len, 0);
8173
8174
8175 while (tx_desc != eop_desc) {
8176 tx_buffer++;
8177 tx_desc++;
8178 i++;
8179 if (unlikely(!i)) {
8180 i -= tx_ring->count;
8181 tx_buffer = tx_ring->tx_buffer_info;
8182 tx_desc = IGB_TX_DESC(tx_ring, 0);
8183 }
8184
8185
8186 if (dma_unmap_len(tx_buffer, len)) {
8187 dma_unmap_page(tx_ring->dev,
8188 dma_unmap_addr(tx_buffer, dma),
8189 dma_unmap_len(tx_buffer, len),
8190 DMA_TO_DEVICE);
8191 dma_unmap_len_set(tx_buffer, len, 0);
8192 }
8193 }
8194
8195
8196 tx_buffer++;
8197 tx_desc++;
8198 i++;
8199 if (unlikely(!i)) {
8200 i -= tx_ring->count;
8201 tx_buffer = tx_ring->tx_buffer_info;
8202 tx_desc = IGB_TX_DESC(tx_ring, 0);
8203 }
8204
8205
8206 prefetch(tx_desc);
8207
8208
8209 budget--;
8210 } while (likely(budget));
8211
8212 netdev_tx_completed_queue(txring_txq(tx_ring),
8213 total_packets, total_bytes);
8214 i += tx_ring->count;
8215 tx_ring->next_to_clean = i;
8216 u64_stats_update_begin(&tx_ring->tx_syncp);
8217 tx_ring->tx_stats.bytes += total_bytes;
8218 tx_ring->tx_stats.packets += total_packets;
8219 u64_stats_update_end(&tx_ring->tx_syncp);
8220 q_vector->tx.total_bytes += total_bytes;
8221 q_vector->tx.total_packets += total_packets;
8222
8223 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
8224 struct e1000_hw *hw = &adapter->hw;
8225
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
8229 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
8230 if (tx_buffer->next_to_watch &&
8231 time_after(jiffies, tx_buffer->time_stamp +
8232 (adapter->tx_timeout_factor * HZ)) &&
8233 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
8234
8235
8236 dev_err(tx_ring->dev,
8237 "Detected Tx Unit Hang\n"
8238 " Tx Queue <%d>\n"
8239 " TDH <%x>\n"
8240 " TDT <%x>\n"
8241 " next_to_use <%x>\n"
8242 " next_to_clean <%x>\n"
8243 "buffer_info[next_to_clean]\n"
8244 " time_stamp <%lx>\n"
8245 " next_to_watch <%p>\n"
8246 " jiffies <%lx>\n"
8247 " desc.status <%x>\n",
8248 tx_ring->queue_index,
8249 rd32(E1000_TDH(tx_ring->reg_idx)),
8250 readl(tx_ring->tail),
8251 tx_ring->next_to_use,
8252 tx_ring->next_to_clean,
8253 tx_buffer->time_stamp,
8254 tx_buffer->next_to_watch,
8255 jiffies,
8256 tx_buffer->next_to_watch->wb.status);
8257 netif_stop_subqueue(tx_ring->netdev,
8258 tx_ring->queue_index);
8259
8260
8261 return true;
8262 }
8263 }
8264
8265#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
8266 if (unlikely(total_packets &&
8267 netif_carrier_ok(tx_ring->netdev) &&
8268 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
8269
8270
8271
8272 smp_mb();
8273 if (__netif_subqueue_stopped(tx_ring->netdev,
8274 tx_ring->queue_index) &&
8275 !(test_bit(__IGB_DOWN, &adapter->state))) {
8276 netif_wake_subqueue(tx_ring->netdev,
8277 tx_ring->queue_index);
8278
8279 u64_stats_update_begin(&tx_ring->tx_syncp);
8280 tx_ring->tx_stats.restart_queue++;
8281 u64_stats_update_end(&tx_ring->tx_syncp);
8282 }
8283 }
8284
8285 return !!budget;
8286}
8287
/**
 *  igb_reuse_rx_page - page flip buffer and store it back on the ring
 *  @rx_ring: rx descriptor ring to store buffers on
 *  @old_buff: donor buffer to have page reused
 *
 *  Synchronizes page for reuse by the adapter
 **/
8295static void igb_reuse_rx_page(struct igb_ring *rx_ring,
8296 struct igb_rx_buffer *old_buff)
8297{
8298 struct igb_rx_buffer *new_buff;
8299 u16 nta = rx_ring->next_to_alloc;
8300
8301 new_buff = &rx_ring->rx_buffer_info[nta];
8302
8303
8304 nta++;
8305 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
8306
8307
8308
8309
8310
8311 new_buff->dma = old_buff->dma;
8312 new_buff->page = old_buff->page;
8313 new_buff->page_offset = old_buff->page_offset;
8314 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
8315}
8316
8317static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
8318 int rx_buf_pgcnt)
8319{
8320 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
8321 struct page *page = rx_buffer->page;
8322
8323
8324 if (!dev_page_is_reusable(page))
8325 return false;
8326
8327#if (PAGE_SIZE < 8192)
8328
8329 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
8330 return false;
8331#else
8332#define IGB_LAST_OFFSET \
8333 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
8334
8335 if (rx_buffer->page_offset > IGB_LAST_OFFSET)
8336 return false;
8337#endif
8338
	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
8343 if (unlikely(pagecnt_bias == 1)) {
8344 page_ref_add(page, USHRT_MAX - 1);
8345 rx_buffer->pagecnt_bias = USHRT_MAX;
8346 }
8347
8348 return true;
8349}
8350
/**
 *  igb_add_rx_frag - Add contents of Rx buffer to sk_buff
 *  @rx_ring: rx descriptor ring to transact packets on
 *  @rx_buffer: buffer containing page to add
 *  @skb: sk_buff to place the data into
 *  @size: size of buffer to be added
 *
 *  This function will add the data contained in rx_buffer->page to the skb.
 **/
8360static void igb_add_rx_frag(struct igb_ring *rx_ring,
8361 struct igb_rx_buffer *rx_buffer,
8362 struct sk_buff *skb,
8363 unsigned int size)
8364{
8365#if (PAGE_SIZE < 8192)
8366 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8367#else
8368 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
8369 SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
8370 SKB_DATA_ALIGN(size);
8371#endif
8372 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
8373 rx_buffer->page_offset, size, truesize);
8374#if (PAGE_SIZE < 8192)
8375 rx_buffer->page_offset ^= truesize;
8376#else
8377 rx_buffer->page_offset += truesize;
8378#endif
8379}
8380
8381static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
8382 struct igb_rx_buffer *rx_buffer,
8383 struct xdp_buff *xdp,
8384 ktime_t timestamp)
8385{
8386#if (PAGE_SIZE < 8192)
8387 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8388#else
8389 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
8390 xdp->data_hard_start);
8391#endif
8392 unsigned int size = xdp->data_end - xdp->data;
8393 unsigned int headlen;
8394 struct sk_buff *skb;
8395
8396
8397 net_prefetch(xdp->data);
8398
8399
8400 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
8401 if (unlikely(!skb))
8402 return NULL;
8403
8404 if (timestamp)
8405 skb_hwtstamps(skb)->hwtstamp = timestamp;
8406
8407
8408 headlen = size;
8409 if (headlen > IGB_RX_HDR_LEN)
8410 headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN);
8411
8412
8413 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));
8414
8415
8416 size -= headlen;
8417 if (size) {
8418 skb_add_rx_frag(skb, 0, rx_buffer->page,
8419 (xdp->data + headlen) - page_address(rx_buffer->page),
8420 size, truesize);
8421#if (PAGE_SIZE < 8192)
8422 rx_buffer->page_offset ^= truesize;
8423#else
8424 rx_buffer->page_offset += truesize;
8425#endif
8426 } else {
8427 rx_buffer->pagecnt_bias++;
8428 }
8429
8430 return skb;
8431}
8432
8433static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
8434 struct igb_rx_buffer *rx_buffer,
8435 struct xdp_buff *xdp,
8436 ktime_t timestamp)
8437{
8438#if (PAGE_SIZE < 8192)
8439 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8440#else
8441 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
8442 SKB_DATA_ALIGN(xdp->data_end -
8443 xdp->data_hard_start);
8444#endif
8445 unsigned int metasize = xdp->data - xdp->data_meta;
8446 struct sk_buff *skb;
8447
8448
8449 net_prefetch(xdp->data_meta);
8450
8451
8452 skb = napi_build_skb(xdp->data_hard_start, truesize);
8453 if (unlikely(!skb))
8454 return NULL;
8455
8456
8457 skb_reserve(skb, xdp->data - xdp->data_hard_start);
8458 __skb_put(skb, xdp->data_end - xdp->data);
8459
8460 if (metasize)
8461 skb_metadata_set(skb, metasize);
8462
8463 if (timestamp)
8464 skb_hwtstamps(skb)->hwtstamp = timestamp;
8465
8466
8467#if (PAGE_SIZE < 8192)
8468 rx_buffer->page_offset ^= truesize;
8469#else
8470 rx_buffer->page_offset += truesize;
8471#endif
8472
8473 return skb;
8474}
8475
8476static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
8477 struct igb_ring *rx_ring,
8478 struct xdp_buff *xdp)
8479{
8480 int err, result = IGB_XDP_PASS;
8481 struct bpf_prog *xdp_prog;
8482 u32 act;
8483
8484 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
8485
8486 if (!xdp_prog)
8487 goto xdp_out;
8488
8489 prefetchw(xdp->data_hard_start);
8490
8491 act = bpf_prog_run_xdp(xdp_prog, xdp);
8492 switch (act) {
8493 case XDP_PASS:
8494 break;
8495 case XDP_TX:
8496 result = igb_xdp_xmit_back(adapter, xdp);
8497 if (result == IGB_XDP_CONSUMED)
8498 goto out_failure;
8499 break;
8500 case XDP_REDIRECT:
8501 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
8502 if (err)
8503 goto out_failure;
8504 result = IGB_XDP_REDIR;
8505 break;
8506 default:
8507 bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act);
8508 fallthrough;
8509 case XDP_ABORTED:
8510out_failure:
8511 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
8512 fallthrough;
8513 case XDP_DROP:
8514 result = IGB_XDP_CONSUMED;
8515 break;
8516 }
8517xdp_out:
8518 return ERR_PTR(-result);
8519}
8520
8521static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
8522 unsigned int size)
8523{
8524 unsigned int truesize;
8525
8526#if (PAGE_SIZE < 8192)
8527 truesize = igb_rx_pg_size(rx_ring) / 2;
8528#else
8529 truesize = ring_uses_build_skb(rx_ring) ?
8530 SKB_DATA_ALIGN(IGB_SKB_PAD + size) +
8531 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
8532 SKB_DATA_ALIGN(size);
8533#endif
8534 return truesize;
8535}
8536
8537static void igb_rx_buffer_flip(struct igb_ring *rx_ring,
8538 struct igb_rx_buffer *rx_buffer,
8539 unsigned int size)
8540{
8541 unsigned int truesize = igb_rx_frame_truesize(rx_ring, size);
8542#if (PAGE_SIZE < 8192)
8543 rx_buffer->page_offset ^= truesize;
8544#else
8545 rx_buffer->page_offset += truesize;
8546#endif
8547}
8548
8549static inline void igb_rx_checksum(struct igb_ring *ring,
8550 union e1000_adv_rx_desc *rx_desc,
8551 struct sk_buff *skb)
8552{
8553 skb_checksum_none_assert(skb);
8554
8555
8556 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
8557 return;
8558
8559
8560 if (!(ring->netdev->features & NETIF_F_RXCSUM))
8561 return;
8562
8563
8564 if (igb_test_staterr(rx_desc,
8565 E1000_RXDEXT_STATERR_TCPE |
8566 E1000_RXDEXT_STATERR_IPE)) {
8567
8568
8569
8570
8571 if (!((skb->len == 60) &&
8572 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
8573 u64_stats_update_begin(&ring->rx_syncp);
8574 ring->rx_stats.csum_err++;
8575 u64_stats_update_end(&ring->rx_syncp);
8576 }
8577
8578 return;
8579 }
8580
8581 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
8582 E1000_RXD_STAT_UDPCS))
8583 skb->ip_summed = CHECKSUM_UNNECESSARY;
8584
8585 dev_dbg(ring->dev, "cksum success: bits %08X\n",
8586 le32_to_cpu(rx_desc->wb.upper.status_error));
8587}
8588
8589static inline void igb_rx_hash(struct igb_ring *ring,
8590 union e1000_adv_rx_desc *rx_desc,
8591 struct sk_buff *skb)
8592{
8593 if (ring->netdev->features & NETIF_F_RXHASH)
8594 skb_set_hash(skb,
8595 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
8596 PKT_HASH_TYPE_L3);
8597}
8598
/**
 *  igb_is_non_eop - process handling of non-EOP buffers
 *  @rx_ring: Rx ring being processed
 *  @rx_desc: Rx descriptor for current buffer
 *
 *  This function updates next to clean.  If the buffer is an EOP buffer
 *  this function exits returning false, otherwise it will place the
 *  sk_buff in the next buffer to be chained and return true indicating
 *  that this is in fact a non-EOP buffer.
 **/
8609static bool igb_is_non_eop(struct igb_ring *rx_ring,
8610 union e1000_adv_rx_desc *rx_desc)
8611{
8612 u32 ntc = rx_ring->next_to_clean + 1;
8613
8614
8615 ntc = (ntc < rx_ring->count) ? ntc : 0;
8616 rx_ring->next_to_clean = ntc;
8617
8618 prefetch(IGB_RX_DESC(rx_ring, ntc));
8619
8620 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
8621 return false;
8622
8623 return true;
8624}
8625
/**
 *  igb_cleanup_headers - Correct corrupted or empty headers
 *  @rx_ring: rx descriptor ring packet is being transacted on
 *  @rx_desc: pointer to the EOP Rx descriptor
 *  @skb: pointer to current skb being fixed
 *
 *  Address the case where we are pulling data in on pages only
 *  and as such no data is present in the skb header.
 *
 *  In addition if skb is not at least 60 bytes we need to pad it so that
 *  it is large enough to qualify as a valid Ethernet frame.
 *
 *  Returns true if an error was encountered and skb was freed
 **/
8640static bool igb_cleanup_headers(struct igb_ring *rx_ring,
8641 union e1000_adv_rx_desc *rx_desc,
8642 struct sk_buff *skb)
8643{
8644
8645 if (IS_ERR(skb))
8646 return true;
8647
8648 if (unlikely((igb_test_staterr(rx_desc,
8649 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
8650 struct net_device *netdev = rx_ring->netdev;
8651 if (!(netdev->features & NETIF_F_RXALL)) {
8652 dev_kfree_skb_any(skb);
8653 return true;
8654 }
8655 }
8656
8657
8658 if (eth_skb_pad(skb))
8659 return true;
8660
8661 return false;
8662}
8663
/**
 *  igb_process_skb_fields - Populate skb header fields from Rx descriptor
 *  @rx_ring: rx descriptor ring packet is being transacted on
 *  @rx_desc: pointer to the EOP Rx descriptor
 *  @skb: pointer to current skb being populated
 *
 *  This function checks the ring, descriptor, and packet information in
 *  order to populate the hash, checksum, VLAN, timestamp, protocol, and
 *  other fields within the skb.
 **/
8674static void igb_process_skb_fields(struct igb_ring *rx_ring,
8675 union e1000_adv_rx_desc *rx_desc,
8676 struct sk_buff *skb)
8677{
8678 struct net_device *dev = rx_ring->netdev;
8679
8680 igb_rx_hash(rx_ring, rx_desc, skb);
8681
8682 igb_rx_checksum(rx_ring, rx_desc, skb);
8683
8684 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
8685 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
8686 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
8687
8688 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
8689 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
8690 u16 vid;
8691
8692 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
8693 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
8694 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
8695 else
8696 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
8697
8698 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
8699 }
8700
8701 skb_record_rx_queue(skb, rx_ring->queue_index);
8702
8703 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
8704}
8705
8706static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
8707{
8708 return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
8709}
8710
8711static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
8712 const unsigned int size, int *rx_buf_pgcnt)
8713{
8714 struct igb_rx_buffer *rx_buffer;
8715
8716 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
8717 *rx_buf_pgcnt =
8718#if (PAGE_SIZE < 8192)
8719 page_count(rx_buffer->page);
8720#else
8721 0;
8722#endif
8723 prefetchw(rx_buffer->page);
8724
8725
8726 dma_sync_single_range_for_cpu(rx_ring->dev,
8727 rx_buffer->dma,
8728 rx_buffer->page_offset,
8729 size,
8730 DMA_FROM_DEVICE);
8731
8732 rx_buffer->pagecnt_bias--;
8733
8734 return rx_buffer;
8735}
8736
8737static void igb_put_rx_buffer(struct igb_ring *rx_ring,
8738 struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
8739{
8740 if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
8741
8742 igb_reuse_rx_page(rx_ring, rx_buffer);
8743 } else {
8744
8745
8746
8747 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
8748 igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
8749 IGB_RX_DMA_ATTR);
8750 __page_frag_cache_drain(rx_buffer->page,
8751 rx_buffer->pagecnt_bias);
8752 }
8753
8754
8755 rx_buffer->page = NULL;
8756}
8757
8758static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
8759{
8760 struct igb_adapter *adapter = q_vector->adapter;
8761 struct igb_ring *rx_ring = q_vector->rx.ring;
8762 struct sk_buff *skb = rx_ring->skb;
8763 unsigned int total_bytes = 0, total_packets = 0;
8764 u16 cleaned_count = igb_desc_unused(rx_ring);
8765 unsigned int xdp_xmit = 0;
8766 struct xdp_buff xdp;
8767 u32 frame_sz = 0;
8768 int rx_buf_pgcnt;
8769
8770
8771#if (PAGE_SIZE < 8192)
8772 frame_sz = igb_rx_frame_truesize(rx_ring, 0);
8773#endif
8774 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
8775
8776 while (likely(total_packets < budget)) {
8777 union e1000_adv_rx_desc *rx_desc;
8778 struct igb_rx_buffer *rx_buffer;
8779 ktime_t timestamp = 0;
8780 int pkt_offset = 0;
8781 unsigned int size;
8782 void *pktbuf;
8783
8784
8785 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
8786 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8787 cleaned_count = 0;
8788 }
8789
8790 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
8791 size = le16_to_cpu(rx_desc->wb.upper.length);
8792 if (!size)
8793 break;
8794
8795
8796
8797
8798
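		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */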
8799 dma_rmb();
8800
8801 rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
8802 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
8803
8804
8805 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
8806 int ts_hdr_len;
8807
8808 ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
							 pktbuf, &timestamp);
8810
8811 pkt_offset += ts_hdr_len;
8812 size -= ts_hdr_len;
8813 }
8814
8815
8816 if (!skb) {
8817 unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
8818 unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
8819
8820 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
8821#if (PAGE_SIZE > 4096)
8822
8823 xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
8824#endif
8825 skb = igb_run_xdp(adapter, rx_ring, &xdp);
8826 }
8827
8828 if (IS_ERR(skb)) {
8829 unsigned int xdp_res = -PTR_ERR(skb);
8830
8831 if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
8832 xdp_xmit |= xdp_res;
8833 igb_rx_buffer_flip(rx_ring, rx_buffer, size);
8834 } else {
8835 rx_buffer->pagecnt_bias++;
8836 }
8837 total_packets++;
8838 total_bytes += size;
8839 } else if (skb)
8840 igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
8841 else if (ring_uses_build_skb(rx_ring))
8842 skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
8843 timestamp);
8844 else
8845 skb = igb_construct_skb(rx_ring, rx_buffer,
8846 &xdp, timestamp);
8847
8848
8849 if (!skb) {
8850 rx_ring->rx_stats.alloc_failed++;
8851 rx_buffer->pagecnt_bias++;
8852 break;
8853 }
8854
8855 igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
8856 cleaned_count++;
8857
8858
8859 if (igb_is_non_eop(rx_ring, rx_desc))
8860 continue;
8861
8862
8863 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
8864 skb = NULL;
8865 continue;
8866 }
8867
8868
8869 total_bytes += skb->len;
8870
8871
8872 igb_process_skb_fields(rx_ring, rx_desc, skb);
8873
8874 napi_gro_receive(&q_vector->napi, skb);
8875
8876
8877 skb = NULL;
8878
8879
8880 total_packets++;
8881 }
8882
8883
8884 rx_ring->skb = skb;
8885
8886 if (xdp_xmit & IGB_XDP_REDIR)
8887 xdp_do_flush();
8888
8889 if (xdp_xmit & IGB_XDP_TX) {
8890 struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
8891
8892 igb_xdp_ring_update_tail(tx_ring);
8893 }
8894
8895 u64_stats_update_begin(&rx_ring->rx_syncp);
8896 rx_ring->rx_stats.packets += total_packets;
8897 rx_ring->rx_stats.bytes += total_bytes;
8898 u64_stats_update_end(&rx_ring->rx_syncp);
8899 q_vector->rx.total_packets += total_packets;
8900 q_vector->rx.total_bytes += total_bytes;
8901
8902 if (cleaned_count)
8903 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8904
8905 return total_packets;
8906}
8907
8908static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
8909 struct igb_rx_buffer *bi)
8910{
8911 struct page *page = bi->page;
8912 dma_addr_t dma;
8913
8914
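	/* since we are recycling buffers we should seldom need to alloc */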
8915 if (likely(page))
8916 return true;
8917
8918
8919 page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
8920 if (unlikely(!page)) {
8921 rx_ring->rx_stats.alloc_failed++;
8922 return false;
8923 }
8924
8925
8926 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
8927 igb_rx_pg_size(rx_ring),
8928 DMA_FROM_DEVICE,
8929 IGB_RX_DMA_ATTR);
8930
8931
8932
8933
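	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */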
8934 if (dma_mapping_error(rx_ring->dev, dma)) {
8935 __free_pages(page, igb_rx_pg_order(rx_ring));
8936
8937 rx_ring->rx_stats.alloc_failed++;
8938 return false;
8939 }
8940
8941 bi->dma = dma;
8942 bi->page = page;
8943 bi->page_offset = igb_rx_offset(rx_ring);
8944 page_ref_add(page, USHRT_MAX - 1);
8945 bi->pagecnt_bias = USHRT_MAX;
8946
8947 return true;
8948}
8949
/**
 *  igb_alloc_rx_buffers - Replace used receive buffers
 *  @rx_ring: rx descriptor ring to allocate new receive buffers
 *  @cleaned_count: count of buffers to allocate
 **/
8955void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
8956{
8957 union e1000_adv_rx_desc *rx_desc;
8958 struct igb_rx_buffer *bi;
8959 u16 i = rx_ring->next_to_use;
8960 u16 bufsz;
8961
8962
8963 if (!cleaned_count)
8964 return;
8965
8966 rx_desc = IGB_RX_DESC(rx_ring, i);
8967 bi = &rx_ring->rx_buffer_info[i];
8968 i -= rx_ring->count;
8969
8970 bufsz = igb_rx_bufsz(rx_ring);
8971
8972 do {
8973 if (!igb_alloc_mapped_page(rx_ring, bi))
8974 break;
8975
8976
8977 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
8978 bi->page_offset, bufsz,
8979 DMA_FROM_DEVICE);
8980
8981
8982
8983
8984 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
8985
8986 rx_desc++;
8987 bi++;
8988 i++;
8989 if (unlikely(!i)) {
8990 rx_desc = IGB_RX_DESC(rx_ring, 0);
8991 bi = rx_ring->rx_buffer_info;
8992 i -= rx_ring->count;
8993 }
8994
8995
8996 rx_desc->wb.upper.length = 0;
8997
8998 cleaned_count--;
8999 } while (cleaned_count);
9000
9001 i += rx_ring->count;
9002
9003 if (rx_ring->next_to_use != i) {
9004
9005 rx_ring->next_to_use = i;
9006
9007
9008 rx_ring->next_to_alloc = i;
9009
9010
9011
9012
9013
9014
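		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */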
9015 dma_wmb();
9016 writel(i, rx_ring->tail);
9017 }
9018}
9019
/**
 *  igb_mii_ioctl - MII ioctl handler
 *  @netdev: pointer to netdev struct
 *  @ifr: interface structure
 *  @cmd: ioctl command to execute
 **/
9026static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
9027{
9028 struct igb_adapter *adapter = netdev_priv(netdev);
9029 struct mii_ioctl_data *data = if_mii(ifr);
9030
9031 if (adapter->hw.phy.media_type != e1000_media_type_copper)
9032 return -EOPNOTSUPP;
9033
9034 switch (cmd) {
9035 case SIOCGMIIPHY:
9036 data->phy_id = adapter->hw.phy.addr;
9037 break;
9038 case SIOCGMIIREG:
9039 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
9040 &data->val_out))
9041 return -EIO;
9042 break;
9043 case SIOCSMIIREG:
9044 default:
9045 return -EOPNOTSUPP;
9046 }
9047 return 0;
9048}
9049
/**
 *  igb_ioctl - generic ioctl handler
 *  @netdev: pointer to netdev struct
 *  @ifr: interface structure
 *  @cmd: ioctl command to execute
 **/
9056static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
9057{
9058 switch (cmd) {
9059 case SIOCGMIIPHY:
9060 case SIOCGMIIREG:
9061 case SIOCSMIIREG:
9062 return igb_mii_ioctl(netdev, ifr, cmd);
9063 case SIOCGHWTSTAMP:
9064 return igb_ptp_get_ts_config(netdev, ifr);
9065 case SIOCSHWTSTAMP:
9066 return igb_ptp_set_ts_config(netdev, ifr);
9067 default:
9068 return -EOPNOTSUPP;
9069 }
9070}
9071
9072void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
9073{
9074 struct igb_adapter *adapter = hw->back;
9075
9076 pci_read_config_word(adapter->pdev, reg, value);
9077}
9078
9079void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
9080{
9081 struct igb_adapter *adapter = hw->back;
9082
9083 pci_write_config_word(adapter->pdev, reg, *value);
9084}
9085
9086s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
9087{
9088 struct igb_adapter *adapter = hw->back;
9089
9090 if (pcie_capability_read_word(adapter->pdev, reg, value))
9091 return -E1000_ERR_CONFIG;
9092
9093 return 0;
9094}
9095
9096s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
9097{
9098 struct igb_adapter *adapter = hw->back;
9099
9100 if (pcie_capability_write_word(adapter->pdev, reg, *value))
9101 return -E1000_ERR_CONFIG;
9102
9103 return 0;
9104}
9105
9106static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
9107{
9108 struct igb_adapter *adapter = netdev_priv(netdev);
9109 struct e1000_hw *hw = &adapter->hw;
9110 u32 ctrl, rctl;
9111 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
9112
9113 if (enable) {
9114
9115 ctrl = rd32(E1000_CTRL);
9116 ctrl |= E1000_CTRL_VME;
9117 wr32(E1000_CTRL, ctrl);
9118
9119
9120 rctl = rd32(E1000_RCTL);
9121 rctl &= ~E1000_RCTL_CFIEN;
9122 wr32(E1000_RCTL, rctl);
9123 } else {
9124
9125 ctrl = rd32(E1000_CTRL);
9126 ctrl &= ~E1000_CTRL_VME;
9127 wr32(E1000_CTRL, ctrl);
9128 }
9129
9130 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
9131}
9132
9133static int igb_vlan_rx_add_vid(struct net_device *netdev,
9134 __be16 proto, u16 vid)
9135{
9136 struct igb_adapter *adapter = netdev_priv(netdev);
9137 struct e1000_hw *hw = &adapter->hw;
9138 int pf_id = adapter->vfs_allocated_count;
9139
9140
9141 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
9142 igb_vfta_set(hw, vid, pf_id, true, !!vid);
9143
9144 set_bit(vid, adapter->active_vlans);
9145
9146 return 0;
9147}
9148
9149static int igb_vlan_rx_kill_vid(struct net_device *netdev,
9150 __be16 proto, u16 vid)
9151{
9152 struct igb_adapter *adapter = netdev_priv(netdev);
9153 int pf_id = adapter->vfs_allocated_count;
9154 struct e1000_hw *hw = &adapter->hw;
9155
9156
9157 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
9158 igb_vfta_set(hw, vid, pf_id, false, true);
9159
9160 clear_bit(vid, adapter->active_vlans);
9161
9162 return 0;
9163}
9164
9165static void igb_restore_vlan(struct igb_adapter *adapter)
9166{
9167 u16 vid = 1;
9168
9169 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
9170 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
9171
9172 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
9173 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
9174}
9175
9176int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
9177{
9178 struct pci_dev *pdev = adapter->pdev;
9179 struct e1000_mac_info *mac = &adapter->hw.mac;
9180
9181 mac->autoneg = 0;
9182
9183
9184
9185
9186 if ((spd & 1) || (dplx & ~1))
9187 goto err_inval;
9188
9189
9190
9191
9192 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
9193 switch (spd + dplx) {
9194 case SPEED_10 + DUPLEX_HALF:
9195 case SPEED_10 + DUPLEX_FULL:
9196 case SPEED_100 + DUPLEX_HALF:
9197 goto err_inval;
9198 default:
9199 break;
9200 }
9201 }
9202
9203 switch (spd + dplx) {
9204 case SPEED_10 + DUPLEX_HALF:
9205 mac->forced_speed_duplex = ADVERTISE_10_HALF;
9206 break;
9207 case SPEED_10 + DUPLEX_FULL:
9208 mac->forced_speed_duplex = ADVERTISE_10_FULL;
9209 break;
9210 case SPEED_100 + DUPLEX_HALF:
9211 mac->forced_speed_duplex = ADVERTISE_100_HALF;
9212 break;
9213 case SPEED_100 + DUPLEX_FULL:
9214 mac->forced_speed_duplex = ADVERTISE_100_FULL;
9215 break;
9216 case SPEED_1000 + DUPLEX_FULL:
9217 mac->autoneg = 1;
9218 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
9219 break;
9220 case SPEED_1000 + DUPLEX_HALF:
9221 default:
9222 goto err_inval;
9223 }
9224
9225
9226 adapter->hw.phy.mdix = AUTO_ALL_MODES;
9227
9228 return 0;
9229
9230err_inval:
9231 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
9232 return -EINVAL;
9233}
9234
9235static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
9236 bool runtime)
9237{
9238 struct net_device *netdev = pci_get_drvdata(pdev);
9239 struct igb_adapter *adapter = netdev_priv(netdev);
9240 struct e1000_hw *hw = &adapter->hw;
9241 u32 ctrl, rctl, status;
9242 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
9243 bool wake;
9244
9245 rtnl_lock();
9246 netif_device_detach(netdev);
9247
9248 if (netif_running(netdev))
9249 __igb_close(netdev, true);
9250
9251 igb_ptp_suspend(adapter);
9252
9253 igb_clear_interrupt_scheme(adapter);
9254 rtnl_unlock();
9255
9256 status = rd32(E1000_STATUS);
9257 if (status & E1000_STATUS_LU)
9258 wufc &= ~E1000_WUFC_LNKC;
9259
9260 if (wufc) {
9261 igb_setup_rctl(adapter);
9262 igb_set_rx_mode(netdev);
9263
9264
9265 if (wufc & E1000_WUFC_MC) {
9266 rctl = rd32(E1000_RCTL);
9267 rctl |= E1000_RCTL_MPE;
9268 wr32(E1000_RCTL, rctl);
9269 }
9270
9271 ctrl = rd32(E1000_CTRL);
9272 ctrl |= E1000_CTRL_ADVD3WUC;
9273 wr32(E1000_CTRL, ctrl);
9274
9275
9276 igb_disable_pcie_master(hw);
9277
9278 wr32(E1000_WUC, E1000_WUC_PME_EN);
9279 wr32(E1000_WUFC, wufc);
9280 } else {
9281 wr32(E1000_WUC, 0);
9282 wr32(E1000_WUFC, 0);
9283 }
9284
9285 wake = wufc || adapter->en_mng_pt;
9286 if (!wake)
9287 igb_power_down_link(adapter);
9288 else
9289 igb_power_up_link(adapter);
9290
9291 if (enable_wake)
9292 *enable_wake = wake;
9293
9294
9295
9296
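	/* Release control of the hardware to the firmware for the
	 * duration of the power-down.
	 */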
9297 igb_release_hw_control(adapter);
9298
9299 pci_disable_device(pdev);
9300
9301 return 0;
9302}
9303
9304static void igb_deliver_wake_packet(struct net_device *netdev)
9305{
9306 struct igb_adapter *adapter = netdev_priv(netdev);
9307 struct e1000_hw *hw = &adapter->hw;
9308 struct sk_buff *skb;
9309 u32 wupl;
9310
9311 wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
9312
9313
9314
9315
9316 if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
9317 return;
9318
9319 skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
9320 if (!skb)
9321 return;
9322
9323 skb_put(skb, wupl);
9324
9325
9326 wupl = roundup(wupl, 4);
9327
9328 memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
9329
9330 skb->protocol = eth_type_trans(skb, netdev);
9331 netif_rx(skb);
9332}
9333
9334static int __maybe_unused igb_suspend(struct device *dev)
9335{
9336 return __igb_shutdown(to_pci_dev(dev), NULL, 0);
9337}
9338
9339static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
9340{
9341 struct pci_dev *pdev = to_pci_dev(dev);
9342 struct net_device *netdev = pci_get_drvdata(pdev);
9343 struct igb_adapter *adapter = netdev_priv(netdev);
9344 struct e1000_hw *hw = &adapter->hw;
9345 u32 err, val;
9346
9347 pci_set_power_state(pdev, PCI_D0);
9348 pci_restore_state(pdev);
9349 pci_save_state(pdev);
9350
9351 if (!pci_device_is_present(pdev))
9352 return -ENODEV;
9353 err = pci_enable_device_mem(pdev);
9354 if (err) {
9355 dev_err(&pdev->dev,
9356 "igb: Cannot enable PCI device from suspend\n");
9357 return err;
9358 }
9359 pci_set_master(pdev);
9360
9361 pci_enable_wake(pdev, PCI_D3hot, 0);
9362 pci_enable_wake(pdev, PCI_D3cold, 0);
9363
9364 if (igb_init_interrupt_scheme(adapter, true)) {
9365 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9366 return -ENOMEM;
9367 }
9368
9369 igb_reset(adapter);
9370
9371
9372
9373
9374 igb_get_hw_control(adapter);
9375
9376 val = rd32(E1000_WUS);
9377 if (val & WAKE_PKT_WUS)
9378 igb_deliver_wake_packet(netdev);
9379
9380 wr32(E1000_WUS, ~0);
9381
9382 if (!rpm)
9383 rtnl_lock();
9384 if (!err && netif_running(netdev))
9385 err = __igb_open(netdev, true);
9386
9387 if (!err)
9388 netif_device_attach(netdev);
9389 if (!rpm)
9390 rtnl_unlock();
9391
9392 return err;
9393}
9394
9395static int __maybe_unused igb_resume(struct device *dev)
9396{
9397 return __igb_resume(dev, false);
9398}
9399
9400static int __maybe_unused igb_runtime_idle(struct device *dev)
9401{
9402 struct net_device *netdev = dev_get_drvdata(dev);
9403 struct igb_adapter *adapter = netdev_priv(netdev);
9404
9405 if (!igb_has_link(adapter))
9406 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
9407
9408 return -EBUSY;
9409}
9410
9411static int __maybe_unused igb_runtime_suspend(struct device *dev)
9412{
9413 return __igb_shutdown(to_pci_dev(dev), NULL, 1);
9414}
9415
9416static int __maybe_unused igb_runtime_resume(struct device *dev)
9417{
9418 return __igb_resume(dev, true);
9419}
9420
9421static void igb_shutdown(struct pci_dev *pdev)
9422{
9423 bool wake;
9424
9425 __igb_shutdown(pdev, &wake, 0);
9426
9427 if (system_state == SYSTEM_POWER_OFF) {
9428 pci_wake_from_d3(pdev, wake);
9429 pci_set_power_state(pdev, PCI_D3hot);
9430 }
9431}
9432
9433#ifdef CONFIG_PCI_IOV
9434static int igb_sriov_reinit(struct pci_dev *dev)
9435{
9436 struct net_device *netdev = pci_get_drvdata(dev);
9437 struct igb_adapter *adapter = netdev_priv(netdev);
9438 struct pci_dev *pdev = adapter->pdev;
9439
9440 rtnl_lock();
9441
9442 if (netif_running(netdev))
9443 igb_close(netdev);
9444 else
9445 igb_reset(adapter);
9446
9447 igb_clear_interrupt_scheme(adapter);
9448
9449 igb_init_queue_configuration(adapter);
9450
9451 if (igb_init_interrupt_scheme(adapter, true)) {
9452 rtnl_unlock();
9453 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9454 return -ENOMEM;
9455 }
9456
9457 if (netif_running(netdev))
9458 igb_open(netdev);
9459
9460 rtnl_unlock();
9461
9462 return 0;
9463}
9464
9465static int igb_pci_disable_sriov(struct pci_dev *dev)
9466{
9467 int err = igb_disable_sriov(dev);
9468
9469 if (!err)
9470 err = igb_sriov_reinit(dev);
9471
9472 return err;
9473}
9474
9475static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
9476{
9477 int err = igb_enable_sriov(dev, num_vfs);
9478
9479 if (err)
9480 goto out;
9481
9482 err = igb_sriov_reinit(dev);
9483 if (!err)
9484 return num_vfs;
9485
9486out:
9487 return err;
9488}
9489
9490#endif
9491static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
9492{
9493#ifdef CONFIG_PCI_IOV
9494 if (num_vfs == 0)
9495 return igb_pci_disable_sriov(dev);
9496 else
9497 return igb_pci_enable_sriov(dev, num_vfs);
9498#endif
9499 return 0;
9500}
9501
/**
 *  igb_io_error_detected - called when PCI error is detected
 *  @pdev: Pointer to PCI device
 *  @state: The current pci connection state
 *
 *  This function is called after a PCI bus error affecting
 *  this device has been detected.
 **/
9510static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
9511 pci_channel_state_t state)
9512{
9513 struct net_device *netdev = pci_get_drvdata(pdev);
9514 struct igb_adapter *adapter = netdev_priv(netdev);
9515
9516 netif_device_detach(netdev);
9517
9518 if (state == pci_channel_io_perm_failure)
9519 return PCI_ERS_RESULT_DISCONNECT;
9520
9521 if (netif_running(netdev))
9522 igb_down(adapter);
9523 pci_disable_device(pdev);
9524
9525
9526 return PCI_ERS_RESULT_NEED_RESET;
9527}
9528
/**
 *  igb_io_slot_reset - called after the pci bus has been reset.
 *  @pdev: Pointer to PCI device
 *
 *  Restart the card from scratch, as if from a cold-boot. Implementation
 *  resembles the first-half of the __igb_resume routine.
 **/
9536static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
9537{
9538 struct net_device *netdev = pci_get_drvdata(pdev);
9539 struct igb_adapter *adapter = netdev_priv(netdev);
9540 struct e1000_hw *hw = &adapter->hw;
9541 pci_ers_result_t result;
9542
9543 if (pci_enable_device_mem(pdev)) {
9544 dev_err(&pdev->dev,
9545 "Cannot re-enable PCI device after reset.\n");
9546 result = PCI_ERS_RESULT_DISCONNECT;
9547 } else {
9548 pci_set_master(pdev);
9549 pci_restore_state(pdev);
9550 pci_save_state(pdev);
9551
9552 pci_enable_wake(pdev, PCI_D3hot, 0);
9553 pci_enable_wake(pdev, PCI_D3cold, 0);
9554
		/* In case of PCI error, adapter loses its HW address
		 * so we should re-assign it here.
		 */
9558 hw->hw_addr = adapter->io_addr;
9559
9560 igb_reset(adapter);
9561 wr32(E1000_WUS, ~0);
9562 result = PCI_ERS_RESULT_RECOVERED;
9563 }
9564
9565 return result;
9566}
9567
/**
 *  igb_io_resume - called when traffic can start flowing again.
 *  @pdev: Pointer to PCI device
 *
 *  This callback is called when the error recovery driver tells us that
 *  it is OK to resume normal operation. Implementation resembles the
 *  second-half of the __igb_resume routine.
 */
9576static void igb_io_resume(struct pci_dev *pdev)
9577{
9578 struct net_device *netdev = pci_get_drvdata(pdev);
9579 struct igb_adapter *adapter = netdev_priv(netdev);
9580
9581 if (netif_running(netdev)) {
9582 if (igb_up(adapter)) {
9583 dev_err(&pdev->dev, "igb_up failed after reset\n");
9584 return;
9585 }
9586 }
9587
9588 netif_device_attach(netdev);
9589
	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
9593 igb_get_hw_control(adapter);
9594}
9595
/**
 *  igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
 *  @adapter: Pointer to adapter structure
 *  @index: Index of the RAR entry which needs to be synced with MAC table
 **/
9601static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
9602{
9603 struct e1000_hw *hw = &adapter->hw;
9604 u32 rar_low, rar_high;
9605 u8 *addr = adapter->mac_table[index].addr;
9606
	/* HW expects these to be in network order when they are plugged
	 * into the registers which are little endian.  In order to guarantee
	 * that ordering we need to do an leXX_to_cpup here in order to be
	 * ready for the byteswap that occurs with writel
	 */
9612 rar_low = le32_to_cpup((__le32 *)(addr));
9613 rar_high = le16_to_cpup((__le16 *)(addr + 4));
9614
	/* set the address-valid bit and pool/queue selection only for
	 * entries that are marked in use
	 */
9616 if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
9617 if (is_valid_ether_addr(addr))
9618 rar_high |= E1000_RAH_AV;
9619
9620 if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
9621 rar_high |= E1000_RAH_ASEL_SRC_ADDR;
9622
9623 switch (hw->mac.type) {
9624 case e1000_82575:
9625 case e1000_i210:
9626 if (adapter->mac_table[index].state &
9627 IGB_MAC_STATE_QUEUE_STEERING)
9628 rar_high |= E1000_RAH_QSEL_ENABLE;
9629
9630 rar_high |= E1000_RAH_POOL_1 *
9631 adapter->mac_table[index].queue;
9632 break;
9633 default:
9634 rar_high |= E1000_RAH_POOL_1 <<
9635 adapter->mac_table[index].queue;
9636 break;
9637 }
9638 }
9639
9640 wr32(E1000_RAL(index), rar_low);
9641 wrfl();
9642 wr32(E1000_RAH(index), rar_high);
9643 wrfl();
9644}
9645
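/* Program a VF MAC address into the RAR entry reserved for that VF and mark
 * the MAC table slot as in use.
 */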
9646static int igb_set_vf_mac(struct igb_adapter *adapter,
9647 int vf, unsigned char *mac_addr)
9648{
9649 struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at end of receive addresses and move
	 * towards the first, as a result a collision should not be possible
	 */
9653 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
9654 unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
9655
9656 ether_addr_copy(vf_mac_addr, mac_addr);
9657 ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
9658 adapter->mac_table[rar_entry].queue = vf;
9659 adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
9660 igb_rar_set_index(adapter, rar_entry);
9661
9662 return 0;
9663}
9664
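/* .ndo_set_vf_mac handler: an all-zero MAC clears the administratively set
 * address, a valid unicast MAC pins it until it is cleared again.
 */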
9665static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
9666{
9667 struct igb_adapter *adapter = netdev_priv(netdev);
9668
9669 if (vf >= adapter->vfs_allocated_count)
9670 return -EINVAL;
9671
	/* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
	 * flag and allows to overwrite the MAC via VF netdev.  This
	 * is necessary to allow libvirt a way to restore the original
	 * MAC after unbinding vfio-pci and reloading igbvf after shutting
	 * down a VM.
	 */
9678 if (is_zero_ether_addr(mac)) {
9679 adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
9680 dev_info(&adapter->pdev->dev,
9681 "remove administratively set MAC on VF %d\n",
9682 vf);
9683 } else if (is_valid_ether_addr(mac)) {
9684 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
9685 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
9686 mac, vf);
9687 dev_info(&adapter->pdev->dev,
9688 "Reload the VF driver to make this change effective.");
9689
9690 if (test_bit(__IGB_DOWN, &adapter->state)) {
9691 dev_warn(&adapter->pdev->dev,
9692 "The VF MAC address has been set, but the PF device is not up.\n");
9693 dev_warn(&adapter->pdev->dev,
9694 "Bring the PF device up before attempting to use the VF device.\n");
9695 }
9696 } else {
9697 return -EINVAL;
9698 }
9699 return igb_set_vf_mac(adapter, vf, mac);
9700}
9701
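/* Convert a SPEED_* constant to the link rate in Mbps (0 if unsupported). */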
9702static int igb_link_mbps(int internal_link_speed)
9703{
9704 switch (internal_link_speed) {
9705 case SPEED_100:
9706 return 100;
9707 case SPEED_1000:
9708 return 1000;
9709 default:
9710 return 0;
9711 }
9712}
9713
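/* Program the per-VF transmit rate limiter (RTTBCNRC) for queue 'vf'.  The
 * rate factor is the link speed divided by the requested tx_rate, split into
 * integer and fractional parts; tx_rate == 0 disables the limiter.
 */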
9714static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
9715 int link_speed)
9716{
9717 int rf_dec, rf_int;
9718 u32 bcnrc_val;
9719
9720 if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
9722 rf_int = link_speed / tx_rate;
9723 rf_dec = (link_speed - (rf_int * tx_rate));
9724 rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
9725 tx_rate;
9726
9727 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
9728 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
9729 E1000_RTTBCNRC_RF_INT_MASK);
9730 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
9731 } else {
9732 bcnrc_val = 0;
9733 }
9734
	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	/* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
	 */
9739 wr32(E1000_RTTBCNRM, 0x14);
9740 wr32(E1000_RTTBCNRC, bcnrc_val);
9741}
9742
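/* Re-apply (or clear) VF rate limits after a link speed change; only the
 * 82576 supports per-VF rate limiting here.
 */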
9743static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
9744{
9745 int actual_link_speed, i;
9746 bool reset_rate = false;
9747
	/* VF TX rate limit was not set or not supported */
9749 if ((adapter->vf_rate_link_speed == 0) ||
9750 (adapter->hw.mac.type != e1000_82576))
9751 return;
9752
9753 actual_link_speed = igb_link_mbps(adapter->link_speed);
9754 if (actual_link_speed != adapter->vf_rate_link_speed) {
9755 reset_rate = true;
9756 adapter->vf_rate_link_speed = 0;
9757 dev_info(&adapter->pdev->dev,
9758 "Link speed has been changed. VF Transmit rate is disabled\n");
9759 }
9760
9761 for (i = 0; i < adapter->vfs_allocated_count; i++) {
9762 if (reset_rate)
9763 adapter->vf_data[i].tx_rate = 0;
9764
9765 igb_set_vf_rate_limit(&adapter->hw, i,
9766 adapter->vf_data[i].tx_rate,
9767 actual_link_speed);
9768 }
9769}
9770
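/* .ndo_set_vf_rate handler: only max_tx_rate is supported, and only on 82576
 * with link up; the requested rate may not exceed the current link speed.
 */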
9771static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
9772 int min_tx_rate, int max_tx_rate)
9773{
9774 struct igb_adapter *adapter = netdev_priv(netdev);
9775 struct e1000_hw *hw = &adapter->hw;
9776 int actual_link_speed;
9777
9778 if (hw->mac.type != e1000_82576)
9779 return -EOPNOTSUPP;
9780
9781 if (min_tx_rate)
9782 return -EINVAL;
9783
9784 actual_link_speed = igb_link_mbps(adapter->link_speed);
9785 if ((vf >= adapter->vfs_allocated_count) ||
9786 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
9787 (max_tx_rate < 0) ||
9788 (max_tx_rate > actual_link_speed))
9789 return -EINVAL;
9790
9791 adapter->vf_rate_link_speed = actual_link_speed;
9792 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
9793 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
9794
9795 return 0;
9796}
9797
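/* .ndo_set_vf_spoofchk handler: toggle MAC and VLAN anti-spoof checking for
 * the given VF in the DTXSWC/TXSWC register.
 */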
9798static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
9799 bool setting)
9800{
9801 struct igb_adapter *adapter = netdev_priv(netdev);
9802 struct e1000_hw *hw = &adapter->hw;
9803 u32 reg_val, reg_offset;
9804
9805 if (!adapter->vfs_allocated_count)
9806 return -EOPNOTSUPP;
9807
9808 if (vf >= adapter->vfs_allocated_count)
9809 return -EINVAL;
9810
9811 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
9812 reg_val = rd32(reg_offset);
9813 if (setting)
9814 reg_val |= (BIT(vf) |
9815 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9816 else
9817 reg_val &= ~(BIT(vf) |
9818 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9819 wr32(reg_offset, reg_val);
9820
9821 adapter->vf_data[vf].spoofchk_enabled = setting;
9822 return 0;
9823}
9824
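/* .ndo_set_vf_trust handler: record the trusted flag for the VF. */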
9825static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
9826{
9827 struct igb_adapter *adapter = netdev_priv(netdev);
9828
9829 if (vf >= adapter->vfs_allocated_count)
9830 return -EINVAL;
9831 if (adapter->vf_data[vf].trusted == setting)
9832 return 0;
9833
9834 adapter->vf_data[vf].trusted = setting;
9835
9836 dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
9837 vf, setting ? "" : "not ");
9838 return 0;
9839}
9840
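/* .ndo_get_vf_config handler: report MAC, rate, VLAN/QoS, spoof-check and
 * trust settings for the given VF.
 */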
9841static int igb_ndo_get_vf_config(struct net_device *netdev,
9842 int vf, struct ifla_vf_info *ivi)
9843{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
9846 return -EINVAL;
9847 ivi->vf = vf;
9848 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
9849 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
9850 ivi->min_tx_rate = 0;
9851 ivi->vlan = adapter->vf_data[vf].pf_vlan;
9852 ivi->qos = adapter->vf_data[vf].pf_qos;
9853 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
9854 ivi->trusted = adapter->vf_data[vf].trusted;
9855 return 0;
9856}
9857
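/* Configure VMDq/SR-IOV switch behaviour (loopback, replication and
 * anti-spoofing) according to the MAC type and number of allocated VFs.
 */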
9858static void igb_vmm_control(struct igb_adapter *adapter)
9859{
9860 struct e1000_hw *hw = &adapter->hw;
9861 u32 reg;
9862
9863 switch (hw->mac.type) {
9864 case e1000_82575:
9865 case e1000_i210:
9866 case e1000_i211:
9867 case e1000_i354:
9868 default:
		/* replication is not supported for 82575 */
9870 return;
9871 case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
9873 reg = rd32(E1000_DTXCTL);
9874 reg |= E1000_DTXCTL_VLAN_ADDED;
9875 wr32(E1000_DTXCTL, reg);
9876 fallthrough;
9877 case e1000_82580:
		/* enable replication vlan tag stripping */
9879 reg = rd32(E1000_RPLOLR);
9880 reg |= E1000_RPLOLR_STRVLAN;
9881 wr32(E1000_RPLOLR, reg);
9882 fallthrough;
9883 case e1000_i350:
		/* none of the above registers are supported by i350 */
9885 break;
9886 }
9887
9888 if (adapter->vfs_allocated_count) {
9889 igb_vmdq_set_loopback_pf(hw, true);
9890 igb_vmdq_set_replication_pf(hw, true);
9891 igb_vmdq_set_anti_spoofing_pf(hw, true,
9892 adapter->vfs_allocated_count);
9893 } else {
9894 igb_vmdq_set_loopback_pf(hw, false);
9895 igb_vmdq_set_replication_pf(hw, false);
9896 }
9897}
9898
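/* Configure DMA coalescing thresholds for parts newer than 82580 when
 * IGB_FLAG_DMAC is set, and explicitly disable it on 82580.
 */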
9899static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
9900{
9901 struct e1000_hw *hw = &adapter->hw;
9902 u32 dmac_thr;
9903 u16 hwm;
9904 u32 reg;
9905
9906 if (hw->mac.type > e1000_82580) {
9907 if (adapter->flags & IGB_FLAG_DMAC) {
			/* force threshold to 0 */
9909 wr32(E1000_DMCTXTH, 0);
9910
			/* DMA Coalescing high water mark needs to be greater
			 * than the Rx threshold. Set hwm to PBA - max frame
			 * size in 16B units, capping it at PBA - 6KB.
			 */
9915 hwm = 64 * (pba - 6);
9916 reg = rd32(E1000_FCRTC);
9917 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
9918 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
9919 & E1000_FCRTC_RTH_COAL_MASK);
9920 wr32(E1000_FCRTC, reg);
9921
			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
			 * frame size, capping it at PBA - 10KB.
			 */
9925 dmac_thr = pba - 10;
9926 reg = rd32(E1000_DMACR);
9927 reg &= ~E1000_DMACR_DMACTHR_MASK;
9928 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
9929 & E1000_DMACR_DMACTHR_MASK);
9930
			/* transition to L0x or L1 if available */
9932 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
9933
			/* watchdog timer = +-1000 usec in 32 usec intervals */
9935 reg |= (1000 >> 5);
9936
			/* Disable BMC-to-OS Watchdog Enable */
9938 if (hw->mac.type != e1000_i354)
9939 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9940 wr32(E1000_DMACR, reg);
9941
			/* no lower threshold to disable
			 * coalescing (smart FIFO) - UTRESH=0
			 */
9945 wr32(E1000_DMCRTRH, 0);
9946
9947 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

			/* Set the time to wait before requesting transition
			 * to a low power state to the number of usecs needed
			 * to receive one 512 byte frame at gigabit line rate.
			 * On i350 the transition to Lx is delayed by 4 usec
			 * when the flush disable bit is set, so that data
			 * prior to the flush is not lost before the flush
			 * completes.
			 */
9949 wr32(E1000_DMCTLX, reg);
9950
			/* free space in tx packet buffer to wake from
			 * DMA coal
			 */
9954 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
9955 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
9956 }
9957
9958 if (hw->mac.type >= e1000_i210 ||
9959 (adapter->flags & IGB_FLAG_DMAC)) {
9960 reg = rd32(E1000_PCIEMISC);
9961 reg |= E1000_PCIEMISC_LX_DECISION;
9962 wr32(E1000_PCIEMISC, reg);
9963 }
9964 } else if (hw->mac.type == e1000_82580) {
9965 u32 reg = rd32(E1000_PCIEMISC);
9966
9967 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
9968 wr32(E1000_DMACR, 0);
9969 }
9970}
9971
/**
 *  igb_read_i2c_byte - Reads 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to read
 *  @dev_addr: device address
 *  @data: value read
 *
 *  Performs byte read operation over I2C interface at
 *  a specified device address.
 **/
9982s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9983 u8 dev_addr, u8 *data)
9984{
9985 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9986 struct i2c_client *this_client = adapter->i2c_client;
9987 s32 status;
9988 u16 swfw_mask = 0;
9989
9990 if (!this_client)
9991 return E1000_ERR_I2C;
9992
9993 swfw_mask = E1000_SWFW_PHY0_SM;
9994
9995 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9996 return E1000_ERR_SWFW_SYNC;
9997
9998 status = i2c_smbus_read_byte_data(this_client, byte_offset);
9999 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
10000
	if (status < 0)
		return E1000_ERR_I2C;

	*data = status;
	return 0;
10007}
10008
/**
 *  igb_write_i2c_byte - Writes 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to write
 *  @dev_addr: device address
 *  @data: value to write
 *
 *  Performs byte write operation over I2C interface at
 *  a specified device address.
 **/
10019s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
10020 u8 dev_addr, u8 data)
10021{
10022 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
10023 struct i2c_client *this_client = adapter->i2c_client;
10024 s32 status;
10025 u16 swfw_mask = E1000_SWFW_PHY0_SM;
10026
10027 if (!this_client)
10028 return E1000_ERR_I2C;
10029
10030 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
10031 return E1000_ERR_SWFW_SYNC;
10032 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
10033 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
10034
	if (status)
		return E1000_ERR_I2C;

	return 0;
10040}
10041
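/* Close the interface (if running), rebuild the interrupt/queue scheme and
 * reopen it; typically used after the queue count changes.
 */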
10042int igb_reinit_queues(struct igb_adapter *adapter)
10043{
10044 struct net_device *netdev = adapter->netdev;
10045 struct pci_dev *pdev = adapter->pdev;
10046 int err = 0;
10047
10048 if (netif_running(netdev))
10049 igb_close(netdev);
10050
10051 igb_reset_interrupt_capability(adapter);
10052
10053 if (igb_init_interrupt_scheme(adapter, true)) {
10054 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
10055 return -ENOMEM;
10056 }
10057
10058 if (netif_running(netdev))
10059 err = igb_open(netdev);
10060
10061 return err;
10062}
10063
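/* Remove all ethtool NFC and cls_flower filters from hardware (the software
 * lists are kept so the filters can be restored later).
 */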
10064static void igb_nfc_filter_exit(struct igb_adapter *adapter)
10065{
10066 struct igb_nfc_filter *rule;
10067
10068 spin_lock(&adapter->nfc_lock);
10069
10070 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
10071 igb_erase_filter(adapter, rule);
10072
10073 hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
10074 igb_erase_filter(adapter, rule);
10075
10076 spin_unlock(&adapter->nfc_lock);
10077}
10078
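/* Re-program the ethtool NFC filter list into hardware after a reset. */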
10079static void igb_nfc_filter_restore(struct igb_adapter *adapter)
10080{
10081 struct igb_nfc_filter *rule;
10082
10083 spin_lock(&adapter->nfc_lock);
10084
10085 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
10086 igb_add_filter(adapter, rule);
10087
10088 spin_unlock(&adapter->nfc_lock);
10089}
10090
10091