// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2014 Intel Corporation. */
3
4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5
6#include <linux/module.h>
7#include <linux/types.h>
8#include <linux/init.h>
9#include <linux/bitops.h>
10#include <linux/vmalloc.h>
11#include <linux/pagemap.h>
12#include <linux/netdevice.h>
13#include <linux/ipv6.h>
14#include <linux/slab.h>
15#include <net/checksum.h>
16#include <net/ip6_checksum.h>
17#include <net/pkt_sched.h>
18#include <net/pkt_cls.h>
19#include <linux/net_tstamp.h>
20#include <linux/mii.h>
21#include <linux/ethtool.h>
22#include <linux/if.h>
23#include <linux/if_vlan.h>
24#include <linux/pci.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/ip.h>
28#include <linux/tcp.h>
29#include <linux/sctp.h>
30#include <linux/if_ether.h>
31#include <linux/aer.h>
32#include <linux/prefetch.h>
33#include <linux/bpf.h>
34#include <linux/bpf_trace.h>
35#include <linux/pm_runtime.h>
36#include <linux/etherdevice.h>
37#ifdef CONFIG_IGB_DCA
38#include <linux/dca.h>
39#endif
40#include <linux/i2c.h>
41#include "igb.h"
42
43enum queue_mode {
44 QUEUE_MODE_STRICT_PRIORITY,
45 QUEUE_MODE_STREAM_RESERVATION,
46};
47
48enum tx_queue_prio {
49 TX_QUEUE_PRIO_HIGH,
50 TX_QUEUE_PRIO_LOW,
51};
52
53char igb_driver_name[] = "igb";
54static const char igb_driver_string[] =
55 "Intel(R) Gigabit Ethernet Network Driver";
56static const char igb_copyright[] =
57 "Copyright (c) 2007-2014 Intel Corporation.";
58
59static const struct e1000_info *igb_info_tbl[] = {
60 [board_82575] = &e1000_82575_info,
61};
62
63static const struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
83 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
84 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
86 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
87 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
88 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
89 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
90 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
91 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
92 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
93 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
94 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
95 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
96 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
97 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
98 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
99
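	/* required last entry */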
100 {0, }
101};
102
103MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
104
105static int igb_setup_all_tx_resources(struct igb_adapter *);
106static int igb_setup_all_rx_resources(struct igb_adapter *);
107static void igb_free_all_tx_resources(struct igb_adapter *);
108static void igb_free_all_rx_resources(struct igb_adapter *);
109static void igb_setup_mrqc(struct igb_adapter *);
110static int igb_probe(struct pci_dev *, const struct pci_device_id *);
111static void igb_remove(struct pci_dev *pdev);
112static int igb_sw_init(struct igb_adapter *);
113int igb_open(struct net_device *);
114int igb_close(struct net_device *);
115static void igb_configure(struct igb_adapter *);
116static void igb_configure_tx(struct igb_adapter *);
117static void igb_configure_rx(struct igb_adapter *);
118static void igb_clean_all_tx_rings(struct igb_adapter *);
119static void igb_clean_all_rx_rings(struct igb_adapter *);
120static void igb_clean_tx_ring(struct igb_ring *);
121static void igb_clean_rx_ring(struct igb_ring *);
122static void igb_set_rx_mode(struct net_device *);
123static void igb_update_phy_info(struct timer_list *);
124static void igb_watchdog(struct timer_list *);
125static void igb_watchdog_task(struct work_struct *);
126static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
127static void igb_get_stats64(struct net_device *dev,
128 struct rtnl_link_stats64 *stats);
129static int igb_change_mtu(struct net_device *, int);
130static int igb_set_mac(struct net_device *, void *);
131static void igb_set_uta(struct igb_adapter *adapter, bool set);
132static irqreturn_t igb_intr(int irq, void *);
133static irqreturn_t igb_intr_msi(int irq, void *);
134static irqreturn_t igb_msix_other(int irq, void *);
135static irqreturn_t igb_msix_ring(int irq, void *);
136#ifdef CONFIG_IGB_DCA
137static void igb_update_dca(struct igb_q_vector *);
138static void igb_setup_dca(struct igb_adapter *);
139#endif
140static int igb_poll(struct napi_struct *, int);
141static bool igb_clean_tx_irq(struct igb_q_vector *, int);
142static int igb_clean_rx_irq(struct igb_q_vector *, int);
143static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
144static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
145static void igb_reset_task(struct work_struct *);
146static void igb_vlan_mode(struct net_device *netdev,
147 netdev_features_t features);
148static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
149static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
150static void igb_restore_vlan(struct igb_adapter *);
151static void igb_rar_set_index(struct igb_adapter *, u32);
152static void igb_ping_all_vfs(struct igb_adapter *);
153static void igb_msg_task(struct igb_adapter *);
154static void igb_vmm_control(struct igb_adapter *);
155static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
156static void igb_flush_mac_table(struct igb_adapter *);
157static int igb_available_rars(struct igb_adapter *, u8);
158static void igb_set_default_mac_filter(struct igb_adapter *);
159static int igb_uc_sync(struct net_device *, const unsigned char *);
160static int igb_uc_unsync(struct net_device *, const unsigned char *);
161static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
162static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
163static int igb_ndo_set_vf_vlan(struct net_device *netdev,
164 int vf, u16 vlan, u8 qos, __be16 vlan_proto);
165static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
166static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
167 bool setting);
168static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
169 bool setting);
170static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
171 struct ifla_vf_info *ivi);
172static void igb_check_vf_rate_limit(struct igb_adapter *);
173static void igb_nfc_filter_exit(struct igb_adapter *adapter);
174static void igb_nfc_filter_restore(struct igb_adapter *adapter);
175
176#ifdef CONFIG_PCI_IOV
177static int igb_vf_configure(struct igb_adapter *adapter, int vf);
178static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
179static int igb_disable_sriov(struct pci_dev *dev);
180static int igb_pci_disable_sriov(struct pci_dev *dev);
181#endif
182
183static int igb_suspend(struct device *);
184static int igb_resume(struct device *);
185static int igb_runtime_suspend(struct device *dev);
186static int igb_runtime_resume(struct device *dev);
187static int igb_runtime_idle(struct device *dev);
188static const struct dev_pm_ops igb_pm_ops = {
189 SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
190 SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
191 igb_runtime_idle)
192};
193static void igb_shutdown(struct pci_dev *);
194static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
195#ifdef CONFIG_IGB_DCA
196static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
197static struct notifier_block dca_notifier = {
198 .notifier_call = igb_notify_dca,
199 .next = NULL,
200 .priority = 0
201};
202#endif
203#ifdef CONFIG_PCI_IOV
204static unsigned int max_vfs;
205module_param(max_vfs, uint, 0);
206MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
207#endif
208
209static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
210 pci_channel_state_t);
211static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
212static void igb_io_resume(struct pci_dev *);
213
214static const struct pci_error_handlers igb_err_handler = {
215 .error_detected = igb_io_error_detected,
216 .slot_reset = igb_io_slot_reset,
217 .resume = igb_io_resume,
218};
219
220static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
221
222static struct pci_driver igb_driver = {
223 .name = igb_driver_name,
224 .id_table = igb_pci_tbl,
225 .probe = igb_probe,
226 .remove = igb_remove,
227#ifdef CONFIG_PM
228 .driver.pm = &igb_pm_ops,
229#endif
230 .shutdown = igb_shutdown,
231 .sriov_configure = igb_pci_sriov_configure,
232 .err_handler = &igb_err_handler
233};
234
235MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
236MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
237MODULE_LICENSE("GPL v2");
238
239#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
240static int debug = -1;
241module_param(debug, int, 0);
242MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
243
244struct igb_reg_info {
245 u32 ofs;
246 char *name;
247};
248
249static const struct igb_reg_info igb_reg_info_tbl[] = {
250
251
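	/* General Registers */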
252 {E1000_CTRL, "CTRL"},
253 {E1000_STATUS, "STATUS"},
254 {E1000_CTRL_EXT, "CTRL_EXT"},
255
256
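	/* Interrupt Registers */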
257 {E1000_ICR, "ICR"},
258
259
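	/* Rx Registers */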
260 {E1000_RCTL, "RCTL"},
261 {E1000_RDLEN(0), "RDLEN"},
262 {E1000_RDH(0), "RDH"},
263 {E1000_RDT(0), "RDT"},
264 {E1000_RXDCTL(0), "RXDCTL"},
265 {E1000_RDBAL(0), "RDBAL"},
266 {E1000_RDBAH(0), "RDBAH"},
267
268
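	/* Tx Registers */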
269 {E1000_TCTL, "TCTL"},
270 {E1000_TDBAL(0), "TDBAL"},
271 {E1000_TDBAH(0), "TDBAH"},
272 {E1000_TDLEN(0), "TDLEN"},
273 {E1000_TDH(0), "TDH"},
274 {E1000_TDT(0), "TDT"},
275 {E1000_TXDCTL(0), "TXDCTL"},
276 {E1000_TDFH, "TDFH"},
277 {E1000_TDFT, "TDFT"},
278 {E1000_TDFHS, "TDFHS"},
279 {E1000_TDFPC, "TDFPC"},
280
281
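	/* List Terminator */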
282 {}
283};
284
285
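/* igb_regdump - register printout routine */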
286static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
287{
288 int n = 0;
289 char rname[16];
290 u32 regs[8];
291
292 switch (reginfo->ofs) {
293 case E1000_RDLEN(0):
294 for (n = 0; n < 4; n++)
295 regs[n] = rd32(E1000_RDLEN(n));
296 break;
297 case E1000_RDH(0):
298 for (n = 0; n < 4; n++)
299 regs[n] = rd32(E1000_RDH(n));
300 break;
301 case E1000_RDT(0):
302 for (n = 0; n < 4; n++)
303 regs[n] = rd32(E1000_RDT(n));
304 break;
305 case E1000_RXDCTL(0):
306 for (n = 0; n < 4; n++)
307 regs[n] = rd32(E1000_RXDCTL(n));
308 break;
309 case E1000_RDBAL(0):
310 for (n = 0; n < 4; n++)
311 regs[n] = rd32(E1000_RDBAL(n));
312 break;
313 case E1000_RDBAH(0):
314 for (n = 0; n < 4; n++)
315 regs[n] = rd32(E1000_RDBAH(n));
316 break;
317 case E1000_TDBAL(0):
318 for (n = 0; n < 4; n++)
319 regs[n] = rd32(E1000_TDBAL(n));
320 break;
321 case E1000_TDBAH(0):
322 for (n = 0; n < 4; n++)
323 regs[n] = rd32(E1000_TDBAH(n));
324 break;
325 case E1000_TDLEN(0):
326 for (n = 0; n < 4; n++)
327 regs[n] = rd32(E1000_TDLEN(n));
328 break;
329 case E1000_TDH(0):
330 for (n = 0; n < 4; n++)
331 regs[n] = rd32(E1000_TDH(n));
332 break;
333 case E1000_TDT(0):
334 for (n = 0; n < 4; n++)
335 regs[n] = rd32(E1000_TDT(n));
336 break;
337 case E1000_TXDCTL(0):
338 for (n = 0; n < 4; n++)
339 regs[n] = rd32(E1000_TXDCTL(n));
340 break;
341 default:
342 pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
343 return;
344 }
345
346 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
347 pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
348 regs[2], regs[3]);
349}
350
351
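/* igb_dump - Print registers, Tx-rings and Rx-rings */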
352static void igb_dump(struct igb_adapter *adapter)
353{
354 struct net_device *netdev = adapter->netdev;
355 struct e1000_hw *hw = &adapter->hw;
356 struct igb_reg_info *reginfo;
357 struct igb_ring *tx_ring;
358 union e1000_adv_tx_desc *tx_desc;
359 struct my_u0 { u64 a; u64 b; } *u0;
360 struct igb_ring *rx_ring;
361 union e1000_adv_rx_desc *rx_desc;
362 u32 staterr;
363 u16 i, n;
364
365 if (!netif_msg_hw(adapter))
366 return;
367
368
369 if (netdev) {
370 dev_info(&adapter->pdev->dev, "Net device Info\n");
371 pr_info("Device Name state trans_start\n");
372 pr_info("%-15s %016lX %016lX\n", netdev->name,
373 netdev->state, dev_trans_start(netdev));
374 }
375
376
377 dev_info(&adapter->pdev->dev, "Register Dump\n");
378 pr_info(" Register Name Value\n");
379 for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
380 reginfo->name; reginfo++) {
381 igb_regdump(hw, reginfo);
382 }
383
384
385 if (!netdev || !netif_running(netdev))
386 goto exit;
387
388 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
389 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
390 for (n = 0; n < adapter->num_tx_queues; n++) {
391 struct igb_tx_buffer *buffer_info;
392 tx_ring = adapter->tx_ring[n];
393 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
394 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
395 n, tx_ring->next_to_use, tx_ring->next_to_clean,
396 (u64)dma_unmap_addr(buffer_info, dma),
397 dma_unmap_len(buffer_info, len),
398 buffer_info->next_to_watch,
399 (u64)buffer_info->time_stamp);
400 }
401
402
403 if (!netif_msg_tx_done(adapter))
404 goto rx_ring_summary;
405
406 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
407
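	/* Advanced Transmit Descriptor layout dumped below: quadword 0 holds
	 * the buffer address [63:0]; quadword 1 packs the PAYLEN, POPTS, CC,
	 * IDX, STA, DCMD, DTYP, MAC, RSV and DTALEN fields.
	 */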
419 for (n = 0; n < adapter->num_tx_queues; n++) {
420 tx_ring = adapter->tx_ring[n];
421 pr_info("------------------------------------\n");
422 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
423 pr_info("------------------------------------\n");
424 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
425
426 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
427 const char *next_desc;
428 struct igb_tx_buffer *buffer_info;
429 tx_desc = IGB_TX_DESC(tx_ring, i);
430 buffer_info = &tx_ring->tx_buffer_info[i];
431 u0 = (struct my_u0 *)tx_desc;
432 if (i == tx_ring->next_to_use &&
433 i == tx_ring->next_to_clean)
434 next_desc = " NTC/U";
435 else if (i == tx_ring->next_to_use)
436 next_desc = " NTU";
437 else if (i == tx_ring->next_to_clean)
438 next_desc = " NTC";
439 else
440 next_desc = "";
441
442 pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
443 i, le64_to_cpu(u0->a),
444 le64_to_cpu(u0->b),
445 (u64)dma_unmap_addr(buffer_info, dma),
446 dma_unmap_len(buffer_info, len),
447 buffer_info->next_to_watch,
448 (u64)buffer_info->time_stamp,
449 buffer_info->skb, next_desc);
450
451 if (netif_msg_pktdata(adapter) && buffer_info->skb)
452 print_hex_dump(KERN_INFO, "",
453 DUMP_PREFIX_ADDRESS,
454 16, 1, buffer_info->skb->data,
455 dma_unmap_len(buffer_info, len),
456 true);
457 }
458 }
459
460
461rx_ring_summary:
462 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
463 pr_info("Queue [NTU] [NTC]\n");
464 for (n = 0; n < adapter->num_rx_queues; n++) {
465 rx_ring = adapter->rx_ring[n];
466 pr_info(" %5d %5X %5X\n",
467 n, rx_ring->next_to_use, rx_ring->next_to_clean);
468 }
469
470
471 if (!netif_msg_rx_status(adapter))
472 goto exit;
473
474 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
475
476
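	/* Advanced Receive Descriptor (Read) format: quadword 0 is the packet
	 * buffer address, quadword 1 the header buffer address (bit 0 is DD).
	 *
	 * Advanced Receive Descriptor (Write-Back) format: quadword 0 carries
	 * RSS type, packet type, header length and RSS hash/checksum;
	 * quadword 1 carries extended status (including DD), extended error,
	 * length and VLAN tag.
	 */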
497 for (n = 0; n < adapter->num_rx_queues; n++) {
498 rx_ring = adapter->rx_ring[n];
499 pr_info("------------------------------------\n");
500 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
501 pr_info("------------------------------------\n");
502 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
503 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
504
505 for (i = 0; i < rx_ring->count; i++) {
506 const char *next_desc;
507 struct igb_rx_buffer *buffer_info;
508 buffer_info = &rx_ring->rx_buffer_info[i];
509 rx_desc = IGB_RX_DESC(rx_ring, i);
510 u0 = (struct my_u0 *)rx_desc;
511 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
512
513 if (i == rx_ring->next_to_use)
514 next_desc = " NTU";
515 else if (i == rx_ring->next_to_clean)
516 next_desc = " NTC";
517 else
518 next_desc = "";
519
520 if (staterr & E1000_RXD_STAT_DD) {
521
522 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
523 "RWB", i,
524 le64_to_cpu(u0->a),
525 le64_to_cpu(u0->b),
526 next_desc);
527 } else {
528 pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
529 "R ", i,
530 le64_to_cpu(u0->a),
531 le64_to_cpu(u0->b),
532 (u64)buffer_info->dma,
533 next_desc);
534
535 if (netif_msg_pktdata(adapter) &&
536 buffer_info->dma && buffer_info->page) {
537 print_hex_dump(KERN_INFO, "",
538 DUMP_PREFIX_ADDRESS,
539 16, 1,
540 page_address(buffer_info->page) +
541 buffer_info->page_offset,
542 igb_rx_bufsz(rx_ring), true);
543 }
544 }
545 }
546 }
547
548exit:
549 return;
550}
551
552
553
554
555
556
557
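/**
 *  igb_get_i2c_data - Reads the I2C SDA data bit
 *  @data: opaque pointer to adapter struct
 *
 *  Returns the I2C data bit value
 **/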
558static int igb_get_i2c_data(void *data)
559{
560 struct igb_adapter *adapter = (struct igb_adapter *)data;
561 struct e1000_hw *hw = &adapter->hw;
562 s32 i2cctl = rd32(E1000_I2CPARAMS);
563
564 return !!(i2cctl & E1000_I2C_DATA_IN);
565}
566
567
568
569
570
571
572
573
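/**
 *  igb_set_i2c_data - Sets the I2C data bit
 *  @data: opaque pointer to adapter struct
 *  @state: I2C data value (0 or 1) to set
 *
 *  Sets the I2C data bit
 **/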
574static void igb_set_i2c_data(void *data, int state)
575{
576 struct igb_adapter *adapter = (struct igb_adapter *)data;
577 struct e1000_hw *hw = &adapter->hw;
578 s32 i2cctl = rd32(E1000_I2CPARAMS);
579
580 if (state)
581 i2cctl |= E1000_I2C_DATA_OUT;
582 else
583 i2cctl &= ~E1000_I2C_DATA_OUT;
584
585 i2cctl &= ~E1000_I2C_DATA_OE_N;
586 i2cctl |= E1000_I2C_CLK_OE_N;
587 wr32(E1000_I2CPARAMS, i2cctl);
588 wrfl();
589
590}
591
592
593
594
595
596
597
598
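/**
 *  igb_set_i2c_clk - Sets the I2C SCL clock
 *  @data: opaque pointer to adapter struct
 *  @state: state to set clock
 *
 *  Sets the I2C clock line to state
 **/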
599static void igb_set_i2c_clk(void *data, int state)
600{
601 struct igb_adapter *adapter = (struct igb_adapter *)data;
602 struct e1000_hw *hw = &adapter->hw;
603 s32 i2cctl = rd32(E1000_I2CPARAMS);
604
605 if (state) {
606 i2cctl |= E1000_I2C_CLK_OUT;
607 i2cctl &= ~E1000_I2C_CLK_OE_N;
608 } else {
609 i2cctl &= ~E1000_I2C_CLK_OUT;
610 i2cctl &= ~E1000_I2C_CLK_OE_N;
611 }
612 wr32(E1000_I2CPARAMS, i2cctl);
613 wrfl();
614}
615
616
617
618
619
620
621
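/**
 *  igb_get_i2c_clk - Gets the I2C SCL clock state
 *  @data: opaque pointer to adapter struct
 *
 *  Gets the I2C clock state
 **/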
622static int igb_get_i2c_clk(void *data)
623{
624 struct igb_adapter *adapter = (struct igb_adapter *)data;
625 struct e1000_hw *hw = &adapter->hw;
626 s32 i2cctl = rd32(E1000_I2CPARAMS);
627
628 return !!(i2cctl & E1000_I2C_CLK_IN);
629}
630
631static const struct i2c_algo_bit_data igb_i2c_algo = {
632 .setsda = igb_set_i2c_data,
633 .setscl = igb_set_i2c_clk,
634 .getsda = igb_get_i2c_data,
635 .getscl = igb_get_i2c_clk,
636 .udelay = 5,
637 .timeout = 20,
638};
639
640
641
642
643
644
645
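/**
 *  igb_get_hw_dev - return device
 *  @hw: pointer to hardware structure
 *
 *  used by hardware layer to print debugging information
 **/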
646struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
647{
648 struct igb_adapter *adapter = hw->back;
649 return adapter->netdev;
650}
651
652
653
654
655
656
657
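/**
 *  igb_init_module - Driver Registration Routine
 *
 *  igb_init_module is the first routine called when the driver is
 *  loaded. All it does is register with the PCI subsystem.
 **/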
658static int __init igb_init_module(void)
659{
660 int ret;
661
662 pr_info("%s\n", igb_driver_string);
663 pr_info("%s\n", igb_copyright);
664
665#ifdef CONFIG_IGB_DCA
666 dca_register_notify(&dca_notifier);
667#endif
668 ret = pci_register_driver(&igb_driver);
669 return ret;
670}
671
672module_init(igb_init_module);
673
674
675
676
677
678
679
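/**
 *  igb_exit_module - Driver Exit Cleanup Routine
 *
 *  igb_exit_module is called just before the driver is removed
 *  from memory.
 **/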
680static void __exit igb_exit_module(void)
681{
682#ifdef CONFIG_IGB_DCA
683 dca_unregister_notify(&dca_notifier);
684#endif
685 pci_unregister_driver(&igb_driver);
686}
687
688module_exit(igb_exit_module);
689
690#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
691
692
693
694
695
696
697
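/**
 *  igb_cache_ring_register - Descriptor ring to register mapping
 *  @adapter: board private structure to initialize
 *
 *  Once we know the feature-set enabled for the device, we'll cache
 *  the register offset the descriptor ring is assigned to.
 **/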
698static void igb_cache_ring_register(struct igb_adapter *adapter)
699{
700 int i = 0, j = 0;
701 u32 rbase_offset = adapter->vfs_allocated_count;
702
703 switch (adapter->hw.mac.type) {
704 case e1000_82576:
705
706
707
708
709
710 if (adapter->vfs_allocated_count) {
711 for (; i < adapter->rss_queues; i++)
712 adapter->rx_ring[i]->reg_idx = rbase_offset +
713 Q_IDX_82576(i);
714 }
715 fallthrough;
716 case e1000_82575:
717 case e1000_82580:
718 case e1000_i350:
719 case e1000_i354:
720 case e1000_i210:
721 case e1000_i211:
722 default:
723 for (; i < adapter->num_rx_queues; i++)
724 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
725 for (; j < adapter->num_tx_queues; j++)
726 adapter->tx_ring[j]->reg_idx = rbase_offset + j;
727 break;
728 }
729}
730
731u32 igb_rd32(struct e1000_hw *hw, u32 reg)
732{
733 struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
734 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
735 u32 value = 0;
736
737 if (E1000_REMOVED(hw_addr))
738 return ~value;
739
740 value = readl(&hw_addr[reg]);
741
742
743 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
744 struct net_device *netdev = igb->netdev;
745 hw->hw_addr = NULL;
746 netdev_err(netdev, "PCIe link lost\n");
747 WARN(pci_device_is_present(igb->pdev),
748 "igb: Failed to read reg 0x%x!\n", reg);
749 }
750
751 return value;
752}
753
754
755
756
757
758
759
760
761
762
763
764
765
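/**
 *  igb_write_ivar - configure ivar for given MSI-X vector
 *  @hw: pointer to the HW structure
 *  @msix_vector: vector number we are allocating to a given ring
 *  @index: row index of IVAR register to write within IVAR table
 *  @offset: column offset in IVAR, should be multiple of 8
 *
 *  The IVAR table consists of 2 columns, each containing a cause
 *  allocation for an Rx and Tx ring, and a variable number of rows
 *  depending on the number of queues supported.
 **/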
766static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
767 int index, int offset)
768{
769 u32 ivar = array_rd32(E1000_IVAR0, index);
770
771
772 ivar &= ~((u32)0xFF << offset);
773
774
775 ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
776
777 array_wr32(E1000_IVAR0, index, ivar);
778}
779
780#define IGB_N0_QUEUE -1
781static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
782{
783 struct igb_adapter *adapter = q_vector->adapter;
784 struct e1000_hw *hw = &adapter->hw;
785 int rx_queue = IGB_N0_QUEUE;
786 int tx_queue = IGB_N0_QUEUE;
787 u32 msixbm = 0;
788
789 if (q_vector->rx.ring)
790 rx_queue = q_vector->rx.ring->reg_idx;
791 if (q_vector->tx.ring)
792 tx_queue = q_vector->tx.ring->reg_idx;
793
794 switch (hw->mac.type) {
795 case e1000_82575:
796
797
798
799
800
801 if (rx_queue > IGB_N0_QUEUE)
802 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
803 if (tx_queue > IGB_N0_QUEUE)
804 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
805 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
806 msixbm |= E1000_EIMS_OTHER;
807 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
808 q_vector->eims_value = msixbm;
809 break;
810 case e1000_82576:
811
812
813
814
815
816 if (rx_queue > IGB_N0_QUEUE)
817 igb_write_ivar(hw, msix_vector,
818 rx_queue & 0x7,
819 (rx_queue & 0x8) << 1);
820 if (tx_queue > IGB_N0_QUEUE)
821 igb_write_ivar(hw, msix_vector,
822 tx_queue & 0x7,
823 ((tx_queue & 0x8) << 1) + 8);
824 q_vector->eims_value = BIT(msix_vector);
825 break;
826 case e1000_82580:
827 case e1000_i350:
828 case e1000_i354:
829 case e1000_i210:
830 case e1000_i211:
831
832
833
834
835
836
837 if (rx_queue > IGB_N0_QUEUE)
838 igb_write_ivar(hw, msix_vector,
839 rx_queue >> 1,
840 (rx_queue & 0x1) << 4);
841 if (tx_queue > IGB_N0_QUEUE)
842 igb_write_ivar(hw, msix_vector,
843 tx_queue >> 1,
844 ((tx_queue & 0x1) << 4) + 8);
845 q_vector->eims_value = BIT(msix_vector);
846 break;
847 default:
848 BUG();
849 break;
850 }
851
852
853 adapter->eims_enable_mask |= q_vector->eims_value;
854
855
856 q_vector->set_itr = 1;
857}
858
859
860
861
862
863
864
865
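/**
 *  igb_configure_msix - Configure MSI-X hardware
 *  @adapter: board private structure to initialize
 *
 *  igb_configure_msix sets up the hardware to properly
 *  generate MSI-X interrupts.
 **/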
866static void igb_configure_msix(struct igb_adapter *adapter)
867{
868 u32 tmp;
869 int i, vector = 0;
870 struct e1000_hw *hw = &adapter->hw;
871
872 adapter->eims_enable_mask = 0;
873
874
875 switch (hw->mac.type) {
876 case e1000_82575:
877 tmp = rd32(E1000_CTRL_EXT);
878
879 tmp |= E1000_CTRL_EXT_PBA_CLR;
880
881
882 tmp |= E1000_CTRL_EXT_EIAME;
883 tmp |= E1000_CTRL_EXT_IRCA;
884
885 wr32(E1000_CTRL_EXT, tmp);
886
887
888 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
889 adapter->eims_other = E1000_EIMS_OTHER;
890
891 break;
892
893 case e1000_82576:
894 case e1000_82580:
895 case e1000_i350:
896 case e1000_i354:
897 case e1000_i210:
898 case e1000_i211:
899
900
901
902 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
903 E1000_GPIE_PBA | E1000_GPIE_EIAME |
904 E1000_GPIE_NSICR);
905
906
907 adapter->eims_other = BIT(vector);
908 tmp = (vector++ | E1000_IVAR_VALID) << 8;
909
910 wr32(E1000_IVAR_MISC, tmp);
911 break;
912 default:
913
914 break;
915 }
916
917 adapter->eims_enable_mask |= adapter->eims_other;
918
919 for (i = 0; i < adapter->num_q_vectors; i++)
920 igb_assign_vector(adapter->q_vector[i], vector++);
921
922 wrfl();
923}
924
925
926
927
928
929
930
931
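/**
 *  igb_request_msix - Initialize MSI-X interrupts
 *  @adapter: board private structure to initialize
 *
 *  igb_request_msix allocates MSI-X vectors and requests interrupts from the
 *  kernel.
 **/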
932static int igb_request_msix(struct igb_adapter *adapter)
933{
934 struct net_device *netdev = adapter->netdev;
935 int i, err = 0, vector = 0, free_vector = 0;
936
937 err = request_irq(adapter->msix_entries[vector].vector,
938 igb_msix_other, 0, netdev->name, adapter);
939 if (err)
940 goto err_out;
941
942 for (i = 0; i < adapter->num_q_vectors; i++) {
943 struct igb_q_vector *q_vector = adapter->q_vector[i];
944
945 vector++;
946
947 q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
948
949 if (q_vector->rx.ring && q_vector->tx.ring)
950 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
951 q_vector->rx.ring->queue_index);
952 else if (q_vector->tx.ring)
953 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
954 q_vector->tx.ring->queue_index);
955 else if (q_vector->rx.ring)
956 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
957 q_vector->rx.ring->queue_index);
958 else
959 sprintf(q_vector->name, "%s-unused", netdev->name);
960
961 err = request_irq(adapter->msix_entries[vector].vector,
962 igb_msix_ring, 0, q_vector->name,
963 q_vector);
964 if (err)
965 goto err_free;
966 }
967
968 igb_configure_msix(adapter);
969 return 0;
970
971err_free:
972
973 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
974
975 vector--;
976 for (i = 0; i < vector; i++) {
977 free_irq(adapter->msix_entries[free_vector++].vector,
978 adapter->q_vector[i]);
979 }
980err_out:
981 return err;
982}
983
984
985
986
987
988
989
990
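/**
 *  igb_free_q_vector - Free memory allocated for specific interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_idx: Index of vector to be freed
 *
 *  This function frees the memory allocated to the q_vector.
 **/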
991static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
992{
993 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
994
995 adapter->q_vector[v_idx] = NULL;
996
997
998
999
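	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */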
1000 if (q_vector)
1001 kfree_rcu(q_vector, rcu);
1002}
1003
1004
1005
1006
1007
1008
1009
1010
1011
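/**
 *  igb_reset_q_vector - Reset config for interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_idx: Index of vector to be reset
 *
 *  If NAPI is enabled it will delete any references to the
 *  NAPI struct. This is preparation for igb_free_q_vector.
 **/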
1012static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1013{
1014 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1015
1016
1017
1018
1019 if (!q_vector)
1020 return;
1021
1022 if (q_vector->tx.ring)
1023 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1024
1025 if (q_vector->rx.ring)
1026 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
1027
1028 netif_napi_del(&q_vector->napi);
1029
1030}
1031
1032static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1033{
1034 int v_idx = adapter->num_q_vectors;
1035
1036 if (adapter->flags & IGB_FLAG_HAS_MSIX)
1037 pci_disable_msix(adapter->pdev);
1038 else if (adapter->flags & IGB_FLAG_HAS_MSI)
1039 pci_disable_msi(adapter->pdev);
1040
1041 while (v_idx--)
1042 igb_reset_q_vector(adapter, v_idx);
1043}
1044
1045
1046
1047
1048
1049
1050
1051
1052
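/**
 *  igb_free_q_vectors - Free memory allocated for interrupt vectors
 *  @adapter: board private structure to initialize
 *
 *  This function frees the memory allocated to the q_vectors.  In addition if
 *  NAPI is enabled it will delete any references to the NAPI struct prior
 *  to freeing the q_vector.
 **/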
1053static void igb_free_q_vectors(struct igb_adapter *adapter)
1054{
1055 int v_idx = adapter->num_q_vectors;
1056
1057 adapter->num_tx_queues = 0;
1058 adapter->num_rx_queues = 0;
1059 adapter->num_q_vectors = 0;
1060
1061 while (v_idx--) {
1062 igb_reset_q_vector(adapter, v_idx);
1063 igb_free_q_vector(adapter, v_idx);
1064 }
1065}
1066
1067
1068
1069
1070
1071
1072
1073
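/**
 *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *  @adapter: board private structure to initialize
 *
 *  This function resets the device so that it has 0 Rx queues, Tx queues, and
 *  MSI-X interrupts allocated.
 **/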
1074static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1075{
1076 igb_free_q_vectors(adapter);
1077 igb_reset_interrupt_capability(adapter);
1078}
1079
1080
1081
1082
1083
1084
1085
1086
1087
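/**
 *  igb_set_interrupt_capability - set MSI or MSI-X if supported
 *  @adapter: board private structure to initialize
 *  @msix: boolean value of MSIX capability
 *
 *  Attempt to configure interrupts using the best available
 *  capabilities of the hardware and kernel.
 **/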
1088static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1089{
1090 int err;
1091 int numvecs, i;
1092
1093 if (!msix)
1094 goto msi_only;
1095 adapter->flags |= IGB_FLAG_HAS_MSIX;
1096
1097
1098 adapter->num_rx_queues = adapter->rss_queues;
1099 if (adapter->vfs_allocated_count)
1100 adapter->num_tx_queues = 1;
1101 else
1102 adapter->num_tx_queues = adapter->rss_queues;
1103
1104
1105 numvecs = adapter->num_rx_queues;
1106
1107
1108 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1109 numvecs += adapter->num_tx_queues;
1110
1111
1112 adapter->num_q_vectors = numvecs;
1113
1114
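	/* add 1 vector for link status interrupts */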
1115 numvecs++;
1116 for (i = 0; i < numvecs; i++)
1117 adapter->msix_entries[i].entry = i;
1118
1119 err = pci_enable_msix_range(adapter->pdev,
1120 adapter->msix_entries,
1121 numvecs,
1122 numvecs);
1123 if (err > 0)
1124 return;
1125
1126 igb_reset_interrupt_capability(adapter);
1127
1128
1129msi_only:
1130 adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1131#ifdef CONFIG_PCI_IOV
1132
1133 if (adapter->vf_data) {
1134 struct e1000_hw *hw = &adapter->hw;
1135
1136 pci_disable_sriov(adapter->pdev);
1137 msleep(500);
1138
1139 kfree(adapter->vf_mac_list);
1140 adapter->vf_mac_list = NULL;
1141 kfree(adapter->vf_data);
1142 adapter->vf_data = NULL;
1143 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1144 wrfl();
1145 msleep(100);
1146 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1147 }
1148#endif
1149 adapter->vfs_allocated_count = 0;
1150 adapter->rss_queues = 1;
1151 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1152 adapter->num_rx_queues = 1;
1153 adapter->num_tx_queues = 1;
1154 adapter->num_q_vectors = 1;
1155 if (!pci_enable_msi(adapter->pdev))
1156 adapter->flags |= IGB_FLAG_HAS_MSI;
1157}
1158
1159static void igb_add_ring(struct igb_ring *ring,
1160 struct igb_ring_container *head)
1161{
1162 head->ring = ring;
1163 head->count++;
1164}
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
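/**
 *  igb_alloc_q_vector - Allocate memory for a single interrupt vector
 *  @adapter: board private structure to initialize
 *  @v_count: q_vectors allocated on adapter, used for ring interleaving
 *  @v_idx: index of vector in adapter struct
 *  @txr_count: total number of Tx rings to allocate
 *  @txr_idx: index of first Tx ring to allocate
 *  @rxr_count: total number of Rx rings to allocate
 *  @rxr_idx: index of first Rx ring to allocate
 *
 *  We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/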
1178static int igb_alloc_q_vector(struct igb_adapter *adapter,
1179 int v_count, int v_idx,
1180 int txr_count, int txr_idx,
1181 int rxr_count, int rxr_idx)
1182{
1183 struct igb_q_vector *q_vector;
1184 struct igb_ring *ring;
1185 int ring_count;
1186 size_t size;
1187
1188
1189 if (txr_count > 1 || rxr_count > 1)
1190 return -ENOMEM;
1191
1192 ring_count = txr_count + rxr_count;
1193 size = struct_size(q_vector, ring, ring_count);
1194
1195
1196 q_vector = adapter->q_vector[v_idx];
1197 if (!q_vector) {
1198 q_vector = kzalloc(size, GFP_KERNEL);
1199 } else if (size > ksize(q_vector)) {
1200 kfree_rcu(q_vector, rcu);
1201 q_vector = kzalloc(size, GFP_KERNEL);
1202 } else {
1203 memset(q_vector, 0, size);
1204 }
1205 if (!q_vector)
1206 return -ENOMEM;
1207
1208
1209 netif_napi_add(adapter->netdev, &q_vector->napi,
1210 igb_poll, 64);
1211
1212
1213 adapter->q_vector[v_idx] = q_vector;
1214 q_vector->adapter = adapter;
1215
1216
1217 q_vector->tx.work_limit = adapter->tx_work_limit;
1218
1219
1220 q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
1221 q_vector->itr_val = IGB_START_ITR;
1222
1223
1224 ring = q_vector->ring;
1225
1226
1227 if (rxr_count) {
1228
1229 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1230 q_vector->itr_val = adapter->rx_itr_setting;
1231 } else {
1232
1233 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1234 q_vector->itr_val = adapter->tx_itr_setting;
1235 }
1236
1237 if (txr_count) {
1238
1239 ring->dev = &adapter->pdev->dev;
1240 ring->netdev = adapter->netdev;
1241
1242
1243 ring->q_vector = q_vector;
1244
1245
1246 igb_add_ring(ring, &q_vector->tx);
1247
1248
1249 if (adapter->hw.mac.type == e1000_82575)
1250 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1251
1252
1253 ring->count = adapter->tx_ring_count;
1254 ring->queue_index = txr_idx;
1255
1256 ring->cbs_enable = false;
1257 ring->idleslope = 0;
1258 ring->sendslope = 0;
1259 ring->hicredit = 0;
1260 ring->locredit = 0;
1261
1262 u64_stats_init(&ring->tx_syncp);
1263 u64_stats_init(&ring->tx_syncp2);
1264
1265
1266 adapter->tx_ring[txr_idx] = ring;
1267
1268
1269 ring++;
1270 }
1271
1272 if (rxr_count) {
1273
1274 ring->dev = &adapter->pdev->dev;
1275 ring->netdev = adapter->netdev;
1276
1277
1278 ring->q_vector = q_vector;
1279
1280
1281 igb_add_ring(ring, &q_vector->rx);
1282
1283
1284 if (adapter->hw.mac.type >= e1000_82576)
1285 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1286
1287
1288
1289
1290 if (adapter->hw.mac.type >= e1000_i350)
1291 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1292
1293
1294 ring->count = adapter->rx_ring_count;
1295 ring->queue_index = rxr_idx;
1296
1297 u64_stats_init(&ring->rx_syncp);
1298
1299
1300 adapter->rx_ring[rxr_idx] = ring;
1301 }
1302
1303 return 0;
1304}
1305
1306
1307
1308
1309
1310
1311
1312
1313
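/**
 *  igb_alloc_q_vectors - Allocate memory for interrupt vectors
 *  @adapter: board private structure to initialize
 *
 *  We allocate one q_vector per queue interrupt.  If allocation fails we
 *  return -ENOMEM.
 **/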
1314static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1315{
1316 int q_vectors = adapter->num_q_vectors;
1317 int rxr_remaining = adapter->num_rx_queues;
1318 int txr_remaining = adapter->num_tx_queues;
1319 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1320 int err;
1321
1322 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1323 for (; rxr_remaining; v_idx++) {
1324 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1325 0, 0, 1, rxr_idx);
1326
1327 if (err)
1328 goto err_out;
1329
1330
1331 rxr_remaining--;
1332 rxr_idx++;
1333 }
1334 }
1335
1336 for (; v_idx < q_vectors; v_idx++) {
1337 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1338 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1339
1340 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1341 tqpv, txr_idx, rqpv, rxr_idx);
1342
1343 if (err)
1344 goto err_out;
1345
1346
1347 rxr_remaining -= rqpv;
1348 txr_remaining -= tqpv;
1349 rxr_idx++;
1350 txr_idx++;
1351 }
1352
1353 return 0;
1354
1355err_out:
1356 adapter->num_tx_queues = 0;
1357 adapter->num_rx_queues = 0;
1358 adapter->num_q_vectors = 0;
1359
1360 while (v_idx--)
1361 igb_free_q_vector(adapter, v_idx);
1362
1363 return -ENOMEM;
1364}
1365
1366
1367
1368
1369
1370
1371
1372
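/**
 *  igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *  @adapter: board private structure to initialize
 *  @msix: boolean value of MSIX capability
 *
 *  This function initializes the interrupts and allocates all of the queues.
 **/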
1373static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1374{
1375 struct pci_dev *pdev = adapter->pdev;
1376 int err;
1377
1378 igb_set_interrupt_capability(adapter, msix);
1379
1380 err = igb_alloc_q_vectors(adapter);
1381 if (err) {
1382 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1383 goto err_alloc_q_vectors;
1384 }
1385
1386 igb_cache_ring_register(adapter);
1387
1388 return 0;
1389
1390err_alloc_q_vectors:
1391 igb_reset_interrupt_capability(adapter);
1392 return err;
1393}
1394
1395
1396
1397
1398
1399
1400
1401
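/**
 *  igb_request_irq - initialize interrupts
 *  @adapter: board private structure to initialize
 *
 *  Attempts to configure interrupts using the best available
 *  capabilities of the hardware and kernel.
 **/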
1402static int igb_request_irq(struct igb_adapter *adapter)
1403{
1404 struct net_device *netdev = adapter->netdev;
1405 struct pci_dev *pdev = adapter->pdev;
1406 int err = 0;
1407
1408 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1409 err = igb_request_msix(adapter);
1410 if (!err)
1411 goto request_done;
1412
1413 igb_free_all_tx_resources(adapter);
1414 igb_free_all_rx_resources(adapter);
1415
1416 igb_clear_interrupt_scheme(adapter);
1417 err = igb_init_interrupt_scheme(adapter, false);
1418 if (err)
1419 goto request_done;
1420
1421 igb_setup_all_tx_resources(adapter);
1422 igb_setup_all_rx_resources(adapter);
1423 igb_configure(adapter);
1424 }
1425
1426 igb_assign_vector(adapter->q_vector[0], 0);
1427
1428 if (adapter->flags & IGB_FLAG_HAS_MSI) {
1429 err = request_irq(pdev->irq, igb_intr_msi, 0,
1430 netdev->name, adapter);
1431 if (!err)
1432 goto request_done;
1433
1434
1435 igb_reset_interrupt_capability(adapter);
1436 adapter->flags &= ~IGB_FLAG_HAS_MSI;
1437 }
1438
1439 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
1440 netdev->name, adapter);
1441
1442 if (err)
1443 dev_err(&pdev->dev, "Error %d getting interrupt\n",
1444 err);
1445
1446request_done:
1447 return err;
1448}
1449
1450static void igb_free_irq(struct igb_adapter *adapter)
1451{
1452 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1453 int vector = 0, i;
1454
1455 free_irq(adapter->msix_entries[vector++].vector, adapter);
1456
1457 for (i = 0; i < adapter->num_q_vectors; i++)
1458 free_irq(adapter->msix_entries[vector++].vector,
1459 adapter->q_vector[i]);
1460 } else {
1461 free_irq(adapter->pdev->irq, adapter);
1462 }
1463}
1464
1465
1466
1467
1468
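/**
 *  igb_irq_disable - Mask off interrupt generation on the NIC
 *  @adapter: board private structure
 **/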
1469static void igb_irq_disable(struct igb_adapter *adapter)
1470{
1471 struct e1000_hw *hw = &adapter->hw;
1472
1473
1474
1475
1476
1477 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1478 u32 regval = rd32(E1000_EIAM);
1479
1480 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1481 wr32(E1000_EIMC, adapter->eims_enable_mask);
1482 regval = rd32(E1000_EIAC);
1483 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
1484 }
1485
1486 wr32(E1000_IAM, 0);
1487 wr32(E1000_IMC, ~0);
1488 wrfl();
1489 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1490 int i;
1491
1492 for (i = 0; i < adapter->num_q_vectors; i++)
1493 synchronize_irq(adapter->msix_entries[i].vector);
1494 } else {
1495 synchronize_irq(adapter->pdev->irq);
1496 }
1497}
1498
1499
1500
1501
1502
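/**
 *  igb_irq_enable - Enable default interrupt generation settings
 *  @adapter: board private structure
 **/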
1503static void igb_irq_enable(struct igb_adapter *adapter)
1504{
1505 struct e1000_hw *hw = &adapter->hw;
1506
1507 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1508 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1509 u32 regval = rd32(E1000_EIAC);
1510
1511 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1512 regval = rd32(E1000_EIAM);
1513 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
1514 wr32(E1000_EIMS, adapter->eims_enable_mask);
1515 if (adapter->vfs_allocated_count) {
1516 wr32(E1000_MBVFIMR, 0xFF);
1517 ims |= E1000_IMS_VMMB;
1518 }
1519 wr32(E1000_IMS, ims);
1520 } else {
1521 wr32(E1000_IMS, IMS_ENABLE_MASK |
1522 E1000_IMS_DRSTA);
1523 wr32(E1000_IAM, IMS_ENABLE_MASK |
1524 E1000_IMS_DRSTA);
1525 }
1526}
1527
1528static void igb_update_mng_vlan(struct igb_adapter *adapter)
1529{
1530 struct e1000_hw *hw = &adapter->hw;
1531 u16 pf_id = adapter->vfs_allocated_count;
1532 u16 vid = adapter->hw.mng_cookie.vlan_id;
1533 u16 old_vid = adapter->mng_vlan_id;
1534
1535 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1536
1537 igb_vfta_set(hw, vid, pf_id, true, true);
1538 adapter->mng_vlan_id = vid;
1539 } else {
1540 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1541 }
1542
1543 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1544 (vid != old_vid) &&
1545 !test_bit(old_vid, adapter->active_vlans)) {
1546
1547 igb_vfta_set(hw, vid, pf_id, false, true);
1548 }
1549}
1550
1551
1552
1553
1554
1555
1556
1557
1558
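/**
 *  igb_release_hw_control - release control of the h/w to f/w
 *  @adapter: address of board private structure
 *
 *  igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 *  For ASF and Pass Through versions of f/w this means that the
 *  driver is no longer loaded.
 **/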
1559static void igb_release_hw_control(struct igb_adapter *adapter)
1560{
1561 struct e1000_hw *hw = &adapter->hw;
1562 u32 ctrl_ext;
1563
1564
1565 ctrl_ext = rd32(E1000_CTRL_EXT);
1566 wr32(E1000_CTRL_EXT,
1567 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1568}
1569
1570
1571
1572
1573
1574
1575
1576
1577
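/**
 *  igb_get_hw_control - get control of the h/w from f/w
 *  @adapter: address of board private structure
 *
 *  igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 *  For ASF and Pass Through versions of f/w this means that
 *  the driver is loaded.
 **/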
1578static void igb_get_hw_control(struct igb_adapter *adapter)
1579{
1580 struct e1000_hw *hw = &adapter->hw;
1581 u32 ctrl_ext;
1582
1583
1584 ctrl_ext = rd32(E1000_CTRL_EXT);
1585 wr32(E1000_CTRL_EXT,
1586 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1587}
1588
1589static void enable_fqtss(struct igb_adapter *adapter, bool enable)
1590{
1591 struct net_device *netdev = adapter->netdev;
1592 struct e1000_hw *hw = &adapter->hw;
1593
1594 WARN_ON(hw->mac.type != e1000_i210);
1595
1596 if (enable)
1597 adapter->flags |= IGB_FLAG_FQTSS;
1598 else
1599 adapter->flags &= ~IGB_FLAG_FQTSS;
1600
1601 if (netif_running(netdev))
1602 schedule_work(&adapter->reset_task);
1603}
1604
1605static bool is_fqtss_enabled(struct igb_adapter *adapter)
1606{
1607 return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
1608}
1609
1610static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
1611 enum tx_queue_prio prio)
1612{
1613 u32 val;
1614
1615 WARN_ON(hw->mac.type != e1000_i210);
1616 WARN_ON(queue < 0 || queue > 4);
1617
1618 val = rd32(E1000_I210_TXDCTL(queue));
1619
1620 if (prio == TX_QUEUE_PRIO_HIGH)
1621 val |= E1000_TXDCTL_PRIORITY;
1622 else
1623 val &= ~E1000_TXDCTL_PRIORITY;
1624
1625 wr32(E1000_I210_TXDCTL(queue), val);
1626}
1627
1628static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
1629{
1630 u32 val;
1631
1632 WARN_ON(hw->mac.type != e1000_i210);
1633 WARN_ON(queue < 0 || queue > 1);
1634
1635 val = rd32(E1000_I210_TQAVCC(queue));
1636
1637 if (mode == QUEUE_MODE_STREAM_RESERVATION)
1638 val |= E1000_TQAVCC_QUEUEMODE;
1639 else
1640 val &= ~E1000_TQAVCC_QUEUEMODE;
1641
1642 wr32(E1000_I210_TQAVCC(queue), val);
1643}
1644
1645static bool is_any_cbs_enabled(struct igb_adapter *adapter)
1646{
1647 int i;
1648
1649 for (i = 0; i < adapter->num_tx_queues; i++) {
1650 if (adapter->tx_ring[i]->cbs_enable)
1651 return true;
1652 }
1653
1654 return false;
1655}
1656
1657static bool is_any_txtime_enabled(struct igb_adapter *adapter)
1658{
1659 int i;
1660
1661 for (i = 0; i < adapter->num_tx_queues; i++) {
1662 if (adapter->tx_ring[i]->launchtime_enable)
1663 return true;
1664 }
1665
1666 return false;
1667}
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
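/**
 *  igb_config_tx_modes - Configure "Qav Tx mode" features on igb
 *  @adapter: pointer to adapter struct
 *  @queue: queue number
 *
 *  Configure CBS and Launchtime for a given hardware queue.
 *  Parameters are retrieved from the correct Tx ring, so
 *  igb_save_cbs_params() and igb_save_txtime_params() should be used
 *  to save the parameters before calling this function.
 **/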
1679static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
1680{
1681 struct igb_ring *ring = adapter->tx_ring[queue];
1682 struct net_device *netdev = adapter->netdev;
1683 struct e1000_hw *hw = &adapter->hw;
1684 u32 tqavcc, tqavctrl;
1685 u16 value;
1686
1687 WARN_ON(hw->mac.type != e1000_i210);
1688 WARN_ON(queue < 0 || queue > 1);
1689
1690
1691
1692
1693
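	/* If any of the Qav features is enabled, configure the queue as SR
	 * with HIGH PRIO. If none is, configure it as SP with LOW PRIO.
	 */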
1694 if (ring->cbs_enable || ring->launchtime_enable) {
1695 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1696 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1697 } else {
1698 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
1699 set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
1700 }
1701
1702
1703 if (ring->cbs_enable || queue == 0) {
1704
1705
1706
1707
1708
1709
1710
1711
1712
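		/* i210 does not allow queue 0 to be in Strict Priority mode
		 * while Qav mode is enabled, so instead of disabling strict
		 * priority mode, give queue 0 the maximum credits possible.
		 */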
1713 if (queue == 0 && !ring->cbs_enable) {
1714
1715 ring->idleslope = 1000000;
1716 ring->hicredit = ETH_FRAME_LEN;
1717 }
1718
1719
1720
1721
1722
1723 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1724 tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
1725 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1726
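		/* Set the idleSlope field of TQAVCC following the i210
		 * datasheet definition of that register:
		 *
		 *     value = idleslope * 61034 / 1000000
		 *
		 * where idleslope is the bandwidth reserved for this queue
		 * in kbps and 61034 is 2 * 0x7735, the credit granularity
		 * also used for the hicredit programming below.
		 */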
1784 value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
1785
1786 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1787 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1788 tqavcc |= value;
1789 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1790
1791 wr32(E1000_I210_TQAVHC(queue),
1792 0x80000000 + ring->hicredit * 0x7735);
1793 } else {
1794
1795
1796 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1797 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1798 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1799
1800
1801 wr32(E1000_I210_TQAVHC(queue), 0);
1802
1803
1804
1805
1806
1807 if (!is_any_cbs_enabled(adapter)) {
1808 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1809 tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
1810 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1811 }
1812 }
1813
1814
1815 if (ring->launchtime_enable) {
1816
1817
1818
1819
1820
1821
1822
1823
1824 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1825 tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
1826 E1000_TQAVCTRL_FETCHTIME_DELTA;
1827 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1828 } else {
1829
1830
1831
1832
1833 if (!is_any_txtime_enabled(adapter)) {
1834 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1835 tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
1836 tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
1837 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1838 }
1839 }
1840
1841
1842
1843
1844
1845
1846 netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
1847 ring->cbs_enable ? "enabled" : "disabled",
1848 ring->launchtime_enable ? "enabled" : "disabled",
1849 queue,
1850 ring->idleslope, ring->sendslope,
1851 ring->hicredit, ring->locredit);
1852}
1853
1854static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
1855 bool enable)
1856{
1857 struct igb_ring *ring;
1858
1859 if (queue < 0 || queue > adapter->num_tx_queues)
1860 return -EINVAL;
1861
1862 ring = adapter->tx_ring[queue];
1863 ring->launchtime_enable = enable;
1864
1865 return 0;
1866}
1867
1868static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1869 bool enable, int idleslope, int sendslope,
1870 int hicredit, int locredit)
1871{
1872 struct igb_ring *ring;
1873
1874 if (queue < 0 || queue > adapter->num_tx_queues)
1875 return -EINVAL;
1876
1877 ring = adapter->tx_ring[queue];
1878
1879 ring->cbs_enable = enable;
1880 ring->idleslope = idleslope;
1881 ring->sendslope = sendslope;
1882 ring->hicredit = hicredit;
1883 ring->locredit = locredit;
1884
1885 return 0;
1886}
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
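/**
 *  igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
 *  @adapter: pointer to adapter struct
 *
 *  Configure TQAVCTRL register switching the controller's Tx mode
 *  if FQTSS mode is enabled or disabled. Additionally, will issue
 *  a call to igb_config_tx_modes() per queue so any previously saved
 *  Qav parameters get applied.
 **/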
1897static void igb_setup_tx_mode(struct igb_adapter *adapter)
1898{
1899 struct net_device *netdev = adapter->netdev;
1900 struct e1000_hw *hw = &adapter->hw;
1901 u32 val;
1902
1903
1904 if (hw->mac.type != e1000_i210)
1905 return;
1906
1907 if (is_fqtss_enabled(adapter)) {
1908 int i, max_queue;
1909
1910
1911
1912
1913
1914 val = rd32(E1000_I210_TQAVCTRL);
1915 val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
1916 val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1917 wr32(E1000_I210_TQAVCTRL, val);
1918
1919
1920
1921
1922 val = rd32(E1000_TXPBS);
1923 val &= ~I210_TXPBSIZE_MASK;
1924 val |= I210_TXPBSIZE_PB0_6KB | I210_TXPBSIZE_PB1_6KB |
1925 I210_TXPBSIZE_PB2_6KB | I210_TXPBSIZE_PB3_6KB;
1926 wr32(E1000_TXPBS, val);
1927
1928 val = rd32(E1000_RXPBS);
1929 val &= ~I210_RXPBSIZE_MASK;
1930 val |= I210_RXPBSIZE_PB_30KB;
1931 wr32(E1000_RXPBS, val);
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
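		/* DTXMXPKTSZ caps the maximum transmit packet size and is
		 * expressed in 64-byte units; cap it at just under 4kB.
		 */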
1944 val = (4096 - 1) / 64;
1945 wr32(E1000_I210_DTXMXPKTSZ, val);
1946
1947
1948
1949
1950
1951
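		/* Re-apply any Qav parameters previously saved for the
		 * SR-capable queues.
		 */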
1952 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1953 adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1954
1955 for (i = 0; i < max_queue; i++) {
1956 igb_config_tx_modes(adapter, i);
1957 }
1958 } else {
1959 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1960 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1961 wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1962
1963 val = rd32(E1000_I210_TQAVCTRL);
1964
1965
1966
1967
1968 val &= ~E1000_TQAVCTRL_XMIT_MODE;
1969 wr32(E1000_I210_TQAVCTRL, val);
1970 }
1971
1972 netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1973 "enabled" : "disabled");
1974}
1975
1976
1977
1978
1979
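/**
 *  igb_configure - configure the hardware for RX and TX
 *  @adapter: private board structure
 **/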
1980static void igb_configure(struct igb_adapter *adapter)
1981{
1982 struct net_device *netdev = adapter->netdev;
1983 int i;
1984
1985 igb_get_hw_control(adapter);
1986 igb_set_rx_mode(netdev);
1987 igb_setup_tx_mode(adapter);
1988
1989 igb_restore_vlan(adapter);
1990
1991 igb_setup_tctl(adapter);
1992 igb_setup_mrqc(adapter);
1993 igb_setup_rctl(adapter);
1994
1995 igb_nfc_filter_restore(adapter);
1996 igb_configure_tx(adapter);
1997 igb_configure_rx(adapter);
1998
1999 igb_rx_fifo_flush_82575(&adapter->hw);
2000
2001
2002
2003
2004
2005 for (i = 0; i < adapter->num_rx_queues; i++) {
2006 struct igb_ring *ring = adapter->rx_ring[i];
2007 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
2008 }
2009}
2010
2011
2012
2013
2014
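/**
 *  igb_power_up_link - Power up the phy/serdes link
 *  @adapter: address of board private structure
 **/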
2015void igb_power_up_link(struct igb_adapter *adapter)
2016{
2017 igb_reset_phy(&adapter->hw);
2018
2019 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2020 igb_power_up_phy_copper(&adapter->hw);
2021 else
2022 igb_power_up_serdes_link_82575(&adapter->hw);
2023
2024 igb_setup_link(&adapter->hw);
2025}
2026
2027
2028
2029
2030
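/**
 *  igb_power_down_link - Power down the phy/serdes link
 *  @adapter: address of board private structure
 **/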
2031static void igb_power_down_link(struct igb_adapter *adapter)
2032{
2033 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2034 igb_power_down_phy_copper_82575(&adapter->hw);
2035 else
2036 igb_shutdown_serdes_link_82575(&adapter->hw);
2037}
2038
2039
2040
2041
2042
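/**
 *  igb_check_swap_media - Detect and switch function for Media Auto Sense
 *  @adapter: address of the board private structure
 **/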
2043static void igb_check_swap_media(struct igb_adapter *adapter)
2044{
2045 struct e1000_hw *hw = &adapter->hw;
2046 u32 ctrl_ext, connsw;
2047 bool swap_now = false;
2048
2049 ctrl_ext = rd32(E1000_CTRL_EXT);
2050 connsw = rd32(E1000_CONNSW);
2051
2052
2053
2054
2055
2056 if ((hw->phy.media_type == e1000_media_type_copper) &&
2057 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
2058 swap_now = true;
2059 } else if ((hw->phy.media_type != e1000_media_type_copper) &&
2060 !(connsw & E1000_CONNSW_SERDESD)) {
2061
2062 if (adapter->copper_tries < 4) {
2063 adapter->copper_tries++;
2064 connsw |= E1000_CONNSW_AUTOSENSE_CONF;
2065 wr32(E1000_CONNSW, connsw);
2066 return;
2067 } else {
2068 adapter->copper_tries = 0;
2069 if ((connsw & E1000_CONNSW_PHYSD) &&
2070 (!(connsw & E1000_CONNSW_PHY_PDN))) {
2071 swap_now = true;
2072 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
2073 wr32(E1000_CONNSW, connsw);
2074 }
2075 }
2076 }
2077
2078 if (!swap_now)
2079 return;
2080
2081 switch (hw->phy.media_type) {
2082 case e1000_media_type_copper:
2083 netdev_info(adapter->netdev,
2084 "MAS: changing media to fiber/serdes\n");
2085 ctrl_ext |=
2086 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2087 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2088 adapter->copper_tries = 0;
2089 break;
2090 case e1000_media_type_internal_serdes:
2091 case e1000_media_type_fiber:
2092 netdev_info(adapter->netdev,
2093 "MAS: changing media to copper\n");
2094 ctrl_ext &=
2095 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2096 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2097 break;
2098 default:
2099
2100 netdev_err(adapter->netdev,
2101 "AMS: Invalid media type found, returning\n");
2102 break;
2103 }
2104 wr32(E1000_CTRL_EXT, ctrl_ext);
2105}
2106
2107
2108
2109
2110
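/**
 *  igb_up - Open the interface and prepare it to handle traffic
 *  @adapter: board private structure
 **/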
2111int igb_up(struct igb_adapter *adapter)
2112{
2113 struct e1000_hw *hw = &adapter->hw;
2114 int i;
2115
2116
2117 igb_configure(adapter);
2118
2119 clear_bit(__IGB_DOWN, &adapter->state);
2120
2121 for (i = 0; i < adapter->num_q_vectors; i++)
2122 napi_enable(&(adapter->q_vector[i]->napi));
2123
2124 if (adapter->flags & IGB_FLAG_HAS_MSIX)
2125 igb_configure_msix(adapter);
2126 else
2127 igb_assign_vector(adapter->q_vector[0], 0);
2128
2129
2130 rd32(E1000_TSICR);
2131 rd32(E1000_ICR);
2132 igb_irq_enable(adapter);
2133
2134
2135 if (adapter->vfs_allocated_count) {
2136 u32 reg_data = rd32(E1000_CTRL_EXT);
2137
2138 reg_data |= E1000_CTRL_EXT_PFRSTD;
2139 wr32(E1000_CTRL_EXT, reg_data);
2140 }
2141
2142 netif_tx_start_all_queues(adapter->netdev);
2143
2144
2145 hw->mac.get_link_status = 1;
2146 schedule_work(&adapter->watchdog_task);
2147
2148 if ((adapter->flags & IGB_FLAG_EEE) &&
2149 (!hw->dev_spec._82575.eee_disable))
2150 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2151
2152 return 0;
2153}
2154
2155void igb_down(struct igb_adapter *adapter)
2156{
2157 struct net_device *netdev = adapter->netdev;
2158 struct e1000_hw *hw = &adapter->hw;
2159 u32 tctl, rctl;
2160 int i;
2161
2162
2163
2164
2165 set_bit(__IGB_DOWN, &adapter->state);
2166
2167
2168 rctl = rd32(E1000_RCTL);
2169 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2170
2171
2172 igb_nfc_filter_exit(adapter);
2173
2174 netif_carrier_off(netdev);
2175 netif_tx_stop_all_queues(netdev);
2176
2177
2178 tctl = rd32(E1000_TCTL);
2179 tctl &= ~E1000_TCTL_EN;
2180 wr32(E1000_TCTL, tctl);
2181
2182 wrfl();
2183 usleep_range(10000, 11000);
2184
2185 igb_irq_disable(adapter);
2186
2187 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2188
2189 for (i = 0; i < adapter->num_q_vectors; i++) {
2190 if (adapter->q_vector[i]) {
2191 napi_synchronize(&adapter->q_vector[i]->napi);
2192 napi_disable(&adapter->q_vector[i]->napi);
2193 }
2194 }
2195
2196 del_timer_sync(&adapter->watchdog_timer);
2197 del_timer_sync(&adapter->phy_info_timer);
2198
2199
2200 spin_lock(&adapter->stats64_lock);
2201 igb_update_stats(adapter);
2202 spin_unlock(&adapter->stats64_lock);
2203
2204 adapter->link_speed = 0;
2205 adapter->link_duplex = 0;
2206
2207 if (!pci_channel_offline(adapter->pdev))
2208 igb_reset(adapter);
2209
2210
2211 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2212
2213 igb_clean_all_tx_rings(adapter);
2214 igb_clean_all_rx_rings(adapter);
2215#ifdef CONFIG_IGB_DCA
2216
2217
2218 igb_setup_dca(adapter);
2219#endif
2220}
2221
2222void igb_reinit_locked(struct igb_adapter *adapter)
2223{
2224 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2225 usleep_range(1000, 2000);
2226 igb_down(adapter);
2227 igb_up(adapter);
2228 clear_bit(__IGB_RESETTING, &adapter->state);
2229}
2230
2231
2232
2233
2234
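/**
 *  igb_enable_mas - Media Autosense re-enable after swap
 *  @adapter: adapter struct
 **/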
2235static void igb_enable_mas(struct igb_adapter *adapter)
2236{
2237 struct e1000_hw *hw = &adapter->hw;
2238 u32 connsw = rd32(E1000_CONNSW);
2239
2240
2241 if ((hw->phy.media_type == e1000_media_type_copper) &&
2242 (!(connsw & E1000_CONNSW_SERDESD))) {
2243 connsw |= E1000_CONNSW_ENRGSRC;
2244 connsw |= E1000_CONNSW_AUTOSENSE_EN;
2245 wr32(E1000_CONNSW, connsw);
2246 wrfl();
2247 }
2248}
2249
2250void igb_reset(struct igb_adapter *adapter)
2251{
2252 struct pci_dev *pdev = adapter->pdev;
2253 struct e1000_hw *hw = &adapter->hw;
2254 struct e1000_mac_info *mac = &hw->mac;
2255 struct e1000_fc_info *fc = &hw->fc;
2256 u32 pba, hwm;
2257
2258
2259
2260
2261 switch (mac->type) {
2262 case e1000_i350:
2263 case e1000_i354:
2264 case e1000_82580:
2265 pba = rd32(E1000_RXPBS);
2266 pba = igb_rxpbs_adjust_82580(pba);
2267 break;
2268 case e1000_82576:
2269 pba = rd32(E1000_RXPBS);
2270 pba &= E1000_RXPBS_SIZE_MASK_82576;
2271 break;
2272 case e1000_82575:
2273 case e1000_i210:
2274 case e1000_i211:
2275 default:
2276 pba = E1000_PBA_34K;
2277 break;
2278 }
2279
2280 if (mac->type == e1000_82575) {
2281 u32 min_rx_space, min_tx_space, needed_tx_space;
2282
2283
2284 wr32(E1000_PBA, pba);
2285
2286
2287
2288
2289
2290
2291
2292
2293 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2294
2295
2296
2297
2298
2299
2300 min_tx_space = adapter->max_frame_size;
2301 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2302 min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
2303
2304
2305 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
2306
2307
2308
2309
2310
2311 if (needed_tx_space < pba) {
2312 pba -= needed_tx_space;
2313
2314
2315
2316
2317 if (pba < min_rx_space)
2318 pba = min_rx_space;
2319 }
2320
2321
2322 wr32(E1000_PBA, pba);
2323 }
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
2334
2335 fc->high_water = hwm & 0xFFFFFFF0;
2336 fc->low_water = fc->high_water - 16;
2337 fc->pause_time = 0xFFFF;
2338 fc->send_xon = 1;
2339 fc->current_mode = fc->requested_mode;
2340
2341
2342 if (adapter->vfs_allocated_count) {
2343 int i;
2344
2345 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
2346 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2347
2348
2349 igb_ping_all_vfs(adapter);
2350
2351
2352 wr32(E1000_VFRE, 0);
2353 wr32(E1000_VFTE, 0);
2354 }
2355
2356
2357 hw->mac.ops.reset_hw(hw);
2358 wr32(E1000_WUC, 0);
2359
2360 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2361
2362 adapter->ei.get_invariants(hw);
2363 adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2364 }
2365 if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
2366 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2367 igb_enable_mas(adapter);
2368 }
2369 if (hw->mac.ops.init_hw(hw))
2370 dev_err(&pdev->dev, "Hardware Error\n");
2371
2372
2373 igb_flush_mac_table(adapter);
2374 __dev_uc_unsync(adapter->netdev, NULL);
2375
2376
2377 igb_set_default_mac_filter(adapter);
2378
2379
2380
2381
2382 if (!hw->mac.autoneg)
2383 igb_force_mac_fc(hw);
2384
2385 igb_init_dmac(adapter, pba);
2386#ifdef CONFIG_IGB_HWMON
2387
2388 if (!test_bit(__IGB_DOWN, &adapter->state)) {
2389 if (mac->type == e1000_i350 && hw->bus.func == 0) {
2390
2391
2392
2393 if (adapter->ets)
2394 mac->ops.init_thermal_sensor_thresh(hw);
2395 }
2396 }
2397#endif
2398
2399 if (hw->phy.media_type == e1000_media_type_copper) {
2400 switch (mac->type) {
2401 case e1000_i350:
2402 case e1000_i210:
2403 case e1000_i211:
2404 igb_set_eee_i350(hw, true, true);
2405 break;
2406 case e1000_i354:
2407 igb_set_eee_i354(hw, true, true);
2408 break;
2409 default:
2410 break;
2411 }
2412 }
2413 if (!netif_running(adapter->netdev))
2414 igb_power_down_link(adapter);
2415
2416 igb_update_mng_vlan(adapter);
2417
2418
2419 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2420
2421
2422 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2423 igb_ptp_reset(adapter);
2424
2425 igb_get_phy_info(hw);
2426}
2427
2428static netdev_features_t igb_fix_features(struct net_device *netdev,
2429 netdev_features_t features)
2430{
	/* Since there is no support for separate Rx/Tx VLAN accel
	 * enable/disable, make sure the Tx flag always mirrors the Rx flag.
	 */
2434 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2435 features |= NETIF_F_HW_VLAN_CTAG_TX;
2436 else
2437 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2438
2439 return features;
2440}
2441
2442static int igb_set_features(struct net_device *netdev,
2443 netdev_features_t features)
2444{
2445 netdev_features_t changed = netdev->features ^ features;
2446 struct igb_adapter *adapter = netdev_priv(netdev);
2447
2448 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2449 igb_vlan_mode(netdev, features);
2450
2451 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2452 return 0;
2453
2454 if (!(features & NETIF_F_NTUPLE)) {
2455 struct hlist_node *node2;
2456 struct igb_nfc_filter *rule;
2457
2458 spin_lock(&adapter->nfc_lock);
2459 hlist_for_each_entry_safe(rule, node2,
2460 &adapter->nfc_filter_list, nfc_node) {
2461 igb_erase_filter(adapter, rule);
2462 hlist_del(&rule->nfc_node);
2463 kfree(rule);
2464 }
2465 spin_unlock(&adapter->nfc_lock);
2466 adapter->nfc_filter_count = 0;
2467 }
2468
2469 netdev->features = features;
2470
2471 if (netif_running(netdev))
2472 igb_reinit_locked(adapter);
2473 else
2474 igb_reset(adapter);
2475
2476 return 1;
2477}
2478
2479static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2480 struct net_device *dev,
2481 const unsigned char *addr, u16 vid,
2482 u16 flags,
2483 struct netlink_ext_ack *extack)
2484{
2485
2486 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2487 struct igb_adapter *adapter = netdev_priv(dev);
2488 int vfn = adapter->vfs_allocated_count;
2489
2490 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2491 return -ENOMEM;
2492 }
2493
2494 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2495}
2496
2497#define IGB_MAX_MAC_HDR_LEN 127
2498#define IGB_MAX_NETWORK_HDR_LEN 511
2499
2500static netdev_features_t
2501igb_features_check(struct sk_buff *skb, struct net_device *dev,
2502 netdev_features_t features)
2503{
2504 unsigned int network_hdr_len, mac_hdr_len;
2505
2506
2507 mac_hdr_len = skb_network_header(skb) - skb->data;
2508 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2509 return features & ~(NETIF_F_HW_CSUM |
2510 NETIF_F_SCTP_CRC |
2511 NETIF_F_GSO_UDP_L4 |
2512 NETIF_F_HW_VLAN_CTAG_TX |
2513 NETIF_F_TSO |
2514 NETIF_F_TSO6);
2515
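	/* Likewise bound the network header (from the start of the network
	 * header to where checksumming begins) so that it can be described
	 * by a single context descriptor.
	 */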
2516 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2517 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2518 return features & ~(NETIF_F_HW_CSUM |
2519 NETIF_F_SCTP_CRC |
2520 NETIF_F_GSO_UDP_L4 |
2521 NETIF_F_TSO |
2522 NETIF_F_TSO6);
2523
	/* We can only support IPv4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
2527 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2528 features &= ~NETIF_F_TSO;
2529
2530 return features;
2531}
2532
2533static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
2534{
2535 if (!is_fqtss_enabled(adapter)) {
2536 enable_fqtss(adapter, true);
2537 return;
2538 }
2539
2540 igb_config_tx_modes(adapter, queue);
2541
2542 if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
2543 enable_fqtss(adapter, false);
2544}
2545
2546static int igb_offload_cbs(struct igb_adapter *adapter,
2547 struct tc_cbs_qopt_offload *qopt)
2548{
2549 struct e1000_hw *hw = &adapter->hw;
2550 int err;
2551
	/* CBS offloading is only supported by the i210 controller */
2553 if (hw->mac.type != e1000_i210)
2554 return -EOPNOTSUPP;
2555
	/* CBS offloading is only supported on queues 0 and 1 */
2557 if (qopt->queue < 0 || qopt->queue > 1)
2558 return -EINVAL;
2559
2560 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2561 qopt->idleslope, qopt->sendslope,
2562 qopt->hicredit, qopt->locredit);
2563 if (err)
2564 return err;
2565
2566 igb_offload_apply(adapter, qopt->queue);
2567
2568 return 0;
2569}
2570
2571#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
2572#define VLAN_PRIO_FULL_MASK (0x07)
2573
2574static int igb_parse_cls_flower(struct igb_adapter *adapter,
2575 struct flow_cls_offload *f,
2576 int traffic_class,
2577 struct igb_nfc_filter *input)
2578{
2579 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2580 struct flow_dissector *dissector = rule->match.dissector;
2581 struct netlink_ext_ack *extack = f->common.extack;
2582
2583 if (dissector->used_keys &
2584 ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
2585 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2586 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2587 BIT(FLOW_DISSECTOR_KEY_VLAN))) {
2588 NL_SET_ERR_MSG_MOD(extack,
2589 "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
2590 return -EOPNOTSUPP;
2591 }
2592
2593 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2594 struct flow_match_eth_addrs match;
2595
2596 flow_rule_match_eth_addrs(rule, &match);
2597 if (!is_zero_ether_addr(match.mask->dst)) {
2598 if (!is_broadcast_ether_addr(match.mask->dst)) {
2599 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
2600 return -EINVAL;
2601 }
2602
2603 input->filter.match_flags |=
2604 IGB_FILTER_FLAG_DST_MAC_ADDR;
2605 ether_addr_copy(input->filter.dst_addr, match.key->dst);
2606 }
2607
2608 if (!is_zero_ether_addr(match.mask->src)) {
2609 if (!is_broadcast_ether_addr(match.mask->src)) {
2610 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
2611 return -EINVAL;
2612 }
2613
2614 input->filter.match_flags |=
2615 IGB_FILTER_FLAG_SRC_MAC_ADDR;
2616 ether_addr_copy(input->filter.src_addr, match.key->src);
2617 }
2618 }
2619
2620 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2621 struct flow_match_basic match;
2622
2623 flow_rule_match_basic(rule, &match);
2624 if (match.mask->n_proto) {
2625 if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
2626 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
2627 return -EINVAL;
2628 }
2629
2630 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
2631 input->filter.etype = match.key->n_proto;
2632 }
2633 }
2634
2635 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2636 struct flow_match_vlan match;
2637
2638 flow_rule_match_vlan(rule, &match);
2639 if (match.mask->vlan_priority) {
2640 if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
2641 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
2642 return -EINVAL;
2643 }
2644
2645 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
2646 input->filter.vlan_tci = match.key->vlan_priority;
2647 }
2648 }
2649
2650 input->action = traffic_class;
2651 input->cookie = f->cookie;
2652
2653 return 0;
2654}
2655
2656static int igb_configure_clsflower(struct igb_adapter *adapter,
2657 struct flow_cls_offload *cls_flower)
2658{
2659 struct netlink_ext_ack *extack = cls_flower->common.extack;
2660 struct igb_nfc_filter *filter, *f;
2661 int err, tc;
2662
2663 tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2664 if (tc < 0) {
2665 NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
2666 return -EINVAL;
2667 }
2668
2669 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2670 if (!filter)
2671 return -ENOMEM;
2672
2673 err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
2674 if (err < 0)
2675 goto err_parse;
2676
2677 spin_lock(&adapter->nfc_lock);
2678
2679 hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
2680 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2681 err = -EEXIST;
2682 NL_SET_ERR_MSG_MOD(extack,
2683 "This filter is already set in ethtool");
2684 goto err_locked;
2685 }
2686 }
2687
2688 hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
2689 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2690 err = -EEXIST;
2691 NL_SET_ERR_MSG_MOD(extack,
2692 "This filter is already set in cls_flower");
2693 goto err_locked;
2694 }
2695 }
2696
2697 err = igb_add_filter(adapter, filter);
2698 if (err < 0) {
2699 NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
2700 goto err_locked;
2701 }
2702
2703 hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
2704
2705 spin_unlock(&adapter->nfc_lock);
2706
2707 return 0;
2708
2709err_locked:
2710 spin_unlock(&adapter->nfc_lock);
2711
2712err_parse:
2713 kfree(filter);
2714
2715 return err;
2716}
2717
2718static int igb_delete_clsflower(struct igb_adapter *adapter,
2719 struct flow_cls_offload *cls_flower)
2720{
2721 struct igb_nfc_filter *filter;
2722 int err;
2723
2724 spin_lock(&adapter->nfc_lock);
2725
2726 hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
2727 if (filter->cookie == cls_flower->cookie)
2728 break;
2729
2730 if (!filter) {
2731 err = -ENOENT;
2732 goto out;
2733 }
2734
2735 err = igb_erase_filter(adapter, filter);
2736 if (err < 0)
2737 goto out;
2738
2739 hlist_del(&filter->nfc_node);
2740 kfree(filter);
2741
2742out:
2743 spin_unlock(&adapter->nfc_lock);
2744
2745 return err;
2746}
2747
2748static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
2749 struct flow_cls_offload *cls_flower)
2750{
2751 switch (cls_flower->command) {
2752 case FLOW_CLS_REPLACE:
2753 return igb_configure_clsflower(adapter, cls_flower);
2754 case FLOW_CLS_DESTROY:
2755 return igb_delete_clsflower(adapter, cls_flower);
2756 case FLOW_CLS_STATS:
2757 return -EOPNOTSUPP;
2758 default:
2759 return -EOPNOTSUPP;
2760 }
2761}
2762
2763static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2764 void *cb_priv)
2765{
2766 struct igb_adapter *adapter = cb_priv;
2767
2768 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
2769 return -EOPNOTSUPP;
2770
2771 switch (type) {
2772 case TC_SETUP_CLSFLOWER:
2773 return igb_setup_tc_cls_flower(adapter, type_data);
2774
2775 default:
2776 return -EOPNOTSUPP;
2777 }
2778}
2779
2780static int igb_offload_txtime(struct igb_adapter *adapter,
2781 struct tc_etf_qopt_offload *qopt)
2782{
2783 struct e1000_hw *hw = &adapter->hw;
2784 int err;
2785
	/* Launchtime offloading is only supported by the i210 controller */
2787 if (hw->mac.type != e1000_i210)
2788 return -EOPNOTSUPP;
2789
	/* Launchtime offloading is only supported on queues 0 and 1 */
2791 if (qopt->queue < 0 || qopt->queue > 1)
2792 return -EINVAL;
2793
2794 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
2795 if (err)
2796 return err;
2797
2798 igb_offload_apply(adapter, qopt->queue);
2799
2800 return 0;
2801}
2802
2803static LIST_HEAD(igb_block_cb_list);
2804
2805static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2806 void *type_data)
2807{
2808 struct igb_adapter *adapter = netdev_priv(dev);
2809
2810 switch (type) {
2811 case TC_SETUP_QDISC_CBS:
2812 return igb_offload_cbs(adapter, type_data);
2813 case TC_SETUP_BLOCK:
2814 return flow_block_cb_setup_simple(type_data,
2815 &igb_block_cb_list,
2816 igb_setup_tc_block_cb,
2817 adapter, adapter, true);
2818
2819 case TC_SETUP_QDISC_ETF:
2820 return igb_offload_txtime(adapter, type_data);
2821
2822 default:
2823 return -EOPNOTSUPP;
2824 }
2825}
2826
2827static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
2828{
2829 int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
2830 struct igb_adapter *adapter = netdev_priv(dev);
2831 struct bpf_prog *prog = bpf->prog, *old_prog;
2832 bool running = netif_running(dev);
2833 bool need_reset;
2834
2835
2836 for (i = 0; i < adapter->num_rx_queues; i++) {
2837 struct igb_ring *ring = adapter->rx_ring[i];
2838
2839 if (frame_size > igb_rx_bufsz(ring)) {
2840 NL_SET_ERR_MSG_MOD(bpf->extack,
2841 "The RX buffer size is too small for the frame size");
2842 netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
2843 igb_rx_bufsz(ring), frame_size);
2844 return -EINVAL;
2845 }
2846 }
2847
2848 old_prog = xchg(&adapter->xdp_prog, prog);
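	/* A reset is only needed when going from no program to a program or
	 * vice versa; replacing one program with another is done live by
	 * swapping the per-ring pointers below.
	 */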
2849 need_reset = (!!prog != !!old_prog);
2850
	/* device is up and a program is being added or removed, so the
	 * rings must be torn down and rebuilt
	 */
2852 if (need_reset && running) {
2853 igb_close(dev);
2854 } else {
2855 for (i = 0; i < adapter->num_rx_queues; i++)
2856 (void)xchg(&adapter->rx_ring[i]->xdp_prog,
2857 adapter->xdp_prog);
2858 }
2859
2860 if (old_prog)
2861 bpf_prog_put(old_prog);
2862
2863
2864 if (!need_reset)
2865 return 0;
2866
2867 if (running)
2868 igb_open(dev);
2869
2870 return 0;
2871}
2872
2873static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2874{
2875 switch (xdp->command) {
2876 case XDP_SETUP_PROG:
2877 return igb_xdp_setup(dev, xdp);
2878 default:
2879 return -EINVAL;
2880 }
2881}
2882
2883static void igb_xdp_ring_update_tail(struct igb_ring *ring)
2884{
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 */
2888 wmb();
2889 writel(ring->next_to_use, ring->tail);
2890}
2891
2892static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
2893{
2894 unsigned int r_idx = smp_processor_id();
2895
2896 if (r_idx >= adapter->num_tx_queues)
2897 r_idx = r_idx % adapter->num_tx_queues;
2898
2899 return adapter->tx_ring[r_idx];
2900}
2901
2902static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
2903{
2904 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2905 int cpu = smp_processor_id();
2906 struct igb_ring *tx_ring;
2907 struct netdev_queue *nq;
2908 u32 ret;
2909
2910 if (unlikely(!xdpf))
2911 return IGB_XDP_CONSUMED;
2912
	/* The XDP Tx ring may not be configured yet while a program is being
	 * attached or detached, so bail out if it is unavailable.
	 */
2916 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2917 if (unlikely(!tx_ring))
2918 return IGB_XDP_CONSUMED;
2919
2920 nq = txring_txq(tx_ring);
2921 __netif_tx_lock(nq, cpu);
2922
2923 nq->trans_start = jiffies;
2924 ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2925 __netif_tx_unlock(nq);
2926
2927 return ret;
2928}
2929
2930static int igb_xdp_xmit(struct net_device *dev, int n,
2931 struct xdp_frame **frames, u32 flags)
2932{
2933 struct igb_adapter *adapter = netdev_priv(dev);
2934 int cpu = smp_processor_id();
2935 struct igb_ring *tx_ring;
2936 struct netdev_queue *nq;
2937 int nxmit = 0;
2938 int i;
2939
2940 if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
2941 return -ENETDOWN;
2942
2943 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2944 return -EINVAL;
2945
2946
2947
2948
2949 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2950 if (unlikely(!tx_ring))
2951 return -ENXIO;
2952
2953 nq = txring_txq(tx_ring);
2954 __netif_tx_lock(nq, cpu);
2955
	/* update trans_start so the stack's Tx watchdog does not fire on
	 * this shared queue
	 */
2957 nq->trans_start = jiffies;
2958
2959 for (i = 0; i < n; i++) {
2960 struct xdp_frame *xdpf = frames[i];
2961 int err;
2962
2963 err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2964 if (err != IGB_XDP_TX)
2965 break;
2966 nxmit++;
2967 }
2968
2969 __netif_tx_unlock(nq);
2970
2971 if (unlikely(flags & XDP_XMIT_FLUSH))
2972 igb_xdp_ring_update_tail(tx_ring);
2973
2974 return nxmit;
2975}
2976
2977static const struct net_device_ops igb_netdev_ops = {
2978 .ndo_open = igb_open,
2979 .ndo_stop = igb_close,
2980 .ndo_start_xmit = igb_xmit_frame,
2981 .ndo_get_stats64 = igb_get_stats64,
2982 .ndo_set_rx_mode = igb_set_rx_mode,
2983 .ndo_set_mac_address = igb_set_mac,
2984 .ndo_change_mtu = igb_change_mtu,
2985 .ndo_do_ioctl = igb_ioctl,
2986 .ndo_tx_timeout = igb_tx_timeout,
2987 .ndo_validate_addr = eth_validate_addr,
2988 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
2989 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
2990 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
2991 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
2992 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
2993 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
2994 .ndo_set_vf_trust = igb_ndo_set_vf_trust,
2995 .ndo_get_vf_config = igb_ndo_get_vf_config,
2996 .ndo_fix_features = igb_fix_features,
2997 .ndo_set_features = igb_set_features,
2998 .ndo_fdb_add = igb_ndo_fdb_add,
2999 .ndo_features_check = igb_features_check,
3000 .ndo_setup_tc = igb_setup_tc,
3001 .ndo_bpf = igb_xdp,
3002 .ndo_xdp_xmit = igb_xdp_xmit,
3003};
3004
/**
 *  igb_set_fw_version - Configure version string for ethtool
 *  @adapter: adapter struct
 **/
3009void igb_set_fw_version(struct igb_adapter *adapter)
3010{
3011 struct e1000_hw *hw = &adapter->hw;
3012 struct e1000_fw_version fw;
3013
3014 igb_get_fw_version(hw, &fw);
3015
3016 switch (hw->mac.type) {
3017 case e1000_i210:
3018 case e1000_i211:
3019 if (!(igb_get_flash_presence_i210(hw))) {
3020 snprintf(adapter->fw_version,
3021 sizeof(adapter->fw_version),
3022 "%2d.%2d-%d",
3023 fw.invm_major, fw.invm_minor,
3024 fw.invm_img_type);
3025 break;
3026 }
3027 fallthrough;
3028 default:
3029
3030 if (fw.or_valid) {
3031 snprintf(adapter->fw_version,
3032 sizeof(adapter->fw_version),
3033 "%d.%d, 0x%08x, %d.%d.%d",
3034 fw.eep_major, fw.eep_minor, fw.etrack_id,
3035 fw.or_major, fw.or_build, fw.or_patch);
3036
3037 } else if (fw.etrack_id != 0X0000) {
3038 snprintf(adapter->fw_version,
3039 sizeof(adapter->fw_version),
3040 "%d.%d, 0x%08x",
3041 fw.eep_major, fw.eep_minor, fw.etrack_id);
3042 } else {
3043 snprintf(adapter->fw_version,
3044 sizeof(adapter->fw_version),
3045 "%d.%d.%d",
3046 fw.eep_major, fw.eep_minor, fw.eep_build);
3047 }
3048 break;
3049 }
3050}
3051
/**
 *  igb_init_mas - init Media Autosense feature if enabled in the NVM
 *  @adapter: adapter struct
 **/
3057static void igb_init_mas(struct igb_adapter *adapter)
3058{
3059 struct e1000_hw *hw = &adapter->hw;
3060 u16 eeprom_data;
3061
3062 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
3063 switch (hw->bus.func) {
3064 case E1000_FUNC_0:
3065 if (eeprom_data & IGB_MAS_ENABLE_0) {
3066 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3067 netdev_info(adapter->netdev,
3068 "MAS: Enabling Media Autosense for port %d\n",
3069 hw->bus.func);
3070 }
3071 break;
3072 case E1000_FUNC_1:
3073 if (eeprom_data & IGB_MAS_ENABLE_1) {
3074 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3075 netdev_info(adapter->netdev,
3076 "MAS: Enabling Media Autosense for port %d\n",
3077 hw->bus.func);
3078 }
3079 break;
3080 case E1000_FUNC_2:
3081 if (eeprom_data & IGB_MAS_ENABLE_2) {
3082 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3083 netdev_info(adapter->netdev,
3084 "MAS: Enabling Media Autosense for port %d\n",
3085 hw->bus.func);
3086 }
3087 break;
3088 case E1000_FUNC_3:
3089 if (eeprom_data & IGB_MAS_ENABLE_3) {
3090 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3091 netdev_info(adapter->netdev,
3092 "MAS: Enabling Media Autosense for port %d\n",
3093 hw->bus.func);
3094 }
3095 break;
3096 default:
3097
3098 netdev_err(adapter->netdev,
3099 "MAS: Invalid port configuration, returning\n");
3100 break;
3101 }
3102}
3103
/**
 *  igb_init_i2c - Init I2C interface
 *  @adapter: pointer to adapter structure
 **/
3108static s32 igb_init_i2c(struct igb_adapter *adapter)
3109{
3110 s32 status = 0;
3111
3112
3113 if (adapter->hw.mac.type != e1000_i350)
3114 return 0;
3115
3116
3117
3118
3119
3120 adapter->i2c_adap.owner = THIS_MODULE;
3121 adapter->i2c_algo = igb_i2c_algo;
3122 adapter->i2c_algo.data = adapter;
3123 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
3124 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
3125 strlcpy(adapter->i2c_adap.name, "igb BB",
3126 sizeof(adapter->i2c_adap.name));
3127 status = i2c_bit_add_bus(&adapter->i2c_adap);
3128 return status;
3129}
3130
/**
 *  igb_probe - Device Initialization Routine
 *  @pdev: PCI device information struct
 *  @ent: entry in igb_pci_tbl
 *
 *  Returns 0 on success, negative on failure
 *
 *  igb_probe initializes an adapter identified by a pci_dev structure.
 *  The OS initialization, configuring of the adapter private structure,
 *  and a hardware reset occur.
 **/
3142static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3143{
3144 struct net_device *netdev;
3145 struct igb_adapter *adapter;
3146 struct e1000_hw *hw;
3147 u16 eeprom_data = 0;
3148 s32 ret_val;
3149 static int global_quad_port_a;
3150 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
3151 int err, pci_using_dac;
3152 u8 part_str[E1000_PBANUM_LENGTH];
3153
	/* Catch broken hardware that puts the wrong VF device ID in
	 * the PF's PCIe SR-IOV capability.
	 */
3157 if (pdev->is_virtfn) {
3158 WARN(1, KERN_ERR "%s (%x:%x) should not be a VF!\n",
3159 pci_name(pdev), pdev->vendor, pdev->device);
3160 return -EINVAL;
3161 }
3162
3163 err = pci_enable_device_mem(pdev);
3164 if (err)
3165 return err;
3166
3167 pci_using_dac = 0;
3168 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3169 if (!err) {
3170 pci_using_dac = 1;
3171 } else {
3172 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3173 if (err) {
3174 dev_err(&pdev->dev,
3175 "No usable DMA configuration, aborting\n");
3176 goto err_dma;
3177 }
3178 }
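	/* pci_using_dac records whether the 64-bit DMA mask was accepted;
	 * it is used further down to decide whether to advertise
	 * NETIF_F_HIGHDMA on the netdev.
	 */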
3179
3180 err = pci_request_mem_regions(pdev, igb_driver_name);
3181 if (err)
3182 goto err_pci_reg;
3183
3184 pci_enable_pcie_error_reporting(pdev);
3185
3186 pci_set_master(pdev);
3187 pci_save_state(pdev);
3188
3189 err = -ENOMEM;
3190 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
3191 IGB_MAX_TX_QUEUES);
3192 if (!netdev)
3193 goto err_alloc_etherdev;
3194
3195 SET_NETDEV_DEV(netdev, &pdev->dev);
3196
3197 pci_set_drvdata(pdev, netdev);
3198 adapter = netdev_priv(netdev);
3199 adapter->netdev = netdev;
3200 adapter->pdev = pdev;
3201 hw = &adapter->hw;
3202 hw->back = adapter;
3203 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3204
3205 err = -EIO;
3206 adapter->io_addr = pci_iomap(pdev, 0, 0);
3207 if (!adapter->io_addr)
3208 goto err_ioremap;
3209
3210 hw->hw_addr = adapter->io_addr;
3211
3212 netdev->netdev_ops = &igb_netdev_ops;
3213 igb_set_ethtool_ops(netdev);
3214 netdev->watchdog_timeo = 5 * HZ;
3215
3216 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
3217
3218 netdev->mem_start = pci_resource_start(pdev, 0);
3219 netdev->mem_end = pci_resource_end(pdev, 0);
3220
3221
3222 hw->vendor_id = pdev->vendor;
3223 hw->device_id = pdev->device;
3224 hw->revision_id = pdev->revision;
3225 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3226 hw->subsystem_device_id = pdev->subsystem_device;
3227
3228
3229 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3230 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3231 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3232
3233 err = ei->get_invariants(hw);
3234 if (err)
3235 goto err_sw_init;
3236
3237
3238 err = igb_sw_init(adapter);
3239 if (err)
3240 goto err_sw_init;
3241
3242 igb_get_bus_info_pcie(hw);
3243
3244 hw->phy.autoneg_wait_to_complete = false;
3245
3246
3247 if (hw->phy.media_type == e1000_media_type_copper) {
3248 hw->phy.mdix = AUTO_ALL_MODES;
3249 hw->phy.disable_polarity_correction = false;
3250 hw->phy.ms_type = e1000_ms_hw_default;
3251 }
3252
3253 if (igb_check_reset_block(hw))
3254 dev_info(&pdev->dev,
3255 "PHY reset is blocked due to SOL/IDER session.\n");
3256
3257
3258
3259
3260
3261 netdev->features |= NETIF_F_SG |
3262 NETIF_F_TSO |
3263 NETIF_F_TSO6 |
3264 NETIF_F_RXHASH |
3265 NETIF_F_RXCSUM |
3266 NETIF_F_HW_CSUM;
3267
3268 if (hw->mac.type >= e1000_82576)
3269 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
3270
3271 if (hw->mac.type >= e1000_i350)
3272 netdev->features |= NETIF_F_HW_TC;
3273
3274#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
3275 NETIF_F_GSO_GRE_CSUM | \
3276 NETIF_F_GSO_IPXIP4 | \
3277 NETIF_F_GSO_IPXIP6 | \
3278 NETIF_F_GSO_UDP_TUNNEL | \
3279 NETIF_F_GSO_UDP_TUNNEL_CSUM)
3280
3281 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
3282 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
3283
3284
3285 netdev->hw_features |= netdev->features |
3286 NETIF_F_HW_VLAN_CTAG_RX |
3287 NETIF_F_HW_VLAN_CTAG_TX |
3288 NETIF_F_RXALL;
3289
3290 if (hw->mac.type >= e1000_i350)
3291 netdev->hw_features |= NETIF_F_NTUPLE;
3292
3293 if (pci_using_dac)
3294 netdev->features |= NETIF_F_HIGHDMA;
3295
3296 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3297 netdev->mpls_features |= NETIF_F_HW_CSUM;
3298 netdev->hw_enc_features |= netdev->vlan_features;
3299
3300
3301 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3302 NETIF_F_HW_VLAN_CTAG_RX |
3303 NETIF_F_HW_VLAN_CTAG_TX;
3304
3305 netdev->priv_flags |= IFF_SUPP_NOFCS;
3306
3307 netdev->priv_flags |= IFF_UNICAST_FLT;
3308
3309
3310 netdev->min_mtu = ETH_MIN_MTU;
3311 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3312
3313 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
3314
	/* before reading the NVM, reset the controller to put the device
	 * in a known good starting state
	 */
3318 hw->mac.ops.reset_hw(hw);
3319
	/* make sure the NVM checksum is valid */
3323 switch (hw->mac.type) {
3324 case e1000_i210:
3325 case e1000_i211:
3326 if (igb_get_flash_presence_i210(hw)) {
3327 if (hw->nvm.ops.validate(hw) < 0) {
3328 dev_err(&pdev->dev,
3329 "The NVM Checksum Is Not Valid\n");
3330 err = -EIO;
3331 goto err_eeprom;
3332 }
3333 }
3334 break;
3335 default:
3336 if (hw->nvm.ops.validate(hw) < 0) {
3337 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
3338 err = -EIO;
3339 goto err_eeprom;
3340 }
3341 break;
3342 }
3343
3344 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
3345
3346 if (hw->mac.ops.read_mac_addr(hw))
3347 dev_err(&pdev->dev, "NVM Read Error\n");
3348 }
3349
3350 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
3351
3352 if (!is_valid_ether_addr(netdev->dev_addr)) {
3353 dev_err(&pdev->dev, "Invalid MAC Address\n");
3354 err = -EIO;
3355 goto err_eeprom;
3356 }
3357
3358 igb_set_default_mac_filter(adapter);
3359
3360
3361 igb_set_fw_version(adapter);
3362
3363
3364 if (hw->mac.type == e1000_i210) {
3365 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
3366 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
3367 }
3368
3369 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
3370 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
3371
3372 INIT_WORK(&adapter->reset_task, igb_reset_task);
3373 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
3374
3375
3376 adapter->fc_autoneg = true;
3377 hw->mac.autoneg = true;
3378 hw->phy.autoneg_advertised = 0x2f;
3379
3380 hw->fc.requested_mode = e1000_fc_default;
3381 hw->fc.current_mode = e1000_fc_default;
3382
3383 igb_validate_mdi_setting(hw);
3384
3385
3386 if (hw->bus.func == 0)
3387 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3388
3389
3390 if (hw->mac.type >= e1000_82580)
3391 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3392 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3393 &eeprom_data);
3394 else if (hw->bus.func == 1)
3395 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3396
3397 if (eeprom_data & IGB_EEPROM_APME)
3398 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3399
3400
3401
3402
3403
3404 switch (pdev->device) {
3405 case E1000_DEV_ID_82575GB_QUAD_COPPER:
3406 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3407 break;
3408 case E1000_DEV_ID_82575EB_FIBER_SERDES:
3409 case E1000_DEV_ID_82576_FIBER:
3410 case E1000_DEV_ID_82576_SERDES:
3411
3412
3413
3414 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
3415 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3416 break;
3417 case E1000_DEV_ID_82576_QUAD_COPPER:
3418 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
3419
3420 if (global_quad_port_a != 0)
3421 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3422 else
3423 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
3424
3425 if (++global_quad_port_a == 4)
3426 global_quad_port_a = 0;
3427 break;
3428 default:
3429
3430 if (!device_can_wakeup(&adapter->pdev->dev))
3431 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3432 }
3433
3434
3435 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
3436 adapter->wol |= E1000_WUFC_MAG;
3437
3438
3439 if ((hw->mac.type == e1000_i350) &&
3440 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
3441 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3442 adapter->wol = 0;
3443 }
3444
	/* Certain OEM variants of i350/i354 report WoL as supported in the
	 * subsystem ID while leaving it disabled by default.
	 */
3448 if (((hw->mac.type == e1000_i350) ||
3449 (hw->mac.type == e1000_i354)) &&
3450 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
3451 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3452 adapter->wol = 0;
3453 }
3454 if (hw->mac.type == e1000_i350) {
3455 if (((pdev->subsystem_device == 0x5001) ||
3456 (pdev->subsystem_device == 0x5002)) &&
3457 (hw->bus.func == 0)) {
3458 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3459 adapter->wol = 0;
3460 }
3461 if (pdev->subsystem_device == 0x1F52)
3462 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3463 }
3464
3465 device_set_wakeup_enable(&adapter->pdev->dev,
3466 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
3467
3468
3469 igb_reset(adapter);
3470
3471
3472 err = igb_init_i2c(adapter);
3473 if (err) {
3474 dev_err(&pdev->dev, "failed to init i2c interface\n");
3475 goto err_eeprom;
3476 }
	/* let the f/w know that the h/w is now under the control of the
	 * driver
	 */
3481 igb_get_hw_control(adapter);
3482
3483 strcpy(netdev->name, "eth%d");
3484 err = register_netdev(netdev);
3485 if (err)
3486 goto err_register;
3487
	/* carrier off reporting is important to ethtool even BEFORE open */
3489 netif_carrier_off(netdev);
3490
3491#ifdef CONFIG_IGB_DCA
3492 if (dca_add_requester(&pdev->dev) == 0) {
3493 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3494 dev_info(&pdev->dev, "DCA enabled\n");
3495 igb_setup_dca(adapter);
3496 }
3497
3498#endif
3499#ifdef CONFIG_IGB_HWMON
3500
3501 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3502 u16 ets_word;
3503
3504
3505
3506
3507 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3508 if (ets_word != 0x0000 && ets_word != 0xFFFF)
3509 adapter->ets = true;
3510 else
3511 adapter->ets = false;
3512 if (igb_sysfs_init(adapter))
3513 dev_err(&pdev->dev,
3514 "failed to allocate sysfs resources\n");
3515 } else {
3516 adapter->ets = false;
3517 }
3518#endif
3519
3520 adapter->ei = *ei;
3521 if (hw->dev_spec._82575.mas_capable)
3522 igb_init_mas(adapter);
3523
	/* do hw tstamp init after resetting */
3525 igb_ptp_init(adapter);
3526
3527 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3528
3529 if (hw->mac.type != e1000_i354) {
3530 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3531 netdev->name,
3532 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3533 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3534 "unknown"),
3535 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3536 "Width x4" :
3537 (hw->bus.width == e1000_bus_width_pcie_x2) ?
3538 "Width x2" :
3539 (hw->bus.width == e1000_bus_width_pcie_x1) ?
3540 "Width x1" : "unknown"), netdev->dev_addr);
3541 }
3542
3543 if ((hw->mac.type == e1000_82576 &&
3544 rd32(E1000_EECD) & E1000_EECD_PRES) ||
3545 (hw->mac.type >= e1000_i210 ||
3546 igb_get_flash_presence_i210(hw))) {
3547 ret_val = igb_read_part_string(hw, part_str,
3548 E1000_PBANUM_LENGTH);
3549 } else {
3550 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3551 }
3552
3553 if (ret_val)
3554 strcpy(part_str, "Unknown");
3555 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3556 dev_info(&pdev->dev,
3557 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3558 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3559 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3560 adapter->num_rx_queues, adapter->num_tx_queues);
3561 if (hw->phy.media_type == e1000_media_type_copper) {
3562 switch (hw->mac.type) {
3563 case e1000_i350:
3564 case e1000_i210:
3565 case e1000_i211:
3566
3567 err = igb_set_eee_i350(hw, true, true);
3568 if ((!err) &&
3569 (!hw->dev_spec._82575.eee_disable)) {
3570 adapter->eee_advert =
3571 MDIO_EEE_100TX | MDIO_EEE_1000T;
3572 adapter->flags |= IGB_FLAG_EEE;
3573 }
3574 break;
3575 case e1000_i354:
3576 if ((rd32(E1000_CTRL_EXT) &
3577 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3578 err = igb_set_eee_i354(hw, true, true);
3579 if ((!err) &&
3580 (!hw->dev_spec._82575.eee_disable)) {
3581 adapter->eee_advert =
3582 MDIO_EEE_100TX | MDIO_EEE_1000T;
3583 adapter->flags |= IGB_FLAG_EEE;
3584 }
3585 }
3586 break;
3587 default:
3588 break;
3589 }
3590 }
3591
3592 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
3593
3594 pm_runtime_put_noidle(&pdev->dev);
3595 return 0;
3596
3597err_register:
3598 igb_release_hw_control(adapter);
3599 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3600err_eeprom:
3601 if (!igb_check_reset_block(hw))
3602 igb_reset_phy(hw);
3603
3604 if (hw->flash_address)
3605 iounmap(hw->flash_address);
3606err_sw_init:
3607 kfree(adapter->mac_table);
3608 kfree(adapter->shadow_vfta);
3609 igb_clear_interrupt_scheme(adapter);
3610#ifdef CONFIG_PCI_IOV
3611 igb_disable_sriov(pdev);
3612#endif
3613 pci_iounmap(pdev, adapter->io_addr);
3614err_ioremap:
3615 free_netdev(netdev);
3616err_alloc_etherdev:
3617 pci_release_mem_regions(pdev);
3618err_pci_reg:
3619err_dma:
3620 pci_disable_device(pdev);
3621 return err;
3622}
3623
3624#ifdef CONFIG_PCI_IOV
3625static int igb_disable_sriov(struct pci_dev *pdev)
3626{
3627 struct net_device *netdev = pci_get_drvdata(pdev);
3628 struct igb_adapter *adapter = netdev_priv(netdev);
3629 struct e1000_hw *hw = &adapter->hw;
3630
3631
3632 if (adapter->vf_data) {
3633
3634 if (pci_vfs_assigned(pdev)) {
3635 dev_warn(&pdev->dev,
3636 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3637 return -EPERM;
3638 } else {
3639 pci_disable_sriov(pdev);
3640 msleep(500);
3641 }
3642
3643 kfree(adapter->vf_mac_list);
3644 adapter->vf_mac_list = NULL;
3645 kfree(adapter->vf_data);
3646 adapter->vf_data = NULL;
3647 adapter->vfs_allocated_count = 0;
3648 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3649 wrfl();
3650 msleep(100);
3651 dev_info(&pdev->dev, "IOV Disabled\n");
3652
3653
3654 adapter->flags |= IGB_FLAG_DMAC;
3655 }
3656
3657 return 0;
3658}
3659
3660static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
3661{
3662 struct net_device *netdev = pci_get_drvdata(pdev);
3663 struct igb_adapter *adapter = netdev_priv(netdev);
3664 int old_vfs = pci_num_vf(pdev);
3665 struct vf_mac_filter *mac_list;
3666 int err = 0;
3667 int num_vf_mac_filters, i;
3668
3669 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3670 err = -EPERM;
3671 goto out;
3672 }
3673 if (!num_vfs)
3674 goto out;
3675
3676 if (old_vfs) {
3677 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3678 old_vfs, max_vfs);
3679 adapter->vfs_allocated_count = old_vfs;
3680 } else
3681 adapter->vfs_allocated_count = num_vfs;
3682
3683 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3684 sizeof(struct vf_data_storage), GFP_KERNEL);
3685
3686
3687 if (!adapter->vf_data) {
3688 adapter->vfs_allocated_count = 0;
3689 err = -ENOMEM;
3690 goto out;
3691 }
3692
3693
3694
3695
3696
3697
3698 num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3699 (1 + IGB_PF_MAC_FILTERS_RESERVED +
3700 adapter->vfs_allocated_count);
3701
3702 adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3703 sizeof(struct vf_mac_filter),
3704 GFP_KERNEL);
3705
3706 mac_list = adapter->vf_mac_list;
3707 INIT_LIST_HEAD(&adapter->vf_macs.l);
3708
3709 if (adapter->vf_mac_list) {
3710
3711 for (i = 0; i < num_vf_mac_filters; i++) {
3712 mac_list->vf = -1;
3713 mac_list->free = true;
3714 list_add(&mac_list->l, &adapter->vf_macs.l);
3715 mac_list++;
3716 }
3717 } else {
3718
3719
3720
3721 dev_err(&pdev->dev,
3722 "Unable to allocate memory for VF MAC filter list\n");
3723 }
3724
3725
3726 if (!old_vfs) {
3727 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3728 if (err)
3729 goto err_out;
3730 }
3731 dev_info(&pdev->dev, "%d VFs allocated\n",
3732 adapter->vfs_allocated_count);
3733 for (i = 0; i < adapter->vfs_allocated_count; i++)
3734 igb_vf_configure(adapter, i);
3735
3736
3737 adapter->flags &= ~IGB_FLAG_DMAC;
3738 goto out;
3739
3740err_out:
3741 kfree(adapter->vf_mac_list);
3742 adapter->vf_mac_list = NULL;
3743 kfree(adapter->vf_data);
3744 adapter->vf_data = NULL;
3745 adapter->vfs_allocated_count = 0;
3746out:
3747 return err;
3748}
3749
3750#endif
3751
/**
 *  igb_remove_i2c - Cleanup I2C interface
 *  @adapter: pointer to adapter structure
 **/
3755static void igb_remove_i2c(struct igb_adapter *adapter)
3756{
3757
3758 i2c_del_adapter(&adapter->i2c_adap);
3759}
3760
/**
 *  igb_remove - Device Removal Routine
 *  @pdev: PCI device information struct
 *
 *  igb_remove is called by the PCI subsystem to alert the driver
 *  that it should release a PCI device.  This could be caused by a
 *  Hot-Plug event, or because the driver is going to be removed from
 *  memory.
 **/
3770static void igb_remove(struct pci_dev *pdev)
3771{
3772 struct net_device *netdev = pci_get_drvdata(pdev);
3773 struct igb_adapter *adapter = netdev_priv(netdev);
3774 struct e1000_hw *hw = &adapter->hw;
3775
3776 pm_runtime_get_noresume(&pdev->dev);
3777#ifdef CONFIG_IGB_HWMON
3778 igb_sysfs_exit(adapter);
3779#endif
3780 igb_remove_i2c(adapter);
3781 igb_ptp_stop(adapter);
3782
3783
3784
3785 set_bit(__IGB_DOWN, &adapter->state);
3786 del_timer_sync(&adapter->watchdog_timer);
3787 del_timer_sync(&adapter->phy_info_timer);
3788
3789 cancel_work_sync(&adapter->reset_task);
3790 cancel_work_sync(&adapter->watchdog_task);
3791
3792#ifdef CONFIG_IGB_DCA
3793 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3794 dev_info(&pdev->dev, "DCA disabled\n");
3795 dca_remove_requester(&pdev->dev);
3796 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3797 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3798 }
3799#endif
3800
3801
3802
3803
3804 igb_release_hw_control(adapter);
3805
3806#ifdef CONFIG_PCI_IOV
3807 igb_disable_sriov(pdev);
3808#endif
3809
3810 unregister_netdev(netdev);
3811
3812 igb_clear_interrupt_scheme(adapter);
3813
3814 pci_iounmap(pdev, adapter->io_addr);
3815 if (hw->flash_address)
3816 iounmap(hw->flash_address);
3817 pci_release_mem_regions(pdev);
3818
3819 kfree(adapter->mac_table);
3820 kfree(adapter->shadow_vfta);
3821 free_netdev(netdev);
3822
3823 pci_disable_pcie_error_reporting(pdev);
3824
3825 pci_disable_device(pdev);
3826}
3827
/**
 *  igb_probe_vfs - Initialize VF data storage and add VFs to pci config space
 *  @adapter: board private structure to initialize
 *
 *  Probes for SR-IOV support and, when the hardware allows it, enables the
 *  virtual functions requested via the max_vfs module parameter.
 **/
3837static void igb_probe_vfs(struct igb_adapter *adapter)
3838{
3839#ifdef CONFIG_PCI_IOV
3840 struct pci_dev *pdev = adapter->pdev;
3841 struct e1000_hw *hw = &adapter->hw;
3842
	/* Virtualization features not supported on i210 and i211 family. */
3844 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
3845 return;
3846
3847
3848
3849
3850
3851 igb_set_interrupt_capability(adapter, true);
3852 igb_reset_interrupt_capability(adapter);
3853
3854 pci_sriov_set_totalvfs(pdev, 7);
3855 igb_enable_sriov(pdev, max_vfs);
3856
3857#endif
3858}
3859
3860unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3861{
3862 struct e1000_hw *hw = &adapter->hw;
3863 unsigned int max_rss_queues;
3864
3865
3866 switch (hw->mac.type) {
3867 case e1000_i211:
3868 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3869 break;
3870 case e1000_82575:
3871 case e1000_i210:
3872 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3873 break;
3874 case e1000_i350:
3875
3876 if (!!adapter->vfs_allocated_count) {
3877 max_rss_queues = 1;
3878 break;
3879 }
3880 fallthrough;
3881 case e1000_82576:
3882 if (!!adapter->vfs_allocated_count) {
3883 max_rss_queues = 2;
3884 break;
3885 }
3886 fallthrough;
3887 case e1000_82580:
3888 case e1000_i354:
3889 default:
3890 max_rss_queues = IGB_MAX_RX_QUEUES;
3891 break;
3892 }
3893
3894 return max_rss_queues;
3895}
3896
3897static void igb_init_queue_configuration(struct igb_adapter *adapter)
3898{
3899 u32 max_rss_queues;
3900
3901 max_rss_queues = igb_get_max_rss_queues(adapter);
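	/* never allocate more RSS queues than there are online CPUs */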
3902 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3903
3904 igb_set_flag_queue_pairs(adapter, max_rss_queues);
3905}
3906
3907void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3908 const u32 max_rss_queues)
3909{
3910 struct e1000_hw *hw = &adapter->hw;
3911
3912
3913 switch (hw->mac.type) {
3914 case e1000_82575:
3915 case e1000_i211:
3916
3917 break;
3918 case e1000_82576:
3919 case e1000_82580:
3920 case e1000_i350:
3921 case e1000_i354:
3922 case e1000_i210:
3923 default:
3924
3925
3926
3927 if (adapter->rss_queues > (max_rss_queues / 2))
3928 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
3929 else
3930 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
3931 break;
3932 }
3933}
3934
/**
 *  igb_sw_init - Initialize general software structures (struct igb_adapter)
 *  @adapter: board private structure to initialize
 *
 *  igb_sw_init initializes the Adapter private data structure.
 *  Fields are initialized based on PCI device information and
 *  OS network device settings (MTU size).
 **/
3943static int igb_sw_init(struct igb_adapter *adapter)
3944{
3945 struct e1000_hw *hw = &adapter->hw;
3946 struct net_device *netdev = adapter->netdev;
3947 struct pci_dev *pdev = adapter->pdev;
3948
3949 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3950
3951
3952 adapter->tx_ring_count = IGB_DEFAULT_TXD;
3953 adapter->rx_ring_count = IGB_DEFAULT_RXD;
3954
3955
3956 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
3957 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
3958
3959
3960 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3961
3962 adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
3963 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
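	/* max_frame_size is the MTU plus IGB_ETH_PKT_HDR_PAD, which is
	 * assumed here to account for the Ethernet header, VLAN tag(s) and
	 * FCS; min_frame_size is a minimum-sized Ethernet frame with FCS.
	 */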
3964
3965 spin_lock_init(&adapter->nfc_lock);
3966 spin_lock_init(&adapter->stats64_lock);
3967#ifdef CONFIG_PCI_IOV
3968 switch (hw->mac.type) {
3969 case e1000_82576:
3970 case e1000_i350:
3971 if (max_vfs > 7) {
3972 dev_warn(&pdev->dev,
3973 "Maximum of 7 VFs per PF, using max\n");
3974 max_vfs = adapter->vfs_allocated_count = 7;
3975 } else
3976 adapter->vfs_allocated_count = max_vfs;
3977 if (adapter->vfs_allocated_count)
3978 dev_warn(&pdev->dev,
3979 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3980 break;
3981 default:
3982 break;
3983 }
3984#endif
3985
3986
3987 adapter->flags |= IGB_FLAG_HAS_MSIX;
3988
3989 adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
3990 sizeof(struct igb_mac_addr),
3991 GFP_KERNEL);
3992 if (!adapter->mac_table)
3993 return -ENOMEM;
3994
3995 igb_probe_vfs(adapter);
3996
3997 igb_init_queue_configuration(adapter);
3998
3999
4000 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
4001 GFP_KERNEL);
4002 if (!adapter->shadow_vfta)
4003 return -ENOMEM;
4004
4005
4006 if (igb_init_interrupt_scheme(adapter, true)) {
4007 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
4008 return -ENOMEM;
4009 }
4010
4011
4012 igb_irq_disable(adapter);
4013
4014 if (hw->mac.type >= e1000_i350)
4015 adapter->flags &= ~IGB_FLAG_DMAC;
4016
4017 set_bit(__IGB_DOWN, &adapter->state);
4018 return 0;
4019}
4020
/**
 *  __igb_open - Called when a network interface is made active
 *  @netdev: network interface device structure
 *  @resuming: indicates whether we are in a resume call
 *
 *  Returns 0 on success, negative value on failure
 *
 *  The open entry point is called when a network interface is made
 *  active by the system (IFF_UP).  At this point all resources needed
 *  for transmit and receive operations are allocated, the interrupt
 *  handler is registered with the OS, the watchdog timer is started,
 *  and the stack is notified that the interface is ready.
 **/
4034static int __igb_open(struct net_device *netdev, bool resuming)
4035{
4036 struct igb_adapter *adapter = netdev_priv(netdev);
4037 struct e1000_hw *hw = &adapter->hw;
4038 struct pci_dev *pdev = adapter->pdev;
4039 int err;
4040 int i;
4041
4042
4043 if (test_bit(__IGB_TESTING, &adapter->state)) {
4044 WARN_ON(resuming);
4045 return -EBUSY;
4046 }
4047
4048 if (!resuming)
4049 pm_runtime_get_sync(&pdev->dev);
4050
4051 netif_carrier_off(netdev);
4052
4053
4054 err = igb_setup_all_tx_resources(adapter);
4055 if (err)
4056 goto err_setup_tx;
4057
4058
4059 err = igb_setup_all_rx_resources(adapter);
4060 if (err)
4061 goto err_setup_rx;
4062
4063 igb_power_up_link(adapter);
4064
	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we request it, so the rings must be fully configured
	 * beforehand.
	 */
4070 igb_configure(adapter);
4071
4072 err = igb_request_irq(adapter);
4073 if (err)
4074 goto err_req_irq;
4075
4076
4077 err = netif_set_real_num_tx_queues(adapter->netdev,
4078 adapter->num_tx_queues);
4079 if (err)
4080 goto err_set_queues;
4081
4082 err = netif_set_real_num_rx_queues(adapter->netdev,
4083 adapter->num_rx_queues);
4084 if (err)
4085 goto err_set_queues;
4086
4087
4088 clear_bit(__IGB_DOWN, &adapter->state);
4089
4090 for (i = 0; i < adapter->num_q_vectors; i++)
4091 napi_enable(&(adapter->q_vector[i]->napi));
4092
4093
4094 rd32(E1000_TSICR);
4095 rd32(E1000_ICR);
4096
4097 igb_irq_enable(adapter);
4098
4099
4100 if (adapter->vfs_allocated_count) {
4101 u32 reg_data = rd32(E1000_CTRL_EXT);
4102
4103 reg_data |= E1000_CTRL_EXT_PFRSTD;
4104 wr32(E1000_CTRL_EXT, reg_data);
4105 }
4106
4107 netif_tx_start_all_queues(netdev);
4108
4109 if (!resuming)
4110 pm_runtime_put(&pdev->dev);
4111
4112
4113 hw->mac.get_link_status = 1;
4114 schedule_work(&adapter->watchdog_task);
4115
4116 return 0;
4117
4118err_set_queues:
4119 igb_free_irq(adapter);
4120err_req_irq:
4121 igb_release_hw_control(adapter);
4122 igb_power_down_link(adapter);
4123 igb_free_all_rx_resources(adapter);
4124err_setup_rx:
4125 igb_free_all_tx_resources(adapter);
4126err_setup_tx:
4127 igb_reset(adapter);
4128 if (!resuming)
4129 pm_runtime_put(&pdev->dev);
4130
4131 return err;
4132}
4133
4134int igb_open(struct net_device *netdev)
4135{
4136 return __igb_open(netdev, false);
4137}
4138
/**
 *  __igb_close - Disables a network interface
 *  @netdev: network interface device structure
 *  @suspending: indicates we are in a suspend call
 *
 *  Returns 0, this is not allowed to fail
 *
 *  The close entry point is called when an interface is de-activated
 *  by the OS.  The hardware is still under the driver's control, but
 *  needs to be disabled.  A global MAC reset is issued to stop the
 *  hardware, and all transmit and receive resources are freed.
 **/
4151static int __igb_close(struct net_device *netdev, bool suspending)
4152{
4153 struct igb_adapter *adapter = netdev_priv(netdev);
4154 struct pci_dev *pdev = adapter->pdev;
4155
4156 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
4157
4158 if (!suspending)
4159 pm_runtime_get_sync(&pdev->dev);
4160
4161 igb_down(adapter);
4162 igb_free_irq(adapter);
4163
4164 igb_free_all_tx_resources(adapter);
4165 igb_free_all_rx_resources(adapter);
4166
4167 if (!suspending)
4168 pm_runtime_put_sync(&pdev->dev);
4169 return 0;
4170}
4171
4172int igb_close(struct net_device *netdev)
4173{
4174 if (netif_device_present(netdev) || netdev->dismantle)
4175 return __igb_close(netdev, false);
4176 return 0;
4177}
4178
/**
 *  igb_setup_tx_resources - allocate Tx resources (Descriptors)
 *  @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 *  Return 0 on success, negative on failure
 **/
4185int igb_setup_tx_resources(struct igb_ring *tx_ring)
4186{
4187 struct device *dev = tx_ring->dev;
4188 int size;
4189
4190 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
4191
4192 tx_ring->tx_buffer_info = vmalloc(size);
4193 if (!tx_ring->tx_buffer_info)
4194 goto err;
4195
	/* round up to nearest 4K */
4197 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
4198 tx_ring->size = ALIGN(tx_ring->size, 4096);
4199
4200 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4201 &tx_ring->dma, GFP_KERNEL);
4202 if (!tx_ring->desc)
4203 goto err;
4204
4205 tx_ring->next_to_use = 0;
4206 tx_ring->next_to_clean = 0;
4207
4208 return 0;
4209
4210err:
4211 vfree(tx_ring->tx_buffer_info);
4212 tx_ring->tx_buffer_info = NULL;
4213 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4214 return -ENOMEM;
4215}
4216
/**
 *  igb_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 *  @adapter: board private structure
 *
 *  Return 0 on success, negative on failure
 **/
4224static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
4225{
4226 struct pci_dev *pdev = adapter->pdev;
4227 int i, err = 0;
4228
4229 for (i = 0; i < adapter->num_tx_queues; i++) {
4230 err = igb_setup_tx_resources(adapter->tx_ring[i]);
4231 if (err) {
4232 dev_err(&pdev->dev,
4233 "Allocation for Tx Queue %u failed\n", i);
4234 for (i--; i >= 0; i--)
4235 igb_free_tx_resources(adapter->tx_ring[i]);
4236 break;
4237 }
4238 }
4239
4240 return err;
4241}
4242
/**
 *  igb_setup_tctl - configure the transmit control registers
 *  @adapter: Board private structure
 **/
4247void igb_setup_tctl(struct igb_adapter *adapter)
4248{
4249 struct e1000_hw *hw = &adapter->hw;
4250 u32 tctl;
4251
4252
4253 wr32(E1000_TXDCTL(0), 0);
4254
4255
4256 tctl = rd32(E1000_TCTL);
4257 tctl &= ~E1000_TCTL_CT;
4258 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
4259 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
4260
4261 igb_config_collision_dist(hw);
4262
4263
4264 tctl |= E1000_TCTL_EN;
4265
4266 wr32(E1000_TCTL, tctl);
4267}
4268
/**
 *  igb_configure_tx_ring - Configure transmit ring after Reset
 *  @adapter: board private structure
 *  @ring: tx ring to configure
 *
 *  Configure a transmit ring after a reset.
 **/
4276void igb_configure_tx_ring(struct igb_adapter *adapter,
4277 struct igb_ring *ring)
4278{
4279 struct e1000_hw *hw = &adapter->hw;
4280 u32 txdctl = 0;
4281 u64 tdba = ring->dma;
4282 int reg_idx = ring->reg_idx;
4283
4284 wr32(E1000_TDLEN(reg_idx),
4285 ring->count * sizeof(union e1000_adv_tx_desc));
4286 wr32(E1000_TDBAL(reg_idx),
4287 tdba & 0x00000000ffffffffULL);
4288 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
4289
4290 ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
4291 wr32(E1000_TDH(reg_idx), 0);
4292 writel(0, ring->tail);
4293
4294 txdctl |= IGB_TX_PTHRESH;
4295 txdctl |= IGB_TX_HTHRESH << 8;
4296 txdctl |= IGB_TX_WTHRESH << 16;
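	/* the prefetch (PTHRESH), host (HTHRESH) and write-back (WTHRESH)
	 * thresholds occupy successive byte-aligned fields of TXDCTL,
	 * hence the 0/8/16-bit shifts above
	 */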
4297
4298
4299 memset(ring->tx_buffer_info, 0,
4300 sizeof(struct igb_tx_buffer) * ring->count);
4301
4302 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
4303 wr32(E1000_TXDCTL(reg_idx), txdctl);
4304}
4305
/**
 *  igb_configure_tx - Configure transmit Unit after Reset
 *  @adapter: board private structure
 *
 *  Configure the Tx unit of the MAC after a reset.
 **/
4312static void igb_configure_tx(struct igb_adapter *adapter)
4313{
4314 struct e1000_hw *hw = &adapter->hw;
4315 int i;
4316
4317
4318 for (i = 0; i < adapter->num_tx_queues; i++)
4319 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4320
4321 wrfl();
4322 usleep_range(10000, 20000);
4323
4324 for (i = 0; i < adapter->num_tx_queues; i++)
4325 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4326}
4327
/**
 *  igb_setup_rx_resources - allocate Rx resources (Descriptors)
 *  @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 *  Returns 0 on success, negative on failure
 **/
4334int igb_setup_rx_resources(struct igb_ring *rx_ring)
4335{
4336 struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
4337 struct device *dev = rx_ring->dev;
4338 int size;
4339
4340 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
4341
4342 rx_ring->rx_buffer_info = vmalloc(size);
4343 if (!rx_ring->rx_buffer_info)
4344 goto err;
4345
4346
4347 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
4348 rx_ring->size = ALIGN(rx_ring->size, 4096);
4349
4350 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4351 &rx_ring->dma, GFP_KERNEL);
4352 if (!rx_ring->desc)
4353 goto err;
4354
4355 rx_ring->next_to_alloc = 0;
4356 rx_ring->next_to_clean = 0;
4357 rx_ring->next_to_use = 0;
4358
4359 rx_ring->xdp_prog = adapter->xdp_prog;
4360
4361
4362 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
4363 rx_ring->queue_index, 0) < 0)
4364 goto err;
4365
4366 return 0;
4367
4368err:
4369 vfree(rx_ring->rx_buffer_info);
4370 rx_ring->rx_buffer_info = NULL;
4371 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4372 return -ENOMEM;
4373}
4374
/**
 *  igb_setup_all_rx_resources - wrapper to allocate Rx resources for all queues
 *  @adapter: board private structure
 *
 *  Return 0 on success, negative on failure
 **/
4382static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
4383{
4384 struct pci_dev *pdev = adapter->pdev;
4385 int i, err = 0;
4386
4387 for (i = 0; i < adapter->num_rx_queues; i++) {
4388 err = igb_setup_rx_resources(adapter->rx_ring[i]);
4389 if (err) {
4390 dev_err(&pdev->dev,
4391 "Allocation for Rx Queue %u failed\n", i);
4392 for (i--; i >= 0; i--)
4393 igb_free_rx_resources(adapter->rx_ring[i]);
4394 break;
4395 }
4396 }
4397
4398 return err;
4399}
4400
/**
 *  igb_setup_mrqc - configure the multiple receive queue control registers
 *  @adapter: Board private structure
 **/
4405static void igb_setup_mrqc(struct igb_adapter *adapter)
4406{
4407 struct e1000_hw *hw = &adapter->hw;
4408 u32 mrqc, rxcsum;
4409 u32 j, num_rx_queues;
4410 u32 rss_key[10];
4411
4412 netdev_rss_key_fill(rss_key, sizeof(rss_key));
4413 for (j = 0; j < 10; j++)
4414 wr32(E1000_RSSRK(j), rss_key[j]);
4415
4416 num_rx_queues = adapter->rss_queues;
4417
4418 switch (hw->mac.type) {
4419 case e1000_82576:
4420
4421 if (adapter->vfs_allocated_count)
4422 num_rx_queues = 2;
4423 break;
4424 default:
4425 break;
4426 }
4427
4428 if (adapter->rss_indir_tbl_init != num_rx_queues) {
4429 for (j = 0; j < IGB_RETA_SIZE; j++)
4430 adapter->rss_indir_tbl[j] =
4431 (j * num_rx_queues) / IGB_RETA_SIZE;
4432 adapter->rss_indir_tbl_init = num_rx_queues;
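		/* e.g. with 4 RSS queues and the usual 128-entry indirection
		 * table, indices 0-31 map to queue 0, 32-63 to queue 1, and
		 * so on
		 */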
4433 }
4434 igb_write_rss_indir_tbl(adapter);
4435
4436
4437
4438
4439
4440 rxcsum = rd32(E1000_RXCSUM);
4441 rxcsum |= E1000_RXCSUM_PCSD;
4442
4443 if (adapter->hw.mac.type >= e1000_82576)
4444
4445 rxcsum |= E1000_RXCSUM_CRCOFL;
4446
4447
4448 wr32(E1000_RXCSUM, rxcsum);
4449
4450
4451
4452
4453 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
4454 E1000_MRQC_RSS_FIELD_IPV4_TCP |
4455 E1000_MRQC_RSS_FIELD_IPV6 |
4456 E1000_MRQC_RSS_FIELD_IPV6_TCP |
4457 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
4458
4459 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
4460 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
4461 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
4462 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
4463
4464
4465
4466
4467
4468 if (adapter->vfs_allocated_count) {
4469 if (hw->mac.type > e1000_82575) {
4470
4471 u32 vtctl = rd32(E1000_VT_CTL);
4472
4473 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
4474 E1000_VT_CTL_DISABLE_DEF_POOL);
4475 vtctl |= adapter->vfs_allocated_count <<
4476 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
4477 wr32(E1000_VT_CTL, vtctl);
4478 }
4479 if (adapter->rss_queues > 1)
4480 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
4481 else
4482 mrqc |= E1000_MRQC_ENABLE_VMDQ;
4483 } else {
4484 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4485 }
4486 igb_vmm_control(adapter);
4487
4488 wr32(E1000_MRQC, mrqc);
4489}
4490
/**
 *  igb_setup_rctl - configure the receive control registers
 *  @adapter: Board private structure
 **/
4495void igb_setup_rctl(struct igb_adapter *adapter)
4496{
4497 struct e1000_hw *hw = &adapter->hw;
4498 u32 rctl;
4499
4500 rctl = rd32(E1000_RCTL);
4501
4502 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4503 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4504
4505 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4506 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4507
4508
4509
4510
4511
4512 rctl |= E1000_RCTL_SECRC;
4513
4514
4515 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
4516
4517
4518 rctl |= E1000_RCTL_LPE;
4519
4520
4521 wr32(E1000_RXDCTL(0), 0);
4522
	/* When SR-IOV is enabled, set queue drop enable for all queues to
	 * prevent head-of-line blocking if an untrusted VF stops providing
	 * receive descriptors.
	 */
4527 if (adapter->vfs_allocated_count) {
4528
4529 wr32(E1000_QDE, ALL_QUEUES);
4530 }
4531
4532
4533 if (adapter->netdev->features & NETIF_F_RXALL) {
4534
4535
4536
4537 rctl |= (E1000_RCTL_SBP |
4538 E1000_RCTL_BAM |
4539 E1000_RCTL_PMCF);
4540
4541 rctl &= ~(E1000_RCTL_DPF |
4542 E1000_RCTL_CFIEN);
4543
4544
4545
4546 }
4547
4548 wr32(E1000_RCTL, rctl);
4549}
4550
4551static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4552 int vfn)
4553{
4554 struct e1000_hw *hw = &adapter->hw;
4555 u32 vmolr;
4556
4557 if (size > MAX_JUMBO_FRAME_SIZE)
4558 size = MAX_JUMBO_FRAME_SIZE;
4559
4560 vmolr = rd32(E1000_VMOLR(vfn));
4561 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4562 vmolr |= size | E1000_VMOLR_LPE;
4563 wr32(E1000_VMOLR(vfn), vmolr);
4564
4565 return 0;
4566}
4567
4568static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4569 int vfn, bool enable)
4570{
4571 struct e1000_hw *hw = &adapter->hw;
4572 u32 val, reg;
4573
4574 if (hw->mac.type < e1000_82576)
4575 return;
4576
4577 if (hw->mac.type == e1000_i350)
4578 reg = E1000_DVMOLR(vfn);
4579 else
4580 reg = E1000_VMOLR(vfn);
4581
4582 val = rd32(reg);
4583 if (enable)
4584 val |= E1000_VMOLR_STRVLAN;
4585 else
4586 val &= ~(E1000_VMOLR_STRVLAN);
4587 wr32(reg, val);
4588}
4589
4590static inline void igb_set_vmolr(struct igb_adapter *adapter,
4591 int vfn, bool aupe)
4592{
4593 struct e1000_hw *hw = &adapter->hw;
4594 u32 vmolr;
4595
4596
4597
4598
4599 if (hw->mac.type < e1000_82576)
4600 return;
4601
4602 vmolr = rd32(E1000_VMOLR(vfn));
4603 if (aupe)
4604 vmolr |= E1000_VMOLR_AUPE;
4605 else
4606 vmolr &= ~(E1000_VMOLR_AUPE);
4607
4608
4609 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4610
4611 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
4612 vmolr |= E1000_VMOLR_RSSE;
4613
4614
4615
4616 if (vfn <= adapter->vfs_allocated_count)
4617 vmolr |= E1000_VMOLR_BAM;
4618
4619 wr32(E1000_VMOLR(vfn), vmolr);
4620}
4621
/**
 *  igb_setup_srrctl - configure the split and replication receive control
 *  @adapter: Board private structure
 *  @ring: receive ring to be configured
 **/
4628void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
4629{
4630 struct e1000_hw *hw = &adapter->hw;
4631 int reg_idx = ring->reg_idx;
4632 u32 srrctl = 0;
4633
4634 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4635 if (ring_uses_large_buffer(ring))
4636 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4637 else
4638 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
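	/* BSIZEPKT is expressed in 1 KB units, so the shift converts the
	 * buffer size from bytes to KB
	 */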
4639 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4640 if (hw->mac.type >= e1000_82580)
4641 srrctl |= E1000_SRRCTL_TIMESTAMP;
4642
4643
4644
4645 if (adapter->vfs_allocated_count ||
4646 (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
4647 adapter->num_rx_queues > 1))
4648 srrctl |= E1000_SRRCTL_DROP_EN;
4649
4650 wr32(E1000_SRRCTL(reg_idx), srrctl);
4651}
4652
/**
 *  igb_configure_rx_ring - Configure a receive ring after Reset
 *  @adapter: board private structure
 *  @ring: receive ring to be configured
 *
 *  Configure the Rx unit of the MAC after a reset.
 **/
4660void igb_configure_rx_ring(struct igb_adapter *adapter,
4661 struct igb_ring *ring)
4662{
4663 struct e1000_hw *hw = &adapter->hw;
4664 union e1000_adv_rx_desc *rx_desc;
4665 u64 rdba = ring->dma;
4666 int reg_idx = ring->reg_idx;
4667 u32 rxdctl = 0;
4668
4669 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4670 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4671 MEM_TYPE_PAGE_SHARED, NULL));
4672
4673
4674 wr32(E1000_RXDCTL(reg_idx), 0);
4675
4676
4677 wr32(E1000_RDBAL(reg_idx),
4678 rdba & 0x00000000ffffffffULL);
4679 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4680 wr32(E1000_RDLEN(reg_idx),
4681 ring->count * sizeof(union e1000_adv_rx_desc));
4682
4683
4684 ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
4685 wr32(E1000_RDH(reg_idx), 0);
4686 writel(0, ring->tail);
4687
4688
4689 igb_setup_srrctl(adapter, ring);
4690
4691
4692 igb_set_vmolr(adapter, reg_idx & 0x7, true);
4693
4694 rxdctl |= IGB_RX_PTHRESH;
4695 rxdctl |= IGB_RX_HTHRESH << 8;
4696 rxdctl |= IGB_RX_WTHRESH << 16;
4697
4698
4699 memset(ring->rx_buffer_info, 0,
4700 sizeof(struct igb_rx_buffer) * ring->count);
4701
4702
4703 rx_desc = IGB_RX_DESC(ring, 0);
4704 rx_desc->wb.upper.length = 0;
4705
4706
4707 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4708 wr32(E1000_RXDCTL(reg_idx), rxdctl);
4709}
4710
4711static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4712 struct igb_ring *rx_ring)
4713{
4714
4715 clear_ring_build_skb_enabled(rx_ring);
4716 clear_ring_uses_large_buffer(rx_ring);
4717
4718 if (adapter->flags & IGB_FLAG_RX_LEGACY)
4719 return;
4720
4721 set_ring_build_skb_enabled(rx_ring);
4722
4723#if (PAGE_SIZE < 8192)
4724 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4725 return;
4726
4727 set_ring_uses_large_buffer(rx_ring);
4728#endif
4729}
4730
/**
 *  igb_configure_rx - Configure receive Unit after Reset
 *  @adapter: board private structure
 *
 *  Configure the Rx unit of the MAC after a reset.
 **/
4737static void igb_configure_rx(struct igb_adapter *adapter)
4738{
4739 int i;
4740
4741
4742 igb_set_default_mac_filter(adapter);
4743
4744
4745
4746
4747 for (i = 0; i < adapter->num_rx_queues; i++) {
4748 struct igb_ring *rx_ring = adapter->rx_ring[i];
4749
4750 igb_set_rx_buffer_len(adapter, rx_ring);
4751 igb_configure_rx_ring(adapter, rx_ring);
4752 }
4753}
4754
/**
 *  igb_free_tx_resources - Free Tx Resources per Queue
 *  @tx_ring: Tx descriptor ring for a specific queue
 *
 *  Free all transmit software resources
 **/
4761void igb_free_tx_resources(struct igb_ring *tx_ring)
4762{
4763 igb_clean_tx_ring(tx_ring);
4764
4765 vfree(tx_ring->tx_buffer_info);
4766 tx_ring->tx_buffer_info = NULL;
4767
4768
4769 if (!tx_ring->desc)
4770 return;
4771
4772 dma_free_coherent(tx_ring->dev, tx_ring->size,
4773 tx_ring->desc, tx_ring->dma);
4774
4775 tx_ring->desc = NULL;
4776}
4777
/**
 *  igb_free_all_tx_resources - Free Tx Resources for All Queues
 *  @adapter: board private structure
 *
 *  Free all transmit software resources
 **/
4784static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4785{
4786 int i;
4787
4788 for (i = 0; i < adapter->num_tx_queues; i++)
4789 if (adapter->tx_ring[i])
4790 igb_free_tx_resources(adapter->tx_ring[i]);
4791}
4792
/**
 *  igb_clean_tx_ring - Free Tx Buffers
 *  @tx_ring: ring to be cleaned
 **/
4797static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4798{
4799 u16 i = tx_ring->next_to_clean;
4800 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4801
4802 while (i != tx_ring->next_to_use) {
4803 union e1000_adv_tx_desc *eop_desc, *tx_desc;
4804
		/* Free all the Tx ring sk_buffs or xdp frames */
		if (tx_buffer->type == IGB_TYPE_SKB)
			dev_kfree_skb_any(tx_buffer->skb);
		else
			xdp_return_frame(tx_buffer->xdpf);
4807
4808
4809 dma_unmap_single(tx_ring->dev,
4810 dma_unmap_addr(tx_buffer, dma),
4811 dma_unmap_len(tx_buffer, len),
4812 DMA_TO_DEVICE);
4813
4814
4815 eop_desc = tx_buffer->next_to_watch;
4816 tx_desc = IGB_TX_DESC(tx_ring, i);
4817
4818
4819 while (tx_desc != eop_desc) {
4820 tx_buffer++;
4821 tx_desc++;
4822 i++;
4823 if (unlikely(i == tx_ring->count)) {
4824 i = 0;
4825 tx_buffer = tx_ring->tx_buffer_info;
4826 tx_desc = IGB_TX_DESC(tx_ring, 0);
4827 }
4828
4829
4830 if (dma_unmap_len(tx_buffer, len))
4831 dma_unmap_page(tx_ring->dev,
4832 dma_unmap_addr(tx_buffer, dma),
4833 dma_unmap_len(tx_buffer, len),
4834 DMA_TO_DEVICE);
4835 }
4836
4837
4838 tx_buffer++;
4839 i++;
4840 if (unlikely(i == tx_ring->count)) {
4841 i = 0;
4842 tx_buffer = tx_ring->tx_buffer_info;
4843 }
4844 }
4845
4846
4847 netdev_tx_reset_queue(txring_txq(tx_ring));
4848
4849
4850 tx_ring->next_to_use = 0;
4851 tx_ring->next_to_clean = 0;
4852}
4853
/**
 *  igb_clean_all_tx_rings - Free Tx Buffers for all queues
 *  @adapter: board private structure
 **/
4858static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4859{
4860 int i;
4861
4862 for (i = 0; i < adapter->num_tx_queues; i++)
4863 if (adapter->tx_ring[i])
4864 igb_clean_tx_ring(adapter->tx_ring[i]);
4865}
4866
/**
 *  igb_free_rx_resources - Free Rx Resources
 *  @rx_ring: ring to clean the resources from
 *
 *  Free all receive software resources
 **/
4873void igb_free_rx_resources(struct igb_ring *rx_ring)
4874{
4875 igb_clean_rx_ring(rx_ring);
4876
4877 rx_ring->xdp_prog = NULL;
4878 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4879 vfree(rx_ring->rx_buffer_info);
4880 rx_ring->rx_buffer_info = NULL;
4881
4882
4883 if (!rx_ring->desc)
4884 return;
4885
4886 dma_free_coherent(rx_ring->dev, rx_ring->size,
4887 rx_ring->desc, rx_ring->dma);
4888
4889 rx_ring->desc = NULL;
4890}
4891
/**
 *  igb_free_all_rx_resources - Free Rx Resources for All Queues
 *  @adapter: board private structure
 *
 *  Free all receive software resources
 **/
4898static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4899{
4900 int i;
4901
4902 for (i = 0; i < adapter->num_rx_queues; i++)
4903 if (adapter->rx_ring[i])
4904 igb_free_rx_resources(adapter->rx_ring[i]);
4905}
4906
/**
 *  igb_clean_rx_ring - Free Rx Buffers per Queue
 *  @rx_ring: ring to free buffers from
 **/
4911static void igb_clean_rx_ring(struct igb_ring *rx_ring)
4912{
4913 u16 i = rx_ring->next_to_clean;
4914
4915 dev_kfree_skb(rx_ring->skb);
4916 rx_ring->skb = NULL;
4917
4918
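	/* Free all the Rx ring pages still owned by the driver */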
4919 while (i != rx_ring->next_to_alloc) {
4920 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
4921
		/* Invalidate cache lines that may have been written to by
		 * the device so that we avoid corrupting memory.
		 */
4925 dma_sync_single_range_for_cpu(rx_ring->dev,
4926 buffer_info->dma,
4927 buffer_info->page_offset,
4928 igb_rx_bufsz(rx_ring),
4929 DMA_FROM_DEVICE);
4930
4931
4932 dma_unmap_page_attrs(rx_ring->dev,
4933 buffer_info->dma,
4934 igb_rx_pg_size(rx_ring),
4935 DMA_FROM_DEVICE,
4936 IGB_RX_DMA_ATTR);
4937 __page_frag_cache_drain(buffer_info->page,
4938 buffer_info->pagecnt_bias);
4939
4940 i++;
4941 if (i == rx_ring->count)
4942 i = 0;
4943 }
4944
4945 rx_ring->next_to_alloc = 0;
4946 rx_ring->next_to_clean = 0;
4947 rx_ring->next_to_use = 0;
4948}
4949
/**
 *  igb_clean_all_rx_rings - Free Rx Buffers for all queues
 *  @adapter: board private structure
 **/
4954static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4955{
4956 int i;
4957
4958 for (i = 0; i < adapter->num_rx_queues; i++)
4959 if (adapter->rx_ring[i])
4960 igb_clean_rx_ring(adapter->rx_ring[i]);
4961}
4962
/**
 *  igb_set_mac - Change the Ethernet Address of the NIC
 *  @netdev: network interface device structure
 *  @p: pointer to an address structure
 *
 *  Returns 0 on success, negative on failure
 **/
4970static int igb_set_mac(struct net_device *netdev, void *p)
4971{
4972 struct igb_adapter *adapter = netdev_priv(netdev);
4973 struct e1000_hw *hw = &adapter->hw;
4974 struct sockaddr *addr = p;
4975
4976 if (!is_valid_ether_addr(addr->sa_data))
4977 return -EADDRNOTAVAIL;
4978
4979 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4980 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
4981
4982
4983 igb_set_default_mac_filter(adapter);
4984
4985 return 0;
4986}
4987
/**
 *  igb_write_mc_addr_list - write multicast addresses to MTA
 *  @netdev: network interface device structure
 *
 *  Writes multicast address list to the MTA hash table.
 *  Returns: -ENOMEM on failure
 *           0 on no addresses written
 *           X on writing X addresses to MTA
 **/
4997static int igb_write_mc_addr_list(struct net_device *netdev)
4998{
4999 struct igb_adapter *adapter = netdev_priv(netdev);
5000 struct e1000_hw *hw = &adapter->hw;
5001 struct netdev_hw_addr *ha;
5002 u8 *mta_list;
5003 int i;
5004
5005 if (netdev_mc_empty(netdev)) {
5006
5007 igb_update_mc_addr_list(hw, NULL, 0);
5008 igb_restore_vf_multicasts(adapter);
5009 return 0;
5010 }
5011
5012 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
5013 if (!mta_list)
5014 return -ENOMEM;
5015
5016
5017 i = 0;
5018 netdev_for_each_mc_addr(ha, netdev)
5019 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
5020
5021 igb_update_mc_addr_list(hw, mta_list, i);
5022 kfree(mta_list);
5023
5024 return netdev_mc_count(netdev);
5025}
5026
5027static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
5028{
5029 struct e1000_hw *hw = &adapter->hw;
5030 u32 i, pf_id;
5031
5032 switch (hw->mac.type) {
5033 case e1000_i210:
5034 case e1000_i211:
5035 case e1000_i350:
5036
5037 if (adapter->netdev->features & NETIF_F_NTUPLE)
5038 break;
5039 fallthrough;
5040 case e1000_82576:
5041 case e1000_82580:
5042 case e1000_i354:
5043
5044 if (adapter->vfs_allocated_count)
5045 break;
5046 fallthrough;
5047 default:
5048 return 1;
5049 }
5050
5051
5052 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
5053 return 0;
5054
5055 if (!adapter->vfs_allocated_count)
5056 goto set_vfta;
5057
5058
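	/* add the PF to the pool-select bits of every VLVF entry */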
5059 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
5060
5061 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
5062 u32 vlvf = rd32(E1000_VLVF(i));
5063
5064 vlvf |= BIT(pf_id);
5065 wr32(E1000_VLVF(i), vlvf);
5066 }
5067
5068set_vfta:
5069
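	/* set all bits in the VLAN filter table array */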
5070 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
5071 hw->mac.ops.write_vfta(hw, i, ~0U);
5072
5073
5074 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
5075
5076 return 0;
5077}
5078
5079#define VFTA_BLOCK_SIZE 8
5080static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
5081{
5082 struct e1000_hw *hw = &adapter->hw;
5083 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
5084 u32 vid_start = vfta_offset * 32;
5085 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
5086 u32 i, vid, word, bits, pf_id;
5087
5088
5089 vid = adapter->mng_vlan_id;
5090 if (vid >= vid_start && vid < vid_end)
5091 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
5092
5093 if (!adapter->vfs_allocated_count)
5094 goto set_vfta;
5095
5096 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
5097
5098 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
5099 u32 vlvf = rd32(E1000_VLVF(i));
5100
5101
5102 vid = vlvf & VLAN_VID_MASK;
5103
5104
5105 if (vid < vid_start || vid >= vid_end)
5106 continue;
5107
5108 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
5109
5110 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
5111
5112
5113 if (test_bit(vid, adapter->active_vlans))
5114 continue;
5115 }
5116
5117
5118 bits = ~BIT(pf_id);
5119 bits &= rd32(E1000_VLVF(i));
5120 wr32(E1000_VLVF(i), bits);
5121 }
5122
5123set_vfta:
5124
5125 for (i = VFTA_BLOCK_SIZE; i--;) {
5126 vid = (vfta_offset + i) * 32;
5127 word = vid / BITS_PER_LONG;
5128 bits = vid % BITS_PER_LONG;
5129
5130 vfta[i] |= adapter->active_vlans[word] >> bits;
5131
5132 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
5133 }
5134}
5135
5136static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
5137{
5138 u32 i;
5139
5140
5141 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
5142 return;
5143
5144
5145 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
5146
5147 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
5148 igb_scrub_vfta(adapter, i);
5149}
5150
/**
 *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 *  @netdev: network interface device structure
 *
 *  The set_rx_mode entry point is called whenever the unicast or multicast
 *  address lists or the network interface flags are updated.  This routine is
 *  responsible for configuring the hardware for proper unicast, multicast,
 *  promiscuous mode, and VLAN settings.
 **/
5160static void igb_set_rx_mode(struct net_device *netdev)
5161{
5162 struct igb_adapter *adapter = netdev_priv(netdev);
5163 struct e1000_hw *hw = &adapter->hw;
5164 unsigned int vfn = adapter->vfs_allocated_count;
5165 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
5166 int count;
5167
5168
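	/* Check for Promiscuous and All Multicast modes */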
5169 if (netdev->flags & IFF_PROMISC) {
5170 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
5171 vmolr |= E1000_VMOLR_MPME;
5172
5173
5174 if (hw->mac.type == e1000_82576)
5175 vmolr |= E1000_VMOLR_ROPE;
5176 } else {
5177 if (netdev->flags & IFF_ALLMULTI) {
5178 rctl |= E1000_RCTL_MPE;
5179 vmolr |= E1000_VMOLR_MPME;
5180 } else {
			/* Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
5185 count = igb_write_mc_addr_list(netdev);
5186 if (count < 0) {
5187 rctl |= E1000_RCTL_MPE;
5188 vmolr |= E1000_VMOLR_MPME;
5189 } else if (count) {
5190 vmolr |= E1000_VMOLR_ROMPE;
5191 }
5192 }
5193 }
5194
	/* Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
5199 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
5200 rctl |= E1000_RCTL_UPE;
5201 vmolr |= E1000_VMOLR_ROPE;
5202 }
5203
5204
5205 rctl |= E1000_RCTL_VFE;
5206
5207
5208 if ((netdev->flags & IFF_PROMISC) ||
5209 (netdev->features & NETIF_F_RXALL)) {
5210
5211 if (igb_vlan_promisc_enable(adapter))
5212 rctl &= ~E1000_RCTL_VFE;
5213 } else {
5214 igb_vlan_promisc_disable(adapter);
5215 }
5216
5217
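	/* update the state of unicast, multicast, and VLAN filtering modes */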
5218 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
5219 E1000_RCTL_VFE);
5220 wr32(E1000_RCTL, rctl);
5221
5222#if (PAGE_SIZE < 8192)
5223 if (!adapter->vfs_allocated_count) {
5224 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5225 rlpml = IGB_MAX_FRAME_BUILD_SKB;
5226 }
5227#endif
5228 wr32(E1000_RLPML, rlpml);
5229
	/* In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
5235 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
5236 return;
5237
5238
5239 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
5240
5241 vmolr |= rd32(E1000_VMOLR(vfn)) &
5242 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
5243
5244
5245 vmolr &= ~E1000_VMOLR_RLPML_MASK;
5246#if (PAGE_SIZE < 8192)
5247 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5248 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
5249 else
5250#endif
5251 vmolr |= MAX_JUMBO_FRAME_SIZE;
5252 vmolr |= E1000_VMOLR_LPE;
5253
5254 wr32(E1000_VMOLR(vfn), vmolr);
5255
5256 igb_restore_vf_multicasts(adapter);
5257}
5258
5259static void igb_check_wvbr(struct igb_adapter *adapter)
5260{
5261 struct e1000_hw *hw = &adapter->hw;
5262 u32 wvbr = 0;
5263
5264 switch (hw->mac.type) {
5265 case e1000_82576:
5266 case e1000_i350:
5267 wvbr = rd32(E1000_WVBR);
5268 if (!wvbr)
5269 return;
5270 break;
5271 default:
5272 break;
5273 }
5274
5275 adapter->wvbr |= wvbr;
5276}
5277
5278#define IGB_STAGGERED_QUEUE_OFFSET 8
5279
5280static void igb_spoof_check(struct igb_adapter *adapter)
5281{
5282 int j;
5283
5284 if (!adapter->wvbr)
5285 return;
5286
5287 for (j = 0; j < adapter->vfs_allocated_count; j++) {
5288 if (adapter->wvbr & BIT(j) ||
5289 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
5290 dev_warn(&adapter->pdev->dev,
5291 "Spoof event(s) detected on VF %d\n", j);
5292 adapter->wvbr &=
5293 ~(BIT(j) |
5294 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
5295 }
5296 }
5297}
5298
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
5302static void igb_update_phy_info(struct timer_list *t)
5303{
5304 struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5305 igb_get_phy_info(&adapter->hw);
5306}
5307
/**
 *  igb_has_link - check shared code for link and determine up/down
 *  @adapter: pointer to driver private info
 **/
5312bool igb_has_link(struct igb_adapter *adapter)
5313{
5314 struct e1000_hw *hw = &adapter->hw;
5315 bool link_active = false;
5316
	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
5322 switch (hw->phy.media_type) {
5323 case e1000_media_type_copper:
5324 if (!hw->mac.get_link_status)
5325 return true;
5326 fallthrough;
5327 case e1000_media_type_internal_serdes:
5328 hw->mac.ops.check_for_link(hw);
5329 link_active = !hw->mac.get_link_status;
5330 break;
5331 default:
5332 case e1000_media_type_unknown:
5333 break;
5334 }
5335
5336 if (((hw->mac.type == e1000_i210) ||
5337 (hw->mac.type == e1000_i211)) &&
5338 (hw->phy.id == I210_I_PHY_ID)) {
5339 if (!netif_carrier_ok(adapter->netdev)) {
5340 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5341 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
5342 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
5343 adapter->link_check_timeout = jiffies;
5344 }
5345 }
5346
5347 return link_active;
5348}
5349
5350static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
5351{
5352 bool ret = false;
5353 u32 ctrl_ext, thstat;
5354
5355
5356 if (hw->mac.type == e1000_i350) {
5357 thstat = rd32(E1000_THSTAT);
5358 ctrl_ext = rd32(E1000_CTRL_EXT);
5359
5360 if ((hw->phy.media_type == e1000_media_type_copper) &&
5361 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
5362 ret = !!(thstat & event);
5363 }
5364
5365 return ret;
5366}
5367
/**
 *  igb_check_lvmmc - check for malformed packets received
 *  and indicated in the LVMMC register
 *  @adapter: adapter struct
 **/
5373static void igb_check_lvmmc(struct igb_adapter *adapter)
5374{
5375 struct e1000_hw *hw = &adapter->hw;
5376 u32 lvmmc;
5377
5378 lvmmc = rd32(E1000_LVMMC);
5379 if (lvmmc) {
5380 if (unlikely(net_ratelimit())) {
5381 netdev_warn(adapter->netdev,
5382 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
5383 lvmmc);
5384 }
5385 }
5386}
5387
/**
 *  igb_watchdog - Timer Call-back
 *  @t: pointer to timer_list containing our private info pointer
 **/
5392static void igb_watchdog(struct timer_list *t)
5393{
5394 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5395
5396 schedule_work(&adapter->watchdog_task);
5397}
5398
5399static void igb_watchdog_task(struct work_struct *work)
5400{
5401 struct igb_adapter *adapter = container_of(work,
5402 struct igb_adapter,
5403 watchdog_task);
5404 struct e1000_hw *hw = &adapter->hw;
5405 struct e1000_phy_info *phy = &hw->phy;
5406 struct net_device *netdev = adapter->netdev;
5407 u32 link;
5408 int i;
5409 u32 connsw;
5410 u16 phy_data, retry_count = 20;
5411
5412 link = igb_has_link(adapter);
5413
5414 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
5415 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5416 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5417 else
5418 link = false;
5419 }
5420
5421
5422 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5423 if (hw->phy.media_type == e1000_media_type_copper) {
5424 connsw = rd32(E1000_CONNSW);
5425 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
5426 link = 0;
5427 }
5428 }
5429 if (link) {
5430
5431 if (hw->dev_spec._82575.media_changed) {
5432 hw->dev_spec._82575.media_changed = false;
5433 adapter->flags |= IGB_FLAG_MEDIA_RESET;
5434 igb_reset(adapter);
5435 }
5436
5437 pm_runtime_resume(netdev->dev.parent);
5438
5439 if (!netif_carrier_ok(netdev)) {
5440 u32 ctrl;
5441
5442 hw->mac.ops.get_speed_and_duplex(hw,
5443 &adapter->link_speed,
5444 &adapter->link_duplex);
5445
5446 ctrl = rd32(E1000_CTRL);
5447
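			/* Link status messages must follow this format */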
5448 netdev_info(netdev,
5449 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5450 netdev->name,
5451 adapter->link_speed,
5452 adapter->link_duplex == FULL_DUPLEX ?
5453 "Full" : "Half",
5454 (ctrl & E1000_CTRL_TFCE) &&
5455 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
5456 (ctrl & E1000_CTRL_RFCE) ? "RX" :
5457 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
5458
5459
5460 if ((adapter->flags & IGB_FLAG_EEE) &&
5461 (adapter->link_duplex == HALF_DUPLEX)) {
5462 dev_info(&adapter->pdev->dev,
5463 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
5464 adapter->hw.dev_spec._82575.eee_disable = true;
5465 adapter->flags &= ~IGB_FLAG_EEE;
5466 }
5467
5468
5469 igb_check_downshift(hw);
5470 if (phy->speed_downgraded)
5471 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5472
5473
5474 if (igb_thermal_sensor_event(hw,
5475 E1000_THSTAT_LINK_THROTTLE))
5476 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
5477
5478
5479 adapter->tx_timeout_factor = 1;
5480 switch (adapter->link_speed) {
5481 case SPEED_10:
5482 adapter->tx_timeout_factor = 14;
5483 break;
5484 case SPEED_100:
5485
5486 break;
5487 }
5488
5489 if (adapter->link_speed != SPEED_1000)
5490 goto no_wait;
5491
5492
5493retry_read_status:
5494 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
5495 &phy_data)) {
5496 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5497 retry_count) {
5498 msleep(100);
5499 retry_count--;
5500 goto retry_read_status;
5501 } else if (!retry_count) {
5502 dev_err(&adapter->pdev->dev, "exceed max 2 second\n");
5503 }
5504 } else {
5505 dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n");
5506 }
5507no_wait:
5508 netif_carrier_on(netdev);
5509
5510 igb_ping_all_vfs(adapter);
5511 igb_check_vf_rate_limit(adapter);
5512
5513
5514 if (!test_bit(__IGB_DOWN, &adapter->state))
5515 mod_timer(&adapter->phy_info_timer,
5516 round_jiffies(jiffies + 2 * HZ));
5517 }
5518 } else {
5519 if (netif_carrier_ok(netdev)) {
5520 adapter->link_speed = 0;
5521 adapter->link_duplex = 0;
5522
5523
5524 if (igb_thermal_sensor_event(hw,
5525 E1000_THSTAT_PWR_DOWN)) {
5526 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5527 }
5528
5529
5530 netdev_info(netdev, "igb: %s NIC Link is Down\n",
5531 netdev->name);
5532 netif_carrier_off(netdev);
5533
5534 igb_ping_all_vfs(adapter);
5535
5536
5537 if (!test_bit(__IGB_DOWN, &adapter->state))
5538 mod_timer(&adapter->phy_info_timer,
5539 round_jiffies(jiffies + 2 * HZ));
5540
5541
5542 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5543 igb_check_swap_media(adapter);
5544 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5545 schedule_work(&adapter->reset_task);
5546
5547 return;
5548 }
5549 }
5550 pm_schedule_suspend(netdev->dev.parent,
5551 MSEC_PER_SEC * 5);
5552
5553
5554 } else if (!netif_carrier_ok(netdev) &&
5555 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5556 igb_check_swap_media(adapter);
5557 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5558 schedule_work(&adapter->reset_task);
5559
5560 return;
5561 }
5562 }
5563 }
5564
5565 spin_lock(&adapter->stats64_lock);
5566 igb_update_stats(adapter);
5567 spin_unlock(&adapter->stats64_lock);
5568
5569 for (i = 0; i < adapter->num_tx_queues; i++) {
5570 struct igb_ring *tx_ring = adapter->tx_ring[i];
5571 if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context)
			 */
5577 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5578 adapter->tx_timeout_count++;
5579 schedule_work(&adapter->reset_task);
5580
5581 return;
5582 }
5583 }
5584
5585
5586 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5587 }
5588
5589
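	/* Cause software interrupt to ensure Rx ring is cleaned */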
5590 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5591 u32 eics = 0;
5592
5593 for (i = 0; i < adapter->num_q_vectors; i++)
5594 eics |= adapter->q_vector[i]->eims_value;
5595 wr32(E1000_EICS, eics);
5596 } else {
5597 wr32(E1000_ICS, E1000_ICS_RXDMT0);
5598 }
5599
5600 igb_spoof_check(adapter);
5601 igb_ptp_rx_hang(adapter);
5602 igb_ptp_tx_hang(adapter);
5603
5604
5605 if ((adapter->hw.mac.type == e1000_i350) ||
5606 (adapter->hw.mac.type == e1000_i354))
5607 igb_check_lvmmc(adapter);
5608
5609
5610 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5611 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5612 mod_timer(&adapter->watchdog_timer,
5613 round_jiffies(jiffies + HZ));
5614 else
5615 mod_timer(&adapter->watchdog_timer,
5616 round_jiffies(jiffies + 2 * HZ));
5617 }
5618}
5619
5620enum latency_range {
5621 lowest_latency = 0,
5622 low_latency = 1,
5623 bulk_latency = 2,
5624 latency_invalid = 255
5625};
5626
/**
 *  igb_update_ring_itr - update the dynamic ITR value based on packet size
 *  @q_vector: pointer to q_vector
 *
 *  Stores a new ITR value based strictly on packet size.  This
 *  algorithm is less sophisticated than that used in igb_update_itr,
 *  due to the difficulty of synchronizing statistics across multiple
 *  receive rings.  The divisors and thresholds used by this function
 *  were determined based on theoretical maximum wire speed and testing
 *  data, in order to minimize response time while increasing bulk
 *  throughput.
 *  This functionality is controlled by ethtool's coalescing settings.
 *  NOTE:  This function is called only when operating in a multiqueue
 *         receive environment.
 **/
5642static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5643{
5644 int new_val = q_vector->itr_val;
5645 int avg_wire_size = 0;
5646 struct igb_adapter *adapter = q_vector->adapter;
5647 unsigned int packets;
5648
	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
5652 if (adapter->link_speed != SPEED_1000) {
5653 new_val = IGB_4K_ITR;
5654 goto set_itr_val;
5655 }
5656
5657 packets = q_vector->rx.total_packets;
5658 if (packets)
5659 avg_wire_size = q_vector->rx.total_bytes / packets;
5660
5661 packets = q_vector->tx.total_packets;
5662 if (packets)
5663 avg_wire_size = max_t(u32, avg_wire_size,
5664 q_vector->tx.total_bytes / packets);
5665
5666
5667 if (!avg_wire_size)
5668 goto clear_counts;
5669
5670
5671 avg_wire_size += 24;
5672
5673
5674 avg_wire_size = min(avg_wire_size, 3000);
5675
5676
5677 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5678 new_val = avg_wire_size / 3;
5679 else
5680 new_val = avg_wire_size / 2;
5681
5682
5683 if (new_val < IGB_20K_ITR &&
5684 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5685 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5686 new_val = IGB_20K_ITR;
5687
5688set_itr_val:
5689 if (new_val != q_vector->itr_val) {
5690 q_vector->itr_val = new_val;
5691 q_vector->set_itr = 1;
5692 }
5693clear_counts:
5694 q_vector->rx.total_bytes = 0;
5695 q_vector->rx.total_packets = 0;
5696 q_vector->tx.total_bytes = 0;
5697 q_vector->tx.total_packets = 0;
5698}
5699
/**
 *  igb_update_itr - update the dynamic ITR value based on statistics
 *  @q_vector: pointer to q_vector
 *  @ring_container: ring info to update the itr for
 *
 *  Stores a new ITR value based on packets and byte
 *  counts during the last interrupt.  The advantage of per interrupt
 *  computation is faster updates and more accurate ITR for the current
 *  traffic pattern.  Constants in this function were computed
 *  based on theoretical maximum wire speed and thresholds were set based
 *  on testing data as well as attempting to minimize response time
 *  while increasing bulk throughput.
 *  This functionality is controlled by ethtool's coalescing settings.
 *  NOTE:  These calculations are only valid when operating in a single-
 *         queue environment.
 **/
5716static void igb_update_itr(struct igb_q_vector *q_vector,
5717 struct igb_ring_container *ring_container)
5718{
5719 unsigned int packets = ring_container->total_packets;
5720 unsigned int bytes = ring_container->total_bytes;
5721 u8 itrval = ring_container->itr;
5722
5723
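	/* no packets, exit with status unchanged */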
5724 if (packets == 0)
5725 return;
5726
5727 switch (itrval) {
5728 case lowest_latency:
5729
5730 if (bytes/packets > 8000)
5731 itrval = bulk_latency;
5732 else if ((packets < 5) && (bytes > 512))
5733 itrval = low_latency;
5734 break;
5735 case low_latency:
5736 if (bytes > 10000) {
5737
5738 if (bytes/packets > 8000)
5739 itrval = bulk_latency;
5740 else if ((packets < 10) || ((bytes/packets) > 1200))
5741 itrval = bulk_latency;
5742 else if ((packets > 35))
5743 itrval = lowest_latency;
5744 } else if (bytes/packets > 2000) {
5745 itrval = bulk_latency;
5746 } else if (packets <= 2 && bytes < 512) {
5747 itrval = lowest_latency;
5748 }
5749 break;
5750 case bulk_latency:
5751 if (bytes > 25000) {
5752 if (packets > 35)
5753 itrval = low_latency;
5754 } else if (bytes < 1500) {
5755 itrval = low_latency;
5756 }
5757 break;
5758 }
5759
5760
5761 ring_container->total_bytes = 0;
5762 ring_container->total_packets = 0;
5763
5764
5765 ring_container->itr = itrval;
5766}
5767
5768static void igb_set_itr(struct igb_q_vector *q_vector)
5769{
5770 struct igb_adapter *adapter = q_vector->adapter;
5771 u32 new_itr = q_vector->itr_val;
5772 u8 current_itr = 0;
5773
5774
5775 if (adapter->link_speed != SPEED_1000) {
5776 current_itr = 0;
5777 new_itr = IGB_4K_ITR;
5778 goto set_itr_now;
5779 }
5780
5781 igb_update_itr(q_vector, &q_vector->tx);
5782 igb_update_itr(q_vector, &q_vector->rx);
5783
5784 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5785
5786
5787 if (current_itr == lowest_latency &&
5788 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5789 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5790 current_itr = low_latency;
5791
5792 switch (current_itr) {
5793
5794 case lowest_latency:
5795 new_itr = IGB_70K_ITR;
5796 break;
5797 case low_latency:
5798 new_itr = IGB_20K_ITR;
5799 break;
5800 case bulk_latency:
5801 new_itr = IGB_4K_ITR;
5802 break;
5803 default:
5804 break;
5805 }
5806
5807set_itr_now:
5808 if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
5813 new_itr = new_itr > q_vector->itr_val ?
5814 max((new_itr * q_vector->itr_val) /
5815 (new_itr + (q_vector->itr_val >> 2)),
5816 new_itr) : new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
5823 q_vector->itr_val = new_itr;
5824 q_vector->set_itr = 1;
5825 }
5826}
5827
5828static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5829 struct igb_tx_buffer *first,
5830 u32 vlan_macip_lens, u32 type_tucmd,
5831 u32 mss_l4len_idx)
5832{
5833 struct e1000_adv_tx_context_desc *context_desc;
5834 u16 i = tx_ring->next_to_use;
5835 struct timespec64 ts;
5836
5837 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5838
5839 i++;
5840 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5841
5842
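	/* set bits to identify this as an advanced context descriptor */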
5843 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5844
5845
5846 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5847 mss_l4len_idx |= tx_ring->reg_idx << 4;
5848
5849 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5850 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
5851 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5852
5853
5854
5855
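	/* launch time is expressed in units of 32 ns in the descriptor */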
5856 if (tx_ring->launchtime_enable) {
5857 ts = ktime_to_timespec64(first->skb->tstamp);
5858 skb_txtime_consumed(first->skb);
5859 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5860 } else {
5861 context_desc->seqnum_seed = 0;
5862 }
5863}
5864
5865static int igb_tso(struct igb_ring *tx_ring,
5866 struct igb_tx_buffer *first,
5867 u8 *hdr_len)
5868{
5869 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5870 struct sk_buff *skb = first->skb;
5871 union {
5872 struct iphdr *v4;
5873 struct ipv6hdr *v6;
5874 unsigned char *hdr;
5875 } ip;
5876 union {
5877 struct tcphdr *tcp;
5878 struct udphdr *udp;
5879 unsigned char *hdr;
5880 } l4;
5881 u32 paylen, l4_offset;
5882 int err;
5883
5884 if (skb->ip_summed != CHECKSUM_PARTIAL)
5885 return 0;
5886
5887 if (!skb_is_gso(skb))
5888 return 0;
5889
5890 err = skb_cow_head(skb, 0);
5891 if (err < 0)
5892 return err;
5893
5894 ip.hdr = skb_network_header(skb);
5895 l4.hdr = skb_checksum_start(skb);
5896
5897
5898 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
5899 E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
5900
5901
5902 if (ip.v4->version == 4) {
5903 unsigned char *csum_start = skb_checksum_start(skb);
5904 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
5905
5906
5907
5908
5909 ip.v4->check = csum_fold(csum_partial(trans_start,
5910 csum_start - trans_start,
5911 0));
5912 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
5913
5914 ip.v4->tot_len = 0;
5915 first->tx_flags |= IGB_TX_FLAGS_TSO |
5916 IGB_TX_FLAGS_CSUM |
5917 IGB_TX_FLAGS_IPV4;
5918 } else {
5919 ip.v6->payload_len = 0;
5920 first->tx_flags |= IGB_TX_FLAGS_TSO |
5921 IGB_TX_FLAGS_CSUM;
5922 }
5923
5924
5925 l4_offset = l4.hdr - skb->data;
5926
5927
5928 paylen = skb->len - l4_offset;
5929 if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
5930
5931 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
5932 csum_replace_by_diff(&l4.tcp->check,
5933 (__force __wsum)htonl(paylen));
5934 } else {
5935
5936 *hdr_len = sizeof(*l4.udp) + l4_offset;
5937 csum_replace_by_diff(&l4.udp->check,
5938 (__force __wsum)htonl(paylen));
5939 }
5940
5941
5942 first->gso_segs = skb_shinfo(skb)->gso_segs;
5943 first->bytecount += (first->gso_segs - 1) * *hdr_len;
5944
5945
5946 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
5947 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
5948
5949
5950 vlan_macip_lens = l4.hdr - ip.hdr;
5951 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
5952 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5953
5954 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
5955 type_tucmd, mss_l4len_idx);
5956
5957 return 1;
5958}
5959
5960static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5961{
5962 struct sk_buff *skb = first->skb;
5963 u32 vlan_macip_lens = 0;
5964 u32 type_tucmd = 0;
5965
5966 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5967csum_failed:
5968 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
5969 !tx_ring->launchtime_enable)
5970 return;
5971 goto no_csum;
5972 }
5973
5974 switch (skb->csum_offset) {
5975 case offsetof(struct tcphdr, check):
5976 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5977 fallthrough;
5978 case offsetof(struct udphdr, check):
5979 break;
5980 case offsetof(struct sctphdr, checksum):
5981
5982 if (skb_csum_is_sctp(skb)) {
5983 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
5984 break;
5985 }
5986 fallthrough;
5987 default:
5988 skb_checksum_help(skb);
5989 goto csum_failed;
5990 }
5991
5992
5993 first->tx_flags |= IGB_TX_FLAGS_CSUM;
5994 vlan_macip_lens = skb_checksum_start_offset(skb) -
5995 skb_network_offset(skb);
5996no_csum:
5997 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
5998 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5999
6000 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
6001}
6002
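/* IGB_SET_FLAG() maps a flag bit in _input onto the corresponding bit in
 * _result using only a compile-time multiply or divide, which avoids a
 * conditional branch in the hot transmit path.
 */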
6003#define IGB_SET_FLAG(_input, _flag, _result) \
6004 ((_flag <= _result) ? \
6005 ((u32)(_input & _flag) * (_result / _flag)) : \
6006 ((u32)(_input & _flag) / (_flag / _result)))
6007
6008static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
6009{
6010
6011 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
6012 E1000_ADVTXD_DCMD_DEXT |
6013 E1000_ADVTXD_DCMD_IFCS;
6014
6015
6016 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
6017 (E1000_ADVTXD_DCMD_VLE));
6018
6019
6020 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
6021 (E1000_ADVTXD_DCMD_TSE));
6022
6023
6024 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
6025 (E1000_ADVTXD_MAC_TSTAMP));
6026
6027
6028 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
6029
6030 return cmd_type;
6031}
6032
6033static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
6034 union e1000_adv_tx_desc *tx_desc,
6035 u32 tx_flags, unsigned int paylen)
6036{
6037 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
6038
6039
6040 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6041 olinfo_status |= tx_ring->reg_idx << 4;
6042
6043
6044 olinfo_status |= IGB_SET_FLAG(tx_flags,
6045 IGB_TX_FLAGS_CSUM,
6046 (E1000_TXD_POPTS_TXSM << 8));
6047
6048
6049 olinfo_status |= IGB_SET_FLAG(tx_flags,
6050 IGB_TX_FLAGS_IPV4,
6051 (E1000_TXD_POPTS_IXSM << 8));
6052
6053 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6054}
6055
6056static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6057{
6058 struct net_device *netdev = tx_ring->netdev;
6059
6060 netif_stop_subqueue(netdev, tx_ring->queue_index);
6061
	/* Ensure the stopped-queue state is visible to the Tx cleanup path
	 * before we re-check the ring for free descriptors below.
	 */
6066 smp_mb();
6067
	/* We need to check again in case another CPU has just
	 * made room available.
	 */
6071 if (igb_desc_unused(tx_ring) < size)
6072 return -EBUSY;
6073
6074
6075 netif_wake_subqueue(netdev, tx_ring->queue_index);
6076
6077 u64_stats_update_begin(&tx_ring->tx_syncp2);
6078 tx_ring->tx_stats.restart_queue2++;
6079 u64_stats_update_end(&tx_ring->tx_syncp2);
6080
6081 return 0;
6082}
6083
6084static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6085{
6086 if (igb_desc_unused(tx_ring) >= size)
6087 return 0;
6088 return __igb_maybe_stop_tx(tx_ring, size);
6089}
6090
6091static int igb_tx_map(struct igb_ring *tx_ring,
6092 struct igb_tx_buffer *first,
6093 const u8 hdr_len)
6094{
6095 struct sk_buff *skb = first->skb;
6096 struct igb_tx_buffer *tx_buffer;
6097 union e1000_adv_tx_desc *tx_desc;
6098 skb_frag_t *frag;
6099 dma_addr_t dma;
6100 unsigned int data_len, size;
6101 u32 tx_flags = first->tx_flags;
6102 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
6103 u16 i = tx_ring->next_to_use;
6104
6105 tx_desc = IGB_TX_DESC(tx_ring, i);
6106
6107 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
6108
6109 size = skb_headlen(skb);
6110 data_len = skb->data_len;
6111
6112 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
6113
6114 tx_buffer = first;
6115
6116 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
6117 if (dma_mapping_error(tx_ring->dev, dma))
6118 goto dma_error;
6119
6120
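		/* record length, and DMA address */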
6121 dma_unmap_len_set(tx_buffer, len, size);
6122 dma_unmap_addr_set(tx_buffer, dma, dma);
6123
6124 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6125
6126 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
6127 tx_desc->read.cmd_type_len =
6128 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
6129
6130 i++;
6131 tx_desc++;
6132 if (i == tx_ring->count) {
6133 tx_desc = IGB_TX_DESC(tx_ring, 0);
6134 i = 0;
6135 }
6136 tx_desc->read.olinfo_status = 0;
6137
6138 dma += IGB_MAX_DATA_PER_TXD;
6139 size -= IGB_MAX_DATA_PER_TXD;
6140
6141 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6142 }
6143
6144 if (likely(!data_len))
6145 break;
6146
6147 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
6148
6149 i++;
6150 tx_desc++;
6151 if (i == tx_ring->count) {
6152 tx_desc = IGB_TX_DESC(tx_ring, 0);
6153 i = 0;
6154 }
6155 tx_desc->read.olinfo_status = 0;
6156
6157 size = skb_frag_size(frag);
6158 data_len -= size;
6159
6160 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
6161 size, DMA_TO_DEVICE);
6162
6163 tx_buffer = &tx_ring->tx_buffer_info[i];
6164 }
6165
6166
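	/* write last descriptor with RS and EOP bits */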
6167 cmd_type |= size | IGB_TXD_DCMD;
6168 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6169
6170 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6171
6172
6173 first->time_stamp = jiffies;
6174
6175 skb_tx_timestamp(skb);
6176
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
6184 dma_wmb();
6185
6186
6187 first->next_to_watch = tx_desc;
6188
6189 i++;
6190 if (i == tx_ring->count)
6191 i = 0;
6192
6193 tx_ring->next_to_use = i;
6194
6195
6196 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6197
6198 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
6199 writel(i, tx_ring->tail);
6200 }
6201 return 0;
6202
6203dma_error:
6204 dev_err(tx_ring->dev, "TX DMA map failed\n");
6205 tx_buffer = &tx_ring->tx_buffer_info[i];
6206
6207
6208 while (tx_buffer != first) {
6209 if (dma_unmap_len(tx_buffer, len))
6210 dma_unmap_page(tx_ring->dev,
6211 dma_unmap_addr(tx_buffer, dma),
6212 dma_unmap_len(tx_buffer, len),
6213 DMA_TO_DEVICE);
6214 dma_unmap_len_set(tx_buffer, len, 0);
6215
6216 if (i-- == 0)
6217 i += tx_ring->count;
6218 tx_buffer = &tx_ring->tx_buffer_info[i];
6219 }
6220
6221 if (dma_unmap_len(tx_buffer, len))
6222 dma_unmap_single(tx_ring->dev,
6223 dma_unmap_addr(tx_buffer, dma),
6224 dma_unmap_len(tx_buffer, len),
6225 DMA_TO_DEVICE);
6226 dma_unmap_len_set(tx_buffer, len, 0);
6227
6228 dev_kfree_skb_any(tx_buffer->skb);
6229 tx_buffer->skb = NULL;
6230
6231 tx_ring->next_to_use = i;
6232
6233 return -1;
6234}
6235
6236int igb_xmit_xdp_ring(struct igb_adapter *adapter,
6237 struct igb_ring *tx_ring,
6238 struct xdp_frame *xdpf)
6239{
6240 union e1000_adv_tx_desc *tx_desc;
6241 u32 len, cmd_type, olinfo_status;
6242 struct igb_tx_buffer *tx_buffer;
6243 dma_addr_t dma;
6244 u16 i;
6245
6246 len = xdpf->len;
6247
6248 if (unlikely(!igb_desc_unused(tx_ring)))
6249 return IGB_XDP_CONSUMED;
6250
6251 dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);
6252 if (dma_mapping_error(tx_ring->dev, dma))
6253 return IGB_XDP_CONSUMED;
6254
6255
6256 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6257 tx_buffer->bytecount = len;
6258 tx_buffer->gso_segs = 1;
6259 tx_buffer->protocol = 0;
6260
6261 i = tx_ring->next_to_use;
6262 tx_desc = IGB_TX_DESC(tx_ring, i);
6263
6264 dma_unmap_len_set(tx_buffer, len, len);
6265 dma_unmap_addr_set(tx_buffer, dma, dma);
6266 tx_buffer->type = IGB_TYPE_XDP;
6267 tx_buffer->xdpf = xdpf;
6268
6269 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6270
6271
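	/* put descriptor type bits */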
6272 cmd_type = E1000_ADVTXD_DTYP_DATA |
6273 E1000_ADVTXD_DCMD_DEXT |
6274 E1000_ADVTXD_DCMD_IFCS;
6275 cmd_type |= len | IGB_TXD_DCMD;
6276 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6277
6278 olinfo_status = cpu_to_le32(len << E1000_ADVTXD_PAYLEN_SHIFT);
6279
6280 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6281 olinfo_status |= tx_ring->reg_idx << 4;
6282
6283 tx_desc->read.olinfo_status = olinfo_status;
6284
6285 netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
6286
6287
6288 tx_buffer->time_stamp = jiffies;
6289
6290
6291 smp_wmb();
6292
6293
6294 i++;
6295 if (i == tx_ring->count)
6296 i = 0;
6297
6298 tx_buffer->next_to_watch = tx_desc;
6299 tx_ring->next_to_use = i;
6300
6301
6302 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6303
6304 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
6305 writel(i, tx_ring->tail);
6306
6307 return IGB_XDP_TX;
6308}
6309
6310netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
6311 struct igb_ring *tx_ring)
6312{
6313 struct igb_tx_buffer *first;
6314 int tso;
6315 u32 tx_flags = 0;
6316 unsigned short f;
6317 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6318 __be16 protocol = vlan_get_protocol(skb);
6319 u8 hdr_len = 0;
6320
	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
6327 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6328 count += TXD_USE_COUNT(skb_frag_size(
6329 &skb_shinfo(skb)->frags[f]));
6330
6331 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
6332
6333 return NETDEV_TX_BUSY;
6334 }
6335
6336
6337 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6338 first->type = IGB_TYPE_SKB;
6339 first->skb = skb;
6340 first->bytecount = skb->len;
6341 first->gso_segs = 1;
6342
6343 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6344 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6345
6346 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
6347 !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
6348 &adapter->state)) {
6349 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6350 tx_flags |= IGB_TX_FLAGS_TSTAMP;
6351
6352 adapter->ptp_tx_skb = skb_get(skb);
6353 adapter->ptp_tx_start = jiffies;
6354 if (adapter->hw.mac.type == e1000_82576)
6355 schedule_work(&adapter->ptp_tx_work);
6356 } else {
6357 adapter->tx_hwtstamp_skipped++;
6358 }
6359 }
6360
6361 if (skb_vlan_tag_present(skb)) {
6362 tx_flags |= IGB_TX_FLAGS_VLAN;
6363 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
6364 }
6365
6366
6367 first->tx_flags = tx_flags;
6368 first->protocol = protocol;
6369
6370 tso = igb_tso(tx_ring, first, &hdr_len);
6371 if (tso < 0)
6372 goto out_drop;
6373 else if (!tso)
6374 igb_tx_csum(tx_ring, first);
6375
6376 if (igb_tx_map(tx_ring, first, hdr_len))
6377 goto cleanup_tx_tstamp;
6378
6379 return NETDEV_TX_OK;
6380
6381out_drop:
6382 dev_kfree_skb_any(first->skb);
6383 first->skb = NULL;
6384cleanup_tx_tstamp:
6385 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
6386 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6387
6388 dev_kfree_skb_any(adapter->ptp_tx_skb);
6389 adapter->ptp_tx_skb = NULL;
6390 if (adapter->hw.mac.type == e1000_82576)
6391 cancel_work_sync(&adapter->ptp_tx_work);
6392 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
6393 }
6394
6395 return NETDEV_TX_OK;
6396}
6397
6398static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
6399 struct sk_buff *skb)
6400{
6401 unsigned int r_idx = skb->queue_mapping;
6402
6403 if (r_idx >= adapter->num_tx_queues)
6404 r_idx = r_idx % adapter->num_tx_queues;
6405
6406 return adapter->tx_ring[r_idx];
6407}
6408
6409static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
6410 struct net_device *netdev)
6411{
6412 struct igb_adapter *adapter = netdev_priv(netdev);
6413
	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
6417 if (skb_put_padto(skb, 17))
6418 return NETDEV_TX_OK;
6419
6420 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
6421}
6422
/**
 *  igb_tx_timeout - Respond to a Tx Hang
 *  @netdev: network interface device structure
 *  @txqueue: number of the Tx queue that hung (unused)
 **/
6428static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
6429{
6430 struct igb_adapter *adapter = netdev_priv(netdev);
6431 struct e1000_hw *hw = &adapter->hw;
6432
6433
6434 adapter->tx_timeout_count++;
6435
6436 if (hw->mac.type >= e1000_82580)
6437 hw->dev_spec._82575.global_device_reset = true;
6438
6439 schedule_work(&adapter->reset_task);
6440 wr32(E1000_EICS,
6441 (adapter->eims_enable_mask & ~adapter->eims_other));
6442}
6443
6444static void igb_reset_task(struct work_struct *work)
6445{
6446 struct igb_adapter *adapter;
6447 adapter = container_of(work, struct igb_adapter, reset_task);
6448
6449 rtnl_lock();
6450
6451 if (test_bit(__IGB_DOWN, &adapter->state) ||
6452 test_bit(__IGB_RESETTING, &adapter->state)) {
6453 rtnl_unlock();
6454 return;
6455 }
6456
6457 igb_dump(adapter);
6458 netdev_err(adapter->netdev, "Reset adapter\n");
6459 igb_reinit_locked(adapter);
6460 rtnl_unlock();
6461}
6462
/**
 *  igb_get_stats64 - Get System Network Statistics
 *  @netdev: network interface device structure
 *  @stats: rtnl_link_stats64 pointer
 **/
6468static void igb_get_stats64(struct net_device *netdev,
6469 struct rtnl_link_stats64 *stats)
6470{
6471 struct igb_adapter *adapter = netdev_priv(netdev);
6472
6473 spin_lock(&adapter->stats64_lock);
6474 igb_update_stats(adapter);
6475 memcpy(stats, &adapter->stats64, sizeof(*stats));
6476 spin_unlock(&adapter->stats64_lock);
6477}
6478
/**
 *  igb_change_mtu - Change the Maximum Transfer Unit
 *  @netdev: network interface device structure
 *  @new_mtu: new value for maximum frame size
 *
 *  Returns 0 on success, negative on failure
 **/
6486static int igb_change_mtu(struct net_device *netdev, int new_mtu)
6487{
6488 struct igb_adapter *adapter = netdev_priv(netdev);
6489 int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
6490
6491 if (adapter->xdp_prog) {
6492 int i;
6493
6494 for (i = 0; i < adapter->num_rx_queues; i++) {
6495 struct igb_ring *ring = adapter->rx_ring[i];
6496
6497 if (max_frame > igb_rx_bufsz(ring)) {
6498 netdev_warn(adapter->netdev,
6499 "Requested MTU size is not supported with XDP. Max frame size is %d\n",
6500 max_frame);
6501 return -EINVAL;
6502 }
6503 }
6504 }
6505
6506
6507 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
6508 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
6509
6510 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
6511 usleep_range(1000, 2000);
6512
6513
6514 adapter->max_frame_size = max_frame;
6515
6516 if (netif_running(netdev))
6517 igb_down(adapter);
6518
6519 netdev_dbg(netdev, "changing MTU from %d to %d\n",
6520 netdev->mtu, new_mtu);
6521 netdev->mtu = new_mtu;
6522
6523 if (netif_running(netdev))
6524 igb_up(adapter);
6525 else
6526 igb_reset(adapter);
6527
6528 clear_bit(__IGB_RESETTING, &adapter->state);
6529
6530 return 0;
6531}
6532
/**
 *  igb_update_stats - Update the board statistics counters
 *  @adapter: board private structure
 **/
6537void igb_update_stats(struct igb_adapter *adapter)
6538{
6539 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
6540 struct e1000_hw *hw = &adapter->hw;
6541 struct pci_dev *pdev = adapter->pdev;
6542 u32 reg, mpc;
6543 int i;
6544 u64 bytes, packets;
6545 unsigned int start;
6546 u64 _bytes, _packets;
6547
	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
6551 if (adapter->link_speed == 0)
6552 return;
6553 if (pci_channel_offline(pdev))
6554 return;
6555
6556 bytes = 0;
6557 packets = 0;
6558
6559 rcu_read_lock();
6560 for (i = 0; i < adapter->num_rx_queues; i++) {
6561 struct igb_ring *ring = adapter->rx_ring[i];
6562 u32 rqdpc = rd32(E1000_RQDPC(i));
6563 if (hw->mac.type >= e1000_i210)
6564 wr32(E1000_RQDPC(i), 0);
6565
6566 if (rqdpc) {
6567 ring->rx_stats.drops += rqdpc;
6568 net_stats->rx_fifo_errors += rqdpc;
6569 }
6570
6571 do {
6572 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
6573 _bytes = ring->rx_stats.bytes;
6574 _packets = ring->rx_stats.packets;
6575 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
6576 bytes += _bytes;
6577 packets += _packets;
6578 }
6579
6580 net_stats->rx_bytes = bytes;
6581 net_stats->rx_packets = packets;
6582
6583 bytes = 0;
6584 packets = 0;
6585 for (i = 0; i < adapter->num_tx_queues; i++) {
6586 struct igb_ring *ring = adapter->tx_ring[i];
6587 do {
6588 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
6589 _bytes = ring->tx_stats.bytes;
6590 _packets = ring->tx_stats.packets;
6591 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
6592 bytes += _bytes;
6593 packets += _packets;
6594 }
6595 net_stats->tx_bytes = bytes;
6596 net_stats->tx_packets = packets;
6597 rcu_read_unlock();
6598
6599
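	/* read the hardware statistics registers */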
6600 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
6601 adapter->stats.gprc += rd32(E1000_GPRC);
6602 adapter->stats.gorc += rd32(E1000_GORCL);
6603 rd32(E1000_GORCH);
6604 adapter->stats.bprc += rd32(E1000_BPRC);
6605 adapter->stats.mprc += rd32(E1000_MPRC);
6606 adapter->stats.roc += rd32(E1000_ROC);
6607
6608 adapter->stats.prc64 += rd32(E1000_PRC64);
6609 adapter->stats.prc127 += rd32(E1000_PRC127);
6610 adapter->stats.prc255 += rd32(E1000_PRC255);
6611 adapter->stats.prc511 += rd32(E1000_PRC511);
6612 adapter->stats.prc1023 += rd32(E1000_PRC1023);
6613 adapter->stats.prc1522 += rd32(E1000_PRC1522);
6614 adapter->stats.symerrs += rd32(E1000_SYMERRS);
6615 adapter->stats.sec += rd32(E1000_SEC);
6616
6617 mpc = rd32(E1000_MPC);
6618 adapter->stats.mpc += mpc;
6619 net_stats->rx_fifo_errors += mpc;
6620 adapter->stats.scc += rd32(E1000_SCC);
6621 adapter->stats.ecol += rd32(E1000_ECOL);
6622 adapter->stats.mcc += rd32(E1000_MCC);
6623 adapter->stats.latecol += rd32(E1000_LATECOL);
6624 adapter->stats.dc += rd32(E1000_DC);
6625 adapter->stats.rlec += rd32(E1000_RLEC);
6626 adapter->stats.xonrxc += rd32(E1000_XONRXC);
6627 adapter->stats.xontxc += rd32(E1000_XONTXC);
6628 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6629 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6630 adapter->stats.fcruc += rd32(E1000_FCRUC);
6631 adapter->stats.gptc += rd32(E1000_GPTC);
6632 adapter->stats.gotc += rd32(E1000_GOTCL);
6633 rd32(E1000_GOTCH);
6634 adapter->stats.rnbc += rd32(E1000_RNBC);
6635 adapter->stats.ruc += rd32(E1000_RUC);
6636 adapter->stats.rfc += rd32(E1000_RFC);
6637 adapter->stats.rjc += rd32(E1000_RJC);
6638 adapter->stats.tor += rd32(E1000_TORH);
6639 adapter->stats.tot += rd32(E1000_TOTH);
6640 adapter->stats.tpr += rd32(E1000_TPR);
6641
6642 adapter->stats.ptc64 += rd32(E1000_PTC64);
6643 adapter->stats.ptc127 += rd32(E1000_PTC127);
6644 adapter->stats.ptc255 += rd32(E1000_PTC255);
6645 adapter->stats.ptc511 += rd32(E1000_PTC511);
6646 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6647 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6648
6649 adapter->stats.mptc += rd32(E1000_MPTC);
6650 adapter->stats.bptc += rd32(E1000_BPTC);
6651
6652 adapter->stats.tpt += rd32(E1000_TPT);
6653 adapter->stats.colc += rd32(E1000_COLC);
6654
6655 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6656
6657 reg = rd32(E1000_CTRL_EXT);
6658 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6659 adapter->stats.rxerrc += rd32(E1000_RXERRC);
6660
6661
6662 if ((hw->mac.type != e1000_i210) &&
6663 (hw->mac.type != e1000_i211))
6664 adapter->stats.tncrs += rd32(E1000_TNCRS);
6665 }
6666
6667 adapter->stats.tsctc += rd32(E1000_TSCTC);
6668 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6669
6670 adapter->stats.iac += rd32(E1000_IAC);
6671 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6672 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6673 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6674 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6675 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6676 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6677 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
6678 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
6679
6680
6681 net_stats->multicast = adapter->stats.mprc;
6682 net_stats->collisions = adapter->stats.colc;
6683
	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
6689 net_stats->rx_errors = adapter->stats.rxerrc +
6690 adapter->stats.crcerrs + adapter->stats.algnerrc +
6691 adapter->stats.ruc + adapter->stats.roc +
6692 adapter->stats.cexterr;
6693 net_stats->rx_length_errors = adapter->stats.ruc +
6694 adapter->stats.roc;
6695 net_stats->rx_crc_errors = adapter->stats.crcerrs;
6696 net_stats->rx_frame_errors = adapter->stats.algnerrc;
6697 net_stats->rx_missed_errors = adapter->stats.mpc;
6698
6699
6700 net_stats->tx_errors = adapter->stats.ecol +
6701 adapter->stats.latecol;
6702 net_stats->tx_aborted_errors = adapter->stats.ecol;
6703 net_stats->tx_window_errors = adapter->stats.latecol;
6704 net_stats->tx_carrier_errors = adapter->stats.tncrs;
6705
6706
6707
6708
6709 adapter->stats.mgptc += rd32(E1000_MGTPTC);
6710 adapter->stats.mgprc += rd32(E1000_MGTPRC);
6711 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
6712
6713
6714 reg = rd32(E1000_MANC);
6715 if (reg & E1000_MANC_EN_BMC2OS) {
6716 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6717 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6718 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6719 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6720 }
6721}
6722
6723static void igb_tsync_interrupt(struct igb_adapter *adapter)
6724{
6725 struct e1000_hw *hw = &adapter->hw;
6726 struct ptp_clock_event event;
6727 struct timespec64 ts;
6728 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
6729
6730 if (tsicr & TSINTR_SYS_WRAP) {
6731 event.type = PTP_CLOCK_PPS;
6732 if (adapter->ptp_caps.pps)
6733 ptp_clock_event(adapter->ptp_clock, &event);
6734 ack |= TSINTR_SYS_WRAP;
6735 }
6736
6737 if (tsicr & E1000_TSICR_TXTS) {
6738
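		/* retrieve hardware timestamp */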
6739 schedule_work(&adapter->ptp_tx_work);
6740 ack |= E1000_TSICR_TXTS;
6741 }
6742
6743 if (tsicr & TSINTR_TT0) {
6744 spin_lock(&adapter->tmreg_lock);
6745 ts = timespec64_add(adapter->perout[0].start,
6746 adapter->perout[0].period);
6747
6748 wr32(E1000_TRGTTIML0, ts.tv_nsec);
6749 wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
6750 tsauxc = rd32(E1000_TSAUXC);
6751 tsauxc |= TSAUXC_EN_TT0;
6752 wr32(E1000_TSAUXC, tsauxc);
6753 adapter->perout[0].start = ts;
6754 spin_unlock(&adapter->tmreg_lock);
6755 ack |= TSINTR_TT0;
6756 }
6757
6758 if (tsicr & TSINTR_TT1) {
6759 spin_lock(&adapter->tmreg_lock);
6760 ts = timespec64_add(adapter->perout[1].start,
6761 adapter->perout[1].period);
6762 wr32(E1000_TRGTTIML1, ts.tv_nsec);
6763 wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
6764 tsauxc = rd32(E1000_TSAUXC);
6765 tsauxc |= TSAUXC_EN_TT1;
6766 wr32(E1000_TSAUXC, tsauxc);
6767 adapter->perout[1].start = ts;
6768 spin_unlock(&adapter->tmreg_lock);
6769 ack |= TSINTR_TT1;
6770 }
6771
6772 if (tsicr & TSINTR_AUTT0) {
6773 nsec = rd32(E1000_AUXSTMPL0);
6774 sec = rd32(E1000_AUXSTMPH0);
6775 event.type = PTP_CLOCK_EXTTS;
6776 event.index = 0;
6777 event.timestamp = sec * 1000000000ULL + nsec;
6778 ptp_clock_event(adapter->ptp_clock, &event);
6779 ack |= TSINTR_AUTT0;
6780 }
6781
6782 if (tsicr & TSINTR_AUTT1) {
6783 nsec = rd32(E1000_AUXSTMPL1);
6784 sec = rd32(E1000_AUXSTMPH1);
6785 event.type = PTP_CLOCK_EXTTS;
6786 event.index = 1;
6787 event.timestamp = sec * 1000000000ULL + nsec;
6788 ptp_clock_event(adapter->ptp_clock, &event);
6789 ack |= TSINTR_AUTT1;
6790 }
6791
6792
6793 wr32(E1000_TSICR, ack);
6794}
6795
6796static irqreturn_t igb_msix_other(int irq, void *data)
6797{
6798 struct igb_adapter *adapter = data;
6799 struct e1000_hw *hw = &adapter->hw;
6800 u32 icr = rd32(E1000_ICR);
6801
6802
6803 if (icr & E1000_ICR_DRSTA)
6804 schedule_work(&adapter->reset_task);
6805
6806 if (icr & E1000_ICR_DOUTSYNC) {
6807
6808 adapter->stats.doosync++;
		/* The DMA Out of Sync is also indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event.
		 */
6813 igb_check_wvbr(adapter);
6814 }
6815
6816
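	/* Check for a mailbox event from a VF */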
6817 if (icr & E1000_ICR_VMMB)
6818 igb_msg_task(adapter);
6819
6820 if (icr & E1000_ICR_LSC) {
6821 hw->mac.get_link_status = 1;
6822
6823 if (!test_bit(__IGB_DOWN, &adapter->state))
6824 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6825 }
6826
6827 if (icr & E1000_ICR_TS)
6828 igb_tsync_interrupt(adapter);
6829
6830 wr32(E1000_EIMS, adapter->eims_other);
6831
6832 return IRQ_HANDLED;
6833}
6834
6835static void igb_write_itr(struct igb_q_vector *q_vector)
6836{
6837 struct igb_adapter *adapter = q_vector->adapter;
6838 u32 itr_val = q_vector->itr_val & 0x7FFC;
6839
6840 if (!q_vector->set_itr)
6841 return;
6842
6843 if (!itr_val)
6844 itr_val = 0x4;
6845
6846 if (adapter->hw.mac.type == e1000_82575)
6847 itr_val |= itr_val << 16;
6848 else
6849 itr_val |= E1000_EITR_CNT_IGNR;
6850
6851 writel(itr_val, q_vector->itr_register);
6852 q_vector->set_itr = 0;
6853}
6854
6855static irqreturn_t igb_msix_ring(int irq, void *data)
6856{
6857 struct igb_q_vector *q_vector = data;
6858
6859
6860 igb_write_itr(q_vector);
6861
6862 napi_schedule(&q_vector->napi);
6863
6864 return IRQ_HANDLED;
6865}
6866
6867#ifdef CONFIG_IGB_DCA
6868static void igb_update_tx_dca(struct igb_adapter *adapter,
6869 struct igb_ring *tx_ring,
6870 int cpu)
6871{
6872 struct e1000_hw *hw = &adapter->hw;
6873 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
6874
6875 if (hw->mac.type != e1000_82575)
6876 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
6877
	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
6882 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
6883 E1000_DCA_TXCTRL_DATA_RRO_EN |
6884 E1000_DCA_TXCTRL_DESC_DCA_EN;
6885
6886 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
6887}
6888
6889static void igb_update_rx_dca(struct igb_adapter *adapter,
6890 struct igb_ring *rx_ring,
6891 int cpu)
6892{
6893 struct e1000_hw *hw = &adapter->hw;
6894 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
6895
6896 if (hw->mac.type != e1000_82575)
6897 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
6898
6899
6900
6901
6902
6903 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
6904 E1000_DCA_RXCTRL_DESC_DCA_EN;
6905
6906 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
6907}
6908
6909static void igb_update_dca(struct igb_q_vector *q_vector)
6910{
6911 struct igb_adapter *adapter = q_vector->adapter;
6912 int cpu = get_cpu();
6913
6914 if (q_vector->cpu == cpu)
6915 goto out_no_update;
6916
6917 if (q_vector->tx.ring)
6918 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
6919
6920 if (q_vector->rx.ring)
6921 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
6922
6923 q_vector->cpu = cpu;
6924out_no_update:
6925 put_cpu();
6926}
6927
6928static void igb_setup_dca(struct igb_adapter *adapter)
6929{
6930 struct e1000_hw *hw = &adapter->hw;
6931 int i;
6932
6933 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
6934 return;
6935
6936
6937 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
6938
6939 for (i = 0; i < adapter->num_q_vectors; i++) {
6940 adapter->q_vector[i]->cpu = -1;
6941 igb_update_dca(adapter->q_vector[i]);
6942 }
6943}
6944
6945static int __igb_notify_dca(struct device *dev, void *data)
6946{
6947 struct net_device *netdev = dev_get_drvdata(dev);
6948 struct igb_adapter *adapter = netdev_priv(netdev);
6949 struct pci_dev *pdev = adapter->pdev;
6950 struct e1000_hw *hw = &adapter->hw;
6951 unsigned long event = *(unsigned long *)data;
6952
6953 switch (event) {
6954 case DCA_PROVIDER_ADD:
6955
6956 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
6957 break;
6958 if (dca_add_requester(dev) == 0) {
6959 adapter->flags |= IGB_FLAG_DCA_ENABLED;
6960 dev_info(&pdev->dev, "DCA enabled\n");
6961 igb_setup_dca(adapter);
6962 break;
6963 }
6964 fallthrough;
6965 case DCA_PROVIDER_REMOVE:
6966 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
6967
6968
6969
6970 dca_remove_requester(dev);
6971 dev_info(&pdev->dev, "DCA disabled\n");
6972 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
6973 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
6974 }
6975 break;
6976 }
6977
6978 return 0;
6979}
6980
6981static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
6982 void *p)
6983{
6984 int ret_val;
6985
6986 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
6987 __igb_notify_dca);
6988
6989 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
6990}
6991#endif
6992
6993#ifdef CONFIG_PCI_IOV
6994static int igb_vf_configure(struct igb_adapter *adapter, int vf)
6995{
6996 unsigned char mac_addr[ETH_ALEN];
6997
6998 eth_zero_addr(mac_addr);
6999 igb_set_vf_mac(adapter, vf, mac_addr);
7000
7001
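	/* By default spoof check is enabled for all VFs */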
7002 adapter->vf_data[vf].spoofchk_enabled = true;
7003
7004
7005 adapter->vf_data[vf].trusted = false;
7006
7007 return 0;
7008}
7009
7010#endif
7011static void igb_ping_all_vfs(struct igb_adapter *adapter)
7012{
7013 struct e1000_hw *hw = &adapter->hw;
7014 u32 ping;
7015 int i;
7016
7017 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
7018 ping = E1000_PF_CONTROL_MSG;
7019 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
7020 ping |= E1000_VT_MSGTYPE_CTS;
7021 igb_write_mbx(hw, &ping, 1, i);
7022 }
7023}
7024
7025static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7026{
7027 struct e1000_hw *hw = &adapter->hw;
7028 u32 vmolr = rd32(E1000_VMOLR(vf));
7029 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7030
7031 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
7032 IGB_VF_FLAG_MULTI_PROMISC);
7033 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
7034
7035 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
7036 vmolr |= E1000_VMOLR_MPME;
7037 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
7038 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
7039 } else {
7040
7041
7042
7043
7044 if (vf_data->num_vf_mc_hashes > 30) {
7045 vmolr |= E1000_VMOLR_MPME;
7046 } else if (vf_data->num_vf_mc_hashes) {
7047 int j;
7048
7049 vmolr |= E1000_VMOLR_ROMPE;
7050 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
7051 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
7052 }
7053 }
7054
7055 wr32(E1000_VMOLR(vf), vmolr);
7056
7057
7058 if (*msgbuf & E1000_VT_MSGINFO_MASK)
7059 return -EINVAL;
7060
7061 return 0;
7062}
7063
7064static int igb_set_vf_multicasts(struct igb_adapter *adapter,
7065 u32 *msgbuf, u32 vf)
7066{
7067 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7068 u16 *hash_list = (u16 *)&msgbuf[1];
7069 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7070 int i;
7071
	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
7076 vf_data->num_vf_mc_hashes = n;
7077
7078
7079 if (n > 30)
7080 n = 30;
7081
7082
7083 for (i = 0; i < n; i++)
7084 vf_data->vf_mc_hashes[i] = hash_list[i];
7085
7086
7087 igb_set_rx_mode(adapter->netdev);
7088
7089 return 0;
7090}
7091
7092static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
7093{
7094 struct e1000_hw *hw = &adapter->hw;
7095 struct vf_data_storage *vf_data;
7096 int i, j;
7097
7098 for (i = 0; i < adapter->vfs_allocated_count; i++) {
7099 u32 vmolr = rd32(E1000_VMOLR(i));
7100
7101 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
7102
7103 vf_data = &adapter->vf_data[i];
7104
7105 if ((vf_data->num_vf_mc_hashes > 30) ||
7106 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
7107 vmolr |= E1000_VMOLR_MPME;
7108 } else if (vf_data->num_vf_mc_hashes) {
7109 vmolr |= E1000_VMOLR_ROMPE;
7110 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
7111 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
7112 }
7113 wr32(E1000_VMOLR(i), vmolr);
7114 }
7115}
7116
7117static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
7118{
7119 struct e1000_hw *hw = &adapter->hw;
7120 u32 pool_mask, vlvf_mask, i;
7121
7122
7123 pool_mask = E1000_VLVF_POOLSEL_MASK;
7124 vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
7125
7126
7127 pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
7128 adapter->vfs_allocated_count);
7129
7130
7131 for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
7132 u32 vlvf = rd32(E1000_VLVF(i));
7133 u32 vfta_mask, vid, vfta;
7134
7135
7136 if (!(vlvf & vlvf_mask))
7137 continue;
7138
7139
7140 vlvf ^= vlvf_mask;
7141
7142
7143 if (vlvf & pool_mask)
7144 goto update_vlvfb;
7145
7146
7147 if (vlvf & E1000_VLVF_POOLSEL_MASK)
7148 goto update_vlvf;
7149
7150 vid = vlvf & E1000_VLVF_VLANID_MASK;
7151 vfta_mask = BIT(vid % 32);
7152
7153
7154 vfta = adapter->shadow_vfta[vid / 32];
7155 if (vfta & vfta_mask)
7156 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
7157update_vlvf:
7158
7159 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
7160 vlvf &= E1000_VLVF_POOLSEL_MASK;
7161 else
7162 vlvf = 0;
7163update_vlvfb:
7164
7165 wr32(E1000_VLVF(i), vlvf);
7166 }
7167}
7168
7169static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
7170{
7171 u32 vlvf;
7172 int idx;
7173
7174
7175 if (vlan == 0)
7176 return 0;
7177
7178
7179 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
7180 vlvf = rd32(E1000_VLVF(idx));
7181 if ((vlvf & VLAN_VID_MASK) == vlan)
7182 break;
7183 }
7184
7185 return idx;
7186}
7187
7188static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
7189{
7190 struct e1000_hw *hw = &adapter->hw;
7191 u32 bits, pf_id;
7192 int idx;
7193
7194 idx = igb_find_vlvf_entry(hw, vid);
7195 if (!idx)
7196 return;
7197
7198
7199
7200
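 /* check whether any pool other than the PF still references this
 * VLVF entry
 */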
7201 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
7202 bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
7203 bits &= rd32(E1000_VLVF(idx));
7204
7205
7206 if (!bits) {
7207 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
7208 wr32(E1000_VLVF(idx), BIT(pf_id));
7209 else
7210 wr32(E1000_VLVF(idx), 0);
7211 }
7212}
7213
7214static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
7215 bool add, u32 vf)
7216{
7217 int pf_id = adapter->vfs_allocated_count;
7218 struct e1000_hw *hw = &adapter->hw;
7219 int err;
7220
7221
7222
7223
7224
7225
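 /* if the PF is already using this VLAN, set the PF pool bit first
 * so the PF keeps receiving the VLAN even if the VF entry changes
 */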
7226 if (add && test_bit(vid, adapter->active_vlans)) {
7227 err = igb_vfta_set(hw, vid, pf_id, true, false);
7228 if (err)
7229 return err;
7230 }
7231
7232 err = igb_vfta_set(hw, vid, vf, add, false);
7233
7234 if (add && !err)
7235 return err;
7236
7237
7238
7239
7240
7241 if (test_bit(vid, adapter->active_vlans) ||
7242 (adapter->flags & IGB_FLAG_VLAN_PROMISC))
7243 igb_update_pf_vlvf(adapter, vid);
7244
7245 return err;
7246}
7247
7248static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
7249{
7250 struct e1000_hw *hw = &adapter->hw;
7251
7252 if (vid)
7253 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
7254 else
7255 wr32(E1000_VMVIR(vf), 0);
7256}
7257
7258static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
7259 u16 vlan, u8 qos)
7260{
7261 int err;
7262
7263 err = igb_set_vf_vlan(adapter, vlan, true, vf);
7264 if (err)
7265 return err;
7266
7267 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
7268 igb_set_vmolr(adapter, vf, !vlan);
7269
7270
7271 if (vlan != adapter->vf_data[vf].pf_vlan)
7272 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7273 false, vf);
7274
7275 adapter->vf_data[vf].pf_vlan = vlan;
7276 adapter->vf_data[vf].pf_qos = qos;
7277 igb_set_vf_vlan_strip(adapter, vf, true);
7278 dev_info(&adapter->pdev->dev,
7279 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
7280 if (test_bit(__IGB_DOWN, &adapter->state)) {
7281 dev_warn(&adapter->pdev->dev,
7282 "The VF VLAN has been set, but the PF device is not up.\n");
7283 dev_warn(&adapter->pdev->dev,
7284 "Bring the PF device up before attempting to use the VF device.\n");
7285 }
7286
7287 return err;
7288}
7289
7290static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
7291{
7292
7293 igb_set_vf_vlan(adapter, 0, true, vf);
7294
7295 igb_set_vmvir(adapter, 0, vf);
7296 igb_set_vmolr(adapter, vf, true);
7297
7298
7299 if (adapter->vf_data[vf].pf_vlan)
7300 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7301 false, vf);
7302
7303 adapter->vf_data[vf].pf_vlan = 0;
7304 adapter->vf_data[vf].pf_qos = 0;
7305 igb_set_vf_vlan_strip(adapter, vf, false);
7306
7307 return 0;
7308}
7309
7310static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
7311 u16 vlan, u8 qos, __be16 vlan_proto)
7312{
7313 struct igb_adapter *adapter = netdev_priv(netdev);
7314
7315 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
7316 return -EINVAL;
7317
7318 if (vlan_proto != htons(ETH_P_8021Q))
7319 return -EPROTONOSUPPORT;
7320
7321 return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
7322 igb_disable_port_vlan(adapter, vf);
7323}
7324
7325static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7326{
7327 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7328 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
7329 int ret;
7330
7331 if (adapter->vf_data[vf].pf_vlan)
7332 return -1;
7333
7334
7335 if (!vid && !add)
7336 return 0;
7337
7338 ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
7339 if (!ret)
7340 igb_set_vf_vlan_strip(adapter, vf, !!vid);
7341 return ret;
7342}
7343
7344static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
7345{
7346 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7347
7348
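 /* clear all VF flags except the PF-set-MAC indication */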
7349 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
7350 vf_data->last_nack = jiffies;
7351
7352
7353 igb_clear_vf_vfta(adapter, vf);
7354 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
7355 igb_set_vmvir(adapter, vf_data->pf_vlan |
7356 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
7357 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
7358 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
7359
7360
7361 adapter->vf_data[vf].num_vf_mc_hashes = 0;
7362
7363
7364 igb_set_rx_mode(adapter->netdev);
7365}
7366
7367static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
7368{
7369 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7370
7371
7372 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7373 eth_zero_addr(vf_mac);
7374
7375
7376 igb_vf_reset(adapter, vf);
7377}
7378
7379static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
7380{
7381 struct e1000_hw *hw = &adapter->hw;
7382 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7383 u32 reg, msgbuf[3];
7384 u8 *addr = (u8 *)(&msgbuf[1]);
7385
7386
7387 igb_vf_reset(adapter, vf);
7388
7389
7390 igb_set_vf_mac(adapter, vf, vf_mac);
7391
7392
7393 reg = rd32(E1000_VFTE);
7394 wr32(E1000_VFTE, reg | BIT(vf));
7395 reg = rd32(E1000_VFRE);
7396 wr32(E1000_VFRE, reg | BIT(vf));
7397
7398 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
7399
7400
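 /* reply to the reset with an ACK and the VF MAC address, or a NACK
 * if no address has been assigned yet
 */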
7401 if (!is_zero_ether_addr(vf_mac)) {
7402 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
7403 memcpy(addr, vf_mac, ETH_ALEN);
7404 } else {
7405 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
7406 }
7407 igb_write_mbx(hw, msgbuf, 3, vf);
7408}
7409
7410static void igb_flush_mac_table(struct igb_adapter *adapter)
7411{
7412 struct e1000_hw *hw = &adapter->hw;
7413 int i;
7414
7415 for (i = 0; i < hw->mac.rar_entry_count; i++) {
7416 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
7417 eth_zero_addr(adapter->mac_table[i].addr);
7418 adapter->mac_table[i].queue = 0;
7419 igb_rar_set_index(adapter, i);
7420 }
7421}
7422
7423static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
7424{
7425 struct e1000_hw *hw = &adapter->hw;
7426
7427 int rar_entries = hw->mac.rar_entry_count -
7428 adapter->vfs_allocated_count;
7429 int i, count = 0;
7430
7431 for (i = 0; i < rar_entries; i++) {
7432
7433 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
7434 continue;
7435
7436
7437 if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
7438 (adapter->mac_table[i].queue != queue))
7439 continue;
7440
7441 count++;
7442 }
7443
7444 return count;
7445}
7446
7447
7448static void igb_set_default_mac_filter(struct igb_adapter *adapter)
7449{
7450 struct igb_mac_addr *mac_table = &adapter->mac_table[0];
7451
7452 ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
7453 mac_table->queue = adapter->vfs_allocated_count;
7454 mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7455
7456 igb_rar_set_index(adapter, 0);
7457}
7458
7459
7460
7461
7462
7463
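/* A RAR table entry can only be reused when it is free, or when it already
 * holds the same address with the same address type (destination vs. source).
 */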
7464static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
7465 const u8 *addr, const u8 flags)
7466{
7467 if (!(entry->state & IGB_MAC_STATE_IN_USE))
7468 return true;
7469
7470 if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
7471 (flags & IGB_MAC_STATE_SRC_ADDR))
7472 return false;
7473
7474 if (!ether_addr_equal(addr, entry->addr))
7475 return false;
7476
7477 return true;
7478}
7479
7480
7481
7482
7483
7484
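/* Add a MAC filter steering traffic for 'addr' to 'queue'.  The match is on
 * the destination address unless 'flags' contains IGB_MAC_STATE_SRC_ADDR, in
 * which case the source address is matched instead.  Returns the RAR index
 * used, or -ENOSPC when the filter table is full.
 */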
7485static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
7486 const u8 *addr, const u8 queue,
7487 const u8 flags)
7488{
7489 struct e1000_hw *hw = &adapter->hw;
7490 int rar_entries = hw->mac.rar_entry_count -
7491 adapter->vfs_allocated_count;
7492 int i;
7493
7494 if (is_zero_ether_addr(addr))
7495 return -EINVAL;
7496
7497
7498
7499
7500
7501 for (i = 0; i < rar_entries; i++) {
7502 if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
7503 addr, flags))
7504 continue;
7505
7506 ether_addr_copy(adapter->mac_table[i].addr, addr);
7507 adapter->mac_table[i].queue = queue;
7508 adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;
7509
7510 igb_rar_set_index(adapter, i);
7511 return i;
7512 }
7513
7514 return -ENOSPC;
7515}
7516
7517static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7518 const u8 queue)
7519{
7520 return igb_add_mac_filter_flags(adapter, addr, queue, 0);
7521}
7522
7523
7524
7525
7526
7527
7528
7529static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
7530 const u8 *addr, const u8 queue,
7531 const u8 flags)
7532{
7533 struct e1000_hw *hw = &adapter->hw;
7534 int rar_entries = hw->mac.rar_entry_count -
7535 adapter->vfs_allocated_count;
7536 int i;
7537
7538 if (is_zero_ether_addr(addr))
7539 return -EINVAL;
7540
7541
7542
7543
7544
7545 for (i = 0; i < rar_entries; i++) {
7546 if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
7547 continue;
7548 if ((adapter->mac_table[i].state & flags) != flags)
7549 continue;
7550 if (adapter->mac_table[i].queue != queue)
7551 continue;
7552 if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
7553 continue;
7554
7555
7556
7557
7558 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
7559 adapter->mac_table[i].state =
7560 IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7561 adapter->mac_table[i].queue =
7562 adapter->vfs_allocated_count;
7563 } else {
7564 adapter->mac_table[i].state = 0;
7565 adapter->mac_table[i].queue = 0;
7566 eth_zero_addr(adapter->mac_table[i].addr);
7567 }
7568
7569 igb_rar_set_index(adapter, i);
7570 return 0;
7571 }
7572
7573 return -ENOENT;
7574}
7575
7576static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7577 const u8 queue)
7578{
7579 return igb_del_mac_filter_flags(adapter, addr, queue, 0);
7580}
7581
7582int igb_add_mac_steering_filter(struct igb_adapter *adapter,
7583 const u8 *addr, u8 queue, u8 flags)
7584{
7585 struct e1000_hw *hw = &adapter->hw;
7586
7587
7588
7589
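 /* RAR-based queue steering is only supported on i210 devices */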
7590 if (hw->mac.type != e1000_i210)
7591 return -EOPNOTSUPP;
7592
7593 return igb_add_mac_filter_flags(adapter, addr, queue,
7594 IGB_MAC_STATE_QUEUE_STEERING | flags);
7595}
7596
7597int igb_del_mac_steering_filter(struct igb_adapter *adapter,
7598 const u8 *addr, u8 queue, u8 flags)
7599{
7600 return igb_del_mac_filter_flags(adapter, addr, queue,
7601 IGB_MAC_STATE_QUEUE_STEERING | flags);
7602}
7603
7604static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
7605{
7606 struct igb_adapter *adapter = netdev_priv(netdev);
7607 int ret;
7608
7609 ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7610
7611 return min_t(int, ret, 0);
7612}
7613
7614static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
7615{
7616 struct igb_adapter *adapter = netdev_priv(netdev);
7617
7618 igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7619
7620 return 0;
7621}
7622
7623static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
7624 const u32 info, const u8 *addr)
7625{
7626 struct pci_dev *pdev = adapter->pdev;
7627 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7628 struct list_head *pos;
7629 struct vf_mac_filter *entry = NULL;
7630 int ret = 0;
7631
7632 switch (info) {
7633 case E1000_VF_MAC_FILTER_CLR:
7634
7635 list_for_each(pos, &adapter->vf_macs.l) {
7636 entry = list_entry(pos, struct vf_mac_filter, l);
7637 if (entry->vf == vf) {
7638 entry->vf = -1;
7639 entry->free = true;
7640 igb_del_mac_filter(adapter, entry->vf_mac, vf);
7641 }
7642 }
7643 break;
7644 case E1000_VF_MAC_FILTER_ADD:
7645 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7646 !vf_data->trusted) {
7647 dev_warn(&pdev->dev,
7648 "VF %d requested MAC filter but is administratively denied\n",
7649 vf);
7650 return -EINVAL;
7651 }
7652 if (!is_valid_ether_addr(addr)) {
7653 dev_warn(&pdev->dev,
7654 "VF %d attempted to set invalid MAC filter\n",
7655 vf);
7656 return -EINVAL;
7657 }
7658
7659
7660 list_for_each(pos, &adapter->vf_macs.l) {
7661 entry = list_entry(pos, struct vf_mac_filter, l);
7662 if (entry->free)
7663 break;
7664 }
7665
7666 if (entry && entry->free) {
7667 entry->free = false;
7668 entry->vf = vf;
7669 ether_addr_copy(entry->vf_mac, addr);
7670
7671 ret = igb_add_mac_filter(adapter, addr, vf);
7672 ret = min_t(int, ret, 0);
7673 } else {
7674 ret = -ENOSPC;
7675 }
7676
7677 if (ret == -ENOSPC)
7678 dev_warn(&pdev->dev,
7679 "VF %d has requested MAC filter but there is no space for it\n",
7680 vf);
7681 break;
7682 default:
7683 ret = -EINVAL;
7684 break;
7685 }
7686
7687 return ret;
7688}
7689
7690static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
7691{
7692 struct pci_dev *pdev = adapter->pdev;
7693 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7694 u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
7695
7696
7697
7698
7699 unsigned char *addr = (unsigned char *)&msg[1];
7700 int ret = 0;
7701
7702 if (!info) {
7703 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7704 !vf_data->trusted) {
7705 dev_warn(&pdev->dev,
7706 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
7707 vf);
7708 return -EINVAL;
7709 }
7710
7711 if (!is_valid_ether_addr(addr)) {
7712 dev_warn(&pdev->dev,
7713 "VF %d attempted to set invalid MAC\n",
7714 vf);
7715 return -EINVAL;
7716 }
7717
7718 ret = igb_set_vf_mac(adapter, vf, addr);
7719 } else {
7720 ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
7721 }
7722
7723 return ret;
7724}
7725
7726static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
7727{
7728 struct e1000_hw *hw = &adapter->hw;
7729 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7730 u32 msg = E1000_VT_MSGTYPE_NACK;
7731
7732
7733 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
7734 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
7735 igb_write_mbx(hw, &msg, 1, vf);
7736 vf_data->last_nack = jiffies;
7737 }
7738}
7739
7740static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
7741{
7742 struct pci_dev *pdev = adapter->pdev;
7743 u32 msgbuf[E1000_VFMAILBOX_SIZE];
7744 struct e1000_hw *hw = &adapter->hw;
7745 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7746 s32 retval;
7747
7748 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);
7749
7750 if (retval) {
7751
7752 dev_err(&pdev->dev, "Error receiving message from VF\n");
7753 vf_data->flags &= ~IGB_VF_FLAG_CTS;
7754 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7755 goto unlock;
7756 goto out;
7757 }
7758
7759
7760 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
7761 goto unlock;
7762
7763
7764
7765
7766 if (msgbuf[0] == E1000_VF_RESET) {
7767
7768 igb_vf_reset_msg(adapter, vf);
7769 return;
7770 }
7771
7772 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
7773 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7774 goto unlock;
7775 retval = -1;
7776 goto out;
7777 }
7778
7779 switch ((msgbuf[0] & 0xFFFF)) {
7780 case E1000_VF_SET_MAC_ADDR:
7781 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
7782 break;
7783 case E1000_VF_SET_PROMISC:
7784 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
7785 break;
7786 case E1000_VF_SET_MULTICAST:
7787 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
7788 break;
7789 case E1000_VF_SET_LPE:
7790 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
7791 break;
7792 case E1000_VF_SET_VLAN:
7793 retval = -1;
7794 if (vf_data->pf_vlan)
7795 dev_warn(&pdev->dev,
7796 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
7797 vf);
7798 else
7799 retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
7800 break;
7801 default:
7802 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
7803 retval = -1;
7804 break;
7805 }
7806
7807 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
7808out:
7809
7810 if (retval)
7811 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
7812 else
7813 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
7814
7815
7816 igb_write_mbx(hw, msgbuf, 1, vf);
7817 return;
7818
7819unlock:
7820 igb_unlock_mbx(hw, vf);
7821}
7822
7823static void igb_msg_task(struct igb_adapter *adapter)
7824{
7825 struct e1000_hw *hw = &adapter->hw;
7826 u32 vf;
7827
7828 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
7829
7830 if (!igb_check_for_rst(hw, vf))
7831 igb_vf_reset_event(adapter, vf);
7832
7833
7834 if (!igb_check_for_msg(hw, vf))
7835 igb_rcv_msg_from_vf(adapter, vf);
7836
7837
7838 if (!igb_check_for_ack(hw, vf))
7839 igb_rcv_ack_from_vf(adapter, vf);
7840 }
7841}
7842
7843
7844
7845
7846
7847
7848
7849
7850
7851
7852
7853
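/**
 * igb_set_uta - set or clear the Unicast Table Array
 * @adapter: board private structure
 * @set: true to set every UTA bit, false to clear them all
 *
 * The UTA is only programmed when SR-IOV VFs are allocated; all hash
 * vector bits are set or cleared as a group.
 */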
7854static void igb_set_uta(struct igb_adapter *adapter, bool set)
7855{
7856 struct e1000_hw *hw = &adapter->hw;
7857 u32 uta = set ? ~0 : 0;
7858 int i;
7859
7860
7861 if (!adapter->vfs_allocated_count)
7862 return;
7863
7864 for (i = hw->mac.uta_reg_count; i--;)
7865 array_wr32(E1000_UTA, i, uta);
7866}
7867
7868
7869
7870
7871
7872
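/**
 * igb_intr_msi - MSI Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to the board private structure (struct igb_adapter)
 */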
7873static irqreturn_t igb_intr_msi(int irq, void *data)
7874{
7875 struct igb_adapter *adapter = data;
7876 struct igb_q_vector *q_vector = adapter->q_vector[0];
7877 struct e1000_hw *hw = &adapter->hw;
7878
7879 u32 icr = rd32(E1000_ICR);
7880
7881 igb_write_itr(q_vector);
7882
7883 if (icr & E1000_ICR_DRSTA)
7884 schedule_work(&adapter->reset_task);
7885
7886 if (icr & E1000_ICR_DOUTSYNC) {
7887
7888 adapter->stats.doosync++;
7889 }
7890
7891 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7892 hw->mac.get_link_status = 1;
7893 if (!test_bit(__IGB_DOWN, &adapter->state))
7894 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7895 }
7896
7897 if (icr & E1000_ICR_TS)
7898 igb_tsync_interrupt(adapter);
7899
7900 napi_schedule(&q_vector->napi);
7901
7902 return IRQ_HANDLED;
7903}
7904
7905
7906
7907
7908
7909
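/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to the board private structure (struct igb_adapter)
 */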
7910static irqreturn_t igb_intr(int irq, void *data)
7911{
7912 struct igb_adapter *adapter = data;
7913 struct igb_q_vector *q_vector = adapter->q_vector[0];
7914 struct e1000_hw *hw = &adapter->hw;
7915
7916
7917
7918 u32 icr = rd32(E1000_ICR);
7919
7920
7921
7922
7923 if (!(icr & E1000_ICR_INT_ASSERTED))
7924 return IRQ_NONE;
7925
7926 igb_write_itr(q_vector);
7927
7928 if (icr & E1000_ICR_DRSTA)
7929 schedule_work(&adapter->reset_task);
7930
7931 if (icr & E1000_ICR_DOUTSYNC) {
7932
7933 adapter->stats.doosync++;
7934 }
7935
7936 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7937 hw->mac.get_link_status = 1;
7938
7939 if (!test_bit(__IGB_DOWN, &adapter->state))
7940 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7941 }
7942
7943 if (icr & E1000_ICR_TS)
7944 igb_tsync_interrupt(adapter);
7945
7946 napi_schedule(&q_vector->napi);
7947
7948 return IRQ_HANDLED;
7949}
7950
7951static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
7952{
7953 struct igb_adapter *adapter = q_vector->adapter;
7954 struct e1000_hw *hw = &adapter->hw;
7955
7956 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
7957 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
7958 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
7959 igb_set_itr(q_vector);
7960 else
7961 igb_update_ring_itr(q_vector);
7962 }
7963
7964 if (!test_bit(__IGB_DOWN, &adapter->state)) {
7965 if (adapter->flags & IGB_FLAG_HAS_MSIX)
7966 wr32(E1000_EIMS, q_vector->eims_value);
7967 else
7968 igb_irq_enable(adapter);
7969 }
7970}
7971
7972
7973
7974
7975
7976
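/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 */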
7977static int igb_poll(struct napi_struct *napi, int budget)
7978{
7979 struct igb_q_vector *q_vector = container_of(napi,
7980 struct igb_q_vector,
7981 napi);
7982 bool clean_complete = true;
7983 int work_done = 0;
7984
7985#ifdef CONFIG_IGB_DCA
7986 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
7987 igb_update_dca(q_vector);
7988#endif
7989 if (q_vector->tx.ring)
7990 clean_complete = igb_clean_tx_irq(q_vector, budget);
7991
7992 if (q_vector->rx.ring) {
7993 int cleaned = igb_clean_rx_irq(q_vector, budget);
7994
7995 work_done += cleaned;
7996 if (cleaned >= budget)
7997 clean_complete = false;
7998 }
7999
8000
8001 if (!clean_complete)
8002 return budget;
8003
8004
8005
8006
8007 if (likely(napi_complete_done(napi, work_done)))
8008 igb_ring_irq_enable(q_vector);
8009
8010 return min(work_done, budget - 1);
8011}
8012
8013
8014
8015
8016
8017
8018
8019
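/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * @napi_budget: napi budget passed through to napi_consume_skb()
 *
 * Returns true if the ring is completely cleaned.
 */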
8020static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
8021{
8022 struct igb_adapter *adapter = q_vector->adapter;
8023 struct igb_ring *tx_ring = q_vector->tx.ring;
8024 struct igb_tx_buffer *tx_buffer;
8025 union e1000_adv_tx_desc *tx_desc;
8026 unsigned int total_bytes = 0, total_packets = 0;
8027 unsigned int budget = q_vector->tx.work_limit;
8028 unsigned int i = tx_ring->next_to_clean;
8029
8030 if (test_bit(__IGB_DOWN, &adapter->state))
8031 return true;
8032
8033 tx_buffer = &tx_ring->tx_buffer_info[i];
8034 tx_desc = IGB_TX_DESC(tx_ring, i);
8035 i -= tx_ring->count;
8036
8037 do {
8038 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
8039
8040
8041 if (!eop_desc)
8042 break;
8043
8044
8045 smp_rmb();
8046
8047
8048 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
8049 break;
8050
8051
8052 tx_buffer->next_to_watch = NULL;
8053
8054
8055 total_bytes += tx_buffer->bytecount;
8056 total_packets += tx_buffer->gso_segs;
8057
8058
8059 if (tx_buffer->type == IGB_TYPE_SKB)
8060 napi_consume_skb(tx_buffer->skb, napi_budget);
8061 else
8062 xdp_return_frame(tx_buffer->xdpf);
8063
8064
8065 dma_unmap_single(tx_ring->dev,
8066 dma_unmap_addr(tx_buffer, dma),
8067 dma_unmap_len(tx_buffer, len),
8068 DMA_TO_DEVICE);
8069
8070
8071 dma_unmap_len_set(tx_buffer, len, 0);
8072
8073
8074 while (tx_desc != eop_desc) {
8075 tx_buffer++;
8076 tx_desc++;
8077 i++;
8078 if (unlikely(!i)) {
8079 i -= tx_ring->count;
8080 tx_buffer = tx_ring->tx_buffer_info;
8081 tx_desc = IGB_TX_DESC(tx_ring, 0);
8082 }
8083
8084
8085 if (dma_unmap_len(tx_buffer, len)) {
8086 dma_unmap_page(tx_ring->dev,
8087 dma_unmap_addr(tx_buffer, dma),
8088 dma_unmap_len(tx_buffer, len),
8089 DMA_TO_DEVICE);
8090 dma_unmap_len_set(tx_buffer, len, 0);
8091 }
8092 }
8093
8094
8095 tx_buffer++;
8096 tx_desc++;
8097 i++;
8098 if (unlikely(!i)) {
8099 i -= tx_ring->count;
8100 tx_buffer = tx_ring->tx_buffer_info;
8101 tx_desc = IGB_TX_DESC(tx_ring, 0);
8102 }
8103
8104
8105 prefetch(tx_desc);
8106
8107
8108 budget--;
8109 } while (likely(budget));
8110
8111 netdev_tx_completed_queue(txring_txq(tx_ring),
8112 total_packets, total_bytes);
8113 i += tx_ring->count;
8114 tx_ring->next_to_clean = i;
8115 u64_stats_update_begin(&tx_ring->tx_syncp);
8116 tx_ring->tx_stats.bytes += total_bytes;
8117 tx_ring->tx_stats.packets += total_packets;
8118 u64_stats_update_end(&tx_ring->tx_syncp);
8119 q_vector->tx.total_bytes += total_bytes;
8120 q_vector->tx.total_packets += total_packets;
8121
8122 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
8123 struct e1000_hw *hw = &adapter->hw;
8124
8125
8126
8127
8128 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
8129 if (tx_buffer->next_to_watch &&
8130 time_after(jiffies, tx_buffer->time_stamp +
8131 (adapter->tx_timeout_factor * HZ)) &&
8132 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
8133
8134
8135 dev_err(tx_ring->dev,
8136 "Detected Tx Unit Hang\n"
8137 " Tx Queue <%d>\n"
8138 " TDH <%x>\n"
8139 " TDT <%x>\n"
8140 " next_to_use <%x>\n"
8141 " next_to_clean <%x>\n"
8142 "buffer_info[next_to_clean]\n"
8143 " time_stamp <%lx>\n"
8144 " next_to_watch <%p>\n"
8145 " jiffies <%lx>\n"
8146 " desc.status <%x>\n",
8147 tx_ring->queue_index,
8148 rd32(E1000_TDH(tx_ring->reg_idx)),
8149 readl(tx_ring->tail),
8150 tx_ring->next_to_use,
8151 tx_ring->next_to_clean,
8152 tx_buffer->time_stamp,
8153 tx_buffer->next_to_watch,
8154 jiffies,
8155 tx_buffer->next_to_watch->wb.status);
8156 netif_stop_subqueue(tx_ring->netdev,
8157 tx_ring->queue_index);
8158
8159
8160 return true;
8161 }
8162 }
8163
8164#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
8165 if (unlikely(total_packets &&
8166 netif_carrier_ok(tx_ring->netdev) &&
8167 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
8168
8169
8170
8171 smp_mb();
8172 if (__netif_subqueue_stopped(tx_ring->netdev,
8173 tx_ring->queue_index) &&
8174 !(test_bit(__IGB_DOWN, &adapter->state))) {
8175 netif_wake_subqueue(tx_ring->netdev,
8176 tx_ring->queue_index);
8177
8178 u64_stats_update_begin(&tx_ring->tx_syncp);
8179 tx_ring->tx_stats.restart_queue++;
8180 u64_stats_update_end(&tx_ring->tx_syncp);
8181 }
8182 }
8183
8184 return !!budget;
8185}
8186
8187
8188
8189
8190
8191
8192
8193
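/**
 * igb_reuse_rx_page - store the used page back on the ring for reuse
 * @rx_ring: ring the buffer belongs to
 * @old_buff: donor buffer whose page is copied into the next_to_alloc slot
 */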
8194static void igb_reuse_rx_page(struct igb_ring *rx_ring,
8195 struct igb_rx_buffer *old_buff)
8196{
8197 struct igb_rx_buffer *new_buff;
8198 u16 nta = rx_ring->next_to_alloc;
8199
8200 new_buff = &rx_ring->rx_buffer_info[nta];
8201
8202
8203 nta++;
8204 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
8205
8206
8207
8208
8209
8210 new_buff->dma = old_buff->dma;
8211 new_buff->page = old_buff->page;
8212 new_buff->page_offset = old_buff->page_offset;
8213 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
8214}
8215
8216static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
8217 int rx_buf_pgcnt)
8218{
8219 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
8220 struct page *page = rx_buffer->page;
8221
8222
8223 if (!dev_page_is_reusable(page))
8224 return false;
8225
8226#if (PAGE_SIZE < 8192)
8227
8228 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
8229 return false;
8230#else
8231#define IGB_LAST_OFFSET \
8232 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
8233
8234 if (rx_buffer->page_offset > IGB_LAST_OFFSET)
8235 return false;
8236#endif
8237
8238
8239
8240
8241
8242 if (unlikely(pagecnt_bias == 1)) {
8243 page_ref_add(page, USHRT_MAX - 1);
8244 rx_buffer->pagecnt_bias = USHRT_MAX;
8245 }
8246
8247 return true;
8248}
8249
8250
8251
8252
8253
8254
8255
8256
8257
8258
8259static void igb_add_rx_frag(struct igb_ring *rx_ring,
8260 struct igb_rx_buffer *rx_buffer,
8261 struct sk_buff *skb,
8262 unsigned int size)
8263{
8264#if (PAGE_SIZE < 8192)
8265 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8266#else
8267 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
8268 SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
8269 SKB_DATA_ALIGN(size);
8270#endif
8271 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
8272 rx_buffer->page_offset, size, truesize);
8273#if (PAGE_SIZE < 8192)
8274 rx_buffer->page_offset ^= truesize;
8275#else
8276 rx_buffer->page_offset += truesize;
8277#endif
8278}
8279
8280static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
8281 struct igb_rx_buffer *rx_buffer,
8282 struct xdp_buff *xdp,
8283 ktime_t timestamp)
8284{
8285#if (PAGE_SIZE < 8192)
8286 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8287#else
8288 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
8289 xdp->data_hard_start);
8290#endif
8291 unsigned int size = xdp->data_end - xdp->data;
8292 unsigned int headlen;
8293 struct sk_buff *skb;
8294
8295
8296 net_prefetch(xdp->data);
8297
8298
8299 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
8300 if (unlikely(!skb))
8301 return NULL;
8302
8303 if (timestamp)
8304 skb_hwtstamps(skb)->hwtstamp = timestamp;
8305
8306
8307 headlen = size;
8308 if (headlen > IGB_RX_HDR_LEN)
8309 headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN);
8310
8311
8312 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));
8313
8314
8315 size -= headlen;
8316 if (size) {
8317 skb_add_rx_frag(skb, 0, rx_buffer->page,
8318 (xdp->data + headlen) - page_address(rx_buffer->page),
8319 size, truesize);
8320#if (PAGE_SIZE < 8192)
8321 rx_buffer->page_offset ^= truesize;
8322#else
8323 rx_buffer->page_offset += truesize;
8324#endif
8325 } else {
8326 rx_buffer->pagecnt_bias++;
8327 }
8328
8329 return skb;
8330}
8331
8332static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
8333 struct igb_rx_buffer *rx_buffer,
8334 struct xdp_buff *xdp,
8335 ktime_t timestamp)
8336{
8337#if (PAGE_SIZE < 8192)
8338 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8339#else
8340 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
8341 SKB_DATA_ALIGN(xdp->data_end -
8342 xdp->data_hard_start);
8343#endif
8344 unsigned int metasize = xdp->data - xdp->data_meta;
8345 struct sk_buff *skb;
8346
8347
8348 net_prefetch(xdp->data_meta);
8349
8350
8351 skb = build_skb(xdp->data_hard_start, truesize);
8352 if (unlikely(!skb))
8353 return NULL;
8354
8355
8356 skb_reserve(skb, xdp->data - xdp->data_hard_start);
8357 __skb_put(skb, xdp->data_end - xdp->data);
8358
8359 if (metasize)
8360 skb_metadata_set(skb, metasize);
8361
8362 if (timestamp)
8363 skb_hwtstamps(skb)->hwtstamp = timestamp;
8364
8365
8366#if (PAGE_SIZE < 8192)
8367 rx_buffer->page_offset ^= truesize;
8368#else
8369 rx_buffer->page_offset += truesize;
8370#endif
8371
8372 return skb;
8373}
8374
8375static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
8376 struct igb_ring *rx_ring,
8377 struct xdp_buff *xdp)
8378{
8379 int err, result = IGB_XDP_PASS;
8380 struct bpf_prog *xdp_prog;
8381 u32 act;
8382
8383 rcu_read_lock();
8384 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
8385
8386 if (!xdp_prog)
8387 goto xdp_out;
8388
8389 prefetchw(xdp->data_hard_start);
8390
8391 act = bpf_prog_run_xdp(xdp_prog, xdp);
8392 switch (act) {
8393 case XDP_PASS:
8394 break;
8395 case XDP_TX:
8396 result = igb_xdp_xmit_back(adapter, xdp);
8397 if (result == IGB_XDP_CONSUMED)
8398 goto out_failure;
8399 break;
8400 case XDP_REDIRECT:
8401 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
8402 if (err)
8403 goto out_failure;
8404 result = IGB_XDP_REDIR;
8405 break;
8406 default:
8407 bpf_warn_invalid_xdp_action(act);
8408 fallthrough;
8409 case XDP_ABORTED:
8410out_failure:
8411 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
8412 fallthrough;
8413 case XDP_DROP:
8414 result = IGB_XDP_CONSUMED;
8415 break;
8416 }
8417xdp_out:
8418 rcu_read_unlock();
8419 return ERR_PTR(-result);
8420}
8421
8422static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
8423 unsigned int size)
8424{
8425 unsigned int truesize;
8426
8427#if (PAGE_SIZE < 8192)
8428 truesize = igb_rx_pg_size(rx_ring) / 2;
8429#else
8430 truesize = ring_uses_build_skb(rx_ring) ?
8431 SKB_DATA_ALIGN(IGB_SKB_PAD + size) +
8432 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
8433 SKB_DATA_ALIGN(size);
8434#endif
8435 return truesize;
8436}
8437
8438static void igb_rx_buffer_flip(struct igb_ring *rx_ring,
8439 struct igb_rx_buffer *rx_buffer,
8440 unsigned int size)
8441{
8442 unsigned int truesize = igb_rx_frame_truesize(rx_ring, size);
8443#if (PAGE_SIZE < 8192)
8444 rx_buffer->page_offset ^= truesize;
8445#else
8446 rx_buffer->page_offset += truesize;
8447#endif
8448}
8449
8450static inline void igb_rx_checksum(struct igb_ring *ring,
8451 union e1000_adv_rx_desc *rx_desc,
8452 struct sk_buff *skb)
8453{
8454 skb_checksum_none_assert(skb);
8455
8456
8457 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
8458 return;
8459
8460
8461 if (!(ring->netdev->features & NETIF_F_RXCSUM))
8462 return;
8463
8464
8465 if (igb_test_staterr(rx_desc,
8466 E1000_RXDEXT_STATERR_TCPE |
8467 E1000_RXDEXT_STATERR_IPE)) {
8468
8469
8470
8471
8472 if (!((skb->len == 60) &&
8473 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
8474 u64_stats_update_begin(&ring->rx_syncp);
8475 ring->rx_stats.csum_err++;
8476 u64_stats_update_end(&ring->rx_syncp);
8477 }
8478
8479 return;
8480 }
8481
8482 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
8483 E1000_RXD_STAT_UDPCS))
8484 skb->ip_summed = CHECKSUM_UNNECESSARY;
8485
8486 dev_dbg(ring->dev, "cksum success: bits %08X\n",
8487 le32_to_cpu(rx_desc->wb.upper.status_error));
8488}
8489
8490static inline void igb_rx_hash(struct igb_ring *ring,
8491 union e1000_adv_rx_desc *rx_desc,
8492 struct sk_buff *skb)
8493{
8494 if (ring->netdev->features & NETIF_F_RXHASH)
8495 skb_set_hash(skb,
8496 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
8497 PKT_HASH_TYPE_L3);
8498}
8499
8500
8501
8502
8503
8504
8505
8506
8507
8508
8509
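/**
 * igb_is_non_eop - advance next_to_clean and check for end-of-packet
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for the current buffer
 *
 * Bumps next_to_clean, prefetches the next descriptor and returns true
 * when the descriptor does not have the EOP bit set, i.e. the frame
 * continues in the next buffer.
 */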
8510static bool igb_is_non_eop(struct igb_ring *rx_ring,
8511 union e1000_adv_rx_desc *rx_desc)
8512{
8513 u32 ntc = rx_ring->next_to_clean + 1;
8514
8515
8516 ntc = (ntc < rx_ring->count) ? ntc : 0;
8517 rx_ring->next_to_clean = ntc;
8518
8519 prefetch(IGB_RX_DESC(rx_ring, ntc));
8520
8521 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
8522 return false;
8523
8524 return true;
8525}
8526
8527
8528
8529
8530
8531
8532
8533
8534
8535
8536
8537
8538
8539
8540
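/**
 * igb_cleanup_headers - catch hardware errors and pad short frames
 * @rx_ring: Rx descriptor ring the packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to the current skb being fixed
 *
 * Returns true if the skb was consumed (error frame or XDP error pointer)
 * and must not be handed up the stack, false otherwise.
 */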
8541static bool igb_cleanup_headers(struct igb_ring *rx_ring,
8542 union e1000_adv_rx_desc *rx_desc,
8543 struct sk_buff *skb)
8544{
8545
8546 if (IS_ERR(skb))
8547 return true;
8548
8549 if (unlikely((igb_test_staterr(rx_desc,
8550 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
8551 struct net_device *netdev = rx_ring->netdev;
8552 if (!(netdev->features & NETIF_F_RXALL)) {
8553 dev_kfree_skb_any(skb);
8554 return true;
8555 }
8556 }
8557
8558
8559 if (eth_skb_pad(skb))
8560 return true;
8561
8562 return false;
8563}
8564
8565
8566
8567
8568
8569
8570
8571
8572
8573
8574
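/**
 * igb_process_skb_fields - populate skb fields from the Rx descriptor
 * @rx_ring: Rx descriptor ring the packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to the current skb being populated
 *
 * Fills in the RSS hash, checksum status, hardware timestamp, VLAN tag,
 * receive queue and protocol fields of the skb.
 */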
8575static void igb_process_skb_fields(struct igb_ring *rx_ring,
8576 union e1000_adv_rx_desc *rx_desc,
8577 struct sk_buff *skb)
8578{
8579 struct net_device *dev = rx_ring->netdev;
8580
8581 igb_rx_hash(rx_ring, rx_desc, skb);
8582
8583 igb_rx_checksum(rx_ring, rx_desc, skb);
8584
8585 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
8586 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
8587 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
8588
8589 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
8590 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
8591 u16 vid;
8592
8593 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
8594 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
8595 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
8596 else
8597 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
8598
8599 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
8600 }
8601
8602 skb_record_rx_queue(skb, rx_ring->queue_index);
8603
8604 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
8605}
8606
8607static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
8608{
8609 return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
8610}
8611
8612static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
8613 const unsigned int size, int *rx_buf_pgcnt)
8614{
8615 struct igb_rx_buffer *rx_buffer;
8616
8617 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
8618 *rx_buf_pgcnt =
8619#if (PAGE_SIZE < 8192)
8620 page_count(rx_buffer->page);
8621#else
8622 0;
8623#endif
8624 prefetchw(rx_buffer->page);
8625
8626
8627 dma_sync_single_range_for_cpu(rx_ring->dev,
8628 rx_buffer->dma,
8629 rx_buffer->page_offset,
8630 size,
8631 DMA_FROM_DEVICE);
8632
8633 rx_buffer->pagecnt_bias--;
8634
8635 return rx_buffer;
8636}
8637
8638static void igb_put_rx_buffer(struct igb_ring *rx_ring,
8639 struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
8640{
8641 if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
8642
8643 igb_reuse_rx_page(rx_ring, rx_buffer);
8644 } else {
8645
8646
8647
8648 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
8649 igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
8650 IGB_RX_DMA_ATTR);
8651 __page_frag_cache_drain(rx_buffer->page,
8652 rx_buffer->pagecnt_bias);
8653 }
8654
8655
8656 rx_buffer->page = NULL;
8657}
8658
8659static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
8660{
8661 struct igb_adapter *adapter = q_vector->adapter;
8662 struct igb_ring *rx_ring = q_vector->rx.ring;
8663 struct sk_buff *skb = rx_ring->skb;
8664 unsigned int total_bytes = 0, total_packets = 0;
8665 u16 cleaned_count = igb_desc_unused(rx_ring);
8666 unsigned int xdp_xmit = 0;
8667 struct xdp_buff xdp;
8668 u32 frame_sz = 0;
8669 int rx_buf_pgcnt;
8670
8671
8672#if (PAGE_SIZE < 8192)
8673 frame_sz = igb_rx_frame_truesize(rx_ring, 0);
8674#endif
8675 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
8676
8677 while (likely(total_packets < budget)) {
8678 union e1000_adv_rx_desc *rx_desc;
8679 struct igb_rx_buffer *rx_buffer;
8680 ktime_t timestamp = 0;
8681 int pkt_offset = 0;
8682 unsigned int size;
8683 void *pktbuf;
8684
8685
8686 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
8687 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8688 cleaned_count = 0;
8689 }
8690
8691 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
8692 size = le16_to_cpu(rx_desc->wb.upper.length);
8693 if (!size)
8694 break;
8695
8696
8697
8698
8699
8700 dma_rmb();
8701
8702 rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
8703 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
8704
8705
8706 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
8707 int ts_hdr_len;
8708
8709 ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
8710 pktbuf, &timestamp);
8711
8712 pkt_offset += ts_hdr_len;
8713 size -= ts_hdr_len;
8714 }
8715
8716
8717 if (!skb) {
8718 unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
8719 unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
8720
8721 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
8722#if (PAGE_SIZE > 4096)
8723
8724 xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
8725#endif
8726 skb = igb_run_xdp(adapter, rx_ring, &xdp);
8727 }
8728
8729 if (IS_ERR(skb)) {
8730 unsigned int xdp_res = -PTR_ERR(skb);
8731
8732 if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
8733 xdp_xmit |= xdp_res;
8734 igb_rx_buffer_flip(rx_ring, rx_buffer, size);
8735 } else {
8736 rx_buffer->pagecnt_bias++;
8737 }
8738 total_packets++;
8739 total_bytes += size;
8740 } else if (skb)
8741 igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
8742 else if (ring_uses_build_skb(rx_ring))
8743 skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
8744 timestamp);
8745 else
8746 skb = igb_construct_skb(rx_ring, rx_buffer,
8747 &xdp, timestamp);
8748
8749
8750 if (!skb) {
8751 rx_ring->rx_stats.alloc_failed++;
8752 rx_buffer->pagecnt_bias++;
8753 break;
8754 }
8755
8756 igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
8757 cleaned_count++;
8758
8759
8760 if (igb_is_non_eop(rx_ring, rx_desc))
8761 continue;
8762
8763
8764 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
8765 skb = NULL;
8766 continue;
8767 }
8768
8769
8770 total_bytes += skb->len;
8771
8772
8773 igb_process_skb_fields(rx_ring, rx_desc, skb);
8774
8775 napi_gro_receive(&q_vector->napi, skb);
8776
8777
8778 skb = NULL;
8779
8780
8781 total_packets++;
8782 }
8783
8784
8785 rx_ring->skb = skb;
8786
8787 if (xdp_xmit & IGB_XDP_REDIR)
8788 xdp_do_flush();
8789
8790 if (xdp_xmit & IGB_XDP_TX) {
8791 struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
8792
8793 igb_xdp_ring_update_tail(tx_ring);
8794 }
8795
8796 u64_stats_update_begin(&rx_ring->rx_syncp);
8797 rx_ring->rx_stats.packets += total_packets;
8798 rx_ring->rx_stats.bytes += total_bytes;
8799 u64_stats_update_end(&rx_ring->rx_syncp);
8800 q_vector->rx.total_packets += total_packets;
8801 q_vector->rx.total_bytes += total_bytes;
8802
8803 if (cleaned_count)
8804 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8805
8806 return total_packets;
8807}
8808
8809static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
8810 struct igb_rx_buffer *bi)
8811{
8812 struct page *page = bi->page;
8813 dma_addr_t dma;
8814
8815
8816 if (likely(page))
8817 return true;
8818
8819
8820 page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
8821 if (unlikely(!page)) {
8822 rx_ring->rx_stats.alloc_failed++;
8823 return false;
8824 }
8825
8826
8827 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
8828 igb_rx_pg_size(rx_ring),
8829 DMA_FROM_DEVICE,
8830 IGB_RX_DMA_ATTR);
8831
8832
8833
8834
8835 if (dma_mapping_error(rx_ring->dev, dma)) {
8836 __free_pages(page, igb_rx_pg_order(rx_ring));
8837
8838 rx_ring->rx_stats.alloc_failed++;
8839 return false;
8840 }
8841
8842 bi->dma = dma;
8843 bi->page = page;
8844 bi->page_offset = igb_rx_offset(rx_ring);
8845 page_ref_add(page, USHRT_MAX - 1);
8846 bi->pagecnt_bias = USHRT_MAX;
8847
8848 return true;
8849}
8850
8851
8852
8853
8854
8855
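/**
 * igb_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: rx descriptor ring to allocate new receive buffers on
 * @cleaned_count: count of buffers to allocate
 */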
8856void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
8857{
8858 union e1000_adv_rx_desc *rx_desc;
8859 struct igb_rx_buffer *bi;
8860 u16 i = rx_ring->next_to_use;
8861 u16 bufsz;
8862
8863
8864 if (!cleaned_count)
8865 return;
8866
8867 rx_desc = IGB_RX_DESC(rx_ring, i);
8868 bi = &rx_ring->rx_buffer_info[i];
8869 i -= rx_ring->count;
8870
8871 bufsz = igb_rx_bufsz(rx_ring);
8872
8873 do {
8874 if (!igb_alloc_mapped_page(rx_ring, bi))
8875 break;
8876
8877
8878 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
8879 bi->page_offset, bufsz,
8880 DMA_FROM_DEVICE);
8881
8882
8883
8884
8885 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
8886
8887 rx_desc++;
8888 bi++;
8889 i++;
8890 if (unlikely(!i)) {
8891 rx_desc = IGB_RX_DESC(rx_ring, 0);
8892 bi = rx_ring->rx_buffer_info;
8893 i -= rx_ring->count;
8894 }
8895
8896
8897 rx_desc->wb.upper.length = 0;
8898
8899 cleaned_count--;
8900 } while (cleaned_count);
8901
8902 i += rx_ring->count;
8903
8904 if (rx_ring->next_to_use != i) {
8905
8906 rx_ring->next_to_use = i;
8907
8908
8909 rx_ring->next_to_alloc = i;
8910
8911
8912
8913
8914
8915
8916 dma_wmb();
8917 writel(i, rx_ring->tail);
8918 }
8919}
8920
8921
8922
8923
8924
8925
8926
8927static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8928{
8929 struct igb_adapter *adapter = netdev_priv(netdev);
8930 struct mii_ioctl_data *data = if_mii(ifr);
8931
8932 if (adapter->hw.phy.media_type != e1000_media_type_copper)
8933 return -EOPNOTSUPP;
8934
8935 switch (cmd) {
8936 case SIOCGMIIPHY:
8937 data->phy_id = adapter->hw.phy.addr;
8938 break;
8939 case SIOCGMIIREG:
8940 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
8941 &data->val_out))
8942 return -EIO;
8943 break;
8944 case SIOCSMIIREG:
8945 default:
8946 return -EOPNOTSUPP;
8947 }
8948 return 0;
8949}
8950
8951
8952
8953
8954
8955
8956
8957static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8958{
8959 switch (cmd) {
8960 case SIOCGMIIPHY:
8961 case SIOCGMIIREG:
8962 case SIOCSMIIREG:
8963 return igb_mii_ioctl(netdev, ifr, cmd);
8964 case SIOCGHWTSTAMP:
8965 return igb_ptp_get_ts_config(netdev, ifr);
8966 case SIOCSHWTSTAMP:
8967 return igb_ptp_set_ts_config(netdev, ifr);
8968 default:
8969 return -EOPNOTSUPP;
8970 }
8971}
8972
8973void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8974{
8975 struct igb_adapter *adapter = hw->back;
8976
8977 pci_read_config_word(adapter->pdev, reg, value);
8978}
8979
8980void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8981{
8982 struct igb_adapter *adapter = hw->back;
8983
8984 pci_write_config_word(adapter->pdev, reg, *value);
8985}
8986
8987s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8988{
8989 struct igb_adapter *adapter = hw->back;
8990
8991 if (pcie_capability_read_word(adapter->pdev, reg, value))
8992 return -E1000_ERR_CONFIG;
8993
8994 return 0;
8995}
8996
8997s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8998{
8999 struct igb_adapter *adapter = hw->back;
9000
9001 if (pcie_capability_write_word(adapter->pdev, reg, *value))
9002 return -E1000_ERR_CONFIG;
9003
9004 return 0;
9005}
9006
9007static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
9008{
9009 struct igb_adapter *adapter = netdev_priv(netdev);
9010 struct e1000_hw *hw = &adapter->hw;
9011 u32 ctrl, rctl;
9012 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
9013
9014 if (enable) {
9015
9016 ctrl = rd32(E1000_CTRL);
9017 ctrl |= E1000_CTRL_VME;
9018 wr32(E1000_CTRL, ctrl);
9019
9020
9021 rctl = rd32(E1000_RCTL);
9022 rctl &= ~E1000_RCTL_CFIEN;
9023 wr32(E1000_RCTL, rctl);
9024 } else {
9025
9026 ctrl = rd32(E1000_CTRL);
9027 ctrl &= ~E1000_CTRL_VME;
9028 wr32(E1000_CTRL, ctrl);
9029 }
9030
9031 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
9032}
9033
9034static int igb_vlan_rx_add_vid(struct net_device *netdev,
9035 __be16 proto, u16 vid)
9036{
9037 struct igb_adapter *adapter = netdev_priv(netdev);
9038 struct e1000_hw *hw = &adapter->hw;
9039 int pf_id = adapter->vfs_allocated_count;
9040
9041
9042 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
9043 igb_vfta_set(hw, vid, pf_id, true, !!vid);
9044
9045 set_bit(vid, adapter->active_vlans);
9046
9047 return 0;
9048}
9049
9050static int igb_vlan_rx_kill_vid(struct net_device *netdev,
9051 __be16 proto, u16 vid)
9052{
9053 struct igb_adapter *adapter = netdev_priv(netdev);
9054 int pf_id = adapter->vfs_allocated_count;
9055 struct e1000_hw *hw = &adapter->hw;
9056
9057
9058 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
9059 igb_vfta_set(hw, vid, pf_id, false, true);
9060
9061 clear_bit(vid, adapter->active_vlans);
9062
9063 return 0;
9064}
9065
9066static void igb_restore_vlan(struct igb_adapter *adapter)
9067{
9068 u16 vid = 1;
9069
9070 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
9071 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
9072
9073 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
9074 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
9075}
9076
9077int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
9078{
9079 struct pci_dev *pdev = adapter->pdev;
9080 struct e1000_mac_info *mac = &adapter->hw.mac;
9081
9082 mac->autoneg = 0;
9083
9084
9085
9086
9087 if ((spd & 1) || (dplx & ~1))
9088 goto err_inval;
9089
9090
9091
9092
9093 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
9094 switch (spd + dplx) {
9095 case SPEED_10 + DUPLEX_HALF:
9096 case SPEED_10 + DUPLEX_FULL:
9097 case SPEED_100 + DUPLEX_HALF:
9098 goto err_inval;
9099 default:
9100 break;
9101 }
9102 }
9103
9104 switch (spd + dplx) {
9105 case SPEED_10 + DUPLEX_HALF:
9106 mac->forced_speed_duplex = ADVERTISE_10_HALF;
9107 break;
9108 case SPEED_10 + DUPLEX_FULL:
9109 mac->forced_speed_duplex = ADVERTISE_10_FULL;
9110 break;
9111 case SPEED_100 + DUPLEX_HALF:
9112 mac->forced_speed_duplex = ADVERTISE_100_HALF;
9113 break;
9114 case SPEED_100 + DUPLEX_FULL:
9115 mac->forced_speed_duplex = ADVERTISE_100_FULL;
9116 break;
9117 case SPEED_1000 + DUPLEX_FULL:
9118 mac->autoneg = 1;
9119 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
9120 break;
9121 case SPEED_1000 + DUPLEX_HALF:
9122 default:
9123 goto err_inval;
9124 }
9125
9126
9127 adapter->hw.phy.mdix = AUTO_ALL_MODES;
9128
9129 return 0;
9130
9131err_inval:
9132 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
9133 return -EINVAL;
9134}
9135
9136static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
9137 bool runtime)
9138{
9139 struct net_device *netdev = pci_get_drvdata(pdev);
9140 struct igb_adapter *adapter = netdev_priv(netdev);
9141 struct e1000_hw *hw = &adapter->hw;
9142 u32 ctrl, rctl, status;
9143 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
9144 bool wake;
9145
9146 rtnl_lock();
9147 netif_device_detach(netdev);
9148
9149 if (netif_running(netdev))
9150 __igb_close(netdev, true);
9151
9152 igb_ptp_suspend(adapter);
9153
9154 igb_clear_interrupt_scheme(adapter);
9155 rtnl_unlock();
9156
9157 status = rd32(E1000_STATUS);
9158 if (status & E1000_STATUS_LU)
9159 wufc &= ~E1000_WUFC_LNKC;
9160
9161 if (wufc) {
9162 igb_setup_rctl(adapter);
9163 igb_set_rx_mode(netdev);
9164
9165
9166 if (wufc & E1000_WUFC_MC) {
9167 rctl = rd32(E1000_RCTL);
9168 rctl |= E1000_RCTL_MPE;
9169 wr32(E1000_RCTL, rctl);
9170 }
9171
9172 ctrl = rd32(E1000_CTRL);
9173 ctrl |= E1000_CTRL_ADVD3WUC;
9174 wr32(E1000_CTRL, ctrl);
9175
9176
9177 igb_disable_pcie_master(hw);
9178
9179 wr32(E1000_WUC, E1000_WUC_PME_EN);
9180 wr32(E1000_WUFC, wufc);
9181 } else {
9182 wr32(E1000_WUC, 0);
9183 wr32(E1000_WUFC, 0);
9184 }
9185
9186 wake = wufc || adapter->en_mng_pt;
9187 if (!wake)
9188 igb_power_down_link(adapter);
9189 else
9190 igb_power_up_link(adapter);
9191
9192 if (enable_wake)
9193 *enable_wake = wake;
9194
9195
9196
9197
9198 igb_release_hw_control(adapter);
9199
9200 pci_disable_device(pdev);
9201
9202 return 0;
9203}
9204
9205static void igb_deliver_wake_packet(struct net_device *netdev)
9206{
9207 struct igb_adapter *adapter = netdev_priv(netdev);
9208 struct e1000_hw *hw = &adapter->hw;
9209 struct sk_buff *skb;
9210 u32 wupl;
9211
9212 wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
9213
9214
9215
9216
9217 if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
9218 return;
9219
9220 skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
9221 if (!skb)
9222 return;
9223
9224 skb_put(skb, wupl);
9225
9226
9227 wupl = roundup(wupl, 4);
9228
9229 memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
9230
9231 skb->protocol = eth_type_trans(skb, netdev);
9232 netif_rx(skb);
9233}
9234
9235static int __maybe_unused igb_suspend(struct device *dev)
9236{
9237 return __igb_shutdown(to_pci_dev(dev), NULL, 0);
9238}
9239
9240static int __maybe_unused igb_resume(struct device *dev)
9241{
9242 struct pci_dev *pdev = to_pci_dev(dev);
9243 struct net_device *netdev = pci_get_drvdata(pdev);
9244 struct igb_adapter *adapter = netdev_priv(netdev);
9245 struct e1000_hw *hw = &adapter->hw;
9246 u32 err, val;
9247
9248 pci_set_power_state(pdev, PCI_D0);
9249 pci_restore_state(pdev);
9250 pci_save_state(pdev);
9251
9252 if (!pci_device_is_present(pdev))
9253 return -ENODEV;
9254 err = pci_enable_device_mem(pdev);
9255 if (err) {
9256 dev_err(&pdev->dev,
9257 "igb: Cannot enable PCI device from suspend\n");
9258 return err;
9259 }
9260 pci_set_master(pdev);
9261
9262 pci_enable_wake(pdev, PCI_D3hot, 0);
9263 pci_enable_wake(pdev, PCI_D3cold, 0);
9264
9265 if (igb_init_interrupt_scheme(adapter, true)) {
9266 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9267 return -ENOMEM;
9268 }
9269
9270 igb_reset(adapter);
9271
9272
9273
9274
9275 igb_get_hw_control(adapter);
9276
9277 val = rd32(E1000_WUS);
9278 if (val & WAKE_PKT_WUS)
9279 igb_deliver_wake_packet(netdev);
9280
9281 wr32(E1000_WUS, ~0);
9282
9283 rtnl_lock();
9284 if (!err && netif_running(netdev))
9285 err = __igb_open(netdev, true);
9286
9287 if (!err)
9288 netif_device_attach(netdev);
9289 rtnl_unlock();
9290
9291 return err;
9292}
9293
9294static int __maybe_unused igb_runtime_idle(struct device *dev)
9295{
9296 struct net_device *netdev = dev_get_drvdata(dev);
9297 struct igb_adapter *adapter = netdev_priv(netdev);
9298
9299 if (!igb_has_link(adapter))
9300 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
9301
9302 return -EBUSY;
9303}
9304
9305static int __maybe_unused igb_runtime_suspend(struct device *dev)
9306{
9307 return __igb_shutdown(to_pci_dev(dev), NULL, 1);
9308}
9309
9310static int __maybe_unused igb_runtime_resume(struct device *dev)
9311{
9312 return igb_resume(dev);
9313}
9314
9315static void igb_shutdown(struct pci_dev *pdev)
9316{
9317 bool wake;
9318
9319 __igb_shutdown(pdev, &wake, 0);
9320
9321 if (system_state == SYSTEM_POWER_OFF) {
9322 pci_wake_from_d3(pdev, wake);
9323 pci_set_power_state(pdev, PCI_D3hot);
9324 }
9325}
9326
9327#ifdef CONFIG_PCI_IOV
9328static int igb_sriov_reinit(struct pci_dev *dev)
9329{
9330 struct net_device *netdev = pci_get_drvdata(dev);
9331 struct igb_adapter *adapter = netdev_priv(netdev);
9332 struct pci_dev *pdev = adapter->pdev;
9333
9334 rtnl_lock();
9335
9336 if (netif_running(netdev))
9337 igb_close(netdev);
9338 else
9339 igb_reset(adapter);
9340
9341 igb_clear_interrupt_scheme(adapter);
9342
9343 igb_init_queue_configuration(adapter);
9344
9345 if (igb_init_interrupt_scheme(adapter, true)) {
9346 rtnl_unlock();
9347 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9348 return -ENOMEM;
9349 }
9350
9351 if (netif_running(netdev))
9352 igb_open(netdev);
9353
9354 rtnl_unlock();
9355
9356 return 0;
9357}
9358
9359static int igb_pci_disable_sriov(struct pci_dev *dev)
9360{
9361 int err = igb_disable_sriov(dev);
9362
9363 if (!err)
9364 err = igb_sriov_reinit(dev);
9365
9366 return err;
9367}
9368
9369static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
9370{
9371 int err = igb_enable_sriov(dev, num_vfs);
9372
9373 if (err)
9374 goto out;
9375
9376 err = igb_sriov_reinit(dev);
9377 if (!err)
9378 return num_vfs;
9379
9380out:
9381 return err;
9382}
9383
9384#endif
9385static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
9386{
9387#ifdef CONFIG_PCI_IOV
9388 if (num_vfs == 0)
9389 return igb_pci_disable_sriov(dev);
9390 else
9391 return igb_pci_enable_sriov(dev, num_vfs);
9392#endif
9393 return 0;
9394}
9395
9396
9397
9398
9399
9400
9401
9402
9403
9404static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
9405 pci_channel_state_t state)
9406{
9407 struct net_device *netdev = pci_get_drvdata(pdev);
9408 struct igb_adapter *adapter = netdev_priv(netdev);
9409
9410 netif_device_detach(netdev);
9411
9412 if (state == pci_channel_io_perm_failure)
9413 return PCI_ERS_RESULT_DISCONNECT;
9414
9415 if (netif_running(netdev))
9416 igb_down(adapter);
9417 pci_disable_device(pdev);
9418
9419
9420 return PCI_ERS_RESULT_NEED_RESET;
9421}
9422
9423
9424
9425
9426
9427
9428
9429
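/**
 * igb_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Re-enables the device, restores PCI state and resets the adapter.
 * Returns PCI_ERS_RESULT_RECOVERED on success, or
 * PCI_ERS_RESULT_DISCONNECT if the device cannot be re-enabled.
 */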
9430static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
9431{
9432 struct net_device *netdev = pci_get_drvdata(pdev);
9433 struct igb_adapter *adapter = netdev_priv(netdev);
9434 struct e1000_hw *hw = &adapter->hw;
9435 pci_ers_result_t result;
9436
9437 if (pci_enable_device_mem(pdev)) {
9438 dev_err(&pdev->dev,
9439 "Cannot re-enable PCI device after reset.\n");
9440 result = PCI_ERS_RESULT_DISCONNECT;
9441 } else {
9442 pci_set_master(pdev);
9443 pci_restore_state(pdev);
9444 pci_save_state(pdev);
9445
9446 pci_enable_wake(pdev, PCI_D3hot, 0);
9447 pci_enable_wake(pdev, PCI_D3cold, 0);
9448
9449
9450
9451
9452 hw->hw_addr = adapter->io_addr;
9453
9454 igb_reset(adapter);
9455 wr32(E1000_WUS, ~0);
9456 result = PCI_ERS_RESULT_RECOVERED;
9457 }
9458
9459 return result;
9460}
9461
9462
9463
9464
9465
9466
9467
9468
9469
9470static void igb_io_resume(struct pci_dev *pdev)
9471{
9472 struct net_device *netdev = pci_get_drvdata(pdev);
9473 struct igb_adapter *adapter = netdev_priv(netdev);
9474
9475 if (netif_running(netdev)) {
9476 if (igb_up(adapter)) {
9477 dev_err(&pdev->dev, "igb_up failed after reset\n");
9478 return;
9479 }
9480 }
9481
9482 netif_device_attach(netdev);
9483
9484
9485
9486
9487 igb_get_hw_control(adapter);
9488}
9489
9490
9491
9492
9493
9494
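/* Program the hardware RAL/RAH pair at 'index' from the software MAC table
 * entry, including pool/queue selection and the address-valid bit.
 */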
9495static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
9496{
9497 struct e1000_hw *hw = &adapter->hw;
9498 u32 rar_low, rar_high;
9499 u8 *addr = adapter->mac_table[index].addr;
9500
9501
9502
9503
9504
9505
9506 rar_low = le32_to_cpup((__le32 *)(addr));
9507 rar_high = le16_to_cpup((__le16 *)(addr + 4));
9508
9509
9510 if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
9511 if (is_valid_ether_addr(addr))
9512 rar_high |= E1000_RAH_AV;
9513
9514 if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
9515 rar_high |= E1000_RAH_ASEL_SRC_ADDR;
9516
9517 switch (hw->mac.type) {
9518 case e1000_82575:
9519 case e1000_i210:
9520 if (adapter->mac_table[index].state &
9521 IGB_MAC_STATE_QUEUE_STEERING)
9522 rar_high |= E1000_RAH_QSEL_ENABLE;
9523
9524 rar_high |= E1000_RAH_POOL_1 *
9525 adapter->mac_table[index].queue;
9526 break;
9527 default:
9528 rar_high |= E1000_RAH_POOL_1 <<
9529 adapter->mac_table[index].queue;
9530 break;
9531 }
9532 }
9533
9534 wr32(E1000_RAL(index), rar_low);
9535 wrfl();
9536 wr32(E1000_RAH(index), rar_high);
9537 wrfl();
9538}
9539
9540static int igb_set_vf_mac(struct igb_adapter *adapter,
9541 int vf, unsigned char *mac_addr)
9542{
9543 struct e1000_hw *hw = &adapter->hw;
9544
9545
9546
9547 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
9548 unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
9549
9550 ether_addr_copy(vf_mac_addr, mac_addr);
9551 ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
9552 adapter->mac_table[rar_entry].queue = vf;
9553 adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
9554 igb_rar_set_index(adapter, rar_entry);
9555
9556 return 0;
9557}
9558
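/* Backs the ndo_set_vf_mac hook; typically reached from user space via
 * "ip link set <pf> vf <N> mac <address>".  Passing an all-zero address
 * clears the administratively set MAC again.
 */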
9559static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
9560{
9561 struct igb_adapter *adapter = netdev_priv(netdev);
9562
9563 if (vf >= adapter->vfs_allocated_count)
9564 return -EINVAL;
9565
	/* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
	 * flag and allows overwriting the MAC via the VF netdev.  This
	 * is necessary to allow libvirt a way to restore the original
	 * MAC after unbinding vfio-pci and reloading igbvf after shutting
	 * down a VM.
	 */
9572 if (is_zero_ether_addr(mac)) {
9573 adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
9574 dev_info(&adapter->pdev->dev,
9575 "remove administratively set MAC on VF %d\n",
9576 vf);
9577 } else if (is_valid_ether_addr(mac)) {
9578 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
9579 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
9580 mac, vf);
9581 dev_info(&adapter->pdev->dev,
			 "Reload the VF driver to make this change effective.\n");
9583
9584 if (test_bit(__IGB_DOWN, &adapter->state)) {
9585 dev_warn(&adapter->pdev->dev,
9586 "The VF MAC address has been set, but the PF device is not up.\n");
9587 dev_warn(&adapter->pdev->dev,
9588 "Bring the PF device up before attempting to use the VF device.\n");
9589 }
9590 } else {
9591 return -EINVAL;
9592 }
9593 return igb_set_vf_mac(adapter, vf, mac);
9594}
9595
9596static int igb_link_mbps(int internal_link_speed)
9597{
9598 switch (internal_link_speed) {
9599 case SPEED_100:
9600 return 100;
9601 case SPEED_1000:
9602 return 1000;
9603 default:
9604 return 0;
9605 }
9606}
9607
9608static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
9609 int link_speed)
9610{
9611 int rf_dec, rf_int;
9612 u32 bcnrc_val;
9613
9614 if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
9616 rf_int = link_speed / tx_rate;
9617 rf_dec = (link_speed - (rf_int * tx_rate));
9618 rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
9619 tx_rate;
9620
9621 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
9622 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
9623 E1000_RTTBCNRC_RF_INT_MASK);
9624 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
9625 } else {
9626 bcnrc_val = 0;
9627 }
9628
9629 wr32(E1000_RTTDQSEL, vf);
	/* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register; MMW_SIZE = 0x14 when 9728-byte jumbo frames are supported.
	 */
9633 wr32(E1000_RTTBCNRM, 0x14);
9634 wr32(E1000_RTTBCNRC, bcnrc_val);
9635}
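
/* Worked example of the rate-factor math above (illustrative numbers only):
 * with link_speed = 1000 and tx_rate = 300, rf_int = 1000 / 300 = 3 and
 * rf_dec holds the remaining 100/300 scaled by
 * BIT(E1000_RTTBCNRC_RF_INT_SHIFT), so the hardware divides the link rate
 * by roughly 3.33 and the VF is limited to about 300 Mbps.
 */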
9636
9637static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
9638{
9639 int actual_link_speed, i;
9640 bool reset_rate = false;
9641
	/* VF TX rate limit was not set or not supported */
9643 if ((adapter->vf_rate_link_speed == 0) ||
9644 (adapter->hw.mac.type != e1000_82576))
9645 return;
9646
9647 actual_link_speed = igb_link_mbps(adapter->link_speed);
9648 if (actual_link_speed != adapter->vf_rate_link_speed) {
9649 reset_rate = true;
9650 adapter->vf_rate_link_speed = 0;
9651 dev_info(&adapter->pdev->dev,
9652 "Link speed has been changed. VF Transmit rate is disabled\n");
9653 }
9654
9655 for (i = 0; i < adapter->vfs_allocated_count; i++) {
9656 if (reset_rate)
9657 adapter->vf_data[i].tx_rate = 0;
9658
9659 igb_set_vf_rate_limit(&adapter->hw, i,
9660 adapter->vf_data[i].tx_rate,
9661 actual_link_speed);
9662 }
9663}
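
/* The limits are reapplied here (and cleared on a speed change) because the
 * RTTBCNRC factor programmed in igb_set_vf_rate_limit() is relative to the
 * current link speed rather than an absolute rate, so stale factors would
 * throttle VFs to the wrong bandwidth after a link transition.
 */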
9664
9665static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
9666 int min_tx_rate, int max_tx_rate)
9667{
9668 struct igb_adapter *adapter = netdev_priv(netdev);
9669 struct e1000_hw *hw = &adapter->hw;
9670 int actual_link_speed;
9671
9672 if (hw->mac.type != e1000_82576)
9673 return -EOPNOTSUPP;
9674
9675 if (min_tx_rate)
9676 return -EINVAL;
9677
9678 actual_link_speed = igb_link_mbps(adapter->link_speed);
9679 if ((vf >= adapter->vfs_allocated_count) ||
9680 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
9681 (max_tx_rate < 0) ||
9682 (max_tx_rate > actual_link_speed))
9683 return -EINVAL;
9684
9685 adapter->vf_rate_link_speed = actual_link_speed;
9686 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
9687 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
9688
9689 return 0;
9690}
9691
9692static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
9693 bool setting)
9694{
9695 struct igb_adapter *adapter = netdev_priv(netdev);
9696 struct e1000_hw *hw = &adapter->hw;
9697 u32 reg_val, reg_offset;
9698
9699 if (!adapter->vfs_allocated_count)
9700 return -EOPNOTSUPP;
9701
9702 if (vf >= adapter->vfs_allocated_count)
9703 return -EINVAL;
9704
9705 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
9706 reg_val = rd32(reg_offset);
9707 if (setting)
9708 reg_val |= (BIT(vf) |
9709 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9710 else
9711 reg_val &= ~(BIT(vf) |
9712 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9713 wr32(reg_offset, reg_val);
9714
9715 adapter->vf_data[vf].spoofchk_enabled = setting;
9716 return 0;
9717}
9718
9719static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
9720{
9721 struct igb_adapter *adapter = netdev_priv(netdev);
9722
9723 if (vf >= adapter->vfs_allocated_count)
9724 return -EINVAL;
9725 if (adapter->vf_data[vf].trusted == setting)
9726 return 0;
9727
9728 adapter->vf_data[vf].trusted = setting;
9729
9730 dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
9731 vf, setting ? "" : "not ");
9732 return 0;
9733}
9734
9735static int igb_ndo_get_vf_config(struct net_device *netdev,
9736 int vf, struct ifla_vf_info *ivi)
9737{
9738 struct igb_adapter *adapter = netdev_priv(netdev);
9739 if (vf >= adapter->vfs_allocated_count)
9740 return -EINVAL;
9741 ivi->vf = vf;
9742 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
9743 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
9744 ivi->min_tx_rate = 0;
9745 ivi->vlan = adapter->vf_data[vf].pf_vlan;
9746 ivi->qos = adapter->vf_data[vf].pf_qos;
9747 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
9748 ivi->trusted = adapter->vf_data[vf].trusted;
9749 return 0;
9750}
9751
9752static void igb_vmm_control(struct igb_adapter *adapter)
9753{
9754 struct e1000_hw *hw = &adapter->hw;
9755 u32 reg;
9756
9757 switch (hw->mac.type) {
9758 case e1000_82575:
9759 case e1000_i210:
9760 case e1000_i211:
9761 case e1000_i354:
9762 default:
		/* replication is not supported for 82575 */
9764 return;
9765 case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
9767 reg = rd32(E1000_DTXCTL);
9768 reg |= E1000_DTXCTL_VLAN_ADDED;
9769 wr32(E1000_DTXCTL, reg);
9770 fallthrough;
9771 case e1000_82580:
		/* enable replication vlan tag stripping */
9773 reg = rd32(E1000_RPLOLR);
9774 reg |= E1000_RPLOLR_STRVLAN;
9775 wr32(E1000_RPLOLR, reg);
9776 fallthrough;
9777 case e1000_i350:
		/* none of the above registers are supported by i350 */
9779 break;
9780 }
9781
9782 if (adapter->vfs_allocated_count) {
9783 igb_vmdq_set_loopback_pf(hw, true);
9784 igb_vmdq_set_replication_pf(hw, true);
9785 igb_vmdq_set_anti_spoofing_pf(hw, true,
9786 adapter->vfs_allocated_count);
9787 } else {
9788 igb_vmdq_set_loopback_pf(hw, false);
9789 igb_vmdq_set_replication_pf(hw, false);
9790 }
9791}
9792
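/* DMA Coalescing (DMAC) batches up DMA activity so the PCIe link and platform
 * can remain in low-power states for longer stretches; the thresholds
 * programmed below control when buffered traffic forces a wake-up.
 */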
9793static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
9794{
9795 struct e1000_hw *hw = &adapter->hw;
9796 u32 dmac_thr;
9797 u16 hwm;
9798
9799 if (hw->mac.type > e1000_82580) {
9800 if (adapter->flags & IGB_FLAG_DMAC) {
9801 u32 reg;
9802
			/* force threshold to 0 */
9804 wr32(E1000_DMCTXTH, 0);
9805
			/* The DMA Coalescing high water mark must sit above
			 * the Rx threshold; program it as PBA - 6KB,
			 * expressed in 16-byte units.
			 */
9810 hwm = 64 * (pba - 6);
9811 reg = rd32(E1000_FCRTC);
9812 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
9813 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
9814 & E1000_FCRTC_RTH_COAL_MASK);
9815 wr32(E1000_FCRTC, reg);
9816
			/* Set the DMA Coalescing Rx threshold to PBA - 10KB */
9820 dmac_thr = pba - 10;
9821 reg = rd32(E1000_DMACR);
9822 reg &= ~E1000_DMACR_DMACTHR_MASK;
9823 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
9824 & E1000_DMACR_DMACTHR_MASK);
9825
			/* transition to L0x or L1 if available */
9827 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
9828
			/* watchdog timer = +-1000 usec in 32 usec intervals */
9830 reg |= (1000 >> 5);
9831
			/* Disable BMC-to-OS Watchdog Enable */
9833 if (hw->mac.type != e1000_i354)
9834 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9835
9836 wr32(E1000_DMACR, reg);
9837
			/* no lower threshold to disable coalescing
			 * (smart FIFO) - UTRESH = 0
			 */
9841 wr32(E1000_DMCRTRH, 0);
9842
9843 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
9844
9845 wr32(E1000_DMCTLX, reg);
9846
			/* free space in Tx packet buffer needed to wake from
			 * DMA Coalescing
			 */
9850 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
9851 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
9852
			/* make the low power state decision controlled
			 * by DMA Coalescing
			 */
9856 reg = rd32(E1000_PCIEMISC);
9857 reg &= ~E1000_PCIEMISC_LX_DECISION;
9858 wr32(E1000_PCIEMISC, reg);
9859 }
9860 } else if (hw->mac.type == e1000_82580) {
9861 u32 reg = rd32(E1000_PCIEMISC);
9862
9863 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
9864 wr32(E1000_DMACR, 0);
9865 }
9866}
9867
/**
 *  igb_read_i2c_byte - Reads 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to read
 *  @dev_addr: device address
 *  @data: value read
 *
 *  Performs byte read operation over I2C interface at
 *  a specified device address.
 **/
9878s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9879 u8 dev_addr, u8 *data)
9880{
9881 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9882 struct i2c_client *this_client = adapter->i2c_client;
9883 s32 status;
9884 u16 swfw_mask = 0;
9885
9886 if (!this_client)
9887 return E1000_ERR_I2C;
9888
9889 swfw_mask = E1000_SWFW_PHY0_SM;
9890
9891 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9892 return E1000_ERR_SWFW_SYNC;
9893
9894 status = i2c_smbus_read_byte_data(this_client, byte_offset);
9895 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9896
9897 if (status < 0)
9898 return E1000_ERR_I2C;
9899 else {
9900 *data = status;
9901 return 0;
9902 }
9903}
9904
/**
 *  igb_write_i2c_byte - Writes 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to write
 *  @dev_addr: device address
 *  @data: value to write
 *
 *  Performs byte write operation over I2C interface at
 *  a specified device address.
 **/
9915s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9916 u8 dev_addr, u8 data)
9917{
9918 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9919 struct i2c_client *this_client = adapter->i2c_client;
9920 s32 status;
9921 u16 swfw_mask = E1000_SWFW_PHY0_SM;
9922
9923 if (!this_client)
9924 return E1000_ERR_I2C;
9925
9926 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9927 return E1000_ERR_SWFW_SYNC;
9928 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
9929 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9930
9931 if (status)
9932 return E1000_ERR_I2C;
9933 else
9934 return 0;
9935
9936}
9937
9938int igb_reinit_queues(struct igb_adapter *adapter)
9939{
9940 struct net_device *netdev = adapter->netdev;
9941 struct pci_dev *pdev = adapter->pdev;
9942 int err = 0;
9943
9944 if (netif_running(netdev))
9945 igb_close(netdev);
9946
9947 igb_reset_interrupt_capability(adapter);
9948
9949 if (igb_init_interrupt_scheme(adapter, true)) {
9950 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9951 return -ENOMEM;
9952 }
9953
9954 if (netif_running(netdev))
9955 err = igb_open(netdev);
9956
9957 return err;
9958}
9959
9960static void igb_nfc_filter_exit(struct igb_adapter *adapter)
9961{
9962 struct igb_nfc_filter *rule;
9963
9964 spin_lock(&adapter->nfc_lock);
9965
9966 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9967 igb_erase_filter(adapter, rule);
9968
9969 hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
9970 igb_erase_filter(adapter, rule);
9971
9972 spin_unlock(&adapter->nfc_lock);
9973}
9974
9975static void igb_nfc_filter_restore(struct igb_adapter *adapter)
9976{
9977 struct igb_nfc_filter *rule;
9978
9979 spin_lock(&adapter->nfc_lock);
9980
9981 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9982 igb_add_filter(adapter, rule);
9983
9984 spin_unlock(&adapter->nfc_lock);
9985}
9986
9987