1
2
3
4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5
6#include <linux/module.h>
7#include <linux/types.h>
8#include <linux/init.h>
9#include <linux/bitops.h>
10#include <linux/vmalloc.h>
11#include <linux/pagemap.h>
12#include <linux/netdevice.h>
13#include <linux/ipv6.h>
14#include <linux/slab.h>
15#include <net/checksum.h>
16#include <net/ip6_checksum.h>
17#include <net/pkt_sched.h>
18#include <net/pkt_cls.h>
19#include <linux/net_tstamp.h>
20#include <linux/mii.h>
21#include <linux/ethtool.h>
22#include <linux/if.h>
23#include <linux/if_vlan.h>
24#include <linux/pci.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/ip.h>
28#include <linux/tcp.h>
29#include <linux/sctp.h>
30#include <linux/if_ether.h>
31#include <linux/aer.h>
32#include <linux/prefetch.h>
33#include <linux/pm_runtime.h>
34#include <linux/etherdevice.h>
35#ifdef CONFIG_IGB_DCA
36#include <linux/dca.h>
37#endif
38#include <linux/i2c.h>
39#include "igb.h"
40
41enum queue_mode {
42 QUEUE_MODE_STRICT_PRIORITY,
43 QUEUE_MODE_STREAM_RESERVATION,
44};
45
46enum tx_queue_prio {
47 TX_QUEUE_PRIO_HIGH,
48 TX_QUEUE_PRIO_LOW,
49};
50
51char igb_driver_name[] = "igb";
52static const char igb_driver_string[] =
53 "Intel(R) Gigabit Ethernet Network Driver";
54static const char igb_copyright[] =
55 "Copyright (c) 2007-2014 Intel Corporation.";
56
57static const struct e1000_info *igb_info_tbl[] = {
58 [board_82575] = &e1000_82575_info,
59};
60
61static const struct pci_device_id igb_pci_tbl[] = {
62 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
63 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
83 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
84 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
86 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
87 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
88 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
89 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
90 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
91 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
92 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
93 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
94 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
95 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
96 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
97
98 {0, }
99};
100
101MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
102
103static int igb_setup_all_tx_resources(struct igb_adapter *);
104static int igb_setup_all_rx_resources(struct igb_adapter *);
105static void igb_free_all_tx_resources(struct igb_adapter *);
106static void igb_free_all_rx_resources(struct igb_adapter *);
107static void igb_setup_mrqc(struct igb_adapter *);
108static int igb_probe(struct pci_dev *, const struct pci_device_id *);
109static void igb_remove(struct pci_dev *pdev);
110static int igb_sw_init(struct igb_adapter *);
111int igb_open(struct net_device *);
112int igb_close(struct net_device *);
113static void igb_configure(struct igb_adapter *);
114static void igb_configure_tx(struct igb_adapter *);
115static void igb_configure_rx(struct igb_adapter *);
116static void igb_clean_all_tx_rings(struct igb_adapter *);
117static void igb_clean_all_rx_rings(struct igb_adapter *);
118static void igb_clean_tx_ring(struct igb_ring *);
119static void igb_clean_rx_ring(struct igb_ring *);
120static void igb_set_rx_mode(struct net_device *);
121static void igb_update_phy_info(struct timer_list *);
122static void igb_watchdog(struct timer_list *);
123static void igb_watchdog_task(struct work_struct *);
124static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
125static void igb_get_stats64(struct net_device *dev,
126 struct rtnl_link_stats64 *stats);
127static int igb_change_mtu(struct net_device *, int);
128static int igb_set_mac(struct net_device *, void *);
129static void igb_set_uta(struct igb_adapter *adapter, bool set);
130static irqreturn_t igb_intr(int irq, void *);
131static irqreturn_t igb_intr_msi(int irq, void *);
132static irqreturn_t igb_msix_other(int irq, void *);
133static irqreturn_t igb_msix_ring(int irq, void *);
134#ifdef CONFIG_IGB_DCA
135static void igb_update_dca(struct igb_q_vector *);
136static void igb_setup_dca(struct igb_adapter *);
137#endif
138static int igb_poll(struct napi_struct *, int);
139static bool igb_clean_tx_irq(struct igb_q_vector *, int);
140static int igb_clean_rx_irq(struct igb_q_vector *, int);
141static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
142static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
143static void igb_reset_task(struct work_struct *);
144static void igb_vlan_mode(struct net_device *netdev,
145 netdev_features_t features);
146static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
147static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
148static void igb_restore_vlan(struct igb_adapter *);
149static void igb_rar_set_index(struct igb_adapter *, u32);
150static void igb_ping_all_vfs(struct igb_adapter *);
151static void igb_msg_task(struct igb_adapter *);
152static void igb_vmm_control(struct igb_adapter *);
153static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
154static void igb_flush_mac_table(struct igb_adapter *);
155static int igb_available_rars(struct igb_adapter *, u8);
156static void igb_set_default_mac_filter(struct igb_adapter *);
157static int igb_uc_sync(struct net_device *, const unsigned char *);
158static int igb_uc_unsync(struct net_device *, const unsigned char *);
159static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
160static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
161static int igb_ndo_set_vf_vlan(struct net_device *netdev,
162 int vf, u16 vlan, u8 qos, __be16 vlan_proto);
163static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
164static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
165 bool setting);
166static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
167 bool setting);
168static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
169 struct ifla_vf_info *ivi);
170static void igb_check_vf_rate_limit(struct igb_adapter *);
171static void igb_nfc_filter_exit(struct igb_adapter *adapter);
172static void igb_nfc_filter_restore(struct igb_adapter *adapter);
173
174#ifdef CONFIG_PCI_IOV
175static int igb_vf_configure(struct igb_adapter *adapter, int vf);
176static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
177static int igb_disable_sriov(struct pci_dev *dev);
178static int igb_pci_disable_sriov(struct pci_dev *dev);
179#endif
180
181static int igb_suspend(struct device *);
182static int igb_resume(struct device *);
183static int igb_runtime_suspend(struct device *dev);
184static int igb_runtime_resume(struct device *dev);
185static int igb_runtime_idle(struct device *dev);
186static const struct dev_pm_ops igb_pm_ops = {
187 SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
188 SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
189 igb_runtime_idle)
190};
191static void igb_shutdown(struct pci_dev *);
192static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
193#ifdef CONFIG_IGB_DCA
194static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
195static struct notifier_block dca_notifier = {
196 .notifier_call = igb_notify_dca,
197 .next = NULL,
198 .priority = 0
199};
200#endif
201#ifdef CONFIG_PCI_IOV
202static unsigned int max_vfs;
203module_param(max_vfs, uint, 0);
204MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
205#endif
206
207static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
208 pci_channel_state_t);
209static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
210static void igb_io_resume(struct pci_dev *);
211
212static const struct pci_error_handlers igb_err_handler = {
213 .error_detected = igb_io_error_detected,
214 .slot_reset = igb_io_slot_reset,
215 .resume = igb_io_resume,
216};
217
218static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
219
220static struct pci_driver igb_driver = {
221 .name = igb_driver_name,
222 .id_table = igb_pci_tbl,
223 .probe = igb_probe,
224 .remove = igb_remove,
225#ifdef CONFIG_PM
226 .driver.pm = &igb_pm_ops,
227#endif
228 .shutdown = igb_shutdown,
229 .sriov_configure = igb_pci_sriov_configure,
230 .err_handler = &igb_err_handler
231};
232
233MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
234MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
235MODULE_LICENSE("GPL v2");
236
237#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
238static int debug = -1;
239module_param(debug, int, 0);
240MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
241
242struct igb_reg_info {
243 u32 ofs;
244 char *name;
245};
246
247static const struct igb_reg_info igb_reg_info_tbl[] = {
248
249
250 {E1000_CTRL, "CTRL"},
251 {E1000_STATUS, "STATUS"},
252 {E1000_CTRL_EXT, "CTRL_EXT"},
253
254
255 {E1000_ICR, "ICR"},
256
257
258 {E1000_RCTL, "RCTL"},
259 {E1000_RDLEN(0), "RDLEN"},
260 {E1000_RDH(0), "RDH"},
261 {E1000_RDT(0), "RDT"},
262 {E1000_RXDCTL(0), "RXDCTL"},
263 {E1000_RDBAL(0), "RDBAL"},
264 {E1000_RDBAH(0), "RDBAH"},
265
266
267 {E1000_TCTL, "TCTL"},
268 {E1000_TDBAL(0), "TDBAL"},
269 {E1000_TDBAH(0), "TDBAH"},
270 {E1000_TDLEN(0), "TDLEN"},
271 {E1000_TDH(0), "TDH"},
272 {E1000_TDT(0), "TDT"},
273 {E1000_TXDCTL(0), "TXDCTL"},
274 {E1000_TDFH, "TDFH"},
275 {E1000_TDFT, "TDFT"},
276 {E1000_TDFHS, "TDFHS"},
277 {E1000_TDFPC, "TDFPC"},
278
279
280 {}
281};
282
283
284static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
285{
286 int n = 0;
287 char rname[16];
288 u32 regs[8];
289
290 switch (reginfo->ofs) {
291 case E1000_RDLEN(0):
292 for (n = 0; n < 4; n++)
293 regs[n] = rd32(E1000_RDLEN(n));
294 break;
295 case E1000_RDH(0):
296 for (n = 0; n < 4; n++)
297 regs[n] = rd32(E1000_RDH(n));
298 break;
299 case E1000_RDT(0):
300 for (n = 0; n < 4; n++)
301 regs[n] = rd32(E1000_RDT(n));
302 break;
303 case E1000_RXDCTL(0):
304 for (n = 0; n < 4; n++)
305 regs[n] = rd32(E1000_RXDCTL(n));
306 break;
307 case E1000_RDBAL(0):
308 for (n = 0; n < 4; n++)
309 regs[n] = rd32(E1000_RDBAL(n));
310 break;
311 case E1000_RDBAH(0):
312 for (n = 0; n < 4; n++)
313 regs[n] = rd32(E1000_RDBAH(n));
314 break;
315 case E1000_TDBAL(0):
316 for (n = 0; n < 4; n++)
317 regs[n] = rd32(E1000_RDBAL(n));
318 break;
319 case E1000_TDBAH(0):
320 for (n = 0; n < 4; n++)
321 regs[n] = rd32(E1000_TDBAH(n));
322 break;
323 case E1000_TDLEN(0):
324 for (n = 0; n < 4; n++)
325 regs[n] = rd32(E1000_TDLEN(n));
326 break;
327 case E1000_TDH(0):
328 for (n = 0; n < 4; n++)
329 regs[n] = rd32(E1000_TDH(n));
330 break;
331 case E1000_TDT(0):
332 for (n = 0; n < 4; n++)
333 regs[n] = rd32(E1000_TDT(n));
334 break;
335 case E1000_TXDCTL(0):
336 for (n = 0; n < 4; n++)
337 regs[n] = rd32(E1000_TXDCTL(n));
338 break;
339 default:
340 pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
341 return;
342 }
343
344 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
345 pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
346 regs[2], regs[3]);
347}
348
349
350static void igb_dump(struct igb_adapter *adapter)
351{
352 struct net_device *netdev = adapter->netdev;
353 struct e1000_hw *hw = &adapter->hw;
354 struct igb_reg_info *reginfo;
355 struct igb_ring *tx_ring;
356 union e1000_adv_tx_desc *tx_desc;
357 struct my_u0 { u64 a; u64 b; } *u0;
358 struct igb_ring *rx_ring;
359 union e1000_adv_rx_desc *rx_desc;
360 u32 staterr;
361 u16 i, n;
362
363 if (!netif_msg_hw(adapter))
364 return;
365
366
367 if (netdev) {
368 dev_info(&adapter->pdev->dev, "Net device Info\n");
369 pr_info("Device Name state trans_start\n");
370 pr_info("%-15s %016lX %016lX\n", netdev->name,
371 netdev->state, dev_trans_start(netdev));
372 }
373
374
375 dev_info(&adapter->pdev->dev, "Register Dump\n");
376 pr_info(" Register Name Value\n");
377 for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
378 reginfo->name; reginfo++) {
379 igb_regdump(hw, reginfo);
380 }
381
382
383 if (!netdev || !netif_running(netdev))
384 goto exit;
385
386 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
387 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
388 for (n = 0; n < adapter->num_tx_queues; n++) {
389 struct igb_tx_buffer *buffer_info;
390 tx_ring = adapter->tx_ring[n];
391 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
392 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
393 n, tx_ring->next_to_use, tx_ring->next_to_clean,
394 (u64)dma_unmap_addr(buffer_info, dma),
395 dma_unmap_len(buffer_info, len),
396 buffer_info->next_to_watch,
397 (u64)buffer_info->time_stamp);
398 }
399
400
401 if (!netif_msg_tx_done(adapter))
402 goto rx_ring_summary;
403
404 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
405
406
407
408
409
410
411
412
413
414
415
416
417 for (n = 0; n < adapter->num_tx_queues; n++) {
418 tx_ring = adapter->tx_ring[n];
419 pr_info("------------------------------------\n");
420 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
421 pr_info("------------------------------------\n");
422 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
423
424 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
425 const char *next_desc;
426 struct igb_tx_buffer *buffer_info;
427 tx_desc = IGB_TX_DESC(tx_ring, i);
428 buffer_info = &tx_ring->tx_buffer_info[i];
429 u0 = (struct my_u0 *)tx_desc;
430 if (i == tx_ring->next_to_use &&
431 i == tx_ring->next_to_clean)
432 next_desc = " NTC/U";
433 else if (i == tx_ring->next_to_use)
434 next_desc = " NTU";
435 else if (i == tx_ring->next_to_clean)
436 next_desc = " NTC";
437 else
438 next_desc = "";
439
440 pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
441 i, le64_to_cpu(u0->a),
442 le64_to_cpu(u0->b),
443 (u64)dma_unmap_addr(buffer_info, dma),
444 dma_unmap_len(buffer_info, len),
445 buffer_info->next_to_watch,
446 (u64)buffer_info->time_stamp,
447 buffer_info->skb, next_desc);
448
449 if (netif_msg_pktdata(adapter) && buffer_info->skb)
450 print_hex_dump(KERN_INFO, "",
451 DUMP_PREFIX_ADDRESS,
452 16, 1, buffer_info->skb->data,
453 dma_unmap_len(buffer_info, len),
454 true);
455 }
456 }
457
458
459rx_ring_summary:
460 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
461 pr_info("Queue [NTU] [NTC]\n");
462 for (n = 0; n < adapter->num_rx_queues; n++) {
463 rx_ring = adapter->rx_ring[n];
464 pr_info(" %5d %5X %5X\n",
465 n, rx_ring->next_to_use, rx_ring->next_to_clean);
466 }
467
468
469 if (!netif_msg_rx_status(adapter))
470 goto exit;
471
472 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495 for (n = 0; n < adapter->num_rx_queues; n++) {
496 rx_ring = adapter->rx_ring[n];
497 pr_info("------------------------------------\n");
498 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
499 pr_info("------------------------------------\n");
500 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
501 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
502
503 for (i = 0; i < rx_ring->count; i++) {
504 const char *next_desc;
505 struct igb_rx_buffer *buffer_info;
506 buffer_info = &rx_ring->rx_buffer_info[i];
507 rx_desc = IGB_RX_DESC(rx_ring, i);
508 u0 = (struct my_u0 *)rx_desc;
509 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
510
511 if (i == rx_ring->next_to_use)
512 next_desc = " NTU";
513 else if (i == rx_ring->next_to_clean)
514 next_desc = " NTC";
515 else
516 next_desc = "";
517
518 if (staterr & E1000_RXD_STAT_DD) {
519
520 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
521 "RWB", i,
522 le64_to_cpu(u0->a),
523 le64_to_cpu(u0->b),
524 next_desc);
525 } else {
526 pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
527 "R ", i,
528 le64_to_cpu(u0->a),
529 le64_to_cpu(u0->b),
530 (u64)buffer_info->dma,
531 next_desc);
532
533 if (netif_msg_pktdata(adapter) &&
534 buffer_info->dma && buffer_info->page) {
535 print_hex_dump(KERN_INFO, "",
536 DUMP_PREFIX_ADDRESS,
537 16, 1,
538 page_address(buffer_info->page) +
539 buffer_info->page_offset,
540 igb_rx_bufsz(rx_ring), true);
541 }
542 }
543 }
544 }
545
546exit:
547 return;
548}
549
550
551
552
553
554
555
556
557static int igb_get_i2c_data(void *data)
558{
559 struct igb_adapter *adapter = (struct igb_adapter *)data;
560 struct e1000_hw *hw = &adapter->hw;
561 s32 i2cctl = rd32(E1000_I2CPARAMS);
562
563 return !!(i2cctl & E1000_I2C_DATA_IN);
564}
565
566
567
568
569
570
571
572
573static void igb_set_i2c_data(void *data, int state)
574{
575 struct igb_adapter *adapter = (struct igb_adapter *)data;
576 struct e1000_hw *hw = &adapter->hw;
577 s32 i2cctl = rd32(E1000_I2CPARAMS);
578
579 if (state)
580 i2cctl |= E1000_I2C_DATA_OUT;
581 else
582 i2cctl &= ~E1000_I2C_DATA_OUT;
583
584 i2cctl &= ~E1000_I2C_DATA_OE_N;
585 i2cctl |= E1000_I2C_CLK_OE_N;
586 wr32(E1000_I2CPARAMS, i2cctl);
587 wrfl();
588
589}
590
591
592
593
594
595
596
597
598static void igb_set_i2c_clk(void *data, int state)
599{
600 struct igb_adapter *adapter = (struct igb_adapter *)data;
601 struct e1000_hw *hw = &adapter->hw;
602 s32 i2cctl = rd32(E1000_I2CPARAMS);
603
604 if (state) {
605 i2cctl |= E1000_I2C_CLK_OUT;
606 i2cctl &= ~E1000_I2C_CLK_OE_N;
607 } else {
608 i2cctl &= ~E1000_I2C_CLK_OUT;
609 i2cctl &= ~E1000_I2C_CLK_OE_N;
610 }
611 wr32(E1000_I2CPARAMS, i2cctl);
612 wrfl();
613}
614
615
616
617
618
619
620
621static int igb_get_i2c_clk(void *data)
622{
623 struct igb_adapter *adapter = (struct igb_adapter *)data;
624 struct e1000_hw *hw = &adapter->hw;
625 s32 i2cctl = rd32(E1000_I2CPARAMS);
626
627 return !!(i2cctl & E1000_I2C_CLK_IN);
628}
629
630static const struct i2c_algo_bit_data igb_i2c_algo = {
631 .setsda = igb_set_i2c_data,
632 .setscl = igb_set_i2c_clk,
633 .getsda = igb_get_i2c_data,
634 .getscl = igb_get_i2c_clk,
635 .udelay = 5,
636 .timeout = 20,
637};
638
639
640
641
642
643
644
645struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
646{
647 struct igb_adapter *adapter = hw->back;
648 return adapter->netdev;
649}
650
651
652
653
654
655
656
657static int __init igb_init_module(void)
658{
659 int ret;
660
661 pr_info("%s\n", igb_driver_string);
662 pr_info("%s\n", igb_copyright);
663
664#ifdef CONFIG_IGB_DCA
665 dca_register_notify(&dca_notifier);
666#endif
667 ret = pci_register_driver(&igb_driver);
668 return ret;
669}
670
671module_init(igb_init_module);
672
673
674
675
676
677
678
679static void __exit igb_exit_module(void)
680{
681#ifdef CONFIG_IGB_DCA
682 dca_unregister_notify(&dca_notifier);
683#endif
684 pci_unregister_driver(&igb_driver);
685}
686
687module_exit(igb_exit_module);
688
689#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
690
691
692
693
694
695
696
697static void igb_cache_ring_register(struct igb_adapter *adapter)
698{
699 int i = 0, j = 0;
700 u32 rbase_offset = adapter->vfs_allocated_count;
701
702 switch (adapter->hw.mac.type) {
703 case e1000_82576:
704
705
706
707
708
709 if (adapter->vfs_allocated_count) {
710 for (; i < adapter->rss_queues; i++)
711 adapter->rx_ring[i]->reg_idx = rbase_offset +
712 Q_IDX_82576(i);
713 }
714 fallthrough;
715 case e1000_82575:
716 case e1000_82580:
717 case e1000_i350:
718 case e1000_i354:
719 case e1000_i210:
720 case e1000_i211:
721 default:
722 for (; i < adapter->num_rx_queues; i++)
723 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
724 for (; j < adapter->num_tx_queues; j++)
725 adapter->tx_ring[j]->reg_idx = rbase_offset + j;
726 break;
727 }
728}
729
730u32 igb_rd32(struct e1000_hw *hw, u32 reg)
731{
732 struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
733 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
734 u32 value = 0;
735
736 if (E1000_REMOVED(hw_addr))
737 return ~value;
738
739 value = readl(&hw_addr[reg]);
740
741
742 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
743 struct net_device *netdev = igb->netdev;
744 hw->hw_addr = NULL;
745 netdev_err(netdev, "PCIe link lost\n");
746 WARN(pci_device_is_present(igb->pdev),
747 "igb: Failed to read reg 0x%x!\n", reg);
748 }
749
750 return value;
751}
752
753
754
755
756
757
758
759
760
761
762
763
764
765static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
766 int index, int offset)
767{
768 u32 ivar = array_rd32(E1000_IVAR0, index);
769
770
771 ivar &= ~((u32)0xFF << offset);
772
773
774 ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
775
776 array_wr32(E1000_IVAR0, index, ivar);
777}
778
779#define IGB_N0_QUEUE -1
780static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
781{
782 struct igb_adapter *adapter = q_vector->adapter;
783 struct e1000_hw *hw = &adapter->hw;
784 int rx_queue = IGB_N0_QUEUE;
785 int tx_queue = IGB_N0_QUEUE;
786 u32 msixbm = 0;
787
788 if (q_vector->rx.ring)
789 rx_queue = q_vector->rx.ring->reg_idx;
790 if (q_vector->tx.ring)
791 tx_queue = q_vector->tx.ring->reg_idx;
792
793 switch (hw->mac.type) {
794 case e1000_82575:
795
796
797
798
799
800 if (rx_queue > IGB_N0_QUEUE)
801 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
802 if (tx_queue > IGB_N0_QUEUE)
803 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
804 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
805 msixbm |= E1000_EIMS_OTHER;
806 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
807 q_vector->eims_value = msixbm;
808 break;
809 case e1000_82576:
810
811
812
813
814
815 if (rx_queue > IGB_N0_QUEUE)
816 igb_write_ivar(hw, msix_vector,
817 rx_queue & 0x7,
818 (rx_queue & 0x8) << 1);
819 if (tx_queue > IGB_N0_QUEUE)
820 igb_write_ivar(hw, msix_vector,
821 tx_queue & 0x7,
822 ((tx_queue & 0x8) << 1) + 8);
823 q_vector->eims_value = BIT(msix_vector);
824 break;
825 case e1000_82580:
826 case e1000_i350:
827 case e1000_i354:
828 case e1000_i210:
829 case e1000_i211:
830
831
832
833
834
835
836 if (rx_queue > IGB_N0_QUEUE)
837 igb_write_ivar(hw, msix_vector,
838 rx_queue >> 1,
839 (rx_queue & 0x1) << 4);
840 if (tx_queue > IGB_N0_QUEUE)
841 igb_write_ivar(hw, msix_vector,
842 tx_queue >> 1,
843 ((tx_queue & 0x1) << 4) + 8);
844 q_vector->eims_value = BIT(msix_vector);
845 break;
846 default:
847 BUG();
848 break;
849 }
850
851
852 adapter->eims_enable_mask |= q_vector->eims_value;
853
854
855 q_vector->set_itr = 1;
856}
857
858
859
860
861
862
863
864
865static void igb_configure_msix(struct igb_adapter *adapter)
866{
867 u32 tmp;
868 int i, vector = 0;
869 struct e1000_hw *hw = &adapter->hw;
870
871 adapter->eims_enable_mask = 0;
872
873
874 switch (hw->mac.type) {
875 case e1000_82575:
876 tmp = rd32(E1000_CTRL_EXT);
877
878 tmp |= E1000_CTRL_EXT_PBA_CLR;
879
880
881 tmp |= E1000_CTRL_EXT_EIAME;
882 tmp |= E1000_CTRL_EXT_IRCA;
883
884 wr32(E1000_CTRL_EXT, tmp);
885
886
887 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
888 adapter->eims_other = E1000_EIMS_OTHER;
889
890 break;
891
892 case e1000_82576:
893 case e1000_82580:
894 case e1000_i350:
895 case e1000_i354:
896 case e1000_i210:
897 case e1000_i211:
898
899
900
901 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
902 E1000_GPIE_PBA | E1000_GPIE_EIAME |
903 E1000_GPIE_NSICR);
904
905
906 adapter->eims_other = BIT(vector);
907 tmp = (vector++ | E1000_IVAR_VALID) << 8;
908
909 wr32(E1000_IVAR_MISC, tmp);
910 break;
911 default:
912
913 break;
914 }
915
916 adapter->eims_enable_mask |= adapter->eims_other;
917
918 for (i = 0; i < adapter->num_q_vectors; i++)
919 igb_assign_vector(adapter->q_vector[i], vector++);
920
921 wrfl();
922}
923
924
925
926
927
928
929
930
931static int igb_request_msix(struct igb_adapter *adapter)
932{
933 struct net_device *netdev = adapter->netdev;
934 int i, err = 0, vector = 0, free_vector = 0;
935
936 err = request_irq(adapter->msix_entries[vector].vector,
937 igb_msix_other, 0, netdev->name, adapter);
938 if (err)
939 goto err_out;
940
941 for (i = 0; i < adapter->num_q_vectors; i++) {
942 struct igb_q_vector *q_vector = adapter->q_vector[i];
943
944 vector++;
945
946 q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
947
948 if (q_vector->rx.ring && q_vector->tx.ring)
949 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
950 q_vector->rx.ring->queue_index);
951 else if (q_vector->tx.ring)
952 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
953 q_vector->tx.ring->queue_index);
954 else if (q_vector->rx.ring)
955 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
956 q_vector->rx.ring->queue_index);
957 else
958 sprintf(q_vector->name, "%s-unused", netdev->name);
959
960 err = request_irq(adapter->msix_entries[vector].vector,
961 igb_msix_ring, 0, q_vector->name,
962 q_vector);
963 if (err)
964 goto err_free;
965 }
966
967 igb_configure_msix(adapter);
968 return 0;
969
970err_free:
971
972 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
973
974 vector--;
975 for (i = 0; i < vector; i++) {
976 free_irq(adapter->msix_entries[free_vector++].vector,
977 adapter->q_vector[i]);
978 }
979err_out:
980 return err;
981}
982
983
984
985
986
987
988
989
990static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
991{
992 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
993
994 adapter->q_vector[v_idx] = NULL;
995
996
997
998
999 if (q_vector)
1000 kfree_rcu(q_vector, rcu);
1001}
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1012{
1013 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1014
1015
1016
1017
1018 if (!q_vector)
1019 return;
1020
1021 if (q_vector->tx.ring)
1022 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1023
1024 if (q_vector->rx.ring)
1025 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
1026
1027 netif_napi_del(&q_vector->napi);
1028
1029}
1030
1031static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1032{
1033 int v_idx = adapter->num_q_vectors;
1034
1035 if (adapter->flags & IGB_FLAG_HAS_MSIX)
1036 pci_disable_msix(adapter->pdev);
1037 else if (adapter->flags & IGB_FLAG_HAS_MSI)
1038 pci_disable_msi(adapter->pdev);
1039
1040 while (v_idx--)
1041 igb_reset_q_vector(adapter, v_idx);
1042}
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052static void igb_free_q_vectors(struct igb_adapter *adapter)
1053{
1054 int v_idx = adapter->num_q_vectors;
1055
1056 adapter->num_tx_queues = 0;
1057 adapter->num_rx_queues = 0;
1058 adapter->num_q_vectors = 0;
1059
1060 while (v_idx--) {
1061 igb_reset_q_vector(adapter, v_idx);
1062 igb_free_q_vector(adapter, v_idx);
1063 }
1064}
1065
1066
1067
1068
1069
1070
1071
1072
1073static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1074{
1075 igb_free_q_vectors(adapter);
1076 igb_reset_interrupt_capability(adapter);
1077}
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1088{
1089 int err;
1090 int numvecs, i;
1091
1092 if (!msix)
1093 goto msi_only;
1094 adapter->flags |= IGB_FLAG_HAS_MSIX;
1095
1096
1097 adapter->num_rx_queues = adapter->rss_queues;
1098 if (adapter->vfs_allocated_count)
1099 adapter->num_tx_queues = 1;
1100 else
1101 adapter->num_tx_queues = adapter->rss_queues;
1102
1103
1104 numvecs = adapter->num_rx_queues;
1105
1106
1107 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1108 numvecs += adapter->num_tx_queues;
1109
1110
1111 adapter->num_q_vectors = numvecs;
1112
1113
1114 numvecs++;
1115 for (i = 0; i < numvecs; i++)
1116 adapter->msix_entries[i].entry = i;
1117
1118 err = pci_enable_msix_range(adapter->pdev,
1119 adapter->msix_entries,
1120 numvecs,
1121 numvecs);
1122 if (err > 0)
1123 return;
1124
1125 igb_reset_interrupt_capability(adapter);
1126
1127
1128msi_only:
1129 adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1130#ifdef CONFIG_PCI_IOV
1131
1132 if (adapter->vf_data) {
1133 struct e1000_hw *hw = &adapter->hw;
1134
1135 pci_disable_sriov(adapter->pdev);
1136 msleep(500);
1137
1138 kfree(adapter->vf_mac_list);
1139 adapter->vf_mac_list = NULL;
1140 kfree(adapter->vf_data);
1141 adapter->vf_data = NULL;
1142 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1143 wrfl();
1144 msleep(100);
1145 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1146 }
1147#endif
1148 adapter->vfs_allocated_count = 0;
1149 adapter->rss_queues = 1;
1150 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1151 adapter->num_rx_queues = 1;
1152 adapter->num_tx_queues = 1;
1153 adapter->num_q_vectors = 1;
1154 if (!pci_enable_msi(adapter->pdev))
1155 adapter->flags |= IGB_FLAG_HAS_MSI;
1156}
1157
1158static void igb_add_ring(struct igb_ring *ring,
1159 struct igb_ring_container *head)
1160{
1161 head->ring = ring;
1162 head->count++;
1163}
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177static int igb_alloc_q_vector(struct igb_adapter *adapter,
1178 int v_count, int v_idx,
1179 int txr_count, int txr_idx,
1180 int rxr_count, int rxr_idx)
1181{
1182 struct igb_q_vector *q_vector;
1183 struct igb_ring *ring;
1184 int ring_count;
1185 size_t size;
1186
1187
1188 if (txr_count > 1 || rxr_count > 1)
1189 return -ENOMEM;
1190
1191 ring_count = txr_count + rxr_count;
1192 size = struct_size(q_vector, ring, ring_count);
1193
1194
1195 q_vector = adapter->q_vector[v_idx];
1196 if (!q_vector) {
1197 q_vector = kzalloc(size, GFP_KERNEL);
1198 } else if (size > ksize(q_vector)) {
1199 kfree_rcu(q_vector, rcu);
1200 q_vector = kzalloc(size, GFP_KERNEL);
1201 } else {
1202 memset(q_vector, 0, size);
1203 }
1204 if (!q_vector)
1205 return -ENOMEM;
1206
1207
1208 netif_napi_add(adapter->netdev, &q_vector->napi,
1209 igb_poll, 64);
1210
1211
1212 adapter->q_vector[v_idx] = q_vector;
1213 q_vector->adapter = adapter;
1214
1215
1216 q_vector->tx.work_limit = adapter->tx_work_limit;
1217
1218
1219 q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
1220 q_vector->itr_val = IGB_START_ITR;
1221
1222
1223 ring = q_vector->ring;
1224
1225
1226 if (rxr_count) {
1227
1228 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1229 q_vector->itr_val = adapter->rx_itr_setting;
1230 } else {
1231
1232 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1233 q_vector->itr_val = adapter->tx_itr_setting;
1234 }
1235
1236 if (txr_count) {
1237
1238 ring->dev = &adapter->pdev->dev;
1239 ring->netdev = adapter->netdev;
1240
1241
1242 ring->q_vector = q_vector;
1243
1244
1245 igb_add_ring(ring, &q_vector->tx);
1246
1247
1248 if (adapter->hw.mac.type == e1000_82575)
1249 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1250
1251
1252 ring->count = adapter->tx_ring_count;
1253 ring->queue_index = txr_idx;
1254
1255 ring->cbs_enable = false;
1256 ring->idleslope = 0;
1257 ring->sendslope = 0;
1258 ring->hicredit = 0;
1259 ring->locredit = 0;
1260
1261 u64_stats_init(&ring->tx_syncp);
1262 u64_stats_init(&ring->tx_syncp2);
1263
1264
1265 adapter->tx_ring[txr_idx] = ring;
1266
1267
1268 ring++;
1269 }
1270
1271 if (rxr_count) {
1272
1273 ring->dev = &adapter->pdev->dev;
1274 ring->netdev = adapter->netdev;
1275
1276
1277 ring->q_vector = q_vector;
1278
1279
1280 igb_add_ring(ring, &q_vector->rx);
1281
1282
1283 if (adapter->hw.mac.type >= e1000_82576)
1284 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1285
1286
1287
1288
1289 if (adapter->hw.mac.type >= e1000_i350)
1290 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1291
1292
1293 ring->count = adapter->rx_ring_count;
1294 ring->queue_index = rxr_idx;
1295
1296 u64_stats_init(&ring->rx_syncp);
1297
1298
1299 adapter->rx_ring[rxr_idx] = ring;
1300 }
1301
1302 return 0;
1303}
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1314{
1315 int q_vectors = adapter->num_q_vectors;
1316 int rxr_remaining = adapter->num_rx_queues;
1317 int txr_remaining = adapter->num_tx_queues;
1318 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1319 int err;
1320
1321 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1322 for (; rxr_remaining; v_idx++) {
1323 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1324 0, 0, 1, rxr_idx);
1325
1326 if (err)
1327 goto err_out;
1328
1329
1330 rxr_remaining--;
1331 rxr_idx++;
1332 }
1333 }
1334
1335 for (; v_idx < q_vectors; v_idx++) {
1336 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1337 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1338
1339 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1340 tqpv, txr_idx, rqpv, rxr_idx);
1341
1342 if (err)
1343 goto err_out;
1344
1345
1346 rxr_remaining -= rqpv;
1347 txr_remaining -= tqpv;
1348 rxr_idx++;
1349 txr_idx++;
1350 }
1351
1352 return 0;
1353
1354err_out:
1355 adapter->num_tx_queues = 0;
1356 adapter->num_rx_queues = 0;
1357 adapter->num_q_vectors = 0;
1358
1359 while (v_idx--)
1360 igb_free_q_vector(adapter, v_idx);
1361
1362 return -ENOMEM;
1363}
1364
1365
1366
1367
1368
1369
1370
1371
1372static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1373{
1374 struct pci_dev *pdev = adapter->pdev;
1375 int err;
1376
1377 igb_set_interrupt_capability(adapter, msix);
1378
1379 err = igb_alloc_q_vectors(adapter);
1380 if (err) {
1381 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1382 goto err_alloc_q_vectors;
1383 }
1384
1385 igb_cache_ring_register(adapter);
1386
1387 return 0;
1388
1389err_alloc_q_vectors:
1390 igb_reset_interrupt_capability(adapter);
1391 return err;
1392}
1393
1394
1395
1396
1397
1398
1399
1400
1401static int igb_request_irq(struct igb_adapter *adapter)
1402{
1403 struct net_device *netdev = adapter->netdev;
1404 struct pci_dev *pdev = adapter->pdev;
1405 int err = 0;
1406
1407 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1408 err = igb_request_msix(adapter);
1409 if (!err)
1410 goto request_done;
1411
1412 igb_free_all_tx_resources(adapter);
1413 igb_free_all_rx_resources(adapter);
1414
1415 igb_clear_interrupt_scheme(adapter);
1416 err = igb_init_interrupt_scheme(adapter, false);
1417 if (err)
1418 goto request_done;
1419
1420 igb_setup_all_tx_resources(adapter);
1421 igb_setup_all_rx_resources(adapter);
1422 igb_configure(adapter);
1423 }
1424
1425 igb_assign_vector(adapter->q_vector[0], 0);
1426
1427 if (adapter->flags & IGB_FLAG_HAS_MSI) {
1428 err = request_irq(pdev->irq, igb_intr_msi, 0,
1429 netdev->name, adapter);
1430 if (!err)
1431 goto request_done;
1432
1433
1434 igb_reset_interrupt_capability(adapter);
1435 adapter->flags &= ~IGB_FLAG_HAS_MSI;
1436 }
1437
1438 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
1439 netdev->name, adapter);
1440
1441 if (err)
1442 dev_err(&pdev->dev, "Error %d getting interrupt\n",
1443 err);
1444
1445request_done:
1446 return err;
1447}
1448
1449static void igb_free_irq(struct igb_adapter *adapter)
1450{
1451 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1452 int vector = 0, i;
1453
1454 free_irq(adapter->msix_entries[vector++].vector, adapter);
1455
1456 for (i = 0; i < adapter->num_q_vectors; i++)
1457 free_irq(adapter->msix_entries[vector++].vector,
1458 adapter->q_vector[i]);
1459 } else {
1460 free_irq(adapter->pdev->irq, adapter);
1461 }
1462}
1463
1464
1465
1466
1467
1468static void igb_irq_disable(struct igb_adapter *adapter)
1469{
1470 struct e1000_hw *hw = &adapter->hw;
1471
1472
1473
1474
1475
1476 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1477 u32 regval = rd32(E1000_EIAM);
1478
1479 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1480 wr32(E1000_EIMC, adapter->eims_enable_mask);
1481 regval = rd32(E1000_EIAC);
1482 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
1483 }
1484
1485 wr32(E1000_IAM, 0);
1486 wr32(E1000_IMC, ~0);
1487 wrfl();
1488 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1489 int i;
1490
1491 for (i = 0; i < adapter->num_q_vectors; i++)
1492 synchronize_irq(adapter->msix_entries[i].vector);
1493 } else {
1494 synchronize_irq(adapter->pdev->irq);
1495 }
1496}
1497
1498
1499
1500
1501
1502static void igb_irq_enable(struct igb_adapter *adapter)
1503{
1504 struct e1000_hw *hw = &adapter->hw;
1505
1506 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1507 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1508 u32 regval = rd32(E1000_EIAC);
1509
1510 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1511 regval = rd32(E1000_EIAM);
1512 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
1513 wr32(E1000_EIMS, adapter->eims_enable_mask);
1514 if (adapter->vfs_allocated_count) {
1515 wr32(E1000_MBVFIMR, 0xFF);
1516 ims |= E1000_IMS_VMMB;
1517 }
1518 wr32(E1000_IMS, ims);
1519 } else {
1520 wr32(E1000_IMS, IMS_ENABLE_MASK |
1521 E1000_IMS_DRSTA);
1522 wr32(E1000_IAM, IMS_ENABLE_MASK |
1523 E1000_IMS_DRSTA);
1524 }
1525}
1526
1527static void igb_update_mng_vlan(struct igb_adapter *adapter)
1528{
1529 struct e1000_hw *hw = &adapter->hw;
1530 u16 pf_id = adapter->vfs_allocated_count;
1531 u16 vid = adapter->hw.mng_cookie.vlan_id;
1532 u16 old_vid = adapter->mng_vlan_id;
1533
1534 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1535
1536 igb_vfta_set(hw, vid, pf_id, true, true);
1537 adapter->mng_vlan_id = vid;
1538 } else {
1539 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1540 }
1541
1542 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1543 (vid != old_vid) &&
1544 !test_bit(old_vid, adapter->active_vlans)) {
1545
1546 igb_vfta_set(hw, vid, pf_id, false, true);
1547 }
1548}
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558static void igb_release_hw_control(struct igb_adapter *adapter)
1559{
1560 struct e1000_hw *hw = &adapter->hw;
1561 u32 ctrl_ext;
1562
1563
1564 ctrl_ext = rd32(E1000_CTRL_EXT);
1565 wr32(E1000_CTRL_EXT,
1566 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1567}
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577static void igb_get_hw_control(struct igb_adapter *adapter)
1578{
1579 struct e1000_hw *hw = &adapter->hw;
1580 u32 ctrl_ext;
1581
1582
1583 ctrl_ext = rd32(E1000_CTRL_EXT);
1584 wr32(E1000_CTRL_EXT,
1585 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1586}
1587
1588static void enable_fqtss(struct igb_adapter *adapter, bool enable)
1589{
1590 struct net_device *netdev = adapter->netdev;
1591 struct e1000_hw *hw = &adapter->hw;
1592
1593 WARN_ON(hw->mac.type != e1000_i210);
1594
1595 if (enable)
1596 adapter->flags |= IGB_FLAG_FQTSS;
1597 else
1598 adapter->flags &= ~IGB_FLAG_FQTSS;
1599
1600 if (netif_running(netdev))
1601 schedule_work(&adapter->reset_task);
1602}
1603
1604static bool is_fqtss_enabled(struct igb_adapter *adapter)
1605{
1606 return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
1607}
1608
1609static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
1610 enum tx_queue_prio prio)
1611{
1612 u32 val;
1613
1614 WARN_ON(hw->mac.type != e1000_i210);
1615 WARN_ON(queue < 0 || queue > 4);
1616
1617 val = rd32(E1000_I210_TXDCTL(queue));
1618
1619 if (prio == TX_QUEUE_PRIO_HIGH)
1620 val |= E1000_TXDCTL_PRIORITY;
1621 else
1622 val &= ~E1000_TXDCTL_PRIORITY;
1623
1624 wr32(E1000_I210_TXDCTL(queue), val);
1625}
1626
1627static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
1628{
1629 u32 val;
1630
1631 WARN_ON(hw->mac.type != e1000_i210);
1632 WARN_ON(queue < 0 || queue > 1);
1633
1634 val = rd32(E1000_I210_TQAVCC(queue));
1635
1636 if (mode == QUEUE_MODE_STREAM_RESERVATION)
1637 val |= E1000_TQAVCC_QUEUEMODE;
1638 else
1639 val &= ~E1000_TQAVCC_QUEUEMODE;
1640
1641 wr32(E1000_I210_TQAVCC(queue), val);
1642}
1643
1644static bool is_any_cbs_enabled(struct igb_adapter *adapter)
1645{
1646 int i;
1647
1648 for (i = 0; i < adapter->num_tx_queues; i++) {
1649 if (adapter->tx_ring[i]->cbs_enable)
1650 return true;
1651 }
1652
1653 return false;
1654}
1655
1656static bool is_any_txtime_enabled(struct igb_adapter *adapter)
1657{
1658 int i;
1659
1660 for (i = 0; i < adapter->num_tx_queues; i++) {
1661 if (adapter->tx_ring[i]->launchtime_enable)
1662 return true;
1663 }
1664
1665 return false;
1666}
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
1679{
1680 struct igb_ring *ring = adapter->tx_ring[queue];
1681 struct net_device *netdev = adapter->netdev;
1682 struct e1000_hw *hw = &adapter->hw;
1683 u32 tqavcc, tqavctrl;
1684 u16 value;
1685
1686 WARN_ON(hw->mac.type != e1000_i210);
1687 WARN_ON(queue < 0 || queue > 1);
1688
1689
1690
1691
1692
1693 if (ring->cbs_enable || ring->launchtime_enable) {
1694 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
1695 set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
1696 } else {
1697 set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
1698 set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
1699 }
1700
1701
1702 if (ring->cbs_enable || queue == 0) {
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712 if (queue == 0 && !ring->cbs_enable) {
1713
1714 ring->idleslope = 1000000;
1715 ring->hicredit = ETH_FRAME_LEN;
1716 }
1717
1718
1719
1720
1721
1722 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1723 tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
1724 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783 value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
1784
1785 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1786 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1787 tqavcc |= value;
1788 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1789
1790 wr32(E1000_I210_TQAVHC(queue),
1791 0x80000000 + ring->hicredit * 0x7735);
1792 } else {
1793
1794
1795 tqavcc = rd32(E1000_I210_TQAVCC(queue));
1796 tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
1797 wr32(E1000_I210_TQAVCC(queue), tqavcc);
1798
1799
1800 wr32(E1000_I210_TQAVHC(queue), 0);
1801
1802
1803
1804
1805
1806 if (!is_any_cbs_enabled(adapter)) {
1807 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1808 tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
1809 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1810 }
1811 }
1812
1813
1814 if (ring->launchtime_enable) {
1815
1816
1817
1818
1819
1820
1821
1822
1823 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1824 tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
1825 E1000_TQAVCTRL_FETCHTIME_DELTA;
1826 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1827 } else {
1828
1829
1830
1831
1832 if (!is_any_txtime_enabled(adapter)) {
1833 tqavctrl = rd32(E1000_I210_TQAVCTRL);
1834 tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
1835 tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
1836 wr32(E1000_I210_TQAVCTRL, tqavctrl);
1837 }
1838 }
1839
1840
1841
1842
1843
1844
1845 netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
1846 ring->cbs_enable ? "enabled" : "disabled",
1847 ring->launchtime_enable ? "enabled" : "disabled",
1848 queue,
1849 ring->idleslope, ring->sendslope,
1850 ring->hicredit, ring->locredit);
1851}
1852
1853static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
1854 bool enable)
1855{
1856 struct igb_ring *ring;
1857
1858 if (queue < 0 || queue > adapter->num_tx_queues)
1859 return -EINVAL;
1860
1861 ring = adapter->tx_ring[queue];
1862 ring->launchtime_enable = enable;
1863
1864 return 0;
1865}
1866
1867static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1868 bool enable, int idleslope, int sendslope,
1869 int hicredit, int locredit)
1870{
1871 struct igb_ring *ring;
1872
1873 if (queue < 0 || queue > adapter->num_tx_queues)
1874 return -EINVAL;
1875
1876 ring = adapter->tx_ring[queue];
1877
1878 ring->cbs_enable = enable;
1879 ring->idleslope = idleslope;
1880 ring->sendslope = sendslope;
1881 ring->hicredit = hicredit;
1882 ring->locredit = locredit;
1883
1884 return 0;
1885}
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896static void igb_setup_tx_mode(struct igb_adapter *adapter)
1897{
1898 struct net_device *netdev = adapter->netdev;
1899 struct e1000_hw *hw = &adapter->hw;
1900 u32 val;
1901
1902
1903 if (hw->mac.type != e1000_i210)
1904 return;
1905
1906 if (is_fqtss_enabled(adapter)) {
1907 int i, max_queue;
1908
1909
1910
1911
1912
1913 val = rd32(E1000_I210_TQAVCTRL);
1914 val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
1915 val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1916 wr32(E1000_I210_TQAVCTRL, val);
1917
1918
1919
1920
1921 val = rd32(E1000_TXPBS);
1922 val &= ~I210_TXPBSIZE_MASK;
1923 val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
1924 I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
1925 wr32(E1000_TXPBS, val);
1926
1927 val = rd32(E1000_RXPBS);
1928 val &= ~I210_RXPBSIZE_MASK;
1929 val |= I210_RXPBSIZE_PB_30KB;
1930 wr32(E1000_RXPBS, val);
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943 val = (4096 - 1) / 64;
1944 wr32(E1000_I210_DTXMXPKTSZ, val);
1945
1946
1947
1948
1949
1950
1951 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1952 adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1953
1954 for (i = 0; i < max_queue; i++) {
1955 igb_config_tx_modes(adapter, i);
1956 }
1957 } else {
1958 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1959 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1960 wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1961
1962 val = rd32(E1000_I210_TQAVCTRL);
1963
1964
1965
1966
1967 val &= ~E1000_TQAVCTRL_XMIT_MODE;
1968 wr32(E1000_I210_TQAVCTRL, val);
1969 }
1970
1971 netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1972 "enabled" : "disabled");
1973}
1974
1975
1976
1977
1978
1979static void igb_configure(struct igb_adapter *adapter)
1980{
1981 struct net_device *netdev = adapter->netdev;
1982 int i;
1983
1984 igb_get_hw_control(adapter);
1985 igb_set_rx_mode(netdev);
1986 igb_setup_tx_mode(adapter);
1987
1988 igb_restore_vlan(adapter);
1989
1990 igb_setup_tctl(adapter);
1991 igb_setup_mrqc(adapter);
1992 igb_setup_rctl(adapter);
1993
1994 igb_nfc_filter_restore(adapter);
1995 igb_configure_tx(adapter);
1996 igb_configure_rx(adapter);
1997
1998 igb_rx_fifo_flush_82575(&adapter->hw);
1999
2000
2001
2002
2003
2004 for (i = 0; i < adapter->num_rx_queues; i++) {
2005 struct igb_ring *ring = adapter->rx_ring[i];
2006 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
2007 }
2008}
2009
2010
2011
2012
2013
2014void igb_power_up_link(struct igb_adapter *adapter)
2015{
2016 igb_reset_phy(&adapter->hw);
2017
2018 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2019 igb_power_up_phy_copper(&adapter->hw);
2020 else
2021 igb_power_up_serdes_link_82575(&adapter->hw);
2022
2023 igb_setup_link(&adapter->hw);
2024}
2025
2026
2027
2028
2029
2030static void igb_power_down_link(struct igb_adapter *adapter)
2031{
2032 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2033 igb_power_down_phy_copper_82575(&adapter->hw);
2034 else
2035 igb_shutdown_serdes_link_82575(&adapter->hw);
2036}
2037
2038
2039
2040
2041
2042static void igb_check_swap_media(struct igb_adapter *adapter)
2043{
2044 struct e1000_hw *hw = &adapter->hw;
2045 u32 ctrl_ext, connsw;
2046 bool swap_now = false;
2047
2048 ctrl_ext = rd32(E1000_CTRL_EXT);
2049 connsw = rd32(E1000_CONNSW);
2050
2051
2052
2053
2054
2055 if ((hw->phy.media_type == e1000_media_type_copper) &&
2056 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
2057 swap_now = true;
2058 } else if ((hw->phy.media_type != e1000_media_type_copper) &&
2059 !(connsw & E1000_CONNSW_SERDESD)) {
2060
2061 if (adapter->copper_tries < 4) {
2062 adapter->copper_tries++;
2063 connsw |= E1000_CONNSW_AUTOSENSE_CONF;
2064 wr32(E1000_CONNSW, connsw);
2065 return;
2066 } else {
2067 adapter->copper_tries = 0;
2068 if ((connsw & E1000_CONNSW_PHYSD) &&
2069 (!(connsw & E1000_CONNSW_PHY_PDN))) {
2070 swap_now = true;
2071 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
2072 wr32(E1000_CONNSW, connsw);
2073 }
2074 }
2075 }
2076
2077 if (!swap_now)
2078 return;
2079
2080 switch (hw->phy.media_type) {
2081 case e1000_media_type_copper:
2082 netdev_info(adapter->netdev,
2083 "MAS: changing media to fiber/serdes\n");
2084 ctrl_ext |=
2085 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2086 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2087 adapter->copper_tries = 0;
2088 break;
2089 case e1000_media_type_internal_serdes:
2090 case e1000_media_type_fiber:
2091 netdev_info(adapter->netdev,
2092 "MAS: changing media to copper\n");
2093 ctrl_ext &=
2094 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2095 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2096 break;
2097 default:
2098
2099 netdev_err(adapter->netdev,
2100 "AMS: Invalid media type found, returning\n");
2101 break;
2102 }
2103 wr32(E1000_CTRL_EXT, ctrl_ext);
2104}
2105
2106
2107
2108
2109
2110int igb_up(struct igb_adapter *adapter)
2111{
2112 struct e1000_hw *hw = &adapter->hw;
2113 int i;
2114
2115
2116 igb_configure(adapter);
2117
2118 clear_bit(__IGB_DOWN, &adapter->state);
2119
2120 for (i = 0; i < adapter->num_q_vectors; i++)
2121 napi_enable(&(adapter->q_vector[i]->napi));
2122
2123 if (adapter->flags & IGB_FLAG_HAS_MSIX)
2124 igb_configure_msix(adapter);
2125 else
2126 igb_assign_vector(adapter->q_vector[0], 0);
2127
2128
2129 rd32(E1000_TSICR);
2130 rd32(E1000_ICR);
2131 igb_irq_enable(adapter);
2132
2133
2134 if (adapter->vfs_allocated_count) {
2135 u32 reg_data = rd32(E1000_CTRL_EXT);
2136
2137 reg_data |= E1000_CTRL_EXT_PFRSTD;
2138 wr32(E1000_CTRL_EXT, reg_data);
2139 }
2140
2141 netif_tx_start_all_queues(adapter->netdev);
2142
2143
2144 hw->mac.get_link_status = 1;
2145 schedule_work(&adapter->watchdog_task);
2146
2147 if ((adapter->flags & IGB_FLAG_EEE) &&
2148 (!hw->dev_spec._82575.eee_disable))
2149 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2150
2151 return 0;
2152}
2153
2154void igb_down(struct igb_adapter *adapter)
2155{
2156 struct net_device *netdev = adapter->netdev;
2157 struct e1000_hw *hw = &adapter->hw;
2158 u32 tctl, rctl;
2159 int i;
2160
2161
2162
2163
2164 set_bit(__IGB_DOWN, &adapter->state);
2165
2166
2167 rctl = rd32(E1000_RCTL);
2168 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2169
2170
2171 igb_nfc_filter_exit(adapter);
2172
2173 netif_carrier_off(netdev);
2174 netif_tx_stop_all_queues(netdev);
2175
2176
2177 tctl = rd32(E1000_TCTL);
2178 tctl &= ~E1000_TCTL_EN;
2179 wr32(E1000_TCTL, tctl);
2180
2181 wrfl();
2182 usleep_range(10000, 11000);
2183
2184 igb_irq_disable(adapter);
2185
2186 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2187
2188 for (i = 0; i < adapter->num_q_vectors; i++) {
2189 if (adapter->q_vector[i]) {
2190 napi_synchronize(&adapter->q_vector[i]->napi);
2191 napi_disable(&adapter->q_vector[i]->napi);
2192 }
2193 }
2194
2195 del_timer_sync(&adapter->watchdog_timer);
2196 del_timer_sync(&adapter->phy_info_timer);
2197
2198
2199 spin_lock(&adapter->stats64_lock);
2200 igb_update_stats(adapter);
2201 spin_unlock(&adapter->stats64_lock);
2202
2203 adapter->link_speed = 0;
2204 adapter->link_duplex = 0;
2205
2206 if (!pci_channel_offline(adapter->pdev))
2207 igb_reset(adapter);
2208
2209
2210 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2211
2212 igb_clean_all_tx_rings(adapter);
2213 igb_clean_all_rx_rings(adapter);
2214#ifdef CONFIG_IGB_DCA
2215
2216
2217 igb_setup_dca(adapter);
2218#endif
2219}
2220
2221void igb_reinit_locked(struct igb_adapter *adapter)
2222{
2223 WARN_ON(in_interrupt());
2224 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2225 usleep_range(1000, 2000);
2226 igb_down(adapter);
2227 igb_up(adapter);
2228 clear_bit(__IGB_RESETTING, &adapter->state);
2229}
2230
2231
2232
2233
2234
2235static void igb_enable_mas(struct igb_adapter *adapter)
2236{
2237 struct e1000_hw *hw = &adapter->hw;
2238 u32 connsw = rd32(E1000_CONNSW);
2239
2240
2241 if ((hw->phy.media_type == e1000_media_type_copper) &&
2242 (!(connsw & E1000_CONNSW_SERDESD))) {
2243 connsw |= E1000_CONNSW_ENRGSRC;
2244 connsw |= E1000_CONNSW_AUTOSENSE_EN;
2245 wr32(E1000_CONNSW, connsw);
2246 wrfl();
2247 }
2248}
2249
2250void igb_reset(struct igb_adapter *adapter)
2251{
2252 struct pci_dev *pdev = adapter->pdev;
2253 struct e1000_hw *hw = &adapter->hw;
2254 struct e1000_mac_info *mac = &hw->mac;
2255 struct e1000_fc_info *fc = &hw->fc;
2256 u32 pba, hwm;
2257
2258
2259
2260
2261 switch (mac->type) {
2262 case e1000_i350:
2263 case e1000_i354:
2264 case e1000_82580:
2265 pba = rd32(E1000_RXPBS);
2266 pba = igb_rxpbs_adjust_82580(pba);
2267 break;
2268 case e1000_82576:
2269 pba = rd32(E1000_RXPBS);
2270 pba &= E1000_RXPBS_SIZE_MASK_82576;
2271 break;
2272 case e1000_82575:
2273 case e1000_i210:
2274 case e1000_i211:
2275 default:
2276 pba = E1000_PBA_34K;
2277 break;
2278 }
2279
2280 if (mac->type == e1000_82575) {
2281 u32 min_rx_space, min_tx_space, needed_tx_space;
2282
2283
2284 wr32(E1000_PBA, pba);
2285
2286
2287
2288
2289
2290
2291
2292
2293 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2294
2295
2296
2297
2298
2299
2300 min_tx_space = adapter->max_frame_size;
2301 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2302 min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
2303
2304
2305 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
2306
2307
2308
2309
2310
2311 if (needed_tx_space < pba) {
2312 pba -= needed_tx_space;
2313
2314
2315
2316
2317 if (pba < min_rx_space)
2318 pba = min_rx_space;
2319 }
2320
2321
2322 wr32(E1000_PBA, pba);
2323 }
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
2334
2335 fc->high_water = hwm & 0xFFFFFFF0;
2336 fc->low_water = fc->high_water - 16;
2337 fc->pause_time = 0xFFFF;
2338 fc->send_xon = 1;
2339 fc->current_mode = fc->requested_mode;
2340
2341
2342 if (adapter->vfs_allocated_count) {
2343 int i;
2344
2345 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
2346 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2347
2348
2349 igb_ping_all_vfs(adapter);
2350
2351
2352 wr32(E1000_VFRE, 0);
2353 wr32(E1000_VFTE, 0);
2354 }
2355
2356
2357 hw->mac.ops.reset_hw(hw);
2358 wr32(E1000_WUC, 0);
2359
2360 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2361
2362 adapter->ei.get_invariants(hw);
2363 adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2364 }
2365 if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
2366 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2367 igb_enable_mas(adapter);
2368 }
2369 if (hw->mac.ops.init_hw(hw))
2370 dev_err(&pdev->dev, "Hardware Error\n");
2371
2372
2373 igb_flush_mac_table(adapter);
2374 __dev_uc_unsync(adapter->netdev, NULL);
2375
2376
2377 igb_set_default_mac_filter(adapter);
2378
2379
2380
2381
2382 if (!hw->mac.autoneg)
2383 igb_force_mac_fc(hw);
2384
2385 igb_init_dmac(adapter, pba);
2386#ifdef CONFIG_IGB_HWMON
2387
2388 if (!test_bit(__IGB_DOWN, &adapter->state)) {
2389 if (mac->type == e1000_i350 && hw->bus.func == 0) {
2390
2391
2392
2393 if (adapter->ets)
2394 mac->ops.init_thermal_sensor_thresh(hw);
2395 }
2396 }
2397#endif
2398
2399 if (hw->phy.media_type == e1000_media_type_copper) {
2400 switch (mac->type) {
2401 case e1000_i350:
2402 case e1000_i210:
2403 case e1000_i211:
2404 igb_set_eee_i350(hw, true, true);
2405 break;
2406 case e1000_i354:
2407 igb_set_eee_i354(hw, true, true);
2408 break;
2409 default:
2410 break;
2411 }
2412 }
2413 if (!netif_running(adapter->netdev))
2414 igb_power_down_link(adapter);
2415
2416 igb_update_mng_vlan(adapter);
2417
2418
2419 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2420
2421
2422 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2423 igb_ptp_reset(adapter);
2424
2425 igb_get_phy_info(hw);
2426}
2427
2428static netdev_features_t igb_fix_features(struct net_device *netdev,
2429 netdev_features_t features)
2430{
2431
2432
2433
2434 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2435 features |= NETIF_F_HW_VLAN_CTAG_TX;
2436 else
2437 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2438
2439 return features;
2440}
2441
2442static int igb_set_features(struct net_device *netdev,
2443 netdev_features_t features)
2444{
2445 netdev_features_t changed = netdev->features ^ features;
2446 struct igb_adapter *adapter = netdev_priv(netdev);
2447
2448 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2449 igb_vlan_mode(netdev, features);
2450
2451 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2452 return 0;
2453
2454 if (!(features & NETIF_F_NTUPLE)) {
2455 struct hlist_node *node2;
2456 struct igb_nfc_filter *rule;
2457
2458 spin_lock(&adapter->nfc_lock);
2459 hlist_for_each_entry_safe(rule, node2,
2460 &adapter->nfc_filter_list, nfc_node) {
2461 igb_erase_filter(adapter, rule);
2462 hlist_del(&rule->nfc_node);
2463 kfree(rule);
2464 }
2465 spin_unlock(&adapter->nfc_lock);
2466 adapter->nfc_filter_count = 0;
2467 }
2468
2469 netdev->features = features;
2470
2471 if (netif_running(netdev))
2472 igb_reinit_locked(adapter);
2473 else
2474 igb_reset(adapter);
2475
2476 return 1;
2477}
2478
2479static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2480 struct net_device *dev,
2481 const unsigned char *addr, u16 vid,
2482 u16 flags,
2483 struct netlink_ext_ack *extack)
2484{
2485
2486 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2487 struct igb_adapter *adapter = netdev_priv(dev);
2488 int vfn = adapter->vfs_allocated_count;
2489
2490 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2491 return -ENOMEM;
2492 }
2493
2494 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2495}
2496
2497#define IGB_MAX_MAC_HDR_LEN 127
2498#define IGB_MAX_NETWORK_HDR_LEN 511
2499
2500static netdev_features_t
2501igb_features_check(struct sk_buff *skb, struct net_device *dev,
2502 netdev_features_t features)
2503{
2504 unsigned int network_hdr_len, mac_hdr_len;
2505
2506
2507 mac_hdr_len = skb_network_header(skb) - skb->data;
2508 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2509 return features & ~(NETIF_F_HW_CSUM |
2510 NETIF_F_SCTP_CRC |
2511 NETIF_F_GSO_UDP_L4 |
2512 NETIF_F_HW_VLAN_CTAG_TX |
2513 NETIF_F_TSO |
2514 NETIF_F_TSO6);
2515
2516 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2517 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2518 return features & ~(NETIF_F_HW_CSUM |
2519 NETIF_F_SCTP_CRC |
2520 NETIF_F_GSO_UDP_L4 |
2521 NETIF_F_TSO |
2522 NETIF_F_TSO6);
2523
2524
2525
2526
2527 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2528 features &= ~NETIF_F_TSO;
2529
2530 return features;
2531}
2532
2533static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
2534{
2535 if (!is_fqtss_enabled(adapter)) {
2536 enable_fqtss(adapter, true);
2537 return;
2538 }
2539
2540 igb_config_tx_modes(adapter, queue);
2541
2542 if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
2543 enable_fqtss(adapter, false);
2544}
2545
2546static int igb_offload_cbs(struct igb_adapter *adapter,
2547 struct tc_cbs_qopt_offload *qopt)
2548{
2549 struct e1000_hw *hw = &adapter->hw;
2550 int err;
2551
2552
2553 if (hw->mac.type != e1000_i210)
2554 return -EOPNOTSUPP;
2555
2556
2557 if (qopt->queue < 0 || qopt->queue > 1)
2558 return -EINVAL;
2559
2560 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2561 qopt->idleslope, qopt->sendslope,
2562 qopt->hicredit, qopt->locredit);
2563 if (err)
2564 return err;
2565
2566 igb_offload_apply(adapter, qopt->queue);
2567
2568 return 0;
2569}
2570
2571#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
2572#define VLAN_PRIO_FULL_MASK (0x07)
2573
2574static int igb_parse_cls_flower(struct igb_adapter *adapter,
2575 struct flow_cls_offload *f,
2576 int traffic_class,
2577 struct igb_nfc_filter *input)
2578{
2579 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2580 struct flow_dissector *dissector = rule->match.dissector;
2581 struct netlink_ext_ack *extack = f->common.extack;
2582
2583 if (dissector->used_keys &
2584 ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
2585 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2586 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2587 BIT(FLOW_DISSECTOR_KEY_VLAN))) {
2588 NL_SET_ERR_MSG_MOD(extack,
2589 "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
2590 return -EOPNOTSUPP;
2591 }
2592
2593 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2594 struct flow_match_eth_addrs match;
2595
2596 flow_rule_match_eth_addrs(rule, &match);
2597 if (!is_zero_ether_addr(match.mask->dst)) {
2598 if (!is_broadcast_ether_addr(match.mask->dst)) {
2599 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
2600 return -EINVAL;
2601 }
2602
2603 input->filter.match_flags |=
2604 IGB_FILTER_FLAG_DST_MAC_ADDR;
2605 ether_addr_copy(input->filter.dst_addr, match.key->dst);
2606 }
2607
2608 if (!is_zero_ether_addr(match.mask->src)) {
2609 if (!is_broadcast_ether_addr(match.mask->src)) {
2610 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
2611 return -EINVAL;
2612 }
2613
2614 input->filter.match_flags |=
2615 IGB_FILTER_FLAG_SRC_MAC_ADDR;
2616 ether_addr_copy(input->filter.src_addr, match.key->src);
2617 }
2618 }
2619
2620 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2621 struct flow_match_basic match;
2622
2623 flow_rule_match_basic(rule, &match);
2624 if (match.mask->n_proto) {
2625 if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
2626 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
2627 return -EINVAL;
2628 }
2629
2630 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
2631 input->filter.etype = match.key->n_proto;
2632 }
2633 }
2634
2635 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2636 struct flow_match_vlan match;
2637
2638 flow_rule_match_vlan(rule, &match);
2639 if (match.mask->vlan_priority) {
2640 if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
2641 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
2642 return -EINVAL;
2643 }
2644
2645 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
2646 input->filter.vlan_tci = match.key->vlan_priority;
2647 }
2648 }
2649
2650 input->action = traffic_class;
2651 input->cookie = f->cookie;
2652
2653 return 0;
2654}
2655
2656static int igb_configure_clsflower(struct igb_adapter *adapter,
2657 struct flow_cls_offload *cls_flower)
2658{
2659 struct netlink_ext_ack *extack = cls_flower->common.extack;
2660 struct igb_nfc_filter *filter, *f;
2661 int err, tc;
2662
2663 tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2664 if (tc < 0) {
2665 NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
2666 return -EINVAL;
2667 }
2668
2669 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2670 if (!filter)
2671 return -ENOMEM;
2672
2673 err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
2674 if (err < 0)
2675 goto err_parse;
2676
2677 spin_lock(&adapter->nfc_lock);
2678
2679 hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
2680 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2681 err = -EEXIST;
2682 NL_SET_ERR_MSG_MOD(extack,
2683 "This filter is already set in ethtool");
2684 goto err_locked;
2685 }
2686 }
2687
2688 hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
2689 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2690 err = -EEXIST;
2691 NL_SET_ERR_MSG_MOD(extack,
2692 "This filter is already set in cls_flower");
2693 goto err_locked;
2694 }
2695 }
2696
2697 err = igb_add_filter(adapter, filter);
2698 if (err < 0) {
2699 NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
2700 goto err_locked;
2701 }
2702
2703 hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
2704
2705 spin_unlock(&adapter->nfc_lock);
2706
2707 return 0;
2708
2709err_locked:
2710 spin_unlock(&adapter->nfc_lock);
2711
2712err_parse:
2713 kfree(filter);
2714
2715 return err;
2716}
2717
2718static int igb_delete_clsflower(struct igb_adapter *adapter,
2719 struct flow_cls_offload *cls_flower)
2720{
2721 struct igb_nfc_filter *filter;
2722 int err;
2723
2724 spin_lock(&adapter->nfc_lock);
2725
2726 hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
2727 if (filter->cookie == cls_flower->cookie)
2728 break;
2729
2730 if (!filter) {
2731 err = -ENOENT;
2732 goto out;
2733 }
2734
2735 err = igb_erase_filter(adapter, filter);
2736 if (err < 0)
2737 goto out;
2738
2739 hlist_del(&filter->nfc_node);
2740 kfree(filter);
2741
2742out:
2743 spin_unlock(&adapter->nfc_lock);
2744
2745 return err;
2746}
2747
2748static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
2749 struct flow_cls_offload *cls_flower)
2750{
2751 switch (cls_flower->command) {
2752 case FLOW_CLS_REPLACE:
2753 return igb_configure_clsflower(adapter, cls_flower);
2754 case FLOW_CLS_DESTROY:
2755 return igb_delete_clsflower(adapter, cls_flower);
2756 case FLOW_CLS_STATS:
2757 return -EOPNOTSUPP;
2758 default:
2759 return -EOPNOTSUPP;
2760 }
2761}
2762
2763static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2764 void *cb_priv)
2765{
2766 struct igb_adapter *adapter = cb_priv;
2767
2768 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
2769 return -EOPNOTSUPP;
2770
2771 switch (type) {
2772 case TC_SETUP_CLSFLOWER:
2773 return igb_setup_tc_cls_flower(adapter, type_data);
2774
2775 default:
2776 return -EOPNOTSUPP;
2777 }
2778}
2779
2780static int igb_offload_txtime(struct igb_adapter *adapter,
2781 struct tc_etf_qopt_offload *qopt)
2782{
2783 struct e1000_hw *hw = &adapter->hw;
2784 int err;
2785
2786
2787 if (hw->mac.type != e1000_i210)
2788 return -EOPNOTSUPP;
2789
2790
2791 if (qopt->queue < 0 || qopt->queue > 1)
2792 return -EINVAL;
2793
2794 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
2795 if (err)
2796 return err;
2797
2798 igb_offload_apply(adapter, qopt->queue);
2799
2800 return 0;
2801}
2802
2803static LIST_HEAD(igb_block_cb_list);
2804
2805static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2806 void *type_data)
2807{
2808 struct igb_adapter *adapter = netdev_priv(dev);
2809
2810 switch (type) {
2811 case TC_SETUP_QDISC_CBS:
2812 return igb_offload_cbs(adapter, type_data);
2813 case TC_SETUP_BLOCK:
2814 return flow_block_cb_setup_simple(type_data,
2815 &igb_block_cb_list,
2816 igb_setup_tc_block_cb,
2817 adapter, adapter, true);
2818
2819 case TC_SETUP_QDISC_ETF:
2820 return igb_offload_txtime(adapter, type_data);
2821
2822 default:
2823 return -EOPNOTSUPP;
2824 }
2825}
2826
2827static const struct net_device_ops igb_netdev_ops = {
2828 .ndo_open = igb_open,
2829 .ndo_stop = igb_close,
2830 .ndo_start_xmit = igb_xmit_frame,
2831 .ndo_get_stats64 = igb_get_stats64,
2832 .ndo_set_rx_mode = igb_set_rx_mode,
2833 .ndo_set_mac_address = igb_set_mac,
2834 .ndo_change_mtu = igb_change_mtu,
2835 .ndo_do_ioctl = igb_ioctl,
2836 .ndo_tx_timeout = igb_tx_timeout,
2837 .ndo_validate_addr = eth_validate_addr,
2838 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
2839 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
2840 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
2841 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
2842 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
2843 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
2844 .ndo_set_vf_trust = igb_ndo_set_vf_trust,
2845 .ndo_get_vf_config = igb_ndo_get_vf_config,
2846 .ndo_fix_features = igb_fix_features,
2847 .ndo_set_features = igb_set_features,
2848 .ndo_fdb_add = igb_ndo_fdb_add,
2849 .ndo_features_check = igb_features_check,
2850 .ndo_setup_tc = igb_setup_tc,
2851};
2852
2853
2854
2855
2856
2857void igb_set_fw_version(struct igb_adapter *adapter)
2858{
2859 struct e1000_hw *hw = &adapter->hw;
2860 struct e1000_fw_version fw;
2861
2862 igb_get_fw_version(hw, &fw);
2863
2864 switch (hw->mac.type) {
2865 case e1000_i210:
2866 case e1000_i211:
2867 if (!(igb_get_flash_presence_i210(hw))) {
2868 snprintf(adapter->fw_version,
2869 sizeof(adapter->fw_version),
2870 "%2d.%2d-%d",
2871 fw.invm_major, fw.invm_minor,
2872 fw.invm_img_type);
2873 break;
2874 }
2875 fallthrough;
2876 default:
2877
2878 if (fw.or_valid) {
2879 snprintf(adapter->fw_version,
2880 sizeof(adapter->fw_version),
2881 "%d.%d, 0x%08x, %d.%d.%d",
2882 fw.eep_major, fw.eep_minor, fw.etrack_id,
2883 fw.or_major, fw.or_build, fw.or_patch);
2884
2885 } else if (fw.etrack_id != 0X0000) {
2886 snprintf(adapter->fw_version,
2887 sizeof(adapter->fw_version),
2888 "%d.%d, 0x%08x",
2889 fw.eep_major, fw.eep_minor, fw.etrack_id);
2890 } else {
2891 snprintf(adapter->fw_version,
2892 sizeof(adapter->fw_version),
2893 "%d.%d.%d",
2894 fw.eep_major, fw.eep_minor, fw.eep_build);
2895 }
2896 break;
2897 }
2898}
2899
2900
2901
2902
2903
2904
2905static void igb_init_mas(struct igb_adapter *adapter)
2906{
2907 struct e1000_hw *hw = &adapter->hw;
2908 u16 eeprom_data;
2909
2910 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
2911 switch (hw->bus.func) {
2912 case E1000_FUNC_0:
2913 if (eeprom_data & IGB_MAS_ENABLE_0) {
2914 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2915 netdev_info(adapter->netdev,
2916 "MAS: Enabling Media Autosense for port %d\n",
2917 hw->bus.func);
2918 }
2919 break;
2920 case E1000_FUNC_1:
2921 if (eeprom_data & IGB_MAS_ENABLE_1) {
2922 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2923 netdev_info(adapter->netdev,
2924 "MAS: Enabling Media Autosense for port %d\n",
2925 hw->bus.func);
2926 }
2927 break;
2928 case E1000_FUNC_2:
2929 if (eeprom_data & IGB_MAS_ENABLE_2) {
2930 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2931 netdev_info(adapter->netdev,
2932 "MAS: Enabling Media Autosense for port %d\n",
2933 hw->bus.func);
2934 }
2935 break;
2936 case E1000_FUNC_3:
2937 if (eeprom_data & IGB_MAS_ENABLE_3) {
2938 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2939 netdev_info(adapter->netdev,
2940 "MAS: Enabling Media Autosense for port %d\n",
2941 hw->bus.func);
2942 }
2943 break;
2944 default:
2945
2946 netdev_err(adapter->netdev,
2947 "MAS: Invalid port configuration, returning\n");
2948 break;
2949 }
2950}
2951
2952
2953
2954
2955
2956static s32 igb_init_i2c(struct igb_adapter *adapter)
2957{
2958 s32 status = 0;
2959
2960
2961 if (adapter->hw.mac.type != e1000_i350)
2962 return 0;
2963
2964
2965
2966
2967
2968 adapter->i2c_adap.owner = THIS_MODULE;
2969 adapter->i2c_algo = igb_i2c_algo;
2970 adapter->i2c_algo.data = adapter;
2971 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
2972 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
2973 strlcpy(adapter->i2c_adap.name, "igb BB",
2974 sizeof(adapter->i2c_adap.name));
2975 status = i2c_bit_add_bus(&adapter->i2c_adap);
2976 return status;
2977}
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2991{
2992 struct net_device *netdev;
2993 struct igb_adapter *adapter;
2994 struct e1000_hw *hw;
2995 u16 eeprom_data = 0;
2996 s32 ret_val;
2997 static int global_quad_port_a;
2998 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
2999 int err, pci_using_dac;
3000 u8 part_str[E1000_PBANUM_LENGTH];
3001
3002
3003
3004
3005 if (pdev->is_virtfn) {
3006 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
3007 pci_name(pdev), pdev->vendor, pdev->device);
3008 return -EINVAL;
3009 }
3010
3011 err = pci_enable_device_mem(pdev);
3012 if (err)
3013 return err;
3014
3015 pci_using_dac = 0;
3016 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3017 if (!err) {
3018 pci_using_dac = 1;
3019 } else {
3020 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3021 if (err) {
3022 dev_err(&pdev->dev,
3023 "No usable DMA configuration, aborting\n");
3024 goto err_dma;
3025 }
3026 }
3027
3028 err = pci_request_mem_regions(pdev, igb_driver_name);
3029 if (err)
3030 goto err_pci_reg;
3031
3032 pci_enable_pcie_error_reporting(pdev);
3033
3034 pci_set_master(pdev);
3035 pci_save_state(pdev);
3036
3037 err = -ENOMEM;
3038 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
3039 IGB_MAX_TX_QUEUES);
3040 if (!netdev)
3041 goto err_alloc_etherdev;
3042
3043 SET_NETDEV_DEV(netdev, &pdev->dev);
3044
3045 pci_set_drvdata(pdev, netdev);
3046 adapter = netdev_priv(netdev);
3047 adapter->netdev = netdev;
3048 adapter->pdev = pdev;
3049 hw = &adapter->hw;
3050 hw->back = adapter;
3051 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3052
3053 err = -EIO;
3054 adapter->io_addr = pci_iomap(pdev, 0, 0);
3055 if (!adapter->io_addr)
3056 goto err_ioremap;
3057
3058 hw->hw_addr = adapter->io_addr;
3059
3060 netdev->netdev_ops = &igb_netdev_ops;
3061 igb_set_ethtool_ops(netdev);
3062 netdev->watchdog_timeo = 5 * HZ;
3063
3064 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
3065
3066 netdev->mem_start = pci_resource_start(pdev, 0);
3067 netdev->mem_end = pci_resource_end(pdev, 0);
3068
3069
3070 hw->vendor_id = pdev->vendor;
3071 hw->device_id = pdev->device;
3072 hw->revision_id = pdev->revision;
3073 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3074 hw->subsystem_device_id = pdev->subsystem_device;
3075
3076
3077 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3078 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3079 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3080
3081 err = ei->get_invariants(hw);
3082 if (err)
3083 goto err_sw_init;
3084
3085
3086 err = igb_sw_init(adapter);
3087 if (err)
3088 goto err_sw_init;
3089
3090 igb_get_bus_info_pcie(hw);
3091
3092 hw->phy.autoneg_wait_to_complete = false;
3093
3094
3095 if (hw->phy.media_type == e1000_media_type_copper) {
3096 hw->phy.mdix = AUTO_ALL_MODES;
3097 hw->phy.disable_polarity_correction = false;
3098 hw->phy.ms_type = e1000_ms_hw_default;
3099 }
3100
3101 if (igb_check_reset_block(hw))
3102 dev_info(&pdev->dev,
3103 "PHY reset is blocked due to SOL/IDER session.\n");
3104
3105
3106
3107
3108
3109 netdev->features |= NETIF_F_SG |
3110 NETIF_F_TSO |
3111 NETIF_F_TSO6 |
3112 NETIF_F_RXHASH |
3113 NETIF_F_RXCSUM |
3114 NETIF_F_HW_CSUM;
3115
3116 if (hw->mac.type >= e1000_82576)
3117 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
3118
3119 if (hw->mac.type >= e1000_i350)
3120 netdev->features |= NETIF_F_HW_TC;
3121
3122#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
3123 NETIF_F_GSO_GRE_CSUM | \
3124 NETIF_F_GSO_IPXIP4 | \
3125 NETIF_F_GSO_IPXIP6 | \
3126 NETIF_F_GSO_UDP_TUNNEL | \
3127 NETIF_F_GSO_UDP_TUNNEL_CSUM)
3128
3129 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
3130 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
3131
3132
3133 netdev->hw_features |= netdev->features |
3134 NETIF_F_HW_VLAN_CTAG_RX |
3135 NETIF_F_HW_VLAN_CTAG_TX |
3136 NETIF_F_RXALL;
3137
3138 if (hw->mac.type >= e1000_i350)
3139 netdev->hw_features |= NETIF_F_NTUPLE;
3140
3141 if (pci_using_dac)
3142 netdev->features |= NETIF_F_HIGHDMA;
3143
3144 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3145 netdev->mpls_features |= NETIF_F_HW_CSUM;
3146 netdev->hw_enc_features |= netdev->vlan_features;
3147
3148
3149 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3150 NETIF_F_HW_VLAN_CTAG_RX |
3151 NETIF_F_HW_VLAN_CTAG_TX;
3152
3153 netdev->priv_flags |= IFF_SUPP_NOFCS;
3154
3155 netdev->priv_flags |= IFF_UNICAST_FLT;
3156
3157
3158 netdev->min_mtu = ETH_MIN_MTU;
3159 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3160
3161 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
3162
3163
3164
3165
3166 hw->mac.ops.reset_hw(hw);
3167
3168
3169
3170
3171 switch (hw->mac.type) {
3172 case e1000_i210:
3173 case e1000_i211:
3174 if (igb_get_flash_presence_i210(hw)) {
3175 if (hw->nvm.ops.validate(hw) < 0) {
3176 dev_err(&pdev->dev,
3177 "The NVM Checksum Is Not Valid\n");
3178 err = -EIO;
3179 goto err_eeprom;
3180 }
3181 }
3182 break;
3183 default:
3184 if (hw->nvm.ops.validate(hw) < 0) {
3185 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
3186 err = -EIO;
3187 goto err_eeprom;
3188 }
3189 break;
3190 }
3191
3192 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
3193
3194 if (hw->mac.ops.read_mac_addr(hw))
3195 dev_err(&pdev->dev, "NVM Read Error\n");
3196 }
3197
3198 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
3199
3200 if (!is_valid_ether_addr(netdev->dev_addr)) {
3201 dev_err(&pdev->dev, "Invalid MAC Address\n");
3202 err = -EIO;
3203 goto err_eeprom;
3204 }
3205
3206 igb_set_default_mac_filter(adapter);
3207
3208
3209 igb_set_fw_version(adapter);
3210
3211
3212 if (hw->mac.type == e1000_i210) {
3213 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
3214 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
3215 }
3216
3217 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
3218 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
3219
3220 INIT_WORK(&adapter->reset_task, igb_reset_task);
3221 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
3222
3223
3224 adapter->fc_autoneg = true;
3225 hw->mac.autoneg = true;
3226 hw->phy.autoneg_advertised = 0x2f;
3227
3228 hw->fc.requested_mode = e1000_fc_default;
3229 hw->fc.current_mode = e1000_fc_default;
3230
3231 igb_validate_mdi_setting(hw);
3232
3233
3234 if (hw->bus.func == 0)
3235 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3236
3237
3238 if (hw->mac.type >= e1000_82580)
3239 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3240 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3241 &eeprom_data);
3242 else if (hw->bus.func == 1)
3243 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3244
3245 if (eeprom_data & IGB_EEPROM_APME)
3246 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3247
3248
3249
3250
3251
3252 switch (pdev->device) {
3253 case E1000_DEV_ID_82575GB_QUAD_COPPER:
3254 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3255 break;
3256 case E1000_DEV_ID_82575EB_FIBER_SERDES:
3257 case E1000_DEV_ID_82576_FIBER:
3258 case E1000_DEV_ID_82576_SERDES:
3259
3260
3261
3262 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
3263 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3264 break;
3265 case E1000_DEV_ID_82576_QUAD_COPPER:
3266 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
3267
3268 if (global_quad_port_a != 0)
3269 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3270 else
3271 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
3272
3273 if (++global_quad_port_a == 4)
3274 global_quad_port_a = 0;
3275 break;
3276 default:
3277
3278 if (!device_can_wakeup(&adapter->pdev->dev))
3279 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3280 }
3281
3282
3283 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
3284 adapter->wol |= E1000_WUFC_MAG;
3285
3286
3287 if ((hw->mac.type == e1000_i350) &&
3288 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
3289 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3290 adapter->wol = 0;
3291 }
3292
3293
3294
3295
3296 if (((hw->mac.type == e1000_i350) ||
3297 (hw->mac.type == e1000_i354)) &&
3298 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
3299 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3300 adapter->wol = 0;
3301 }
3302 if (hw->mac.type == e1000_i350) {
3303 if (((pdev->subsystem_device == 0x5001) ||
3304 (pdev->subsystem_device == 0x5002)) &&
3305 (hw->bus.func == 0)) {
3306 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3307 adapter->wol = 0;
3308 }
3309 if (pdev->subsystem_device == 0x1F52)
3310 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3311 }
3312
3313 device_set_wakeup_enable(&adapter->pdev->dev,
3314 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
3315
3316
3317 igb_reset(adapter);
3318
3319
3320 err = igb_init_i2c(adapter);
3321 if (err) {
3322 dev_err(&pdev->dev, "failed to init i2c interface\n");
3323 goto err_eeprom;
3324 }
3325
3326
3327
3328
3329 igb_get_hw_control(adapter);
3330
3331 strcpy(netdev->name, "eth%d");
3332 err = register_netdev(netdev);
3333 if (err)
3334 goto err_register;
3335
3336
3337 netif_carrier_off(netdev);
3338
3339#ifdef CONFIG_IGB_DCA
3340 if (dca_add_requester(&pdev->dev) == 0) {
3341 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3342 dev_info(&pdev->dev, "DCA enabled\n");
3343 igb_setup_dca(adapter);
3344 }
3345
3346#endif
3347#ifdef CONFIG_IGB_HWMON
3348
3349 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3350 u16 ets_word;
3351
3352
3353
3354
3355 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3356 if (ets_word != 0x0000 && ets_word != 0xFFFF)
3357 adapter->ets = true;
3358 else
3359 adapter->ets = false;
3360 if (igb_sysfs_init(adapter))
3361 dev_err(&pdev->dev,
3362 "failed to allocate sysfs resources\n");
3363 } else {
3364 adapter->ets = false;
3365 }
3366#endif
3367
3368 adapter->ei = *ei;
3369 if (hw->dev_spec._82575.mas_capable)
3370 igb_init_mas(adapter);
3371
3372
3373 igb_ptp_init(adapter);
3374
3375 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3376
3377 if (hw->mac.type != e1000_i354) {
3378 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3379 netdev->name,
3380 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3381 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3382 "unknown"),
3383 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3384 "Width x4" :
3385 (hw->bus.width == e1000_bus_width_pcie_x2) ?
3386 "Width x2" :
3387 (hw->bus.width == e1000_bus_width_pcie_x1) ?
3388 "Width x1" : "unknown"), netdev->dev_addr);
3389 }
3390
3391 if ((hw->mac.type >= e1000_i210 ||
3392 igb_get_flash_presence_i210(hw))) {
3393 ret_val = igb_read_part_string(hw, part_str,
3394 E1000_PBANUM_LENGTH);
3395 } else {
3396 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3397 }
3398
3399 if (ret_val)
3400 strcpy(part_str, "Unknown");
3401 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3402 dev_info(&pdev->dev,
3403 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3404 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3405 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3406 adapter->num_rx_queues, adapter->num_tx_queues);
3407 if (hw->phy.media_type == e1000_media_type_copper) {
3408 switch (hw->mac.type) {
3409 case e1000_i350:
3410 case e1000_i210:
3411 case e1000_i211:
3412
3413 err = igb_set_eee_i350(hw, true, true);
3414 if ((!err) &&
3415 (!hw->dev_spec._82575.eee_disable)) {
3416 adapter->eee_advert =
3417 MDIO_EEE_100TX | MDIO_EEE_1000T;
3418 adapter->flags |= IGB_FLAG_EEE;
3419 }
3420 break;
3421 case e1000_i354:
3422 if ((rd32(E1000_CTRL_EXT) &
3423 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3424 err = igb_set_eee_i354(hw, true, true);
3425 if ((!err) &&
3426 (!hw->dev_spec._82575.eee_disable)) {
3427 adapter->eee_advert =
3428 MDIO_EEE_100TX | MDIO_EEE_1000T;
3429 adapter->flags |= IGB_FLAG_EEE;
3430 }
3431 }
3432 break;
3433 default:
3434 break;
3435 }
3436 }
3437
3438 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
3439
3440 pm_runtime_put_noidle(&pdev->dev);
3441 return 0;
3442
3443err_register:
3444 igb_release_hw_control(adapter);
3445 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3446err_eeprom:
3447 if (!igb_check_reset_block(hw))
3448 igb_reset_phy(hw);
3449
3450 if (hw->flash_address)
3451 iounmap(hw->flash_address);
3452err_sw_init:
3453 kfree(adapter->mac_table);
3454 kfree(adapter->shadow_vfta);
3455 igb_clear_interrupt_scheme(adapter);
3456#ifdef CONFIG_PCI_IOV
3457 igb_disable_sriov(pdev);
3458#endif
3459 pci_iounmap(pdev, adapter->io_addr);
3460err_ioremap:
3461 free_netdev(netdev);
3462err_alloc_etherdev:
3463 pci_release_mem_regions(pdev);
3464err_pci_reg:
3465err_dma:
3466 pci_disable_device(pdev);
3467 return err;
3468}
3469
3470#ifdef CONFIG_PCI_IOV
3471static int igb_disable_sriov(struct pci_dev *pdev)
3472{
3473 struct net_device *netdev = pci_get_drvdata(pdev);
3474 struct igb_adapter *adapter = netdev_priv(netdev);
3475 struct e1000_hw *hw = &adapter->hw;
3476
3477
3478 if (adapter->vf_data) {
3479
3480 if (pci_vfs_assigned(pdev)) {
3481 dev_warn(&pdev->dev,
3482 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3483 return -EPERM;
3484 } else {
3485 pci_disable_sriov(pdev);
3486 msleep(500);
3487 }
3488
3489 kfree(adapter->vf_mac_list);
3490 adapter->vf_mac_list = NULL;
3491 kfree(adapter->vf_data);
3492 adapter->vf_data = NULL;
3493 adapter->vfs_allocated_count = 0;
3494 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3495 wrfl();
3496 msleep(100);
3497 dev_info(&pdev->dev, "IOV Disabled\n");
3498
3499
3500 adapter->flags |= IGB_FLAG_DMAC;
3501 }
3502
3503 return 0;
3504}
3505
3506static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
3507{
3508 struct net_device *netdev = pci_get_drvdata(pdev);
3509 struct igb_adapter *adapter = netdev_priv(netdev);
3510 int old_vfs = pci_num_vf(pdev);
3511 struct vf_mac_filter *mac_list;
3512 int err = 0;
3513 int num_vf_mac_filters, i;
3514
3515 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3516 err = -EPERM;
3517 goto out;
3518 }
3519 if (!num_vfs)
3520 goto out;
3521
3522 if (old_vfs) {
3523 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3524 old_vfs, max_vfs);
3525 adapter->vfs_allocated_count = old_vfs;
3526 } else
3527 adapter->vfs_allocated_count = num_vfs;
3528
3529 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3530 sizeof(struct vf_data_storage), GFP_KERNEL);
3531
3532
3533 if (!adapter->vf_data) {
3534 adapter->vfs_allocated_count = 0;
3535 err = -ENOMEM;
3536 goto out;
3537 }
3538
3539
3540
3541
3542
3543
3544 num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3545 (1 + IGB_PF_MAC_FILTERS_RESERVED +
3546 adapter->vfs_allocated_count);
3547
3548 adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3549 sizeof(struct vf_mac_filter),
3550 GFP_KERNEL);
3551
3552 mac_list = adapter->vf_mac_list;
3553 INIT_LIST_HEAD(&adapter->vf_macs.l);
3554
3555 if (adapter->vf_mac_list) {
3556
3557 for (i = 0; i < num_vf_mac_filters; i++) {
3558 mac_list->vf = -1;
3559 mac_list->free = true;
3560 list_add(&mac_list->l, &adapter->vf_macs.l);
3561 mac_list++;
3562 }
3563 } else {
3564
3565
3566
3567 dev_err(&pdev->dev,
3568 "Unable to allocate memory for VF MAC filter list\n");
3569 }
3570
3571
3572 if (!old_vfs) {
3573 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3574 if (err)
3575 goto err_out;
3576 }
3577 dev_info(&pdev->dev, "%d VFs allocated\n",
3578 adapter->vfs_allocated_count);
3579 for (i = 0; i < adapter->vfs_allocated_count; i++)
3580 igb_vf_configure(adapter, i);
3581
3582
3583 adapter->flags &= ~IGB_FLAG_DMAC;
3584 goto out;
3585
3586err_out:
3587 kfree(adapter->vf_mac_list);
3588 adapter->vf_mac_list = NULL;
3589 kfree(adapter->vf_data);
3590 adapter->vf_data = NULL;
3591 adapter->vfs_allocated_count = 0;
3592out:
3593 return err;
3594}
3595
3596#endif
3597
3598
3599
3600
3601static void igb_remove_i2c(struct igb_adapter *adapter)
3602{
3603
3604 i2c_del_adapter(&adapter->i2c_adap);
3605}
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616static void igb_remove(struct pci_dev *pdev)
3617{
3618 struct net_device *netdev = pci_get_drvdata(pdev);
3619 struct igb_adapter *adapter = netdev_priv(netdev);
3620 struct e1000_hw *hw = &adapter->hw;
3621
3622 pm_runtime_get_noresume(&pdev->dev);
3623#ifdef CONFIG_IGB_HWMON
3624 igb_sysfs_exit(adapter);
3625#endif
3626 igb_remove_i2c(adapter);
3627 igb_ptp_stop(adapter);
3628
3629
3630
3631 set_bit(__IGB_DOWN, &adapter->state);
3632 del_timer_sync(&adapter->watchdog_timer);
3633 del_timer_sync(&adapter->phy_info_timer);
3634
3635 cancel_work_sync(&adapter->reset_task);
3636 cancel_work_sync(&adapter->watchdog_task);
3637
3638#ifdef CONFIG_IGB_DCA
3639 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3640 dev_info(&pdev->dev, "DCA disabled\n");
3641 dca_remove_requester(&pdev->dev);
3642 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3643 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3644 }
3645#endif
3646
3647
3648
3649
3650 igb_release_hw_control(adapter);
3651
3652#ifdef CONFIG_PCI_IOV
3653 igb_disable_sriov(pdev);
3654#endif
3655
3656 unregister_netdev(netdev);
3657
3658 igb_clear_interrupt_scheme(adapter);
3659
3660 pci_iounmap(pdev, adapter->io_addr);
3661 if (hw->flash_address)
3662 iounmap(hw->flash_address);
3663 pci_release_mem_regions(pdev);
3664
3665 kfree(adapter->mac_table);
3666 kfree(adapter->shadow_vfta);
3667 free_netdev(netdev);
3668
3669 pci_disable_pcie_error_reporting(pdev);
3670
3671 pci_disable_device(pdev);
3672}
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683static void igb_probe_vfs(struct igb_adapter *adapter)
3684{
3685#ifdef CONFIG_PCI_IOV
3686 struct pci_dev *pdev = adapter->pdev;
3687 struct e1000_hw *hw = &adapter->hw;
3688
3689
3690 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
3691 return;
3692
3693
3694
3695
3696
3697 igb_set_interrupt_capability(adapter, true);
3698 igb_reset_interrupt_capability(adapter);
3699
3700 pci_sriov_set_totalvfs(pdev, 7);
3701 igb_enable_sriov(pdev, max_vfs);
3702
3703#endif
3704}
3705
3706unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3707{
3708 struct e1000_hw *hw = &adapter->hw;
3709 unsigned int max_rss_queues;
3710
3711
3712 switch (hw->mac.type) {
3713 case e1000_i211:
3714 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3715 break;
3716 case e1000_82575:
3717 case e1000_i210:
3718 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3719 break;
3720 case e1000_i350:
3721
3722 if (!!adapter->vfs_allocated_count) {
3723 max_rss_queues = 1;
3724 break;
3725 }
3726 fallthrough;
3727 case e1000_82576:
3728 if (!!adapter->vfs_allocated_count) {
3729 max_rss_queues = 2;
3730 break;
3731 }
3732 fallthrough;
3733 case e1000_82580:
3734 case e1000_i354:
3735 default:
3736 max_rss_queues = IGB_MAX_RX_QUEUES;
3737 break;
3738 }
3739
3740 return max_rss_queues;
3741}
3742
3743static void igb_init_queue_configuration(struct igb_adapter *adapter)
3744{
3745 u32 max_rss_queues;
3746
3747 max_rss_queues = igb_get_max_rss_queues(adapter);
3748 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3749
3750 igb_set_flag_queue_pairs(adapter, max_rss_queues);
3751}
3752
3753void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3754 const u32 max_rss_queues)
3755{
3756 struct e1000_hw *hw = &adapter->hw;
3757
3758
3759 switch (hw->mac.type) {
3760 case e1000_82575:
3761 case e1000_i211:
3762
3763 break;
3764 case e1000_82576:
3765 case e1000_82580:
3766 case e1000_i350:
3767 case e1000_i354:
3768 case e1000_i210:
3769 default:
3770
3771
3772
3773 if (adapter->rss_queues > (max_rss_queues / 2))
3774 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
3775 else
3776 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
3777 break;
3778 }
3779}
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789static int igb_sw_init(struct igb_adapter *adapter)
3790{
3791 struct e1000_hw *hw = &adapter->hw;
3792 struct net_device *netdev = adapter->netdev;
3793 struct pci_dev *pdev = adapter->pdev;
3794
3795 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3796
3797
3798 adapter->tx_ring_count = IGB_DEFAULT_TXD;
3799 adapter->rx_ring_count = IGB_DEFAULT_RXD;
3800
3801
3802 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
3803 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
3804
3805
3806 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3807
3808 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3809 VLAN_HLEN;
3810 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3811
3812 spin_lock_init(&adapter->nfc_lock);
3813 spin_lock_init(&adapter->stats64_lock);
3814#ifdef CONFIG_PCI_IOV
3815 switch (hw->mac.type) {
3816 case e1000_82576:
3817 case e1000_i350:
3818 if (max_vfs > 7) {
3819 dev_warn(&pdev->dev,
3820 "Maximum of 7 VFs per PF, using max\n");
3821 max_vfs = adapter->vfs_allocated_count = 7;
3822 } else
3823 adapter->vfs_allocated_count = max_vfs;
3824 if (adapter->vfs_allocated_count)
3825 dev_warn(&pdev->dev,
3826 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3827 break;
3828 default:
3829 break;
3830 }
3831#endif
3832
3833
3834 adapter->flags |= IGB_FLAG_HAS_MSIX;
3835
3836 adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
3837 sizeof(struct igb_mac_addr),
3838 GFP_KERNEL);
3839 if (!adapter->mac_table)
3840 return -ENOMEM;
3841
3842 igb_probe_vfs(adapter);
3843
3844 igb_init_queue_configuration(adapter);
3845
3846
3847 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
3848 GFP_KERNEL);
3849 if (!adapter->shadow_vfta)
3850 return -ENOMEM;
3851
3852
3853 if (igb_init_interrupt_scheme(adapter, true)) {
3854 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3855 return -ENOMEM;
3856 }
3857
3858
3859 igb_irq_disable(adapter);
3860
3861 if (hw->mac.type >= e1000_i350)
3862 adapter->flags &= ~IGB_FLAG_DMAC;
3863
3864 set_bit(__IGB_DOWN, &adapter->state);
3865 return 0;
3866}
3867
3868
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880static int __igb_open(struct net_device *netdev, bool resuming)
3881{
3882 struct igb_adapter *adapter = netdev_priv(netdev);
3883 struct e1000_hw *hw = &adapter->hw;
3884 struct pci_dev *pdev = adapter->pdev;
3885 int err;
3886 int i;
3887
3888
3889 if (test_bit(__IGB_TESTING, &adapter->state)) {
3890 WARN_ON(resuming);
3891 return -EBUSY;
3892 }
3893
3894 if (!resuming)
3895 pm_runtime_get_sync(&pdev->dev);
3896
3897 netif_carrier_off(netdev);
3898
3899
3900 err = igb_setup_all_tx_resources(adapter);
3901 if (err)
3902 goto err_setup_tx;
3903
3904
3905 err = igb_setup_all_rx_resources(adapter);
3906 if (err)
3907 goto err_setup_rx;
3908
3909 igb_power_up_link(adapter);
3910
3911
3912
3913
3914
3915
3916 igb_configure(adapter);
3917
3918 err = igb_request_irq(adapter);
3919 if (err)
3920 goto err_req_irq;
3921
3922
3923 err = netif_set_real_num_tx_queues(adapter->netdev,
3924 adapter->num_tx_queues);
3925 if (err)
3926 goto err_set_queues;
3927
3928 err = netif_set_real_num_rx_queues(adapter->netdev,
3929 adapter->num_rx_queues);
3930 if (err)
3931 goto err_set_queues;
3932
3933
3934 clear_bit(__IGB_DOWN, &adapter->state);
3935
3936 for (i = 0; i < adapter->num_q_vectors; i++)
3937 napi_enable(&(adapter->q_vector[i]->napi));
3938
3939
3940 rd32(E1000_TSICR);
3941 rd32(E1000_ICR);
3942
3943 igb_irq_enable(adapter);
3944
3945
3946 if (adapter->vfs_allocated_count) {
3947 u32 reg_data = rd32(E1000_CTRL_EXT);
3948
3949 reg_data |= E1000_CTRL_EXT_PFRSTD;
3950 wr32(E1000_CTRL_EXT, reg_data);
3951 }
3952
3953 netif_tx_start_all_queues(netdev);
3954
3955 if (!resuming)
3956 pm_runtime_put(&pdev->dev);
3957
3958
3959 hw->mac.get_link_status = 1;
3960 schedule_work(&adapter->watchdog_task);
3961
3962 return 0;
3963
3964err_set_queues:
3965 igb_free_irq(adapter);
3966err_req_irq:
3967 igb_release_hw_control(adapter);
3968 igb_power_down_link(adapter);
3969 igb_free_all_rx_resources(adapter);
3970err_setup_rx:
3971 igb_free_all_tx_resources(adapter);
3972err_setup_tx:
3973 igb_reset(adapter);
3974 if (!resuming)
3975 pm_runtime_put(&pdev->dev);
3976
3977 return err;
3978}
3979
3980int igb_open(struct net_device *netdev)
3981{
3982 return __igb_open(netdev, false);
3983}
3984
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996static int __igb_close(struct net_device *netdev, bool suspending)
3997{
3998 struct igb_adapter *adapter = netdev_priv(netdev);
3999 struct pci_dev *pdev = adapter->pdev;
4000
4001 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
4002
4003 if (!suspending)
4004 pm_runtime_get_sync(&pdev->dev);
4005
4006 igb_down(adapter);
4007 igb_free_irq(adapter);
4008
4009 igb_free_all_tx_resources(adapter);
4010 igb_free_all_rx_resources(adapter);
4011
4012 if (!suspending)
4013 pm_runtime_put_sync(&pdev->dev);
4014 return 0;
4015}
4016
4017int igb_close(struct net_device *netdev)
4018{
4019 if (netif_device_present(netdev) || netdev->dismantle)
4020 return __igb_close(netdev, false);
4021 return 0;
4022}
4023
4024
4025
4026
4027
4028
4029
4030int igb_setup_tx_resources(struct igb_ring *tx_ring)
4031{
4032 struct device *dev = tx_ring->dev;
4033 int size;
4034
4035 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
4036
4037 tx_ring->tx_buffer_info = vmalloc(size);
4038 if (!tx_ring->tx_buffer_info)
4039 goto err;
4040
4041
4042 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
4043 tx_ring->size = ALIGN(tx_ring->size, 4096);
4044
4045 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4046 &tx_ring->dma, GFP_KERNEL);
4047 if (!tx_ring->desc)
4048 goto err;
4049
4050 tx_ring->next_to_use = 0;
4051 tx_ring->next_to_clean = 0;
4052
4053 return 0;
4054
4055err:
4056 vfree(tx_ring->tx_buffer_info);
4057 tx_ring->tx_buffer_info = NULL;
4058 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4059 return -ENOMEM;
4060}
4061
4062
4063
4064
4065
4066
4067
4068
4069static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
4070{
4071 struct pci_dev *pdev = adapter->pdev;
4072 int i, err = 0;
4073
4074 for (i = 0; i < adapter->num_tx_queues; i++) {
4075 err = igb_setup_tx_resources(adapter->tx_ring[i]);
4076 if (err) {
4077 dev_err(&pdev->dev,
4078 "Allocation for Tx Queue %u failed\n", i);
4079 for (i--; i >= 0; i--)
4080 igb_free_tx_resources(adapter->tx_ring[i]);
4081 break;
4082 }
4083 }
4084
4085 return err;
4086}
4087
4088
4089
4090
4091
4092void igb_setup_tctl(struct igb_adapter *adapter)
4093{
4094 struct e1000_hw *hw = &adapter->hw;
4095 u32 tctl;
4096
4097
4098 wr32(E1000_TXDCTL(0), 0);
4099
4100
4101 tctl = rd32(E1000_TCTL);
4102 tctl &= ~E1000_TCTL_CT;
4103 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
4104 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
4105
4106 igb_config_collision_dist(hw);
4107
4108
4109 tctl |= E1000_TCTL_EN;
4110
4111 wr32(E1000_TCTL, tctl);
4112}
4113
4114
4115
4116
4117
4118
4119
4120
4121void igb_configure_tx_ring(struct igb_adapter *adapter,
4122 struct igb_ring *ring)
4123{
4124 struct e1000_hw *hw = &adapter->hw;
4125 u32 txdctl = 0;
4126 u64 tdba = ring->dma;
4127 int reg_idx = ring->reg_idx;
4128
4129 wr32(E1000_TDLEN(reg_idx),
4130 ring->count * sizeof(union e1000_adv_tx_desc));
4131 wr32(E1000_TDBAL(reg_idx),
4132 tdba & 0x00000000ffffffffULL);
4133 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
4134
4135 ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
4136 wr32(E1000_TDH(reg_idx), 0);
4137 writel(0, ring->tail);
4138
4139 txdctl |= IGB_TX_PTHRESH;
4140 txdctl |= IGB_TX_HTHRESH << 8;
4141 txdctl |= IGB_TX_WTHRESH << 16;
4142
4143
4144 memset(ring->tx_buffer_info, 0,
4145 sizeof(struct igb_tx_buffer) * ring->count);
4146
4147 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
4148 wr32(E1000_TXDCTL(reg_idx), txdctl);
4149}
4150
4151
4152
4153
4154
4155
4156
4157static void igb_configure_tx(struct igb_adapter *adapter)
4158{
4159 struct e1000_hw *hw = &adapter->hw;
4160 int i;
4161
4162
4163 for (i = 0; i < adapter->num_tx_queues; i++)
4164 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4165
4166 wrfl();
4167 usleep_range(10000, 20000);
4168
4169 for (i = 0; i < adapter->num_tx_queues; i++)
4170 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4171}
4172
4173
4174
4175
4176
4177
4178
4179int igb_setup_rx_resources(struct igb_ring *rx_ring)
4180{
4181 struct device *dev = rx_ring->dev;
4182 int size;
4183
4184 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
4185
4186 rx_ring->rx_buffer_info = vmalloc(size);
4187 if (!rx_ring->rx_buffer_info)
4188 goto err;
4189
4190
4191 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
4192 rx_ring->size = ALIGN(rx_ring->size, 4096);
4193
4194 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4195 &rx_ring->dma, GFP_KERNEL);
4196 if (!rx_ring->desc)
4197 goto err;
4198
4199 rx_ring->next_to_alloc = 0;
4200 rx_ring->next_to_clean = 0;
4201 rx_ring->next_to_use = 0;
4202
4203 return 0;
4204
4205err:
4206 vfree(rx_ring->rx_buffer_info);
4207 rx_ring->rx_buffer_info = NULL;
4208 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4209 return -ENOMEM;
4210}
4211
4212
4213
4214
4215
4216
4217
4218
4219static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
4220{
4221 struct pci_dev *pdev = adapter->pdev;
4222 int i, err = 0;
4223
4224 for (i = 0; i < adapter->num_rx_queues; i++) {
4225 err = igb_setup_rx_resources(adapter->rx_ring[i]);
4226 if (err) {
4227 dev_err(&pdev->dev,
4228 "Allocation for Rx Queue %u failed\n", i);
4229 for (i--; i >= 0; i--)
4230 igb_free_rx_resources(adapter->rx_ring[i]);
4231 break;
4232 }
4233 }
4234
4235 return err;
4236}
4237
4238
4239
4240
4241
4242static void igb_setup_mrqc(struct igb_adapter *adapter)
4243{
4244 struct e1000_hw *hw = &adapter->hw;
4245 u32 mrqc, rxcsum;
4246 u32 j, num_rx_queues;
4247 u32 rss_key[10];
4248
4249 netdev_rss_key_fill(rss_key, sizeof(rss_key));
4250 for (j = 0; j < 10; j++)
4251 wr32(E1000_RSSRK(j), rss_key[j]);
4252
4253 num_rx_queues = adapter->rss_queues;
4254
4255 switch (hw->mac.type) {
4256 case e1000_82576:
4257
4258 if (adapter->vfs_allocated_count)
4259 num_rx_queues = 2;
4260 break;
4261 default:
4262 break;
4263 }
4264
4265 if (adapter->rss_indir_tbl_init != num_rx_queues) {
4266 for (j = 0; j < IGB_RETA_SIZE; j++)
4267 adapter->rss_indir_tbl[j] =
4268 (j * num_rx_queues) / IGB_RETA_SIZE;
4269 adapter->rss_indir_tbl_init = num_rx_queues;
4270 }
4271 igb_write_rss_indir_tbl(adapter);
4272
4273
4274
4275
4276
4277 rxcsum = rd32(E1000_RXCSUM);
4278 rxcsum |= E1000_RXCSUM_PCSD;
4279
4280 if (adapter->hw.mac.type >= e1000_82576)
4281
4282 rxcsum |= E1000_RXCSUM_CRCOFL;
4283
4284
4285 wr32(E1000_RXCSUM, rxcsum);
4286
4287
4288
4289
4290 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
4291 E1000_MRQC_RSS_FIELD_IPV4_TCP |
4292 E1000_MRQC_RSS_FIELD_IPV6 |
4293 E1000_MRQC_RSS_FIELD_IPV6_TCP |
4294 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
4295
4296 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
4297 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
4298 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
4299 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
4300
4301
4302
4303
4304
4305 if (adapter->vfs_allocated_count) {
4306 if (hw->mac.type > e1000_82575) {
4307
4308 u32 vtctl = rd32(E1000_VT_CTL);
4309
4310 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
4311 E1000_VT_CTL_DISABLE_DEF_POOL);
4312 vtctl |= adapter->vfs_allocated_count <<
4313 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
4314 wr32(E1000_VT_CTL, vtctl);
4315 }
4316 if (adapter->rss_queues > 1)
4317 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
4318 else
4319 mrqc |= E1000_MRQC_ENABLE_VMDQ;
4320 } else {
4321 if (hw->mac.type != e1000_i211)
4322 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4323 }
4324 igb_vmm_control(adapter);
4325
4326 wr32(E1000_MRQC, mrqc);
4327}
4328
4329
4330
4331
4332
4333void igb_setup_rctl(struct igb_adapter *adapter)
4334{
4335 struct e1000_hw *hw = &adapter->hw;
4336 u32 rctl;
4337
4338 rctl = rd32(E1000_RCTL);
4339
4340 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4341 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4342
4343 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4344 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4345
4346
4347
4348
4349
4350 rctl |= E1000_RCTL_SECRC;
4351
4352
4353 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
4354
4355
4356 rctl |= E1000_RCTL_LPE;
4357
4358
4359 wr32(E1000_RXDCTL(0), 0);
4360
4361
4362
4363
4364
4365 if (adapter->vfs_allocated_count) {
4366
4367 wr32(E1000_QDE, ALL_QUEUES);
4368 }
4369
4370
4371 if (adapter->netdev->features & NETIF_F_RXALL) {
4372
4373
4374
4375 rctl |= (E1000_RCTL_SBP |
4376 E1000_RCTL_BAM |
4377 E1000_RCTL_PMCF);
4378
4379 rctl &= ~(E1000_RCTL_DPF |
4380 E1000_RCTL_CFIEN);
4381
4382
4383
4384 }
4385
4386 wr32(E1000_RCTL, rctl);
4387}
4388
4389static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4390 int vfn)
4391{
4392 struct e1000_hw *hw = &adapter->hw;
4393 u32 vmolr;
4394
4395 if (size > MAX_JUMBO_FRAME_SIZE)
4396 size = MAX_JUMBO_FRAME_SIZE;
4397
4398 vmolr = rd32(E1000_VMOLR(vfn));
4399 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4400 vmolr |= size | E1000_VMOLR_LPE;
4401 wr32(E1000_VMOLR(vfn), vmolr);
4402
4403 return 0;
4404}
4405
4406static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4407 int vfn, bool enable)
4408{
4409 struct e1000_hw *hw = &adapter->hw;
4410 u32 val, reg;
4411
4412 if (hw->mac.type < e1000_82576)
4413 return;
4414
4415 if (hw->mac.type == e1000_i350)
4416 reg = E1000_DVMOLR(vfn);
4417 else
4418 reg = E1000_VMOLR(vfn);
4419
4420 val = rd32(reg);
4421 if (enable)
4422 val |= E1000_VMOLR_STRVLAN;
4423 else
4424 val &= ~(E1000_VMOLR_STRVLAN);
4425 wr32(reg, val);
4426}
4427
4428static inline void igb_set_vmolr(struct igb_adapter *adapter,
4429 int vfn, bool aupe)
4430{
4431 struct e1000_hw *hw = &adapter->hw;
4432 u32 vmolr;
4433
4434
4435
4436
4437 if (hw->mac.type < e1000_82576)
4438 return;
4439
4440 vmolr = rd32(E1000_VMOLR(vfn));
4441 if (aupe)
4442 vmolr |= E1000_VMOLR_AUPE;
4443 else
4444 vmolr &= ~(E1000_VMOLR_AUPE);
4445
4446
4447 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4448
4449 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
4450 vmolr |= E1000_VMOLR_RSSE;
4451
4452
4453
4454 if (vfn <= adapter->vfs_allocated_count)
4455 vmolr |= E1000_VMOLR_BAM;
4456
4457 wr32(E1000_VMOLR(vfn), vmolr);
4458}
4459
4460
4461
4462
4463
4464
4465
4466void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
4467{
4468 struct e1000_hw *hw = &adapter->hw;
4469 int reg_idx = ring->reg_idx;
4470 u32 srrctl = 0;
4471
4472 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4473 if (ring_uses_large_buffer(ring))
4474 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4475 else
4476 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4477 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4478 if (hw->mac.type >= e1000_82580)
4479 srrctl |= E1000_SRRCTL_TIMESTAMP;
4480
4481
4482
4483 if (adapter->vfs_allocated_count ||
4484 (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
4485 adapter->num_rx_queues > 1))
4486 srrctl |= E1000_SRRCTL_DROP_EN;
4487
4488 wr32(E1000_SRRCTL(reg_idx), srrctl);
4489}
4490
4491
4492
4493
4494
4495
4496
4497
4498void igb_configure_rx_ring(struct igb_adapter *adapter,
4499 struct igb_ring *ring)
4500{
4501 struct e1000_hw *hw = &adapter->hw;
4502 union e1000_adv_rx_desc *rx_desc;
4503 u64 rdba = ring->dma;
4504 int reg_idx = ring->reg_idx;
4505 u32 rxdctl = 0;
4506
4507
4508 wr32(E1000_RXDCTL(reg_idx), 0);
4509
4510
4511 wr32(E1000_RDBAL(reg_idx),
4512 rdba & 0x00000000ffffffffULL);
4513 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4514 wr32(E1000_RDLEN(reg_idx),
4515 ring->count * sizeof(union e1000_adv_rx_desc));
4516
4517
4518 ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
4519 wr32(E1000_RDH(reg_idx), 0);
4520 writel(0, ring->tail);
4521
4522
4523 igb_setup_srrctl(adapter, ring);
4524
4525
4526 igb_set_vmolr(adapter, reg_idx & 0x7, true);
4527
4528 rxdctl |= IGB_RX_PTHRESH;
4529 rxdctl |= IGB_RX_HTHRESH << 8;
4530 rxdctl |= IGB_RX_WTHRESH << 16;
4531
4532
4533 memset(ring->rx_buffer_info, 0,
4534 sizeof(struct igb_rx_buffer) * ring->count);
4535
4536
4537 rx_desc = IGB_RX_DESC(ring, 0);
4538 rx_desc->wb.upper.length = 0;
4539
4540
4541 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4542 wr32(E1000_RXDCTL(reg_idx), rxdctl);
4543}
4544
4545static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4546 struct igb_ring *rx_ring)
4547{
4548
4549 clear_ring_build_skb_enabled(rx_ring);
4550 clear_ring_uses_large_buffer(rx_ring);
4551
4552 if (adapter->flags & IGB_FLAG_RX_LEGACY)
4553 return;
4554
4555 set_ring_build_skb_enabled(rx_ring);
4556
4557#if (PAGE_SIZE < 8192)
4558 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
4559 return;
4560
4561 set_ring_uses_large_buffer(rx_ring);
4562#endif
4563}
4564
4565
4566
4567
4568
4569
4570
4571static void igb_configure_rx(struct igb_adapter *adapter)
4572{
4573 int i;
4574
4575
4576 igb_set_default_mac_filter(adapter);
4577
4578
4579
4580
4581 for (i = 0; i < adapter->num_rx_queues; i++) {
4582 struct igb_ring *rx_ring = adapter->rx_ring[i];
4583
4584 igb_set_rx_buffer_len(adapter, rx_ring);
4585 igb_configure_rx_ring(adapter, rx_ring);
4586 }
4587}
4588
4589
4590
4591
4592
4593
4594
4595void igb_free_tx_resources(struct igb_ring *tx_ring)
4596{
4597 igb_clean_tx_ring(tx_ring);
4598
4599 vfree(tx_ring->tx_buffer_info);
4600 tx_ring->tx_buffer_info = NULL;
4601
4602
4603 if (!tx_ring->desc)
4604 return;
4605
4606 dma_free_coherent(tx_ring->dev, tx_ring->size,
4607 tx_ring->desc, tx_ring->dma);
4608
4609 tx_ring->desc = NULL;
4610}
4611
4612
4613
4614
4615
4616
4617
4618static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4619{
4620 int i;
4621
4622 for (i = 0; i < adapter->num_tx_queues; i++)
4623 if (adapter->tx_ring[i])
4624 igb_free_tx_resources(adapter->tx_ring[i]);
4625}
4626
4627
4628
4629
4630
4631static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4632{
4633 u16 i = tx_ring->next_to_clean;
4634 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4635
4636 while (i != tx_ring->next_to_use) {
4637 union e1000_adv_tx_desc *eop_desc, *tx_desc;
4638
4639
4640 dev_kfree_skb_any(tx_buffer->skb);
4641
4642
4643 dma_unmap_single(tx_ring->dev,
4644 dma_unmap_addr(tx_buffer, dma),
4645 dma_unmap_len(tx_buffer, len),
4646 DMA_TO_DEVICE);
4647
4648
4649 eop_desc = tx_buffer->next_to_watch;
4650 tx_desc = IGB_TX_DESC(tx_ring, i);
4651
4652
4653 while (tx_desc != eop_desc) {
4654 tx_buffer++;
4655 tx_desc++;
4656 i++;
4657 if (unlikely(i == tx_ring->count)) {
4658 i = 0;
4659 tx_buffer = tx_ring->tx_buffer_info;
4660 tx_desc = IGB_TX_DESC(tx_ring, 0);
4661 }
4662
4663
4664 if (dma_unmap_len(tx_buffer, len))
4665 dma_unmap_page(tx_ring->dev,
4666 dma_unmap_addr(tx_buffer, dma),
4667 dma_unmap_len(tx_buffer, len),
4668 DMA_TO_DEVICE);
4669 }
4670
4671
4672 tx_buffer++;
4673 i++;
4674 if (unlikely(i == tx_ring->count)) {
4675 i = 0;
4676 tx_buffer = tx_ring->tx_buffer_info;
4677 }
4678 }
4679
4680
4681 netdev_tx_reset_queue(txring_txq(tx_ring));
4682
4683
4684 tx_ring->next_to_use = 0;
4685 tx_ring->next_to_clean = 0;
4686}
4687
4688
4689
4690
4691
4692static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4693{
4694 int i;
4695
4696 for (i = 0; i < adapter->num_tx_queues; i++)
4697 if (adapter->tx_ring[i])
4698 igb_clean_tx_ring(adapter->tx_ring[i]);
4699}
4700
4701
4702
4703
4704
4705
4706
4707void igb_free_rx_resources(struct igb_ring *rx_ring)
4708{
4709 igb_clean_rx_ring(rx_ring);
4710
4711 vfree(rx_ring->rx_buffer_info);
4712 rx_ring->rx_buffer_info = NULL;
4713
4714
4715 if (!rx_ring->desc)
4716 return;
4717
4718 dma_free_coherent(rx_ring->dev, rx_ring->size,
4719 rx_ring->desc, rx_ring->dma);
4720
4721 rx_ring->desc = NULL;
4722}
4723
4724
4725
4726
4727
4728
4729
4730static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4731{
4732 int i;
4733
4734 for (i = 0; i < adapter->num_rx_queues; i++)
4735 if (adapter->rx_ring[i])
4736 igb_free_rx_resources(adapter->rx_ring[i]);
4737}
4738
4739
4740
4741
4742
4743static void igb_clean_rx_ring(struct igb_ring *rx_ring)
4744{
4745 u16 i = rx_ring->next_to_clean;
4746
4747 dev_kfree_skb(rx_ring->skb);
4748 rx_ring->skb = NULL;
4749
4750
4751 while (i != rx_ring->next_to_alloc) {
4752 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
4753
4754
4755
4756
4757 dma_sync_single_range_for_cpu(rx_ring->dev,
4758 buffer_info->dma,
4759 buffer_info->page_offset,
4760 igb_rx_bufsz(rx_ring),
4761 DMA_FROM_DEVICE);
4762
4763
4764 dma_unmap_page_attrs(rx_ring->dev,
4765 buffer_info->dma,
4766 igb_rx_pg_size(rx_ring),
4767 DMA_FROM_DEVICE,
4768 IGB_RX_DMA_ATTR);
4769 __page_frag_cache_drain(buffer_info->page,
4770 buffer_info->pagecnt_bias);
4771
4772 i++;
4773 if (i == rx_ring->count)
4774 i = 0;
4775 }
4776
4777 rx_ring->next_to_alloc = 0;
4778 rx_ring->next_to_clean = 0;
4779 rx_ring->next_to_use = 0;
4780}
4781
4782
4783
4784
4785
4786static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
4787{
4788 int i;
4789
4790 for (i = 0; i < adapter->num_rx_queues; i++)
4791 if (adapter->rx_ring[i])
4792 igb_clean_rx_ring(adapter->rx_ring[i]);
4793}
4794
4795
4796
4797
4798
4799
4800
4801
4802static int igb_set_mac(struct net_device *netdev, void *p)
4803{
4804 struct igb_adapter *adapter = netdev_priv(netdev);
4805 struct e1000_hw *hw = &adapter->hw;
4806 struct sockaddr *addr = p;
4807
4808 if (!is_valid_ether_addr(addr->sa_data))
4809 return -EADDRNOTAVAIL;
4810
4811 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4812 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
4813
4814
4815 igb_set_default_mac_filter(adapter);
4816
4817 return 0;
4818}
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829static int igb_write_mc_addr_list(struct net_device *netdev)
4830{
4831 struct igb_adapter *adapter = netdev_priv(netdev);
4832 struct e1000_hw *hw = &adapter->hw;
4833 struct netdev_hw_addr *ha;
4834 u8 *mta_list;
4835 int i;
4836
4837 if (netdev_mc_empty(netdev)) {
4838
4839 igb_update_mc_addr_list(hw, NULL, 0);
4840 igb_restore_vf_multicasts(adapter);
4841 return 0;
4842 }
4843
4844 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
4845 if (!mta_list)
4846 return -ENOMEM;
4847
4848
4849 i = 0;
4850 netdev_for_each_mc_addr(ha, netdev)
4851 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
4852
4853 igb_update_mc_addr_list(hw, mta_list, i);
4854 kfree(mta_list);
4855
4856 return netdev_mc_count(netdev);
4857}
4858
4859static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
4860{
4861 struct e1000_hw *hw = &adapter->hw;
4862 u32 i, pf_id;
4863
4864 switch (hw->mac.type) {
4865 case e1000_i210:
4866 case e1000_i211:
4867 case e1000_i350:
4868
4869 if (adapter->netdev->features & NETIF_F_NTUPLE)
4870 break;
4871 fallthrough;
4872 case e1000_82576:
4873 case e1000_82580:
4874 case e1000_i354:
4875
4876 if (adapter->vfs_allocated_count)
4877 break;
4878 fallthrough;
4879 default:
4880 return 1;
4881 }
4882
4883
4884 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
4885 return 0;
4886
4887 if (!adapter->vfs_allocated_count)
4888 goto set_vfta;
4889
4890
4891 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4892
4893 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4894 u32 vlvf = rd32(E1000_VLVF(i));
4895
4896 vlvf |= BIT(pf_id);
4897 wr32(E1000_VLVF(i), vlvf);
4898 }
4899
4900set_vfta:
4901
4902 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
4903 hw->mac.ops.write_vfta(hw, i, ~0U);
4904
4905
4906 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
4907
4908 return 0;
4909}
4910
4911#define VFTA_BLOCK_SIZE 8
4912static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
4913{
4914 struct e1000_hw *hw = &adapter->hw;
4915 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4916 u32 vid_start = vfta_offset * 32;
4917 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4918 u32 i, vid, word, bits, pf_id;
4919
4920
4921 vid = adapter->mng_vlan_id;
4922 if (vid >= vid_start && vid < vid_end)
4923 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4924
4925 if (!adapter->vfs_allocated_count)
4926 goto set_vfta;
4927
4928 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4929
4930 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4931 u32 vlvf = rd32(E1000_VLVF(i));
4932
4933
4934 vid = vlvf & VLAN_VID_MASK;
4935
4936
4937 if (vid < vid_start || vid >= vid_end)
4938 continue;
4939
4940 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
4941
4942 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4943
4944
4945 if (test_bit(vid, adapter->active_vlans))
4946 continue;
4947 }
4948
4949
4950 bits = ~BIT(pf_id);
4951 bits &= rd32(E1000_VLVF(i));
4952 wr32(E1000_VLVF(i), bits);
4953 }
4954
4955set_vfta:
4956
4957 for (i = VFTA_BLOCK_SIZE; i--;) {
4958 vid = (vfta_offset + i) * 32;
4959 word = vid / BITS_PER_LONG;
4960 bits = vid % BITS_PER_LONG;
4961
4962 vfta[i] |= adapter->active_vlans[word] >> bits;
4963
4964 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
4965 }
4966}
4967
4968static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
4969{
4970 u32 i;
4971
4972
4973 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
4974 return;
4975
4976
4977 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
4978
4979 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
4980 igb_scrub_vfta(adapter, i);
4981}
4982
4983
4984
4985
4986
4987
4988
4989
4990
4991
4992static void igb_set_rx_mode(struct net_device *netdev)
4993{
4994 struct igb_adapter *adapter = netdev_priv(netdev);
4995 struct e1000_hw *hw = &adapter->hw;
4996 unsigned int vfn = adapter->vfs_allocated_count;
4997 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
4998 int count;
4999
5000
5001 if (netdev->flags & IFF_PROMISC) {
5002 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
5003 vmolr |= E1000_VMOLR_MPME;
5004
5005
5006 if (hw->mac.type == e1000_82576)
5007 vmolr |= E1000_VMOLR_ROPE;
5008 } else {
5009 if (netdev->flags & IFF_ALLMULTI) {
5010 rctl |= E1000_RCTL_MPE;
5011 vmolr |= E1000_VMOLR_MPME;
5012 } else {
5013
5014
5015
5016
5017 count = igb_write_mc_addr_list(netdev);
5018 if (count < 0) {
5019 rctl |= E1000_RCTL_MPE;
5020 vmolr |= E1000_VMOLR_MPME;
5021 } else if (count) {
5022 vmolr |= E1000_VMOLR_ROMPE;
5023 }
5024 }
5025 }
5026
5027
5028
5029
5030
5031 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
5032 rctl |= E1000_RCTL_UPE;
5033 vmolr |= E1000_VMOLR_ROPE;
5034 }
5035
5036
5037 rctl |= E1000_RCTL_VFE;
5038
5039
5040 if ((netdev->flags & IFF_PROMISC) ||
5041 (netdev->features & NETIF_F_RXALL)) {
5042
5043 if (igb_vlan_promisc_enable(adapter))
5044 rctl &= ~E1000_RCTL_VFE;
5045 } else {
5046 igb_vlan_promisc_disable(adapter);
5047 }
5048
5049
5050 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
5051 E1000_RCTL_VFE);
5052 wr32(E1000_RCTL, rctl);
5053
5054#if (PAGE_SIZE < 8192)
5055 if (!adapter->vfs_allocated_count) {
5056 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5057 rlpml = IGB_MAX_FRAME_BUILD_SKB;
5058 }
5059#endif
5060 wr32(E1000_RLPML, rlpml);
5061
5062
5063
5064
5065
5066
5067 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
5068 return;
5069
5070
5071 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
5072
5073 vmolr |= rd32(E1000_VMOLR(vfn)) &
5074 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
5075
5076
5077 vmolr &= ~E1000_VMOLR_RLPML_MASK;
5078#if (PAGE_SIZE < 8192)
5079 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5080 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
5081 else
5082#endif
5083 vmolr |= MAX_JUMBO_FRAME_SIZE;
5084 vmolr |= E1000_VMOLR_LPE;
5085
5086 wr32(E1000_VMOLR(vfn), vmolr);
5087
5088 igb_restore_vf_multicasts(adapter);
5089}
5090
5091static void igb_check_wvbr(struct igb_adapter *adapter)
5092{
5093 struct e1000_hw *hw = &adapter->hw;
5094 u32 wvbr = 0;
5095
5096 switch (hw->mac.type) {
5097 case e1000_82576:
5098 case e1000_i350:
5099 wvbr = rd32(E1000_WVBR);
5100 if (!wvbr)
5101 return;
5102 break;
5103 default:
5104 break;
5105 }
5106
5107 adapter->wvbr |= wvbr;
5108}
5109
5110#define IGB_STAGGERED_QUEUE_OFFSET 8
5111
5112static void igb_spoof_check(struct igb_adapter *adapter)
5113{
5114 int j;
5115
5116 if (!adapter->wvbr)
5117 return;
5118
5119 for (j = 0; j < adapter->vfs_allocated_count; j++) {
5120 if (adapter->wvbr & BIT(j) ||
5121 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
5122 dev_warn(&adapter->pdev->dev,
5123 "Spoof event(s) detected on VF %d\n", j);
5124 adapter->wvbr &=
5125 ~(BIT(j) |
5126 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
5127 }
5128 }
5129}
5130
5131
5132
5133
5134static void igb_update_phy_info(struct timer_list *t)
5135{
5136 struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5137 igb_get_phy_info(&adapter->hw);
5138}
5139
5140
5141
5142
5143
5144bool igb_has_link(struct igb_adapter *adapter)
5145{
5146 struct e1000_hw *hw = &adapter->hw;
5147 bool link_active = false;
5148
5149
5150
5151
5152
5153
5154 switch (hw->phy.media_type) {
5155 case e1000_media_type_copper:
5156 if (!hw->mac.get_link_status)
5157 return true;
5158 fallthrough;
5159 case e1000_media_type_internal_serdes:
5160 hw->mac.ops.check_for_link(hw);
5161 link_active = !hw->mac.get_link_status;
5162 break;
5163 default:
5164 case e1000_media_type_unknown:
5165 break;
5166 }
5167
5168 if (((hw->mac.type == e1000_i210) ||
5169 (hw->mac.type == e1000_i211)) &&
5170 (hw->phy.id == I210_I_PHY_ID)) {
5171 if (!netif_carrier_ok(adapter->netdev)) {
5172 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5173 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
5174 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
5175 adapter->link_check_timeout = jiffies;
5176 }
5177 }
5178
5179 return link_active;
5180}
5181
5182static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
5183{
5184 bool ret = false;
5185 u32 ctrl_ext, thstat;
5186
5187
5188 if (hw->mac.type == e1000_i350) {
5189 thstat = rd32(E1000_THSTAT);
5190 ctrl_ext = rd32(E1000_CTRL_EXT);
5191
5192 if ((hw->phy.media_type == e1000_media_type_copper) &&
5193 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
5194 ret = !!(thstat & event);
5195 }
5196
5197 return ret;
5198}
5199
5200
5201
5202
5203
5204
5205static void igb_check_lvmmc(struct igb_adapter *adapter)
5206{
5207 struct e1000_hw *hw = &adapter->hw;
5208 u32 lvmmc;
5209
5210 lvmmc = rd32(E1000_LVMMC);
5211 if (lvmmc) {
5212 if (unlikely(net_ratelimit())) {
5213 netdev_warn(adapter->netdev,
5214 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
5215 lvmmc);
5216 }
5217 }
5218}
5219
5220
5221
5222
5223
5224static void igb_watchdog(struct timer_list *t)
5225{
5226 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5227
5228 schedule_work(&adapter->watchdog_task);
5229}
5230
5231static void igb_watchdog_task(struct work_struct *work)
5232{
5233 struct igb_adapter *adapter = container_of(work,
5234 struct igb_adapter,
5235 watchdog_task);
5236 struct e1000_hw *hw = &adapter->hw;
5237 struct e1000_phy_info *phy = &hw->phy;
5238 struct net_device *netdev = adapter->netdev;
5239 u32 link;
5240 int i;
5241 u32 connsw;
5242 u16 phy_data, retry_count = 20;
5243
5244 link = igb_has_link(adapter);
5245
5246 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
5247 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5248 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5249 else
5250 link = false;
5251 }
5252
5253
5254 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5255 if (hw->phy.media_type == e1000_media_type_copper) {
5256 connsw = rd32(E1000_CONNSW);
5257 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
5258 link = 0;
5259 }
5260 }
5261 if (link) {
5262
5263 if (hw->dev_spec._82575.media_changed) {
5264 hw->dev_spec._82575.media_changed = false;
5265 adapter->flags |= IGB_FLAG_MEDIA_RESET;
5266 igb_reset(adapter);
5267 }
5268
5269 pm_runtime_resume(netdev->dev.parent);
5270
5271 if (!netif_carrier_ok(netdev)) {
5272 u32 ctrl;
5273
5274 hw->mac.ops.get_speed_and_duplex(hw,
5275 &adapter->link_speed,
5276 &adapter->link_duplex);
5277
5278 ctrl = rd32(E1000_CTRL);
5279
5280 netdev_info(netdev,
5281 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5282 netdev->name,
5283 adapter->link_speed,
5284 adapter->link_duplex == FULL_DUPLEX ?
5285 "Full" : "Half",
5286 (ctrl & E1000_CTRL_TFCE) &&
5287 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
5288 (ctrl & E1000_CTRL_RFCE) ? "RX" :
5289 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
5290
5291
5292 if ((adapter->flags & IGB_FLAG_EEE) &&
5293 (adapter->link_duplex == HALF_DUPLEX)) {
5294 dev_info(&adapter->pdev->dev,
5295 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
5296 adapter->hw.dev_spec._82575.eee_disable = true;
5297 adapter->flags &= ~IGB_FLAG_EEE;
5298 }
5299
5300
5301 igb_check_downshift(hw);
5302 if (phy->speed_downgraded)
5303 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5304
5305
5306 if (igb_thermal_sensor_event(hw,
5307 E1000_THSTAT_LINK_THROTTLE))
5308 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
5309
5310
5311 adapter->tx_timeout_factor = 1;
5312 switch (adapter->link_speed) {
5313 case SPEED_10:
5314 adapter->tx_timeout_factor = 14;
5315 break;
5316 case SPEED_100:
5317
5318 break;
5319 }
5320
5321 if (adapter->link_speed != SPEED_1000)
5322 goto no_wait;
5323
5324
5325retry_read_status:
5326 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
5327 &phy_data)) {
5328 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5329 retry_count) {
5330 msleep(100);
5331 retry_count--;
5332 goto retry_read_status;
5333 } else if (!retry_count) {
5334 dev_err(&adapter->pdev->dev, "exceed max 2 second\n");
5335 }
5336 } else {
5337 dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n");
5338 }
5339no_wait:
5340 netif_carrier_on(netdev);
5341
5342 igb_ping_all_vfs(adapter);
5343 igb_check_vf_rate_limit(adapter);
5344
5345
5346 if (!test_bit(__IGB_DOWN, &adapter->state))
5347 mod_timer(&adapter->phy_info_timer,
5348 round_jiffies(jiffies + 2 * HZ));
5349 }
5350 } else {
5351 if (netif_carrier_ok(netdev)) {
5352 adapter->link_speed = 0;
5353 adapter->link_duplex = 0;
5354
5355
5356 if (igb_thermal_sensor_event(hw,
5357 E1000_THSTAT_PWR_DOWN)) {
5358 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5359 }
5360
5361
5362 netdev_info(netdev, "igb: %s NIC Link is Down\n",
5363 netdev->name);
5364 netif_carrier_off(netdev);
5365
5366 igb_ping_all_vfs(adapter);
5367
5368
5369 if (!test_bit(__IGB_DOWN, &adapter->state))
5370 mod_timer(&adapter->phy_info_timer,
5371 round_jiffies(jiffies + 2 * HZ));
5372
5373
5374 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5375 igb_check_swap_media(adapter);
5376 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5377 schedule_work(&adapter->reset_task);
5378
5379 return;
5380 }
5381 }
5382 pm_schedule_suspend(netdev->dev.parent,
5383 MSEC_PER_SEC * 5);
5384
5385
5386 } else if (!netif_carrier_ok(netdev) &&
5387 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5388 igb_check_swap_media(adapter);
5389 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5390 schedule_work(&adapter->reset_task);
5391
5392 return;
5393 }
5394 }
5395 }
5396
5397 spin_lock(&adapter->stats64_lock);
5398 igb_update_stats(adapter);
5399 spin_unlock(&adapter->stats64_lock);
5400
5401 for (i = 0; i < adapter->num_tx_queues; i++) {
5402 struct igb_ring *tx_ring = adapter->tx_ring[i];
5403 if (!netif_carrier_ok(netdev)) {
5404
5405
5406
5407
5408
5409 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5410 adapter->tx_timeout_count++;
5411 schedule_work(&adapter->reset_task);
5412
5413 return;
5414 }
5415 }
5416
5417
5418 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5419 }
5420
5421
5422 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5423 u32 eics = 0;
5424
5425 for (i = 0; i < adapter->num_q_vectors; i++)
5426 eics |= adapter->q_vector[i]->eims_value;
5427 wr32(E1000_EICS, eics);
5428 } else {
5429 wr32(E1000_ICS, E1000_ICS_RXDMT0);
5430 }
5431
5432 igb_spoof_check(adapter);
5433 igb_ptp_rx_hang(adapter);
5434 igb_ptp_tx_hang(adapter);
5435
5436
5437 if ((adapter->hw.mac.type == e1000_i350) ||
5438 (adapter->hw.mac.type == e1000_i354))
5439 igb_check_lvmmc(adapter);
5440
5441
5442 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5443 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5444 mod_timer(&adapter->watchdog_timer,
5445 round_jiffies(jiffies + HZ));
5446 else
5447 mod_timer(&adapter->watchdog_timer,
5448 round_jiffies(jiffies + 2 * HZ));
5449 }
5450}
5451
5452enum latency_range {
5453 lowest_latency = 0,
5454 low_latency = 1,
5455 bulk_latency = 2,
5456 latency_invalid = 255
5457};
5458
5459
5460
5461
5462
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5475{
5476 int new_val = q_vector->itr_val;
5477 int avg_wire_size = 0;
5478 struct igb_adapter *adapter = q_vector->adapter;
5479 unsigned int packets;
5480
5481
5482
5483
5484 if (adapter->link_speed != SPEED_1000) {
5485 new_val = IGB_4K_ITR;
5486 goto set_itr_val;
5487 }
5488
5489 packets = q_vector->rx.total_packets;
5490 if (packets)
5491 avg_wire_size = q_vector->rx.total_bytes / packets;
5492
5493 packets = q_vector->tx.total_packets;
5494 if (packets)
5495 avg_wire_size = max_t(u32, avg_wire_size,
5496 q_vector->tx.total_bytes / packets);
5497
5498
5499 if (!avg_wire_size)
5500 goto clear_counts;
5501
5502
5503 avg_wire_size += 24;
5504
5505
5506 avg_wire_size = min(avg_wire_size, 3000);
5507
5508
5509 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5510 new_val = avg_wire_size / 3;
5511 else
5512 new_val = avg_wire_size / 2;
5513
5514
5515 if (new_val < IGB_20K_ITR &&
5516 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5517 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5518 new_val = IGB_20K_ITR;
5519
5520set_itr_val:
5521 if (new_val != q_vector->itr_val) {
5522 q_vector->itr_val = new_val;
5523 q_vector->set_itr = 1;
5524 }
5525clear_counts:
5526 q_vector->rx.total_bytes = 0;
5527 q_vector->rx.total_packets = 0;
5528 q_vector->tx.total_bytes = 0;
5529 q_vector->tx.total_packets = 0;
5530}
5531
5532
5533
5534
5535
5536
5537
5538
5539
5540
5541
5542
5543
5544
5545
5546
5547
5548static void igb_update_itr(struct igb_q_vector *q_vector,
5549 struct igb_ring_container *ring_container)
5550{
5551 unsigned int packets = ring_container->total_packets;
5552 unsigned int bytes = ring_container->total_bytes;
5553 u8 itrval = ring_container->itr;
5554
5555
5556 if (packets == 0)
5557 return;
5558
5559 switch (itrval) {
5560 case lowest_latency:
5561
5562 if (bytes/packets > 8000)
5563 itrval = bulk_latency;
5564 else if ((packets < 5) && (bytes > 512))
5565 itrval = low_latency;
5566 break;
5567 case low_latency:
5568 if (bytes > 10000) {
5569
5570 if (bytes/packets > 8000)
5571 itrval = bulk_latency;
5572 else if ((packets < 10) || ((bytes/packets) > 1200))
5573 itrval = bulk_latency;
5574 else if ((packets > 35))
5575 itrval = lowest_latency;
5576 } else if (bytes/packets > 2000) {
5577 itrval = bulk_latency;
5578 } else if (packets <= 2 && bytes < 512) {
5579 itrval = lowest_latency;
5580 }
5581 break;
5582 case bulk_latency:
5583 if (bytes > 25000) {
5584 if (packets > 35)
5585 itrval = low_latency;
5586 } else if (bytes < 1500) {
5587 itrval = low_latency;
5588 }
5589 break;
5590 }
5591
5592
5593 ring_container->total_bytes = 0;
5594 ring_container->total_packets = 0;
5595
5596
5597 ring_container->itr = itrval;
5598}
5599
5600static void igb_set_itr(struct igb_q_vector *q_vector)
5601{
5602 struct igb_adapter *adapter = q_vector->adapter;
5603 u32 new_itr = q_vector->itr_val;
5604 u8 current_itr = 0;
5605
5606
5607 if (adapter->link_speed != SPEED_1000) {
5608 current_itr = 0;
5609 new_itr = IGB_4K_ITR;
5610 goto set_itr_now;
5611 }
5612
5613 igb_update_itr(q_vector, &q_vector->tx);
5614 igb_update_itr(q_vector, &q_vector->rx);
5615
5616 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5617
5618
5619 if (current_itr == lowest_latency &&
5620 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5621 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5622 current_itr = low_latency;
5623
5624 switch (current_itr) {
5625
5626 case lowest_latency:
5627 new_itr = IGB_70K_ITR;
5628 break;
5629 case low_latency:
5630 new_itr = IGB_20K_ITR;
5631 break;
5632 case bulk_latency:
5633 new_itr = IGB_4K_ITR;
5634 break;
5635 default:
5636 break;
5637 }
5638
5639set_itr_now:
5640 if (new_itr != q_vector->itr_val) {
5641
5642
5643
5644
5645 new_itr = new_itr > q_vector->itr_val ?
5646 max((new_itr * q_vector->itr_val) /
5647 (new_itr + (q_vector->itr_val >> 2)),
5648 new_itr) : new_itr;
5649
5650
5651
5652
5653
5654
5655 q_vector->itr_val = new_itr;
5656 q_vector->set_itr = 1;
5657 }
5658}
5659
5660static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5661 struct igb_tx_buffer *first,
5662 u32 vlan_macip_lens, u32 type_tucmd,
5663 u32 mss_l4len_idx)
5664{
5665 struct e1000_adv_tx_context_desc *context_desc;
5666 u16 i = tx_ring->next_to_use;
5667 struct timespec64 ts;
5668
5669 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5670
5671 i++;
5672 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5673
5674
5675 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5676
5677
5678 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5679 mss_l4len_idx |= tx_ring->reg_idx << 4;
5680
5681 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5682 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
5683 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5684
5685
5686
5687
5688 if (tx_ring->launchtime_enable) {
5689 ts = ktime_to_timespec64(first->skb->tstamp);
5690 first->skb->tstamp = ktime_set(0, 0);
5691 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5692 } else {
5693 context_desc->seqnum_seed = 0;
5694 }
5695}
5696
5697static int igb_tso(struct igb_ring *tx_ring,
5698 struct igb_tx_buffer *first,
5699 u8 *hdr_len)
5700{
5701 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5702 struct sk_buff *skb = first->skb;
5703 union {
5704 struct iphdr *v4;
5705 struct ipv6hdr *v6;
5706 unsigned char *hdr;
5707 } ip;
5708 union {
5709 struct tcphdr *tcp;
5710 struct udphdr *udp;
5711 unsigned char *hdr;
5712 } l4;
5713 u32 paylen, l4_offset;
5714 int err;
5715
5716 if (skb->ip_summed != CHECKSUM_PARTIAL)
5717 return 0;
5718
5719 if (!skb_is_gso(skb))
5720 return 0;
5721
5722 err = skb_cow_head(skb, 0);
5723 if (err < 0)
5724 return err;
5725
5726 ip.hdr = skb_network_header(skb);
5727 l4.hdr = skb_checksum_start(skb);
5728
5729
5730 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
5731 E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
5732
5733
5734 if (ip.v4->version == 4) {
5735 unsigned char *csum_start = skb_checksum_start(skb);
5736 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
5737
5738
5739
5740
5741 ip.v4->check = csum_fold(csum_partial(trans_start,
5742 csum_start - trans_start,
5743 0));
5744 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
5745
5746 ip.v4->tot_len = 0;
5747 first->tx_flags |= IGB_TX_FLAGS_TSO |
5748 IGB_TX_FLAGS_CSUM |
5749 IGB_TX_FLAGS_IPV4;
5750 } else {
5751 ip.v6->payload_len = 0;
5752 first->tx_flags |= IGB_TX_FLAGS_TSO |
5753 IGB_TX_FLAGS_CSUM;
5754 }
5755
5756
5757 l4_offset = l4.hdr - skb->data;
5758
5759
5760 paylen = skb->len - l4_offset;
5761 if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
5762
5763 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
5764 csum_replace_by_diff(&l4.tcp->check,
5765 (__force __wsum)htonl(paylen));
5766 } else {
5767
5768 *hdr_len = sizeof(*l4.udp) + l4_offset;
5769 csum_replace_by_diff(&l4.udp->check,
5770 (__force __wsum)htonl(paylen));
5771 }
5772
5773
5774 first->gso_segs = skb_shinfo(skb)->gso_segs;
5775 first->bytecount += (first->gso_segs - 1) * *hdr_len;
5776
5777
5778 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
5779 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
5780
5781
5782 vlan_macip_lens = l4.hdr - ip.hdr;
5783 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
5784 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5785
5786 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
5787 type_tucmd, mss_l4len_idx);
5788
5789 return 1;
5790}
5791
5792static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
5793{
5794 unsigned int offset = 0;
5795
5796 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
5797
5798 return offset == skb_checksum_start_offset(skb);
5799}
5800
5801static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5802{
5803 struct sk_buff *skb = first->skb;
5804 u32 vlan_macip_lens = 0;
5805 u32 type_tucmd = 0;
5806
5807 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5808csum_failed:
5809 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
5810 !tx_ring->launchtime_enable)
5811 return;
5812 goto no_csum;
5813 }
5814
5815 switch (skb->csum_offset) {
5816 case offsetof(struct tcphdr, check):
5817 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5818 fallthrough;
5819 case offsetof(struct udphdr, check):
5820 break;
5821 case offsetof(struct sctphdr, checksum):
5822
5823 if (((first->protocol == htons(ETH_P_IP)) &&
5824 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
5825 ((first->protocol == htons(ETH_P_IPV6)) &&
5826 igb_ipv6_csum_is_sctp(skb))) {
5827 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
5828 break;
5829 }
5830 fallthrough;
5831 default:
5832 skb_checksum_help(skb);
5833 goto csum_failed;
5834 }
5835
5836
5837 first->tx_flags |= IGB_TX_FLAGS_CSUM;
5838 vlan_macip_lens = skb_checksum_start_offset(skb) -
5839 skb_network_offset(skb);
5840no_csum:
5841 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
5842 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5843
5844 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
5845}
5846
5847#define IGB_SET_FLAG(_input, _flag, _result) \
5848 ((_flag <= _result) ? \
5849 ((u32)(_input & _flag) * (_result / _flag)) : \
5850 ((u32)(_input & _flag) / (_flag / _result)))
5851
5852static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
5853{
5854
5855 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
5856 E1000_ADVTXD_DCMD_DEXT |
5857 E1000_ADVTXD_DCMD_IFCS;
5858
5859
5860 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
5861 (E1000_ADVTXD_DCMD_VLE));
5862
5863
5864 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
5865 (E1000_ADVTXD_DCMD_TSE));
5866
5867
5868 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
5869 (E1000_ADVTXD_MAC_TSTAMP));
5870
5871
5872 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
5873
5874 return cmd_type;
5875}
5876
5877static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
5878 union e1000_adv_tx_desc *tx_desc,
5879 u32 tx_flags, unsigned int paylen)
5880{
5881 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
5882
5883
5884 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5885 olinfo_status |= tx_ring->reg_idx << 4;
5886
5887
5888 olinfo_status |= IGB_SET_FLAG(tx_flags,
5889 IGB_TX_FLAGS_CSUM,
5890 (E1000_TXD_POPTS_TXSM << 8));
5891
5892
5893 olinfo_status |= IGB_SET_FLAG(tx_flags,
5894 IGB_TX_FLAGS_IPV4,
5895 (E1000_TXD_POPTS_IXSM << 8));
5896
5897 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
5898}
5899
5900static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5901{
5902 struct net_device *netdev = tx_ring->netdev;
5903
5904 netif_stop_subqueue(netdev, tx_ring->queue_index);
5905
5906
5907
5908
5909
5910 smp_mb();
5911
5912
5913
5914
5915 if (igb_desc_unused(tx_ring) < size)
5916 return -EBUSY;
5917
5918
5919 netif_wake_subqueue(netdev, tx_ring->queue_index);
5920
5921 u64_stats_update_begin(&tx_ring->tx_syncp2);
5922 tx_ring->tx_stats.restart_queue2++;
5923 u64_stats_update_end(&tx_ring->tx_syncp2);
5924
5925 return 0;
5926}
5927
5928static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5929{
5930 if (igb_desc_unused(tx_ring) >= size)
5931 return 0;
5932 return __igb_maybe_stop_tx(tx_ring, size);
5933}
5934
5935static int igb_tx_map(struct igb_ring *tx_ring,
5936 struct igb_tx_buffer *first,
5937 const u8 hdr_len)
5938{
5939 struct sk_buff *skb = first->skb;
5940 struct igb_tx_buffer *tx_buffer;
5941 union e1000_adv_tx_desc *tx_desc;
5942 skb_frag_t *frag;
5943 dma_addr_t dma;
5944 unsigned int data_len, size;
5945 u32 tx_flags = first->tx_flags;
5946 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
5947 u16 i = tx_ring->next_to_use;
5948
5949 tx_desc = IGB_TX_DESC(tx_ring, i);
5950
5951 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
5952
5953 size = skb_headlen(skb);
5954 data_len = skb->data_len;
5955
5956 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
5957
5958 tx_buffer = first;
5959
5960 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
5961 if (dma_mapping_error(tx_ring->dev, dma))
5962 goto dma_error;
5963
5964
5965 dma_unmap_len_set(tx_buffer, len, size);
5966 dma_unmap_addr_set(tx_buffer, dma, dma);
5967
5968 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5969
5970 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
5971 tx_desc->read.cmd_type_len =
5972 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
5973
5974 i++;
5975 tx_desc++;
5976 if (i == tx_ring->count) {
5977 tx_desc = IGB_TX_DESC(tx_ring, 0);
5978 i = 0;
5979 }
5980 tx_desc->read.olinfo_status = 0;
5981
5982 dma += IGB_MAX_DATA_PER_TXD;
5983 size -= IGB_MAX_DATA_PER_TXD;
5984
5985 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5986 }
5987
5988 if (likely(!data_len))
5989 break;
5990
5991 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
5992
5993 i++;
5994 tx_desc++;
5995 if (i == tx_ring->count) {
5996 tx_desc = IGB_TX_DESC(tx_ring, 0);
5997 i = 0;
5998 }
5999 tx_desc->read.olinfo_status = 0;
6000
6001 size = skb_frag_size(frag);
6002 data_len -= size;
6003
6004 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
6005 size, DMA_TO_DEVICE);
6006
6007 tx_buffer = &tx_ring->tx_buffer_info[i];
6008 }
6009
6010
6011 cmd_type |= size | IGB_TXD_DCMD;
6012 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6013
6014 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6015
6016
6017 first->time_stamp = jiffies;
6018
6019 skb_tx_timestamp(skb);
6020
6021
6022
6023
6024
6025
6026
6027
6028 dma_wmb();
6029
6030
6031 first->next_to_watch = tx_desc;
6032
6033 i++;
6034 if (i == tx_ring->count)
6035 i = 0;
6036
6037 tx_ring->next_to_use = i;
6038
6039
6040 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6041
6042 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
6043 writel(i, tx_ring->tail);
6044 }
6045 return 0;
6046
6047dma_error:
6048 dev_err(tx_ring->dev, "TX DMA map failed\n");
6049 tx_buffer = &tx_ring->tx_buffer_info[i];
6050
6051
6052 while (tx_buffer != first) {
6053 if (dma_unmap_len(tx_buffer, len))
6054 dma_unmap_page(tx_ring->dev,
6055 dma_unmap_addr(tx_buffer, dma),
6056 dma_unmap_len(tx_buffer, len),
6057 DMA_TO_DEVICE);
6058 dma_unmap_len_set(tx_buffer, len, 0);
6059
6060 if (i-- == 0)
6061 i += tx_ring->count;
6062 tx_buffer = &tx_ring->tx_buffer_info[i];
6063 }
6064
6065 if (dma_unmap_len(tx_buffer, len))
6066 dma_unmap_single(tx_ring->dev,
6067 dma_unmap_addr(tx_buffer, dma),
6068 dma_unmap_len(tx_buffer, len),
6069 DMA_TO_DEVICE);
6070 dma_unmap_len_set(tx_buffer, len, 0);
6071
6072 dev_kfree_skb_any(tx_buffer->skb);
6073 tx_buffer->skb = NULL;
6074
6075 tx_ring->next_to_use = i;
6076
6077 return -1;
6078}
6079
6080netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
6081 struct igb_ring *tx_ring)
6082{
6083 struct igb_tx_buffer *first;
6084 int tso;
6085 u32 tx_flags = 0;
6086 unsigned short f;
6087 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6088 __be16 protocol = vlan_get_protocol(skb);
6089 u8 hdr_len = 0;
6090
6091
6092
6093
6094
6095
6096
6097 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6098 count += TXD_USE_COUNT(skb_frag_size(
6099 &skb_shinfo(skb)->frags[f]));
6100
6101 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
6102
6103 return NETDEV_TX_BUSY;
6104 }
6105
6106
6107 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6108 first->skb = skb;
6109 first->bytecount = skb->len;
6110 first->gso_segs = 1;
6111
6112 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6113 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6114
6115 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
6116 !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
6117 &adapter->state)) {
6118 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6119 tx_flags |= IGB_TX_FLAGS_TSTAMP;
6120
6121 adapter->ptp_tx_skb = skb_get(skb);
6122 adapter->ptp_tx_start = jiffies;
6123 if (adapter->hw.mac.type == e1000_82576)
6124 schedule_work(&adapter->ptp_tx_work);
6125 } else {
6126 adapter->tx_hwtstamp_skipped++;
6127 }
6128 }
6129
6130 if (skb_vlan_tag_present(skb)) {
6131 tx_flags |= IGB_TX_FLAGS_VLAN;
6132 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
6133 }
6134
6135
6136 first->tx_flags = tx_flags;
6137 first->protocol = protocol;
6138
6139 tso = igb_tso(tx_ring, first, &hdr_len);
6140 if (tso < 0)
6141 goto out_drop;
6142 else if (!tso)
6143 igb_tx_csum(tx_ring, first);
6144
6145 if (igb_tx_map(tx_ring, first, hdr_len))
6146 goto cleanup_tx_tstamp;
6147
6148 return NETDEV_TX_OK;
6149
6150out_drop:
6151 dev_kfree_skb_any(first->skb);
6152 first->skb = NULL;
6153cleanup_tx_tstamp:
6154 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
6155 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6156
6157 dev_kfree_skb_any(adapter->ptp_tx_skb);
6158 adapter->ptp_tx_skb = NULL;
6159 if (adapter->hw.mac.type == e1000_82576)
6160 cancel_work_sync(&adapter->ptp_tx_work);
6161 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
6162 }
6163
6164 return NETDEV_TX_OK;
6165}
6166
6167static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
6168 struct sk_buff *skb)
6169{
6170 unsigned int r_idx = skb->queue_mapping;
6171
6172 if (r_idx >= adapter->num_tx_queues)
6173 r_idx = r_idx % adapter->num_tx_queues;
6174
6175 return adapter->tx_ring[r_idx];
6176}
6177
6178static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
6179 struct net_device *netdev)
6180{
6181 struct igb_adapter *adapter = netdev_priv(netdev);
6182
6183
6184
6185
6186 if (skb_put_padto(skb, 17))
6187 return NETDEV_TX_OK;
6188
6189 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
6190}
6191
6192
6193
6194
6195
6196static void igb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6197{
6198 struct igb_adapter *adapter = netdev_priv(netdev);
6199 struct e1000_hw *hw = &adapter->hw;
6200
6201
6202 adapter->tx_timeout_count++;
6203
6204 if (hw->mac.type >= e1000_82580)
6205 hw->dev_spec._82575.global_device_reset = true;
6206
6207 schedule_work(&adapter->reset_task);
6208 wr32(E1000_EICS,
6209 (adapter->eims_enable_mask & ~adapter->eims_other));
6210}
6211
6212static void igb_reset_task(struct work_struct *work)
6213{
6214 struct igb_adapter *adapter;
6215 adapter = container_of(work, struct igb_adapter, reset_task);
6216
6217 rtnl_lock();
6218
6219 if (test_bit(__IGB_DOWN, &adapter->state) ||
6220 test_bit(__IGB_RESETTING, &adapter->state)) {
6221 rtnl_unlock();
6222 return;
6223 }
6224
6225 igb_dump(adapter);
6226 netdev_err(adapter->netdev, "Reset adapter\n");
6227 igb_reinit_locked(adapter);
6228 rtnl_unlock();
6229}
6230
6231
6232
6233
6234
6235
6236static void igb_get_stats64(struct net_device *netdev,
6237 struct rtnl_link_stats64 *stats)
6238{
6239 struct igb_adapter *adapter = netdev_priv(netdev);
6240
6241 spin_lock(&adapter->stats64_lock);
6242 igb_update_stats(adapter);
6243 memcpy(stats, &adapter->stats64, sizeof(*stats));
6244 spin_unlock(&adapter->stats64_lock);
6245}
6246
6247
6248
6249
6250
6251
6252
6253
6254static int igb_change_mtu(struct net_device *netdev, int new_mtu)
6255{
6256 struct igb_adapter *adapter = netdev_priv(netdev);
6257 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
6258
6259
6260 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
6261 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
6262
6263 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
6264 usleep_range(1000, 2000);
6265
6266
6267 adapter->max_frame_size = max_frame;
6268
6269 if (netif_running(netdev))
6270 igb_down(adapter);
6271
6272 netdev_dbg(netdev, "changing MTU from %d to %d\n",
6273 netdev->mtu, new_mtu);
6274 netdev->mtu = new_mtu;
6275
6276 if (netif_running(netdev))
6277 igb_up(adapter);
6278 else
6279 igb_reset(adapter);
6280
6281 clear_bit(__IGB_RESETTING, &adapter->state);
6282
6283 return 0;
6284}
6285
6286
6287
6288
6289
6290void igb_update_stats(struct igb_adapter *adapter)
6291{
6292 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
6293 struct e1000_hw *hw = &adapter->hw;
6294 struct pci_dev *pdev = adapter->pdev;
6295 u32 reg, mpc;
6296 int i;
6297 u64 bytes, packets;
6298 unsigned int start;
6299 u64 _bytes, _packets;
6300
6301
6302
6303
6304 if (adapter->link_speed == 0)
6305 return;
6306 if (pci_channel_offline(pdev))
6307 return;
6308
6309 bytes = 0;
6310 packets = 0;
6311
6312 rcu_read_lock();
6313 for (i = 0; i < adapter->num_rx_queues; i++) {
6314 struct igb_ring *ring = adapter->rx_ring[i];
6315 u32 rqdpc = rd32(E1000_RQDPC(i));
6316 if (hw->mac.type >= e1000_i210)
6317 wr32(E1000_RQDPC(i), 0);
6318
6319 if (rqdpc) {
6320 ring->rx_stats.drops += rqdpc;
6321 net_stats->rx_fifo_errors += rqdpc;
6322 }
6323
6324 do {
6325 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
6326 _bytes = ring->rx_stats.bytes;
6327 _packets = ring->rx_stats.packets;
6328 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
6329 bytes += _bytes;
6330 packets += _packets;
6331 }
6332
6333 net_stats->rx_bytes = bytes;
6334 net_stats->rx_packets = packets;
6335
6336 bytes = 0;
6337 packets = 0;
6338 for (i = 0; i < adapter->num_tx_queues; i++) {
6339 struct igb_ring *ring = adapter->tx_ring[i];
6340 do {
6341 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
6342 _bytes = ring->tx_stats.bytes;
6343 _packets = ring->tx_stats.packets;
6344 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
6345 bytes += _bytes;
6346 packets += _packets;
6347 }
6348 net_stats->tx_bytes = bytes;
6349 net_stats->tx_packets = packets;
6350 rcu_read_unlock();
6351
6352
6353 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
6354 adapter->stats.gprc += rd32(E1000_GPRC);
6355 adapter->stats.gorc += rd32(E1000_GORCL);
6356 rd32(E1000_GORCH);
6357 adapter->stats.bprc += rd32(E1000_BPRC);
6358 adapter->stats.mprc += rd32(E1000_MPRC);
6359 adapter->stats.roc += rd32(E1000_ROC);
6360
6361 adapter->stats.prc64 += rd32(E1000_PRC64);
6362 adapter->stats.prc127 += rd32(E1000_PRC127);
6363 adapter->stats.prc255 += rd32(E1000_PRC255);
6364 adapter->stats.prc511 += rd32(E1000_PRC511);
6365 adapter->stats.prc1023 += rd32(E1000_PRC1023);
6366 adapter->stats.prc1522 += rd32(E1000_PRC1522);
6367 adapter->stats.symerrs += rd32(E1000_SYMERRS);
6368 adapter->stats.sec += rd32(E1000_SEC);
6369
6370 mpc = rd32(E1000_MPC);
6371 adapter->stats.mpc += mpc;
6372 net_stats->rx_fifo_errors += mpc;
6373 adapter->stats.scc += rd32(E1000_SCC);
6374 adapter->stats.ecol += rd32(E1000_ECOL);
6375 adapter->stats.mcc += rd32(E1000_MCC);
6376 adapter->stats.latecol += rd32(E1000_LATECOL);
6377 adapter->stats.dc += rd32(E1000_DC);
6378 adapter->stats.rlec += rd32(E1000_RLEC);
6379 adapter->stats.xonrxc += rd32(E1000_XONRXC);
6380 adapter->stats.xontxc += rd32(E1000_XONTXC);
6381 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6382 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6383 adapter->stats.fcruc += rd32(E1000_FCRUC);
6384 adapter->stats.gptc += rd32(E1000_GPTC);
6385 adapter->stats.gotc += rd32(E1000_GOTCL);
6386 rd32(E1000_GOTCH);
6387 adapter->stats.rnbc += rd32(E1000_RNBC);
6388 adapter->stats.ruc += rd32(E1000_RUC);
6389 adapter->stats.rfc += rd32(E1000_RFC);
6390 adapter->stats.rjc += rd32(E1000_RJC);
6391 adapter->stats.tor += rd32(E1000_TORH);
6392 adapter->stats.tot += rd32(E1000_TOTH);
6393 adapter->stats.tpr += rd32(E1000_TPR);
6394
6395 adapter->stats.ptc64 += rd32(E1000_PTC64);
6396 adapter->stats.ptc127 += rd32(E1000_PTC127);
6397 adapter->stats.ptc255 += rd32(E1000_PTC255);
6398 adapter->stats.ptc511 += rd32(E1000_PTC511);
6399 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6400 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6401
6402 adapter->stats.mptc += rd32(E1000_MPTC);
6403 adapter->stats.bptc += rd32(E1000_BPTC);
6404
6405 adapter->stats.tpt += rd32(E1000_TPT);
6406 adapter->stats.colc += rd32(E1000_COLC);
6407
6408 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6409
6410 reg = rd32(E1000_CTRL_EXT);
6411 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6412 adapter->stats.rxerrc += rd32(E1000_RXERRC);
6413
6414
6415 if ((hw->mac.type != e1000_i210) &&
6416 (hw->mac.type != e1000_i211))
6417 adapter->stats.tncrs += rd32(E1000_TNCRS);
6418 }
6419
6420 adapter->stats.tsctc += rd32(E1000_TSCTC);
6421 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6422
6423 adapter->stats.iac += rd32(E1000_IAC);
6424 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6425 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6426 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6427 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6428 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6429 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6430 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
6431 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
6432
6433
6434 net_stats->multicast = adapter->stats.mprc;
6435 net_stats->collisions = adapter->stats.colc;
6436
6437
6438
6439
6440
6441
6442 net_stats->rx_errors = adapter->stats.rxerrc +
6443 adapter->stats.crcerrs + adapter->stats.algnerrc +
6444 adapter->stats.ruc + adapter->stats.roc +
6445 adapter->stats.cexterr;
6446 net_stats->rx_length_errors = adapter->stats.ruc +
6447 adapter->stats.roc;
6448 net_stats->rx_crc_errors = adapter->stats.crcerrs;
6449 net_stats->rx_frame_errors = adapter->stats.algnerrc;
6450 net_stats->rx_missed_errors = adapter->stats.mpc;
6451
6452
6453 net_stats->tx_errors = adapter->stats.ecol +
6454 adapter->stats.latecol;
6455 net_stats->tx_aborted_errors = adapter->stats.ecol;
6456 net_stats->tx_window_errors = adapter->stats.latecol;
6457 net_stats->tx_carrier_errors = adapter->stats.tncrs;
6458
6459
6460
6461
6462 adapter->stats.mgptc += rd32(E1000_MGTPTC);
6463 adapter->stats.mgprc += rd32(E1000_MGTPRC);
6464 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
6465
6466
6467 reg = rd32(E1000_MANC);
6468 if (reg & E1000_MANC_EN_BMC2OS) {
6469 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6470 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6471 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6472 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6473 }
6474}
6475
6476static void igb_tsync_interrupt(struct igb_adapter *adapter)
6477{
6478 struct e1000_hw *hw = &adapter->hw;
6479 struct ptp_clock_event event;
6480 struct timespec64 ts;
6481 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
6482
6483 if (tsicr & TSINTR_SYS_WRAP) {
6484 event.type = PTP_CLOCK_PPS;
6485 if (adapter->ptp_caps.pps)
6486 ptp_clock_event(adapter->ptp_clock, &event);
6487 ack |= TSINTR_SYS_WRAP;
6488 }
6489
6490 if (tsicr & E1000_TSICR_TXTS) {
6491
6492 schedule_work(&adapter->ptp_tx_work);
6493 ack |= E1000_TSICR_TXTS;
6494 }
6495
6496 if (tsicr & TSINTR_TT0) {
6497 spin_lock(&adapter->tmreg_lock);
6498 ts = timespec64_add(adapter->perout[0].start,
6499 adapter->perout[0].period);
6500
6501 wr32(E1000_TRGTTIML0, ts.tv_nsec);
6502 wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
6503 tsauxc = rd32(E1000_TSAUXC);
6504 tsauxc |= TSAUXC_EN_TT0;
6505 wr32(E1000_TSAUXC, tsauxc);
6506 adapter->perout[0].start = ts;
6507 spin_unlock(&adapter->tmreg_lock);
6508 ack |= TSINTR_TT0;
6509 }
6510
6511 if (tsicr & TSINTR_TT1) {
6512 spin_lock(&adapter->tmreg_lock);
6513 ts = timespec64_add(adapter->perout[1].start,
6514 adapter->perout[1].period);
6515 wr32(E1000_TRGTTIML1, ts.tv_nsec);
6516 wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
6517 tsauxc = rd32(E1000_TSAUXC);
6518 tsauxc |= TSAUXC_EN_TT1;
6519 wr32(E1000_TSAUXC, tsauxc);
6520 adapter->perout[1].start = ts;
6521 spin_unlock(&adapter->tmreg_lock);
6522 ack |= TSINTR_TT1;
6523 }
6524
6525 if (tsicr & TSINTR_AUTT0) {
6526 nsec = rd32(E1000_AUXSTMPL0);
6527 sec = rd32(E1000_AUXSTMPH0);
6528 event.type = PTP_CLOCK_EXTTS;
6529 event.index = 0;
6530 event.timestamp = sec * 1000000000ULL + nsec;
6531 ptp_clock_event(adapter->ptp_clock, &event);
6532 ack |= TSINTR_AUTT0;
6533 }
6534
6535 if (tsicr & TSINTR_AUTT1) {
6536 nsec = rd32(E1000_AUXSTMPL1);
6537 sec = rd32(E1000_AUXSTMPH1);
6538 event.type = PTP_CLOCK_EXTTS;
6539 event.index = 1;
6540 event.timestamp = sec * 1000000000ULL + nsec;
6541 ptp_clock_event(adapter->ptp_clock, &event);
6542 ack |= TSINTR_AUTT1;
6543 }
6544
6545
6546 wr32(E1000_TSICR, ack);
6547}
6548
6549static irqreturn_t igb_msix_other(int irq, void *data)
6550{
6551 struct igb_adapter *adapter = data;
6552 struct e1000_hw *hw = &adapter->hw;
6553 u32 icr = rd32(E1000_ICR);
6554
6555
6556 if (icr & E1000_ICR_DRSTA)
6557 schedule_work(&adapter->reset_task);
6558
6559 if (icr & E1000_ICR_DOUTSYNC) {
6560
6561 adapter->stats.doosync++;
6562
6563
6564
6565
6566 igb_check_wvbr(adapter);
6567 }
6568
6569
6570 if (icr & E1000_ICR_VMMB)
6571 igb_msg_task(adapter);
6572
6573 if (icr & E1000_ICR_LSC) {
6574 hw->mac.get_link_status = 1;
6575
6576 if (!test_bit(__IGB_DOWN, &adapter->state))
6577 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6578 }
6579
6580 if (icr & E1000_ICR_TS)
6581 igb_tsync_interrupt(adapter);
6582
6583 wr32(E1000_EIMS, adapter->eims_other);
6584
6585 return IRQ_HANDLED;
6586}
6587
6588static void igb_write_itr(struct igb_q_vector *q_vector)
6589{
6590 struct igb_adapter *adapter = q_vector->adapter;
6591 u32 itr_val = q_vector->itr_val & 0x7FFC;
6592
6593 if (!q_vector->set_itr)
6594 return;
6595
6596 if (!itr_val)
6597 itr_val = 0x4;
6598
6599 if (adapter->hw.mac.type == e1000_82575)
6600 itr_val |= itr_val << 16;
6601 else
6602 itr_val |= E1000_EITR_CNT_IGNR;
6603
6604 writel(itr_val, q_vector->itr_register);
6605 q_vector->set_itr = 0;
6606}
6607
6608static irqreturn_t igb_msix_ring(int irq, void *data)
6609{
6610 struct igb_q_vector *q_vector = data;
6611
6612
6613 igb_write_itr(q_vector);
6614
6615 napi_schedule(&q_vector->napi);
6616
6617 return IRQ_HANDLED;
6618}
6619
6620#ifdef CONFIG_IGB_DCA
6621static void igb_update_tx_dca(struct igb_adapter *adapter,
6622 struct igb_ring *tx_ring,
6623 int cpu)
6624{
6625 struct e1000_hw *hw = &adapter->hw;
6626 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
6627
6628 if (hw->mac.type != e1000_82575)
6629 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
6630
6631
6632
6633
6634
6635 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
6636 E1000_DCA_TXCTRL_DATA_RRO_EN |
6637 E1000_DCA_TXCTRL_DESC_DCA_EN;
6638
6639 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
6640}
6641
6642static void igb_update_rx_dca(struct igb_adapter *adapter,
6643 struct igb_ring *rx_ring,
6644 int cpu)
6645{
6646 struct e1000_hw *hw = &adapter->hw;
6647 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
6648
6649 if (hw->mac.type != e1000_82575)
6650 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
6651
6652
6653
6654
6655
6656 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
6657 E1000_DCA_RXCTRL_DESC_DCA_EN;
6658
6659 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
6660}
6661
6662static void igb_update_dca(struct igb_q_vector *q_vector)
6663{
6664 struct igb_adapter *adapter = q_vector->adapter;
6665 int cpu = get_cpu();
6666
6667 if (q_vector->cpu == cpu)
6668 goto out_no_update;
6669
6670 if (q_vector->tx.ring)
6671 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
6672
6673 if (q_vector->rx.ring)
6674 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
6675
6676 q_vector->cpu = cpu;
6677out_no_update:
6678 put_cpu();
6679}
6680
6681static void igb_setup_dca(struct igb_adapter *adapter)
6682{
6683 struct e1000_hw *hw = &adapter->hw;
6684 int i;
6685
6686 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
6687 return;
6688
6689
6690 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
6691
6692 for (i = 0; i < adapter->num_q_vectors; i++) {
6693 adapter->q_vector[i]->cpu = -1;
6694 igb_update_dca(adapter->q_vector[i]);
6695 }
6696}
6697
6698static int __igb_notify_dca(struct device *dev, void *data)
6699{
6700 struct net_device *netdev = dev_get_drvdata(dev);
6701 struct igb_adapter *adapter = netdev_priv(netdev);
6702 struct pci_dev *pdev = adapter->pdev;
6703 struct e1000_hw *hw = &adapter->hw;
6704 unsigned long event = *(unsigned long *)data;
6705
6706 switch (event) {
6707 case DCA_PROVIDER_ADD:
6708
6709 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
6710 break;
6711 if (dca_add_requester(dev) == 0) {
6712 adapter->flags |= IGB_FLAG_DCA_ENABLED;
6713 dev_info(&pdev->dev, "DCA enabled\n");
6714 igb_setup_dca(adapter);
6715 break;
6716 }
6717 fallthrough;
6718 case DCA_PROVIDER_REMOVE:
6719 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
6720
6721
6722
6723 dca_remove_requester(dev);
6724 dev_info(&pdev->dev, "DCA disabled\n");
6725 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
6726 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
6727 }
6728 break;
6729 }
6730
6731 return 0;
6732}
6733
6734static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
6735 void *p)
6736{
6737 int ret_val;
6738
6739 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
6740 __igb_notify_dca);
6741
6742 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
6743}
6744#endif
6745
6746#ifdef CONFIG_PCI_IOV
6747static int igb_vf_configure(struct igb_adapter *adapter, int vf)
6748{
6749 unsigned char mac_addr[ETH_ALEN];
6750
6751 eth_zero_addr(mac_addr);
6752 igb_set_vf_mac(adapter, vf, mac_addr);
6753
6754
6755 adapter->vf_data[vf].spoofchk_enabled = true;
6756
6757
6758 adapter->vf_data[vf].trusted = false;
6759
6760 return 0;
6761}
6762
6763#endif
6764static void igb_ping_all_vfs(struct igb_adapter *adapter)
6765{
6766 struct e1000_hw *hw = &adapter->hw;
6767 u32 ping;
6768 int i;
6769
6770 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
6771 ping = E1000_PF_CONTROL_MSG;
6772 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
6773 ping |= E1000_VT_MSGTYPE_CTS;
6774 igb_write_mbx(hw, &ping, 1, i);
6775 }
6776}
6777
6778static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
6779{
6780 struct e1000_hw *hw = &adapter->hw;
6781 u32 vmolr = rd32(E1000_VMOLR(vf));
6782 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6783
6784 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
6785 IGB_VF_FLAG_MULTI_PROMISC);
6786 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6787
6788 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
6789 vmolr |= E1000_VMOLR_MPME;
6790 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
6791 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
6792 } else {
6793
6794
6795
6796
6797 if (vf_data->num_vf_mc_hashes > 30) {
6798 vmolr |= E1000_VMOLR_MPME;
6799 } else if (vf_data->num_vf_mc_hashes) {
6800 int j;
6801
6802 vmolr |= E1000_VMOLR_ROMPE;
6803 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6804 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6805 }
6806 }
6807
6808 wr32(E1000_VMOLR(vf), vmolr);
6809
6810
6811 if (*msgbuf & E1000_VT_MSGINFO_MASK)
6812 return -EINVAL;
6813
6814 return 0;
6815}
6816
6817static int igb_set_vf_multicasts(struct igb_adapter *adapter,
6818 u32 *msgbuf, u32 vf)
6819{
6820 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
6821 u16 *hash_list = (u16 *)&msgbuf[1];
6822 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6823 int i;
6824
6825
6826
6827
6828
6829 vf_data->num_vf_mc_hashes = n;
6830
6831
6832 if (n > 30)
6833 n = 30;
6834
6835
6836 for (i = 0; i < n; i++)
6837 vf_data->vf_mc_hashes[i] = hash_list[i];
6838
6839
6840 igb_set_rx_mode(adapter->netdev);
6841
6842 return 0;
6843}
6844
6845static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
6846{
6847 struct e1000_hw *hw = &adapter->hw;
6848 struct vf_data_storage *vf_data;
6849 int i, j;
6850
6851 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6852 u32 vmolr = rd32(E1000_VMOLR(i));
6853
6854 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6855
6856 vf_data = &adapter->vf_data[i];
6857
6858 if ((vf_data->num_vf_mc_hashes > 30) ||
6859 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
6860 vmolr |= E1000_VMOLR_MPME;
6861 } else if (vf_data->num_vf_mc_hashes) {
6862 vmolr |= E1000_VMOLR_ROMPE;
6863 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6864 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6865 }
6866 wr32(E1000_VMOLR(i), vmolr);
6867 }
6868}
6869
6870static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
6871{
6872 struct e1000_hw *hw = &adapter->hw;
6873 u32 pool_mask, vlvf_mask, i;
6874
6875
6876 pool_mask = E1000_VLVF_POOLSEL_MASK;
6877 vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
6878
6879
6880 pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
6881 adapter->vfs_allocated_count);
6882
6883
6884 for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
6885 u32 vlvf = rd32(E1000_VLVF(i));
6886 u32 vfta_mask, vid, vfta;
6887
6888
6889 if (!(vlvf & vlvf_mask))
6890 continue;
6891
6892
6893 vlvf ^= vlvf_mask;
6894
6895
6896 if (vlvf & pool_mask)
6897 goto update_vlvfb;
6898
6899
6900 if (vlvf & E1000_VLVF_POOLSEL_MASK)
6901 goto update_vlvf;
6902
6903 vid = vlvf & E1000_VLVF_VLANID_MASK;
6904 vfta_mask = BIT(vid % 32);
6905
6906
6907 vfta = adapter->shadow_vfta[vid / 32];
6908 if (vfta & vfta_mask)
6909 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
6910update_vlvf:
6911
6912 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6913 vlvf &= E1000_VLVF_POOLSEL_MASK;
6914 else
6915 vlvf = 0;
6916update_vlvfb:
6917
6918 wr32(E1000_VLVF(i), vlvf);
6919 }
6920}
6921
6922static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
6923{
6924 u32 vlvf;
6925 int idx;
6926
6927
6928 if (vlan == 0)
6929 return 0;
6930
6931
6932 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
6933 vlvf = rd32(E1000_VLVF(idx));
6934 if ((vlvf & VLAN_VID_MASK) == vlan)
6935 break;
6936 }
6937
6938 return idx;
6939}
6940
6941static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
6942{
6943 struct e1000_hw *hw = &adapter->hw;
6944 u32 bits, pf_id;
6945 int idx;
6946
6947 idx = igb_find_vlvf_entry(hw, vid);
6948 if (!idx)
6949 return;
6950
6951
6952
6953
6954 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
6955 bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
6956 bits &= rd32(E1000_VLVF(idx));
6957
6958
6959 if (!bits) {
6960 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6961 wr32(E1000_VLVF(idx), BIT(pf_id));
6962 else
6963 wr32(E1000_VLVF(idx), 0);
6964 }
6965}
6966
6967static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
6968 bool add, u32 vf)
6969{
6970 int pf_id = adapter->vfs_allocated_count;
6971 struct e1000_hw *hw = &adapter->hw;
6972 int err;
6973
6974
6975
6976
6977
6978
6979 if (add && test_bit(vid, adapter->active_vlans)) {
6980 err = igb_vfta_set(hw, vid, pf_id, true, false);
6981 if (err)
6982 return err;
6983 }
6984
6985 err = igb_vfta_set(hw, vid, vf, add, false);
6986
6987 if (add && !err)
6988 return err;
6989
6990
6991
6992
6993
6994 if (test_bit(vid, adapter->active_vlans) ||
6995 (adapter->flags & IGB_FLAG_VLAN_PROMISC))
6996 igb_update_pf_vlvf(adapter, vid);
6997
6998 return err;
6999}
7000
7001static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
7002{
7003 struct e1000_hw *hw = &adapter->hw;
7004
7005 if (vid)
7006 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
7007 else
7008 wr32(E1000_VMVIR(vf), 0);
7009}
7010
7011static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
7012 u16 vlan, u8 qos)
7013{
7014 int err;
7015
7016 err = igb_set_vf_vlan(adapter, vlan, true, vf);
7017 if (err)
7018 return err;
7019
7020 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
7021 igb_set_vmolr(adapter, vf, !vlan);
7022
7023
7024 if (vlan != adapter->vf_data[vf].pf_vlan)
7025 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7026 false, vf);
7027
7028 adapter->vf_data[vf].pf_vlan = vlan;
7029 adapter->vf_data[vf].pf_qos = qos;
7030 igb_set_vf_vlan_strip(adapter, vf, true);
7031 dev_info(&adapter->pdev->dev,
7032 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
7033 if (test_bit(__IGB_DOWN, &adapter->state)) {
7034 dev_warn(&adapter->pdev->dev,
7035 "The VF VLAN has been set, but the PF device is not up.\n");
7036 dev_warn(&adapter->pdev->dev,
7037 "Bring the PF device up before attempting to use the VF device.\n");
7038 }
7039
7040 return err;
7041}
7042
7043static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
7044{
7045
7046 igb_set_vf_vlan(adapter, 0, true, vf);
7047
7048 igb_set_vmvir(adapter, 0, vf);
7049 igb_set_vmolr(adapter, vf, true);
7050
7051
7052 if (adapter->vf_data[vf].pf_vlan)
7053 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
7054 false, vf);
7055
7056 adapter->vf_data[vf].pf_vlan = 0;
7057 adapter->vf_data[vf].pf_qos = 0;
7058 igb_set_vf_vlan_strip(adapter, vf, false);
7059
7060 return 0;
7061}
7062
7063static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
7064 u16 vlan, u8 qos, __be16 vlan_proto)
7065{
7066 struct igb_adapter *adapter = netdev_priv(netdev);
7067
7068 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
7069 return -EINVAL;
7070
7071 if (vlan_proto != htons(ETH_P_8021Q))
7072 return -EPROTONOSUPPORT;
7073
7074 return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
7075 igb_disable_port_vlan(adapter, vf);
7076}
7077
7078static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7079{
7080 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
7081 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
7082 int ret;
7083
7084 if (adapter->vf_data[vf].pf_vlan)
7085 return -1;
7086
7087
7088 if (!vid && !add)
7089 return 0;
7090
7091 ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
7092 if (!ret)
7093 igb_set_vf_vlan_strip(adapter, vf, !!vid);
7094 return ret;
7095}
7096
7097static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
7098{
7099 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7100
7101
7102 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
7103 vf_data->last_nack = jiffies;
7104
7105
7106 igb_clear_vf_vfta(adapter, vf);
7107 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
7108 igb_set_vmvir(adapter, vf_data->pf_vlan |
7109 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
7110 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
7111 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
7112
7113
7114 adapter->vf_data[vf].num_vf_mc_hashes = 0;
7115
7116
7117 igb_set_rx_mode(adapter->netdev);
7118}
7119
7120static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
7121{
7122 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7123
7124
7125 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7126 eth_zero_addr(vf_mac);
7127
7128
7129 igb_vf_reset(adapter, vf);
7130}
7131
7132static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
7133{
7134 struct e1000_hw *hw = &adapter->hw;
7135 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
7136 u32 reg, msgbuf[3];
7137 u8 *addr = (u8 *)(&msgbuf[1]);
7138
7139
7140 igb_vf_reset(adapter, vf);
7141
7142
7143 igb_set_vf_mac(adapter, vf, vf_mac);
7144
7145
7146 reg = rd32(E1000_VFTE);
7147 wr32(E1000_VFTE, reg | BIT(vf));
7148 reg = rd32(E1000_VFRE);
7149 wr32(E1000_VFRE, reg | BIT(vf));
7150
7151 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
7152
7153
7154 if (!is_zero_ether_addr(vf_mac)) {
7155 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
7156 memcpy(addr, vf_mac, ETH_ALEN);
7157 } else {
7158 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
7159 }
7160 igb_write_mbx(hw, msgbuf, 3, vf);
7161}
7162
7163static void igb_flush_mac_table(struct igb_adapter *adapter)
7164{
7165 struct e1000_hw *hw = &adapter->hw;
7166 int i;
7167
7168 for (i = 0; i < hw->mac.rar_entry_count; i++) {
7169 adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
7170 eth_zero_addr(adapter->mac_table[i].addr);
7171 adapter->mac_table[i].queue = 0;
7172 igb_rar_set_index(adapter, i);
7173 }
7174}
7175
7176static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
7177{
7178 struct e1000_hw *hw = &adapter->hw;
7179
7180 int rar_entries = hw->mac.rar_entry_count -
7181 adapter->vfs_allocated_count;
7182 int i, count = 0;
7183
7184 for (i = 0; i < rar_entries; i++) {
7185
7186 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
7187 continue;
7188
7189
7190 if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
7191 (adapter->mac_table[i].queue != queue))
7192 continue;
7193
7194 count++;
7195 }
7196
7197 return count;
7198}
7199
7200
7201static void igb_set_default_mac_filter(struct igb_adapter *adapter)
7202{
7203 struct igb_mac_addr *mac_table = &adapter->mac_table[0];
7204
7205 ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
7206 mac_table->queue = adapter->vfs_allocated_count;
7207 mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7208
7209 igb_rar_set_index(adapter, 0);
7210}
7211
7212
7213
7214
7215
7216
7217static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
7218 const u8 *addr, const u8 flags)
7219{
7220 if (!(entry->state & IGB_MAC_STATE_IN_USE))
7221 return true;
7222
7223 if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
7224 (flags & IGB_MAC_STATE_SRC_ADDR))
7225 return false;
7226
7227 if (!ether_addr_equal(addr, entry->addr))
7228 return false;
7229
7230 return true;
7231}
7232
7233
7234
7235
7236
7237
7238static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
7239 const u8 *addr, const u8 queue,
7240 const u8 flags)
7241{
7242 struct e1000_hw *hw = &adapter->hw;
7243 int rar_entries = hw->mac.rar_entry_count -
7244 adapter->vfs_allocated_count;
7245 int i;
7246
7247 if (is_zero_ether_addr(addr))
7248 return -EINVAL;
7249
7250
7251
7252
7253
7254 for (i = 0; i < rar_entries; i++) {
7255 if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
7256 addr, flags))
7257 continue;
7258
7259 ether_addr_copy(adapter->mac_table[i].addr, addr);
7260 adapter->mac_table[i].queue = queue;
7261 adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;
7262
7263 igb_rar_set_index(adapter, i);
7264 return i;
7265 }
7266
7267 return -ENOSPC;
7268}
7269
7270static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7271 const u8 queue)
7272{
7273 return igb_add_mac_filter_flags(adapter, addr, queue, 0);
7274}
7275
7276
7277
7278
7279
7280
7281
7282static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
7283 const u8 *addr, const u8 queue,
7284 const u8 flags)
7285{
7286 struct e1000_hw *hw = &adapter->hw;
7287 int rar_entries = hw->mac.rar_entry_count -
7288 adapter->vfs_allocated_count;
7289 int i;
7290
7291 if (is_zero_ether_addr(addr))
7292 return -EINVAL;
7293
7294
7295
7296
7297
7298 for (i = 0; i < rar_entries; i++) {
7299 if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
7300 continue;
7301 if ((adapter->mac_table[i].state & flags) != flags)
7302 continue;
7303 if (adapter->mac_table[i].queue != queue)
7304 continue;
7305 if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
7306 continue;
7307
7308
7309
7310
7311 if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
7312 adapter->mac_table[i].state =
7313 IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
7314 adapter->mac_table[i].queue =
7315 adapter->vfs_allocated_count;
7316 } else {
7317 adapter->mac_table[i].state = 0;
7318 adapter->mac_table[i].queue = 0;
7319 eth_zero_addr(adapter->mac_table[i].addr);
7320 }
7321
7322 igb_rar_set_index(adapter, i);
7323 return 0;
7324 }
7325
7326 return -ENOENT;
7327}
7328
7329static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
7330 const u8 queue)
7331{
7332 return igb_del_mac_filter_flags(adapter, addr, queue, 0);
7333}
7334
7335int igb_add_mac_steering_filter(struct igb_adapter *adapter,
7336 const u8 *addr, u8 queue, u8 flags)
7337{
7338 struct e1000_hw *hw = &adapter->hw;
7339
7340
7341
7342
7343 if (hw->mac.type != e1000_i210)
7344 return -EOPNOTSUPP;
7345
7346 return igb_add_mac_filter_flags(adapter, addr, queue,
7347 IGB_MAC_STATE_QUEUE_STEERING | flags);
7348}
7349
7350int igb_del_mac_steering_filter(struct igb_adapter *adapter,
7351 const u8 *addr, u8 queue, u8 flags)
7352{
7353 return igb_del_mac_filter_flags(adapter, addr, queue,
7354 IGB_MAC_STATE_QUEUE_STEERING | flags);
7355}
7356
7357static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
7358{
7359 struct igb_adapter *adapter = netdev_priv(netdev);
7360 int ret;
7361
7362 ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7363
7364 return min_t(int, ret, 0);
7365}
7366
7367static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
7368{
7369 struct igb_adapter *adapter = netdev_priv(netdev);
7370
7371 igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);
7372
7373 return 0;
7374}
7375
7376static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
7377 const u32 info, const u8 *addr)
7378{
7379 struct pci_dev *pdev = adapter->pdev;
7380 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7381 struct list_head *pos;
7382 struct vf_mac_filter *entry = NULL;
7383 int ret = 0;
7384
7385 switch (info) {
7386 case E1000_VF_MAC_FILTER_CLR:
7387
7388 list_for_each(pos, &adapter->vf_macs.l) {
7389 entry = list_entry(pos, struct vf_mac_filter, l);
7390 if (entry->vf == vf) {
7391 entry->vf = -1;
7392 entry->free = true;
7393 igb_del_mac_filter(adapter, entry->vf_mac, vf);
7394 }
7395 }
7396 break;
7397 case E1000_VF_MAC_FILTER_ADD:
7398 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7399 !vf_data->trusted) {
7400 dev_warn(&pdev->dev,
7401 "VF %d requested MAC filter but is administratively denied\n",
7402 vf);
7403 return -EINVAL;
7404 }
7405 if (!is_valid_ether_addr(addr)) {
7406 dev_warn(&pdev->dev,
7407 "VF %d attempted to set invalid MAC filter\n",
7408 vf);
7409 return -EINVAL;
7410 }
7411
7412
7413 list_for_each(pos, &adapter->vf_macs.l) {
7414 entry = list_entry(pos, struct vf_mac_filter, l);
7415 if (entry->free)
7416 break;
7417 }
7418
7419 if (entry && entry->free) {
7420 entry->free = false;
7421 entry->vf = vf;
7422 ether_addr_copy(entry->vf_mac, addr);
7423
7424 ret = igb_add_mac_filter(adapter, addr, vf);
7425 ret = min_t(int, ret, 0);
7426 } else {
7427 ret = -ENOSPC;
7428 }
7429
7430 if (ret == -ENOSPC)
7431 dev_warn(&pdev->dev,
7432 "VF %d has requested MAC filter but there is no space for it\n",
7433 vf);
7434 break;
7435 default:
7436 ret = -EINVAL;
7437 break;
7438 }
7439
7440 return ret;
7441}
7442
7443static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
7444{
7445 struct pci_dev *pdev = adapter->pdev;
7446 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7447 u32 info = msg[0] & E1000_VT_MSGINFO_MASK;
7448
7449
7450
7451
7452 unsigned char *addr = (unsigned char *)&msg[1];
7453 int ret = 0;
7454
7455 if (!info) {
7456 if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
7457 !vf_data->trusted) {
7458 dev_warn(&pdev->dev,
7459 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
7460 vf);
7461 return -EINVAL;
7462 }
7463
7464 if (!is_valid_ether_addr(addr)) {
7465 dev_warn(&pdev->dev,
7466 "VF %d attempted to set invalid MAC\n",
7467 vf);
7468 return -EINVAL;
7469 }
7470
7471 ret = igb_set_vf_mac(adapter, vf, addr);
7472 } else {
7473 ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
7474 }
7475
7476 return ret;
7477}
7478
7479static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
7480{
7481 struct e1000_hw *hw = &adapter->hw;
7482 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7483 u32 msg = E1000_VT_MSGTYPE_NACK;
7484
7485
7486 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
7487 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
7488 igb_write_mbx(hw, &msg, 1, vf);
7489 vf_data->last_nack = jiffies;
7490 }
7491}
7492
7493static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
7494{
7495 struct pci_dev *pdev = adapter->pdev;
7496 u32 msgbuf[E1000_VFMAILBOX_SIZE];
7497 struct e1000_hw *hw = &adapter->hw;
7498 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7499 s32 retval;
7500
7501 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);
7502
7503 if (retval) {
7504
7505 dev_err(&pdev->dev, "Error receiving message from VF\n");
7506 vf_data->flags &= ~IGB_VF_FLAG_CTS;
7507 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7508 goto unlock;
7509 goto out;
7510 }
7511
7512
7513 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
7514 goto unlock;
7515
7516
7517
7518
7519 if (msgbuf[0] == E1000_VF_RESET) {
7520
7521 igb_vf_reset_msg(adapter, vf);
7522 return;
7523 }
7524
7525 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
7526 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
7527 goto unlock;
7528 retval = -1;
7529 goto out;
7530 }
7531
7532 switch ((msgbuf[0] & 0xFFFF)) {
7533 case E1000_VF_SET_MAC_ADDR:
7534 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
7535 break;
7536 case E1000_VF_SET_PROMISC:
7537 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
7538 break;
7539 case E1000_VF_SET_MULTICAST:
7540 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
7541 break;
7542 case E1000_VF_SET_LPE:
7543 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
7544 break;
7545 case E1000_VF_SET_VLAN:
7546 retval = -1;
7547 if (vf_data->pf_vlan)
7548 dev_warn(&pdev->dev,
7549 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
7550 vf);
7551 else
7552 retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
7553 break;
7554 default:
7555 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
7556 retval = -1;
7557 break;
7558 }
7559
7560 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
7561out:
7562
7563 if (retval)
7564 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
7565 else
7566 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
7567
7568
7569 igb_write_mbx(hw, msgbuf, 1, vf);
7570 return;
7571
7572unlock:
7573 igb_unlock_mbx(hw, vf);
7574}
7575
7576static void igb_msg_task(struct igb_adapter *adapter)
7577{
7578 struct e1000_hw *hw = &adapter->hw;
7579 u32 vf;
7580
7581 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
7582
7583 if (!igb_check_for_rst(hw, vf))
7584 igb_vf_reset_event(adapter, vf);
7585
7586
7587 if (!igb_check_for_msg(hw, vf))
7588 igb_rcv_msg_from_vf(adapter, vf);
7589
7590
7591 if (!igb_check_for_ack(hw, vf))
7592 igb_rcv_ack_from_vf(adapter, vf);
7593 }
7594}
7595
7596
7597
7598
7599
7600
7601
7602
7603
7604
7605
7606
7607static void igb_set_uta(struct igb_adapter *adapter, bool set)
7608{
7609 struct e1000_hw *hw = &adapter->hw;
7610 u32 uta = set ? ~0 : 0;
7611 int i;
7612
7613
7614 if (!adapter->vfs_allocated_count)
7615 return;
7616
7617 for (i = hw->mac.uta_reg_count; i--;)
7618 array_wr32(E1000_UTA, i, uta);
7619}
7620
7621
7622
7623
7624
7625
7626static irqreturn_t igb_intr_msi(int irq, void *data)
7627{
7628 struct igb_adapter *adapter = data;
7629 struct igb_q_vector *q_vector = adapter->q_vector[0];
7630 struct e1000_hw *hw = &adapter->hw;
7631
7632 u32 icr = rd32(E1000_ICR);
7633
7634 igb_write_itr(q_vector);
7635
7636 if (icr & E1000_ICR_DRSTA)
7637 schedule_work(&adapter->reset_task);
7638
7639 if (icr & E1000_ICR_DOUTSYNC) {
7640
7641 adapter->stats.doosync++;
7642 }
7643
7644 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7645 hw->mac.get_link_status = 1;
7646 if (!test_bit(__IGB_DOWN, &adapter->state))
7647 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7648 }
7649
7650 if (icr & E1000_ICR_TS)
7651 igb_tsync_interrupt(adapter);
7652
7653 napi_schedule(&q_vector->napi);
7654
7655 return IRQ_HANDLED;
7656}
7657
7658
7659
7660
7661
7662
7663static irqreturn_t igb_intr(int irq, void *data)
7664{
7665 struct igb_adapter *adapter = data;
7666 struct igb_q_vector *q_vector = adapter->q_vector[0];
7667 struct e1000_hw *hw = &adapter->hw;
7668
7669
7670
7671 u32 icr = rd32(E1000_ICR);
7672
7673
7674
7675
7676 if (!(icr & E1000_ICR_INT_ASSERTED))
7677 return IRQ_NONE;
7678
7679 igb_write_itr(q_vector);
7680
7681 if (icr & E1000_ICR_DRSTA)
7682 schedule_work(&adapter->reset_task);
7683
7684 if (icr & E1000_ICR_DOUTSYNC) {
7685
7686 adapter->stats.doosync++;
7687 }
7688
7689 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
7690 hw->mac.get_link_status = 1;
7691
7692 if (!test_bit(__IGB_DOWN, &adapter->state))
7693 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7694 }
7695
7696 if (icr & E1000_ICR_TS)
7697 igb_tsync_interrupt(adapter);
7698
7699 napi_schedule(&q_vector->napi);
7700
7701 return IRQ_HANDLED;
7702}
7703
7704static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
7705{
7706 struct igb_adapter *adapter = q_vector->adapter;
7707 struct e1000_hw *hw = &adapter->hw;
7708
7709 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
7710 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
7711 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
7712 igb_set_itr(q_vector);
7713 else
7714 igb_update_ring_itr(q_vector);
7715 }
7716
7717 if (!test_bit(__IGB_DOWN, &adapter->state)) {
7718 if (adapter->flags & IGB_FLAG_HAS_MSIX)
7719 wr32(E1000_EIMS, q_vector->eims_value);
7720 else
7721 igb_irq_enable(adapter);
7722 }
7723}
7724
7725
7726
7727
7728
7729
7730static int igb_poll(struct napi_struct *napi, int budget)
7731{
7732 struct igb_q_vector *q_vector = container_of(napi,
7733 struct igb_q_vector,
7734 napi);
7735 bool clean_complete = true;
7736 int work_done = 0;
7737
7738#ifdef CONFIG_IGB_DCA
7739 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
7740 igb_update_dca(q_vector);
7741#endif
7742 if (q_vector->tx.ring)
7743 clean_complete = igb_clean_tx_irq(q_vector, budget);
7744
7745 if (q_vector->rx.ring) {
7746 int cleaned = igb_clean_rx_irq(q_vector, budget);
7747
7748 work_done += cleaned;
7749 if (cleaned >= budget)
7750 clean_complete = false;
7751 }
7752
7753
7754 if (!clean_complete)
7755 return budget;
7756
7757
7758
7759
7760 if (likely(napi_complete_done(napi, work_done)))
7761 igb_ring_irq_enable(q_vector);
7762
7763 return min(work_done, budget - 1);
7764}
7765
7766
7767
7768
7769
7770
7771
7772
7773static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
7774{
7775 struct igb_adapter *adapter = q_vector->adapter;
7776 struct igb_ring *tx_ring = q_vector->tx.ring;
7777 struct igb_tx_buffer *tx_buffer;
7778 union e1000_adv_tx_desc *tx_desc;
7779 unsigned int total_bytes = 0, total_packets = 0;
7780 unsigned int budget = q_vector->tx.work_limit;
7781 unsigned int i = tx_ring->next_to_clean;
7782
7783 if (test_bit(__IGB_DOWN, &adapter->state))
7784 return true;
7785
7786 tx_buffer = &tx_ring->tx_buffer_info[i];
7787 tx_desc = IGB_TX_DESC(tx_ring, i);
7788 i -= tx_ring->count;
7789
7790 do {
7791 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
7792
7793
7794 if (!eop_desc)
7795 break;
7796
7797
7798 smp_rmb();
7799
7800
7801 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
7802 break;
7803
7804
7805 tx_buffer->next_to_watch = NULL;
7806
7807
7808 total_bytes += tx_buffer->bytecount;
7809 total_packets += tx_buffer->gso_segs;
7810
7811
7812 napi_consume_skb(tx_buffer->skb, napi_budget);
7813
7814
7815 dma_unmap_single(tx_ring->dev,
7816 dma_unmap_addr(tx_buffer, dma),
7817 dma_unmap_len(tx_buffer, len),
7818 DMA_TO_DEVICE);
7819
7820
7821 dma_unmap_len_set(tx_buffer, len, 0);
7822
7823
7824 while (tx_desc != eop_desc) {
7825 tx_buffer++;
7826 tx_desc++;
7827 i++;
7828 if (unlikely(!i)) {
7829 i -= tx_ring->count;
7830 tx_buffer = tx_ring->tx_buffer_info;
7831 tx_desc = IGB_TX_DESC(tx_ring, 0);
7832 }
7833
7834
7835 if (dma_unmap_len(tx_buffer, len)) {
7836 dma_unmap_page(tx_ring->dev,
7837 dma_unmap_addr(tx_buffer, dma),
7838 dma_unmap_len(tx_buffer, len),
7839 DMA_TO_DEVICE);
7840 dma_unmap_len_set(tx_buffer, len, 0);
7841 }
7842 }
7843
7844
7845 tx_buffer++;
7846 tx_desc++;
7847 i++;
7848 if (unlikely(!i)) {
7849 i -= tx_ring->count;
7850 tx_buffer = tx_ring->tx_buffer_info;
7851 tx_desc = IGB_TX_DESC(tx_ring, 0);
7852 }
7853
7854
7855 prefetch(tx_desc);
7856
7857
7858 budget--;
7859 } while (likely(budget));
7860
7861 netdev_tx_completed_queue(txring_txq(tx_ring),
7862 total_packets, total_bytes);
7863 i += tx_ring->count;
7864 tx_ring->next_to_clean = i;
7865 u64_stats_update_begin(&tx_ring->tx_syncp);
7866 tx_ring->tx_stats.bytes += total_bytes;
7867 tx_ring->tx_stats.packets += total_packets;
7868 u64_stats_update_end(&tx_ring->tx_syncp);
7869 q_vector->tx.total_bytes += total_bytes;
7870 q_vector->tx.total_packets += total_packets;
7871
7872 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
7873 struct e1000_hw *hw = &adapter->hw;
7874
7875
7876
7877
7878 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
7879 if (tx_buffer->next_to_watch &&
7880 time_after(jiffies, tx_buffer->time_stamp +
7881 (adapter->tx_timeout_factor * HZ)) &&
7882 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
7883
7884
7885 dev_err(tx_ring->dev,
7886 "Detected Tx Unit Hang\n"
7887 " Tx Queue <%d>\n"
7888 " TDH <%x>\n"
7889 " TDT <%x>\n"
7890 " next_to_use <%x>\n"
7891 " next_to_clean <%x>\n"
7892 "buffer_info[next_to_clean]\n"
7893 " time_stamp <%lx>\n"
7894 " next_to_watch <%p>\n"
7895 " jiffies <%lx>\n"
7896 " desc.status <%x>\n",
7897 tx_ring->queue_index,
7898 rd32(E1000_TDH(tx_ring->reg_idx)),
7899 readl(tx_ring->tail),
7900 tx_ring->next_to_use,
7901 tx_ring->next_to_clean,
7902 tx_buffer->time_stamp,
7903 tx_buffer->next_to_watch,
7904 jiffies,
7905 tx_buffer->next_to_watch->wb.status);
7906 netif_stop_subqueue(tx_ring->netdev,
7907 tx_ring->queue_index);
7908
7909
7910 return true;
7911 }
7912 }
7913
7914#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
7915 if (unlikely(total_packets &&
7916 netif_carrier_ok(tx_ring->netdev) &&
7917 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
7918
7919
7920
7921 smp_mb();
7922 if (__netif_subqueue_stopped(tx_ring->netdev,
7923 tx_ring->queue_index) &&
7924 !(test_bit(__IGB_DOWN, &adapter->state))) {
7925 netif_wake_subqueue(tx_ring->netdev,
7926 tx_ring->queue_index);
7927
7928 u64_stats_update_begin(&tx_ring->tx_syncp);
7929 tx_ring->tx_stats.restart_queue++;
7930 u64_stats_update_end(&tx_ring->tx_syncp);
7931 }
7932 }
7933
7934 return !!budget;
7935}
7936
7937
7938
7939
7940
7941
7942
7943
7944static void igb_reuse_rx_page(struct igb_ring *rx_ring,
7945 struct igb_rx_buffer *old_buff)
7946{
7947 struct igb_rx_buffer *new_buff;
7948 u16 nta = rx_ring->next_to_alloc;
7949
7950 new_buff = &rx_ring->rx_buffer_info[nta];
7951
7952
7953 nta++;
7954 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
7955
7956
7957
7958
7959
7960 new_buff->dma = old_buff->dma;
7961 new_buff->page = old_buff->page;
7962 new_buff->page_offset = old_buff->page_offset;
7963 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
7964}
7965
7966static inline bool igb_page_is_reserved(struct page *page)
7967{
7968 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
7969}
7970
7971static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
7972{
7973 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
7974 struct page *page = rx_buffer->page;
7975
7976
7977 if (unlikely(igb_page_is_reserved(page)))
7978 return false;
7979
7980#if (PAGE_SIZE < 8192)
7981
7982 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
7983 return false;
7984#else
7985#define IGB_LAST_OFFSET \
7986 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
7987
7988 if (rx_buffer->page_offset > IGB_LAST_OFFSET)
7989 return false;
7990#endif
7991
7992
7993
7994
7995
7996 if (unlikely(!pagecnt_bias)) {
7997 page_ref_add(page, USHRT_MAX);
7998 rx_buffer->pagecnt_bias = USHRT_MAX;
7999 }
8000
8001 return true;
8002}
8003
8004
8005
8006
8007
8008
8009
8010
8011
8012
8013static void igb_add_rx_frag(struct igb_ring *rx_ring,
8014 struct igb_rx_buffer *rx_buffer,
8015 struct sk_buff *skb,
8016 unsigned int size)
8017{
8018#if (PAGE_SIZE < 8192)
8019 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8020#else
8021 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
8022 SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
8023 SKB_DATA_ALIGN(size);
8024#endif
8025 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
8026 rx_buffer->page_offset, size, truesize);
8027#if (PAGE_SIZE < 8192)
8028 rx_buffer->page_offset ^= truesize;
8029#else
8030 rx_buffer->page_offset += truesize;
8031#endif
8032}
8033
8034static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
8035 struct igb_rx_buffer *rx_buffer,
8036 union e1000_adv_rx_desc *rx_desc,
8037 unsigned int size)
8038{
8039 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8040#if (PAGE_SIZE < 8192)
8041 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8042#else
8043 unsigned int truesize = SKB_DATA_ALIGN(size);
8044#endif
8045 unsigned int headlen;
8046 struct sk_buff *skb;
8047
8048
8049 prefetch(va);
8050#if L1_CACHE_BYTES < 128
8051 prefetch(va + L1_CACHE_BYTES);
8052#endif
8053
8054
8055 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
8056 if (unlikely(!skb))
8057 return NULL;
8058
8059 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
8060 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
8061 va += IGB_TS_HDR_LEN;
8062 size -= IGB_TS_HDR_LEN;
8063 }
8064
8065
8066 headlen = size;
8067 if (headlen > IGB_RX_HDR_LEN)
8068 headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);
8069
8070
8071 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
8072
8073
8074 size -= headlen;
8075 if (size) {
8076 skb_add_rx_frag(skb, 0, rx_buffer->page,
8077 (va + headlen) - page_address(rx_buffer->page),
8078 size, truesize);
8079#if (PAGE_SIZE < 8192)
8080 rx_buffer->page_offset ^= truesize;
8081#else
8082 rx_buffer->page_offset += truesize;
8083#endif
8084 } else {
8085 rx_buffer->pagecnt_bias++;
8086 }
8087
8088 return skb;
8089}
8090
8091static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
8092 struct igb_rx_buffer *rx_buffer,
8093 union e1000_adv_rx_desc *rx_desc,
8094 unsigned int size)
8095{
8096 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8097#if (PAGE_SIZE < 8192)
8098 unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
8099#else
8100 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
8101 SKB_DATA_ALIGN(IGB_SKB_PAD + size);
8102#endif
8103 struct sk_buff *skb;
8104
8105
8106 prefetch(va);
8107#if L1_CACHE_BYTES < 128
8108 prefetch(va + L1_CACHE_BYTES);
8109#endif
8110
8111
8112 skb = build_skb(va - IGB_SKB_PAD, truesize);
8113 if (unlikely(!skb))
8114 return NULL;
8115
8116
8117 skb_reserve(skb, IGB_SKB_PAD);
8118 __skb_put(skb, size);
8119
8120
8121 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
8122 igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
8123 __skb_pull(skb, IGB_TS_HDR_LEN);
8124 }
8125
8126
8127#if (PAGE_SIZE < 8192)
8128 rx_buffer->page_offset ^= truesize;
8129#else
8130 rx_buffer->page_offset += truesize;
8131#endif
8132
8133 return skb;
8134}
8135
8136static inline void igb_rx_checksum(struct igb_ring *ring,
8137 union e1000_adv_rx_desc *rx_desc,
8138 struct sk_buff *skb)
8139{
8140 skb_checksum_none_assert(skb);
8141
8142
8143 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
8144 return;
8145
8146
8147 if (!(ring->netdev->features & NETIF_F_RXCSUM))
8148 return;
8149
8150
8151 if (igb_test_staterr(rx_desc,
8152 E1000_RXDEXT_STATERR_TCPE |
8153 E1000_RXDEXT_STATERR_IPE)) {
8154
8155
8156
8157
8158 if (!((skb->len == 60) &&
8159 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
8160 u64_stats_update_begin(&ring->rx_syncp);
8161 ring->rx_stats.csum_err++;
8162 u64_stats_update_end(&ring->rx_syncp);
8163 }
8164
8165 return;
8166 }
8167
8168 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
8169 E1000_RXD_STAT_UDPCS))
8170 skb->ip_summed = CHECKSUM_UNNECESSARY;
8171
8172 dev_dbg(ring->dev, "cksum success: bits %08X\n",
8173 le32_to_cpu(rx_desc->wb.upper.status_error));
8174}
8175
8176static inline void igb_rx_hash(struct igb_ring *ring,
8177 union e1000_adv_rx_desc *rx_desc,
8178 struct sk_buff *skb)
8179{
8180 if (ring->netdev->features & NETIF_F_RXHASH)
8181 skb_set_hash(skb,
8182 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
8183 PKT_HASH_TYPE_L3);
8184}
8185
8186
8187
8188
8189
8190
8191
8192
8193
8194
8195
8196
8197static bool igb_is_non_eop(struct igb_ring *rx_ring,
8198 union e1000_adv_rx_desc *rx_desc)
8199{
8200 u32 ntc = rx_ring->next_to_clean + 1;
8201
8202
8203 ntc = (ntc < rx_ring->count) ? ntc : 0;
8204 rx_ring->next_to_clean = ntc;
8205
8206 prefetch(IGB_RX_DESC(rx_ring, ntc));
8207
8208 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
8209 return false;
8210
8211 return true;
8212}
8213
8214
8215
8216
8217
8218
8219
8220
8221
8222
8223
8224
8225
8226
8227
8228static bool igb_cleanup_headers(struct igb_ring *rx_ring,
8229 union e1000_adv_rx_desc *rx_desc,
8230 struct sk_buff *skb)
8231{
8232 if (unlikely((igb_test_staterr(rx_desc,
8233 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
8234 struct net_device *netdev = rx_ring->netdev;
8235 if (!(netdev->features & NETIF_F_RXALL)) {
8236 dev_kfree_skb_any(skb);
8237 return true;
8238 }
8239 }
8240
8241
8242 if (eth_skb_pad(skb))
8243 return true;
8244
8245 return false;
8246}
8247
8248
8249
8250
8251
8252
8253
8254
8255
8256
8257
8258static void igb_process_skb_fields(struct igb_ring *rx_ring,
8259 union e1000_adv_rx_desc *rx_desc,
8260 struct sk_buff *skb)
8261{
8262 struct net_device *dev = rx_ring->netdev;
8263
8264 igb_rx_hash(rx_ring, rx_desc, skb);
8265
8266 igb_rx_checksum(rx_ring, rx_desc, skb);
8267
8268 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
8269 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
8270 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
8271
8272 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
8273 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
8274 u16 vid;
8275
8276 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
8277 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
8278 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
8279 else
8280 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
8281
8282 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
8283 }
8284
8285 skb_record_rx_queue(skb, rx_ring->queue_index);
8286
8287 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
8288}
8289
8290static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
8291 const unsigned int size)
8292{
8293 struct igb_rx_buffer *rx_buffer;
8294
8295 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
8296 prefetchw(rx_buffer->page);
8297
8298
8299 dma_sync_single_range_for_cpu(rx_ring->dev,
8300 rx_buffer->dma,
8301 rx_buffer->page_offset,
8302 size,
8303 DMA_FROM_DEVICE);
8304
8305 rx_buffer->pagecnt_bias--;
8306
8307 return rx_buffer;
8308}
8309
8310static void igb_put_rx_buffer(struct igb_ring *rx_ring,
8311 struct igb_rx_buffer *rx_buffer)
8312{
8313 if (igb_can_reuse_rx_page(rx_buffer)) {
8314
8315 igb_reuse_rx_page(rx_ring, rx_buffer);
8316 } else {
8317
8318
8319
8320 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
8321 igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
8322 IGB_RX_DMA_ATTR);
8323 __page_frag_cache_drain(rx_buffer->page,
8324 rx_buffer->pagecnt_bias);
8325 }
8326
8327
8328 rx_buffer->page = NULL;
8329}
8330
8331static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
8332{
8333 struct igb_ring *rx_ring = q_vector->rx.ring;
8334 struct sk_buff *skb = rx_ring->skb;
8335 unsigned int total_bytes = 0, total_packets = 0;
8336 u16 cleaned_count = igb_desc_unused(rx_ring);
8337
8338 while (likely(total_packets < budget)) {
8339 union e1000_adv_rx_desc *rx_desc;
8340 struct igb_rx_buffer *rx_buffer;
8341 unsigned int size;
8342
8343
8344 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
8345 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8346 cleaned_count = 0;
8347 }
8348
8349 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
8350 size = le16_to_cpu(rx_desc->wb.upper.length);
8351 if (!size)
8352 break;
8353
8354
8355
8356
8357
8358 dma_rmb();
8359
8360 rx_buffer = igb_get_rx_buffer(rx_ring, size);
8361
8362
8363 if (skb)
8364 igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
8365 else if (ring_uses_build_skb(rx_ring))
8366 skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
8367 else
8368 skb = igb_construct_skb(rx_ring, rx_buffer,
8369 rx_desc, size);
8370
8371
8372 if (!skb) {
8373 rx_ring->rx_stats.alloc_failed++;
8374 rx_buffer->pagecnt_bias++;
8375 break;
8376 }
8377
8378 igb_put_rx_buffer(rx_ring, rx_buffer);
8379 cleaned_count++;
8380
8381
8382 if (igb_is_non_eop(rx_ring, rx_desc))
8383 continue;
8384
8385
8386 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
8387 skb = NULL;
8388 continue;
8389 }
8390
8391
8392 total_bytes += skb->len;
8393
8394
8395 igb_process_skb_fields(rx_ring, rx_desc, skb);
8396
8397 napi_gro_receive(&q_vector->napi, skb);
8398
8399
8400 skb = NULL;
8401
8402
8403 total_packets++;
8404 }
8405
8406
8407 rx_ring->skb = skb;
8408
8409 u64_stats_update_begin(&rx_ring->rx_syncp);
8410 rx_ring->rx_stats.packets += total_packets;
8411 rx_ring->rx_stats.bytes += total_bytes;
8412 u64_stats_update_end(&rx_ring->rx_syncp);
8413 q_vector->rx.total_packets += total_packets;
8414 q_vector->rx.total_bytes += total_bytes;
8415
8416 if (cleaned_count)
8417 igb_alloc_rx_buffers(rx_ring, cleaned_count);
8418
8419 return total_packets;
8420}
8421
8422static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
8423{
8424 return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
8425}
8426
8427static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
8428 struct igb_rx_buffer *bi)
8429{
8430 struct page *page = bi->page;
8431 dma_addr_t dma;
8432
8433
8434 if (likely(page))
8435 return true;
8436
8437
8438 page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
8439 if (unlikely(!page)) {
8440 rx_ring->rx_stats.alloc_failed++;
8441 return false;
8442 }
8443
8444
8445 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
8446 igb_rx_pg_size(rx_ring),
8447 DMA_FROM_DEVICE,
8448 IGB_RX_DMA_ATTR);
8449
8450
8451
8452
8453 if (dma_mapping_error(rx_ring->dev, dma)) {
8454 __free_pages(page, igb_rx_pg_order(rx_ring));
8455
8456 rx_ring->rx_stats.alloc_failed++;
8457 return false;
8458 }
8459
8460 bi->dma = dma;
8461 bi->page = page;
8462 bi->page_offset = igb_rx_offset(rx_ring);
8463 bi->pagecnt_bias = 1;
8464
8465 return true;
8466}
8467
8468
8469
8470
8471
8472void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
8473{
8474 union e1000_adv_rx_desc *rx_desc;
8475 struct igb_rx_buffer *bi;
8476 u16 i = rx_ring->next_to_use;
8477 u16 bufsz;
8478
8479
8480 if (!cleaned_count)
8481 return;
8482
8483 rx_desc = IGB_RX_DESC(rx_ring, i);
8484 bi = &rx_ring->rx_buffer_info[i];
8485 i -= rx_ring->count;
8486
8487 bufsz = igb_rx_bufsz(rx_ring);
8488
8489 do {
8490 if (!igb_alloc_mapped_page(rx_ring, bi))
8491 break;
8492
8493
8494 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
8495 bi->page_offset, bufsz,
8496 DMA_FROM_DEVICE);
8497
8498
8499
8500
8501 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
8502
8503 rx_desc++;
8504 bi++;
8505 i++;
8506 if (unlikely(!i)) {
8507 rx_desc = IGB_RX_DESC(rx_ring, 0);
8508 bi = rx_ring->rx_buffer_info;
8509 i -= rx_ring->count;
8510 }
8511
8512
8513 rx_desc->wb.upper.length = 0;
8514
8515 cleaned_count--;
8516 } while (cleaned_count);
8517
8518 i += rx_ring->count;
8519
8520 if (rx_ring->next_to_use != i) {
8521
8522 rx_ring->next_to_use = i;
8523
8524
8525 rx_ring->next_to_alloc = i;
8526
8527
8528
8529
8530
8531
8532 dma_wmb();
8533 writel(i, rx_ring->tail);
8534 }
8535}
8536
8537
8538
8539
8540
8541
8542
8543static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8544{
8545 struct igb_adapter *adapter = netdev_priv(netdev);
8546 struct mii_ioctl_data *data = if_mii(ifr);
8547
8548 if (adapter->hw.phy.media_type != e1000_media_type_copper)
8549 return -EOPNOTSUPP;
8550
8551 switch (cmd) {
8552 case SIOCGMIIPHY:
8553 data->phy_id = adapter->hw.phy.addr;
8554 break;
8555 case SIOCGMIIREG:
8556 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
8557 &data->val_out))
8558 return -EIO;
8559 break;
8560 case SIOCSMIIREG:
8561 default:
8562 return -EOPNOTSUPP;
8563 }
8564 return 0;
8565}
8566
8567
8568
8569
8570
8571
8572
8573static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8574{
8575 switch (cmd) {
8576 case SIOCGMIIPHY:
8577 case SIOCGMIIREG:
8578 case SIOCSMIIREG:
8579 return igb_mii_ioctl(netdev, ifr, cmd);
8580 case SIOCGHWTSTAMP:
8581 return igb_ptp_get_ts_config(netdev, ifr);
8582 case SIOCSHWTSTAMP:
8583 return igb_ptp_set_ts_config(netdev, ifr);
8584 default:
8585 return -EOPNOTSUPP;
8586 }
8587}
8588
8589void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8590{
8591 struct igb_adapter *adapter = hw->back;
8592
8593 pci_read_config_word(adapter->pdev, reg, value);
8594}
8595
8596void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
8597{
8598 struct igb_adapter *adapter = hw->back;
8599
8600 pci_write_config_word(adapter->pdev, reg, *value);
8601}
8602
8603s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8604{
8605 struct igb_adapter *adapter = hw->back;
8606
8607 if (pcie_capability_read_word(adapter->pdev, reg, value))
8608 return -E1000_ERR_CONFIG;
8609
8610 return 0;
8611}
8612
8613s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
8614{
8615 struct igb_adapter *adapter = hw->back;
8616
8617 if (pcie_capability_write_word(adapter->pdev, reg, *value))
8618 return -E1000_ERR_CONFIG;
8619
8620 return 0;
8621}
8622
8623static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
8624{
8625 struct igb_adapter *adapter = netdev_priv(netdev);
8626 struct e1000_hw *hw = &adapter->hw;
8627 u32 ctrl, rctl;
8628 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
8629
8630 if (enable) {
8631
8632 ctrl = rd32(E1000_CTRL);
8633 ctrl |= E1000_CTRL_VME;
8634 wr32(E1000_CTRL, ctrl);
8635
8636
8637 rctl = rd32(E1000_RCTL);
8638 rctl &= ~E1000_RCTL_CFIEN;
8639 wr32(E1000_RCTL, rctl);
8640 } else {
8641
8642 ctrl = rd32(E1000_CTRL);
8643 ctrl &= ~E1000_CTRL_VME;
8644 wr32(E1000_CTRL, ctrl);
8645 }
8646
8647 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
8648}
8649
8650static int igb_vlan_rx_add_vid(struct net_device *netdev,
8651 __be16 proto, u16 vid)
8652{
8653 struct igb_adapter *adapter = netdev_priv(netdev);
8654 struct e1000_hw *hw = &adapter->hw;
8655 int pf_id = adapter->vfs_allocated_count;
8656
8657
8658 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8659 igb_vfta_set(hw, vid, pf_id, true, !!vid);
8660
8661 set_bit(vid, adapter->active_vlans);
8662
8663 return 0;
8664}
8665
8666static int igb_vlan_rx_kill_vid(struct net_device *netdev,
8667 __be16 proto, u16 vid)
8668{
8669 struct igb_adapter *adapter = netdev_priv(netdev);
8670 int pf_id = adapter->vfs_allocated_count;
8671 struct e1000_hw *hw = &adapter->hw;
8672
8673
8674 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
8675 igb_vfta_set(hw, vid, pf_id, false, true);
8676
8677 clear_bit(vid, adapter->active_vlans);
8678
8679 return 0;
8680}
8681
8682static void igb_restore_vlan(struct igb_adapter *adapter)
8683{
8684 u16 vid = 1;
8685
8686 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
8687 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
8688
8689 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
8690 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
8691}
8692
8693int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
8694{
8695 struct pci_dev *pdev = adapter->pdev;
8696 struct e1000_mac_info *mac = &adapter->hw.mac;
8697
8698 mac->autoneg = 0;
8699
8700
8701
8702
8703 if ((spd & 1) || (dplx & ~1))
8704 goto err_inval;
8705
8706
8707
8708
8709 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
8710 switch (spd + dplx) {
8711 case SPEED_10 + DUPLEX_HALF:
8712 case SPEED_10 + DUPLEX_FULL:
8713 case SPEED_100 + DUPLEX_HALF:
8714 goto err_inval;
8715 default:
8716 break;
8717 }
8718 }
8719
8720 switch (spd + dplx) {
8721 case SPEED_10 + DUPLEX_HALF:
8722 mac->forced_speed_duplex = ADVERTISE_10_HALF;
8723 break;
8724 case SPEED_10 + DUPLEX_FULL:
8725 mac->forced_speed_duplex = ADVERTISE_10_FULL;
8726 break;
8727 case SPEED_100 + DUPLEX_HALF:
8728 mac->forced_speed_duplex = ADVERTISE_100_HALF;
8729 break;
8730 case SPEED_100 + DUPLEX_FULL:
8731 mac->forced_speed_duplex = ADVERTISE_100_FULL;
8732 break;
8733 case SPEED_1000 + DUPLEX_FULL:
8734 mac->autoneg = 1;
8735 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
8736 break;
8737 case SPEED_1000 + DUPLEX_HALF:
8738 default:
8739 goto err_inval;
8740 }
8741
8742
8743 adapter->hw.phy.mdix = AUTO_ALL_MODES;
8744
8745 return 0;
8746
8747err_inval:
8748 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
8749 return -EINVAL;
8750}
8751
8752static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8753 bool runtime)
8754{
8755 struct net_device *netdev = pci_get_drvdata(pdev);
8756 struct igb_adapter *adapter = netdev_priv(netdev);
8757 struct e1000_hw *hw = &adapter->hw;
8758 u32 ctrl, rctl, status;
8759 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
8760 bool wake;
8761
8762 rtnl_lock();
8763 netif_device_detach(netdev);
8764
8765 if (netif_running(netdev))
8766 __igb_close(netdev, true);
8767
8768 igb_ptp_suspend(adapter);
8769
8770 igb_clear_interrupt_scheme(adapter);
8771 rtnl_unlock();
8772
8773 status = rd32(E1000_STATUS);
8774 if (status & E1000_STATUS_LU)
8775 wufc &= ~E1000_WUFC_LNKC;
8776
8777 if (wufc) {
8778 igb_setup_rctl(adapter);
8779 igb_set_rx_mode(netdev);
8780
8781
8782 if (wufc & E1000_WUFC_MC) {
8783 rctl = rd32(E1000_RCTL);
8784 rctl |= E1000_RCTL_MPE;
8785 wr32(E1000_RCTL, rctl);
8786 }
8787
8788 ctrl = rd32(E1000_CTRL);
8789 ctrl |= E1000_CTRL_ADVD3WUC;
8790 wr32(E1000_CTRL, ctrl);
8791
8792
8793 igb_disable_pcie_master(hw);
8794
8795 wr32(E1000_WUC, E1000_WUC_PME_EN);
8796 wr32(E1000_WUFC, wufc);
8797 } else {
8798 wr32(E1000_WUC, 0);
8799 wr32(E1000_WUFC, 0);
8800 }
8801
8802 wake = wufc || adapter->en_mng_pt;
8803 if (!wake)
8804 igb_power_down_link(adapter);
8805 else
8806 igb_power_up_link(adapter);
8807
8808 if (enable_wake)
8809 *enable_wake = wake;
8810
8811
8812
8813
8814 igb_release_hw_control(adapter);
8815
8816 pci_disable_device(pdev);
8817
8818 return 0;
8819}
8820
8821static void igb_deliver_wake_packet(struct net_device *netdev)
8822{
8823 struct igb_adapter *adapter = netdev_priv(netdev);
8824 struct e1000_hw *hw = &adapter->hw;
8825 struct sk_buff *skb;
8826 u32 wupl;
8827
8828 wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;
8829
8830
8831
8832
8833 if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
8834 return;
8835
8836 skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
8837 if (!skb)
8838 return;
8839
8840 skb_put(skb, wupl);
8841
8842
8843 wupl = roundup(wupl, 4);
8844
8845 memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);
8846
8847 skb->protocol = eth_type_trans(skb, netdev);
8848 netif_rx(skb);
8849}
8850
8851static int __maybe_unused igb_suspend(struct device *dev)
8852{
8853 return __igb_shutdown(to_pci_dev(dev), NULL, 0);
8854}
8855
8856static int __maybe_unused igb_resume(struct device *dev)
8857{
8858 struct pci_dev *pdev = to_pci_dev(dev);
8859 struct net_device *netdev = pci_get_drvdata(pdev);
8860 struct igb_adapter *adapter = netdev_priv(netdev);
8861 struct e1000_hw *hw = &adapter->hw;
8862 u32 err, val;
8863
8864 pci_set_power_state(pdev, PCI_D0);
8865 pci_restore_state(pdev);
8866 pci_save_state(pdev);
8867
8868 if (!pci_device_is_present(pdev))
8869 return -ENODEV;
8870 err = pci_enable_device_mem(pdev);
8871 if (err) {
8872 dev_err(&pdev->dev,
8873 "igb: Cannot enable PCI device from suspend\n");
8874 return err;
8875 }
8876 pci_set_master(pdev);
8877
8878 pci_enable_wake(pdev, PCI_D3hot, 0);
8879 pci_enable_wake(pdev, PCI_D3cold, 0);
8880
8881 if (igb_init_interrupt_scheme(adapter, true)) {
8882 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8883 return -ENOMEM;
8884 }
8885
8886 igb_reset(adapter);
8887
8888
8889
8890
8891 igb_get_hw_control(adapter);
8892
8893 val = rd32(E1000_WUS);
8894 if (val & WAKE_PKT_WUS)
8895 igb_deliver_wake_packet(netdev);
8896
8897 wr32(E1000_WUS, ~0);
8898
8899 rtnl_lock();
8900 if (!err && netif_running(netdev))
8901 err = __igb_open(netdev, true);
8902
8903 if (!err)
8904 netif_device_attach(netdev);
8905 rtnl_unlock();
8906
8907 return err;
8908}
8909
8910static int __maybe_unused igb_runtime_idle(struct device *dev)
8911{
8912 struct net_device *netdev = dev_get_drvdata(dev);
8913 struct igb_adapter *adapter = netdev_priv(netdev);
8914
8915 if (!igb_has_link(adapter))
8916 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
8917
8918 return -EBUSY;
8919}
8920
8921static int __maybe_unused igb_runtime_suspend(struct device *dev)
8922{
8923 return __igb_shutdown(to_pci_dev(dev), NULL, 1);
8924}
8925
8926static int __maybe_unused igb_runtime_resume(struct device *dev)
8927{
8928 return igb_resume(dev);
8929}
8930
8931static void igb_shutdown(struct pci_dev *pdev)
8932{
8933 bool wake;
8934
8935 __igb_shutdown(pdev, &wake, 0);
8936
8937 if (system_state == SYSTEM_POWER_OFF) {
8938 pci_wake_from_d3(pdev, wake);
8939 pci_set_power_state(pdev, PCI_D3hot);
8940 }
8941}
8942
8943#ifdef CONFIG_PCI_IOV
8944static int igb_sriov_reinit(struct pci_dev *dev)
8945{
8946 struct net_device *netdev = pci_get_drvdata(dev);
8947 struct igb_adapter *adapter = netdev_priv(netdev);
8948 struct pci_dev *pdev = adapter->pdev;
8949
8950 rtnl_lock();
8951
8952 if (netif_running(netdev))
8953 igb_close(netdev);
8954 else
8955 igb_reset(adapter);
8956
8957 igb_clear_interrupt_scheme(adapter);
8958
8959 igb_init_queue_configuration(adapter);
8960
8961 if (igb_init_interrupt_scheme(adapter, true)) {
8962 rtnl_unlock();
8963 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8964 return -ENOMEM;
8965 }
8966
8967 if (netif_running(netdev))
8968 igb_open(netdev);
8969
8970 rtnl_unlock();
8971
8972 return 0;
8973}
8974
8975static int igb_pci_disable_sriov(struct pci_dev *dev)
8976{
8977 int err = igb_disable_sriov(dev);
8978
8979 if (!err)
8980 err = igb_sriov_reinit(dev);
8981
8982 return err;
8983}
8984
8985static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
8986{
8987 int err = igb_enable_sriov(dev, num_vfs);
8988
8989 if (err)
8990 goto out;
8991
8992 err = igb_sriov_reinit(dev);
8993 if (!err)
8994 return num_vfs;
8995
8996out:
8997 return err;
8998}
8999
9000#endif
9001static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
9002{
9003#ifdef CONFIG_PCI_IOV
9004 if (num_vfs == 0)
9005 return igb_pci_disable_sriov(dev);
9006 else
9007 return igb_pci_enable_sriov(dev, num_vfs);
9008#endif
9009 return 0;
9010}
9011
9012
9013
9014
9015
9016
9017
9018
9019
9020static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
9021 pci_channel_state_t state)
9022{
9023 struct net_device *netdev = pci_get_drvdata(pdev);
9024 struct igb_adapter *adapter = netdev_priv(netdev);
9025
9026 netif_device_detach(netdev);
9027
9028 if (state == pci_channel_io_perm_failure)
9029 return PCI_ERS_RESULT_DISCONNECT;
9030
9031 if (netif_running(netdev))
9032 igb_down(adapter);
9033 pci_disable_device(pdev);
9034
9035
9036 return PCI_ERS_RESULT_NEED_RESET;
9037}
9038
9039
9040
9041
9042
9043
9044
9045
9046static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
9047{
9048 struct net_device *netdev = pci_get_drvdata(pdev);
9049 struct igb_adapter *adapter = netdev_priv(netdev);
9050 struct e1000_hw *hw = &adapter->hw;
9051 pci_ers_result_t result;
9052
9053 if (pci_enable_device_mem(pdev)) {
9054 dev_err(&pdev->dev,
9055 "Cannot re-enable PCI device after reset.\n");
9056 result = PCI_ERS_RESULT_DISCONNECT;
9057 } else {
9058 pci_set_master(pdev);
9059 pci_restore_state(pdev);
9060 pci_save_state(pdev);
9061
9062 pci_enable_wake(pdev, PCI_D3hot, 0);
9063 pci_enable_wake(pdev, PCI_D3cold, 0);
9064
9065
9066
9067
9068 hw->hw_addr = adapter->io_addr;
9069
9070 igb_reset(adapter);
9071 wr32(E1000_WUS, ~0);
9072 result = PCI_ERS_RESULT_RECOVERED;
9073 }
9074
9075 return result;
9076}
9077
9078
9079
9080
9081
9082
9083
9084
9085
9086static void igb_io_resume(struct pci_dev *pdev)
9087{
9088 struct net_device *netdev = pci_get_drvdata(pdev);
9089 struct igb_adapter *adapter = netdev_priv(netdev);
9090
9091 if (netif_running(netdev)) {
9092 if (igb_up(adapter)) {
9093 dev_err(&pdev->dev, "igb_up failed after reset\n");
9094 return;
9095 }
9096 }
9097
9098 netif_device_attach(netdev);
9099
9100
9101
9102
9103 igb_get_hw_control(adapter);
9104}
9105
9106
9107
9108
9109
9110
9111static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
9112{
9113 struct e1000_hw *hw = &adapter->hw;
9114 u32 rar_low, rar_high;
9115 u8 *addr = adapter->mac_table[index].addr;
9116
9117
9118
9119
9120
9121
9122 rar_low = le32_to_cpup((__le32 *)(addr));
9123 rar_high = le16_to_cpup((__le16 *)(addr + 4));
9124
9125
9126 if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
9127 if (is_valid_ether_addr(addr))
9128 rar_high |= E1000_RAH_AV;
9129
9130 if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
9131 rar_high |= E1000_RAH_ASEL_SRC_ADDR;
9132
9133 switch (hw->mac.type) {
9134 case e1000_82575:
9135 case e1000_i210:
9136 if (adapter->mac_table[index].state &
9137 IGB_MAC_STATE_QUEUE_STEERING)
9138 rar_high |= E1000_RAH_QSEL_ENABLE;
9139
9140 rar_high |= E1000_RAH_POOL_1 *
9141 adapter->mac_table[index].queue;
9142 break;
9143 default:
9144 rar_high |= E1000_RAH_POOL_1 <<
9145 adapter->mac_table[index].queue;
9146 break;
9147 }
9148 }
9149
9150 wr32(E1000_RAL(index), rar_low);
9151 wrfl();
9152 wr32(E1000_RAH(index), rar_high);
9153 wrfl();
9154}
9155
9156static int igb_set_vf_mac(struct igb_adapter *adapter,
9157 int vf, unsigned char *mac_addr)
9158{
9159 struct e1000_hw *hw = &adapter->hw;
9160
9161
9162
9163 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
9164 unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;
9165
9166 ether_addr_copy(vf_mac_addr, mac_addr);
9167 ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
9168 adapter->mac_table[rar_entry].queue = vf;
9169 adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
9170 igb_rar_set_index(adapter, rar_entry);
9171
9172 return 0;
9173}
9174
9175static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
9176{
9177 struct igb_adapter *adapter = netdev_priv(netdev);
9178
9179 if (vf >= adapter->vfs_allocated_count)
9180 return -EINVAL;
9181
9182
9183
9184
9185
9186
9187
9188 if (is_zero_ether_addr(mac)) {
9189 adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
9190 dev_info(&adapter->pdev->dev,
9191 "remove administratively set MAC on VF %d\n",
9192 vf);
9193 } else if (is_valid_ether_addr(mac)) {
9194 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
9195 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
9196 mac, vf);
9197 dev_info(&adapter->pdev->dev,
9198 "Reload the VF driver to make this change effective.");
9199
9200 if (test_bit(__IGB_DOWN, &adapter->state)) {
9201 dev_warn(&adapter->pdev->dev,
9202 "The VF MAC address has been set, but the PF device is not up.\n");
9203 dev_warn(&adapter->pdev->dev,
9204 "Bring the PF device up before attempting to use the VF device.\n");
9205 }
9206 } else {
9207 return -EINVAL;
9208 }
9209 return igb_set_vf_mac(adapter, vf, mac);
9210}
9211
9212static int igb_link_mbps(int internal_link_speed)
9213{
9214 switch (internal_link_speed) {
9215 case SPEED_100:
9216 return 100;
9217 case SPEED_1000:
9218 return 1000;
9219 default:
9220 return 0;
9221 }
9222}
9223
9224static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
9225 int link_speed)
9226{
9227 int rf_dec, rf_int;
9228 u32 bcnrc_val;
9229
9230 if (tx_rate != 0) {
9231
9232 rf_int = link_speed / tx_rate;
9233 rf_dec = (link_speed - (rf_int * tx_rate));
9234 rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
9235 tx_rate;
9236
9237 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
9238 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
9239 E1000_RTTBCNRC_RF_INT_MASK);
9240 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
9241 } else {
9242 bcnrc_val = 0;
9243 }
9244
9245 wr32(E1000_RTTDQSEL, vf);
9246
9247
9248
9249 wr32(E1000_RTTBCNRM, 0x14);
9250 wr32(E1000_RTTBCNRC, bcnrc_val);
9251}
9252
9253static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
9254{
9255 int actual_link_speed, i;
9256 bool reset_rate = false;
9257
9258
9259 if ((adapter->vf_rate_link_speed == 0) ||
9260 (adapter->hw.mac.type != e1000_82576))
9261 return;
9262
9263 actual_link_speed = igb_link_mbps(adapter->link_speed);
9264 if (actual_link_speed != adapter->vf_rate_link_speed) {
9265 reset_rate = true;
9266 adapter->vf_rate_link_speed = 0;
9267 dev_info(&adapter->pdev->dev,
9268 "Link speed has been changed. VF Transmit rate is disabled\n");
9269 }
9270
9271 for (i = 0; i < adapter->vfs_allocated_count; i++) {
9272 if (reset_rate)
9273 adapter->vf_data[i].tx_rate = 0;
9274
9275 igb_set_vf_rate_limit(&adapter->hw, i,
9276 adapter->vf_data[i].tx_rate,
9277 actual_link_speed);
9278 }
9279}
9280
9281static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
9282 int min_tx_rate, int max_tx_rate)
9283{
9284 struct igb_adapter *adapter = netdev_priv(netdev);
9285 struct e1000_hw *hw = &adapter->hw;
9286 int actual_link_speed;
9287
9288 if (hw->mac.type != e1000_82576)
9289 return -EOPNOTSUPP;
9290
9291 if (min_tx_rate)
9292 return -EINVAL;
9293
9294 actual_link_speed = igb_link_mbps(adapter->link_speed);
9295 if ((vf >= adapter->vfs_allocated_count) ||
9296 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
9297 (max_tx_rate < 0) ||
9298 (max_tx_rate > actual_link_speed))
9299 return -EINVAL;
9300
9301 adapter->vf_rate_link_speed = actual_link_speed;
9302 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
9303 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
9304
9305 return 0;
9306}
9307
9308static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
9309 bool setting)
9310{
9311 struct igb_adapter *adapter = netdev_priv(netdev);
9312 struct e1000_hw *hw = &adapter->hw;
9313 u32 reg_val, reg_offset;
9314
9315 if (!adapter->vfs_allocated_count)
9316 return -EOPNOTSUPP;
9317
9318 if (vf >= adapter->vfs_allocated_count)
9319 return -EINVAL;
9320
9321 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
9322 reg_val = rd32(reg_offset);
9323 if (setting)
9324 reg_val |= (BIT(vf) |
9325 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9326 else
9327 reg_val &= ~(BIT(vf) |
9328 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
9329 wr32(reg_offset, reg_val);
9330
9331 adapter->vf_data[vf].spoofchk_enabled = setting;
9332 return 0;
9333}
9334
9335static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
9336{
9337 struct igb_adapter *adapter = netdev_priv(netdev);
9338
9339 if (vf >= adapter->vfs_allocated_count)
9340 return -EINVAL;
9341 if (adapter->vf_data[vf].trusted == setting)
9342 return 0;
9343
9344 adapter->vf_data[vf].trusted = setting;
9345
9346 dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
9347 vf, setting ? "" : "not ");
9348 return 0;
9349}
9350
9351static int igb_ndo_get_vf_config(struct net_device *netdev,
9352 int vf, struct ifla_vf_info *ivi)
9353{
9354 struct igb_adapter *adapter = netdev_priv(netdev);
9355 if (vf >= adapter->vfs_allocated_count)
9356 return -EINVAL;
9357 ivi->vf = vf;
9358 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
9359 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
9360 ivi->min_tx_rate = 0;
9361 ivi->vlan = adapter->vf_data[vf].pf_vlan;
9362 ivi->qos = adapter->vf_data[vf].pf_qos;
9363 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
9364 ivi->trusted = adapter->vf_data[vf].trusted;
9365 return 0;
9366}
9367
9368static void igb_vmm_control(struct igb_adapter *adapter)
9369{
9370 struct e1000_hw *hw = &adapter->hw;
9371 u32 reg;
9372
9373 switch (hw->mac.type) {
9374 case e1000_82575:
9375 case e1000_i210:
9376 case e1000_i211:
9377 case e1000_i354:
9378 default:
9379
9380 return;
9381 case e1000_82576:
9382
9383 reg = rd32(E1000_DTXCTL);
9384 reg |= E1000_DTXCTL_VLAN_ADDED;
9385 wr32(E1000_DTXCTL, reg);
9386 fallthrough;
9387 case e1000_82580:
9388
9389 reg = rd32(E1000_RPLOLR);
9390 reg |= E1000_RPLOLR_STRVLAN;
9391 wr32(E1000_RPLOLR, reg);
9392 fallthrough;
9393 case e1000_i350:
9394
9395 break;
9396 }
9397
9398 if (adapter->vfs_allocated_count) {
9399 igb_vmdq_set_loopback_pf(hw, true);
9400 igb_vmdq_set_replication_pf(hw, true);
9401 igb_vmdq_set_anti_spoofing_pf(hw, true,
9402 adapter->vfs_allocated_count);
9403 } else {
9404 igb_vmdq_set_loopback_pf(hw, false);
9405 igb_vmdq_set_replication_pf(hw, false);
9406 }
9407}
9408
9409static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
9410{
9411 struct e1000_hw *hw = &adapter->hw;
9412 u32 dmac_thr;
9413 u16 hwm;
9414
9415 if (hw->mac.type > e1000_82580) {
9416 if (adapter->flags & IGB_FLAG_DMAC) {
9417 u32 reg;
9418
9419
9420 wr32(E1000_DMCTXTH, 0);
9421
9422
9423
9424
9425
9426 hwm = 64 * (pba - 6);
9427 reg = rd32(E1000_FCRTC);
9428 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
9429 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
9430 & E1000_FCRTC_RTH_COAL_MASK);
9431 wr32(E1000_FCRTC, reg);
9432
9433
9434
9435
9436 dmac_thr = pba - 10;
9437 reg = rd32(E1000_DMACR);
9438 reg &= ~E1000_DMACR_DMACTHR_MASK;
9439 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
9440 & E1000_DMACR_DMACTHR_MASK);
9441
9442
9443 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
9444
9445
9446 reg |= (1000 >> 5);
9447
9448
9449 if (hw->mac.type != e1000_i354)
9450 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
9451
9452 wr32(E1000_DMACR, reg);
9453
9454
9455
9456
9457 wr32(E1000_DMCRTRH, 0);
9458
9459 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
9460
9461 wr32(E1000_DMCTLX, reg);
9462
9463
9464
9465
9466 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
9467 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
9468
9469
9470
9471
9472 reg = rd32(E1000_PCIEMISC);
9473 reg &= ~E1000_PCIEMISC_LX_DECISION;
9474 wr32(E1000_PCIEMISC, reg);
9475 }
9476 } else if (hw->mac.type == e1000_82580) {
9477 u32 reg = rd32(E1000_PCIEMISC);
9478
9479 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
9480 wr32(E1000_DMACR, 0);
9481 }
9482}
9483
9484
9485
9486
9487
9488
9489
9490
9491
9492
9493
9494s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9495 u8 dev_addr, u8 *data)
9496{
9497 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9498 struct i2c_client *this_client = adapter->i2c_client;
9499 s32 status;
9500 u16 swfw_mask = 0;
9501
9502 if (!this_client)
9503 return E1000_ERR_I2C;
9504
9505 swfw_mask = E1000_SWFW_PHY0_SM;
9506
9507 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9508 return E1000_ERR_SWFW_SYNC;
9509
9510 status = i2c_smbus_read_byte_data(this_client, byte_offset);
9511 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9512
9513 if (status < 0)
9514 return E1000_ERR_I2C;
9515 else {
9516 *data = status;
9517 return 0;
9518 }
9519}
9520
9521
9522
9523
9524
9525
9526
9527
9528
9529
9530
9531s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
9532 u8 dev_addr, u8 data)
9533{
9534 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
9535 struct i2c_client *this_client = adapter->i2c_client;
9536 s32 status;
9537 u16 swfw_mask = E1000_SWFW_PHY0_SM;
9538
9539 if (!this_client)
9540 return E1000_ERR_I2C;
9541
9542 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
9543 return E1000_ERR_SWFW_SYNC;
9544 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
9545 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
9546
9547 if (status)
9548 return E1000_ERR_I2C;
9549 else
9550 return 0;
9551
9552}
9553
9554int igb_reinit_queues(struct igb_adapter *adapter)
9555{
9556 struct net_device *netdev = adapter->netdev;
9557 struct pci_dev *pdev = adapter->pdev;
9558 int err = 0;
9559
9560 if (netif_running(netdev))
9561 igb_close(netdev);
9562
9563 igb_reset_interrupt_capability(adapter);
9564
9565 if (igb_init_interrupt_scheme(adapter, true)) {
9566 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
9567 return -ENOMEM;
9568 }
9569
9570 if (netif_running(netdev))
9571 err = igb_open(netdev);
9572
9573 return err;
9574}
9575
9576static void igb_nfc_filter_exit(struct igb_adapter *adapter)
9577{
9578 struct igb_nfc_filter *rule;
9579
9580 spin_lock(&adapter->nfc_lock);
9581
9582 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9583 igb_erase_filter(adapter, rule);
9584
9585 hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
9586 igb_erase_filter(adapter, rule);
9587
9588 spin_unlock(&adapter->nfc_lock);
9589}
9590
9591static void igb_nfc_filter_restore(struct igb_adapter *adapter)
9592{
9593 struct igb_nfc_filter *rule;
9594
9595 spin_lock(&adapter->nfc_lock);
9596
9597 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
9598 igb_add_filter(adapter, rule);
9599
9600 spin_unlock(&adapter->nfc_lock);
9601}
9602
9603