#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <net/mpls.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "4.1.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2015 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf]	= &ixgbevf_82599_vf_info,
	[board_82599_vf_hv]	= &ixgbevf_82599_vf_hv_info,
	[board_X540_vf]		= &ixgbevf_X540_vf_info,
	[board_X540_vf_hv]	= &ixgbevf_X540_vf_hv_info,
	[board_X550_vf]		= &ixgbevf_X550_vf_info,
	[board_X550_vf_hv]	= &ixgbevf_X550_vf_hv_info,
	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,
	[board_X550EM_x_vf_hv]	= &ixgbevf_X550EM_x_vf_hv_info,
	[board_x550em_a_vf]	= &ixgbevf_x550em_a_vf_info,
};
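
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */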
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct workqueue_struct *ixgbevf_wq;

static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbevf_wq, &adapter->service_task);
}

static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}

static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
}

static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}
	/* reset the countdown */
	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;

	return false;
}

static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
		ixgbevf_service_event_schedule(adapter);
	}
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_tx_timeout_reset(adapter);
}

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring, int napi_budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buffer->data);
		else
			napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
		struct ixgbe_hw *hw = &adapter->hw;
		union ixgbe_adv_tx_desc *eop_desc;

		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;

		pr_err("Detected Tx Unit Hang%s\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  next_to_watch        <%p>\n"
		       "  eop_desc->wb.status  <%x>\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       ring_is_xdp(tx_ring) ? " XDP" : "",
		       tx_ring->queue_index,
		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
		       tx_ring->next_to_use, i,
		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		if (!ring_is_xdp(tx_ring))
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbevf_tx_timeout_reset(adapter);

		return true;
	}

	if (ring_is_xdp(tx_ring))
		return !!budget;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}

#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
				   union ixgbe_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
481
482
483
484
485
486
487
488static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
489 union ixgbe_adv_rx_desc *rx_desc,
490 struct sk_buff *skb)
491{
492 skb_checksum_none_assert(skb);
493
494
495 if (!(ring->netdev->features & NETIF_F_RXCSUM))
496 return;
497
498
499 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
500 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
501 ring->rx_stats.csum_err++;
502 return;
503 }
504
505 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
506 return;
507
508 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
509 ring->rx_stats.csum_err++;
510 return;
511 }
512
513
514 skb->ip_summed = CHECKSUM_UNNECESSARY;
515}
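
/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/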
527static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
528 union ixgbe_adv_rx_desc *rx_desc,
529 struct sk_buff *skb)
530{
531 ixgbevf_rx_hash(rx_ring, rx_desc, skb);
532 ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
533
534 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
535 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
536 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
537
538 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
539 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
540 }
541
542 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
543}
544
545static
546struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
547 const unsigned int size)
548{
549 struct ixgbevf_rx_buffer *rx_buffer;
550
551 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
552 prefetchw(rx_buffer->page);
553
554
555 dma_sync_single_range_for_cpu(rx_ring->dev,
556 rx_buffer->dma,
557 rx_buffer->page_offset,
558 size,
559 DMA_FROM_DEVICE);
560
561 rx_buffer->pagecnt_bias--;
562
563 return rx_buffer;
564}
565
566static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
567 struct ixgbevf_rx_buffer *rx_buffer,
568 struct sk_buff *skb)
569{
570 if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
571
572 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
573 } else {
574 if (IS_ERR(skb))
575
576
577
578 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
579 ixgbevf_rx_pg_size(rx_ring),
580 DMA_FROM_DEVICE,
581 IXGBEVF_RX_DMA_ATTR);
582 __page_frag_cache_drain(rx_buffer->page,
583 rx_buffer->pagecnt_bias);
584 }
585
586
587 rx_buffer->page = NULL;
588}
589
590
591
592
593
594
595
596
597
598
599
600static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
601 union ixgbe_adv_rx_desc *rx_desc)
602{
603 u32 ntc = rx_ring->next_to_clean + 1;
604
605
606 ntc = (ntc < rx_ring->count) ? ntc : 0;
607 rx_ring->next_to_clean = ntc;
608
609 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
610
611 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
612 return false;
613
614 return true;
615}
616
617static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
618{
619 return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
620}
621
622static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
623 struct ixgbevf_rx_buffer *bi)
624{
625 struct page *page = bi->page;
626 dma_addr_t dma;
627
628
629 if (likely(page))
630 return true;
631
632
633 page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
634 if (unlikely(!page)) {
635 rx_ring->rx_stats.alloc_rx_page_failed++;
636 return false;
637 }
638
639
640 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
641 ixgbevf_rx_pg_size(rx_ring),
642 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
643
644
645
646
647 if (dma_mapping_error(rx_ring->dev, dma)) {
648 __free_pages(page, ixgbevf_rx_pg_order(rx_ring));
649
650 rx_ring->rx_stats.alloc_rx_page_failed++;
651 return false;
652 }
653
654 bi->dma = dma;
655 bi->page = page;
656 bi->page_offset = ixgbevf_rx_offset(rx_ring);
657 bi->pagecnt_bias = 1;
658 rx_ring->rx_stats.alloc_rx_page++;
659
660 return true;
661}
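
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/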
668static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
669 u16 cleaned_count)
670{
671 union ixgbe_adv_rx_desc *rx_desc;
672 struct ixgbevf_rx_buffer *bi;
673 unsigned int i = rx_ring->next_to_use;
674
675
676 if (!cleaned_count || !rx_ring->netdev)
677 return;
678
679 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
680 bi = &rx_ring->rx_buffer_info[i];
681 i -= rx_ring->count;
682
683 do {
684 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
685 break;
686
687
688 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
689 bi->page_offset,
690 ixgbevf_rx_bufsz(rx_ring),
691 DMA_FROM_DEVICE);
692
693
694
695
696 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
697
698 rx_desc++;
699 bi++;
700 i++;
701 if (unlikely(!i)) {
702 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
703 bi = rx_ring->rx_buffer_info;
704 i -= rx_ring->count;
705 }
706
707
708 rx_desc->wb.upper.length = 0;
709
710 cleaned_count--;
711 } while (cleaned_count);
712
713 i += rx_ring->count;
714
715 if (rx_ring->next_to_use != i) {
716
717 rx_ring->next_to_use = i;
718
719
720 rx_ring->next_to_alloc = i;
721
722
723
724
725
726
727 wmb();
728 ixgbevf_write_tail(rx_ring, i);
729 }
730}
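
/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/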
750static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
751 union ixgbe_adv_rx_desc *rx_desc,
752 struct sk_buff *skb)
753{
754
755 if (IS_ERR(skb))
756 return true;
757
758
759 if (unlikely(ixgbevf_test_staterr(rx_desc,
760 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
761 struct net_device *netdev = rx_ring->netdev;
762
763 if (!(netdev->features & NETIF_F_RXALL)) {
764 dev_kfree_skb_any(skb);
765 return true;
766 }
767 }
768
769
770 if (eth_skb_pad(skb))
771 return true;
772
773 return false;
774}
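
/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/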
783static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
784 struct ixgbevf_rx_buffer *old_buff)
785{
786 struct ixgbevf_rx_buffer *new_buff;
787 u16 nta = rx_ring->next_to_alloc;
788
789 new_buff = &rx_ring->rx_buffer_info[nta];
790
791
792 nta++;
793 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
794
795
796 new_buff->page = old_buff->page;
797 new_buff->dma = old_buff->dma;
798 new_buff->page_offset = old_buff->page_offset;
799 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
800}
801
802static inline bool ixgbevf_page_is_reserved(struct page *page)
803{
804 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
805}
806
807static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
808{
809 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
810 struct page *page = rx_buffer->page;
811
812
813 if (unlikely(ixgbevf_page_is_reserved(page)))
814 return false;
815
816#if (PAGE_SIZE < 8192)
817
818 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
819 return false;
820#else
821#define IXGBEVF_LAST_OFFSET \
822 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
823
824 if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
825 return false;
826
827#endif
828
829
830
831
832
833 if (unlikely(!pagecnt_bias)) {
834 page_ref_add(page, USHRT_MAX);
835 rx_buffer->pagecnt_bias = USHRT_MAX;
836 }
837
838 return true;
839}
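
/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 **/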
850static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
851 struct ixgbevf_rx_buffer *rx_buffer,
852 struct sk_buff *skb,
853 unsigned int size)
854{
855#if (PAGE_SIZE < 8192)
856 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
857#else
858 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
859 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
860 SKB_DATA_ALIGN(size);
861#endif
862 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
863 rx_buffer->page_offset, size, truesize);
864#if (PAGE_SIZE < 8192)
865 rx_buffer->page_offset ^= truesize;
866#else
867 rx_buffer->page_offset += truesize;
868#endif
869}
870
871static
872struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
873 struct ixgbevf_rx_buffer *rx_buffer,
874 struct xdp_buff *xdp,
875 union ixgbe_adv_rx_desc *rx_desc)
876{
877 unsigned int size = xdp->data_end - xdp->data;
878#if (PAGE_SIZE < 8192)
879 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
880#else
881 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
882 xdp->data_hard_start);
883#endif
884 unsigned int headlen;
885 struct sk_buff *skb;
886
887
888 prefetch(xdp->data);
889#if L1_CACHE_BYTES < 128
890 prefetch(xdp->data + L1_CACHE_BYTES);
891#endif
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
909 if (unlikely(!skb))
910 return NULL;
911
912
913 headlen = size;
914 if (headlen > IXGBEVF_RX_HDR_SIZE)
915 headlen = eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE);
916
917
918 memcpy(__skb_put(skb, headlen), xdp->data,
919 ALIGN(headlen, sizeof(long)));
920
921
922 size -= headlen;
923 if (size) {
924 skb_add_rx_frag(skb, 0, rx_buffer->page,
925 (xdp->data + headlen) -
926 page_address(rx_buffer->page),
927 size, truesize);
928#if (PAGE_SIZE < 8192)
929 rx_buffer->page_offset ^= truesize;
930#else
931 rx_buffer->page_offset += truesize;
932#endif
933 } else {
934 rx_buffer->pagecnt_bias++;
935 }
936
937 return skb;
938}
939
940static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
941 u32 qmask)
942{
943 struct ixgbe_hw *hw = &adapter->hw;
944
945 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
946}
947
948static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
949 struct ixgbevf_rx_buffer *rx_buffer,
950 struct xdp_buff *xdp,
951 union ixgbe_adv_rx_desc *rx_desc)
952{
953 unsigned int metasize = xdp->data - xdp->data_meta;
954#if (PAGE_SIZE < 8192)
955 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
956#else
957 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
958 SKB_DATA_ALIGN(xdp->data_end -
959 xdp->data_hard_start);
960#endif
961 struct sk_buff *skb;
962
963
964
965
966
967
968 prefetch(xdp->data_meta);
969#if L1_CACHE_BYTES < 128
970 prefetch(xdp->data_meta + L1_CACHE_BYTES);
971#endif
972
973
974 skb = build_skb(xdp->data_hard_start, truesize);
975 if (unlikely(!skb))
976 return NULL;
977
978
979 skb_reserve(skb, xdp->data - xdp->data_hard_start);
980 __skb_put(skb, xdp->data_end - xdp->data);
981 if (metasize)
982 skb_metadata_set(skb, metasize);
983
984
985#if (PAGE_SIZE < 8192)
986 rx_buffer->page_offset ^= truesize;
987#else
988 rx_buffer->page_offset += truesize;
989#endif
990
991 return skb;
992}
993
994#define IXGBEVF_XDP_PASS 0
995#define IXGBEVF_XDP_CONSUMED 1
996#define IXGBEVF_XDP_TX 2
997
998static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
999 struct xdp_buff *xdp)
1000{
1001 struct ixgbevf_tx_buffer *tx_buffer;
1002 union ixgbe_adv_tx_desc *tx_desc;
1003 u32 len, cmd_type;
1004 dma_addr_t dma;
1005 u16 i;
1006
1007 len = xdp->data_end - xdp->data;
1008
1009 if (unlikely(!ixgbevf_desc_unused(ring)))
1010 return IXGBEVF_XDP_CONSUMED;
1011
1012 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
1013 if (dma_mapping_error(ring->dev, dma))
1014 return IXGBEVF_XDP_CONSUMED;
1015
1016
1017 tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
1018 tx_buffer->bytecount = len;
1019 tx_buffer->gso_segs = 1;
1020 tx_buffer->protocol = 0;
1021
1022 i = ring->next_to_use;
1023 tx_desc = IXGBEVF_TX_DESC(ring, i);
1024
1025 dma_unmap_len_set(tx_buffer, len, len);
1026 dma_unmap_addr_set(tx_buffer, dma, dma);
1027 tx_buffer->data = xdp->data;
1028 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1029
1030
1031 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
1032 IXGBE_ADVTXD_DCMD_DEXT |
1033 IXGBE_ADVTXD_DCMD_IFCS;
1034 cmd_type |= len | IXGBE_TXD_CMD;
1035 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1036 tx_desc->read.olinfo_status =
1037 cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
1038 IXGBE_ADVTXD_CC);
1039
1040
1041 smp_wmb();
1042
1043
1044 i++;
1045 if (i == ring->count)
1046 i = 0;
1047
1048 tx_buffer->next_to_watch = tx_desc;
1049 ring->next_to_use = i;
1050
1051 return IXGBEVF_XDP_TX;
1052}
1053
static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
				       struct ixgbevf_ring *rx_ring,
				       struct xdp_buff *xdp)
{
	int result = IXGBEVF_XDP_PASS;
	struct ixgbevf_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	if (!xdp_prog)
		goto xdp_out;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
		result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fall through -- handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBEVF_XDP_CONSUMED;
		break;
	}
xdp_out:
	rcu_read_unlock();
	return ERR_PTR(-result);
}
1091
1092static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
1093 struct ixgbevf_rx_buffer *rx_buffer,
1094 unsigned int size)
1095{
1096#if (PAGE_SIZE < 8192)
1097 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
1098
1099 rx_buffer->page_offset ^= truesize;
1100#else
1101 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
1102 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
1103 SKB_DATA_ALIGN(size);
1104
1105 rx_buffer->page_offset += truesize;
1106#endif
1107}
1108
1109static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
1110 struct ixgbevf_ring *rx_ring,
1111 int budget)
1112{
1113 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1114 struct ixgbevf_adapter *adapter = q_vector->adapter;
1115 u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
1116 struct sk_buff *skb = rx_ring->skb;
1117 bool xdp_xmit = false;
1118 struct xdp_buff xdp;
1119
1120 xdp.rxq = &rx_ring->xdp_rxq;
1121
1122 while (likely(total_rx_packets < budget)) {
1123 struct ixgbevf_rx_buffer *rx_buffer;
1124 union ixgbe_adv_rx_desc *rx_desc;
1125 unsigned int size;
1126
1127
1128 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
1129 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
1130 cleaned_count = 0;
1131 }
1132
1133 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1134 size = le16_to_cpu(rx_desc->wb.upper.length);
1135 if (!size)
1136 break;
1137
1138
1139
1140
1141
1142 rmb();
1143
1144 rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
1145
1146
1147 if (!skb) {
1148 xdp.data = page_address(rx_buffer->page) +
1149 rx_buffer->page_offset;
1150 xdp.data_meta = xdp.data;
1151 xdp.data_hard_start = xdp.data -
1152 ixgbevf_rx_offset(rx_ring);
1153 xdp.data_end = xdp.data + size;
1154
1155 skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
1156 }
1157
1158 if (IS_ERR(skb)) {
1159 if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
1160 xdp_xmit = true;
1161 ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
1162 size);
1163 } else {
1164 rx_buffer->pagecnt_bias++;
1165 }
1166 total_rx_packets++;
1167 total_rx_bytes += size;
1168 } else if (skb) {
1169 ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1170 } else if (ring_uses_build_skb(rx_ring)) {
1171 skb = ixgbevf_build_skb(rx_ring, rx_buffer,
1172 &xdp, rx_desc);
1173 } else {
1174 skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
1175 &xdp, rx_desc);
1176 }
1177
1178
1179 if (!skb) {
1180 rx_ring->rx_stats.alloc_rx_buff_failed++;
1181 rx_buffer->pagecnt_bias++;
1182 break;
1183 }
1184
1185 ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
1186 cleaned_count++;
1187
1188
1189 if (ixgbevf_is_non_eop(rx_ring, rx_desc))
1190 continue;
1191
1192
1193 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
1194 skb = NULL;
1195 continue;
1196 }
1197
1198
1199 total_rx_bytes += skb->len;
1200
1201
1202
1203
1204 if ((skb->pkt_type == PACKET_BROADCAST ||
1205 skb->pkt_type == PACKET_MULTICAST) &&
1206 ether_addr_equal(rx_ring->netdev->dev_addr,
1207 eth_hdr(skb)->h_source)) {
1208 dev_kfree_skb_irq(skb);
1209 continue;
1210 }
1211
1212
1213 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
1214
1215 ixgbevf_rx_skb(q_vector, skb);
1216
1217
1218 skb = NULL;
1219
1220
1221 total_rx_packets++;
1222 }
1223
1224
1225 rx_ring->skb = skb;
1226
1227 if (xdp_xmit) {
1228 struct ixgbevf_ring *xdp_ring =
1229 adapter->xdp_ring[rx_ring->queue_index];
1230
1231
1232
1233
1234 wmb();
1235 ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
1236 }
1237
1238 u64_stats_update_begin(&rx_ring->syncp);
1239 rx_ring->stats.packets += total_rx_packets;
1240 rx_ring->stats.bytes += total_rx_bytes;
1241 u64_stats_update_end(&rx_ring->syncp);
1242 q_vector->rx.total_packets += total_rx_packets;
1243 q_vector->rx.total_bytes += total_rx_bytes;
1244
1245 return total_rx_packets;
1246}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function cleans the Tx and Rx rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx) {
		if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
			clean_complete = false;
	}

	if (budget <= 0)
		return budget;

	/* attempt to distribute budget to each queue fairly, but don't
	 * allow the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
						   per_ring_budget);
		work_done += cleaned;
		if (cleaned >= per_ring_budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* all work done, exit the polling mode */
	napi_complete_done(napi, work_done);
	if (adapter->rx_itr_setting == 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  BIT(q_vector->v_idx));

	return 0;
}
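
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/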
1308void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
1309{
1310 struct ixgbevf_adapter *adapter = q_vector->adapter;
1311 struct ixgbe_hw *hw = &adapter->hw;
1312 int v_idx = q_vector->v_idx;
1313 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1314
1315
1316
1317
1318 itr_reg |= IXGBE_EITR_CNT_WDIS;
1319
1320 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
1321}
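
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/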
1330static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1331{
1332 struct ixgbevf_q_vector *q_vector;
1333 int q_vectors, v_idx;
1334
1335 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1336 adapter->eims_enable_mask = 0;
1337
1338
1339
1340
1341 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1342 struct ixgbevf_ring *ring;
1343
1344 q_vector = adapter->q_vector[v_idx];
1345
1346 ixgbevf_for_each_ring(ring, q_vector->rx)
1347 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1348
1349 ixgbevf_for_each_ring(ring, q_vector->tx)
1350 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1351
1352 if (q_vector->tx.ring && !q_vector->rx.ring) {
1353
1354 if (adapter->tx_itr_setting == 1)
1355 q_vector->itr = IXGBE_12K_ITR;
1356 else
1357 q_vector->itr = adapter->tx_itr_setting;
1358 } else {
1359
1360 if (adapter->rx_itr_setting == 1)
1361 q_vector->itr = IXGBE_20K_ITR;
1362 else
1363 q_vector->itr = adapter->rx_itr_setting;
1364 }
1365
1366
1367 adapter->eims_enable_mask |= BIT(v_idx);
1368
1369 ixgbevf_write_eitr(q_vector);
1370 }
1371
1372 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
1373
1374 adapter->eims_other = BIT(v_idx);
1375 adapter->eims_enable_mask |= adapter->eims_other;
1376}
1377
1378enum latency_range {
1379 lowest_latency = 0,
1380 low_latency = 1,
1381 bulk_latency = 2,
1382 latency_invalid = 255
1383};
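
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packet and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and a more accurate ITR for the current traffic pattern.  The thresholds
 * below aim to minimize response time while increasing bulk throughput.
 **/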
1398static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1399 struct ixgbevf_ring_container *ring_container)
1400{
1401 int bytes = ring_container->total_bytes;
1402 int packets = ring_container->total_packets;
1403 u32 timepassed_us;
1404 u64 bytes_perint;
1405 u8 itr_setting = ring_container->itr;
1406
1407 if (packets == 0)
1408 return;
1409
1410
1411
1412
1413
1414
1415
1416 timepassed_us = q_vector->itr >> 2;
1417 bytes_perint = bytes / timepassed_us;
1418
1419 switch (itr_setting) {
1420 case lowest_latency:
1421 if (bytes_perint > 10)
1422 itr_setting = low_latency;
1423 break;
1424 case low_latency:
1425 if (bytes_perint > 20)
1426 itr_setting = bulk_latency;
1427 else if (bytes_perint <= 10)
1428 itr_setting = lowest_latency;
1429 break;
1430 case bulk_latency:
1431 if (bytes_perint <= 20)
1432 itr_setting = low_latency;
1433 break;
1434 }
1435
1436
1437 ring_container->total_bytes = 0;
1438 ring_container->total_packets = 0;
1439
1440
1441 ring_container->itr = itr_setting;
1442}
1443
1444static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
1445{
1446 u32 new_itr = q_vector->itr;
1447 u8 current_itr;
1448
1449 ixgbevf_update_itr(q_vector, &q_vector->tx);
1450 ixgbevf_update_itr(q_vector, &q_vector->rx);
1451
1452 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1453
1454 switch (current_itr) {
1455
1456 case lowest_latency:
1457 new_itr = IXGBE_100K_ITR;
1458 break;
1459 case low_latency:
1460 new_itr = IXGBE_20K_ITR;
1461 break;
1462 case bulk_latency:
1463 new_itr = IXGBE_12K_ITR;
1464 break;
1465 default:
1466 break;
1467 }
1468
1469 if (new_itr != q_vector->itr) {
1470
1471 new_itr = (10 * new_itr * q_vector->itr) /
1472 ((9 * new_itr) + q_vector->itr);
1473
1474
1475 q_vector->itr = new_itr;
1476
1477 ixgbevf_write_eitr(q_vector);
1478 }
1479}
1480
1481static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1482{
1483 struct ixgbevf_adapter *adapter = data;
1484 struct ixgbe_hw *hw = &adapter->hw;
1485
1486 hw->mac.get_link_status = 1;
1487
1488 ixgbevf_service_event_schedule(adapter);
1489
1490 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1491
1492 return IRQ_HANDLED;
1493}
1494
1495
1496
1497
1498
1499
1500static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
1501{
1502 struct ixgbevf_q_vector *q_vector = data;
1503
1504
1505 if (q_vector->rx.ring || q_vector->tx.ring)
1506 napi_schedule_irqoff(&q_vector->napi);
1507
1508 return IRQ_HANDLED;
1509}
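
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/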
1518static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1519{
1520 struct net_device *netdev = adapter->netdev;
1521 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1522 unsigned int ri = 0, ti = 0;
1523 int vector, err;
1524
1525 for (vector = 0; vector < q_vectors; vector++) {
1526 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1527 struct msix_entry *entry = &adapter->msix_entries[vector];
1528
1529 if (q_vector->tx.ring && q_vector->rx.ring) {
1530 snprintf(q_vector->name, sizeof(q_vector->name),
1531 "%s-TxRx-%u", netdev->name, ri++);
1532 ti++;
1533 } else if (q_vector->rx.ring) {
1534 snprintf(q_vector->name, sizeof(q_vector->name),
1535 "%s-rx-%u", netdev->name, ri++);
1536 } else if (q_vector->tx.ring) {
1537 snprintf(q_vector->name, sizeof(q_vector->name),
1538 "%s-tx-%u", netdev->name, ti++);
1539 } else {
1540
1541 continue;
1542 }
1543 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1544 q_vector->name, q_vector);
1545 if (err) {
1546 hw_dbg(&adapter->hw,
1547 "request_irq failed for MSIX interrupt Error: %d\n",
1548 err);
1549 goto free_queue_irqs;
1550 }
1551 }
1552
1553 err = request_irq(adapter->msix_entries[vector].vector,
1554 &ixgbevf_msix_other, 0, netdev->name, adapter);
1555 if (err) {
1556 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1557 err);
1558 goto free_queue_irqs;
1559 }
1560
1561 return 0;
1562
1563free_queue_irqs:
1564 while (vector) {
1565 vector--;
1566 free_irq(adapter->msix_entries[vector].vector,
1567 adapter->q_vector[vector]);
1568 }
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579 adapter->num_msix_vectors = 0;
1580 return err;
1581}
1582
1583
1584
1585
1586
1587
1588
1589
1590static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1591{
1592 int err = ixgbevf_request_msix_irqs(adapter);
1593
1594 if (err)
1595 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
1596
1597 return err;
1598}
1599
1600static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1601{
1602 int i, q_vectors;
1603
1604 if (!adapter->msix_entries)
1605 return;
1606
1607 q_vectors = adapter->num_msix_vectors;
1608 i = q_vectors - 1;
1609
1610 free_irq(adapter->msix_entries[i].vector, adapter);
1611 i--;
1612
1613 for (; i >= 0; i--) {
1614
1615 if (!adapter->q_vector[i]->rx.ring &&
1616 !adapter->q_vector[i]->tx.ring)
1617 continue;
1618
1619 free_irq(adapter->msix_entries[i].vector,
1620 adapter->q_vector[i]);
1621 }
1622}
1623
1624
1625
1626
1627
1628static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1629{
1630 struct ixgbe_hw *hw = &adapter->hw;
1631 int i;
1632
1633 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1634 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1635 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1636
1637 IXGBE_WRITE_FLUSH(hw);
1638
1639 for (i = 0; i < adapter->num_msix_vectors; i++)
1640 synchronize_irq(adapter->msix_entries[i].vector);
1641}
1642
1643
1644
1645
1646
1647static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1648{
1649 struct ixgbe_hw *hw = &adapter->hw;
1650
1651 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1652 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1653 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1654}
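
/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/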
1663static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1664 struct ixgbevf_ring *ring)
1665{
1666 struct ixgbe_hw *hw = &adapter->hw;
1667 u64 tdba = ring->dma;
1668 int wait_loop = 10;
1669 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1670 u8 reg_idx = ring->reg_idx;
1671
1672
1673 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1674 IXGBE_WRITE_FLUSH(hw);
1675
1676 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1677 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1678 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1679 ring->count * sizeof(union ixgbe_adv_tx_desc));
1680
1681
1682 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1683 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1684
1685
1686 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1687 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1688 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1689
1690
1691 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1692 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1693 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1694
1695
1696 ring->next_to_clean = 0;
1697 ring->next_to_use = 0;
1698
1699
1700
1701
1702
1703 txdctl |= (8 << 16);
1704
1705
1706 txdctl |= (1u << 8) |
1707 32;
1708
1709
1710 memset(ring->tx_buffer_info, 0,
1711 sizeof(struct ixgbevf_tx_buffer) * ring->count);
1712
1713 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1714
1715 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1716
1717
1718 do {
1719 usleep_range(1000, 2000);
1720 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1721 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1722 if (!wait_loop)
1723 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
1724}
1725
1726
1727
1728
1729
1730
1731
1732static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1733{
1734 u32 i;
1735
1736
1737 for (i = 0; i < adapter->num_tx_queues; i++)
1738 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1739 for (i = 0; i < adapter->num_xdp_queues; i++)
1740 ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
1741}
1742
1743#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1744
1745static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
1746 struct ixgbevf_ring *ring, int index)
1747{
1748 struct ixgbe_hw *hw = &adapter->hw;
1749 u32 srrctl;
1750
1751 srrctl = IXGBE_SRRCTL_DROP_EN;
1752
1753 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1754 if (ring_uses_large_buffer(ring))
1755 srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1756 else
1757 srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1758 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1759
1760 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1761}
1762
1763static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1764{
1765 struct ixgbe_hw *hw = &adapter->hw;
1766
1767
1768 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1769 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1770 IXGBE_PSRTYPE_L2HDR;
1771
1772 if (adapter->num_rx_queues > 1)
1773 psrtype |= BIT(29);
1774
1775 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1776}
1777
1778#define IXGBEVF_MAX_RX_DESC_POLL 10
1779static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1780 struct ixgbevf_ring *ring)
1781{
1782 struct ixgbe_hw *hw = &adapter->hw;
1783 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1784 u32 rxdctl;
1785 u8 reg_idx = ring->reg_idx;
1786
1787 if (IXGBE_REMOVED(hw->hw_addr))
1788 return;
1789 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1790 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1791
1792
1793 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1794
1795
1796 do {
1797 udelay(10);
1798 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1799 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1800
1801 if (!wait_loop)
1802 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1803 reg_idx);
1804}
1805
1806static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1807 struct ixgbevf_ring *ring)
1808{
1809 struct ixgbe_hw *hw = &adapter->hw;
1810 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1811 u32 rxdctl;
1812 u8 reg_idx = ring->reg_idx;
1813
1814 if (IXGBE_REMOVED(hw->hw_addr))
1815 return;
1816 do {
1817 usleep_range(1000, 2000);
1818 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1819 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1820
1821 if (!wait_loop)
1822 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1823 reg_idx);
1824}
1825
1826
1827
1828
1829
1830
1831
1832static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
1833{
1834 u32 *rss_key;
1835
1836 if (!adapter->rss_key) {
1837 rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
1838 if (unlikely(!rss_key))
1839 return -ENOMEM;
1840
1841 netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
1842 adapter->rss_key = rss_key;
1843 }
1844
1845 return 0;
1846}
1847
1848static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1849{
1850 struct ixgbe_hw *hw = &adapter->hw;
1851 u32 vfmrqc = 0, vfreta = 0;
1852 u16 rss_i = adapter->num_rx_queues;
1853 u8 i, j;
1854
1855
1856 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1857 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));
1858
1859 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
1860 if (j == rss_i)
1861 j = 0;
1862
1863 adapter->rss_indir_tbl[i] = j;
1864
1865 vfreta |= j << (i & 0x3) * 8;
1866 if ((i & 3) == 3) {
1867 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1868 vfreta = 0;
1869 }
1870 }
1871
1872
1873 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1874 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1875 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1876 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1877
1878 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1879
1880 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1881}
1882
1883static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1884 struct ixgbevf_ring *ring)
1885{
1886 struct ixgbe_hw *hw = &adapter->hw;
1887 union ixgbe_adv_rx_desc *rx_desc;
1888 u64 rdba = ring->dma;
1889 u32 rxdctl;
1890 u8 reg_idx = ring->reg_idx;
1891
1892
1893 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1894 ixgbevf_disable_rx_queue(adapter, ring);
1895
1896 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1897 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1898 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1899 ring->count * sizeof(union ixgbe_adv_rx_desc));
1900
1901#ifndef CONFIG_SPARC
1902
1903 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1904 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1905#else
1906 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1907 IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1908 IXGBE_DCA_RXCTRL_DATA_WRO_EN);
1909#endif
1910
1911
1912 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1913 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1914 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1915
1916
1917 memset(ring->rx_buffer_info, 0,
1918 sizeof(struct ixgbevf_rx_buffer) * ring->count);
1919
1920
1921 rx_desc = IXGBEVF_RX_DESC(ring, 0);
1922 rx_desc->wb.upper.length = 0;
1923
1924
1925 ring->next_to_clean = 0;
1926 ring->next_to_use = 0;
1927 ring->next_to_alloc = 0;
1928
1929 ixgbevf_configure_srrctl(adapter, ring, reg_idx);
1930
1931
1932 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
1933 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
1934 IXGBE_RXDCTL_RLPML_EN);
1935
1936#if (PAGE_SIZE < 8192)
1937
1938 if (ring_uses_build_skb(ring) &&
1939 !ring_uses_large_buffer(ring))
1940 rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
1941 IXGBE_RXDCTL_RLPML_EN;
1942#endif
1943 }
1944
1945 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1946 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1947
1948 ixgbevf_rx_desc_queue_enable(adapter, ring);
1949 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1950}
1951
1952static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
1953 struct ixgbevf_ring *rx_ring)
1954{
1955 struct net_device *netdev = adapter->netdev;
1956 unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1957
1958
1959 clear_ring_build_skb_enabled(rx_ring);
1960 clear_ring_uses_large_buffer(rx_ring);
1961
1962 if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
1963 return;
1964
1965 set_ring_build_skb_enabled(rx_ring);
1966
1967 if (PAGE_SIZE < 8192) {
1968 if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
1969 return;
1970
1971 set_ring_uses_large_buffer(rx_ring);
1972 }
1973}
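
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/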
1981static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1982{
1983 struct ixgbe_hw *hw = &adapter->hw;
1984 struct net_device *netdev = adapter->netdev;
1985 int i, ret;
1986
1987 ixgbevf_setup_psrtype(adapter);
1988 if (hw->mac.type >= ixgbe_mac_X550_vf)
1989 ixgbevf_setup_vfmrqc(adapter);
1990
1991 spin_lock_bh(&adapter->mbx_lock);
1992
1993 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
1994 spin_unlock_bh(&adapter->mbx_lock);
1995 if (ret)
1996 dev_err(&adapter->pdev->dev,
1997 "Failed to set MTU at %d\n", netdev->mtu);
1998
1999
2000
2001
2002 for (i = 0; i < adapter->num_rx_queues; i++) {
2003 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
2004
2005 ixgbevf_set_rx_buffer_len(adapter, rx_ring);
2006 ixgbevf_configure_rx_ring(adapter, rx_ring);
2007 }
2008}
2009
2010static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
2011 __be16 proto, u16 vid)
2012{
2013 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2014 struct ixgbe_hw *hw = &adapter->hw;
2015 int err;
2016
2017 spin_lock_bh(&adapter->mbx_lock);
2018
2019
2020 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
2021
2022 spin_unlock_bh(&adapter->mbx_lock);
2023
2024
2025 if (err == IXGBE_ERR_MBX)
2026 return -EIO;
2027
2028 if (err == IXGBE_ERR_INVALID_ARGUMENT)
2029 return -EACCES;
2030
2031 set_bit(vid, adapter->active_vlans);
2032
2033 return err;
2034}
2035
2036static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
2037 __be16 proto, u16 vid)
2038{
2039 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2040 struct ixgbe_hw *hw = &adapter->hw;
2041 int err;
2042
2043 spin_lock_bh(&adapter->mbx_lock);
2044
2045
2046 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
2047
2048 spin_unlock_bh(&adapter->mbx_lock);
2049
2050 clear_bit(vid, adapter->active_vlans);
2051
2052 return err;
2053}
2054
2055static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
2056{
2057 u16 vid;
2058
2059 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2060 ixgbevf_vlan_rx_add_vid(adapter->netdev,
2061 htons(ETH_P_8021Q), vid);
2062}
2063
2064static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
2065{
2066 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2067 struct ixgbe_hw *hw = &adapter->hw;
2068 int count = 0;
2069
2070 if ((netdev_uc_count(netdev)) > 10) {
2071 pr_err("Too many unicast filters - No Space\n");
2072 return -ENOSPC;
2073 }
2074
2075 if (!netdev_uc_empty(netdev)) {
2076 struct netdev_hw_addr *ha;
2077
2078 netdev_for_each_uc_addr(ha, netdev) {
2079 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
2080 udelay(200);
2081 }
2082 } else {
2083
2084
2085
2086 hw->mac.ops.set_uc_addr(hw, 0, NULL);
2087 }
2088
2089 return count;
2090}
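
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and promiscuous mode.
 **/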
2101static void ixgbevf_set_rx_mode(struct net_device *netdev)
2102{
2103 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2104 struct ixgbe_hw *hw = &adapter->hw;
2105 unsigned int flags = netdev->flags;
2106 int xcast_mode;
2107
2108
2109 if (flags & IFF_PROMISC)
2110 xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
2111 else if (flags & IFF_ALLMULTI)
2112 xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
2113 else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
2114 xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
2115 else
2116 xcast_mode = IXGBEVF_XCAST_MODE_NONE;
2117
2118 spin_lock_bh(&adapter->mbx_lock);
2119
2120 hw->mac.ops.update_xcast_mode(hw, xcast_mode);
2121
2122
2123 hw->mac.ops.update_mc_addr_list(hw, netdev);
2124
2125 ixgbevf_write_uc_addr_list(netdev);
2126
2127 spin_unlock_bh(&adapter->mbx_lock);
2128}
2129
2130static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
2131{
2132 int q_idx;
2133 struct ixgbevf_q_vector *q_vector;
2134 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2135
2136 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2137 q_vector = adapter->q_vector[q_idx];
2138 napi_enable(&q_vector->napi);
2139 }
2140}
2141
2142static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
2143{
2144 int q_idx;
2145 struct ixgbevf_q_vector *q_vector;
2146 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2147
2148 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2149 q_vector = adapter->q_vector[q_idx];
2150 napi_disable(&q_vector->napi);
2151 }
2152}
2153
2154static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
2155{
2156 struct ixgbe_hw *hw = &adapter->hw;
2157 unsigned int def_q = 0;
2158 unsigned int num_tcs = 0;
2159 unsigned int num_rx_queues = adapter->num_rx_queues;
2160 unsigned int num_tx_queues = adapter->num_tx_queues;
2161 int err;
2162
2163 spin_lock_bh(&adapter->mbx_lock);
2164
2165
2166 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2167
2168 spin_unlock_bh(&adapter->mbx_lock);
2169
2170 if (err)
2171 return err;
2172
2173 if (num_tcs > 1) {
2174
2175 num_tx_queues = 1;
2176
2177
2178 adapter->tx_ring[0]->reg_idx = def_q;
2179
2180
2181 num_rx_queues = num_tcs;
2182 }
2183
2184
2185 if ((adapter->num_rx_queues != num_rx_queues) ||
2186 (adapter->num_tx_queues != num_tx_queues)) {
2187
2188 hw->mbx.timeout = 0;
2189
2190
2191 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
2192 }
2193
2194 return 0;
2195}
2196
2197static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
2198{
2199 ixgbevf_configure_dcb(adapter);
2200
2201 ixgbevf_set_rx_mode(adapter->netdev);
2202
2203 ixgbevf_restore_vlan(adapter);
2204
2205 ixgbevf_configure_tx(adapter);
2206 ixgbevf_configure_rx(adapter);
2207}
2208
2209static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
2210{
2211
2212 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2213 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2214 adapter->stats.base_vfgprc;
2215 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2216 adapter->stats.base_vfgptc;
2217 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2218 adapter->stats.base_vfgorc;
2219 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2220 adapter->stats.base_vfgotc;
2221 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2222 adapter->stats.base_vfmprc;
2223 }
2224}
2225
2226static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2227{
2228 struct ixgbe_hw *hw = &adapter->hw;
2229
2230 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2231 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2232 adapter->stats.last_vfgorc |=
2233 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2234 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2235 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2236 adapter->stats.last_vfgotc |=
2237 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2238 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2239
2240 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2241 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2242 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2243 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2244 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2245}
2246
2247static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2248{
2249 struct ixgbe_hw *hw = &adapter->hw;
2250 int api[] = { ixgbe_mbox_api_13,
2251 ixgbe_mbox_api_12,
2252 ixgbe_mbox_api_11,
2253 ixgbe_mbox_api_10,
2254 ixgbe_mbox_api_unknown };
2255 int err, idx = 0;
2256
2257 spin_lock_bh(&adapter->mbx_lock);
2258
2259 while (api[idx] != ixgbe_mbox_api_unknown) {
2260 err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
2261 if (!err)
2262 break;
2263 idx++;
2264 }
2265
2266 spin_unlock_bh(&adapter->mbx_lock);
2267}
2268
2269static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2270{
2271 struct net_device *netdev = adapter->netdev;
2272 struct ixgbe_hw *hw = &adapter->hw;
2273
2274 ixgbevf_configure_msix(adapter);
2275
2276 spin_lock_bh(&adapter->mbx_lock);
2277
2278 if (is_valid_ether_addr(hw->mac.addr))
2279 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2280 else
2281 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2282
2283 spin_unlock_bh(&adapter->mbx_lock);
2284
2285 smp_mb__before_atomic();
2286 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2287 ixgbevf_napi_enable_all(adapter);
2288
2289
2290 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2291 ixgbevf_irq_enable(adapter);
2292
2293
2294 netif_tx_start_all_queues(netdev);
2295
2296 ixgbevf_save_reset_stats(adapter);
2297 ixgbevf_init_last_counter_stats(adapter);
2298
2299 hw->mac.get_link_status = 1;
2300 mod_timer(&adapter->service_timer, jiffies);
2301}
2302
2303void ixgbevf_up(struct ixgbevf_adapter *adapter)
2304{
2305 ixgbevf_configure(adapter);
2306
2307 ixgbevf_up_complete(adapter);
2308}
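
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/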
2314static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2315{
2316 u16 i = rx_ring->next_to_clean;
2317
2318
2319 if (rx_ring->skb) {
2320 dev_kfree_skb(rx_ring->skb);
2321 rx_ring->skb = NULL;
2322 }
2323
2324
2325 while (i != rx_ring->next_to_alloc) {
2326 struct ixgbevf_rx_buffer *rx_buffer;
2327
2328 rx_buffer = &rx_ring->rx_buffer_info[i];
2329
2330
2331
2332
2333 dma_sync_single_range_for_cpu(rx_ring->dev,
2334 rx_buffer->dma,
2335 rx_buffer->page_offset,
2336 ixgbevf_rx_bufsz(rx_ring),
2337 DMA_FROM_DEVICE);
2338
2339
2340 dma_unmap_page_attrs(rx_ring->dev,
2341 rx_buffer->dma,
2342 ixgbevf_rx_pg_size(rx_ring),
2343 DMA_FROM_DEVICE,
2344 IXGBEVF_RX_DMA_ATTR);
2345
2346 __page_frag_cache_drain(rx_buffer->page,
2347 rx_buffer->pagecnt_bias);
2348
2349 i++;
2350 if (i == rx_ring->count)
2351 i = 0;
2352 }
2353
2354 rx_ring->next_to_alloc = 0;
2355 rx_ring->next_to_clean = 0;
2356 rx_ring->next_to_use = 0;
2357}
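
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/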
2363static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2364{
2365 u16 i = tx_ring->next_to_clean;
2366 struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
2367
2368 while (i != tx_ring->next_to_use) {
2369 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
2370
2371
2372 if (ring_is_xdp(tx_ring))
2373 page_frag_free(tx_buffer->data);
2374 else
2375 dev_kfree_skb_any(tx_buffer->skb);
2376
2377
2378 dma_unmap_single(tx_ring->dev,
2379 dma_unmap_addr(tx_buffer, dma),
2380 dma_unmap_len(tx_buffer, len),
2381 DMA_TO_DEVICE);
2382
2383
2384 eop_desc = tx_buffer->next_to_watch;
2385 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2386
2387
2388 while (tx_desc != eop_desc) {
2389 tx_buffer++;
2390 tx_desc++;
2391 i++;
2392 if (unlikely(i == tx_ring->count)) {
2393 i = 0;
2394 tx_buffer = tx_ring->tx_buffer_info;
2395 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
2396 }
2397
2398
2399 if (dma_unmap_len(tx_buffer, len))
2400 dma_unmap_page(tx_ring->dev,
2401 dma_unmap_addr(tx_buffer, dma),
2402 dma_unmap_len(tx_buffer, len),
2403 DMA_TO_DEVICE);
2404 }
2405
2406
2407 tx_buffer++;
2408 i++;
2409 if (unlikely(i == tx_ring->count)) {
2410 i = 0;
2411 tx_buffer = tx_ring->tx_buffer_info;
2412 }
2413 }
2414
2415
2416 tx_ring->next_to_use = 0;
2417 tx_ring->next_to_clean = 0;
2418
2419}
2420
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
2425static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2426{
2427 int i;
2428
2429 for (i = 0; i < adapter->num_rx_queues; i++)
2430 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2431}
2432
/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
2437static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2438{
2439 int i;
2440
2441 for (i = 0; i < adapter->num_tx_queues; i++)
2442 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2443 for (i = 0; i < adapter->num_xdp_queues; i++)
2444 ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
2445}
2446
2447void ixgbevf_down(struct ixgbevf_adapter *adapter)
2448{
2449 struct net_device *netdev = adapter->netdev;
2450 struct ixgbe_hw *hw = &adapter->hw;
2451 int i;
2452
	/* signal that we are down to the interrupt handler */
2454 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2455 return;
2456
	/* disable all enabled Rx queues */
2458 for (i = 0; i < adapter->num_rx_queues; i++)
2459 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2460
2461 usleep_range(10000, 20000);
2462
2463 netif_tx_stop_all_queues(netdev);
2464
	/* call carrier off first to avoid false dev_watchdog timeouts */
2466 netif_carrier_off(netdev);
2467 netif_tx_disable(netdev);
2468
2469 ixgbevf_irq_disable(adapter);
2470
2471 ixgbevf_napi_disable_all(adapter);
2472
2473 del_timer_sync(&adapter->service_timer);
2474
2475
2476 for (i = 0; i < adapter->num_tx_queues; i++) {
2477 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2478
2479 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2480 IXGBE_TXDCTL_SWFLSH);
2481 }
2482
2483 for (i = 0; i < adapter->num_xdp_queues; i++) {
2484 u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
2485
2486 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2487 IXGBE_TXDCTL_SWFLSH);
2488 }
2489
2490 if (!pci_channel_offline(adapter->pdev))
2491 ixgbevf_reset(adapter);
2492
2493 ixgbevf_clean_all_tx_rings(adapter);
2494 ixgbevf_clean_all_rx_rings(adapter);
2495}
2496
2497void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2498{
2499 WARN_ON(in_interrupt());
2500
2501 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2502 msleep(1);
2503
2504 ixgbevf_down(adapter);
2505 ixgbevf_up(adapter);
2506
2507 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2508}
2509
2510void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2511{
2512 struct ixgbe_hw *hw = &adapter->hw;
2513 struct net_device *netdev = adapter->netdev;
2514
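	/* a non-zero return from reset_hw() means the PF has not finished
	 * its own reset yet, so skip re-initialization for now
	 */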
2515 if (hw->mac.ops.reset_hw(hw)) {
2516 hw_dbg(hw, "PF still resetting\n");
2517 } else {
2518 hw->mac.ops.init_hw(hw);
2519 ixgbevf_negotiate_api(adapter);
2520 }
2521
2522 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2523 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2524 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2525 }
2526
2527 adapter->last_reset = jiffies;
2528}
2529
2530static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2531 int vectors)
2532{
2533 int vector_threshold;
2534
	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
2539 vector_threshold = MIN_MSIX_COUNT;
2540
	/* The more vectors we get, the more we can assign to Tx/Rx cleanup
	 * for the separate queues.  Right now we only care about how many
	 * are actually granted; they are set up later when the IRQs are
	 * requested.  pci_enable_msix_range() returns the granted count or
	 * a negative errno.
	 */
2546 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2547 vector_threshold, vectors);
2548
2549 if (vectors < 0) {
2550 dev_err(&adapter->pdev->dev,
2551 "Unable to allocate MSI-X interrupts\n");
2552 kfree(adapter->msix_entries);
2553 adapter->msix_entries = NULL;
2554 return vectors;
2555 }
2556
2557
2558
2559
2560
2561 adapter->num_msix_vectors = vectors;
2562
2563 return 0;
2564}
2565
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * Start with a single Tx and Rx queue, then ask the PF how many traffic
 * classes and queues this VF owns.  With multiple traffic classes the Rx
 * queue count follows the TC count; otherwise the queue count is sized by
 * RSS, bounded by the number of online CPUs and, when an XDP program is
 * loaded, by the Tx queues left over for the XDP rings.
 **/
2577static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2578{
2579 struct ixgbe_hw *hw = &adapter->hw;
2580 unsigned int def_q = 0;
2581 unsigned int num_tcs = 0;
2582 int err;
2583
2584
2585 adapter->num_rx_queues = 1;
2586 adapter->num_tx_queues = 1;
2587 adapter->num_xdp_queues = 0;
2588
2589 spin_lock_bh(&adapter->mbx_lock);
2590
2591
2592 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2593
2594 spin_unlock_bh(&adapter->mbx_lock);
2595
2596 if (err)
2597 return;
2598
2599
2600 if (num_tcs > 1) {
2601 adapter->num_rx_queues = num_tcs;
2602 } else {
2603 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2604
2605 switch (hw->api_version) {
2606 case ixgbe_mbox_api_11:
2607 case ixgbe_mbox_api_12:
2608 case ixgbe_mbox_api_13:
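			/* when an XDP program is loaded each queue also needs
			 * an XDP Tx ring, so shrink the RSS queue count to fit
			 * within the Tx queues granted by the PF
			 */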
2609 if (adapter->xdp_prog &&
2610 hw->mac.max_tx_queues == rss)
2611 rss = rss > 3 ? 2 : 1;
2612
2613 adapter->num_rx_queues = rss;
2614 adapter->num_tx_queues = rss;
2615 adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
			break;
		default:
2617 break;
2618 }
2619 }
2620}
2621
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * The VF only supports MSI-X interrupts, so size a vector budget from the
 * queue counts (capped at the number of online CPUs) and try to acquire it.
 **/
2629static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2630{
2631 int vector, v_budget;
2632
	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
	 * The default is to use pairs of vectors.
	 */
2639 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2640 v_budget = min_t(int, v_budget, num_online_cpus());
2641 v_budget += NON_Q_VECTORS;
2642
2643 adapter->msix_entries = kcalloc(v_budget,
2644 sizeof(struct msix_entry), GFP_KERNEL);
2645 if (!adapter->msix_entries)
2646 return -ENOMEM;
2647
2648 for (vector = 0; vector < v_budget; vector++)
2649 adapter->msix_entries[vector].entry = vector;
2650
2651
2652
2653
2654
2655 return ixgbevf_acquire_msix_vectors(adapter, v_budget);
2656}
2657
2658static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
2659 struct ixgbevf_ring_container *head)
2660{
2661 ring->next = head->ring;
2662 head->ring = ring;
2663 head->count++;
2664}
2665
/**
 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 * @txr_count: number of Tx rings for q vector
 * @txr_idx: index of first Tx ring to assign
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: number of Rx rings for q vector
 * @rxr_idx: index of first Rx ring to assign
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
2679static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
2680 int txr_count, int txr_idx,
2681 int xdp_count, int xdp_idx,
2682 int rxr_count, int rxr_idx)
2683{
2684 struct ixgbevf_q_vector *q_vector;
2685 int reg_idx = txr_idx + xdp_idx;
2686 struct ixgbevf_ring *ring;
2687 int ring_count, size;
2688
2689 ring_count = txr_count + xdp_count + rxr_count;
2690 size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
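	/* the q_vector and its rings come from a single allocation so the
	 * rings can live directly behind the vector structure
	 */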
2691
2692
2693 q_vector = kzalloc(size, GFP_KERNEL);
2694 if (!q_vector)
2695 return -ENOMEM;
2696
2697
2698 netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
2699
2700
2701 adapter->q_vector[v_idx] = q_vector;
2702 q_vector->adapter = adapter;
2703 q_vector->v_idx = v_idx;
2704
2705
2706 ring = q_vector->ring;
2707
2708 while (txr_count) {
2709
2710 ring->dev = &adapter->pdev->dev;
2711 ring->netdev = adapter->netdev;
2712
2713
2714 ring->q_vector = q_vector;
2715
2716
2717 ixgbevf_add_ring(ring, &q_vector->tx);
2718
2719
2720 ring->count = adapter->tx_ring_count;
2721 ring->queue_index = txr_idx;
2722 ring->reg_idx = reg_idx;
2723
2724
2725 adapter->tx_ring[txr_idx] = ring;
2726
2727
2728 txr_count--;
2729 txr_idx++;
2730 reg_idx++;
2731
2732
2733 ring++;
2734 }
2735
2736 while (xdp_count) {
2737
2738 ring->dev = &adapter->pdev->dev;
2739 ring->netdev = adapter->netdev;
2740
2741
2742 ring->q_vector = q_vector;
2743
2744
2745 ixgbevf_add_ring(ring, &q_vector->tx);
2746
2747
2748 ring->count = adapter->tx_ring_count;
2749 ring->queue_index = xdp_idx;
2750 ring->reg_idx = reg_idx;
2751 set_ring_xdp(ring);
2752
2753
2754 adapter->xdp_ring[xdp_idx] = ring;
2755
2756
2757 xdp_count--;
2758 xdp_idx++;
2759 reg_idx++;
2760
2761
2762 ring++;
2763 }
2764
2765 while (rxr_count) {
2766
2767 ring->dev = &adapter->pdev->dev;
2768 ring->netdev = adapter->netdev;
2769
2770
2771 ring->q_vector = q_vector;
2772
2773
2774 ixgbevf_add_ring(ring, &q_vector->rx);
2775
2776
2777 ring->count = adapter->rx_ring_count;
2778 ring->queue_index = rxr_idx;
2779 ring->reg_idx = rxr_idx;
2780
2781
2782 adapter->rx_ring[rxr_idx] = ring;
2783
2784
2785 rxr_count--;
2786 rxr_idx++;
2787
2788
2789 ring++;
2790 }
2791
2792 return 0;
2793}
2794
/**
 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 *
 * This function frees the memory allocated to the q_vector.  In addition
 * the NAPI context is deleted and the ring pointers in the adapter are
 * cleared before the q_vector itself is released via RCU.
 **/
2804static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
2805{
2806 struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
2807 struct ixgbevf_ring *ring;
2808
2809 ixgbevf_for_each_ring(ring, q_vector->tx) {
2810 if (ring_is_xdp(ring))
2811 adapter->xdp_ring[ring->queue_index] = NULL;
2812 else
2813 adapter->tx_ring[ring->queue_index] = NULL;
2814 }
2815
2816 ixgbevf_for_each_ring(ring, q_vector->rx)
2817 adapter->rx_ring[ring->queue_index] = NULL;
2818
2819 adapter->q_vector[v_idx] = NULL;
2820 netif_napi_del(&q_vector->napi);
2821
2822
2823
2824
2825 kfree_rcu(q_vector, rcu);
2826}
2827
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
2835static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2836{
2837 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2838 int rxr_remaining = adapter->num_rx_queues;
2839 int txr_remaining = adapter->num_tx_queues;
2840 int xdp_remaining = adapter->num_xdp_queues;
2841 int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
2842 int err;
2843
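	/* when there are enough vectors for every ring, give each Rx ring a
	 * dedicated vector first; any remaining vectors then share Tx, XDP
	 * and leftover Rx rings as evenly as possible
	 */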
2844 if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
2845 for (; rxr_remaining; v_idx++, q_vectors--) {
2846 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2847
2848 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2849 0, 0, 0, 0, rqpv, rxr_idx);
2850 if (err)
2851 goto err_out;
2852
2853
2854 rxr_remaining -= rqpv;
2855 rxr_idx += rqpv;
2856 }
2857 }
2858
2859 for (; q_vectors; v_idx++, q_vectors--) {
2860 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2861 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
2862 int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors);
2863
2864 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2865 tqpv, txr_idx,
2866 xqpv, xdp_idx,
2867 rqpv, rxr_idx);
2868
2869 if (err)
2870 goto err_out;
2871
2872
2873 rxr_remaining -= rqpv;
2874 rxr_idx += rqpv;
2875 txr_remaining -= tqpv;
2876 txr_idx += tqpv;
2877 xdp_remaining -= xqpv;
2878 xdp_idx += xqpv;
2879 }
2880
2881 return 0;
2882
2883err_out:
2884 while (v_idx) {
2885 v_idx--;
2886 ixgbevf_free_q_vector(adapter, v_idx);
2887 }
2888
2889 return -ENOMEM;
2890}
2891
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors, deleting any
 * NAPI references before each q_vector is released.
 **/
2900static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2901{
2902 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2903
2904 while (q_vectors) {
2905 q_vectors--;
2906 ixgbevf_free_q_vector(adapter, q_vectors);
2907 }
2908}
2909
2910
2911
2912
2913
2914
2915static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2916{
2917 if (!adapter->msix_entries)
2918 return;
2919
2920 pci_disable_msix(adapter->pdev);
2921 kfree(adapter->msix_entries);
2922 adapter->msix_entries = NULL;
2923}
2924
2925
2926
2927
2928
2929
2930static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2931{
2932 int err;
2933
2934
2935 ixgbevf_set_num_queues(adapter);
2936
2937 err = ixgbevf_set_interrupt_capability(adapter);
2938 if (err) {
2939 hw_dbg(&adapter->hw,
2940 "Unable to setup interrupt capabilities\n");
2941 goto err_set_interrupt;
2942 }
2943
2944 err = ixgbevf_alloc_q_vectors(adapter);
2945 if (err) {
2946 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2947 goto err_alloc_q_vectors;
2948 }
2949
2950 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n",
2951 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
2952 adapter->num_rx_queues, adapter->num_tx_queues,
2953 adapter->num_xdp_queues);
2954
2955 set_bit(__IXGBEVF_DOWN, &adapter->state);
2956
2957 return 0;
2958err_alloc_q_vectors:
2959 ixgbevf_reset_interrupt_capability(adapter);
2960err_set_interrupt:
2961 return err;
2962}
2963
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the
 * structure to pre-load conditions.
 **/
2971static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2972{
2973 adapter->num_tx_queues = 0;
2974 adapter->num_xdp_queues = 0;
2975 adapter->num_rx_queues = 0;
2976
2977 ixgbevf_free_q_vectors(adapter);
2978 ixgbevf_reset_interrupt_capability(adapter);
2979}
2980
/**
 * ixgbevf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
2989static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2990{
2991 struct ixgbe_hw *hw = &adapter->hw;
2992 struct pci_dev *pdev = adapter->pdev;
2993 struct net_device *netdev = adapter->netdev;
2994 int err;
2995
2996
2997 hw->vendor_id = pdev->vendor;
2998 hw->device_id = pdev->device;
2999 hw->revision_id = pdev->revision;
3000 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3001 hw->subsystem_device_id = pdev->subsystem_device;
3002
3003 hw->mbx.ops.init_params(hw);
3004
3005 if (hw->mac.type >= ixgbe_mac_X550_vf) {
3006 err = ixgbevf_init_rss_key(adapter);
3007 if (err)
3008 goto out;
3009 }
3010
	/* assume legacy case in which PF would only give VF 2 queues */
3012 hw->mac.max_tx_queues = 2;
3013 hw->mac.max_rx_queues = 2;
3014
3015
3016 spin_lock_init(&adapter->mbx_lock);
3017
3018 err = hw->mac.ops.reset_hw(hw);
3019 if (err) {
3020 dev_info(&pdev->dev,
3021 "PF still in reset state. Is the PF interface up?\n");
3022 } else {
3023 err = hw->mac.ops.init_hw(hw);
3024 if (err) {
3025 pr_err("init_shared_code failed: %d\n", err);
3026 goto out;
3027 }
3028 ixgbevf_negotiate_api(adapter);
3029 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
3030 if (err)
3031 dev_info(&pdev->dev, "Error reading MAC address\n");
3032 else if (is_zero_ether_addr(adapter->hw.mac.addr))
3033 dev_info(&pdev->dev,
3034 "MAC address not assigned by administrator.\n");
3035 ether_addr_copy(netdev->dev_addr, hw->mac.addr);
3036 }
3037
3038 if (!is_valid_ether_addr(netdev->dev_addr)) {
3039 dev_info(&pdev->dev, "Assigning random MAC address\n");
3040 eth_hw_addr_random(netdev);
3041 ether_addr_copy(hw->mac.addr, netdev->dev_addr);
3042 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
3043 }
3044
3045
3046 adapter->rx_itr_setting = 1;
3047 adapter->tx_itr_setting = 1;
3048
3049
3050 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
3051 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
3052
3053 set_bit(__IXGBEVF_DOWN, &adapter->state);
3054 return 0;
3055
3056out:
3057 return err;
3058}
3059
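/* The VF statistics registers are free-running counters that the hardware
 * never clears: 32 bits wide, or 36 bits split across an LSB/MSB pair.
 * These helpers extend them to 64-bit software counters by detecting a
 * wrap (current value smaller than the last snapshot) and carrying into
 * the upper bits before merging in the fresh hardware value.
 */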
3060#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
3061 { \
3062 u32 current_counter = IXGBE_READ_REG(hw, reg); \
3063 if (current_counter < last_counter) \
3064 counter += 0x100000000LL; \
3065 last_counter = current_counter; \
3066 counter &= 0xFFFFFFFF00000000LL; \
3067 counter |= current_counter; \
3068 }
3069
3070#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
3071 { \
3072 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
3073 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
3074 u64 current_counter = (current_counter_msb << 32) | \
3075 current_counter_lsb; \
3076 if (current_counter < last_counter) \
3077 counter += 0x1000000000LL; \
3078 last_counter = current_counter; \
3079 counter &= 0xFFFFFFF000000000LL; \
3080 counter |= current_counter; \
3081 }
3082
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
3086void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
3087{
3088 struct ixgbe_hw *hw = &adapter->hw;
3089 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
3090 u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
3091 int i;
3092
3093 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3094 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3095 return;
3096
3097 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3098 adapter->stats.vfgprc);
3099 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3100 adapter->stats.vfgptc);
3101 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3102 adapter->stats.last_vfgorc,
3103 adapter->stats.vfgorc);
3104 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3105 adapter->stats.last_vfgotc,
3106 adapter->stats.vfgotc);
3107 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3108 adapter->stats.vfmprc);
3109
3110 for (i = 0; i < adapter->num_rx_queues; i++) {
3111 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
3112
3113 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
3114 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
3115 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
3116 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
3117 }
3118
3119 adapter->hw_csum_rx_error = hw_csum_rx_error;
3120 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
3121 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
3122 adapter->alloc_rx_page = alloc_rx_page;
3123}
3124
3125
3126
3127
3128
3129static void ixgbevf_service_timer(struct timer_list *t)
3130{
3131 struct ixgbevf_adapter *adapter = from_timer(adapter, t,
3132 service_timer);
3133
3134
3135 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
3136
3137 ixgbevf_service_event_schedule(adapter);
3138}
3139
3140static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
3141{
3142 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
3143 return;
3144
3145
3146 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3147 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
3148 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3149 return;
3150
3151 adapter->tx_timeout_count++;
3152
3153 rtnl_lock();
3154 ixgbevf_reinit_locked(adapter);
3155 rtnl_unlock();
3156}
3157
/**
 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for Tx hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/
3167static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
3168{
3169 struct ixgbe_hw *hw = &adapter->hw;
3170 u32 eics = 0;
3171 int i;
3172
3173
3174 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3175 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3176 return;
3177
3178
3179 if (netif_carrier_ok(adapter->netdev)) {
3180 for (i = 0; i < adapter->num_tx_queues; i++)
3181 set_check_for_tx_hang(adapter->tx_ring[i]);
3182 for (i = 0; i < adapter->num_xdp_queues; i++)
3183 set_check_for_tx_hang(adapter->xdp_ring[i]);
3184 }
3185
3186
3187 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
3188 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
3189
3190 if (qv->rx.ring || qv->tx.ring)
3191 eics |= BIT(i);
3192 }
3193
3194
3195 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
3196}
3197
3198
3199
3200
3201
3202static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
3203{
3204 struct ixgbe_hw *hw = &adapter->hw;
3205 u32 link_speed = adapter->link_speed;
3206 bool link_up = adapter->link_up;
3207 s32 err;
3208
3209 spin_lock_bh(&adapter->mbx_lock);
3210
3211 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3212
3213 spin_unlock_bh(&adapter->mbx_lock);
3214
	/* if check for link returns error we will need to reset */
3216 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
3217 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
3218 link_up = false;
3219 }
3220
3221 adapter->link_up = link_up;
3222 adapter->link_speed = link_speed;
3223}
3224
3225
3226
3227
3228
3229
3230static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
3231{
3232 struct net_device *netdev = adapter->netdev;
3233
3234
3235 if (netif_carrier_ok(netdev))
3236 return;
3237
3238 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
3239 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
3240 "10 Gbps" :
3241 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
3242 "1 Gbps" :
3243 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
3244 "100 Mbps" :
3245 "unknown speed");
3246
3247 netif_carrier_on(netdev);
3248}
3249
3250
3251
3252
3253
3254
3255static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
3256{
3257 struct net_device *netdev = adapter->netdev;
3258
3259 adapter->link_speed = 0;
3260
3261
3262 if (!netif_carrier_ok(netdev))
3263 return;
3264
3265 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
3266
3267 netif_carrier_off(netdev);
3268}
3269
3270
3271
3272
3273
3274static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
3275{
3276
3277 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3278 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3279 return;
3280
3281 ixgbevf_watchdog_update_link(adapter);
3282
3283 if (adapter->link_up)
3284 ixgbevf_watchdog_link_is_up(adapter);
3285 else
3286 ixgbevf_watchdog_link_is_down(adapter);
3287
3288 ixgbevf_update_stats(adapter);
3289}
3290
3291
3292
3293
3294
3295static void ixgbevf_service_task(struct work_struct *work)
3296{
3297 struct ixgbevf_adapter *adapter = container_of(work,
3298 struct ixgbevf_adapter,
3299 service_task);
3300 struct ixgbe_hw *hw = &adapter->hw;
3301
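	/* if the device has been removed (surprise removal or a fatal PCI
	 * error) just make sure the interface is down and bail out
	 */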
3302 if (IXGBE_REMOVED(hw->hw_addr)) {
3303 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
3304 rtnl_lock();
3305 ixgbevf_down(adapter);
3306 rtnl_unlock();
3307 }
3308 return;
3309 }
3310
3311 ixgbevf_queue_reset_subtask(adapter);
3312 ixgbevf_reset_subtask(adapter);
3313 ixgbevf_watchdog_subtask(adapter);
3314 ixgbevf_check_hang_subtask(adapter);
3315
3316 ixgbevf_service_event_complete(adapter);
3317}
3318
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
3325void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
3326{
3327 ixgbevf_clean_tx_ring(tx_ring);
3328
3329 vfree(tx_ring->tx_buffer_info);
3330 tx_ring->tx_buffer_info = NULL;
3331
3332
3333 if (!tx_ring->desc)
3334 return;
3335
3336 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
3337 tx_ring->dma);
3338
3339 tx_ring->desc = NULL;
3340}
3341
3342
3343
3344
3345
3346
3347
3348static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
3349{
3350 int i;
3351
3352 for (i = 0; i < adapter->num_tx_queues; i++)
3353 if (adapter->tx_ring[i]->desc)
3354 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3355 for (i = 0; i < adapter->num_xdp_queues; i++)
3356 if (adapter->xdp_ring[i]->desc)
3357 ixgbevf_free_tx_resources(adapter->xdp_ring[i]);
3358}
3359
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
3366int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
3367{
3368 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3369 int size;
3370
3371 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
3372 tx_ring->tx_buffer_info = vmalloc(size);
3373 if (!tx_ring->tx_buffer_info)
3374 goto err;
3375
3376 u64_stats_init(&tx_ring->syncp);
3377
3378
3379 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
3380 tx_ring->size = ALIGN(tx_ring->size, 4096);
3381
3382 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
3383 &tx_ring->dma, GFP_KERNEL);
3384 if (!tx_ring->desc)
3385 goto err;
3386
3387 return 0;
3388
3389err:
3390 vfree(tx_ring->tx_buffer_info);
3391 tx_ring->tx_buffer_info = NULL;
3392 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
3393 return -ENOMEM;
3394}
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3407{
3408 int i, j = 0, err = 0;
3409
3410 for (i = 0; i < adapter->num_tx_queues; i++) {
3411 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3412 if (!err)
3413 continue;
3414 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3415 goto err_setup_tx;
3416 }
3417
3418 for (j = 0; j < adapter->num_xdp_queues; j++) {
3419 err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]);
3420 if (!err)
3421 continue;
3422 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
3423 goto err_setup_tx;
3424 }
3425
3426 return 0;
3427err_setup_tx:
3428
3429 while (j--)
3430 ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
3431 while (i--)
3432 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3433
3434 return err;
3435}
3436
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
3444int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
3445 struct ixgbevf_ring *rx_ring)
3446{
3447 int size;
3448
3449 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3450 rx_ring->rx_buffer_info = vmalloc(size);
3451 if (!rx_ring->rx_buffer_info)
3452 goto err;
3453
3454 u64_stats_init(&rx_ring->syncp);
3455
3456
3457 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3458 rx_ring->size = ALIGN(rx_ring->size, 4096);
3459
3460 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3461 &rx_ring->dma, GFP_KERNEL);
3462
3463 if (!rx_ring->desc)
3464 goto err;
3465
3466
3467 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
3468 rx_ring->queue_index) < 0)
3469 goto err;
3470
3471 rx_ring->xdp_prog = adapter->xdp_prog;
3472
3473 return 0;
3474err:
3475 vfree(rx_ring->rx_buffer_info);
3476 rx_ring->rx_buffer_info = NULL;
3477 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3478 return -ENOMEM;
3479}
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3492{
3493 int i, err = 0;
3494
3495 for (i = 0; i < adapter->num_rx_queues; i++) {
3496 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
3497 if (!err)
3498 continue;
3499 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3500 goto err_setup_rx;
3501 }
3502
3503 return 0;
3504err_setup_rx:
3505
3506 while (i--)
3507 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3508 return err;
3509}
3510
3511
3512
3513
3514
3515
3516
3517void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3518{
3519 ixgbevf_clean_rx_ring(rx_ring);
3520
3521 rx_ring->xdp_prog = NULL;
3522 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
3523 vfree(rx_ring->rx_buffer_info);
3524 rx_ring->rx_buffer_info = NULL;
3525
3526 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3527 rx_ring->dma);
3528
3529 rx_ring->desc = NULL;
3530}
3531
3532
3533
3534
3535
3536
3537
3538static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3539{
3540 int i;
3541
3542 for (i = 0; i < adapter->num_rx_queues; i++)
3543 if (adapter->rx_ring[i]->desc)
3544 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3545}
3546
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
3559int ixgbevf_open(struct net_device *netdev)
3560{
3561 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3562 struct ixgbe_hw *hw = &adapter->hw;
3563 int err;
3564
3565
3566
3567
3568
3569
3570
3571 if (!adapter->num_msix_vectors)
3572 return -ENOMEM;
3573
3574 if (hw->adapter_stopped) {
3575 ixgbevf_reset(adapter);
3576
3577
3578
3579 if (hw->adapter_stopped) {
3580 err = IXGBE_ERR_MBX;
3581 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3582 goto err_setup_reset;
3583 }
3584 }
3585
3586
3587 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3588 return -EBUSY;
3589
3590 netif_carrier_off(netdev);
3591
3592
3593 err = ixgbevf_setup_all_tx_resources(adapter);
3594 if (err)
3595 goto err_setup_tx;
3596
3597
3598 err = ixgbevf_setup_all_rx_resources(adapter);
3599 if (err)
3600 goto err_setup_rx;
3601
3602 ixgbevf_configure(adapter);
3603
3604 err = ixgbevf_request_irq(adapter);
3605 if (err)
3606 goto err_req_irq;
3607
3608
3609 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
3610 if (err)
3611 goto err_set_queues;
3612
3613 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
3614 if (err)
3615 goto err_set_queues;
3616
3617 ixgbevf_up_complete(adapter);
3618
3619 return 0;
3620
3621err_set_queues:
3622 ixgbevf_free_irq(adapter);
3623err_req_irq:
3624 ixgbevf_free_all_rx_resources(adapter);
3625err_setup_rx:
3626 ixgbevf_free_all_tx_resources(adapter);
3627err_setup_tx:
3628 ixgbevf_reset(adapter);
3629err_setup_reset:
3630
3631 return err;
3632}
3633
3634
3635
3636
3637
3638
3639
3640
3641static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
3642{
3643 ixgbevf_down(adapter);
3644 ixgbevf_free_irq(adapter);
3645 ixgbevf_free_all_tx_resources(adapter);
3646 ixgbevf_free_all_rx_resources(adapter);
3647}
3648
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control at this
 * point; the data path is stopped and all transmit and receive
 * resources are freed.
 **/
3660int ixgbevf_close(struct net_device *netdev)
3661{
3662 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3663
3664 if (netif_device_present(netdev))
3665 ixgbevf_close_suspend(adapter);
3666
3667 return 0;
3668}
3669
3670static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3671{
3672 struct net_device *dev = adapter->netdev;
3673
3674 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
3675 &adapter->state))
3676 return;
3677
3678
3679 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3680 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3681 return;
3682
3683
3684
3685
3686
3687 rtnl_lock();
3688
3689 if (netif_running(dev))
3690 ixgbevf_close(dev);
3691
3692 ixgbevf_clear_interrupt_scheme(adapter);
3693 ixgbevf_init_interrupt_scheme(adapter);
3694
3695 if (netif_running(dev))
3696 ixgbevf_open(dev);
3697
3698 rtnl_unlock();
3699}
3700
3701static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3702 u32 vlan_macip_lens, u32 type_tucmd,
3703 u32 mss_l4len_idx)
3704{
3705 struct ixgbe_adv_tx_context_desc *context_desc;
3706 u16 i = tx_ring->next_to_use;
3707
3708 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3709
3710 i++;
3711 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3712
	/* set bits to identify this as an advanced context descriptor */
3714 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3715
3716 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3717 context_desc->seqnum_seed = 0;
3718 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3719 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3720}
3721
3722static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3723 struct ixgbevf_tx_buffer *first,
3724 u8 *hdr_len)
3725{
3726 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
3727 struct sk_buff *skb = first->skb;
3728 union {
3729 struct iphdr *v4;
3730 struct ipv6hdr *v6;
3731 unsigned char *hdr;
3732 } ip;
3733 union {
3734 struct tcphdr *tcp;
3735 unsigned char *hdr;
3736 } l4;
3737 u32 paylen, l4_offset;
3738 int err;
3739
3740 if (skb->ip_summed != CHECKSUM_PARTIAL)
3741 return 0;
3742
3743 if (!skb_is_gso(skb))
3744 return 0;
3745
3746 err = skb_cow_head(skb, 0);
3747 if (err < 0)
3748 return err;
3749
3750 if (eth_p_mpls(first->protocol))
3751 ip.hdr = skb_inner_network_header(skb);
3752 else
3753 ip.hdr = skb_network_header(skb);
3754 l4.hdr = skb_checksum_start(skb);
3755
3756
3757 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3758
3759
3760 if (ip.v4->version == 4) {
3761 unsigned char *csum_start = skb_checksum_start(skb);
3762 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
3767 ip.v4->check = csum_fold(csum_partial(trans_start,
3768 csum_start - trans_start,
3769 0));
3770 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3771
3772 ip.v4->tot_len = 0;
3773 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3774 IXGBE_TX_FLAGS_CSUM |
3775 IXGBE_TX_FLAGS_IPV4;
3776 } else {
3777 ip.v6->payload_len = 0;
3778 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3779 IXGBE_TX_FLAGS_CSUM;
3780 }
3781
	/* determine offset of inner transport header */
3783 l4_offset = l4.hdr - skb->data;
3784
	/* compute length of segmentation header */
3786 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3787
	/* remove payload length from inner checksum */
3789 paylen = skb->len - l4_offset;
3790 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
3791
3792
3793 first->gso_segs = skb_shinfo(skb)->gso_segs;
3794 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3795
3796
3797 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
3798 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3799 mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
3800
3801
3802 vlan_macip_lens = l4.hdr - ip.hdr;
3803 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
3804 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3805
3806 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3807 type_tucmd, mss_l4len_idx);
3808
3809 return 1;
3810}
3811
3812static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
3813{
3814 unsigned int offset = 0;
3815
3816 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
3817
3818 return offset == skb_checksum_start_offset(skb);
3819}
3820
3821static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3822 struct ixgbevf_tx_buffer *first)
3823{
3824 struct sk_buff *skb = first->skb;
3825 u32 vlan_macip_lens = 0;
3826 u32 type_tucmd = 0;
3827
3828 if (skb->ip_summed != CHECKSUM_PARTIAL)
3829 goto no_csum;
3830
3831 switch (skb->csum_offset) {
3832 case offsetof(struct tcphdr, check):
3833 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
		/* fall through */
3835 case offsetof(struct udphdr, check):
3836 break;
3837 case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
3839 if (((first->protocol == htons(ETH_P_IP)) &&
3840 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
3841 ((first->protocol == htons(ETH_P_IPV6)) &&
3842 ixgbevf_ipv6_csum_is_sctp(skb))) {
3843 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3844 break;
3845 }
		/* fall through */
3847 default:
3848 skb_checksum_help(skb);
3849 goto no_csum;
3850 }
3851
3852 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3853 vlan_macip_lens = skb_checksum_start_offset(skb) -
3854 skb_network_offset(skb);
3855no_csum:
3856
3857 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3858 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3859
3860 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
3861}
3862
3863static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3864{
3865
3866 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3867 IXGBE_ADVTXD_DCMD_IFCS |
3868 IXGBE_ADVTXD_DCMD_DEXT);
3869
3870
3871 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3872 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3873
3874
3875 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3876 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3877
3878 return cmd_type;
3879}
3880
3881static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3882 u32 tx_flags, unsigned int paylen)
3883{
3884 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3885
3886
3887 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3888 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3889
3890
3891 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3892 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3893
3894
3895 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3896 olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
3897
	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
3901 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3902
3903 tx_desc->read.olinfo_status = olinfo_status;
3904}
3905
3906static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3907 struct ixgbevf_tx_buffer *first,
3908 const u8 hdr_len)
3909{
3910 struct sk_buff *skb = first->skb;
3911 struct ixgbevf_tx_buffer *tx_buffer;
3912 union ixgbe_adv_tx_desc *tx_desc;
3913 struct skb_frag_struct *frag;
3914 dma_addr_t dma;
3915 unsigned int data_len, size;
3916 u32 tx_flags = first->tx_flags;
3917 __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3918 u16 i = tx_ring->next_to_use;
3919
3920 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3921
3922 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
3923
3924 size = skb_headlen(skb);
3925 data_len = skb->data_len;
3926
3927 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3928
3929 tx_buffer = first;
3930
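	/* map the linear portion of the skb first, then every page fragment,
	 * splitting any piece larger than IXGBE_MAX_DATA_PER_TXD across
	 * multiple descriptors
	 */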
3931 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3932 if (dma_mapping_error(tx_ring->dev, dma))
3933 goto dma_error;
3934
3935
3936 dma_unmap_len_set(tx_buffer, len, size);
3937 dma_unmap_addr_set(tx_buffer, dma, dma);
3938
3939 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3940
3941 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3942 tx_desc->read.cmd_type_len =
3943 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3944
3945 i++;
3946 tx_desc++;
3947 if (i == tx_ring->count) {
3948 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3949 i = 0;
3950 }
3951 tx_desc->read.olinfo_status = 0;
3952
3953 dma += IXGBE_MAX_DATA_PER_TXD;
3954 size -= IXGBE_MAX_DATA_PER_TXD;
3955
3956 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3957 }
3958
3959 if (likely(!data_len))
3960 break;
3961
3962 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
3963
3964 i++;
3965 tx_desc++;
3966 if (i == tx_ring->count) {
3967 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3968 i = 0;
3969 }
3970 tx_desc->read.olinfo_status = 0;
3971
3972 size = skb_frag_size(frag);
3973 data_len -= size;
3974
3975 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3976 DMA_TO_DEVICE);
3977
3978 tx_buffer = &tx_ring->tx_buffer_info[i];
3979 }
3980
3981
3982 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
3983 tx_desc->read.cmd_type_len = cmd_type;
3984
3985
3986 first->time_stamp = jiffies;
3987
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier (wmb) to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
3995 wmb();
3996
3997
3998 first->next_to_watch = tx_desc;
3999
4000 i++;
4001 if (i == tx_ring->count)
4002 i = 0;
4003
4004 tx_ring->next_to_use = i;
4005
4006
4007 ixgbevf_write_tail(tx_ring, i);
4008
4009 return;
4010dma_error:
4011 dev_err(tx_ring->dev, "TX DMA map failed\n");
4012 tx_buffer = &tx_ring->tx_buffer_info[i];
4013
4014
4015 while (tx_buffer != first) {
4016 if (dma_unmap_len(tx_buffer, len))
4017 dma_unmap_page(tx_ring->dev,
4018 dma_unmap_addr(tx_buffer, dma),
4019 dma_unmap_len(tx_buffer, len),
4020 DMA_TO_DEVICE);
4021 dma_unmap_len_set(tx_buffer, len, 0);
4022
4023 if (i-- == 0)
4024 i += tx_ring->count;
4025 tx_buffer = &tx_ring->tx_buffer_info[i];
4026 }
4027
4028 if (dma_unmap_len(tx_buffer, len))
4029 dma_unmap_single(tx_ring->dev,
4030 dma_unmap_addr(tx_buffer, dma),
4031 dma_unmap_len(tx_buffer, len),
4032 DMA_TO_DEVICE);
4033 dma_unmap_len_set(tx_buffer, len, 0);
4034
4035 dev_kfree_skb_any(tx_buffer->skb);
4036 tx_buffer->skb = NULL;
4037
4038 tx_ring->next_to_use = i;
4039}
4040
4041static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4042{
4043 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* make sure the queue-stopped state is visible before we re-check for
	 * free descriptors below; pairs with the barrier in the Tx cleanup
	 * path that frees descriptors and may restart the queue
	 */
4048 smp_mb();
4049
	/* We need to check again in case another CPU has just
	 * made room available.
	 */
4053 if (likely(ixgbevf_desc_unused(tx_ring) < size))
4054 return -EBUSY;
4055
	/* A reprieve! - use start_queue because it doesn't call schedule */
4057 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
4058 ++tx_ring->tx_stats.restart_queue;
4059
4060 return 0;
4061}
4062
4063static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4064{
4065 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
4066 return 0;
4067 return __ixgbevf_maybe_stop_tx(tx_ring, size);
4068}
4069
4070static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
4071 struct ixgbevf_ring *tx_ring)
4072{
4073 struct ixgbevf_tx_buffer *first;
4074 int tso;
4075 u32 tx_flags = 0;
4076 u16 count = TXD_USE_COUNT(skb_headlen(skb));
4077#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4078 unsigned short f;
4079#endif
4080 u8 hdr_len = 0;
4081 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
4082
4083 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
4084 dev_kfree_skb_any(skb);
4085 return NETDEV_TX_OK;
4086 }
4087
	/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
4094#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4095 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
4096 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
4097#else
4098 count += skb_shinfo(skb)->nr_frags;
4099#endif
4100 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
4101 tx_ring->tx_stats.tx_busy++;
4102 return NETDEV_TX_BUSY;
4103 }
4104
4105
4106 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4107 first->skb = skb;
4108 first->bytecount = skb->len;
4109 first->gso_segs = 1;
4110
4111 if (skb_vlan_tag_present(skb)) {
4112 tx_flags |= skb_vlan_tag_get(skb);
4113 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
4114 tx_flags |= IXGBE_TX_FLAGS_VLAN;
4115 }
4116
4117
4118 first->tx_flags = tx_flags;
4119 first->protocol = vlan_get_protocol(skb);
4120
4121 tso = ixgbevf_tso(tx_ring, first, &hdr_len);
4122 if (tso < 0)
4123 goto out_drop;
4124 else if (!tso)
4125 ixgbevf_tx_csum(tx_ring, first);
4126
4127 ixgbevf_tx_map(tx_ring, first, hdr_len);
4128
4129 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
4130
4131 return NETDEV_TX_OK;
4132
4133out_drop:
4134 dev_kfree_skb_any(first->skb);
4135 first->skb = NULL;
4136
4137 return NETDEV_TX_OK;
4138}
4139
4140static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4141{
4142 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4143 struct ixgbevf_ring *tx_ring;
4144
4145 if (skb->len <= 0) {
4146 dev_kfree_skb_any(skb);
4147 return NETDEV_TX_OK;
4148 }
4149
	/* The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
4153 if (skb->len < 17) {
4154 if (skb_padto(skb, 17))
4155 return NETDEV_TX_OK;
4156 skb->len = 17;
4157 }
4158
4159 tx_ring = adapter->tx_ring[skb->queue_mapping];
4160 return ixgbevf_xmit_frame_ring(skb, tx_ring);
4161}
4162
4163
4164
4165
4166
4167
4168
4169
4170static int ixgbevf_set_mac(struct net_device *netdev, void *p)
4171{
4172 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4173 struct ixgbe_hw *hw = &adapter->hw;
4174 struct sockaddr *addr = p;
4175 int err;
4176
4177 if (!is_valid_ether_addr(addr->sa_data))
4178 return -EADDRNOTAVAIL;
4179
4180 spin_lock_bh(&adapter->mbx_lock);
4181
4182 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
4183
4184 spin_unlock_bh(&adapter->mbx_lock);
4185
4186 if (err)
4187 return -EPERM;
4188
4189 ether_addr_copy(hw->mac.addr, addr->sa_data);
4190 ether_addr_copy(netdev->dev_addr, addr->sa_data);
4191
4192 return 0;
4193}
4194
4195
4196
4197
4198
4199
4200
4201
4202static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
4203{
4204 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4205 struct ixgbe_hw *hw = &adapter->hw;
4206 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4207 int ret;
4208
4209
4210 if (adapter->xdp_prog) {
4211 dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n");
4212 return -EPERM;
4213 }
4214
4215 spin_lock_bh(&adapter->mbx_lock);
	/* notify the PF of our intent to use this size of frame */
4217 ret = hw->mac.ops.set_rlpml(hw, max_frame);
4218 spin_unlock_bh(&adapter->mbx_lock);
4219 if (ret)
4220 return -EINVAL;
4221
4222 hw_dbg(hw, "changing MTU from %d to %d\n",
4223 netdev->mtu, new_mtu);
4224
4225
4226 netdev->mtu = new_mtu;
4227
4228 if (netif_running(netdev))
4229 ixgbevf_reinit_locked(adapter);
4230
4231 return 0;
4232}
4233
4234#ifdef CONFIG_NET_POLL_CONTROLLER
4235
4236
4237
4238
4239static void ixgbevf_netpoll(struct net_device *netdev)
4240{
4241 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4242 int i;
4243
4244
4245 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
4246 return;
4247 for (i = 0; i < adapter->num_rx_queues; i++)
4248 ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
4249}
4250#endif
4251
4252static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
4253{
4254 struct net_device *netdev = pci_get_drvdata(pdev);
4255 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4256#ifdef CONFIG_PM
4257 int retval = 0;
4258#endif
4259
4260 rtnl_lock();
4261 netif_device_detach(netdev);
4262
4263 if (netif_running(netdev))
4264 ixgbevf_close_suspend(adapter);
4265
4266 ixgbevf_clear_interrupt_scheme(adapter);
4267 rtnl_unlock();
4268
4269#ifdef CONFIG_PM
4270 retval = pci_save_state(pdev);
4271 if (retval)
4272 return retval;
4273
4274#endif
4275 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4276 pci_disable_device(pdev);
4277
4278 return 0;
4279}
4280
4281#ifdef CONFIG_PM
4282static int ixgbevf_resume(struct pci_dev *pdev)
4283{
4284 struct net_device *netdev = pci_get_drvdata(pdev);
4285 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4286 u32 err;
4287
4288 pci_restore_state(pdev);
4289
4290
4291
4292 pci_save_state(pdev);
4293
4294 err = pci_enable_device_mem(pdev);
4295 if (err) {
4296 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
4297 return err;
4298 }
4299
4300 adapter->hw.hw_addr = adapter->io_addr;
4301 smp_mb__before_atomic();
4302 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4303 pci_set_master(pdev);
4304
4305 ixgbevf_reset(adapter);
4306
4307 rtnl_lock();
4308 err = ixgbevf_init_interrupt_scheme(adapter);
4309 if (!err && netif_running(netdev))
4310 err = ixgbevf_open(netdev);
4311 rtnl_unlock();
4312 if (err)
4313 return err;
4314
4315 netif_device_attach(netdev);
4316
4317 return err;
4318}
4319
4320#endif
4321static void ixgbevf_shutdown(struct pci_dev *pdev)
4322{
4323 ixgbevf_suspend(pdev, PMSG_SUSPEND);
4324}
4325
4326static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
4327 const struct ixgbevf_ring *ring)
4328{
4329 u64 bytes, packets;
4330 unsigned int start;
4331
4332 if (ring) {
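		/* read under the u64_stats seqcount retry loop so the 64-bit
		 * byte/packet counters are consistent on 32-bit systems
		 */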
4333 do {
4334 start = u64_stats_fetch_begin_irq(&ring->syncp);
4335 bytes = ring->stats.bytes;
4336 packets = ring->stats.packets;
4337 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4338 stats->tx_bytes += bytes;
4339 stats->tx_packets += packets;
4340 }
4341}
4342
4343static void ixgbevf_get_stats(struct net_device *netdev,
4344 struct rtnl_link_stats64 *stats)
4345{
4346 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4347 unsigned int start;
4348 u64 bytes, packets;
4349 const struct ixgbevf_ring *ring;
4350 int i;
4351
4352 ixgbevf_update_stats(adapter);
4353
4354 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
4355
4356 rcu_read_lock();
4357 for (i = 0; i < adapter->num_rx_queues; i++) {
4358 ring = adapter->rx_ring[i];
4359 do {
4360 start = u64_stats_fetch_begin_irq(&ring->syncp);
4361 bytes = ring->stats.bytes;
4362 packets = ring->stats.packets;
4363 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4364 stats->rx_bytes += bytes;
4365 stats->rx_packets += packets;
4366 }
4367
4368 for (i = 0; i < adapter->num_tx_queues; i++) {
4369 ring = adapter->tx_ring[i];
4370 ixgbevf_get_tx_ring_stats(stats, ring);
4371 }
4372
4373 for (i = 0; i < adapter->num_xdp_queues; i++) {
4374 ring = adapter->xdp_ring[i];
4375 ixgbevf_get_tx_ring_stats(stats, ring);
4376 }
4377 rcu_read_unlock();
4378}
4379
4380#define IXGBEVF_MAX_MAC_HDR_LEN 127
4381#define IXGBEVF_MAX_NETWORK_HDR_LEN 511
4382
4383static netdev_features_t
4384ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
4385 netdev_features_t features)
4386{
4387 unsigned int network_hdr_len, mac_hdr_len;
4388
4389
4390 mac_hdr_len = skb_network_header(skb) - skb->data;
4391 if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
4392 return features & ~(NETIF_F_HW_CSUM |
4393 NETIF_F_SCTP_CRC |
4394 NETIF_F_HW_VLAN_CTAG_TX |
4395 NETIF_F_TSO |
4396 NETIF_F_TSO6);
4397
4398 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
4399 if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
4400 return features & ~(NETIF_F_HW_CSUM |
4401 NETIF_F_SCTP_CRC |
4402 NETIF_F_TSO |
4403 NETIF_F_TSO6);
4404
4405
4406
4407
4408 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
4409 features &= ~NETIF_F_TSO;
4410
4411 return features;
4412}
4413
4414static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
4415{
4416 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4417 struct ixgbevf_adapter *adapter = netdev_priv(dev);
4418 struct bpf_prog *old_prog;
4419
	/* XDP requires each frame to fit in a single Rx buffer, so reject the
	 * program if the current MTU would overflow any ring's buffers
	 */
4421 for (i = 0; i < adapter->num_rx_queues; i++) {
4422 struct ixgbevf_ring *ring = adapter->rx_ring[i];
4423
4424 if (frame_size > ixgbevf_rx_bufsz(ring))
4425 return -EINVAL;
4426 }
4427
4428 old_prog = xchg(&adapter->xdp_prog, prog);
4429
	/* If transitioning XDP modes reconfigure the queues */
4431 if (!!prog != !!old_prog) {
		/* Hardware has to reinitialize queues and interrupts to
		 * match packet buffer size
		 */
4436 if (netif_running(dev))
4437 ixgbevf_close(dev);
4438
4439 ixgbevf_clear_interrupt_scheme(adapter);
4440 ixgbevf_init_interrupt_scheme(adapter);
4441
4442 if (netif_running(dev))
4443 ixgbevf_open(dev);
4444 } else {
4445 for (i = 0; i < adapter->num_rx_queues; i++)
4446 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
4447 }
4448
4449 if (old_prog)
4450 bpf_prog_put(old_prog);
4451
4452 return 0;
4453}
4454
4455static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4456{
4457 struct ixgbevf_adapter *adapter = netdev_priv(dev);
4458
4459 switch (xdp->command) {
4460 case XDP_SETUP_PROG:
4461 return ixgbevf_xdp_setup(dev, xdp->prog);
4462 case XDP_QUERY_PROG:
4463 xdp->prog_attached = !!(adapter->xdp_prog);
4464 xdp->prog_id = adapter->xdp_prog ?
4465 adapter->xdp_prog->aux->id : 0;
4466 return 0;
4467 default:
4468 return -EINVAL;
4469 }
4470}
4471
4472static const struct net_device_ops ixgbevf_netdev_ops = {
4473 .ndo_open = ixgbevf_open,
4474 .ndo_stop = ixgbevf_close,
4475 .ndo_start_xmit = ixgbevf_xmit_frame,
4476 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
4477 .ndo_get_stats64 = ixgbevf_get_stats,
4478 .ndo_validate_addr = eth_validate_addr,
4479 .ndo_set_mac_address = ixgbevf_set_mac,
4480 .ndo_change_mtu = ixgbevf_change_mtu,
4481 .ndo_tx_timeout = ixgbevf_tx_timeout,
4482 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
4483 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
4484#ifdef CONFIG_NET_POLL_CONTROLLER
4485 .ndo_poll_controller = ixgbevf_netpoll,
4486#endif
4487 .ndo_features_check = ixgbevf_features_check,
4488 .ndo_bpf = ixgbevf_xdp,
4489};
4490
4491static void ixgbevf_assign_netdev_ops(struct net_device *dev)
4492{
4493 dev->netdev_ops = &ixgbevf_netdev_ops;
4494 ixgbevf_set_ethtool_ops(dev);
4495 dev->watchdog_timeo = 5 * HZ;
4496}
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4510{
4511 struct net_device *netdev;
4512 struct ixgbevf_adapter *adapter = NULL;
4513 struct ixgbe_hw *hw = NULL;
4514 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
4515 int err, pci_using_dac;
4516 bool disable_dev = false;
4517
4518 err = pci_enable_device(pdev);
4519 if (err)
4520 return err;
4521
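	/* prefer 64-bit DMA addressing and fall back to a 32-bit mask only
	 * if the platform cannot provide it
	 */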
4522 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
4523 pci_using_dac = 1;
4524 } else {
4525 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4526 if (err) {
4527 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4528 goto err_dma;
4529 }
4530 pci_using_dac = 0;
4531 }
4532
4533 err = pci_request_regions(pdev, ixgbevf_driver_name);
4534 if (err) {
4535 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
4536 goto err_pci_reg;
4537 }
4538
4539 pci_set_master(pdev);
4540
4541 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
4542 MAX_TX_QUEUES);
4543 if (!netdev) {
4544 err = -ENOMEM;
4545 goto err_alloc_etherdev;
4546 }
4547
4548 SET_NETDEV_DEV(netdev, &pdev->dev);
4549
4550 adapter = netdev_priv(netdev);
4551
4552 adapter->netdev = netdev;
4553 adapter->pdev = pdev;
4554 hw = &adapter->hw;
4555 hw->back = adapter;
4556 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4557
4558
4559
4560
4561 pci_save_state(pdev);
4562
4563 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4564 pci_resource_len(pdev, 0));
4565 adapter->io_addr = hw->hw_addr;
4566 if (!hw->hw_addr) {
4567 err = -EIO;
4568 goto err_ioremap;
4569 }
4570
4571 ixgbevf_assign_netdev_ops(netdev);
4572
4573
4574 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
4575 hw->mac.type = ii->mac;
4576
4577 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
4578 sizeof(struct ixgbe_mbx_operations));
4579
4580
4581 err = ixgbevf_sw_init(adapter);
4582 if (err)
4583 goto err_sw_init;
4584
4585
4586 if (!is_valid_ether_addr(netdev->dev_addr)) {
4587 pr_err("invalid MAC address\n");
4588 err = -EIO;
4589 goto err_sw_init;
4590 }
4591
4592 netdev->hw_features = NETIF_F_SG |
4593 NETIF_F_TSO |
4594 NETIF_F_TSO6 |
4595 NETIF_F_RXCSUM |
4596 NETIF_F_HW_CSUM |
4597 NETIF_F_SCTP_CRC;
4598
4599#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
4600 NETIF_F_GSO_GRE_CSUM | \
4601 NETIF_F_GSO_IPXIP4 | \
4602 NETIF_F_GSO_IPXIP6 | \
4603 NETIF_F_GSO_UDP_TUNNEL | \
4604 NETIF_F_GSO_UDP_TUNNEL_CSUM)
4605
4606 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
4607 netdev->hw_features |= NETIF_F_GSO_PARTIAL |
4608 IXGBEVF_GSO_PARTIAL_FEATURES;
4609
4610 netdev->features = netdev->hw_features;
4611
4612 if (pci_using_dac)
4613 netdev->features |= NETIF_F_HIGHDMA;
4614
4615 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
4616 netdev->mpls_features |= NETIF_F_SG |
4617 NETIF_F_TSO |
4618 NETIF_F_TSO6 |
4619 NETIF_F_HW_CSUM;
4620 netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
4621 netdev->hw_enc_features |= netdev->vlan_features;
4622
4623
4624 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4625 NETIF_F_HW_VLAN_CTAG_RX |
4626 NETIF_F_HW_VLAN_CTAG_TX;
4627
4628 netdev->priv_flags |= IFF_UNICAST_FLT;
4629
4630
4631 netdev->min_mtu = ETH_MIN_MTU;
4632 switch (adapter->hw.api_version) {
4633 case ixgbe_mbox_api_11:
4634 case ixgbe_mbox_api_12:
4635 case ixgbe_mbox_api_13:
4636 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4637 (ETH_HLEN + ETH_FCS_LEN);
4638 break;
4639 default:
4640 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
4641 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4642 (ETH_HLEN + ETH_FCS_LEN);
4643 else
4644 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
4645 break;
4646 }
4647
4648 if (IXGBE_REMOVED(hw->hw_addr)) {
4649 err = -EIO;
4650 goto err_sw_init;
4651 }
4652
4653 timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0);
4654
4655 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4656 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4657 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4658
4659 err = ixgbevf_init_interrupt_scheme(adapter);
4660 if (err)
4661 goto err_sw_init;
4662
4663 strcpy(netdev->name, "eth%d");
4664
4665 err = register_netdev(netdev);
4666 if (err)
4667 goto err_register;
4668
4669 pci_set_drvdata(pdev, netdev);
4670 netif_carrier_off(netdev);
4671
4672 ixgbevf_init_last_counter_stats(adapter);
4673
4674
4675 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
4676 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4677
4678 switch (hw->mac.type) {
4679 case ixgbe_mac_X550_vf:
4680 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
4681 break;
4682 case ixgbe_mac_X540_vf:
4683 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
4684 break;
4685 case ixgbe_mac_82599_vf:
4686 default:
4687 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
4688 break;
4689 }
4690
4691 return 0;
4692
4693err_register:
4694 ixgbevf_clear_interrupt_scheme(adapter);
4695err_sw_init:
4696 ixgbevf_reset_interrupt_capability(adapter);
4697 iounmap(adapter->io_addr);
4698 kfree(adapter->rss_key);
4699err_ioremap:
4700 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4701 free_netdev(netdev);
4702err_alloc_etherdev:
4703 pci_release_regions(pdev);
4704err_pci_reg:
4705err_dma:
4706 if (!adapter || disable_dev)
4707 pci_disable_device(pdev);
4708 return err;
4709}
4710
4711
4712
4713
4714
4715
4716
4717
4718
4719
4720static void ixgbevf_remove(struct pci_dev *pdev)
4721{
4722 struct net_device *netdev = pci_get_drvdata(pdev);
4723 struct ixgbevf_adapter *adapter;
4724 bool disable_dev;
4725
4726 if (!netdev)
4727 return;
4728
4729 adapter = netdev_priv(netdev);
4730
4731 set_bit(__IXGBEVF_REMOVING, &adapter->state);
4732 cancel_work_sync(&adapter->service_task);
4733
4734 if (netdev->reg_state == NETREG_REGISTERED)
4735 unregister_netdev(netdev);
4736
4737 ixgbevf_clear_interrupt_scheme(adapter);
4738 ixgbevf_reset_interrupt_capability(adapter);
4739
4740 iounmap(adapter->io_addr);
4741 pci_release_regions(pdev);
4742
4743 hw_dbg(&adapter->hw, "Remove complete\n");
4744
4745 kfree(adapter->rss_key);
4746 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4747 free_netdev(netdev);
4748
4749 if (disable_dev)
4750 pci_disable_device(pdev);
4751}
4752
4753
4754
4755
4756
4757
4758
4759
4760
4761static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
4762 pci_channel_state_t state)
4763{
4764 struct net_device *netdev = pci_get_drvdata(pdev);
4765 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4766
4767 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4768 return PCI_ERS_RESULT_DISCONNECT;
4769
4770 rtnl_lock();
4771 netif_device_detach(netdev);
4772
4773 if (state == pci_channel_io_perm_failure) {
4774 rtnl_unlock();
4775 return PCI_ERS_RESULT_DISCONNECT;
4776 }
4777
4778 if (netif_running(netdev))
4779 ixgbevf_close_suspend(adapter);
4780
4781 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4782 pci_disable_device(pdev);
4783 rtnl_unlock();
4784
4785
4786 return PCI_ERS_RESULT_NEED_RESET;
4787}
4788
4789
4790
4791
4792
4793
4794
4795
4796static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
4797{
4798 struct net_device *netdev = pci_get_drvdata(pdev);
4799 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4800
4801 if (pci_enable_device_mem(pdev)) {
4802 dev_err(&pdev->dev,
4803 "Cannot re-enable PCI device after reset.\n");
4804 return PCI_ERS_RESULT_DISCONNECT;
4805 }
4806
4807 adapter->hw.hw_addr = adapter->io_addr;
4808 smp_mb__before_atomic();
4809 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4810 pci_set_master(pdev);
4811
4812 ixgbevf_reset(adapter);
4813
4814 return PCI_ERS_RESULT_RECOVERED;
4815}
4816
4817
4818
4819
4820
4821
4822
4823
4824
4825static void ixgbevf_io_resume(struct pci_dev *pdev)
4826{
4827 struct net_device *netdev = pci_get_drvdata(pdev);
4828
4829 rtnl_lock();
4830 if (netif_running(netdev))
4831 ixgbevf_open(netdev);
4832
4833 netif_device_attach(netdev);
4834 rtnl_unlock();
4835}
4836
4837
4838static const struct pci_error_handlers ixgbevf_err_handler = {
4839 .error_detected = ixgbevf_io_error_detected,
4840 .slot_reset = ixgbevf_io_slot_reset,
4841 .resume = ixgbevf_io_resume,
4842};
4843
4844static struct pci_driver ixgbevf_driver = {
4845 .name = ixgbevf_driver_name,
4846 .id_table = ixgbevf_pci_tbl,
4847 .probe = ixgbevf_probe,
4848 .remove = ixgbevf_remove,
4849#ifdef CONFIG_PM
4850
4851 .suspend = ixgbevf_suspend,
4852 .resume = ixgbevf_resume,
4853#endif
4854 .shutdown = ixgbevf_shutdown,
4855 .err_handler = &ixgbevf_err_handler
4856};
4857
4858
4859
4860
4861
4862
4863
4864static int __init ixgbevf_init_module(void)
4865{
4866 pr_info("%s - version %s\n", ixgbevf_driver_string,
4867 ixgbevf_driver_version);
4868
4869 pr_info("%s\n", ixgbevf_copyright);
4870 ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
4871 if (!ixgbevf_wq) {
4872 pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
4873 return -ENOMEM;
4874 }
4875
4876 return pci_register_driver(&ixgbevf_driver);
4877}
4878
4879module_init(ixgbevf_init_module);
4880
4881
4882
4883
4884
4885
4886
4887static void __exit ixgbevf_exit_module(void)
4888{
4889 pci_unregister_driver(&ixgbevf_driver);
4890 if (ixgbevf_wq) {
4891 destroy_workqueue(ixgbevf_wq);
4892 ixgbevf_wq = NULL;
4893 }
4894}
4895
4896#ifdef DEBUG
4897
4898
4899
4900
4901
4902char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4903{
4904 struct ixgbevf_adapter *adapter = hw->back;
4905
4906 return adapter->netdev->name;
4907}
4908
4909#endif
4910module_exit(ixgbevf_exit_module);
4911
4912
4913