// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <net/mpls.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <net/xfrm.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2018 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf]	= &ixgbevf_82599_vf_info,
	[board_82599_vf_hv]	= &ixgbevf_82599_vf_hv_info,
	[board_X540_vf]		= &ixgbevf_X540_vf_info,
	[board_X540_vf_hv]	= &ixgbevf_X540_vf_hv_info,
	[board_X550_vf]		= &ixgbevf_X550_vf_info,
	[board_X550_vf_hv]	= &ixgbevf_X550_vf_hv_info,
	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,
	[board_X550EM_x_vf_hv]	= &ixgbevf_X550EM_x_vf_hv_info,
	[board_x550em_a_vf]	= &ixgbevf_x550em_a_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct workqueue_struct *ixgbevf_wq;
89
static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbevf_wq, &adapter->service_task);
}

static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}
106
/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff);
114
static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}
145
u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}
158
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
189
static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
}

static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
209
static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}
	/* reset the countdown */
	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;

	return false;
}
236
static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
		ixgbevf_service_event_schedule(adapter);
	}
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: transmit queue hanging (unused)
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_tx_timeout_reset(adapter);
}

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
263static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
264 struct ixgbevf_ring *tx_ring, int napi_budget)
265{
266 struct ixgbevf_adapter *adapter = q_vector->adapter;
267 struct ixgbevf_tx_buffer *tx_buffer;
268 union ixgbe_adv_tx_desc *tx_desc;
269 unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
270 unsigned int budget = tx_ring->count / 2;
271 unsigned int i = tx_ring->next_to_clean;
272
273 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
274 return true;
275
276 tx_buffer = &tx_ring->tx_buffer_info[i];
277 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
278 i -= tx_ring->count;
279
280 do {
281 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
282
283
284 if (!eop_desc)
285 break;
286
287
288 smp_rmb();
289
290
291 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
292 break;
293
294
295 tx_buffer->next_to_watch = NULL;
296
297
298 total_bytes += tx_buffer->bytecount;
299 total_packets += tx_buffer->gso_segs;
300 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
301 total_ipsec++;
302
303
304 if (ring_is_xdp(tx_ring))
305 page_frag_free(tx_buffer->data);
306 else
307 napi_consume_skb(tx_buffer->skb, napi_budget);
308
309
310 dma_unmap_single(tx_ring->dev,
311 dma_unmap_addr(tx_buffer, dma),
312 dma_unmap_len(tx_buffer, len),
313 DMA_TO_DEVICE);
314
315
316 dma_unmap_len_set(tx_buffer, len, 0);
317
318
319 while (tx_desc != eop_desc) {
320 tx_buffer++;
321 tx_desc++;
322 i++;
323 if (unlikely(!i)) {
324 i -= tx_ring->count;
325 tx_buffer = tx_ring->tx_buffer_info;
326 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
327 }
328
329
330 if (dma_unmap_len(tx_buffer, len)) {
331 dma_unmap_page(tx_ring->dev,
332 dma_unmap_addr(tx_buffer, dma),
333 dma_unmap_len(tx_buffer, len),
334 DMA_TO_DEVICE);
335 dma_unmap_len_set(tx_buffer, len, 0);
336 }
337 }
338
339
340 tx_buffer++;
341 tx_desc++;
342 i++;
343 if (unlikely(!i)) {
344 i -= tx_ring->count;
345 tx_buffer = tx_ring->tx_buffer_info;
346 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
347 }
348
349
350 prefetch(tx_desc);
351
352
353 budget--;
354 } while (likely(budget));
355
356 i += tx_ring->count;
357 tx_ring->next_to_clean = i;
358 u64_stats_update_begin(&tx_ring->syncp);
359 tx_ring->stats.bytes += total_bytes;
360 tx_ring->stats.packets += total_packets;
361 u64_stats_update_end(&tx_ring->syncp);
362 q_vector->tx.total_bytes += total_bytes;
363 q_vector->tx.total_packets += total_packets;
364 adapter->tx_ipsec += total_ipsec;
365
366 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
367 struct ixgbe_hw *hw = &adapter->hw;
368 union ixgbe_adv_tx_desc *eop_desc;
369
370 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
371
372 pr_err("Detected Tx Unit Hang%s\n"
373 " Tx Queue <%d>\n"
374 " TDH, TDT <%x>, <%x>\n"
375 " next_to_use <%x>\n"
376 " next_to_clean <%x>\n"
377 "tx_buffer_info[next_to_clean]\n"
378 " next_to_watch <%p>\n"
379 " eop_desc->wb.status <%x>\n"
380 " time_stamp <%lx>\n"
381 " jiffies <%lx>\n",
382 ring_is_xdp(tx_ring) ? " XDP" : "",
383 tx_ring->queue_index,
384 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
385 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
386 tx_ring->next_to_use, i,
387 eop_desc, (eop_desc ? eop_desc->wb.status : 0),
388 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
389
390 if (!ring_is_xdp(tx_ring))
391 netif_stop_subqueue(tx_ring->netdev,
392 tx_ring->queue_index);
393
394
395 ixgbevf_tx_timeout_reset(adapter);
396
397 return true;
398 }
399
400 if (ring_is_xdp(tx_ring))
401 return !!budget;
402
403#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
404 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
405 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
409 smp_mb();
410
411 if (__netif_subqueue_stopped(tx_ring->netdev,
412 tx_ring->queue_index) &&
413 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
414 netif_wake_subqueue(tx_ring->netdev,
415 tx_ring->queue_index);
416 ++tx_ring->tx_stats.restart_queue;
417 }
418 }
419
420 return !!budget;
421}
422
/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
428static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
429 struct sk_buff *skb)
430{
431 napi_gro_receive(&q_vector->napi, skb);
432}
433
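/* Bitmask of advanced Rx descriptor RSS hash types that cover the L4
 * header; used by ixgbevf_rx_hash() below to decide whether the reported
 * hash can be advertised as PKT_HASH_TYPE_L4 or only PKT_HASH_TYPE_L3.
 */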
434#define IXGBE_RSS_L4_TYPES_MASK \
435 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
436 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
437 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
438 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
439
440static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
441 union ixgbe_adv_rx_desc *rx_desc,
442 struct sk_buff *skb)
443{
444 u16 rss_type;
445
446 if (!(ring->netdev->features & NETIF_F_RXHASH))
447 return;
448
449 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
450 IXGBE_RXDADV_RSSTYPE_MASK;
451
452 if (!rss_type)
453 return;
454
455 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
456 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
457 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
458}
459
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
466static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
467 union ixgbe_adv_rx_desc *rx_desc,
468 struct sk_buff *skb)
469{
470 skb_checksum_none_assert(skb);
471
472
473 if (!(ring->netdev->features & NETIF_F_RXCSUM))
474 return;
475
476
477 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
478 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
479 ring->rx_stats.csum_err++;
480 return;
481 }
482
483 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
484 return;
485
486 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
487 ring->rx_stats.csum_err++;
488 return;
489 }
490
	/* It must be a TCP or UDP packet with a valid checksum */
492 skb->ip_summed = CHECKSUM_UNNECESSARY;
493}
494
/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
505static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
506 union ixgbe_adv_rx_desc *rx_desc,
507 struct sk_buff *skb)
508{
509 ixgbevf_rx_hash(rx_ring, rx_desc, skb);
510 ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
511
512 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
513 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
514 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
515
516 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
517 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
518 }
519
520 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
521 ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);
522
523 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
524}
525
526static
527struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
528 const unsigned int size)
529{
530 struct ixgbevf_rx_buffer *rx_buffer;
531
532 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
533 prefetchw(rx_buffer->page);
534
535
536 dma_sync_single_range_for_cpu(rx_ring->dev,
537 rx_buffer->dma,
538 rx_buffer->page_offset,
539 size,
540 DMA_FROM_DEVICE);
541
542 rx_buffer->pagecnt_bias--;
543
544 return rx_buffer;
545}
546
547static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
548 struct ixgbevf_rx_buffer *rx_buffer,
549 struct sk_buff *skb)
550{
551 if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
552
553 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
554 } else {
555 if (IS_ERR(skb))
556
557
558
559 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
560 ixgbevf_rx_pg_size(rx_ring),
561 DMA_FROM_DEVICE,
562 IXGBEVF_RX_DMA_ATTR);
563 __page_frag_cache_drain(rx_buffer->page,
564 rx_buffer->pagecnt_bias);
565 }
566
567
568 rx_buffer->page = NULL;
569}
570
/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
581static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
582 union ixgbe_adv_rx_desc *rx_desc)
583{
584 u32 ntc = rx_ring->next_to_clean + 1;
585
586
587 ntc = (ntc < rx_ring->count) ? ntc : 0;
588 rx_ring->next_to_clean = ntc;
589
590 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
591
592 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
593 return false;
594
595 return true;
596}
597
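/* Offset of packet data within the receive page: the build_skb path keeps
 * IXGBEVF_SKB_PAD bytes of headroom in front of the buffer, while the
 * legacy (construct_skb) path starts at offset zero.
 */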
598static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
599{
600 return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
601}
602
603static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
604 struct ixgbevf_rx_buffer *bi)
605{
606 struct page *page = bi->page;
607 dma_addr_t dma;
608
609
610 if (likely(page))
611 return true;
612
613
614 page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
615 if (unlikely(!page)) {
616 rx_ring->rx_stats.alloc_rx_page_failed++;
617 return false;
618 }
619
620
621 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
622 ixgbevf_rx_pg_size(rx_ring),
623 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
624
625
626
627
628 if (dma_mapping_error(rx_ring->dev, dma)) {
629 __free_pages(page, ixgbevf_rx_pg_order(rx_ring));
630
631 rx_ring->rx_stats.alloc_rx_page_failed++;
632 return false;
633 }
634
635 bi->dma = dma;
636 bi->page = page;
637 bi->page_offset = ixgbevf_rx_offset(rx_ring);
638 bi->pagecnt_bias = 1;
639 rx_ring->rx_stats.alloc_rx_page++;
640
641 return true;
642}
643
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
649static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
650 u16 cleaned_count)
651{
652 union ixgbe_adv_rx_desc *rx_desc;
653 struct ixgbevf_rx_buffer *bi;
654 unsigned int i = rx_ring->next_to_use;
655
656
657 if (!cleaned_count || !rx_ring->netdev)
658 return;
659
660 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
661 bi = &rx_ring->rx_buffer_info[i];
662 i -= rx_ring->count;
663
664 do {
665 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
666 break;
667
668
669 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
670 bi->page_offset,
671 ixgbevf_rx_bufsz(rx_ring),
672 DMA_FROM_DEVICE);
673
674
675
676
677 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
678
679 rx_desc++;
680 bi++;
681 i++;
682 if (unlikely(!i)) {
683 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
684 bi = rx_ring->rx_buffer_info;
685 i -= rx_ring->count;
686 }
687
688
689 rx_desc->wb.upper.length = 0;
690
691 cleaned_count--;
692 } while (cleaned_count);
693
694 i += rx_ring->count;
695
696 if (rx_ring->next_to_use != i) {
697
698 rx_ring->next_to_use = i;
699
700
701 rx_ring->next_to_alloc = i;
702
703
704
705
706
707
708 wmb();
709 ixgbevf_write_tail(rx_ring, i);
710 }
711}
712
/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
731static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
732 union ixgbe_adv_rx_desc *rx_desc,
733 struct sk_buff *skb)
734{
735
736 if (IS_ERR(skb))
737 return true;
738
739
740 if (unlikely(ixgbevf_test_staterr(rx_desc,
741 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
742 struct net_device *netdev = rx_ring->netdev;
743
744 if (!(netdev->features & NETIF_F_RXALL)) {
745 dev_kfree_skb_any(skb);
746 return true;
747 }
748 }
749
750
751 if (eth_skb_pad(skb))
752 return true;
753
754 return false;
755}
756
/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
764static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
765 struct ixgbevf_rx_buffer *old_buff)
766{
767 struct ixgbevf_rx_buffer *new_buff;
768 u16 nta = rx_ring->next_to_alloc;
769
770 new_buff = &rx_ring->rx_buffer_info[nta];
771
772
773 nta++;
774 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
775
776
777 new_buff->page = old_buff->page;
778 new_buff->dma = old_buff->dma;
779 new_buff->page_offset = old_buff->page_offset;
780 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
781}
782
783static inline bool ixgbevf_page_is_reserved(struct page *page)
784{
785 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
786}
787
788static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
789{
790 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
791 struct page *page = rx_buffer->page;
792
793
794 if (unlikely(ixgbevf_page_is_reserved(page)))
795 return false;
796
797#if (PAGE_SIZE < 8192)
798
799 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
800 return false;
801#else
802#define IXGBEVF_LAST_OFFSET \
803 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
804
805 if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
806 return false;
807
808#endif
809
810
811
812
813
814 if (unlikely(!pagecnt_bias)) {
815 page_ref_add(page, USHRT_MAX);
816 rx_buffer->pagecnt_bias = USHRT_MAX;
817 }
818
819 return true;
820}
821
/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 **/
831static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
832 struct ixgbevf_rx_buffer *rx_buffer,
833 struct sk_buff *skb,
834 unsigned int size)
835{
836#if (PAGE_SIZE < 8192)
837 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
838#else
839 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
840 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
841 SKB_DATA_ALIGN(size);
842#endif
843 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
844 rx_buffer->page_offset, size, truesize);
845#if (PAGE_SIZE < 8192)
846 rx_buffer->page_offset ^= truesize;
847#else
848 rx_buffer->page_offset += truesize;
849#endif
850}
851
852static
853struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
854 struct ixgbevf_rx_buffer *rx_buffer,
855 struct xdp_buff *xdp,
856 union ixgbe_adv_rx_desc *rx_desc)
857{
858 unsigned int size = xdp->data_end - xdp->data;
859#if (PAGE_SIZE < 8192)
860 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
861#else
862 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
863 xdp->data_hard_start);
864#endif
865 unsigned int headlen;
866 struct sk_buff *skb;
867
868
869 prefetch(xdp->data);
870#if L1_CACHE_BYTES < 128
871 prefetch(xdp->data + L1_CACHE_BYTES);
872#endif
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
890 if (unlikely(!skb))
891 return NULL;
892
893
894 headlen = size;
895 if (headlen > IXGBEVF_RX_HDR_SIZE)
896 headlen = eth_get_headlen(skb->dev, xdp->data,
897 IXGBEVF_RX_HDR_SIZE);
898
899
900 memcpy(__skb_put(skb, headlen), xdp->data,
901 ALIGN(headlen, sizeof(long)));
902
903
904 size -= headlen;
905 if (size) {
906 skb_add_rx_frag(skb, 0, rx_buffer->page,
907 (xdp->data + headlen) -
908 page_address(rx_buffer->page),
909 size, truesize);
910#if (PAGE_SIZE < 8192)
911 rx_buffer->page_offset ^= truesize;
912#else
913 rx_buffer->page_offset += truesize;
914#endif
915 } else {
916 rx_buffer->pagecnt_bias++;
917 }
918
919 return skb;
920}
921
922static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
923 u32 qmask)
924{
925 struct ixgbe_hw *hw = &adapter->hw;
926
927 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
928}
929
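/* Build an skb directly around the page half the hardware DMA'd into,
 * instead of allocating a header buffer and copying the headers as
 * ixgbevf_construct_skb() does; only used when the ring has build_skb
 * enabled.
 */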
930static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
931 struct ixgbevf_rx_buffer *rx_buffer,
932 struct xdp_buff *xdp,
933 union ixgbe_adv_rx_desc *rx_desc)
934{
935 unsigned int metasize = xdp->data - xdp->data_meta;
936#if (PAGE_SIZE < 8192)
937 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
938#else
939 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
940 SKB_DATA_ALIGN(xdp->data_end -
941 xdp->data_hard_start);
942#endif
943 struct sk_buff *skb;
944
945
946
947
948
949
950 prefetch(xdp->data_meta);
951#if L1_CACHE_BYTES < 128
952 prefetch(xdp->data_meta + L1_CACHE_BYTES);
953#endif
954
955
956 skb = build_skb(xdp->data_hard_start, truesize);
957 if (unlikely(!skb))
958 return NULL;
959
960
961 skb_reserve(skb, xdp->data - xdp->data_hard_start);
962 __skb_put(skb, xdp->data_end - xdp->data);
963 if (metasize)
964 skb_metadata_set(skb, metasize);
965
966
967#if (PAGE_SIZE < 8192)
968 rx_buffer->page_offset ^= truesize;
969#else
970 rx_buffer->page_offset += truesize;
971#endif
972
973 return skb;
974}
975
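/* Verdicts produced by ixgbevf_run_xdp(); the Rx clean path receives them
 * encoded via ERR_PTR() so that a real skb pointer and an XDP verdict can
 * share the same return value.
 */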
976#define IXGBEVF_XDP_PASS 0
977#define IXGBEVF_XDP_CONSUMED 1
978#define IXGBEVF_XDP_TX 2
979
980static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
981 struct xdp_buff *xdp)
982{
983 struct ixgbevf_tx_buffer *tx_buffer;
984 union ixgbe_adv_tx_desc *tx_desc;
985 u32 len, cmd_type;
986 dma_addr_t dma;
987 u16 i;
988
989 len = xdp->data_end - xdp->data;
990
991 if (unlikely(!ixgbevf_desc_unused(ring)))
992 return IXGBEVF_XDP_CONSUMED;
993
994 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
995 if (dma_mapping_error(ring->dev, dma))
996 return IXGBEVF_XDP_CONSUMED;
997
998
999 i = ring->next_to_use;
1000 tx_buffer = &ring->tx_buffer_info[i];
1001
1002 dma_unmap_len_set(tx_buffer, len, len);
1003 dma_unmap_addr_set(tx_buffer, dma, dma);
1004 tx_buffer->data = xdp->data;
1005 tx_buffer->bytecount = len;
1006 tx_buffer->gso_segs = 1;
1007 tx_buffer->protocol = 0;
1008
1009
1010
1011
1012 if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
1013 struct ixgbe_adv_tx_context_desc *context_desc;
1014
1015 set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
1016
1017 context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
1018 context_desc->vlan_macip_lens =
1019 cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
1020 context_desc->fceof_saidx = 0;
1021 context_desc->type_tucmd_mlhl =
1022 cpu_to_le32(IXGBE_TXD_CMD_DEXT |
1023 IXGBE_ADVTXD_DTYP_CTXT);
1024 context_desc->mss_l4len_idx = 0;
1025
1026 i = 1;
1027 }
1028
1029
1030 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
1031 IXGBE_ADVTXD_DCMD_DEXT |
1032 IXGBE_ADVTXD_DCMD_IFCS;
1033 cmd_type |= len | IXGBE_TXD_CMD;
1034
1035 tx_desc = IXGBEVF_TX_DESC(ring, i);
1036 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1037
1038 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1039 tx_desc->read.olinfo_status =
1040 cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
1041 IXGBE_ADVTXD_CC);
1042
1043
1044 smp_wmb();
1045
1046
1047 i++;
1048 if (i == ring->count)
1049 i = 0;
1050
1051 tx_buffer->next_to_watch = tx_desc;
1052 ring->next_to_use = i;
1053
1054 return IXGBEVF_XDP_TX;
1055}
1056
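/* Run the ring's attached XDP program (if any) over the received buffer
 * and translate the program's action into one of the IXGBEVF_XDP_*
 * verdicts defined above.
 */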
1057static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
1058 struct ixgbevf_ring *rx_ring,
1059 struct xdp_buff *xdp)
1060{
1061 int result = IXGBEVF_XDP_PASS;
1062 struct ixgbevf_ring *xdp_ring;
1063 struct bpf_prog *xdp_prog;
1064 u32 act;
1065
1066 rcu_read_lock();
1067 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1068
1069 if (!xdp_prog)
1070 goto xdp_out;
1071
1072 act = bpf_prog_run_xdp(xdp_prog, xdp);
1073 switch (act) {
1074 case XDP_PASS:
1075 break;
1076 case XDP_TX:
1077 xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
1078 result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
1079 break;
1080 default:
1081 bpf_warn_invalid_xdp_action(act);
1082 fallthrough;
1083 case XDP_ABORTED:
1084 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1085 fallthrough;
1086 case XDP_DROP:
1087 result = IXGBEVF_XDP_CONSUMED;
1088 break;
1089 }
1090xdp_out:
1091 rcu_read_unlock();
1092 return ERR_PTR(-result);
1093}
1094
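/* Total buffer space consumed per received frame.  With pages larger than
 * 4K this accounts for headroom and skb_shared_info when build_skb is in
 * use; with 4K pages it is simply half a page.  Used to seed xdp.frame_sz
 * and to advance the page offset after an XDP_TX.
 */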
1095static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
1096 unsigned int size)
1097{
1098 unsigned int truesize;
1099
1100#if (PAGE_SIZE < 8192)
1101 truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
1102#else
1103 truesize = ring_uses_build_skb(rx_ring) ?
1104 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) +
1105 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
1106 SKB_DATA_ALIGN(size);
1107#endif
1108 return truesize;
1109}
1110
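/* Move the buffer's page offset past the data just handed to the XDP Tx
 * path: toggled between the two page halves on 4K pages, advanced by the
 * frame truesize otherwise, so the region is not reused while in flight.
 */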
1111static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
1112 struct ixgbevf_rx_buffer *rx_buffer,
1113 unsigned int size)
1114{
1115 unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size);
1116
1117#if (PAGE_SIZE < 8192)
1118 rx_buffer->page_offset ^= truesize;
1119#else
1120 rx_buffer->page_offset += truesize;
1121#endif
1122}
1123
1124static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
1125 struct ixgbevf_ring *rx_ring,
1126 int budget)
1127{
1128 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1129 struct ixgbevf_adapter *adapter = q_vector->adapter;
1130 u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
1131 struct sk_buff *skb = rx_ring->skb;
1132 bool xdp_xmit = false;
1133 struct xdp_buff xdp;
1134
1135 xdp.rxq = &rx_ring->xdp_rxq;
1136
	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
1138#if (PAGE_SIZE < 8192)
1139 xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
1140#endif
1141
1142 while (likely(total_rx_packets < budget)) {
1143 struct ixgbevf_rx_buffer *rx_buffer;
1144 union ixgbe_adv_rx_desc *rx_desc;
1145 unsigned int size;
1146
1147
1148 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
1149 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
1150 cleaned_count = 0;
1151 }
1152
1153 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1154 size = le16_to_cpu(rx_desc->wb.upper.length);
1155 if (!size)
1156 break;
1157
1158
1159
1160
1161
1162 rmb();
1163
1164 rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
1165
1166
1167 if (!skb) {
1168 xdp.data = page_address(rx_buffer->page) +
1169 rx_buffer->page_offset;
1170 xdp.data_meta = xdp.data;
1171 xdp.data_hard_start = xdp.data -
1172 ixgbevf_rx_offset(rx_ring);
1173 xdp.data_end = xdp.data + size;
1174#if (PAGE_SIZE > 4096)
1175
1176 xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
1177#endif
1178 skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
1179 }
1180
1181 if (IS_ERR(skb)) {
1182 if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
1183 xdp_xmit = true;
1184 ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
1185 size);
1186 } else {
1187 rx_buffer->pagecnt_bias++;
1188 }
1189 total_rx_packets++;
1190 total_rx_bytes += size;
1191 } else if (skb) {
1192 ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1193 } else if (ring_uses_build_skb(rx_ring)) {
1194 skb = ixgbevf_build_skb(rx_ring, rx_buffer,
1195 &xdp, rx_desc);
1196 } else {
1197 skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
1198 &xdp, rx_desc);
1199 }
1200
1201
1202 if (!skb) {
1203 rx_ring->rx_stats.alloc_rx_buff_failed++;
1204 rx_buffer->pagecnt_bias++;
1205 break;
1206 }
1207
1208 ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
1209 cleaned_count++;
1210
1211
1212 if (ixgbevf_is_non_eop(rx_ring, rx_desc))
1213 continue;
1214
1215
1216 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
1217 skb = NULL;
1218 continue;
1219 }
1220
1221
1222 total_rx_bytes += skb->len;
1223
1224
1225
1226
1227 if ((skb->pkt_type == PACKET_BROADCAST ||
1228 skb->pkt_type == PACKET_MULTICAST) &&
1229 ether_addr_equal(rx_ring->netdev->dev_addr,
1230 eth_hdr(skb)->h_source)) {
1231 dev_kfree_skb_irq(skb);
1232 continue;
1233 }
1234
1235
1236 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
1237
1238 ixgbevf_rx_skb(q_vector, skb);
1239
1240
1241 skb = NULL;
1242
1243
1244 total_rx_packets++;
1245 }
1246
1247
1248 rx_ring->skb = skb;
1249
1250 if (xdp_xmit) {
1251 struct ixgbevf_ring *xdp_ring =
1252 adapter->xdp_ring[rx_ring->queue_index];
1253
1254
1255
1256
1257 wmb();
1258 ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
1259 }
1260
1261 u64_stats_update_begin(&rx_ring->syncp);
1262 rx_ring->stats.packets += total_rx_packets;
1263 rx_ring->stats.bytes += total_rx_bytes;
1264 u64_stats_update_end(&rx_ring->syncp);
1265 q_vector->rx.total_packets += total_rx_packets;
1266 q_vector->rx.total_bytes += total_rx_bytes;
1267
1268 return total_rx_packets;
1269}
1270
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
1279static int ixgbevf_poll(struct napi_struct *napi, int budget)
1280{
1281 struct ixgbevf_q_vector *q_vector =
1282 container_of(napi, struct ixgbevf_q_vector, napi);
1283 struct ixgbevf_adapter *adapter = q_vector->adapter;
1284 struct ixgbevf_ring *ring;
1285 int per_ring_budget, work_done = 0;
1286 bool clean_complete = true;
1287
1288 ixgbevf_for_each_ring(ring, q_vector->tx) {
1289 if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
1290 clean_complete = false;
1291 }
1292
1293 if (budget <= 0)
1294 return budget;
1295
1296
1297
1298
1299 if (q_vector->rx.count > 1)
1300 per_ring_budget = max(budget/q_vector->rx.count, 1);
1301 else
1302 per_ring_budget = budget;
1303
1304 ixgbevf_for_each_ring(ring, q_vector->rx) {
1305 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
1306 per_ring_budget);
1307 work_done += cleaned;
1308 if (cleaned >= per_ring_budget)
1309 clean_complete = false;
1310 }
1311
1312
1313 if (!clean_complete)
1314 return budget;
1315
1316
1317
1318
1319 if (likely(napi_complete_done(napi, work_done))) {
1320 if (adapter->rx_itr_setting == 1)
1321 ixgbevf_set_itr(q_vector);
1322 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
1323 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1324 ixgbevf_irq_enable_queues(adapter,
1325 BIT(q_vector->v_idx));
1326 }
1327
1328 return min(work_done, budget - 1);
1329}
1330
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
1335void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
1336{
1337 struct ixgbevf_adapter *adapter = q_vector->adapter;
1338 struct ixgbe_hw *hw = &adapter->hw;
1339 int v_idx = q_vector->v_idx;
1340 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1341
1342
1343
1344
1345 itr_reg |= IXGBE_EITR_CNT_WDIS;
1346
1347 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
1348}
1349
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
1357static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1358{
1359 struct ixgbevf_q_vector *q_vector;
1360 int q_vectors, v_idx;
1361
1362 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1363 adapter->eims_enable_mask = 0;
1364
1365
1366
1367
1368 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1369 struct ixgbevf_ring *ring;
1370
1371 q_vector = adapter->q_vector[v_idx];
1372
1373 ixgbevf_for_each_ring(ring, q_vector->rx)
1374 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1375
1376 ixgbevf_for_each_ring(ring, q_vector->tx)
1377 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1378
1379 if (q_vector->tx.ring && !q_vector->rx.ring) {
1380
1381 if (adapter->tx_itr_setting == 1)
1382 q_vector->itr = IXGBE_12K_ITR;
1383 else
1384 q_vector->itr = adapter->tx_itr_setting;
1385 } else {
1386
1387 if (adapter->rx_itr_setting == 1)
1388 q_vector->itr = IXGBE_20K_ITR;
1389 else
1390 q_vector->itr = adapter->rx_itr_setting;
1391 }
1392
1393
1394 adapter->eims_enable_mask |= BIT(v_idx);
1395
1396 ixgbevf_write_eitr(q_vector);
1397 }
1398
1399 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
1400
1401 adapter->eims_other = BIT(v_idx);
1402 adapter->eims_enable_mask |= adapter->eims_other;
1403}
1404
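/* Latency buckets used by the dynamic ITR algorithm: the measured
 * bytes-per-interrupt rate moves each ring container between these ranges,
 * which then select the interrupt throttle rate in ixgbevf_set_itr().
 */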
1405enum latency_range {
1406 lowest_latency = 0,
1407 low_latency = 1,
1408 bulk_latency = 2,
1409 latency_invalid = 255
1410};
1411
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
1425static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1426 struct ixgbevf_ring_container *ring_container)
1427{
1428 int bytes = ring_container->total_bytes;
1429 int packets = ring_container->total_packets;
1430 u32 timepassed_us;
1431 u64 bytes_perint;
1432 u8 itr_setting = ring_container->itr;
1433
1434 if (packets == 0)
1435 return;
1436
1437
1438
1439
1440
1441
1442
1443 timepassed_us = q_vector->itr >> 2;
1444 if (timepassed_us == 0)
1445 return;
1446
1447 bytes_perint = bytes / timepassed_us;
1448
1449 switch (itr_setting) {
1450 case lowest_latency:
1451 if (bytes_perint > 10)
1452 itr_setting = low_latency;
1453 break;
1454 case low_latency:
1455 if (bytes_perint > 20)
1456 itr_setting = bulk_latency;
1457 else if (bytes_perint <= 10)
1458 itr_setting = lowest_latency;
1459 break;
1460 case bulk_latency:
1461 if (bytes_perint <= 20)
1462 itr_setting = low_latency;
1463 break;
1464 }
1465
1466
1467 ring_container->total_bytes = 0;
1468 ring_container->total_packets = 0;
1469
1470
1471 ring_container->itr = itr_setting;
1472}
1473
1474static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
1475{
1476 u32 new_itr = q_vector->itr;
1477 u8 current_itr;
1478
1479 ixgbevf_update_itr(q_vector, &q_vector->tx);
1480 ixgbevf_update_itr(q_vector, &q_vector->rx);
1481
1482 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1483
1484 switch (current_itr) {
1485
1486 case lowest_latency:
1487 new_itr = IXGBE_100K_ITR;
1488 break;
1489 case low_latency:
1490 new_itr = IXGBE_20K_ITR;
1491 break;
1492 case bulk_latency:
1493 new_itr = IXGBE_12K_ITR;
1494 break;
1495 default:
1496 break;
1497 }
1498
1499 if (new_itr != q_vector->itr) {
1500
1501 new_itr = (10 * new_itr * q_vector->itr) /
1502 ((9 * new_itr) + q_vector->itr);
1503
1504
1505 q_vector->itr = new_itr;
1506
1507 ixgbevf_write_eitr(q_vector);
1508 }
1509}
1510
1511static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1512{
1513 struct ixgbevf_adapter *adapter = data;
1514 struct ixgbe_hw *hw = &adapter->hw;
1515
1516 hw->mac.get_link_status = 1;
1517
1518 ixgbevf_service_event_schedule(adapter);
1519
1520 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1521
1522 return IRQ_HANDLED;
1523}
1524
/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
1530static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
1531{
1532 struct ixgbevf_q_vector *q_vector = data;
1533
1534
1535 if (q_vector->rx.ring || q_vector->tx.ring)
1536 napi_schedule_irqoff(&q_vector->napi);
1537
1538 return IRQ_HANDLED;
1539}
1540
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
1548static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1549{
1550 struct net_device *netdev = adapter->netdev;
1551 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1552 unsigned int ri = 0, ti = 0;
1553 int vector, err;
1554
1555 for (vector = 0; vector < q_vectors; vector++) {
1556 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1557 struct msix_entry *entry = &adapter->msix_entries[vector];
1558
1559 if (q_vector->tx.ring && q_vector->rx.ring) {
1560 snprintf(q_vector->name, sizeof(q_vector->name),
1561 "%s-TxRx-%u", netdev->name, ri++);
1562 ti++;
1563 } else if (q_vector->rx.ring) {
1564 snprintf(q_vector->name, sizeof(q_vector->name),
1565 "%s-rx-%u", netdev->name, ri++);
1566 } else if (q_vector->tx.ring) {
1567 snprintf(q_vector->name, sizeof(q_vector->name),
1568 "%s-tx-%u", netdev->name, ti++);
1569 } else {
1570
1571 continue;
1572 }
1573 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1574 q_vector->name, q_vector);
1575 if (err) {
1576 hw_dbg(&adapter->hw,
1577 "request_irq failed for MSIX interrupt Error: %d\n",
1578 err);
1579 goto free_queue_irqs;
1580 }
1581 }
1582
1583 err = request_irq(adapter->msix_entries[vector].vector,
1584 &ixgbevf_msix_other, 0, netdev->name, adapter);
1585 if (err) {
1586 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1587 err);
1588 goto free_queue_irqs;
1589 }
1590
1591 return 0;
1592
1593free_queue_irqs:
1594 while (vector) {
1595 vector--;
1596 free_irq(adapter->msix_entries[vector].vector,
1597 adapter->q_vector[vector]);
1598 }
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609 adapter->num_msix_vectors = 0;
1610 return err;
1611}
1612
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
1620static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1621{
1622 int err = ixgbevf_request_msix_irqs(adapter);
1623
1624 if (err)
1625 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
1626
1627 return err;
1628}
1629
1630static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1631{
1632 int i, q_vectors;
1633
1634 if (!adapter->msix_entries)
1635 return;
1636
1637 q_vectors = adapter->num_msix_vectors;
1638 i = q_vectors - 1;
1639
1640 free_irq(adapter->msix_entries[i].vector, adapter);
1641 i--;
1642
1643 for (; i >= 0; i--) {
1644
1645 if (!adapter->q_vector[i]->rx.ring &&
1646 !adapter->q_vector[i]->tx.ring)
1647 continue;
1648
1649 free_irq(adapter->msix_entries[i].vector,
1650 adapter->q_vector[i]);
1651 }
1652}
1653
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
1658static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1659{
1660 struct ixgbe_hw *hw = &adapter->hw;
1661 int i;
1662
1663 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1664 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1665 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1666
1667 IXGBE_WRITE_FLUSH(hw);
1668
1669 for (i = 0; i < adapter->num_msix_vectors; i++)
1670 synchronize_irq(adapter->msix_entries[i].vector);
1671}
1672
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
1677static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1678{
1679 struct ixgbe_hw *hw = &adapter->hw;
1680
1681 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1682 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1683 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1684}
1685
/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
1693static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1694 struct ixgbevf_ring *ring)
1695{
1696 struct ixgbe_hw *hw = &adapter->hw;
1697 u64 tdba = ring->dma;
1698 int wait_loop = 10;
1699 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1700 u8 reg_idx = ring->reg_idx;
1701
1702
1703 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1704 IXGBE_WRITE_FLUSH(hw);
1705
1706 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1707 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1708 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1709 ring->count * sizeof(union ixgbe_adv_tx_desc));
1710
1711
1712 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1713 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1714
1715
1716 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1717 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1718 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1719
1720
1721 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1722 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1723 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1724
1725
1726 ring->next_to_clean = 0;
1727 ring->next_to_use = 0;
1728
1729
1730
1731
1732
1733 txdctl |= (8 << 16);
1734
1735
1736 txdctl |= (1u << 8) |
1737 32;
1738
1739
1740 memset(ring->tx_buffer_info, 0,
1741 sizeof(struct ixgbevf_tx_buffer) * ring->count);
1742
1743 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1744 clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
1745
1746 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1747
	/* poll to verify queue is enabled */
1749 do {
1750 usleep_range(1000, 2000);
1751 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1752 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1753 if (!wait_loop)
1754 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
1755}
1756
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
1763static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1764{
1765 u32 i;
1766
1767
1768 for (i = 0; i < adapter->num_tx_queues; i++)
1769 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1770 for (i = 0; i < adapter->num_xdp_queues; i++)
1771 ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
1772}
1773
1774#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1775
1776static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
1777 struct ixgbevf_ring *ring, int index)
1778{
1779 struct ixgbe_hw *hw = &adapter->hw;
1780 u32 srrctl;
1781
1782 srrctl = IXGBE_SRRCTL_DROP_EN;
1783
1784 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1785 if (ring_uses_large_buffer(ring))
1786 srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1787 else
1788 srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1789 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1790
1791 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1792}
1793
1794static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1795{
1796 struct ixgbe_hw *hw = &adapter->hw;
1797
1798
1799 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1800 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1801 IXGBE_PSRTYPE_L2HDR;
1802
1803 if (adapter->num_rx_queues > 1)
1804 psrtype |= BIT(29);
1805
1806 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1807}
1808
1809#define IXGBEVF_MAX_RX_DESC_POLL 10
1810static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1811 struct ixgbevf_ring *ring)
1812{
1813 struct ixgbe_hw *hw = &adapter->hw;
1814 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1815 u32 rxdctl;
1816 u8 reg_idx = ring->reg_idx;
1817
1818 if (IXGBE_REMOVED(hw->hw_addr))
1819 return;
1820 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1821 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1822
1823
1824 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1825
1826
1827 do {
1828 udelay(10);
1829 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1830 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1831
1832 if (!wait_loop)
1833 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1834 reg_idx);
1835}
1836
1837static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1838 struct ixgbevf_ring *ring)
1839{
1840 struct ixgbe_hw *hw = &adapter->hw;
1841 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1842 u32 rxdctl;
1843 u8 reg_idx = ring->reg_idx;
1844
1845 if (IXGBE_REMOVED(hw->hw_addr))
1846 return;
1847 do {
1848 usleep_range(1000, 2000);
1849 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1850 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1851
1852 if (!wait_loop)
1853 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1854 reg_idx);
1855}
1856
/**
 * ixgbevf_init_rss_key - Initialize adapter RSS key
 * @adapter: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 **/
1863static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
1864{
1865 u32 *rss_key;
1866
1867 if (!adapter->rss_key) {
1868 rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
1869 if (unlikely(!rss_key))
1870 return -ENOMEM;
1871
1872 netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
1873 adapter->rss_key = rss_key;
1874 }
1875
1876 return 0;
1877}
1878
1879static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1880{
1881 struct ixgbe_hw *hw = &adapter->hw;
1882 u32 vfmrqc = 0, vfreta = 0;
1883 u16 rss_i = adapter->num_rx_queues;
1884 u8 i, j;
1885
1886
1887 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1888 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));
1889
1890 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
1891 if (j == rss_i)
1892 j = 0;
1893
1894 adapter->rss_indir_tbl[i] = j;
1895
1896 vfreta |= j << (i & 0x3) * 8;
1897 if ((i & 3) == 3) {
1898 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1899 vfreta = 0;
1900 }
1901 }
1902
1903
1904 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1905 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1906 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1907 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1908
1909 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1910
1911 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1912}
1913
1914static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1915 struct ixgbevf_ring *ring)
1916{
1917 struct ixgbe_hw *hw = &adapter->hw;
1918 union ixgbe_adv_rx_desc *rx_desc;
1919 u64 rdba = ring->dma;
1920 u32 rxdctl;
1921 u8 reg_idx = ring->reg_idx;
1922
1923
1924 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1925 ixgbevf_disable_rx_queue(adapter, ring);
1926
1927 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1928 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1929 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1930 ring->count * sizeof(union ixgbe_adv_rx_desc));
1931
1932#ifndef CONFIG_SPARC
1933
1934 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1935 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1936#else
1937 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1938 IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1939 IXGBE_DCA_RXCTRL_DATA_WRO_EN);
1940#endif
1941
1942
1943 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1944 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1945 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1946
1947
1948 memset(ring->rx_buffer_info, 0,
1949 sizeof(struct ixgbevf_rx_buffer) * ring->count);
1950
1951
1952 rx_desc = IXGBEVF_RX_DESC(ring, 0);
1953 rx_desc->wb.upper.length = 0;
1954
1955
1956 ring->next_to_clean = 0;
1957 ring->next_to_use = 0;
1958 ring->next_to_alloc = 0;
1959
1960 ixgbevf_configure_srrctl(adapter, ring, reg_idx);
1961
1962
1963 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
1964 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
1965 IXGBE_RXDCTL_RLPML_EN);
1966
1967#if (PAGE_SIZE < 8192)
1968
1969 if (ring_uses_build_skb(ring) &&
1970 !ring_uses_large_buffer(ring))
1971 rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
1972 IXGBE_RXDCTL_RLPML_EN;
1973#endif
1974 }
1975
1976 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1977 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1978
1979 ixgbevf_rx_desc_queue_enable(adapter, ring);
1980 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1981}
1982
1983static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
1984 struct ixgbevf_ring *rx_ring)
1985{
1986 struct net_device *netdev = adapter->netdev;
1987 unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1988
1989
1990 clear_ring_build_skb_enabled(rx_ring);
1991 clear_ring_uses_large_buffer(rx_ring);
1992
1993 if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
1994 return;
1995
1996 set_ring_build_skb_enabled(rx_ring);
1997
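	/* With 4K pages, frames larger than IXGBEVF_MAX_FRAME_BUILD_SKB need
	 * an order-1 "large" buffer so a full frame still fits in a single
	 * receive buffer.
	 */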
1998 if (PAGE_SIZE < 8192) {
1999 if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
2000 return;
2001
2002 set_ring_uses_large_buffer(rx_ring);
2003 }
2004}
2005
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
2012static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
2013{
2014 struct ixgbe_hw *hw = &adapter->hw;
2015 struct net_device *netdev = adapter->netdev;
2016 int i, ret;
2017
2018 ixgbevf_setup_psrtype(adapter);
2019 if (hw->mac.type >= ixgbe_mac_X550_vf)
2020 ixgbevf_setup_vfmrqc(adapter);
2021
2022 spin_lock_bh(&adapter->mbx_lock);
2023
2024 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
2025 spin_unlock_bh(&adapter->mbx_lock);
2026 if (ret)
2027 dev_err(&adapter->pdev->dev,
2028 "Failed to set MTU at %d\n", netdev->mtu);
2029
2030
2031
2032
2033 for (i = 0; i < adapter->num_rx_queues; i++) {
2034 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
2035
2036 ixgbevf_set_rx_buffer_len(adapter, rx_ring);
2037 ixgbevf_configure_rx_ring(adapter, rx_ring);
2038 }
2039}
2040
2041static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
2042 __be16 proto, u16 vid)
2043{
2044 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2045 struct ixgbe_hw *hw = &adapter->hw;
2046 int err;
2047
2048 spin_lock_bh(&adapter->mbx_lock);
2049
2050
2051 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
2052
2053 spin_unlock_bh(&adapter->mbx_lock);
2054
2055
2056 if (err == IXGBE_ERR_MBX)
2057 return -EIO;
2058
2059 if (err == IXGBE_ERR_INVALID_ARGUMENT)
2060 return -EACCES;
2061
2062 set_bit(vid, adapter->active_vlans);
2063
2064 return err;
2065}
2066
2067static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
2068 __be16 proto, u16 vid)
2069{
2070 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2071 struct ixgbe_hw *hw = &adapter->hw;
2072 int err;
2073
2074 spin_lock_bh(&adapter->mbx_lock);
2075
2076
2077 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
2078
2079 spin_unlock_bh(&adapter->mbx_lock);
2080
2081 clear_bit(vid, adapter->active_vlans);
2082
2083 return err;
2084}
2085
2086static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
2087{
2088 u16 vid;
2089
2090 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2091 ixgbevf_vlan_rx_add_vid(adapter->netdev,
2092 htons(ETH_P_8021Q), vid);
2093}
2094
2095static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
2096{
2097 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2098 struct ixgbe_hw *hw = &adapter->hw;
2099 int count = 0;
2100
2101 if (!netdev_uc_empty(netdev)) {
2102 struct netdev_hw_addr *ha;
2103
2104 netdev_for_each_uc_addr(ha, netdev) {
2105 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
2106 udelay(200);
2107 }
2108 } else {
		/* If the list is empty then send message to PF driver to
		 * clear all MAC VLANs on this VF.
		 */
2112 hw->mac.ops.set_uc_addr(hw, 0, NULL);
2113 }
2114
2115 return count;
2116}
2117
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and promiscuous mode.
 **/
2127static void ixgbevf_set_rx_mode(struct net_device *netdev)
2128{
2129 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2130 struct ixgbe_hw *hw = &adapter->hw;
2131 unsigned int flags = netdev->flags;
2132 int xcast_mode;
2133
2134
2135 if (flags & IFF_PROMISC)
2136 xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
2137 else if (flags & IFF_ALLMULTI)
2138 xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
2139 else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
2140 xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
2141 else
2142 xcast_mode = IXGBEVF_XCAST_MODE_NONE;
2143
2144 spin_lock_bh(&adapter->mbx_lock);
2145
2146 hw->mac.ops.update_xcast_mode(hw, xcast_mode);
2147
2148
2149 hw->mac.ops.update_mc_addr_list(hw, netdev);
2150
2151 ixgbevf_write_uc_addr_list(netdev);
2152
2153 spin_unlock_bh(&adapter->mbx_lock);
2154}
2155
2156static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
2157{
2158 int q_idx;
2159 struct ixgbevf_q_vector *q_vector;
2160 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2161
2162 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2163 q_vector = adapter->q_vector[q_idx];
2164 napi_enable(&q_vector->napi);
2165 }
2166}
2167
2168static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
2169{
2170 int q_idx;
2171 struct ixgbevf_q_vector *q_vector;
2172 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2173
2174 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2175 q_vector = adapter->q_vector[q_idx];
2176 napi_disable(&q_vector->napi);
2177 }
2178}
2179
2180static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
2181{
2182 struct ixgbe_hw *hw = &adapter->hw;
2183 unsigned int def_q = 0;
2184 unsigned int num_tcs = 0;
2185 unsigned int num_rx_queues = adapter->num_rx_queues;
2186 unsigned int num_tx_queues = adapter->num_tx_queues;
2187 int err;
2188
2189 spin_lock_bh(&adapter->mbx_lock);
2190
2191
2192 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2193
2194 spin_unlock_bh(&adapter->mbx_lock);
2195
2196 if (err)
2197 return err;
2198
2199 if (num_tcs > 1) {
2200
2201 num_tx_queues = 1;
2202
2203
2204 adapter->tx_ring[0]->reg_idx = def_q;
2205
2206
2207 num_rx_queues = num_tcs;
2208 }
2209
2210
2211 if ((adapter->num_rx_queues != num_rx_queues) ||
2212 (adapter->num_tx_queues != num_tx_queues)) {
2213
2214 hw->mbx.timeout = 0;
2215
2216
2217 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
2218 }
2219
2220 return 0;
2221}
2222
2223static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
2224{
2225 ixgbevf_configure_dcb(adapter);
2226
2227 ixgbevf_set_rx_mode(adapter->netdev);
2228
2229 ixgbevf_restore_vlan(adapter);
2230 ixgbevf_ipsec_restore(adapter);
2231
2232 ixgbevf_configure_tx(adapter);
2233 ixgbevf_configure_rx(adapter);
2234}
2235
2236static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
2237{
2238
2239 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2240 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2241 adapter->stats.base_vfgprc;
2242 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2243 adapter->stats.base_vfgptc;
2244 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2245 adapter->stats.base_vfgorc;
2246 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2247 adapter->stats.base_vfgotc;
2248 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2249 adapter->stats.base_vfmprc;
2250 }
2251}
2252
2253static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2254{
2255 struct ixgbe_hw *hw = &adapter->hw;
2256
2257 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2258 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2259 adapter->stats.last_vfgorc |=
2260 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2261 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2262 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2263 adapter->stats.last_vfgotc |=
2264 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2265 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2266
2267 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2268 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2269 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2270 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2271 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2272}
2273
2274static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2275{
2276 struct ixgbe_hw *hw = &adapter->hw;
2277 static const int api[] = {
2278 ixgbe_mbox_api_14,
2279 ixgbe_mbox_api_13,
2280 ixgbe_mbox_api_12,
2281 ixgbe_mbox_api_11,
2282 ixgbe_mbox_api_10,
2283 ixgbe_mbox_api_unknown
2284 };
2285 int err, idx = 0;
2286
2287 spin_lock_bh(&adapter->mbx_lock);
2288
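	/* try each mailbox API version, newest first, until the PF accepts one */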
2289 while (api[idx] != ixgbe_mbox_api_unknown) {
2290 err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
2291 if (!err)
2292 break;
2293 idx++;
2294 }
2295
2296 spin_unlock_bh(&adapter->mbx_lock);
2297}
2298
2299static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2300{
2301 struct net_device *netdev = adapter->netdev;
2302 struct ixgbe_hw *hw = &adapter->hw;
2303
2304 ixgbevf_configure_msix(adapter);
2305
2306 spin_lock_bh(&adapter->mbx_lock);
2307
2308 if (is_valid_ether_addr(hw->mac.addr))
2309 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2310 else
2311 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2312
2313 spin_unlock_bh(&adapter->mbx_lock);
2314
2315 smp_mb__before_atomic();
2316 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2317 ixgbevf_napi_enable_all(adapter);
2318
2319
2320 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2321 ixgbevf_irq_enable(adapter);
2322
2323
2324 netif_tx_start_all_queues(netdev);
2325
2326 ixgbevf_save_reset_stats(adapter);
2327 ixgbevf_init_last_counter_stats(adapter);
2328
2329 hw->mac.get_link_status = 1;
2330 mod_timer(&adapter->service_timer, jiffies);
2331}
2332
2333void ixgbevf_up(struct ixgbevf_adapter *adapter)
2334{
2335 ixgbevf_configure(adapter);
2336
2337 ixgbevf_up_complete(adapter);
2338}
2339
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
2344static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2345{
2346 u16 i = rx_ring->next_to_clean;
2347
2348
2349 if (rx_ring->skb) {
2350 dev_kfree_skb(rx_ring->skb);
2351 rx_ring->skb = NULL;
2352 }
2353
2354
2355 while (i != rx_ring->next_to_alloc) {
2356 struct ixgbevf_rx_buffer *rx_buffer;
2357
2358 rx_buffer = &rx_ring->rx_buffer_info[i];
2359
2360
2361
2362
2363 dma_sync_single_range_for_cpu(rx_ring->dev,
2364 rx_buffer->dma,
2365 rx_buffer->page_offset,
2366 ixgbevf_rx_bufsz(rx_ring),
2367 DMA_FROM_DEVICE);
2368
2369
2370 dma_unmap_page_attrs(rx_ring->dev,
2371 rx_buffer->dma,
2372 ixgbevf_rx_pg_size(rx_ring),
2373 DMA_FROM_DEVICE,
2374 IXGBEVF_RX_DMA_ATTR);
2375
2376 __page_frag_cache_drain(rx_buffer->page,
2377 rx_buffer->pagecnt_bias);
2378
2379 i++;
2380 if (i == rx_ring->count)
2381 i = 0;
2382 }
2383
2384 rx_ring->next_to_alloc = 0;
2385 rx_ring->next_to_clean = 0;
2386 rx_ring->next_to_use = 0;
2387}
2388
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
2393static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2394{
2395 u16 i = tx_ring->next_to_clean;
2396 struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
2397
2398 while (i != tx_ring->next_to_use) {
2399 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
2400
2401
2402 if (ring_is_xdp(tx_ring))
2403 page_frag_free(tx_buffer->data);
2404 else
2405 dev_kfree_skb_any(tx_buffer->skb);
2406
2407
2408 dma_unmap_single(tx_ring->dev,
2409 dma_unmap_addr(tx_buffer, dma),
2410 dma_unmap_len(tx_buffer, len),
2411 DMA_TO_DEVICE);
2412
2413
2414 eop_desc = tx_buffer->next_to_watch;
2415 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2416
2417
2418 while (tx_desc != eop_desc) {
2419 tx_buffer++;
2420 tx_desc++;
2421 i++;
2422 if (unlikely(i == tx_ring->count)) {
2423 i = 0;
2424 tx_buffer = tx_ring->tx_buffer_info;
2425 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
2426 }
2427
2428
2429 if (dma_unmap_len(tx_buffer, len))
2430 dma_unmap_page(tx_ring->dev,
2431 dma_unmap_addr(tx_buffer, dma),
2432 dma_unmap_len(tx_buffer, len),
2433 DMA_TO_DEVICE);
2434 }
2435
2436
2437 tx_buffer++;
2438 i++;
2439 if (unlikely(i == tx_ring->count)) {
2440 i = 0;
2441 tx_buffer = tx_ring->tx_buffer_info;
2442 }
2443 }
2444
2445
2446 tx_ring->next_to_use = 0;
2447 tx_ring->next_to_clean = 0;
2448
2449}
2450
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
2455static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2456{
2457 int i;
2458
2459 for (i = 0; i < adapter->num_rx_queues; i++)
2460 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2461}
2462
/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
2467static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2468{
2469 int i;
2470
2471 for (i = 0; i < adapter->num_tx_queues; i++)
2472 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2473 for (i = 0; i < adapter->num_xdp_queues; i++)
2474 ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
2475}
2476
2477void ixgbevf_down(struct ixgbevf_adapter *adapter)
2478{
2479 struct net_device *netdev = adapter->netdev;
2480 struct ixgbe_hw *hw = &adapter->hw;
2481 int i;
2482
2483
2484 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2485 return;
2486
2487
2488 for (i = 0; i < adapter->num_rx_queues; i++)
2489 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2490
2491 usleep_range(10000, 20000);
2492
2493 netif_tx_stop_all_queues(netdev);
2494
2495
2496 netif_carrier_off(netdev);
2497 netif_tx_disable(netdev);
2498
2499 ixgbevf_irq_disable(adapter);
2500
2501 ixgbevf_napi_disable_all(adapter);
2502
2503 del_timer_sync(&adapter->service_timer);
2504
2505
2506 for (i = 0; i < adapter->num_tx_queues; i++) {
2507 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2508
2509 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2510 IXGBE_TXDCTL_SWFLSH);
2511 }
2512
2513 for (i = 0; i < adapter->num_xdp_queues; i++) {
2514 u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
2515
2516 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2517 IXGBE_TXDCTL_SWFLSH);
2518 }
2519
2520 if (!pci_channel_offline(adapter->pdev))
2521 ixgbevf_reset(adapter);
2522
2523 ixgbevf_clean_all_tx_rings(adapter);
2524 ixgbevf_clean_all_rx_rings(adapter);
2525}
2526
2527void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2528{
2529 WARN_ON(in_interrupt());
2530
2531 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2532 msleep(1);
2533
2534 ixgbevf_down(adapter);
2535 pci_set_master(adapter->pdev);
2536 ixgbevf_up(adapter);
2537
2538 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2539}
2540
2541void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2542{
2543 struct ixgbe_hw *hw = &adapter->hw;
2544 struct net_device *netdev = adapter->netdev;
2545
2546 if (hw->mac.ops.reset_hw(hw)) {
2547 hw_dbg(hw, "PF still resetting\n");
2548 } else {
2549 hw->mac.ops.init_hw(hw);
2550 ixgbevf_negotiate_api(adapter);
2551 }
2552
2553 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2554 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2555 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2556 }
2557
2558 adapter->last_reset = jiffies;
2559}
2560
2561static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2562 int vectors)
2563{
2564 int vector_threshold;
2565
	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (mailbox/link status change, etc.)
	 */
2570 vector_threshold = MIN_MSIX_COUNT;
2571
2572
2573
2574
2575
2576
2577 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2578 vector_threshold, vectors);
2579
2580 if (vectors < 0) {
2581 dev_err(&adapter->pdev->dev,
2582 "Unable to allocate MSI-X interrupts\n");
2583 kfree(adapter->msix_entries);
2584 adapter->msix_entries = NULL;
2585 return vectors;
2586 }
2587
	/* Adjust for only the vectors we'll use, which is the minimum of
	 * what we requested and what we were actually allocated.
	 */
2592 adapter->num_msix_vectors = vectors;
2593
2594 return 0;
2595}
2596
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fall through conditions.
 **/
2608static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2609{
2610 struct ixgbe_hw *hw = &adapter->hw;
2611 unsigned int def_q = 0;
2612 unsigned int num_tcs = 0;
2613 int err;
2614
2615
2616 adapter->num_rx_queues = 1;
2617 adapter->num_tx_queues = 1;
2618 adapter->num_xdp_queues = 0;
2619
2620 spin_lock_bh(&adapter->mbx_lock);
2621
2622
2623 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2624
2625 spin_unlock_bh(&adapter->mbx_lock);
2626
2627 if (err)
2628 return;
2629
2630
2631 if (num_tcs > 1) {
2632 adapter->num_rx_queues = num_tcs;
2633 } else {
2634 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2635
2636 switch (hw->api_version) {
2637 case ixgbe_mbox_api_11:
2638 case ixgbe_mbox_api_12:
2639 case ixgbe_mbox_api_13:
2640 case ixgbe_mbox_api_14:
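			/* With an XDP program attached, every RSS queue also
			 * needs a dedicated XDP Tx ring.  If the PF granted
			 * only as many Tx queues as the desired RSS count,
			 * shrink the RSS count so the XDP rings still fit.
			 */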
2641 if (adapter->xdp_prog &&
2642 hw->mac.max_tx_queues == rss)
2643 rss = rss > 3 ? 2 : 1;
2644
2645 adapter->num_rx_queues = rss;
2646 adapter->num_tx_queues = rss;
			adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
			break;
		default:
2649 break;
2650 }
2651 }
2652}
2653
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
2661static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2662{
2663 int vector, v_budget;
2664
	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
	 * The default is to use pairs of vectors.
	 */
2671 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2672 v_budget = min_t(int, v_budget, num_online_cpus());
2673 v_budget += NON_Q_VECTORS;
2674
2675 adapter->msix_entries = kcalloc(v_budget,
2676 sizeof(struct msix_entry), GFP_KERNEL);
2677 if (!adapter->msix_entries)
2678 return -ENOMEM;
2679
2680 for (vector = 0; vector < v_budget; vector++)
2681 adapter->msix_entries[vector].entry = vector;
2682
	/* A failure in MSI-X entry allocation isn't fatal, but the VF driver
	 * does not support any other modes, so we will simply fail here.
	 * Note that the msix_entries pointer is cleaned up elsewhere.
	 */
2687 return ixgbevf_acquire_msix_vectors(adapter, v_budget);
2688}
2689
2690static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
2691 struct ixgbevf_ring_container *head)
2692{
2693 ring->next = head->ring;
2694 head->ring = ring;
2695 head->count++;
2696}
2697
/**
 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 * @txr_count: number of Tx rings for q vector
 * @txr_idx: index of first Tx ring to assign
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: number of Rx rings for q vector
 * @rxr_idx: index of first Rx ring to assign
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
2711static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
2712 int txr_count, int txr_idx,
2713 int xdp_count, int xdp_idx,
2714 int rxr_count, int rxr_idx)
2715{
2716 struct ixgbevf_q_vector *q_vector;
2717 int reg_idx = txr_idx + xdp_idx;
2718 struct ixgbevf_ring *ring;
2719 int ring_count, size;
2720
2721 ring_count = txr_count + xdp_count + rxr_count;
2722 size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
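	/* Note: the ring structures are laid out in the same allocation,
	 * immediately after the q_vector itself; the ring pointer below
	 * simply walks through that tail array.
	 */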
2723
2724
2725 q_vector = kzalloc(size, GFP_KERNEL);
2726 if (!q_vector)
2727 return -ENOMEM;
2728
2729
2730 netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
2731
2732
2733 adapter->q_vector[v_idx] = q_vector;
2734 q_vector->adapter = adapter;
2735 q_vector->v_idx = v_idx;
2736
2737
2738 ring = q_vector->ring;
2739
2740 while (txr_count) {
2741
2742 ring->dev = &adapter->pdev->dev;
2743 ring->netdev = adapter->netdev;
2744
2745
2746 ring->q_vector = q_vector;
2747
2748
2749 ixgbevf_add_ring(ring, &q_vector->tx);
2750
2751
2752 ring->count = adapter->tx_ring_count;
2753 ring->queue_index = txr_idx;
2754 ring->reg_idx = reg_idx;
2755
2756
2757 adapter->tx_ring[txr_idx] = ring;
2758
2759
2760 txr_count--;
2761 txr_idx++;
2762 reg_idx++;
2763
2764
2765 ring++;
2766 }
2767
2768 while (xdp_count) {
2769
2770 ring->dev = &adapter->pdev->dev;
2771 ring->netdev = adapter->netdev;
2772
2773
2774 ring->q_vector = q_vector;
2775
2776
2777 ixgbevf_add_ring(ring, &q_vector->tx);
2778
2779
2780 ring->count = adapter->tx_ring_count;
2781 ring->queue_index = xdp_idx;
2782 ring->reg_idx = reg_idx;
2783 set_ring_xdp(ring);
2784
2785
2786 adapter->xdp_ring[xdp_idx] = ring;
2787
2788
2789 xdp_count--;
2790 xdp_idx++;
2791 reg_idx++;
2792
2793
2794 ring++;
2795 }
2796
2797 while (rxr_count) {
2798
2799 ring->dev = &adapter->pdev->dev;
2800 ring->netdev = adapter->netdev;
2801
2802
2803 ring->q_vector = q_vector;
2804
2805
2806 ixgbevf_add_ring(ring, &q_vector->rx);
2807
2808
2809 ring->count = adapter->rx_ring_count;
2810 ring->queue_index = rxr_idx;
2811 ring->reg_idx = rxr_idx;
2812
2813
2814 adapter->rx_ring[rxr_idx] = ring;
2815
2816
2817 rxr_count--;
2818 rxr_idx++;
2819
2820
2821 ring++;
2822 }
2823
2824 return 0;
2825}
2826
/**
 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
2836static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
2837{
2838 struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
2839 struct ixgbevf_ring *ring;
2840
2841 ixgbevf_for_each_ring(ring, q_vector->tx) {
2842 if (ring_is_xdp(ring))
2843 adapter->xdp_ring[ring->queue_index] = NULL;
2844 else
2845 adapter->tx_ring[ring->queue_index] = NULL;
2846 }
2847
2848 ixgbevf_for_each_ring(ring, q_vector->rx)
2849 adapter->rx_ring[ring->queue_index] = NULL;
2850
2851 adapter->q_vector[v_idx] = NULL;
2852 netif_napi_del(&q_vector->napi);
2853
	/* ixgbevf_get_stats() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
2857 kfree_rcu(q_vector, rcu);
2858}
2859
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
2867static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2868{
2869 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2870 int rxr_remaining = adapter->num_rx_queues;
2871 int txr_remaining = adapter->num_tx_queues;
2872 int xdp_remaining = adapter->num_xdp_queues;
2873 int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
2874 int err;
2875
2876 if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
2877 for (; rxr_remaining; v_idx++, q_vectors--) {
2878 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2879
2880 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2881 0, 0, 0, 0, rqpv, rxr_idx);
2882 if (err)
2883 goto err_out;
2884
2885
2886 rxr_remaining -= rqpv;
2887 rxr_idx += rqpv;
2888 }
2889 }
2890
2891 for (; q_vectors; v_idx++, q_vectors--) {
2892 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2893 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
2894 int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors);
2895
2896 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2897 tqpv, txr_idx,
2898 xqpv, xdp_idx,
2899 rqpv, rxr_idx);
2900
2901 if (err)
2902 goto err_out;
2903
2904
2905 rxr_remaining -= rqpv;
2906 rxr_idx += rqpv;
2907 txr_remaining -= tqpv;
2908 txr_idx += tqpv;
2909 xdp_remaining -= xqpv;
2910 xdp_idx += xqpv;
2911 }
2912
2913 return 0;
2914
2915err_out:
2916 while (v_idx) {
2917 v_idx--;
2918 ixgbevf_free_q_vector(adapter, v_idx);
2919 }
2920
2921 return -ENOMEM;
2922}
2923
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
2932static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2933{
2934 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2935
2936 while (q_vectors) {
2937 q_vectors--;
2938 ixgbevf_free_q_vector(adapter, q_vectors);
2939 }
2940}
2941
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 **/
2947static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2948{
2949 if (!adapter->msix_entries)
2950 return;
2951
2952 pci_disable_msix(adapter->pdev);
2953 kfree(adapter->msix_entries);
2954 adapter->msix_entries = NULL;
2955}
2956
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 **/
2962static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2963{
2964 int err;
2965
2966
2967 ixgbevf_set_num_queues(adapter);
2968
2969 err = ixgbevf_set_interrupt_capability(adapter);
2970 if (err) {
2971 hw_dbg(&adapter->hw,
2972 "Unable to setup interrupt capabilities\n");
2973 goto err_set_interrupt;
2974 }
2975
2976 err = ixgbevf_alloc_q_vectors(adapter);
2977 if (err) {
2978 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2979 goto err_alloc_q_vectors;
2980 }
2981
2982 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n",
2983 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
2984 adapter->num_rx_queues, adapter->num_tx_queues,
2985 adapter->num_xdp_queues);
2986
2987 set_bit(__IXGBEVF_DOWN, &adapter->state);
2988
2989 return 0;
2990err_alloc_q_vectors:
2991 ixgbevf_reset_interrupt_capability(adapter);
2992err_set_interrupt:
2993 return err;
2994}
2995
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
3003static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
3004{
3005 adapter->num_tx_queues = 0;
3006 adapter->num_xdp_queues = 0;
3007 adapter->num_rx_queues = 0;
3008
3009 ixgbevf_free_q_vectors(adapter);
3010 ixgbevf_reset_interrupt_capability(adapter);
3011}
3012
/**
 * ixgbevf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
3021static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
3022{
3023 struct ixgbe_hw *hw = &adapter->hw;
3024 struct pci_dev *pdev = adapter->pdev;
3025 struct net_device *netdev = adapter->netdev;
3026 int err;
3027
3028
3029 hw->vendor_id = pdev->vendor;
3030 hw->device_id = pdev->device;
3031 hw->revision_id = pdev->revision;
3032 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3033 hw->subsystem_device_id = pdev->subsystem_device;
3034
3035 hw->mbx.ops.init_params(hw);
3036
3037 if (hw->mac.type >= ixgbe_mac_X550_vf) {
3038 err = ixgbevf_init_rss_key(adapter);
3039 if (err)
3040 goto out;
3041 }
3042
3043
3044 hw->mac.max_tx_queues = 2;
3045 hw->mac.max_rx_queues = 2;
3046
3047
3048 spin_lock_init(&adapter->mbx_lock);
3049
3050 err = hw->mac.ops.reset_hw(hw);
3051 if (err) {
3052 dev_info(&pdev->dev,
3053 "PF still in reset state. Is the PF interface up?\n");
3054 } else {
3055 err = hw->mac.ops.init_hw(hw);
3056 if (err) {
3057 pr_err("init_shared_code failed: %d\n", err);
3058 goto out;
3059 }
3060 ixgbevf_negotiate_api(adapter);
3061 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
3062 if (err)
3063 dev_info(&pdev->dev, "Error reading MAC address\n");
3064 else if (is_zero_ether_addr(adapter->hw.mac.addr))
3065 dev_info(&pdev->dev,
3066 "MAC address not assigned by administrator.\n");
3067 ether_addr_copy(netdev->dev_addr, hw->mac.addr);
3068 }
3069
3070 if (!is_valid_ether_addr(netdev->dev_addr)) {
3071 dev_info(&pdev->dev, "Assigning random MAC address\n");
3072 eth_hw_addr_random(netdev);
3073 ether_addr_copy(hw->mac.addr, netdev->dev_addr);
3074 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
3075 }
3076
3077
3078 adapter->rx_itr_setting = 1;
3079 adapter->tx_itr_setting = 1;
3080
3081
3082 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
3083 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
3084
3085 set_bit(__IXGBEVF_DOWN, &adapter->state);
3086 return 0;
3087
3088out:
3089 return err;
3090}
3091
3092#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
3093 { \
3094 u32 current_counter = IXGBE_READ_REG(hw, reg); \
3095 if (current_counter < last_counter) \
3096 counter += 0x100000000LL; \
3097 last_counter = current_counter; \
3098 counter &= 0xFFFFFFFF00000000LL; \
3099 counter |= current_counter; \
3100 }
3101
3102#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
3103 { \
3104 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
3105 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
3106 u64 current_counter = (current_counter_msb << 32) | \
3107 current_counter_lsb; \
3108 if (current_counter < last_counter) \
3109 counter += 0x1000000000LL; \
3110 last_counter = current_counter; \
3111 counter &= 0xFFFFFFF000000000LL; \
3112 counter |= current_counter; \
3113 }
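
/* The VF statistics registers are free-running and never cleared by the
 * driver, so the helpers above only have to detect rollover.  Illustrative
 * example: if last_counter held 0xFFFFFFF0 and the register now reads
 * 0x00000010, the "current < last" test fires and 2^32 is added to the
 * accumulated 64-bit counter before its low 32 bits are refreshed (the
 * 36-bit LSB/MSB variant uses 2^36 and a 36-bit mask instead).
 */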
3114
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
3118void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
3119{
3120 struct ixgbe_hw *hw = &adapter->hw;
3121 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
3122 u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
3123 int i;
3124
3125 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3126 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3127 return;
3128
3129 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3130 adapter->stats.vfgprc);
3131 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3132 adapter->stats.vfgptc);
3133 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3134 adapter->stats.last_vfgorc,
3135 adapter->stats.vfgorc);
3136 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3137 adapter->stats.last_vfgotc,
3138 adapter->stats.vfgotc);
3139 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3140 adapter->stats.vfmprc);
3141
3142 for (i = 0; i < adapter->num_rx_queues; i++) {
3143 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
3144
3145 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
3146 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
3147 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
3148 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
3149 }
3150
3151 adapter->hw_csum_rx_error = hw_csum_rx_error;
3152 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
3153 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
3154 adapter->alloc_rx_page = alloc_rx_page;
3155}
3156
/**
 * ixgbevf_service_timer - Timer Call-back
 * @t: pointer to timer_list struct
 **/
3161static void ixgbevf_service_timer(struct timer_list *t)
3162{
3163 struct ixgbevf_adapter *adapter = from_timer(adapter, t,
3164 service_timer);
3165
3166
3167 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
3168
3169 ixgbevf_service_event_schedule(adapter);
3170}
3171
3172static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
3173{
3174 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
3175 return;
3176
3177 rtnl_lock();
3178
3179 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3180 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
3181 test_bit(__IXGBEVF_RESETTING, &adapter->state)) {
3182 rtnl_unlock();
3183 return;
3184 }
3185
3186 adapter->tx_timeout_count++;
3187
3188 ixgbevf_reinit_locked(adapter);
3189 rtnl_unlock();
3190}
3191
/**
 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for Tx hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/
3201static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
3202{
3203 struct ixgbe_hw *hw = &adapter->hw;
3204 u32 eics = 0;
3205 int i;
3206
3207
3208 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3209 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3210 return;
3211
3212
3213 if (netif_carrier_ok(adapter->netdev)) {
3214 for (i = 0; i < adapter->num_tx_queues; i++)
3215 set_check_for_tx_hang(adapter->tx_ring[i]);
3216 for (i = 0; i < adapter->num_xdp_queues; i++)
3217 set_check_for_tx_hang(adapter->xdp_ring[i]);
3218 }
3219
3220
3221 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
3222 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
3223
3224 if (qv->rx.ring || qv->tx.ring)
3225 eics |= BIT(i);
3226 }
3227
3228
3229 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
3230}
3231
/**
 * ixgbevf_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
3236static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
3237{
3238 struct ixgbe_hw *hw = &adapter->hw;
3239 u32 link_speed = adapter->link_speed;
3240 bool link_up = adapter->link_up;
3241 s32 err;
3242
3243 spin_lock_bh(&adapter->mbx_lock);
3244
3245 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3246
3247 spin_unlock_bh(&adapter->mbx_lock);
3248
3249
3250 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
3251 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
3252 link_up = false;
3253 }
3254
3255 adapter->link_up = link_up;
3256 adapter->link_speed = link_speed;
3257}
3258
/**
 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
 *				 print the link up message
 * @adapter: pointer to the device adapter structure
 **/
3264static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
3265{
3266 struct net_device *netdev = adapter->netdev;
3267
3268
3269 if (netif_carrier_ok(netdev))
3270 return;
3271
3272 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
3273 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
3274 "10 Gbps" :
3275 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
3276 "1 Gbps" :
3277 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
3278 "100 Mbps" :
3279 "unknown speed");
3280
3281 netif_carrier_on(netdev);
3282}
3283
/**
 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
 *				   print the link down message
 * @adapter: pointer to the adapter structure
 **/
3289static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
3290{
3291 struct net_device *netdev = adapter->netdev;
3292
3293 adapter->link_speed = 0;
3294
3295
3296 if (!netif_carrier_ok(netdev))
3297 return;
3298
3299 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
3300
3301 netif_carrier_off(netdev);
3302}
3303
/**
 * ixgbevf_watchdog_subtask - worker thread to bring link up
 * @adapter: board private structure
 **/
3308static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
3309{
3310
3311 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3312 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3313 return;
3314
3315 ixgbevf_watchdog_update_link(adapter);
3316
3317 if (adapter->link_up)
3318 ixgbevf_watchdog_link_is_up(adapter);
3319 else
3320 ixgbevf_watchdog_link_is_down(adapter);
3321
3322 ixgbevf_update_stats(adapter);
3323}
3324
/**
 * ixgbevf_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
3329static void ixgbevf_service_task(struct work_struct *work)
3330{
3331 struct ixgbevf_adapter *adapter = container_of(work,
3332 struct ixgbevf_adapter,
3333 service_task);
3334 struct ixgbe_hw *hw = &adapter->hw;
3335
3336 if (IXGBE_REMOVED(hw->hw_addr)) {
3337 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
3338 rtnl_lock();
3339 ixgbevf_down(adapter);
3340 rtnl_unlock();
3341 }
3342 return;
3343 }
3344
3345 ixgbevf_queue_reset_subtask(adapter);
3346 ixgbevf_reset_subtask(adapter);
3347 ixgbevf_watchdog_subtask(adapter);
3348 ixgbevf_check_hang_subtask(adapter);
3349
3350 ixgbevf_service_event_complete(adapter);
3351}
3352
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
3359void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
3360{
3361 ixgbevf_clean_tx_ring(tx_ring);
3362
3363 vfree(tx_ring->tx_buffer_info);
3364 tx_ring->tx_buffer_info = NULL;
3365
3366
3367 if (!tx_ring->desc)
3368 return;
3369
3370 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
3371 tx_ring->dma);
3372
3373 tx_ring->desc = NULL;
3374}
3375
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
3382static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
3383{
3384 int i;
3385
3386 for (i = 0; i < adapter->num_tx_queues; i++)
3387 if (adapter->tx_ring[i]->desc)
3388 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3389 for (i = 0; i < adapter->num_xdp_queues; i++)
3390 if (adapter->xdp_ring[i]->desc)
3391 ixgbevf_free_tx_resources(adapter->xdp_ring[i]);
3392}
3393
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
3400int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
3401{
3402 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3403 int size;
3404
3405 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
3406 tx_ring->tx_buffer_info = vmalloc(size);
3407 if (!tx_ring->tx_buffer_info)
3408 goto err;
3409
3410 u64_stats_init(&tx_ring->syncp);
3411
3412
3413 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
3414 tx_ring->size = ALIGN(tx_ring->size, 4096);
3415
3416 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
3417 &tx_ring->dma, GFP_KERNEL);
3418 if (!tx_ring->desc)
3419 goto err;
3420
3421 return 0;
3422
3423err:
3424 vfree(tx_ring->tx_buffer_info);
3425 tx_ring->tx_buffer_info = NULL;
3426 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
3427 return -ENOMEM;
3428}
3429
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
3440static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3441{
3442 int i, j = 0, err = 0;
3443
3444 for (i = 0; i < adapter->num_tx_queues; i++) {
3445 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3446 if (!err)
3447 continue;
3448 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3449 goto err_setup_tx;
3450 }
3451
3452 for (j = 0; j < adapter->num_xdp_queues; j++) {
3453 err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]);
3454 if (!err)
3455 continue;
3456 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
3457 goto err_setup_tx;
3458 }
3459
3460 return 0;
3461err_setup_tx:
3462
3463 while (j--)
3464 ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
3465 while (i--)
3466 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3467
3468 return err;
3469}
3470
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
3478int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
3479 struct ixgbevf_ring *rx_ring)
3480{
3481 int size;
3482
3483 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3484 rx_ring->rx_buffer_info = vmalloc(size);
3485 if (!rx_ring->rx_buffer_info)
3486 goto err;
3487
3488 u64_stats_init(&rx_ring->syncp);
3489
3490
3491 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3492 rx_ring->size = ALIGN(rx_ring->size, 4096);
3493
3494 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3495 &rx_ring->dma, GFP_KERNEL);
3496
3497 if (!rx_ring->desc)
3498 goto err;
3499
3500
3501 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
3502 rx_ring->queue_index) < 0)
3503 goto err;
3504
3505 rx_ring->xdp_prog = adapter->xdp_prog;
3506
3507 return 0;
3508err:
3509 vfree(rx_ring->rx_buffer_info);
3510 rx_ring->rx_buffer_info = NULL;
3511 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3512 return -ENOMEM;
3513}
3514
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
3525static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3526{
3527 int i, err = 0;
3528
3529 for (i = 0; i < adapter->num_rx_queues; i++) {
3530 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
3531 if (!err)
3532 continue;
3533 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3534 goto err_setup_rx;
3535 }
3536
3537 return 0;
3538err_setup_rx:
3539
3540 while (i--)
3541 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3542 return err;
3543}
3544
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
3551void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3552{
3553 ixgbevf_clean_rx_ring(rx_ring);
3554
3555 rx_ring->xdp_prog = NULL;
3556 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
3557 vfree(rx_ring->rx_buffer_info);
3558 rx_ring->rx_buffer_info = NULL;
3559
3560 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3561 rx_ring->dma);
3562
3563 rx_ring->desc = NULL;
3564}
3565
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
3572static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3573{
3574 int i;
3575
3576 for (i = 0; i < adapter->num_rx_queues; i++)
3577 if (adapter->rx_ring[i]->desc)
3578 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3579}
3580
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
3593int ixgbevf_open(struct net_device *netdev)
3594{
3595 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3596 struct ixgbe_hw *hw = &adapter->hw;
3597 int err;
3598
	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero.  The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSIX vector resources.
	 */
3605 if (!adapter->num_msix_vectors)
3606 return -ENOMEM;
3607
3608 if (hw->adapter_stopped) {
3609 ixgbevf_reset(adapter);
		/* if the adapter is still stopped then the PF isn't up and
		 * the VF can't start
		 */
3613 if (hw->adapter_stopped) {
3614 err = IXGBE_ERR_MBX;
3615 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3616 goto err_setup_reset;
3617 }
3618 }
3619
3620
3621 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3622 return -EBUSY;
3623
3624 netif_carrier_off(netdev);
3625
3626
3627 err = ixgbevf_setup_all_tx_resources(adapter);
3628 if (err)
3629 goto err_setup_tx;
3630
3631
3632 err = ixgbevf_setup_all_rx_resources(adapter);
3633 if (err)
3634 goto err_setup_rx;
3635
3636 ixgbevf_configure(adapter);
3637
3638 err = ixgbevf_request_irq(adapter);
3639 if (err)
3640 goto err_req_irq;
3641
3642
3643 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
3644 if (err)
3645 goto err_set_queues;
3646
3647 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
3648 if (err)
3649 goto err_set_queues;
3650
3651 ixgbevf_up_complete(adapter);
3652
3653 return 0;
3654
3655err_set_queues:
3656 ixgbevf_free_irq(adapter);
3657err_req_irq:
3658 ixgbevf_free_all_rx_resources(adapter);
3659err_setup_rx:
3660 ixgbevf_free_all_tx_resources(adapter);
3661err_setup_tx:
3662 ixgbevf_reset(adapter);
3663err_setup_reset:
3664
3665 return err;
3666}
3667
/**
 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
 * @adapter: the private adapter struct
 *
 * This function should contain the necessary work common to both suspending
 * and closing of the device.
 **/
3675static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
3676{
3677 ixgbevf_down(adapter);
3678 ixgbevf_free_irq(adapter);
3679 ixgbevf_free_all_tx_resources(adapter);
3680 ixgbevf_free_all_rx_resources(adapter);
3681}
3682
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
3694int ixgbevf_close(struct net_device *netdev)
3695{
3696 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3697
3698 if (netif_device_present(netdev))
3699 ixgbevf_close_suspend(adapter);
3700
3701 return 0;
3702}
3703
3704static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3705{
3706 struct net_device *dev = adapter->netdev;
3707
3708 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
3709 &adapter->state))
3710 return;
3711
	/* If we're already down or resetting, just bail */
3713 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3714 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3715 return;
3716
	/* Hardware has to reinitialize queues and interrupts to
	 * match the new configuration.  Unfortunately, the hardware
	 * is not flexible enough to do this dynamically.
	 */
3721 rtnl_lock();
3722
3723 if (netif_running(dev))
3724 ixgbevf_close(dev);
3725
3726 ixgbevf_clear_interrupt_scheme(adapter);
3727 ixgbevf_init_interrupt_scheme(adapter);
3728
3729 if (netif_running(dev))
3730 ixgbevf_open(dev);
3731
3732 rtnl_unlock();
3733}
3734
3735static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3736 u32 vlan_macip_lens, u32 fceof_saidx,
3737 u32 type_tucmd, u32 mss_l4len_idx)
3738{
3739 struct ixgbe_adv_tx_context_desc *context_desc;
3740 u16 i = tx_ring->next_to_use;
3741
3742 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3743
3744 i++;
3745 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
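	/* Note: a context descriptor consumes a slot in the Tx ring just like
	 * a data descriptor does, which is why next_to_use is advanced here
	 * and why the transmit path reserves an extra descriptor for it.
	 */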
3746
3747
3748 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3749
3750 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3751 context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
3752 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3753 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3754}
3755
3756static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3757 struct ixgbevf_tx_buffer *first,
3758 u8 *hdr_len,
3759 struct ixgbevf_ipsec_tx_data *itd)
3760{
3761 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
3762 struct sk_buff *skb = first->skb;
3763 union {
3764 struct iphdr *v4;
3765 struct ipv6hdr *v6;
3766 unsigned char *hdr;
3767 } ip;
3768 union {
3769 struct tcphdr *tcp;
3770 unsigned char *hdr;
3771 } l4;
3772 u32 paylen, l4_offset;
3773 u32 fceof_saidx = 0;
3774 int err;
3775
3776 if (skb->ip_summed != CHECKSUM_PARTIAL)
3777 return 0;
3778
3779 if (!skb_is_gso(skb))
3780 return 0;
3781
3782 err = skb_cow_head(skb, 0);
3783 if (err < 0)
3784 return err;
3785
3786 if (eth_p_mpls(first->protocol))
3787 ip.hdr = skb_inner_network_header(skb);
3788 else
3789 ip.hdr = skb_network_header(skb);
3790 l4.hdr = skb_checksum_start(skb);
3791
3792
3793 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3794
3795
3796 if (ip.v4->version == 4) {
3797 unsigned char *csum_start = skb_checksum_start(skb);
3798 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
3799 int len = csum_start - trans_start;
3800
		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header, so set to
		 * a reverse csum if needed, else init check to 0
		 */
3805 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
3806 csum_fold(csum_partial(trans_start,
3807 len, 0)) : 0;
3808 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3809
3810 ip.v4->tot_len = 0;
3811 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3812 IXGBE_TX_FLAGS_CSUM |
3813 IXGBE_TX_FLAGS_IPV4;
3814 } else {
3815 ip.v6->payload_len = 0;
3816 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3817 IXGBE_TX_FLAGS_CSUM;
3818 }
3819
3820
3821 l4_offset = l4.hdr - skb->data;
3822
3823
3824 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3825
3826
3827 paylen = skb->len - l4_offset;
3828 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
3829
3830
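	/* Update gso_segs and bytecount: skb->len (already stored in
	 * bytecount) counts the headers only once, but every segment carries
	 * its own copy, hence the extra (gso_segs - 1) * hdr_len below.  For
	 * example, a 7240 byte TCP payload with an MSS of 1448 yields five
	 * segments, so four additional copies of the headers go on the wire.
	 */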
3831 first->gso_segs = skb_shinfo(skb)->gso_segs;
3832 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3833
3834
3835 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
3836 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3837 mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
3838
3839 fceof_saidx |= itd->pfsa;
3840 type_tucmd |= itd->flags | itd->trailer_len;
3841
3842
3843 vlan_macip_lens = l4.hdr - ip.hdr;
3844 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
3845 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3846
3847 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
3848 mss_l4len_idx);
3849
3850 return 1;
3851}
3852
3853static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
3854{
3855 unsigned int offset = 0;
3856
3857 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
3858
3859 return offset == skb_checksum_start_offset(skb);
3860}
3861
3862static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3863 struct ixgbevf_tx_buffer *first,
3864 struct ixgbevf_ipsec_tx_data *itd)
3865{
3866 struct sk_buff *skb = first->skb;
3867 u32 vlan_macip_lens = 0;
3868 u32 fceof_saidx = 0;
3869 u32 type_tucmd = 0;
3870
3871 if (skb->ip_summed != CHECKSUM_PARTIAL)
3872 goto no_csum;
3873
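	/* Only TCP, UDP and SCTP checksums can be inserted by the hardware;
	 * anything else is punted to software via skb_checksum_help() below.
	 */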
3874 switch (skb->csum_offset) {
3875 case offsetof(struct tcphdr, check):
3876 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3877 fallthrough;
3878 case offsetof(struct udphdr, check):
3879 break;
3880 case offsetof(struct sctphdr, checksum):
3881
3882 if (((first->protocol == htons(ETH_P_IP)) &&
3883 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
3884 ((first->protocol == htons(ETH_P_IPV6)) &&
3885 ixgbevf_ipv6_csum_is_sctp(skb))) {
3886 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3887 break;
3888 }
3889 fallthrough;
3890 default:
3891 skb_checksum_help(skb);
3892 goto no_csum;
3893 }
3894
3895 if (first->protocol == htons(ETH_P_IP))
3896 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3897
3898
3899 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3900 vlan_macip_lens = skb_checksum_start_offset(skb) -
3901 skb_network_offset(skb);
3902no_csum:
3903
3904 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3905 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3906
3907 fceof_saidx |= itd->pfsa;
3908 type_tucmd |= itd->flags | itd->trailer_len;
3909
3910 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3911 fceof_saidx, type_tucmd, 0);
3912}
3913
3914static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3915{
3916
3917 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3918 IXGBE_ADVTXD_DCMD_IFCS |
3919 IXGBE_ADVTXD_DCMD_DEXT);
3920
3921
3922 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3923 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3924
3925
3926 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3927 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3928
3929 return cmd_type;
3930}
3931
3932static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3933 u32 tx_flags, unsigned int paylen)
3934{
3935 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3936
3937
3938 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3939 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3940
3941
3942 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3943 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3944
3945
3946 if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
3947 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);
3948
3949
3950 if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
3951 olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
3952
3953
3954
3955
3956 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3957
3958 tx_desc->read.olinfo_status = olinfo_status;
3959}
3960
3961static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3962 struct ixgbevf_tx_buffer *first,
3963 const u8 hdr_len)
3964{
3965 struct sk_buff *skb = first->skb;
3966 struct ixgbevf_tx_buffer *tx_buffer;
3967 union ixgbe_adv_tx_desc *tx_desc;
3968 skb_frag_t *frag;
3969 dma_addr_t dma;
3970 unsigned int data_len, size;
3971 u32 tx_flags = first->tx_flags;
3972 __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3973 u16 i = tx_ring->next_to_use;
3974
3975 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3976
3977 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
3978
3979 size = skb_headlen(skb);
3980 data_len = skb->data_len;
3981
3982 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3983
3984 tx_buffer = first;
3985
3986 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3987 if (dma_mapping_error(tx_ring->dev, dma))
3988 goto dma_error;
3989
3990
3991 dma_unmap_len_set(tx_buffer, len, size);
3992 dma_unmap_addr_set(tx_buffer, dma, dma);
3993
3994 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3995
3996 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3997 tx_desc->read.cmd_type_len =
3998 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3999
4000 i++;
4001 tx_desc++;
4002 if (i == tx_ring->count) {
4003 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
4004 i = 0;
4005 }
4006 tx_desc->read.olinfo_status = 0;
4007
4008 dma += IXGBE_MAX_DATA_PER_TXD;
4009 size -= IXGBE_MAX_DATA_PER_TXD;
4010
4011 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4012 }
4013
4014 if (likely(!data_len))
4015 break;
4016
4017 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
4018
4019 i++;
4020 tx_desc++;
4021 if (i == tx_ring->count) {
4022 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
4023 i = 0;
4024 }
4025 tx_desc->read.olinfo_status = 0;
4026
4027 size = skb_frag_size(frag);
4028 data_len -= size;
4029
4030 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
4031 DMA_TO_DEVICE);
4032
4033 tx_buffer = &tx_ring->tx_buffer_info[i];
4034 }
4035
4036
4037 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
4038 tx_desc->read.cmd_type_len = cmd_type;
4039
4040
4041 first->time_stamp = jiffies;
4042
4043 skb_tx_timestamp(skb);
4044
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier (wmb) to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
4052 wmb();
4053
4054
4055 first->next_to_watch = tx_desc;
4056
4057 i++;
4058 if (i == tx_ring->count)
4059 i = 0;
4060
4061 tx_ring->next_to_use = i;
4062
4063
4064 ixgbevf_write_tail(tx_ring, i);
4065
4066 return;
4067dma_error:
4068 dev_err(tx_ring->dev, "TX DMA map failed\n");
4069 tx_buffer = &tx_ring->tx_buffer_info[i];
4070
4071
4072 while (tx_buffer != first) {
4073 if (dma_unmap_len(tx_buffer, len))
4074 dma_unmap_page(tx_ring->dev,
4075 dma_unmap_addr(tx_buffer, dma),
4076 dma_unmap_len(tx_buffer, len),
4077 DMA_TO_DEVICE);
4078 dma_unmap_len_set(tx_buffer, len, 0);
4079
4080 if (i-- == 0)
4081 i += tx_ring->count;
4082 tx_buffer = &tx_ring->tx_buffer_info[i];
4083 }
4084
4085 if (dma_unmap_len(tx_buffer, len))
4086 dma_unmap_single(tx_ring->dev,
4087 dma_unmap_addr(tx_buffer, dma),
4088 dma_unmap_len(tx_buffer, len),
4089 DMA_TO_DEVICE);
4090 dma_unmap_len_set(tx_buffer, len, 0);
4091
4092 dev_kfree_skb_any(tx_buffer->skb);
4093 tx_buffer->skb = NULL;
4094
4095 tx_ring->next_to_use = i;
4096}
4097
4098static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4099{
4100 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
4101
4102
4103
4104
4105 smp_mb();
4106
	/* We need to check again in case another CPU has just
	 * made room available.
	 */
4110 if (likely(ixgbevf_desc_unused(tx_ring) < size))
4111 return -EBUSY;
4112
4113
4114 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
4115 ++tx_ring->tx_stats.restart_queue;
4116
4117 return 0;
4118}
4119
4120static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4121{
4122 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
4123 return 0;
4124 return __ixgbevf_maybe_stop_tx(tx_ring, size);
4125}
4126
4127static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
4128 struct ixgbevf_ring *tx_ring)
4129{
4130 struct ixgbevf_tx_buffer *first;
4131 int tso;
4132 u32 tx_flags = 0;
4133 u16 count = TXD_USE_COUNT(skb_headlen(skb));
4134 struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 };
4135#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4136 unsigned short f;
4137#endif
4138 u8 hdr_len = 0;
4139 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
4140
4141 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
4142 dev_kfree_skb_any(skb);
4143 return NETDEV_TX_OK;
4144 }
4145
	/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
4152#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4153 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
4154 skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
4155
4156 count += TXD_USE_COUNT(skb_frag_size(frag));
4157 }
4158#else
4159 count += skb_shinfo(skb)->nr_frags;
4160#endif
4161 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
4162 tx_ring->tx_stats.tx_busy++;
4163 return NETDEV_TX_BUSY;
4164 }
4165
4166
4167 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4168 first->skb = skb;
4169 first->bytecount = skb->len;
4170 first->gso_segs = 1;
4171
4172 if (skb_vlan_tag_present(skb)) {
4173 tx_flags |= skb_vlan_tag_get(skb);
4174 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
4175 tx_flags |= IXGBE_TX_FLAGS_VLAN;
4176 }
4177
4178
4179 first->tx_flags = tx_flags;
4180 first->protocol = vlan_get_protocol(skb);
4181
4182#ifdef CONFIG_IXGBEVF_IPSEC
4183 if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
4184 goto out_drop;
4185#endif
4186 tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
4187 if (tso < 0)
4188 goto out_drop;
4189 else if (!tso)
4190 ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
4191
4192 ixgbevf_tx_map(tx_ring, first, hdr_len);
4193
4194 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
4195
4196 return NETDEV_TX_OK;
4197
4198out_drop:
4199 dev_kfree_skb_any(first->skb);
4200 first->skb = NULL;
4201
4202 return NETDEV_TX_OK;
4203}
4204
4205static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4206{
4207 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4208 struct ixgbevf_ring *tx_ring;
4209
4210 if (skb->len <= 0) {
4211 dev_kfree_skb_any(skb);
4212 return NETDEV_TX_OK;
4213 }
4214
	/* The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
4218 if (skb->len < 17) {
4219 if (skb_padto(skb, 17))
4220 return NETDEV_TX_OK;
4221 skb->len = 17;
4222 }
4223
4224 tx_ring = adapter->tx_ring[skb->queue_mapping];
4225 return ixgbevf_xmit_frame_ring(skb, tx_ring);
4226}
4227
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
4235static int ixgbevf_set_mac(struct net_device *netdev, void *p)
4236{
4237 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4238 struct ixgbe_hw *hw = &adapter->hw;
4239 struct sockaddr *addr = p;
4240 int err;
4241
4242 if (!is_valid_ether_addr(addr->sa_data))
4243 return -EADDRNOTAVAIL;
4244
4245 spin_lock_bh(&adapter->mbx_lock);
4246
4247 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
4248
4249 spin_unlock_bh(&adapter->mbx_lock);
4250
4251 if (err)
4252 return -EPERM;
4253
4254 ether_addr_copy(hw->mac.addr, addr->sa_data);
4255 ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
4256 ether_addr_copy(netdev->dev_addr, addr->sa_data);
4257
4258 return 0;
4259}
4260
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
4268static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
4269{
4270 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4271 struct ixgbe_hw *hw = &adapter->hw;
4272 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4273 int ret;
4274
4275
4276 if (adapter->xdp_prog) {
4277 dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n");
4278 return -EPERM;
4279 }
4280
4281 spin_lock_bh(&adapter->mbx_lock);
4282
4283 ret = hw->mac.ops.set_rlpml(hw, max_frame);
4284 spin_unlock_bh(&adapter->mbx_lock);
4285 if (ret)
4286 return -EINVAL;
4287
4288 hw_dbg(hw, "changing MTU from %d to %d\n",
4289 netdev->mtu, new_mtu);
4290
4291
4292 netdev->mtu = new_mtu;
4293
4294 if (netif_running(netdev))
4295 ixgbevf_reinit_locked(adapter);
4296
4297 return 0;
4298}
4299
4300static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
4301{
4302 struct net_device *netdev = dev_get_drvdata(dev_d);
4303 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4304
4305 rtnl_lock();
4306 netif_device_detach(netdev);
4307
4308 if (netif_running(netdev))
4309 ixgbevf_close_suspend(adapter);
4310
4311 ixgbevf_clear_interrupt_scheme(adapter);
4312 rtnl_unlock();
4313
4314 return 0;
4315}
4316
4317static int __maybe_unused ixgbevf_resume(struct device *dev_d)
4318{
4319 struct pci_dev *pdev = to_pci_dev(dev_d);
4320 struct net_device *netdev = pci_get_drvdata(pdev);
4321 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4322 u32 err;
4323
4324 adapter->hw.hw_addr = adapter->io_addr;
4325 smp_mb__before_atomic();
4326 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4327 pci_set_master(pdev);
4328
4329 ixgbevf_reset(adapter);
4330
4331 rtnl_lock();
4332 err = ixgbevf_init_interrupt_scheme(adapter);
4333 if (!err && netif_running(netdev))
4334 err = ixgbevf_open(netdev);
4335 rtnl_unlock();
4336 if (err)
4337 return err;
4338
4339 netif_device_attach(netdev);
4340
4341 return err;
4342}
4343
4344static void ixgbevf_shutdown(struct pci_dev *pdev)
4345{
4346 ixgbevf_suspend(&pdev->dev);
4347}
4348
4349static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
4350 const struct ixgbevf_ring *ring)
4351{
4352 u64 bytes, packets;
4353 unsigned int start;
4354
4355 if (ring) {
4356 do {
4357 start = u64_stats_fetch_begin_irq(&ring->syncp);
4358 bytes = ring->stats.bytes;
4359 packets = ring->stats.packets;
4360 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4361 stats->tx_bytes += bytes;
4362 stats->tx_packets += packets;
4363 }
4364}
4365
4366static void ixgbevf_get_stats(struct net_device *netdev,
4367 struct rtnl_link_stats64 *stats)
4368{
4369 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4370 unsigned int start;
4371 u64 bytes, packets;
4372 const struct ixgbevf_ring *ring;
4373 int i;
4374
4375 ixgbevf_update_stats(adapter);
4376
4377 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
4378
4379 rcu_read_lock();
4380 for (i = 0; i < adapter->num_rx_queues; i++) {
4381 ring = adapter->rx_ring[i];
4382 do {
4383 start = u64_stats_fetch_begin_irq(&ring->syncp);
4384 bytes = ring->stats.bytes;
4385 packets = ring->stats.packets;
4386 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4387 stats->rx_bytes += bytes;
4388 stats->rx_packets += packets;
4389 }
4390
4391 for (i = 0; i < adapter->num_tx_queues; i++) {
4392 ring = adapter->tx_ring[i];
4393 ixgbevf_get_tx_ring_stats(stats, ring);
4394 }
4395
4396 for (i = 0; i < adapter->num_xdp_queues; i++) {
4397 ring = adapter->xdp_ring[i];
4398 ixgbevf_get_tx_ring_stats(stats, ring);
4399 }
4400 rcu_read_unlock();
4401}
4402
4403#define IXGBEVF_MAX_MAC_HDR_LEN 127
4404#define IXGBEVF_MAX_NETWORK_HDR_LEN 511
4405
4406static netdev_features_t
4407ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
4408 netdev_features_t features)
4409{
4410 unsigned int network_hdr_len, mac_hdr_len;
4411
4412
4413 mac_hdr_len = skb_network_header(skb) - skb->data;
4414 if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
4415 return features & ~(NETIF_F_HW_CSUM |
4416 NETIF_F_SCTP_CRC |
4417 NETIF_F_HW_VLAN_CTAG_TX |
4418 NETIF_F_TSO |
4419 NETIF_F_TSO6);
4420
4421 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
4422 if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
4423 return features & ~(NETIF_F_HW_CSUM |
4424 NETIF_F_SCTP_CRC |
4425 NETIF_F_TSO |
4426 NETIF_F_TSO6);
4427
4428
4429
4430
4431 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
4432 features &= ~NETIF_F_TSO;
4433
4434 return features;
4435}
4436
4437static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
4438{
4439 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4440 struct ixgbevf_adapter *adapter = netdev_priv(dev);
4441 struct bpf_prog *old_prog;
4442
4443
4444 for (i = 0; i < adapter->num_rx_queues; i++) {
4445 struct ixgbevf_ring *ring = adapter->rx_ring[i];
4446
4447 if (frame_size > ixgbevf_rx_bufsz(ring))
4448 return -EINVAL;
4449 }
4450
4451 old_prog = xchg(&adapter->xdp_prog, prog);
4452
4453
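	/* Attaching or detaching a program changes the ring layout (XDP gets
	 * dedicated Tx rings), so the interrupt/queue scheme must be rebuilt
	 * below; swapping one program for another only requires the per-ring
	 * prog pointers to be updated.
	 */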
4454 if (!!prog != !!old_prog) {
4455
4456
4457
4458
4459 if (netif_running(dev))
4460 ixgbevf_close(dev);
4461
4462 ixgbevf_clear_interrupt_scheme(adapter);
4463 ixgbevf_init_interrupt_scheme(adapter);
4464
4465 if (netif_running(dev))
4466 ixgbevf_open(dev);
4467 } else {
4468 for (i = 0; i < adapter->num_rx_queues; i++)
4469 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
4470 }
4471
4472 if (old_prog)
4473 bpf_prog_put(old_prog);
4474
4475 return 0;
4476}
4477
4478static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4479{
4480 switch (xdp->command) {
4481 case XDP_SETUP_PROG:
4482 return ixgbevf_xdp_setup(dev, xdp->prog);
4483 default:
4484 return -EINVAL;
4485 }
4486}
4487
4488static const struct net_device_ops ixgbevf_netdev_ops = {
4489 .ndo_open = ixgbevf_open,
4490 .ndo_stop = ixgbevf_close,
4491 .ndo_start_xmit = ixgbevf_xmit_frame,
4492 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
4493 .ndo_get_stats64 = ixgbevf_get_stats,
4494 .ndo_validate_addr = eth_validate_addr,
4495 .ndo_set_mac_address = ixgbevf_set_mac,
4496 .ndo_change_mtu = ixgbevf_change_mtu,
4497 .ndo_tx_timeout = ixgbevf_tx_timeout,
4498 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
4499 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
4500 .ndo_features_check = ixgbevf_features_check,
4501 .ndo_bpf = ixgbevf_xdp,
4502};
4503
4504static void ixgbevf_assign_netdev_ops(struct net_device *dev)
4505{
4506 dev->netdev_ops = &ixgbevf_netdev_ops;
4507 ixgbevf_set_ethtool_ops(dev);
4508 dev->watchdog_timeo = 5 * HZ;
4509}
4510
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
4522static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4523{
4524 struct net_device *netdev;
4525 struct ixgbevf_adapter *adapter = NULL;
4526 struct ixgbe_hw *hw = NULL;
4527 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
4528 int err, pci_using_dac;
4529 bool disable_dev = false;
4530
4531 err = pci_enable_device(pdev);
4532 if (err)
4533 return err;
4534
4535 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
4536 pci_using_dac = 1;
4537 } else {
4538 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4539 if (err) {
4540 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4541 goto err_dma;
4542 }
4543 pci_using_dac = 0;
4544 }
4545
4546 err = pci_request_regions(pdev, ixgbevf_driver_name);
4547 if (err) {
4548 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
4549 goto err_pci_reg;
4550 }
4551
4552 pci_set_master(pdev);
4553
4554 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
4555 MAX_TX_QUEUES);
4556 if (!netdev) {
4557 err = -ENOMEM;
4558 goto err_alloc_etherdev;
4559 }
4560
4561 SET_NETDEV_DEV(netdev, &pdev->dev);
4562
4563 adapter = netdev_priv(netdev);
4564
4565 adapter->netdev = netdev;
4566 adapter->pdev = pdev;
4567 hw = &adapter->hw;
4568 hw->back = adapter;
4569 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4570
4571
4572
4573
4574 pci_save_state(pdev);
4575
4576 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4577 pci_resource_len(pdev, 0));
4578 adapter->io_addr = hw->hw_addr;
4579 if (!hw->hw_addr) {
4580 err = -EIO;
4581 goto err_ioremap;
4582 }
4583
4584 ixgbevf_assign_netdev_ops(netdev);
4585
4586
4587 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
4588 hw->mac.type = ii->mac;
4589
4590 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
4591 sizeof(struct ixgbe_mbx_operations));
4592
4593
4594 err = ixgbevf_sw_init(adapter);
4595 if (err)
4596 goto err_sw_init;
4597
4598
4599 if (!is_valid_ether_addr(netdev->dev_addr)) {
4600 pr_err("invalid MAC address\n");
4601 err = -EIO;
4602 goto err_sw_init;
4603 }
4604
4605 netdev->hw_features = NETIF_F_SG |
4606 NETIF_F_TSO |
4607 NETIF_F_TSO6 |
4608 NETIF_F_RXCSUM |
4609 NETIF_F_HW_CSUM |
4610 NETIF_F_SCTP_CRC;
4611
4612#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
4613 NETIF_F_GSO_GRE_CSUM | \
4614 NETIF_F_GSO_IPXIP4 | \
4615 NETIF_F_GSO_IPXIP6 | \
4616 NETIF_F_GSO_UDP_TUNNEL | \
4617 NETIF_F_GSO_UDP_TUNNEL_CSUM)
4618
4619 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
4620 netdev->hw_features |= NETIF_F_GSO_PARTIAL |
4621 IXGBEVF_GSO_PARTIAL_FEATURES;
4622
4623 netdev->features = netdev->hw_features;
4624
4625 if (pci_using_dac)
4626 netdev->features |= NETIF_F_HIGHDMA;
4627
4628 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
4629 netdev->mpls_features |= NETIF_F_SG |
4630 NETIF_F_TSO |
4631 NETIF_F_TSO6 |
4632 NETIF_F_HW_CSUM;
4633 netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
4634 netdev->hw_enc_features |= netdev->vlan_features;
4635
4636
4637 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4638 NETIF_F_HW_VLAN_CTAG_RX |
4639 NETIF_F_HW_VLAN_CTAG_TX;
4640
4641 netdev->priv_flags |= IFF_UNICAST_FLT;
4642
4643
4644 netdev->min_mtu = ETH_MIN_MTU;
4645 switch (adapter->hw.api_version) {
4646 case ixgbe_mbox_api_11:
4647 case ixgbe_mbox_api_12:
4648 case ixgbe_mbox_api_13:
4649 case ixgbe_mbox_api_14:
4650 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4651 (ETH_HLEN + ETH_FCS_LEN);
4652 break;
4653 default:
4654 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
4655 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4656 (ETH_HLEN + ETH_FCS_LEN);
4657 else
4658 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
4659 break;
4660 }
4661
4662 if (IXGBE_REMOVED(hw->hw_addr)) {
4663 err = -EIO;
4664 goto err_sw_init;
4665 }
4666
4667 timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0);
4668
4669 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4670 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4671 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4672
4673 err = ixgbevf_init_interrupt_scheme(adapter);
4674 if (err)
4675 goto err_sw_init;
4676
4677 strcpy(netdev->name, "eth%d");
4678
4679 err = register_netdev(netdev);
4680 if (err)
4681 goto err_register;
4682
4683 pci_set_drvdata(pdev, netdev);
4684 netif_carrier_off(netdev);
4685 ixgbevf_init_ipsec_offload(adapter);
4686
4687 ixgbevf_init_last_counter_stats(adapter);
4688
4689
4690 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
4691 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4692
4693 switch (hw->mac.type) {
4694 case ixgbe_mac_X550_vf:
4695 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
4696 break;
4697 case ixgbe_mac_X540_vf:
4698 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
4699 break;
4700 case ixgbe_mac_82599_vf:
4701 default:
4702 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
4703 break;
4704 }
4705
4706 return 0;
4707
4708err_register:
4709 ixgbevf_clear_interrupt_scheme(adapter);
4710err_sw_init:
4711 ixgbevf_reset_interrupt_capability(adapter);
4712 iounmap(adapter->io_addr);
4713 kfree(adapter->rss_key);
4714err_ioremap:
4715 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4716 free_netdev(netdev);
4717err_alloc_etherdev:
4718 pci_release_regions(pdev);
4719err_pci_reg:
4720err_dma:
4721 if (!adapter || disable_dev)
4722 pci_disable_device(pdev);
4723 return err;
4724}
4725
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
4735static void ixgbevf_remove(struct pci_dev *pdev)
4736{
4737 struct net_device *netdev = pci_get_drvdata(pdev);
4738 struct ixgbevf_adapter *adapter;
4739 bool disable_dev;
4740
4741 if (!netdev)
4742 return;
4743
4744 adapter = netdev_priv(netdev);
4745
4746 set_bit(__IXGBEVF_REMOVING, &adapter->state);
4747 cancel_work_sync(&adapter->service_task);
4748
4749 if (netdev->reg_state == NETREG_REGISTERED)
4750 unregister_netdev(netdev);
4751
4752 ixgbevf_stop_ipsec_offload(adapter);
4753 ixgbevf_clear_interrupt_scheme(adapter);
4754 ixgbevf_reset_interrupt_capability(adapter);
4755
4756 iounmap(adapter->io_addr);
4757 pci_release_regions(pdev);
4758
4759 hw_dbg(&adapter->hw, "Remove complete\n");
4760
4761 kfree(adapter->rss_key);
4762 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4763 free_netdev(netdev);
4764
4765 if (disable_dev)
4766 pci_disable_device(pdev);
4767}
4768
/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
4777static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
4778 pci_channel_state_t state)
4779{
4780 struct net_device *netdev = pci_get_drvdata(pdev);
4781 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4782
4783 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4784 return PCI_ERS_RESULT_DISCONNECT;
4785
4786 rtnl_lock();
4787 netif_device_detach(netdev);
4788
4789 if (netif_running(netdev))
4790 ixgbevf_close_suspend(adapter);
4791
4792 if (state == pci_channel_io_perm_failure) {
4793 rtnl_unlock();
4794 return PCI_ERS_RESULT_DISCONNECT;
4795 }
4796
4797 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4798 pci_disable_device(pdev);
4799 rtnl_unlock();
4800
4801
4802 return PCI_ERS_RESULT_NEED_RESET;
4803}
4804
/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.  Implementation
 * resembles the first half of the ixgbevf_resume routine.
 **/
4812static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
4813{
4814 struct net_device *netdev = pci_get_drvdata(pdev);
4815 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4816
4817 if (pci_enable_device_mem(pdev)) {
4818 dev_err(&pdev->dev,
4819 "Cannot re-enable PCI device after reset.\n");
4820 return PCI_ERS_RESULT_DISCONNECT;
4821 }
4822
4823 adapter->hw.hw_addr = adapter->io_addr;
4824 smp_mb__before_atomic();
4825 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4826 pci_set_master(pdev);
4827
4828 ixgbevf_reset(adapter);
4829
4830 return PCI_ERS_RESULT_RECOVERED;
4831}
4832
/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.  Implementation resembles the
 * second half of the ixgbevf_resume routine.
 **/
4841static void ixgbevf_io_resume(struct pci_dev *pdev)
4842{
4843 struct net_device *netdev = pci_get_drvdata(pdev);
4844
4845 rtnl_lock();
4846 if (netif_running(netdev))
4847 ixgbevf_open(netdev);
4848
4849 netif_device_attach(netdev);
4850 rtnl_unlock();
4851}
4852
4853
4854static const struct pci_error_handlers ixgbevf_err_handler = {
4855 .error_detected = ixgbevf_io_error_detected,
4856 .slot_reset = ixgbevf_io_slot_reset,
4857 .resume = ixgbevf_io_resume,
4858};
4859
4860static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
4861
4862static struct pci_driver ixgbevf_driver = {
4863 .name = ixgbevf_driver_name,
4864 .id_table = ixgbevf_pci_tbl,
4865 .probe = ixgbevf_probe,
4866 .remove = ixgbevf_remove,
4867
4868
4869 .driver.pm = &ixgbevf_pm_ops,
4870
4871 .shutdown = ixgbevf_shutdown,
4872 .err_handler = &ixgbevf_err_handler
4873};
4874
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
4881static int __init ixgbevf_init_module(void)
4882{
4883 pr_info("%s\n", ixgbevf_driver_string);
4884 pr_info("%s\n", ixgbevf_copyright);
4885 ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
4886 if (!ixgbevf_wq) {
4887 pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
4888 return -ENOMEM;
4889 }
4890
4891 return pci_register_driver(&ixgbevf_driver);
4892}
4893
4894module_init(ixgbevf_init_module);
4895
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
4902static void __exit ixgbevf_exit_module(void)
4903{
4904 pci_unregister_driver(&ixgbevf_driver);
4905 if (ixgbevf_wq) {
4906 destroy_workqueue(ixgbevf_wq);
4907 ixgbevf_wq = NULL;
4908 }
4909}
4910
4911#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging info
 * @hw: hw struct
 **/
4917char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4918{
4919 struct ixgbevf_adapter *adapter = hw->back;
4920
4921 return adapter->netdev->name;
4922}
4923
4924#endif
4925module_exit(ixgbevf_exit_module);
4926
4927
4928