// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <net/mpls.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <net/xfrm.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2018 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf]	= &ixgbevf_82599_vf_info,
	[board_82599_vf_hv]	= &ixgbevf_82599_vf_hv_info,
	[board_X540_vf]		= &ixgbevf_X540_vf_info,
	[board_X540_vf_hv]	= &ixgbevf_X540_vf_hv_info,
	[board_X550_vf]		= &ixgbevf_X550_vf_info,
	[board_X550_vf_hv]	= &ixgbevf_X550_vf_hv_info,
	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,
	[board_X550EM_x_vf_hv]	= &ixgbevf_X550EM_x_vf_hv_info,
	[board_x550em_a_vf]	= &ixgbevf_x550em_a_vf_info,
};

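/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */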
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct workqueue_struct *ixgbevf_wq;

static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbevf_wq, &adapter->service_task);
}

static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

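	/* flush memory to make sure state is correct before next watchdog */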
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

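	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */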
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}

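/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/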
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
}

static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

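	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the last check.
	 * By requiring this to fail twice we avoid races with
	 * clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */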
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}

	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;

	return false;
}

static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
		ixgbevf_service_event_schedule(adapter);
	}
}

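/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: transmit queue hanging (unused)
 **/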
static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_tx_timeout_reset(adapter);
}

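/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/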
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring, int napi_budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;
		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
			total_ipsec++;

		/* free the skb */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buffer->data);
		else
			napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;
	adapter->tx_ipsec += total_ipsec;

	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
		struct ixgbe_hw *hw = &adapter->hw;
		union ixgbe_adv_tx_desc *eop_desc;

		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;

		pr_err("Detected Tx Unit Hang%s\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  next_to_watch        <%p>\n"
		       "  eop_desc->wb.status  <%x>\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       ring_is_xdp(tx_ring) ? " XDP" : "",
		       tx_ring->queue_index,
		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
		       tx_ring->next_to_use, i,
		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		if (!ring_is_xdp(tx_ring))
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbevf_tx_timeout_reset(adapter);

		return true;
	}

	if (ring_is_xdp(tx_ring))
		return !!budget;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

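/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/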
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}

#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
				   union ixgbe_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

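/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/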
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

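/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/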
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	ixgbevf_rx_hash(rx_ring, rx_desc, skb);
	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
		ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static
struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
						const unsigned int size)
{
	struct ixgbevf_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *rx_buffer,
				  struct sk_buff *skb)
{
	if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		if (IS_ERR(skb))
			/* We are not reusing the buffer so unmap it and free
			 * any references we are holding to it
			 */
			dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
					     ixgbevf_rx_pg_size(rx_ring),
					     DMA_FROM_DEVICE,
					     IXGBEVF_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

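/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/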
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
			       union ixgbe_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

	if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	return true;
}

static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
}

static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 ixgbevf_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ixgbevf_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ixgbevf_rx_offset(rx_ring);
	bi->pagecnt_bias = 1;
	rx_ring->rx_stats.alloc_rx_page++;

	return true;
}

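/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/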
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	/* nothing to do or no valid netdev defined */
	if (!cleaned_count || !rx_ring->netdev)
		return;

	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 ixgbevf_rx_bufsz(rx_ring),
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		ixgbevf_write_tail(rx_ring, i);
	}
}

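/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/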
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbevf_test_staterr(rx_desc,
					  IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

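/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/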
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff)
{
	struct ixgbevf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IXGBEVF_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)

	if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
		return false;

#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

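/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 **/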
static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
				struct ixgbevf_rx_buffer *rx_buffer,
				struct sk_buff *skb,
				unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

static
struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *rx_buffer,
				      struct xdp_buff *xdp,
				      union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IXGBEVF_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data,
					  IXGBEVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data,
	       ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(xdp->data + headlen) -
					page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
					 struct ixgbevf_rx_buffer *rx_buffer,
					 struct xdp_buff *xdp,
					 union ixgbe_adv_rx_desc *rx_desc)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points to xdp->data, otherwise, we likely
	 * have a consumer accessing first few bytes of meta data,
	 * and then actual data.
	 */
	net_prefetch(xdp->data_meta);

	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

#define IXGBEVF_XDP_PASS 0
#define IXGBEVF_XDP_CONSUMED 1
#define IXGBEVF_XDP_TX 2

static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
				 struct xdp_buff *xdp)
{
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	u32 len, cmd_type;
	dma_addr_t dma;
	u16 i;

	len = xdp->data_end - xdp->data;

	if (unlikely(!ixgbevf_desc_unused(ring)))
		return IXGBEVF_XDP_CONSUMED;

	dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(ring->dev, dma))
		return IXGBEVF_XDP_CONSUMED;

	/* record the location of the first descriptor for this packet */
	i = ring->next_to_use;
	tx_buffer = &ring->tx_buffer_info[i];

	dma_unmap_len_set(tx_buffer, len, len);
	dma_unmap_addr_set(tx_buffer, dma, dma);
	tx_buffer->data = xdp->data;
	tx_buffer->bytecount = len;
	tx_buffer->gso_segs = 1;
	tx_buffer->protocol = 0;

	/* The XDP Tx ring is used by this function only, so a static
	 * context descriptor is programmed once in slot 0 and the data
	 * descriptors that follow can all reference it.
	 */
	if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
		struct ixgbe_adv_tx_context_desc *context_desc;

		set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);

		context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
		context_desc->vlan_macip_lens =
			cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
		context_desc->fceof_saidx = 0;
		context_desc->type_tucmd_mlhl =
			cpu_to_le32(IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);
		context_desc->mss_l4len_idx = 0;

		i = 1;
	}

	/* put descriptor type bits */
	cmd_type = IXGBE_ADVTXD_DTYP_DATA |
		   IXGBE_ADVTXD_DCMD_DEXT |
		   IXGBE_ADVTXD_DCMD_IFCS;
	cmd_type |= len | IXGBE_TXD_CMD;

	tx_desc = IXGBEVF_TX_DESC(ring, i);
	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
	tx_desc->read.olinfo_status =
			cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
				    IXGBE_ADVTXD_CC);

	/* Avoid any potential race with cleanup */
	smp_wmb();

	/* set next_to_watch value indicating a packet is present */
	i++;
	if (i == ring->count)
		i = 0;

	tx_buffer->next_to_watch = tx_desc;
	ring->next_to_use = i;

	return IXGBEVF_XDP_TX;
}

static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
				       struct ixgbevf_ring *rx_ring,
				       struct xdp_buff *xdp)
{
	int result = IXGBEVF_XDP_PASS;
	struct ixgbevf_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	if (!xdp_prog)
		goto xdp_out;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
		result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
		if (result == IXGBEVF_XDP_CONSUMED)
			goto out_failure;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBEVF_XDP_CONSUMED;
		break;
	}
xdp_out:
	return ERR_PTR(-result);
}

static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
					      unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = ring_uses_build_skb(rx_ring) ?
		   SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		   SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
				   struct ixgbevf_rx_buffer *rx_buffer,
				   unsigned int size)
{
	unsigned int truesize = ixgbevf_rx_frame_truesize(rx_ring, size);

#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
	struct sk_buff *skb = rx_ring->skb;
	bool xdp_xmit = false;
	struct xdp_buff xdp;

	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
#endif
	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);

	while (likely(total_rx_packets < budget)) {
		struct ixgbevf_rx_buffer *rx_buffer;
		union ixgbe_adv_rx_desc *rx_desc;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (!skb) {
			unsigned int offset = ixgbevf_rx_offset(rx_ring);
			unsigned char *hard_start;

			hard_start = page_address(rx_buffer->page) +
				     rx_buffer->page_offset - offset;
			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
			/* At larger PAGE_SIZE, frame_sz depends on len size */
			xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
#endif
			skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
		}

		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
				xdp_xmit = true;
				ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
						       size);
			} else {
				rx_buffer->pagecnt_bias++;
			}
			total_rx_packets++;
			total_rx_bytes += size;
		} else if (skb) {
			ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
		} else if (ring_uses_build_skb(rx_ring)) {
			skb = ixgbevf_build_skb(rx_ring, rx_buffer,
						&xdp, rx_desc);
		} else {
			skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
						    &xdp, rx_desc);
		}

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (ixgbevf_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		     skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		/* populate checksum, VLAN, and protocol */
		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

		ixgbevf_rx_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	if (xdp_xmit) {
		struct ixgbevf_ring *xdp_ring =
			adapter->xdp_ring[rx_ring->queue_index];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

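/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one or more rings associated with a
 * q_vector.
 **/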
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx) {
		if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
			clean_complete = false;
	}

	if (budget <= 0)
		return budget;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
						   per_ring_budget);
		work_done += cleaned;
		if (cleaned >= per_ring_budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done))) {
		if (adapter->rx_itr_setting == 1)
			ixgbevf_set_itr(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
		    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
			ixgbevf_irq_enable_queues(adapter,
						  BIT(q_vector->v_idx));
	}

	return min(work_done, budget - 1);
}

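/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/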
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

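/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/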
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;

		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* Tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_12K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* Rx or Rx/Tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= BIT(v_idx);

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = BIT(v_idx);
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

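/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/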
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (12000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	if (timepassed_us == 0)
		return;

	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IXGBE_12K_ITR;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	ixgbevf_service_event_schedule(adapter);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

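/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/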
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

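/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/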
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	unsigned int ri = 0, ti = 0;
	int vector, err;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-TxRx-%u", netdev->name, ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-rx-%u", netdev->name, ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-tx-%u", netdev->name, ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt Error: %d\n",
			       err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
		       err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}

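/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/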
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}
}

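/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/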
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

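/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/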
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

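/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/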
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	txdctl |= (8 << 16);    /* WTHRESH = 8 */

	/* Setting PTHRESH to 32 both improves performance */
	txdctl |= (1u << 8) |   /* HTHRESH = 1 */
		  32;           /* PTHRESH = 32 */

	/* reinitialize tx_buffer_info */
	memset(ring->tx_buffer_info, 0,
	       sizeof(struct ixgbevf_tx_buffer) * ring->count);

	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
	clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
}

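/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/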
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
	for (i = 0; i < adapter->num_xdp_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring, int index)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
	if (ring_uses_large_buffer(ring))
		srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= BIT(29);

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}

#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the Rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}

static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}

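/**
 * ixgbevf_init_rss_key - Initialize adapter RSS key
 * @adapter: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 **/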
static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
{
	u32 *rss_key;

	if (!adapter->rss_key) {
		rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
		if (unlikely(!rss_key))
			return -ENOMEM;

		netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
		adapter->rss_key = rss_key;
	}

	return 0;
}

static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 vfmrqc = 0, vfreta = 0;
	u16 rss_i = adapter->num_rx_queues;
	u8 i, j;

	/* Fill out hash function seeds */
	for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));

	for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
		if (j == rss_i)
			j = 0;

		adapter->rss_indir_tbl[i] = j;

		vfreta |= j << (i & 0x3) * 8;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
			vfreta = 0;
		}
	}

	/* Perform hash on these packet types */
	vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
		  IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
		  IXGBE_VFMRQC_RSS_FIELD_IPV6 |
		  IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;

	vfmrqc |= IXGBE_VFMRQC_RSSEN;

	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
}

static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	union ixgbe_adv_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	ixgbevf_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));

#ifndef CONFIG_SPARC
	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN);
#else
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN |
			IXGBE_DCA_RXCTRL_DATA_WRO_EN);
#endif

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct ixgbevf_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IXGBEVF_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	ixgbevf_configure_srrctl(adapter, ring, reg_idx);

	/* RXDCTL.RLPML does not work on 82599 */
	if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
			    IXGBE_RXDCTL_RLPML_EN);

#if (PAGE_SIZE < 8192)
		/* Limit the maximum frame size so we don't overrun the skb */
		if (ring_uses_build_skb(ring) &&
		    !ring_uses_large_buffer(ring))
			rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
				  IXGBE_RXDCTL_RLPML_EN;
#endif
	}

	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	ixgbevf_rx_desc_queue_enable(adapter, ring);
	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *rx_ring)
{
	struct net_device *netdev = adapter->netdev;
	unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* set build_skb and buffer size flags */
	clear_ring_build_skb_enabled(rx_ring);
	clear_ring_uses_large_buffer(rx_ring);

	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
		return;

	set_ring_build_skb_enabled(rx_ring);

	if (PAGE_SIZE < 8192) {
		if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
			return;

		set_ring_uses_large_buffer(rx_ring);
	}
}

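/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/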
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int i, ret;

	ixgbevf_setup_psrtype(adapter);
	if (hw->mac.type >= ixgbe_mac_X550_vf)
		ixgbevf_setup_vfmrqc(adapter);

	spin_lock_bh(&adapter->mbx_lock);
	/* notify the PF of our intent to use this size of frame */
	ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
	spin_unlock_bh(&adapter->mbx_lock);
	if (ret)
		dev_err(&adapter->pdev->dev,
			"Failed to set MTU at %d\n", netdev->mtu);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];

		ixgbevf_set_rx_buffer_len(adapter, rx_ring);
		ixgbevf_configure_rx_ring(adapter, rx_ring);
	}
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/* If the list is empty then send message to PF driver to
		 * clear all MAC VLANs on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

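/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and promiscuous mode.
 **/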
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int flags = netdev->flags;
	int xcast_mode;

	/* request the most inclusive mode we need */
	if (flags & IFF_PROMISC)
		xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
	else if (flags & IFF_ALLMULTI)
		xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
	else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
	else
		xcast_mode = IXGBEVF_XCAST_MODE_NONE;

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.update_xcast_mode(hw, xcast_mode);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = adapter->num_rx_queues;
	unsigned int num_tx_queues = adapter->num_tx_queues;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* we need only one Tx queue */
		num_tx_queues = 1;

		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a new number of queues, then we need to reconfigure */
	if ((adapter->num_rx_queues != num_rx_queues) ||
	    (adapter->num_tx_queues != num_tx_queues)) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
	}

	return 0;
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(adapter->netdev);

	ixgbevf_restore_vlan(adapter);
	ixgbevf_ipsec_restore(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	static const int api[] = {
		ixgbe_mbox_api_14,
		ixgbe_mbox_api_13,
		ixgbe_mbox_api_12,
		ixgbe_mbox_api_11,
		ixgbe_mbox_api_10,
		ixgbe_mbox_api_unknown
	};
	int err, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	ixgbevf_irq_enable(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->service_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);
}

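/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/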
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	/* Free Rx ring sk_buff */
	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring pages */
	while (i != rx_ring->next_to_alloc) {
		struct ixgbevf_rx_buffer *rx_buffer;

		rx_buffer = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_buffer->dma,
					      rx_buffer->page_offset,
					      ixgbevf_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     rx_buffer->dma,
				     ixgbevf_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IXGBEVF_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

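/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/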
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union ixgbe_adv_tx_desc *eop_desc, *tx_desc;

		/* Free all the Tx ring sk_buffs */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buffer->data);
		else
			dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

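/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/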
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}

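/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/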
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
	for (i = 0; i < adapter->num_xdp_queues; i++)
		ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
		return; /* do nothing if already down */

	/* disable all enabled Rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);

	usleep_range(10000, 20000);

	netif_tx_stop_all_queues(netdev);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->service_timer);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		u8 reg_idx = adapter->xdp_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	pci_set_master(adapter->pdev);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	adapter->last_reset = jiffies;
}

static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return vectors;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = vectors;

	return 0;
}

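/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 **/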
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	int err;

	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1) {
		adapter->num_rx_queues = num_tcs;
	} else {
		u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);

		switch (hw->api_version) {
		case ixgbe_mbox_api_11:
		case ixgbe_mbox_api_12:
		case ixgbe_mbox_api_13:
		case ixgbe_mbox_api_14:
			if (adapter->xdp_prog &&
			    hw->mac.max_tx_queues == rss)
				rss = rss > 3 ? 2 : 1;

			adapter->num_rx_queues = rss;
			adapter->num_tx_queues = rss;
			adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
			break;
		default:
			break;
		}
	}
}

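/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/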
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	int vector, v_budget;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	/* A failure in MSI-X entry allocation isn't fatal, but the VF driver
	 * does not support any other modes, so we will simply fail here. Note
	 * that we clean up the msix_entries pointer else-where.
	 */
	return ixgbevf_acquire_msix_vectors(adapter, v_budget);
}

static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
			     struct ixgbevf_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

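/**
 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 * @txr_count: number of Tx rings for q vector
 * @txr_idx: index of first Tx ring to assign
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: number of Rx rings for q vector
 * @rxr_idx: index of first Rx ring to assign
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/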
static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
				  int txr_count, int txr_idx,
				  int xdp_count, int xdp_idx,
				  int rxr_count, int rxr_idx)
{
	struct ixgbevf_q_vector *q_vector;
	int reg_idx = txr_idx + xdp_idx;
	struct ixgbevf_ring *ring;
	int ring_count, size;

	ring_count = txr_count + xdp_count + rxr_count;
	size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);

	/* allocate q_vector and rings */
	q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbevf_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;
		ring->reg_idx = reg_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx++;
		reg_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbevf_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		ring->reg_idx = reg_idx;
		set_ring_xdp(ring);

		/* assign ring to adapter */
		adapter->xdp_ring[xdp_idx] = ring;

		/* update count and index */
		xdp_count--;
		xdp_idx++;
		reg_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbevf_add_ring(ring, &q_vector->rx);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;
		ring->reg_idx = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx++;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

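/**
 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/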
static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
{
	struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbevf_ring *ring;

	ixgbevf_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			adapter->xdp_ring[ring->queue_index] = NULL;
		else
			adapter->tx_ring[ring->queue_index] = NULL;
	}

	ixgbevf_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);

	/* ixgbevf_get_stats() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

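/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/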
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++, q_vectors--) {
			int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);

			err = ixgbevf_alloc_q_vector(adapter, v_idx,
						     0, 0, 0, 0, rqpv, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining -= rqpv;
			rxr_idx += rqpv;
		}
	}

	for (; q_vectors; v_idx++, q_vectors--) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors);

		err = ixgbevf_alloc_q_vector(adapter, v_idx,
					     tqpv, txr_idx,
					     xqpv, xdp_idx,
					     rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		rxr_idx += rqpv;
		txr_remaining -= tqpv;
		txr_idx += tqpv;
		xdp_remaining -= xqpv;
		xdp_idx += xqpv;
	}

	return 0;

err_out:
	while (v_idx) {
		v_idx--;
		ixgbevf_free_q_vector(adapter, v_idx);
	}

	return -ENOMEM;
}

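/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/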
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	while (q_vectors) {
		q_vectors--;
		ixgbevf_free_q_vector(adapter, q_vectors);
	}
}

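/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/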
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

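/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 **/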
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
	       adapter->num_rx_queues, adapter->num_tx_queues,
	       adapter->num_xdp_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

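/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/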
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}

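/**
 * ixgbevf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/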
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	if (hw->mac.type >= ixgbe_mac_X550_vf) {
		err = ixgbevf_init_rss_key(adapter);
		if (err)
			goto out;
	}

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state.  Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_shared_code failed: %d\n", err);
			goto out;
		}
		ixgbevf_negotiate_api(adapter);
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		ether_addr_copy(netdev->dev_addr, hw->mac.addr);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		ether_addr_copy(hw->mac.addr, netdev->dev_addr);
		ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
								\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
									 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}

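/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/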
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
	u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
	int i;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];

		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
	}

	adapter->hw_csum_rx_error = hw_csum_rx_error;
	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
	adapter->alloc_rx_page = alloc_rx_page;
}

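/**
 * ixgbevf_service_timer - Timer Call-back
 * @t: pointer to timer_list struct
 **/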
static void ixgbevf_service_timer(struct timer_list *t)
{
	struct ixgbevf_adapter *adapter = from_timer(adapter, t,
						     service_timer);

	/* Reset the timer */
	mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);

	ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
{
	if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
		return;

	rtnl_lock();
	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state)) {
		rtnl_unlock();
		return;
	}

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
	rtnl_unlock();
}

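/**
 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/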
static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_check_for_tx_hang(adapter->tx_ring[i]);
		for (i = 0; i < adapter->num_xdp_queues; i++)
			set_check_for_tx_hang(adapter->xdp_ring[i]);
	}

	/* get one bit for every active Tx/Rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];

		if (qv->rx.ring || qv->tx.ring)
			eics |= BIT(i);
	}

	/* Cause software interrupt to ensure rings are cleaned */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
}

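/**
 * ixgbevf_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/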
static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 err;

	spin_lock_bh(&adapter->mbx_lock);

	err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	/* if check for link returns error we will need to reset */
	if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
		link_up = false;
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;
}

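/**
 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
 *				 print link up message
 * @adapter: pointer to the device adapter structure
 **/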
static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
		 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
		 "10 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
		 "1 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
		 "100 Mbps" :
		 "unknown speed");

	netif_carrier_on(netdev);
}

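/**
 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
 *				   print link down message
 * @adapter: pointer to the adapter structure
 **/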
static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Down\n");

	netif_carrier_off(netdev);
}

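/**
 * ixgbevf_watchdog_subtask - worker thread to bring link up
 * @adapter: board private structure
 **/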
static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
{
	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	ixgbevf_watchdog_update_link(adapter);

	if (adapter->link_up)
		ixgbevf_watchdog_link_is_up(adapter);
	else
		ixgbevf_watchdog_link_is_down(adapter);

	ixgbevf_update_stats(adapter);
}

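/**
 * ixgbevf_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/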
static void ixgbevf_service_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       service_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbevf_down(adapter);
			rtnl_unlock();
		}
		return;
	}

	ixgbevf_queue_reset_subtask(adapter);
	ixgbevf_reset_subtask(adapter);
	ixgbevf_watchdog_subtask(adapter);
	ixgbevf_check_hang_subtask(adapter);

	ixgbevf_service_event_complete(adapter);
}

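/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/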
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

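/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/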
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
	for (i = 0; i < adapter->num_xdp_queues; i++)
		if (adapter->xdp_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->xdp_ring[i]);
}

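/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/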
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

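/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/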
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, j = 0, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	for (j = 0; j < adapter->num_xdp_queues; j++) {
		err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
		goto err_setup_tx;
	}

	return 0;
err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (j--)
		ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
	while (i--)
		ixgbevf_free_tx_resources(adapter->tx_ring[i]);

	return err;
}

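/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/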
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	/* XDP RX-queue info */
	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
			     rx_ring->queue_index, 0) < 0)
		goto err;

	rx_ring->xdp_prog = adapter->xdp_prog;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

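/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/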
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}

	return 0;
err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ixgbevf_free_rx_resources(adapter->rx_ring[i]);
	return err;
}

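/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/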
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
	ixgbevf_clean_rx_ring(rx_ring);

	rx_ring->xdp_prog = NULL;
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

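/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/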
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}

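/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/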
int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero.  The only way to recover
	 * is to unload/load the driver and hope that the number of
	 * msix vectors increases.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the VF can't start.
		 */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
			goto err_setup_reset;
		}
	}

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
	if (err)
		goto err_set_queues;

	ixgbevf_up_complete(adapter);

	return 0;

err_set_queues:
	ixgbevf_free_irq(adapter);
err_req_irq:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_rx:
	ixgbevf_free_all_tx_resources(adapter);
err_setup_tx:
	ixgbevf_reset(adapter);
err_setup_reset:

	return err;
}

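/**
 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
 * @adapter: the private adapter struct
 *
 * This function should contain the necessary work common to both suspending
 * and closing of the device.
 **/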
static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
{
	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);
}

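/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/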
int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_device_present(netdev))
		ixgbevf_close_suspend(adapter);

	return 0;
}

static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;

	if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
				&adapter->state))
		return;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	rtnl_lock();

	if (netif_running(dev))
		ixgbevf_close(dev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		ixgbevf_open(dev);

	rtnl_unlock();
}

static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 fceof_saidx,
				u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct ixgbevf_tx_buffer *first,
		       u8 *hdr_len,
		       struct ixgbevf_ipsec_tx_data *itd)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	u32 fceof_saidx = 0;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (eth_p_mpls(first->protocol))
		ip.hdr = skb_inner_network_header(skb);
	else
		ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
		int len = csum_start - trans_start;

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header, so set to
		 * a reverse csum if needed, else init check to 0.
		 */
		ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
					   csum_fold(csum_partial(trans_start,
								  len, 0)) : 0;
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);

	fceof_saidx |= itd->pfsa;
	type_tucmd |= itd->flags | itd->trailer_len;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
			    mss_l4len_idx);

	return 1;
}

static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct ixgbevf_tx_buffer *first,
			    struct ixgbevf_ipsec_tx_data *itd)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 fceof_saidx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto no_csum;
	}

	if (first->protocol == htons(ETH_P_IP))
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;

	/* update TX checksum flag */
	first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	fceof_saidx |= itd->pfsa;
	type_tucmd |= itd->flags | itd->trailer_len;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    fceof_saidx, type_tucmd, 0);
}

static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW VLAN bit if VLAN is present */
	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				     u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

	/* enable IPsec */
	if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);

	/* use index 1 context for TSO/FSO/FCOE/IPSEC */
	if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
		olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);

	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = olinfo_status;
}

static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			   struct ixgbevf_tx_buffer *first,
			   const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	skb_frag_t *frag;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	__le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier (wmb) to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	ixgbevf_write_tail(tx_ring, i);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buffer, len, 0);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;
}

static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}

static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
				   struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 };
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 hdr_len = 0;
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		count += TXD_USE_COUNT(skb_frag_size(frag));
	}
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

#ifdef CONFIG_IXGBEVF_IPSEC
	if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
		goto out_drop;
#endif
	tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];
	return ixgbevf_xmit_frame_ring(skb, tx_ring);
}

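/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/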
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	spin_lock_bh(&adapter->mbx_lock);

	err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return -EPERM;

	ether_addr_copy(hw->mac.addr, addr->sa_data);
	ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return 0;
}

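/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/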
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int ret;

	/* prevent MTU being changed to a size unsupported by XDP */
	if (adapter->xdp_prog) {
		dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n");
		return -EPERM;
	}

	spin_lock_bh(&adapter->mbx_lock);
	/* notify the PF of our intent to use this size of frame */
	ret = hw->mac.ops.set_rlpml(hw, max_frame);
	spin_unlock_bh(&adapter->mbx_lock);
	if (ret)
		return -EINVAL;

	hw_dbg(hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);

	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbevf_close_suspend(adapter);

	ixgbevf_clear_interrupt_scheme(adapter);
	rtnl_unlock();

	return 0;
}

static int __maybe_unused ixgbevf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	adapter->hw.hw_addr = adapter->io_addr;
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	if (!err && netif_running(netdev))
		err = ixgbevf_open(netdev);
	rtnl_unlock();
	if (err)
		return err;

	netif_device_attach(netdev);

	return err;
}

static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(&pdev->dev);
}

static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
				      const struct ixgbevf_ring *ring)
{
	u64 bytes, packets;
	unsigned int start;

	if (ring) {
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}
}

static void ixgbevf_get_stats(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		ixgbevf_get_tx_ring_stats(stats, ring);
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		ring = adapter->xdp_ring[i];
		ixgbevf_get_tx_ring_stats(stats, ring);
	}
	rcu_read_unlock();
}

#define IXGBEVF_MAX_MAC_HDR_LEN		127
#define IXGBEVF_MAX_NETWORK_HDR_LEN	511

static netdev_features_t
ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
		       netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}

static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
{
	int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct ixgbevf_adapter *adapter = netdev_priv(dev);
	struct bpf_prog *old_prog;

	/* verify ixgbevf ring attributes are sufficient for XDP */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = adapter->rx_ring[i];

		if (frame_size > ixgbevf_rx_bufsz(ring))
			return -EINVAL;
	}

	old_prog = xchg(&adapter->xdp_prog, prog);

	/* If transitioning XDP modes reconfigure rings */
	if (!!prog != !!old_prog) {
		/* Hardware has to reinitialize queues and interrupts to
		 * match packet buffer alignment. Unfortunately, the
		 * hardware is not flexible enough to do this dynamically.
		 */
		if (netif_running(dev))
			ixgbevf_close(dev);

		ixgbevf_clear_interrupt_scheme(adapter);
		ixgbevf_init_interrupt_scheme(adapter);

		if (netif_running(dev))
			ixgbevf_open(dev);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++)
			xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
	}

	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}

static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return ixgbevf_xdp_setup(dev, xdp->prog);
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
	.ndo_features_check	= ixgbevf_features_check,
	.ndo_bpf		= ixgbevf_xdp,
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}

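/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/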
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	int err, pci_using_dac;
	bool disable_dev = false;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	/* Setup HW API */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM |
			      NETIF_F_HW_CSUM |
			      NETIF_F_SCTP_CRC;

#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				      NETIF_F_GSO_GRE_CSUM | \
				      NETIF_F_GSO_IPXIP4 | \
				      NETIF_F_GSO_IPXIP6 | \
				      NETIF_F_GSO_UDP_TUNNEL | \
				      NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
			       IXGBEVF_GSO_PARTIAL_FEATURES;

	netdev->features = netdev->hw_features;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->mpls_features |= NETIF_F_SG |
				 NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_HW_CSUM;
	netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
	netdev->hw_enc_features |= netdev->vlan_features;

	/* set this bit last since it cannot be part of vlan_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 1504 or 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
		netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
				  (ETH_HLEN + ETH_FCS_LEN);
		break;
	default:
		if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
			netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
					  (ETH_HLEN + ETH_FCS_LEN);
		else
			netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
		break;
	}

	if (IXGBE_REMOVED(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}

	timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0);

	INIT_WORK(&adapter->service_task, ixgbevf_service_task);
	set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, netdev);
	netif_carrier_off(netdev);
	ixgbevf_init_ipsec_offload(adapter);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the VF info */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);

	switch (hw->mac.type) {
	case ixgbe_mac_X550_vf:
		dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
		break;
	case ixgbe_mac_X540_vf:
		dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
		break;
	case ixgbe_mac_82599_vf:
	default:
		dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
		break;
	}

	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(adapter->io_addr);
	kfree(adapter->rss_key);
err_ioremap:
	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}

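/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  The could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/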
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter;
	bool disable_dev;

	if (!netdev)
		return;

	adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_stop_ipsec_offload(adapter);
	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->io_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->rss_key);
	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

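/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/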
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbevf_close_suspend(adapter);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

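/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/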
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	adapter->hw.hw_addr = adapter->io_addr;
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

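/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/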
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();
	if (netif_running(netdev))
		ixgbevf_open(netdev);

	netif_device_attach(netdev);
	rtnl_unlock();
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);

static struct pci_driver ixgbevf_driver = {
	.name		= ixgbevf_driver_name,
	.id_table	= ixgbevf_pci_tbl,
	.probe		= ixgbevf_probe,
	.remove		= ixgbevf_remove,

	/* Power Management Hooks */
	.driver.pm	= &ixgbevf_pm_ops,

	.shutdown	= ixgbevf_shutdown,
	.err_handler	= &ixgbevf_err_handler
};

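/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/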
static int __init ixgbevf_init_module(void)
{
	pr_info("%s\n", ixgbevf_driver_string);
	pr_info("%s\n", ixgbevf_copyright);
	ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
	if (!ixgbevf_wq) {
		pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
		return -ENOMEM;
	}

	return pci_register_driver(&ixgbevf_driver);
}

module_init(ixgbevf_init_module);

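/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/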
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
	if (ixgbevf_wq) {
		destroy_workqueue(ixgbevf_wq);
		ixgbevf_wq = NULL;
	}
}

#ifdef DEBUG
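/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 * @hw: pointer to private hardware struct
 **/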
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */