// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2009 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <net/mpls.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "4.1.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2018 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf]	= &ixgbevf_82599_vf_info,
	[board_82599_vf_hv]	= &ixgbevf_82599_vf_hv_info,
	[board_X540_vf]		= &ixgbevf_X540_vf_info,
	[board_X540_vf_hv]	= &ixgbevf_X540_vf_hv_info,
	[board_X550_vf]		= &ixgbevf_X550_vf_info,
	[board_X550_vf_hv]	= &ixgbevf_X550_vf_hv_info,
	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,
	[board_X550EM_x_vf_hv]	= &ixgbevf_X550EM_x_vf_hv_info,
	[board_x550em_a_vf]	= &ixgbevf_x550em_a_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct workqueue_struct *ixgbevf_wq;

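/* Background/slow-path work (link status changes, requested resets, hang
 * checks) is funneled through a single service_task run on the
 * driver-private ixgbevf_wq workqueue.  ixgbevf_service_event_schedule()
 * only queues the task when the adapter is neither going down nor being
 * removed, and uses __IXGBEVF_SERVICE_SCHED as an "already queued" latch so
 * the work item is never queued twice; ixgbevf_service_event_complete()
 * clears that latch again.
 */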
static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbevf_wq, &adapter->service_task);
}

static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a little bit by not
	 * performing a read on the status register when the register just
	 * read was a status register read that returned
	 * IXGBE_FAILED_READ_REG.  It also blocks any potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
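
/* For reference, the queue-cause mapping above packs one 8-bit vector entry
 * per cause into VTIVAR registers that each cover two queues:
 * index = 16 * (queue & 1) + 8 * direction.  For example, Tx on queue 3
 * (direction 1) lands in VTIVAR(3 >> 1) = VTIVAR(1) at bit offset
 * 16 * 1 + 8 * 1 = 24, i.e. the most significant byte of that register.
 * (Worked example derived from the arithmetic above, not quoted from the
 * datasheet.)
 */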

static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
}

static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

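/* Illustrative wrap-around case for ixgbevf_get_tx_pending(): with a
 * 512-descriptor ring, head = 500 and tail = 10 means the tail has wrapped,
 * so the pending count is tail + count - head = 10 + 512 - 500 = 22.
 */
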
static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough.  This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending.  The
	 * ARMED bit is set to indicate a potential hang.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}
	/* reset the countdown */
	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;

	return false;
}

static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
		ixgbevf_service_event_schedule(adapter);
	}
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_tx_timeout_reset(adapter);
}

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring, int napi_budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;
		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
			total_ipsec++;

		/* free the skb */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buffer->data);
		else
			napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;
	adapter->tx_ipsec += total_ipsec;

	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
		struct ixgbe_hw *hw = &adapter->hw;
		union ixgbe_adv_tx_desc *eop_desc;

		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;

		pr_err("Detected Tx Unit Hang%s\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  next_to_watch        <%p>\n"
		       "  eop_desc->wb.status  <%x>\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       ring_is_xdp(tx_ring) ? " XDP" : "",
		       tx_ring->queue_index,
		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
		       tx_ring->next_to_use, i,
		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		if (!ring_is_xdp(tx_ring))
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbevf_tx_timeout_reset(adapter);

		return true;
	}

	if (ring_is_xdp(tx_ring))
		return !!budget;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * ixgbevf_rx_skb - Helper function for handing Rx packets up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
430static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
431 struct sk_buff *skb)
432{
433 napi_gro_receive(&q_vector->napi, skb);
434}
435
436#define IXGBE_RSS_L4_TYPES_MASK \
437 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
438 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
439 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
440 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
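
/* Hash-type note: rss_type values covered by IXGBE_RSS_L4_TYPES_MASK were
 * hashed over the L4 ports as well, so ixgbevf_rx_hash() below reports them
 * as PKT_HASH_TYPE_L4; every other non-zero rss_type is reported as the
 * weaker PKT_HASH_TYPE_L3.
 */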
441
442static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
443 union ixgbe_adv_rx_desc *rx_desc,
444 struct sk_buff *skb)
445{
446 u16 rss_type;
447
448 if (!(ring->netdev->features & NETIF_F_RXHASH))
449 return;
450
451 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
452 IXGBE_RXDADV_RSSTYPE_MASK;
453
454 if (!rss_type)
455 return;
456
457 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
458 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
459 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
460}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good checksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
468static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
469 union ixgbe_adv_rx_desc *rx_desc,
470 struct sk_buff *skb)
471{
472 skb_checksum_none_assert(skb);
473
474
475 if (!(ring->netdev->features & NETIF_F_RXCSUM))
476 return;
477
478
479 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
480 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
481 ring->rx_stats.csum_err++;
482 return;
483 }
484
485 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
486 return;
487
488 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
489 ring->rx_stats.csum_err++;
490 return;
491 }
492
493
494 skb->ip_summed = CHECKSUM_UNNECESSARY;
495}

/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
507static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
508 union ixgbe_adv_rx_desc *rx_desc,
509 struct sk_buff *skb)
510{
511 ixgbevf_rx_hash(rx_ring, rx_desc, skb);
512 ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
513
514 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
515 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
516 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
517
518 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
519 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
520 }
521
522 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
523 ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);
524
525 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
526}
527
528static
529struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
530 const unsigned int size)
531{
532 struct ixgbevf_rx_buffer *rx_buffer;
533
534 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
535 prefetchw(rx_buffer->page);
536
537
538 dma_sync_single_range_for_cpu(rx_ring->dev,
539 rx_buffer->dma,
540 rx_buffer->page_offset,
541 size,
542 DMA_FROM_DEVICE);
543
544 rx_buffer->pagecnt_bias--;
545
546 return rx_buffer;
547}
548
549static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
550 struct ixgbevf_rx_buffer *rx_buffer,
551 struct sk_buff *skb)
552{
553 if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
554
555 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
556 } else {
557 if (IS_ERR(skb))
558
559
560
561 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
562 ixgbevf_rx_pg_size(rx_ring),
563 DMA_FROM_DEVICE,
564 IXGBEVF_RX_DMA_ATTR);
565 __page_frag_cache_drain(rx_buffer->page,
566 rx_buffer->pagecnt_bias);
567 }
568
569
570 rx_buffer->page = NULL;
571}

/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
583static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
584 union ixgbe_adv_rx_desc *rx_desc)
585{
586 u32 ntc = rx_ring->next_to_clean + 1;
587
588
589 ntc = (ntc < rx_ring->count) ? ntc : 0;
590 rx_ring->next_to_clean = ntc;
591
592 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
593
594 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
595 return false;
596
597 return true;
598}
599
600static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
601{
602 return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
603}
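
/* Rx buffer model: each ixgbevf_rx_buffer wraps a page (half of which is in
 * flight at a time on 4K-page systems) that stays DMA mapped for its whole
 * lifetime and is handed back and forth between the driver and hardware.
 * pagecnt_bias tracks how many references the driver still "owes" against
 * the page refcount, which is what lets ixgbevf_can_reuse_rx_page() recycle
 * pages without unmapping and remapping them.
 */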
604
605static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
606 struct ixgbevf_rx_buffer *bi)
607{
608 struct page *page = bi->page;
609 dma_addr_t dma;
610
611
612 if (likely(page))
613 return true;
614
615
616 page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
617 if (unlikely(!page)) {
618 rx_ring->rx_stats.alloc_rx_page_failed++;
619 return false;
620 }
621
622
623 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
624 ixgbevf_rx_pg_size(rx_ring),
625 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
626
627
628
629
630 if (dma_mapping_error(rx_ring->dev, dma)) {
631 __free_pages(page, ixgbevf_rx_pg_order(rx_ring));
632
633 rx_ring->rx_stats.alloc_rx_page_failed++;
634 return false;
635 }
636
637 bi->dma = dma;
638 bi->page = page;
639 bi->page_offset = ixgbevf_rx_offset(rx_ring);
640 bi->pagecnt_bias = 1;
641 rx_ring->rx_stats.alloc_rx_page++;
642
643 return true;
644}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
651static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
652 u16 cleaned_count)
653{
654 union ixgbe_adv_rx_desc *rx_desc;
655 struct ixgbevf_rx_buffer *bi;
656 unsigned int i = rx_ring->next_to_use;
657
658
659 if (!cleaned_count || !rx_ring->netdev)
660 return;
661
662 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
663 bi = &rx_ring->rx_buffer_info[i];
664 i -= rx_ring->count;
665
666 do {
667 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
668 break;
669
670
671 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
672 bi->page_offset,
673 ixgbevf_rx_bufsz(rx_ring),
674 DMA_FROM_DEVICE);
675
676
677
678
679 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
680
681 rx_desc++;
682 bi++;
683 i++;
684 if (unlikely(!i)) {
685 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
686 bi = rx_ring->rx_buffer_info;
687 i -= rx_ring->count;
688 }
689
690
691 rx_desc->wb.upper.length = 0;
692
693 cleaned_count--;
694 } while (cleaned_count);
695
696 i += rx_ring->count;
697
698 if (rx_ring->next_to_use != i) {
699
700 rx_ring->next_to_use = i;
701
702
703 rx_ring->next_to_alloc = i;
704
705
706
707
708
709
710 wmb();
711 ixgbevf_write_tail(rx_ring, i);
712 }
713}

/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
733static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
734 union ixgbe_adv_rx_desc *rx_desc,
735 struct sk_buff *skb)
736{
737
738 if (IS_ERR(skb))
739 return true;
740
741
742 if (unlikely(ixgbevf_test_staterr(rx_desc,
743 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
744 struct net_device *netdev = rx_ring->netdev;
745
746 if (!(netdev->features & NETIF_F_RXALL)) {
747 dev_kfree_skb_any(skb);
748 return true;
749 }
750 }
751
752
753 if (eth_skb_pad(skb))
754 return true;
755
756 return false;
757}

/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
766static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
767 struct ixgbevf_rx_buffer *old_buff)
768{
769 struct ixgbevf_rx_buffer *new_buff;
770 u16 nta = rx_ring->next_to_alloc;
771
772 new_buff = &rx_ring->rx_buffer_info[nta];
773
774
775 nta++;
776 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
777
778
779 new_buff->page = old_buff->page;
780 new_buff->dma = old_buff->dma;
781 new_buff->page_offset = old_buff->page_offset;
782 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
783}
784
785static inline bool ixgbevf_page_is_reserved(struct page *page)
786{
787 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
788}
789
790static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
791{
792 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
793 struct page *page = rx_buffer->page;
794
795
796 if (unlikely(ixgbevf_page_is_reserved(page)))
797 return false;
798
799#if (PAGE_SIZE < 8192)
800
801 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
802 return false;
803#else
804#define IXGBEVF_LAST_OFFSET \
805 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
806
807 if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
808 return false;
809
810#endif
811
812
813
814
815
816 if (unlikely(!pagecnt_bias)) {
817 page_ref_add(page, USHRT_MAX);
818 rx_buffer->pagecnt_bias = USHRT_MAX;
819 }
820
821 return true;
822}
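
/* Example of the bias scheme above: a freshly mapped page starts with
 * page_ref_count == 1 and pagecnt_bias == 1.  ixgbevf_get_rx_buffer() drops
 * the bias to 0 when it hands the buffer to the Rx path, so here
 * page_ref_count(page) - pagecnt_bias == 1, meaning nobody outside the
 * driver holds the page and it may be reused.  Because the bias has hit
 * zero, the refcount is bulk-charged with USHRT_MAX and the bias reset to
 * USHRT_MAX so subsequent reuses avoid a page_ref_add() each time.
 */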

/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb
 * as a paged fragment and advance the buffer for reuse.
 **/
833static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
834 struct ixgbevf_rx_buffer *rx_buffer,
835 struct sk_buff *skb,
836 unsigned int size)
837{
838#if (PAGE_SIZE < 8192)
839 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
840#else
841 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
842 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
843 SKB_DATA_ALIGN(size);
844#endif
845 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
846 rx_buffer->page_offset, size, truesize);
847#if (PAGE_SIZE < 8192)
848 rx_buffer->page_offset ^= truesize;
849#else
850 rx_buffer->page_offset += truesize;
851#endif
852}
853
854static
855struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
856 struct ixgbevf_rx_buffer *rx_buffer,
857 struct xdp_buff *xdp,
858 union ixgbe_adv_rx_desc *rx_desc)
859{
860 unsigned int size = xdp->data_end - xdp->data;
861#if (PAGE_SIZE < 8192)
862 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
863#else
864 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
865 xdp->data_hard_start);
866#endif
867 unsigned int headlen;
868 struct sk_buff *skb;
869
870
871 prefetch(xdp->data);
872#if L1_CACHE_BYTES < 128
873 prefetch(xdp->data + L1_CACHE_BYTES);
874#endif
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
892 if (unlikely(!skb))
893 return NULL;
894
895
896 headlen = size;
897 if (headlen > IXGBEVF_RX_HDR_SIZE)
898 headlen = eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE);
899
900
901 memcpy(__skb_put(skb, headlen), xdp->data,
902 ALIGN(headlen, sizeof(long)));
903
904
905 size -= headlen;
906 if (size) {
907 skb_add_rx_frag(skb, 0, rx_buffer->page,
908 (xdp->data + headlen) -
909 page_address(rx_buffer->page),
910 size, truesize);
911#if (PAGE_SIZE < 8192)
912 rx_buffer->page_offset ^= truesize;
913#else
914 rx_buffer->page_offset += truesize;
915#endif
916 } else {
917 rx_buffer->pagecnt_bias++;
918 }
919
920 return skb;
921}
922
923static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
924 u32 qmask)
925{
926 struct ixgbe_hw *hw = &adapter->hw;
927
928 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
929}
930
931static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
932 struct ixgbevf_rx_buffer *rx_buffer,
933 struct xdp_buff *xdp,
934 union ixgbe_adv_rx_desc *rx_desc)
935{
936 unsigned int metasize = xdp->data - xdp->data_meta;
937#if (PAGE_SIZE < 8192)
938 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
939#else
940 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
941 SKB_DATA_ALIGN(xdp->data_end -
942 xdp->data_hard_start);
943#endif
944 struct sk_buff *skb;
945
946
947
948
949
950
951 prefetch(xdp->data_meta);
952#if L1_CACHE_BYTES < 128
953 prefetch(xdp->data_meta + L1_CACHE_BYTES);
954#endif
955
956
957 skb = build_skb(xdp->data_hard_start, truesize);
958 if (unlikely(!skb))
959 return NULL;
960
961
962 skb_reserve(skb, xdp->data - xdp->data_hard_start);
963 __skb_put(skb, xdp->data_end - xdp->data);
964 if (metasize)
965 skb_metadata_set(skb, metasize);
966
967
968#if (PAGE_SIZE < 8192)
969 rx_buffer->page_offset ^= truesize;
970#else
971 rx_buffer->page_offset += truesize;
972#endif
973
974 return skb;
975}
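
/* Note on the two skb paths: ixgbevf_build_skb() above turns the already
 * DMA-mapped receive page directly into the skb head (no copy, at the cost
 * of donating the whole truesize to the skb), while ixgbevf_construct_skb()
 * allocates a small skb, copies up to IXGBEVF_RX_HDR_SIZE of headers into it
 * and attaches the rest of the page as a paged fragment.  Which one runs is
 * decided per ring via ring_uses_build_skb() in ixgbevf_clean_rx_irq().
 */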
976
977#define IXGBEVF_XDP_PASS 0
978#define IXGBEVF_XDP_CONSUMED 1
979#define IXGBEVF_XDP_TX 2
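
/* ixgbevf_run_xdp() reports these verdicts to the Rx loop by returning
 * ERR_PTR(-result): IXGBEVF_XDP_PASS becomes a NULL "no skb yet" pointer,
 * while CONSUMED/TX come back as error pointers so ixgbevf_clean_rx_irq()
 * can tell a buffer eaten by XDP apart from a real sk_buff.
 */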
980
981static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
982 struct xdp_buff *xdp)
983{
984 struct ixgbevf_tx_buffer *tx_buffer;
985 union ixgbe_adv_tx_desc *tx_desc;
986 u32 len, cmd_type;
987 dma_addr_t dma;
988 u16 i;
989
990 len = xdp->data_end - xdp->data;
991
992 if (unlikely(!ixgbevf_desc_unused(ring)))
993 return IXGBEVF_XDP_CONSUMED;
994
995 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
996 if (dma_mapping_error(ring->dev, dma))
997 return IXGBEVF_XDP_CONSUMED;
998
999
1000 i = ring->next_to_use;
1001 tx_buffer = &ring->tx_buffer_info[i];
1002
1003 dma_unmap_len_set(tx_buffer, len, len);
1004 dma_unmap_addr_set(tx_buffer, dma, dma);
1005 tx_buffer->data = xdp->data;
1006 tx_buffer->bytecount = len;
1007 tx_buffer->gso_segs = 1;
1008 tx_buffer->protocol = 0;
1009
1010
1011
1012
1013 if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
1014 struct ixgbe_adv_tx_context_desc *context_desc;
1015
1016 set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
1017
1018 context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
1019 context_desc->vlan_macip_lens =
1020 cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
1021 context_desc->fceof_saidx = 0;
1022 context_desc->type_tucmd_mlhl =
1023 cpu_to_le32(IXGBE_TXD_CMD_DEXT |
1024 IXGBE_ADVTXD_DTYP_CTXT);
1025 context_desc->mss_l4len_idx = 0;
1026
1027 i = 1;
1028 }
1029
1030
1031 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
1032 IXGBE_ADVTXD_DCMD_DEXT |
1033 IXGBE_ADVTXD_DCMD_IFCS;
1034 cmd_type |= len | IXGBE_TXD_CMD;
1035
1036 tx_desc = IXGBEVF_TX_DESC(ring, i);
1037 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1038
1039 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1040 tx_desc->read.olinfo_status =
1041 cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
1042 IXGBE_ADVTXD_CC);
1043
1044
1045 smp_wmb();
1046
1047
1048 i++;
1049 if (i == ring->count)
1050 i = 0;
1051
1052 tx_buffer->next_to_watch = tx_desc;
1053 ring->next_to_use = i;
1054
1055 return IXGBEVF_XDP_TX;
1056}
1057
1058static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
1059 struct ixgbevf_ring *rx_ring,
1060 struct xdp_buff *xdp)
1061{
1062 int result = IXGBEVF_XDP_PASS;
1063 struct ixgbevf_ring *xdp_ring;
1064 struct bpf_prog *xdp_prog;
1065 u32 act;
1066
1067 rcu_read_lock();
1068 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1069
1070 if (!xdp_prog)
1071 goto xdp_out;
1072
1073 act = bpf_prog_run_xdp(xdp_prog, xdp);
1074 switch (act) {
1075 case XDP_PASS:
1076 break;
1077 case XDP_TX:
1078 xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
1079 result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
1080 break;
1081 default:
1082 bpf_warn_invalid_xdp_action(act);
1083
1084 case XDP_ABORTED:
1085 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1086
1087 case XDP_DROP:
1088 result = IXGBEVF_XDP_CONSUMED;
1089 break;
1090 }
1091xdp_out:
1092 rcu_read_unlock();
1093 return ERR_PTR(-result);
1094}
1095
1096static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
1097 struct ixgbevf_rx_buffer *rx_buffer,
1098 unsigned int size)
1099{
1100#if (PAGE_SIZE < 8192)
1101 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
1102
1103 rx_buffer->page_offset ^= truesize;
1104#else
1105 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
1106 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
1107 SKB_DATA_ALIGN(size);
1108
1109 rx_buffer->page_offset += truesize;
1110#endif
1111}
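
/* Buffer flip example: with 4K pages each Rx buffer owns half a page and
 * truesize is PAGE_SIZE / 2, so "page_offset ^= truesize" just toggles the
 * buffer between the lower and upper half of the page.  With larger pages
 * the offset instead advances by the truesize that was actually consumed,
 * and ixgbevf_can_reuse_rx_page() stops the reuse once IXGBEVF_LAST_OFFSET
 * is passed.
 */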
1112
1113static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
1114 struct ixgbevf_ring *rx_ring,
1115 int budget)
1116{
1117 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1118 struct ixgbevf_adapter *adapter = q_vector->adapter;
1119 u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
1120 struct sk_buff *skb = rx_ring->skb;
1121 bool xdp_xmit = false;
1122 struct xdp_buff xdp;
1123
1124 xdp.rxq = &rx_ring->xdp_rxq;
1125
1126 while (likely(total_rx_packets < budget)) {
1127 struct ixgbevf_rx_buffer *rx_buffer;
1128 union ixgbe_adv_rx_desc *rx_desc;
1129 unsigned int size;
1130
1131
1132 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
1133 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
1134 cleaned_count = 0;
1135 }
1136
1137 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1138 size = le16_to_cpu(rx_desc->wb.upper.length);
1139 if (!size)
1140 break;
1141
1142
1143
1144
1145
1146 rmb();
1147
1148 rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
1149
1150
1151 if (!skb) {
1152 xdp.data = page_address(rx_buffer->page) +
1153 rx_buffer->page_offset;
1154 xdp.data_meta = xdp.data;
1155 xdp.data_hard_start = xdp.data -
1156 ixgbevf_rx_offset(rx_ring);
1157 xdp.data_end = xdp.data + size;
1158
1159 skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
1160 }
1161
1162 if (IS_ERR(skb)) {
1163 if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
1164 xdp_xmit = true;
1165 ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
1166 size);
1167 } else {
1168 rx_buffer->pagecnt_bias++;
1169 }
1170 total_rx_packets++;
1171 total_rx_bytes += size;
1172 } else if (skb) {
1173 ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1174 } else if (ring_uses_build_skb(rx_ring)) {
1175 skb = ixgbevf_build_skb(rx_ring, rx_buffer,
1176 &xdp, rx_desc);
1177 } else {
1178 skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
1179 &xdp, rx_desc);
1180 }
1181
1182
1183 if (!skb) {
1184 rx_ring->rx_stats.alloc_rx_buff_failed++;
1185 rx_buffer->pagecnt_bias++;
1186 break;
1187 }
1188
1189 ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
1190 cleaned_count++;
1191
1192
1193 if (ixgbevf_is_non_eop(rx_ring, rx_desc))
1194 continue;
1195
1196
1197 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
1198 skb = NULL;
1199 continue;
1200 }
1201
1202
1203 total_rx_bytes += skb->len;
1204
1205
1206
1207
1208 if ((skb->pkt_type == PACKET_BROADCAST ||
1209 skb->pkt_type == PACKET_MULTICAST) &&
1210 ether_addr_equal(rx_ring->netdev->dev_addr,
1211 eth_hdr(skb)->h_source)) {
1212 dev_kfree_skb_irq(skb);
1213 continue;
1214 }
1215
1216
1217 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
1218
1219 ixgbevf_rx_skb(q_vector, skb);
1220
1221
1222 skb = NULL;
1223
1224
1225 total_rx_packets++;
1226 }
1227
1228
1229 rx_ring->skb = skb;
1230
1231 if (xdp_xmit) {
1232 struct ixgbevf_ring *xdp_ring =
1233 adapter->xdp_ring[rx_ring->queue_index];
1234
1235
1236
1237
1238 wmb();
1239 ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
1240 }
1241
1242 u64_stats_update_begin(&rx_ring->syncp);
1243 rx_ring->stats.packets += total_rx_packets;
1244 rx_ring->stats.bytes += total_rx_bytes;
1245 u64_stats_update_end(&rx_ring->syncp);
1246 q_vector->rx.total_packets += total_rx_packets;
1247 q_vector->rx.total_bytes += total_rx_bytes;
1248
1249 return total_rx_packets;
1250}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function cleans all Tx and Rx rings associated with a q_vector.
 **/
1260static int ixgbevf_poll(struct napi_struct *napi, int budget)
1261{
1262 struct ixgbevf_q_vector *q_vector =
1263 container_of(napi, struct ixgbevf_q_vector, napi);
1264 struct ixgbevf_adapter *adapter = q_vector->adapter;
1265 struct ixgbevf_ring *ring;
1266 int per_ring_budget, work_done = 0;
1267 bool clean_complete = true;
1268
1269 ixgbevf_for_each_ring(ring, q_vector->tx) {
1270 if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
1271 clean_complete = false;
1272 }
1273
1274 if (budget <= 0)
1275 return budget;
1276
1277
1278
1279
1280 if (q_vector->rx.count > 1)
1281 per_ring_budget = max(budget/q_vector->rx.count, 1);
1282 else
1283 per_ring_budget = budget;
1284
1285 ixgbevf_for_each_ring(ring, q_vector->rx) {
1286 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
1287 per_ring_budget);
1288 work_done += cleaned;
1289 if (cleaned >= per_ring_budget)
1290 clean_complete = false;
1291 }
1292
1293
1294 if (!clean_complete)
1295 return budget;
1296
1297
1298
1299
1300 if (likely(napi_complete_done(napi, work_done))) {
1301 if (adapter->rx_itr_setting == 1)
1302 ixgbevf_set_itr(q_vector);
1303 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
1304 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1305 ixgbevf_irq_enable_queues(adapter,
1306 BIT(q_vector->v_idx));
1307 }
1308
1309 return min(work_done, budget - 1);
1310}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
1316void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
1317{
1318 struct ixgbevf_adapter *adapter = q_vector->adapter;
1319 struct ixgbe_hw *hw = &adapter->hw;
1320 int v_idx = q_vector->v_idx;
1321 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1322
1323
1324
1325
1326 itr_reg |= IXGBE_EITR_CNT_WDIS;
1327
1328 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
1329}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
1338static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1339{
1340 struct ixgbevf_q_vector *q_vector;
1341 int q_vectors, v_idx;
1342
1343 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1344 adapter->eims_enable_mask = 0;
1345
1346
1347
1348
1349 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1350 struct ixgbevf_ring *ring;
1351
1352 q_vector = adapter->q_vector[v_idx];
1353
1354 ixgbevf_for_each_ring(ring, q_vector->rx)
1355 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1356
1357 ixgbevf_for_each_ring(ring, q_vector->tx)
1358 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1359
1360 if (q_vector->tx.ring && !q_vector->rx.ring) {
1361
1362 if (adapter->tx_itr_setting == 1)
1363 q_vector->itr = IXGBE_12K_ITR;
1364 else
1365 q_vector->itr = adapter->tx_itr_setting;
1366 } else {
1367
1368 if (adapter->rx_itr_setting == 1)
1369 q_vector->itr = IXGBE_20K_ITR;
1370 else
1371 q_vector->itr = adapter->rx_itr_setting;
1372 }
1373
1374
1375 adapter->eims_enable_mask |= BIT(v_idx);
1376
1377 ixgbevf_write_eitr(q_vector);
1378 }
1379
1380 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
1381
1382 adapter->eims_other = BIT(v_idx);
1383 adapter->eims_enable_mask |= adapter->eims_other;
1384}
1385
1386enum latency_range {
1387 lowest_latency = 0,
1388 low_latency = 1,
1389 bulk_latency = 2,
1390 latency_invalid = 255
1391};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packet and byte counts during the last
 * interrupt.  The advantage of per-interrupt computation is faster updates
 * and a more accurate ITR for the current traffic pattern.  The thresholds
 * below trade interrupt rate against latency: low byte rates favor low
 * latency, high byte rates favor bulk throughput.
 **/
1406static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1407 struct ixgbevf_ring_container *ring_container)
1408{
1409 int bytes = ring_container->total_bytes;
1410 int packets = ring_container->total_packets;
1411 u32 timepassed_us;
1412 u64 bytes_perint;
1413 u8 itr_setting = ring_container->itr;
1414
1415 if (packets == 0)
1416 return;
1417
1418
1419
1420
1421
1422
1423
1424 timepassed_us = q_vector->itr >> 2;
1425 bytes_perint = bytes / timepassed_us;
1426
1427 switch (itr_setting) {
1428 case lowest_latency:
1429 if (bytes_perint > 10)
1430 itr_setting = low_latency;
1431 break;
1432 case low_latency:
1433 if (bytes_perint > 20)
1434 itr_setting = bulk_latency;
1435 else if (bytes_perint <= 10)
1436 itr_setting = lowest_latency;
1437 break;
1438 case bulk_latency:
1439 if (bytes_perint <= 20)
1440 itr_setting = low_latency;
1441 break;
1442 }
1443
1444
1445 ring_container->total_bytes = 0;
1446 ring_container->total_packets = 0;
1447
1448
1449 ring_container->itr = itr_setting;
1450}
1451
1452static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
1453{
1454 u32 new_itr = q_vector->itr;
1455 u8 current_itr;
1456
1457 ixgbevf_update_itr(q_vector, &q_vector->tx);
1458 ixgbevf_update_itr(q_vector, &q_vector->rx);
1459
1460 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1461
1462 switch (current_itr) {
1463
1464 case lowest_latency:
1465 new_itr = IXGBE_100K_ITR;
1466 break;
1467 case low_latency:
1468 new_itr = IXGBE_20K_ITR;
1469 break;
1470 case bulk_latency:
1471 new_itr = IXGBE_12K_ITR;
1472 break;
1473 default:
1474 break;
1475 }
1476
1477 if (new_itr != q_vector->itr) {
1478
1479 new_itr = (10 * new_itr * q_vector->itr) /
1480 ((9 * new_itr) + q_vector->itr);
1481
1482
1483 q_vector->itr = new_itr;
1484
1485 ixgbevf_write_eitr(q_vector);
1486 }
1487}
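
/* The weighted averaging at the end of ixgbevf_set_itr() damps ITR changes:
 * new = (10 * new * old) / (9 * new + old).  For example, moving from an
 * old value of 200 toward a target of 50 yields 10*50*200 / (9*50 + 200)
 * = 100000 / 650 ~= 153, so the interrupt rate ramps over several
 * invocations instead of jumping in one step.  (Illustrative numbers only.)
 */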
1488
1489static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1490{
1491 struct ixgbevf_adapter *adapter = data;
1492 struct ixgbe_hw *hw = &adapter->hw;
1493
1494 hw->mac.get_link_status = 1;
1495
1496 ixgbevf_service_event_schedule(adapter);
1497
1498 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1499
1500 return IRQ_HANDLED;
1501}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
1508static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
1509{
1510 struct ixgbevf_q_vector *q_vector = data;
1511
1512
1513 if (q_vector->rx.ring || q_vector->tx.ring)
1514 napi_schedule_irqoff(&q_vector->napi);
1515
1516 return IRQ_HANDLED;
1517}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
1526static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1527{
1528 struct net_device *netdev = adapter->netdev;
1529 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1530 unsigned int ri = 0, ti = 0;
1531 int vector, err;
1532
1533 for (vector = 0; vector < q_vectors; vector++) {
1534 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1535 struct msix_entry *entry = &adapter->msix_entries[vector];
1536
1537 if (q_vector->tx.ring && q_vector->rx.ring) {
1538 snprintf(q_vector->name, sizeof(q_vector->name),
1539 "%s-TxRx-%u", netdev->name, ri++);
1540 ti++;
1541 } else if (q_vector->rx.ring) {
1542 snprintf(q_vector->name, sizeof(q_vector->name),
1543 "%s-rx-%u", netdev->name, ri++);
1544 } else if (q_vector->tx.ring) {
1545 snprintf(q_vector->name, sizeof(q_vector->name),
1546 "%s-tx-%u", netdev->name, ti++);
1547 } else {
1548
1549 continue;
1550 }
1551 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1552 q_vector->name, q_vector);
1553 if (err) {
1554 hw_dbg(&adapter->hw,
1555 "request_irq failed for MSIX interrupt Error: %d\n",
1556 err);
1557 goto free_queue_irqs;
1558 }
1559 }
1560
1561 err = request_irq(adapter->msix_entries[vector].vector,
1562 &ixgbevf_msix_other, 0, netdev->name, adapter);
1563 if (err) {
1564 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1565 err);
1566 goto free_queue_irqs;
1567 }
1568
1569 return 0;
1570
1571free_queue_irqs:
1572 while (vector) {
1573 vector--;
1574 free_irq(adapter->msix_entries[vector].vector,
1575 adapter->q_vector[vector]);
1576 }

	/* This failure is non-recoverable - it indicates the system is
	 * out of MSI-X vector resources and the VF driver cannot run
	 * without them.  Set the number of MSI-X vectors to zero so that
	 * device open fails cleanly; the only recovery is to unload and
	 * reload the driver once vectors are available again.
	 */
1587 adapter->num_msix_vectors = 0;
1588 return err;
1589}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
1598static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1599{
1600 int err = ixgbevf_request_msix_irqs(adapter);
1601
1602 if (err)
1603 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
1604
1605 return err;
1606}
1607
1608static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1609{
1610 int i, q_vectors;
1611
1612 if (!adapter->msix_entries)
1613 return;
1614
1615 q_vectors = adapter->num_msix_vectors;
1616 i = q_vectors - 1;
1617
1618 free_irq(adapter->msix_entries[i].vector, adapter);
1619 i--;
1620
1621 for (; i >= 0; i--) {
1622
1623 if (!adapter->q_vector[i]->rx.ring &&
1624 !adapter->q_vector[i]->tx.ring)
1625 continue;
1626
1627 free_irq(adapter->msix_entries[i].vector,
1628 adapter->q_vector[i]);
1629 }
1630}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
1636static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1637{
1638 struct ixgbe_hw *hw = &adapter->hw;
1639 int i;
1640
1641 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1642 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1643 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1644
1645 IXGBE_WRITE_FLUSH(hw);
1646
1647 for (i = 0; i < adapter->num_msix_vectors; i++)
1648 synchronize_irq(adapter->msix_entries[i].vector);
1649}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
1655static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1656{
1657 struct ixgbe_hw *hw = &adapter->hw;
1658
1659 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1660 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1661 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1662}

/**
 * ixgbevf_configure_tx_ring - Configure a VF Tx ring after reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
1671static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1672 struct ixgbevf_ring *ring)
1673{
1674 struct ixgbe_hw *hw = &adapter->hw;
1675 u64 tdba = ring->dma;
1676 int wait_loop = 10;
1677 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1678 u8 reg_idx = ring->reg_idx;
1679
1680
1681 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1682 IXGBE_WRITE_FLUSH(hw);
1683
1684 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1685 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1686 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1687 ring->count * sizeof(union ixgbe_adv_tx_desc));
1688
1689
1690 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1691 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1692
1693
1694 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1695 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1696 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1697
1698
1699 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1700 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1701 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1702
1703
1704 ring->next_to_clean = 0;
1705 ring->next_to_use = 0;
1706
1707
1708
1709
1710
1711 txdctl |= (8 << 16);
1712
1713
1714 txdctl |= (1u << 8) |
1715 32;
1716
1717
1718 memset(ring->tx_buffer_info, 0,
1719 sizeof(struct ixgbevf_tx_buffer) * ring->count);
1720
1721 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1722 clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
1723
1724 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1725
1726
1727 do {
1728 usleep_range(1000, 2000);
1729 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1730 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1731 if (!wait_loop)
1732 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
1733}

/**
 * ixgbevf_configure_tx - Configure VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
1741static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1742{
1743 u32 i;
1744
1745
1746 for (i = 0; i < adapter->num_tx_queues; i++)
1747 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1748 for (i = 0; i < adapter->num_xdp_queues; i++)
1749 ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
1750}
1751
1752#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1753
1754static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
1755 struct ixgbevf_ring *ring, int index)
1756{
1757 struct ixgbe_hw *hw = &adapter->hw;
1758 u32 srrctl;
1759
1760 srrctl = IXGBE_SRRCTL_DROP_EN;
1761
1762 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1763 if (ring_uses_large_buffer(ring))
1764 srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1765 else
1766 srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1767 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1768
1769 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1770}
1771
1772static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1773{
1774 struct ixgbe_hw *hw = &adapter->hw;
1775
1776
1777 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1778 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1779 IXGBE_PSRTYPE_L2HDR;
1780
1781 if (adapter->num_rx_queues > 1)
1782 psrtype |= BIT(29);
1783
1784 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1785}
1786
1787#define IXGBEVF_MAX_RX_DESC_POLL 10
1788static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1789 struct ixgbevf_ring *ring)
1790{
1791 struct ixgbe_hw *hw = &adapter->hw;
1792 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1793 u32 rxdctl;
1794 u8 reg_idx = ring->reg_idx;
1795
1796 if (IXGBE_REMOVED(hw->hw_addr))
1797 return;
1798 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1799 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1800
1801
1802 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1803
1804
1805 do {
1806 udelay(10);
1807 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1808 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1809
1810 if (!wait_loop)
1811 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1812 reg_idx);
1813}
1814
1815static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1816 struct ixgbevf_ring *ring)
1817{
1818 struct ixgbe_hw *hw = &adapter->hw;
1819 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1820 u32 rxdctl;
1821 u8 reg_idx = ring->reg_idx;
1822
1823 if (IXGBE_REMOVED(hw->hw_addr))
1824 return;
1825 do {
1826 usleep_range(1000, 2000);
1827 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1828 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1829
1830 if (!wait_loop)
1831 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1832 reg_idx);
1833}

/**
 * ixgbevf_init_rss_key - Initialize adapter RSS key
 * @adapter: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 **/
1841static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
1842{
1843 u32 *rss_key;
1844
1845 if (!adapter->rss_key) {
1846 rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
1847 if (unlikely(!rss_key))
1848 return -ENOMEM;
1849
1850 netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
1851 adapter->rss_key = rss_key;
1852 }
1853
1854 return 0;
1855}
1856
1857static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1858{
1859 struct ixgbe_hw *hw = &adapter->hw;
1860 u32 vfmrqc = 0, vfreta = 0;
1861 u16 rss_i = adapter->num_rx_queues;
1862 u8 i, j;
1863
1864
1865 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1866 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));
1867
1868 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
1869 if (j == rss_i)
1870 j = 0;
1871
1872 adapter->rss_indir_tbl[i] = j;
1873
1874 vfreta |= j << (i & 0x3) * 8;
1875 if ((i & 3) == 3) {
1876 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1877 vfreta = 0;
1878 }
1879 }
1880
1881
1882 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1883 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1884 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1885 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1886
1887 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1888
1889 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1890}
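
/* VFRETA packing note: the redirection table holds one byte per entry, so
 * the loop above accumulates four entries per 32-bit register
 * ("vfreta |= j << (i & 0x3) * 8") and flushes it every fourth iteration.
 * Entries simply rotate through 0..num_rx_queues-1, spreading RSS buckets
 * evenly across the VF's Rx queues.
 */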
1891
1892static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1893 struct ixgbevf_ring *ring)
1894{
1895 struct ixgbe_hw *hw = &adapter->hw;
1896 union ixgbe_adv_rx_desc *rx_desc;
1897 u64 rdba = ring->dma;
1898 u32 rxdctl;
1899 u8 reg_idx = ring->reg_idx;
1900
1901
1902 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1903 ixgbevf_disable_rx_queue(adapter, ring);
1904
1905 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1906 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1907 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1908 ring->count * sizeof(union ixgbe_adv_rx_desc));
1909
1910#ifndef CONFIG_SPARC
1911
1912 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1913 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1914#else
1915 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1916 IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1917 IXGBE_DCA_RXCTRL_DATA_WRO_EN);
1918#endif
1919
1920
1921 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1922 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1923 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1924
1925
1926 memset(ring->rx_buffer_info, 0,
1927 sizeof(struct ixgbevf_rx_buffer) * ring->count);
1928
1929
1930 rx_desc = IXGBEVF_RX_DESC(ring, 0);
1931 rx_desc->wb.upper.length = 0;
1932
1933
1934 ring->next_to_clean = 0;
1935 ring->next_to_use = 0;
1936 ring->next_to_alloc = 0;
1937
1938 ixgbevf_configure_srrctl(adapter, ring, reg_idx);
1939
1940
1941 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
1942 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
1943 IXGBE_RXDCTL_RLPML_EN);
1944
1945#if (PAGE_SIZE < 8192)
1946
1947 if (ring_uses_build_skb(ring) &&
1948 !ring_uses_large_buffer(ring))
1949 rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
1950 IXGBE_RXDCTL_RLPML_EN;
1951#endif
1952 }
1953
1954 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1955 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1956
1957 ixgbevf_rx_desc_queue_enable(adapter, ring);
1958 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1959}
1960
1961static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
1962 struct ixgbevf_ring *rx_ring)
1963{
1964 struct net_device *netdev = adapter->netdev;
1965 unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1966
1967
1968 clear_ring_build_skb_enabled(rx_ring);
1969 clear_ring_uses_large_buffer(rx_ring);
1970
1971 if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
1972 return;
1973
1974 set_ring_build_skb_enabled(rx_ring);
1975
1976 if (PAGE_SIZE < 8192) {
1977 if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
1978 return;
1979
1980 set_ring_uses_large_buffer(rx_ring);
1981 }
1982}
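
/* Buffer sizing summary for ixgbevf_set_rx_buffer_len() above: legacy-rx
 * mode keeps the old non-build_skb path; otherwise build_skb is enabled,
 * and on 4K-page systems frames larger than IXGBEVF_MAX_FRAME_BUILD_SKB
 * additionally switch the ring to large (3K) buffers so a full frame still
 * fits in a single buffer alongside the skb_shared_info overhead.
 */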

/**
 * ixgbevf_configure_rx - Configure VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
1990static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1991{
1992 struct ixgbe_hw *hw = &adapter->hw;
1993 struct net_device *netdev = adapter->netdev;
1994 int i, ret;
1995
1996 ixgbevf_setup_psrtype(adapter);
1997 if (hw->mac.type >= ixgbe_mac_X550_vf)
1998 ixgbevf_setup_vfmrqc(adapter);
1999
2000 spin_lock_bh(&adapter->mbx_lock);
2001
2002 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
2003 spin_unlock_bh(&adapter->mbx_lock);
2004 if (ret)
2005 dev_err(&adapter->pdev->dev,
2006 "Failed to set MTU at %d\n", netdev->mtu);
2007
2008
2009
2010
2011 for (i = 0; i < adapter->num_rx_queues; i++) {
2012 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
2013
2014 ixgbevf_set_rx_buffer_len(adapter, rx_ring);
2015 ixgbevf_configure_rx_ring(adapter, rx_ring);
2016 }
2017}
2018
2019static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
2020 __be16 proto, u16 vid)
2021{
2022 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2023 struct ixgbe_hw *hw = &adapter->hw;
2024 int err;
2025
2026 spin_lock_bh(&adapter->mbx_lock);
2027
2028
2029 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
2030
2031 spin_unlock_bh(&adapter->mbx_lock);
2032
2033
2034 if (err == IXGBE_ERR_MBX)
2035 return -EIO;
2036
2037 if (err == IXGBE_ERR_INVALID_ARGUMENT)
2038 return -EACCES;
2039
2040 set_bit(vid, adapter->active_vlans);
2041
2042 return err;
2043}
2044
2045static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
2046 __be16 proto, u16 vid)
2047{
2048 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2049 struct ixgbe_hw *hw = &adapter->hw;
2050 int err;
2051
2052 spin_lock_bh(&adapter->mbx_lock);
2053
2054
2055 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
2056
2057 spin_unlock_bh(&adapter->mbx_lock);
2058
2059 clear_bit(vid, adapter->active_vlans);
2060
2061 return err;
2062}
2063
2064static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
2065{
2066 u16 vid;
2067
2068 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2069 ixgbevf_vlan_rx_add_vid(adapter->netdev,
2070 htons(ETH_P_8021Q), vid);
2071}
2072
2073static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
2074{
2075 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2076 struct ixgbe_hw *hw = &adapter->hw;
2077 int count = 0;
2078
2079 if ((netdev_uc_count(netdev)) > 10) {
2080 pr_err("Too many unicast filters - No Space\n");
2081 return -ENOSPC;
2082 }
2083
2084 if (!netdev_uc_empty(netdev)) {
2085 struct netdev_hw_addr *ha;
2086
2087 netdev_for_each_uc_addr(ha, netdev) {
2088 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
2089 udelay(200);
2090 }
2091 } else {
2092
2093
2094
2095 hw->mac.ops.set_uc_addr(hw, 0, NULL);
2096 }
2097
2098 return count;
2099}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and promiscuous mode.
 **/
2110static void ixgbevf_set_rx_mode(struct net_device *netdev)
2111{
2112 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2113 struct ixgbe_hw *hw = &adapter->hw;
2114 unsigned int flags = netdev->flags;
2115 int xcast_mode;
2116
2117
2118 if (flags & IFF_PROMISC)
2119 xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
2120 else if (flags & IFF_ALLMULTI)
2121 xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
2122 else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
2123 xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
2124 else
2125 xcast_mode = IXGBEVF_XCAST_MODE_NONE;
2126
2127 spin_lock_bh(&adapter->mbx_lock);
2128
2129 hw->mac.ops.update_xcast_mode(hw, xcast_mode);
2130
2131
2132 hw->mac.ops.update_mc_addr_list(hw, netdev);
2133
2134 ixgbevf_write_uc_addr_list(netdev);
2135
2136 spin_unlock_bh(&adapter->mbx_lock);
2137}
2138
2139static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
2140{
2141 int q_idx;
2142 struct ixgbevf_q_vector *q_vector;
2143 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2144
2145 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2146 q_vector = adapter->q_vector[q_idx];
2147 napi_enable(&q_vector->napi);
2148 }
2149}
2150
2151static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
2152{
2153 int q_idx;
2154 struct ixgbevf_q_vector *q_vector;
2155 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2156
2157 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2158 q_vector = adapter->q_vector[q_idx];
2159 napi_disable(&q_vector->napi);
2160 }
2161}
2162
2163static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
2164{
2165 struct ixgbe_hw *hw = &adapter->hw;
2166 unsigned int def_q = 0;
2167 unsigned int num_tcs = 0;
2168 unsigned int num_rx_queues = adapter->num_rx_queues;
2169 unsigned int num_tx_queues = adapter->num_tx_queues;
2170 int err;
2171
2172 spin_lock_bh(&adapter->mbx_lock);
2173
2174
2175 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2176
2177 spin_unlock_bh(&adapter->mbx_lock);
2178
2179 if (err)
2180 return err;
2181
2182 if (num_tcs > 1) {
2183
2184 num_tx_queues = 1;
2185
2186
2187 adapter->tx_ring[0]->reg_idx = def_q;
2188
2189
2190 num_rx_queues = num_tcs;
2191 }
2192
2193
2194 if ((adapter->num_rx_queues != num_rx_queues) ||
2195 (adapter->num_tx_queues != num_tx_queues)) {
2196
2197 hw->mbx.timeout = 0;
2198
2199
2200 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
2201 }
2202
2203 return 0;
2204}
2205
2206static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
2207{
2208 ixgbevf_configure_dcb(adapter);
2209
2210 ixgbevf_set_rx_mode(adapter->netdev);
2211
2212 ixgbevf_restore_vlan(adapter);
2213 ixgbevf_ipsec_restore(adapter);
2214
2215 ixgbevf_configure_tx(adapter);
2216 ixgbevf_configure_rx(adapter);
2217}
2218
2219static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
2220{
2221
2222 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2223 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2224 adapter->stats.base_vfgprc;
2225 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2226 adapter->stats.base_vfgptc;
2227 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2228 adapter->stats.base_vfgorc;
2229 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2230 adapter->stats.base_vfgotc;
2231 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2232 adapter->stats.base_vfmprc;
2233 }
2234}
2235
2236static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2237{
2238 struct ixgbe_hw *hw = &adapter->hw;
2239
2240 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2241 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2242 adapter->stats.last_vfgorc |=
2243 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2244 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2245 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2246 adapter->stats.last_vfgotc |=
2247 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2248 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2249
2250 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2251 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2252 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2253 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2254 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2255}
2256
2257static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2258{
2259 struct ixgbe_hw *hw = &adapter->hw;
2260 int api[] = { ixgbe_mbox_api_14,
2261 ixgbe_mbox_api_13,
2262 ixgbe_mbox_api_12,
2263 ixgbe_mbox_api_11,
2264 ixgbe_mbox_api_10,
2265 ixgbe_mbox_api_unknown };
2266 int err, idx = 0;
2267
2268 spin_lock_bh(&adapter->mbx_lock);
2269
2270 while (api[idx] != ixgbe_mbox_api_unknown) {
2271 err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
2272 if (!err)
2273 break;
2274 idx++;
2275 }
2276
2277 spin_unlock_bh(&adapter->mbx_lock);
2278}
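
/* API negotiation walks the list above from newest (ixgbe_mbox_api_14) to
 * oldest and settles on the first version the PF acknowledges, so a VF
 * driver paired with an older PF falls back to a reduced feature set
 * instead of failing to come up.
 */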
2279
2280static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2281{
2282 struct net_device *netdev = adapter->netdev;
2283 struct ixgbe_hw *hw = &adapter->hw;
2284
2285 ixgbevf_configure_msix(adapter);
2286
2287 spin_lock_bh(&adapter->mbx_lock);
2288
2289 if (is_valid_ether_addr(hw->mac.addr))
2290 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2291 else
2292 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2293
2294 spin_unlock_bh(&adapter->mbx_lock);
2295
2296 smp_mb__before_atomic();
2297 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2298 ixgbevf_napi_enable_all(adapter);
2299
2300
2301 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2302 ixgbevf_irq_enable(adapter);
2303
2304
2305 netif_tx_start_all_queues(netdev);
2306
2307 ixgbevf_save_reset_stats(adapter);
2308 ixgbevf_init_last_counter_stats(adapter);
2309
2310 hw->mac.get_link_status = 1;
2311 mod_timer(&adapter->service_timer, jiffies);
2312}
2313
2314void ixgbevf_up(struct ixgbevf_adapter *adapter)
2315{
2316 ixgbevf_configure(adapter);
2317
2318 ixgbevf_up_complete(adapter);
2319}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
2325static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2326{
2327 u16 i = rx_ring->next_to_clean;
2328
2329
2330 if (rx_ring->skb) {
2331 dev_kfree_skb(rx_ring->skb);
2332 rx_ring->skb = NULL;
2333 }
2334
2335
2336 while (i != rx_ring->next_to_alloc) {
2337 struct ixgbevf_rx_buffer *rx_buffer;
2338
2339 rx_buffer = &rx_ring->rx_buffer_info[i];
2340
2341
2342
2343
2344 dma_sync_single_range_for_cpu(rx_ring->dev,
2345 rx_buffer->dma,
2346 rx_buffer->page_offset,
2347 ixgbevf_rx_bufsz(rx_ring),
2348 DMA_FROM_DEVICE);
2349
2350
2351 dma_unmap_page_attrs(rx_ring->dev,
2352 rx_buffer->dma,
2353 ixgbevf_rx_pg_size(rx_ring),
2354 DMA_FROM_DEVICE,
2355 IXGBEVF_RX_DMA_ATTR);
2356
2357 __page_frag_cache_drain(rx_buffer->page,
2358 rx_buffer->pagecnt_bias);
2359
2360 i++;
2361 if (i == rx_ring->count)
2362 i = 0;
2363 }
2364
2365 rx_ring->next_to_alloc = 0;
2366 rx_ring->next_to_clean = 0;
2367 rx_ring->next_to_use = 0;
2368}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
2374static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2375{
2376 u16 i = tx_ring->next_to_clean;
2377 struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
2378
2379 while (i != tx_ring->next_to_use) {
2380 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
2381
2382
2383 if (ring_is_xdp(tx_ring))
2384 page_frag_free(tx_buffer->data);
2385 else
2386 dev_kfree_skb_any(tx_buffer->skb);
2387
2388
2389 dma_unmap_single(tx_ring->dev,
2390 dma_unmap_addr(tx_buffer, dma),
2391 dma_unmap_len(tx_buffer, len),
2392 DMA_TO_DEVICE);
2393
2394
2395 eop_desc = tx_buffer->next_to_watch;
2396 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2397
2398
2399 while (tx_desc != eop_desc) {
2400 tx_buffer++;
2401 tx_desc++;
2402 i++;
2403 if (unlikely(i == tx_ring->count)) {
2404 i = 0;
2405 tx_buffer = tx_ring->tx_buffer_info;
2406 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
2407 }
2408
2409
2410 if (dma_unmap_len(tx_buffer, len))
2411 dma_unmap_page(tx_ring->dev,
2412 dma_unmap_addr(tx_buffer, dma),
2413 dma_unmap_len(tx_buffer, len),
2414 DMA_TO_DEVICE);
2415 }
2416
2417
2418 tx_buffer++;
2419 i++;
2420 if (unlikely(i == tx_ring->count)) {
2421 i = 0;
2422 tx_buffer = tx_ring->tx_buffer_info;
2423 }
2424 }
2425
2426
2427 tx_ring->next_to_use = 0;
2428 tx_ring->next_to_clean = 0;
2429
2430}
2431
2432
2433
2434
2435
2436static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2437{
2438 int i;
2439
2440 for (i = 0; i < adapter->num_rx_queues; i++)
2441 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2442}
2443
2444
2445
2446
2447
2448static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2449{
2450 int i;
2451
2452 for (i = 0; i < adapter->num_tx_queues; i++)
2453 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2454 for (i = 0; i < adapter->num_xdp_queues; i++)
2455 ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
2456}
2457
2458void ixgbevf_down(struct ixgbevf_adapter *adapter)
2459{
2460 struct net_device *netdev = adapter->netdev;
2461 struct ixgbe_hw *hw = &adapter->hw;
2462 int i;
2463
2464
2465 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2466 return;
2467
2468
2469 for (i = 0; i < adapter->num_rx_queues; i++)
2470 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2471
2472 usleep_range(10000, 20000);
2473
2474 netif_tx_stop_all_queues(netdev);
2475
2476
2477 netif_carrier_off(netdev);
2478 netif_tx_disable(netdev);
2479
2480 ixgbevf_irq_disable(adapter);
2481
2482 ixgbevf_napi_disable_all(adapter);
2483
2484 del_timer_sync(&adapter->service_timer);
2485
2486
2487 for (i = 0; i < adapter->num_tx_queues; i++) {
2488 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2489
2490 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2491 IXGBE_TXDCTL_SWFLSH);
2492 }
2493
2494 for (i = 0; i < adapter->num_xdp_queues; i++) {
2495 u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
2496
2497 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2498 IXGBE_TXDCTL_SWFLSH);
2499 }
2500
2501 if (!pci_channel_offline(adapter->pdev))
2502 ixgbevf_reset(adapter);
2503
2504 ixgbevf_clean_all_tx_rings(adapter);
2505 ixgbevf_clean_all_rx_rings(adapter);
2506}
2507
2508void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2509{
2510 WARN_ON(in_interrupt());
2511
2512 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2513 msleep(1);
2514
2515 ixgbevf_down(adapter);
2516 ixgbevf_up(adapter);
2517
2518 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2519}
2520
2521void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2522{
2523 struct ixgbe_hw *hw = &adapter->hw;
2524 struct net_device *netdev = adapter->netdev;
2525
2526 if (hw->mac.ops.reset_hw(hw)) {
2527 hw_dbg(hw, "PF still resetting\n");
2528 } else {
2529 hw->mac.ops.init_hw(hw);
2530 ixgbevf_negotiate_api(adapter);
2531 }
2532
2533 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2534 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2535 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2536 }
2537
2538 adapter->last_reset = jiffies;
2539}
2540
2541static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2542 int vectors)
2543{
2544 int vector_threshold;
2545
2546
2547
2548
2549
2550 vector_threshold = MIN_MSIX_COUNT;
2551
2552
2553
2554
2555
2556
2557 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2558 vector_threshold, vectors);
2559
2560 if (vectors < 0) {
2561 dev_err(&adapter->pdev->dev,
2562 "Unable to allocate MSI-X interrupts\n");
2563 kfree(adapter->msix_entries);
2564 adapter->msix_entries = NULL;
2565 return vectors;
2566 }
2567
2568
2569
2570
2571
2572 adapter->num_msix_vectors = vectors;
2573
2574 return 0;
2575}
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
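/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * Determine the number of Rx, Tx and XDP queues from the queue/TC
 * configuration reported by the PF, the number of online CPUs and
 * whether an XDP program is attached.
 **/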
2588static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2589{
2590 struct ixgbe_hw *hw = &adapter->hw;
2591 unsigned int def_q = 0;
2592 unsigned int num_tcs = 0;
2593 int err;
2594
2595
2596 adapter->num_rx_queues = 1;
2597 adapter->num_tx_queues = 1;
2598 adapter->num_xdp_queues = 0;
2599
2600 spin_lock_bh(&adapter->mbx_lock);
2601
2602
2603 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2604
2605 spin_unlock_bh(&adapter->mbx_lock);
2606
2607 if (err)
2608 return;
2609
2610
2611 if (num_tcs > 1) {
2612 adapter->num_rx_queues = num_tcs;
2613 } else {
2614 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2615
2616 switch (hw->api_version) {
2617 case ixgbe_mbox_api_11:
2618 case ixgbe_mbox_api_12:
2619 case ixgbe_mbox_api_13:
2620 case ixgbe_mbox_api_14:
2621 if (adapter->xdp_prog &&
2622 hw->mac.max_tx_queues == rss)
2623 rss = rss > 3 ? 2 : 1;
2624
2625 adapter->num_rx_queues = rss;
2626 adapter->num_tx_queues = rss;
			adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
			break;
		default:
			break;
2630 }
2631 }
2632}
2633
2634
2635
2636
2637
2638
2639
2640
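/**
 * ixgbevf_set_interrupt_capability - set MSI-X or fail if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/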
2641static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2642{
2643 int vector, v_budget;
2644
2645
2646
2647
2648
2649
2650
2651 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2652 v_budget = min_t(int, v_budget, num_online_cpus());
2653 v_budget += NON_Q_VECTORS;
2654
2655 adapter->msix_entries = kcalloc(v_budget,
2656 sizeof(struct msix_entry), GFP_KERNEL);
2657 if (!adapter->msix_entries)
2658 return -ENOMEM;
2659
2660 for (vector = 0; vector < v_budget; vector++)
2661 adapter->msix_entries[vector].entry = vector;
2662
2663
2664
2665
2666
2667 return ixgbevf_acquire_msix_vectors(adapter, v_budget);
2668}
2669
2670static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
2671 struct ixgbevf_ring_container *head)
2672{
2673 ring->next = head->ring;
2674 head->ring = ring;
2675 head->count++;
2676}
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
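/**
 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 * @txr_count: number of Tx rings for q vector
 * @txr_idx: index of first Tx ring to assign
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: number of Rx rings for q vector
 * @rxr_idx: index of first Rx ring to assign
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/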
2691static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
2692 int txr_count, int txr_idx,
2693 int xdp_count, int xdp_idx,
2694 int rxr_count, int rxr_idx)
2695{
2696 struct ixgbevf_q_vector *q_vector;
2697 int reg_idx = txr_idx + xdp_idx;
2698 struct ixgbevf_ring *ring;
2699 int ring_count, size;
2700
2701 ring_count = txr_count + xdp_count + rxr_count;
2702 size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
2703
2704
2705 q_vector = kzalloc(size, GFP_KERNEL);
2706 if (!q_vector)
2707 return -ENOMEM;
2708
2709
2710 netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
2711
2712
2713 adapter->q_vector[v_idx] = q_vector;
2714 q_vector->adapter = adapter;
2715 q_vector->v_idx = v_idx;
2716
2717
2718 ring = q_vector->ring;
2719
2720 while (txr_count) {
2721
2722 ring->dev = &adapter->pdev->dev;
2723 ring->netdev = adapter->netdev;
2724
2725
2726 ring->q_vector = q_vector;
2727
2728
2729 ixgbevf_add_ring(ring, &q_vector->tx);
2730
2731
2732 ring->count = adapter->tx_ring_count;
2733 ring->queue_index = txr_idx;
2734 ring->reg_idx = reg_idx;
2735
2736
2737 adapter->tx_ring[txr_idx] = ring;
2738
2739
2740 txr_count--;
2741 txr_idx++;
2742 reg_idx++;
2743
2744
2745 ring++;
2746 }
2747
2748 while (xdp_count) {
2749
2750 ring->dev = &adapter->pdev->dev;
2751 ring->netdev = adapter->netdev;
2752
2753
2754 ring->q_vector = q_vector;
2755
2756
2757 ixgbevf_add_ring(ring, &q_vector->tx);
2758
2759
2760 ring->count = adapter->tx_ring_count;
2761 ring->queue_index = xdp_idx;
2762 ring->reg_idx = reg_idx;
2763 set_ring_xdp(ring);
2764
2765
2766 adapter->xdp_ring[xdp_idx] = ring;
2767
2768
2769 xdp_count--;
2770 xdp_idx++;
2771 reg_idx++;
2772
2773
2774 ring++;
2775 }
2776
2777 while (rxr_count) {
2778
2779 ring->dev = &adapter->pdev->dev;
2780 ring->netdev = adapter->netdev;
2781
2782
2783 ring->q_vector = q_vector;
2784
2785
2786 ixgbevf_add_ring(ring, &q_vector->rx);
2787
2788
2789 ring->count = adapter->rx_ring_count;
2790 ring->queue_index = rxr_idx;
2791 ring->reg_idx = rxr_idx;
2792
2793
2794 adapter->rx_ring[rxr_idx] = ring;
2795
2796
2797 rxr_count--;
2798 rxr_idx++;
2799
2800
2801 ring++;
2802 }
2803
2804 return 0;
2805}
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
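/**
 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 *
 * This function frees the memory allocated to the q_vector.  In addition
 * it deletes any references to the NAPI struct prior to freeing the
 * q_vector.
 **/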
2816static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
2817{
2818 struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
2819 struct ixgbevf_ring *ring;
2820
2821 ixgbevf_for_each_ring(ring, q_vector->tx) {
2822 if (ring_is_xdp(ring))
2823 adapter->xdp_ring[ring->queue_index] = NULL;
2824 else
2825 adapter->tx_ring[ring->queue_index] = NULL;
2826 }
2827
2828 ixgbevf_for_each_ring(ring, q_vector->rx)
2829 adapter->rx_ring[ring->queue_index] = NULL;
2830
2831 adapter->q_vector[v_idx] = NULL;
2832 netif_napi_del(&q_vector->napi);
2833
2834
2835
2836
2837 kfree_rcu(q_vector, rcu);
2838}
2839
2840
2841
2842
2843
2844
2845
2846
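/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/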
2847static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2848{
2849 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2850 int rxr_remaining = adapter->num_rx_queues;
2851 int txr_remaining = adapter->num_tx_queues;
2852 int xdp_remaining = adapter->num_xdp_queues;
2853 int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
2854 int err;
2855
2856 if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
2857 for (; rxr_remaining; v_idx++, q_vectors--) {
2858 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2859
2860 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2861 0, 0, 0, 0, rqpv, rxr_idx);
2862 if (err)
2863 goto err_out;
2864
2865
2866 rxr_remaining -= rqpv;
2867 rxr_idx += rqpv;
2868 }
2869 }
2870
2871 for (; q_vectors; v_idx++, q_vectors--) {
2872 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2873 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
2874 int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors);
2875
2876 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2877 tqpv, txr_idx,
2878 xqpv, xdp_idx,
2879 rqpv, rxr_idx);
2880
2881 if (err)
2882 goto err_out;
2883
2884
2885 rxr_remaining -= rqpv;
2886 rxr_idx += rqpv;
2887 txr_remaining -= tqpv;
2888 txr_idx += tqpv;
2889 xdp_remaining -= xqpv;
2890 xdp_idx += xqpv;
2891 }
2892
2893 return 0;
2894
2895err_out:
2896 while (v_idx) {
2897 v_idx--;
2898 ixgbevf_free_q_vector(adapter, v_idx);
2899 }
2900
2901 return -ENOMEM;
2902}
2903
2904
2905
2906
2907
2908
2909
2910
2911
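/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors and deletes
 * any references to the NAPI structs prior to freeing them.
 **/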
2912static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2913{
2914 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2915
2916 while (q_vectors) {
2917 q_vectors--;
2918 ixgbevf_free_q_vector(adapter, q_vectors);
2919 }
2920}
2921
2922
2923
2924
2925
2926
2927static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2928{
2929 if (!adapter->msix_entries)
2930 return;
2931
2932 pci_disable_msix(adapter->pdev);
2933 kfree(adapter->msix_entries);
2934 adapter->msix_entries = NULL;
2935}
2936
2937
2938
2939
2940
2941
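/**
 * ixgbevf_init_interrupt_scheme - Determine if MSI-X is supported and init
 * @adapter: board private structure to initialize
 *
 * Set the number of queues, acquire the MSI-X vectors and allocate the
 * q_vectors needed to run the device.
 **/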
2942static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2943{
2944 int err;
2945
2946
2947 ixgbevf_set_num_queues(adapter);
2948
2949 err = ixgbevf_set_interrupt_capability(adapter);
2950 if (err) {
2951 hw_dbg(&adapter->hw,
2952 "Unable to setup interrupt capabilities\n");
2953 goto err_set_interrupt;
2954 }
2955
2956 err = ixgbevf_alloc_q_vectors(adapter);
2957 if (err) {
2958 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2959 goto err_alloc_q_vectors;
2960 }
2961
	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
2963 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
2964 adapter->num_rx_queues, adapter->num_tx_queues,
2965 adapter->num_xdp_queues);
2966
2967 set_bit(__IXGBEVF_DOWN, &adapter->state);
2968
2969 return 0;
2970err_alloc_q_vectors:
2971 ixgbevf_reset_interrupt_capability(adapter);
2972err_set_interrupt:
2973 return err;
2974}
2975
2976
2977
2978
2979
2980
2981
2982
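/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the
 * structure to pre-load conditions.
 **/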
2983static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2984{
2985 adapter->num_tx_queues = 0;
2986 adapter->num_xdp_queues = 0;
2987 adapter->num_rx_queues = 0;
2988
2989 ixgbevf_free_q_vectors(adapter);
2990 ixgbevf_reset_interrupt_capability(adapter);
2991}
2992
2993
2994
2995
2996
2997
2998
2999
3000
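/**
 * ixgbevf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/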
3001static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
3002{
3003 struct ixgbe_hw *hw = &adapter->hw;
3004 struct pci_dev *pdev = adapter->pdev;
3005 struct net_device *netdev = adapter->netdev;
3006 int err;
3007
3008
3009 hw->vendor_id = pdev->vendor;
3010 hw->device_id = pdev->device;
3011 hw->revision_id = pdev->revision;
3012 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3013 hw->subsystem_device_id = pdev->subsystem_device;
3014
3015 hw->mbx.ops.init_params(hw);
3016
3017 if (hw->mac.type >= ixgbe_mac_X550_vf) {
3018 err = ixgbevf_init_rss_key(adapter);
3019 if (err)
3020 goto out;
3021 }
3022
3023
3024 hw->mac.max_tx_queues = 2;
3025 hw->mac.max_rx_queues = 2;
3026
3027
3028 spin_lock_init(&adapter->mbx_lock);
3029
3030 err = hw->mac.ops.reset_hw(hw);
3031 if (err) {
3032 dev_info(&pdev->dev,
3033 "PF still in reset state. Is the PF interface up?\n");
3034 } else {
3035 err = hw->mac.ops.init_hw(hw);
3036 if (err) {
3037 pr_err("init_shared_code failed: %d\n", err);
3038 goto out;
3039 }
3040 ixgbevf_negotiate_api(adapter);
3041 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
3042 if (err)
3043 dev_info(&pdev->dev, "Error reading MAC address\n");
3044 else if (is_zero_ether_addr(adapter->hw.mac.addr))
3045 dev_info(&pdev->dev,
3046 "MAC address not assigned by administrator.\n");
3047 ether_addr_copy(netdev->dev_addr, hw->mac.addr);
3048 }
3049
3050 if (!is_valid_ether_addr(netdev->dev_addr)) {
3051 dev_info(&pdev->dev, "Assigning random MAC address\n");
3052 eth_hw_addr_random(netdev);
3053 ether_addr_copy(hw->mac.addr, netdev->dev_addr);
3054 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
3055 }
3056
3057
3058 adapter->rx_itr_setting = 1;
3059 adapter->tx_itr_setting = 1;
3060
3061
3062 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
3063 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
3064
3065 set_bit(__IXGBEVF_DOWN, &adapter->state);
3066 return 0;
3067
3068out:
3069 return err;
3070}
3071
3072#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
3073 { \
3074 u32 current_counter = IXGBE_READ_REG(hw, reg); \
3075 if (current_counter < last_counter) \
3076 counter += 0x100000000LL; \
3077 last_counter = current_counter; \
3078 counter &= 0xFFFFFFFF00000000LL; \
3079 counter |= current_counter; \
3080 }
3081
3082#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
3083 { \
3084 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
3085 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
3086 u64 current_counter = (current_counter_msb << 32) | \
3087 current_counter_lsb; \
3088 if (current_counter < last_counter) \
3089 counter += 0x1000000000LL; \
3090 last_counter = current_counter; \
3091 counter &= 0xFFFFFFF000000000LL; \
3092 counter |= current_counter; \
3093 }
3094
3095
3096
3097
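/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/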
3098void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
3099{
3100 struct ixgbe_hw *hw = &adapter->hw;
3101 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
3102 u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
3103 int i;
3104
3105 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3106 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3107 return;
3108
3109 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3110 adapter->stats.vfgprc);
3111 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3112 adapter->stats.vfgptc);
3113 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3114 adapter->stats.last_vfgorc,
3115 adapter->stats.vfgorc);
3116 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3117 adapter->stats.last_vfgotc,
3118 adapter->stats.vfgotc);
3119 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3120 adapter->stats.vfmprc);
3121
3122 for (i = 0; i < adapter->num_rx_queues; i++) {
3123 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
3124
3125 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
3126 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
3127 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
3128 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
3129 }
3130
3131 adapter->hw_csum_rx_error = hw_csum_rx_error;
3132 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
3133 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
3134 adapter->alloc_rx_page = alloc_rx_page;
3135}
3136
3137
3138
3139
3140
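/**
 * ixgbevf_service_timer - Timer Call-back
 * @t: pointer to timer_list struct
 **/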
3141static void ixgbevf_service_timer(struct timer_list *t)
3142{
3143 struct ixgbevf_adapter *adapter = from_timer(adapter, t,
3144 service_timer);
3145
3146
3147 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
3148
3149 ixgbevf_service_event_schedule(adapter);
3150}
3151
3152static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
3153{
3154 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
3155 return;
3156
3157 rtnl_lock();
3158
3159 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3160 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
3161 test_bit(__IXGBEVF_RESETTING, &adapter->state)) {
3162 rtnl_unlock();
3163 return;
3164 }
3165
3166 adapter->tx_timeout_count++;
3167
3168 ixgbevf_reinit_locked(adapter);
3169 rtnl_unlock();
3170}
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
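/**
 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for Tx hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/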
3181static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
3182{
3183 struct ixgbe_hw *hw = &adapter->hw;
3184 u32 eics = 0;
3185 int i;
3186
3187
3188 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3189 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3190 return;
3191
3192
3193 if (netif_carrier_ok(adapter->netdev)) {
3194 for (i = 0; i < adapter->num_tx_queues; i++)
3195 set_check_for_tx_hang(adapter->tx_ring[i]);
3196 for (i = 0; i < adapter->num_xdp_queues; i++)
3197 set_check_for_tx_hang(adapter->xdp_ring[i]);
3198 }
3199
3200
3201 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
3202 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
3203
3204 if (qv->rx.ring || qv->tx.ring)
3205 eics |= BIT(i);
3206 }
3207
3208
3209 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
3210}
3211
3212
3213
3214
3215
3216static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
3217{
3218 struct ixgbe_hw *hw = &adapter->hw;
3219 u32 link_speed = adapter->link_speed;
3220 bool link_up = adapter->link_up;
3221 s32 err;
3222
3223 spin_lock_bh(&adapter->mbx_lock);
3224
3225 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3226
3227 spin_unlock_bh(&adapter->mbx_lock);
3228
3229
3230 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
3231 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
3232 link_up = false;
3233 }
3234
3235 adapter->link_up = link_up;
3236 adapter->link_speed = link_speed;
3237}
3238
3239
3240
3241
3242
3243
3244static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
3245{
3246 struct net_device *netdev = adapter->netdev;
3247
3248
3249 if (netif_carrier_ok(netdev))
3250 return;
3251
3252 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
3253 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
3254 "10 Gbps" :
3255 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
3256 "1 Gbps" :
3257 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
3258 "100 Mbps" :
3259 "unknown speed");
3260
3261 netif_carrier_on(netdev);
3262}
3263
3264
3265
3266
3267
3268
3269static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
3270{
3271 struct net_device *netdev = adapter->netdev;
3272
3273 adapter->link_speed = 0;
3274
3275
3276 if (!netif_carrier_ok(netdev))
3277 return;
3278
3279 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
3280
3281 netif_carrier_off(netdev);
3282}
3283
3284
3285
3286
3287
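/**
 * ixgbevf_watchdog_subtask - worker thread to bring link up
 * @adapter: board private structure
 **/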
3288static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
3289{
3290
3291 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3292 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3293 return;
3294
3295 ixgbevf_watchdog_update_link(adapter);
3296
3297 if (adapter->link_up)
3298 ixgbevf_watchdog_link_is_up(adapter);
3299 else
3300 ixgbevf_watchdog_link_is_down(adapter);
3301
3302 ixgbevf_update_stats(adapter);
3303}
3304
3305
3306
3307
3308
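/**
 * ixgbevf_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/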
3309static void ixgbevf_service_task(struct work_struct *work)
3310{
3311 struct ixgbevf_adapter *adapter = container_of(work,
3312 struct ixgbevf_adapter,
3313 service_task);
3314 struct ixgbe_hw *hw = &adapter->hw;
3315
3316 if (IXGBE_REMOVED(hw->hw_addr)) {
3317 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
3318 rtnl_lock();
3319 ixgbevf_down(adapter);
3320 rtnl_unlock();
3321 }
3322 return;
3323 }
3324
3325 ixgbevf_queue_reset_subtask(adapter);
3326 ixgbevf_reset_subtask(adapter);
3327 ixgbevf_watchdog_subtask(adapter);
3328 ixgbevf_check_hang_subtask(adapter);
3329
3330 ixgbevf_service_event_complete(adapter);
3331}
3332
3333
3334
3335
3336
3337
3338
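/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/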
3339void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
3340{
3341 ixgbevf_clean_tx_ring(tx_ring);
3342
3343 vfree(tx_ring->tx_buffer_info);
3344 tx_ring->tx_buffer_info = NULL;
3345
3346
3347 if (!tx_ring->desc)
3348 return;
3349
3350 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
3351 tx_ring->dma);
3352
3353 tx_ring->desc = NULL;
3354}
3355
3356
3357
3358
3359
3360
3361
3362static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
3363{
3364 int i;
3365
3366 for (i = 0; i < adapter->num_tx_queues; i++)
3367 if (adapter->tx_ring[i]->desc)
3368 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3369 for (i = 0; i < adapter->num_xdp_queues; i++)
3370 if (adapter->xdp_ring[i]->desc)
3371 ixgbevf_free_tx_resources(adapter->xdp_ring[i]);
3372}
3373
3374
3375
3376
3377
3378
3379
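/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/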
3380int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
3381{
3382 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3383 int size;
3384
3385 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
3386 tx_ring->tx_buffer_info = vmalloc(size);
3387 if (!tx_ring->tx_buffer_info)
3388 goto err;
3389
3390 u64_stats_init(&tx_ring->syncp);
3391
3392
3393 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
3394 tx_ring->size = ALIGN(tx_ring->size, 4096);
3395
3396 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
3397 &tx_ring->dma, GFP_KERNEL);
3398 if (!tx_ring->desc)
3399 goto err;
3400
3401 return 0;
3402
3403err:
3404 vfree(tx_ring->tx_buffer_info);
3405 tx_ring->tx_buffer_info = NULL;
3406 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
3407 return -ENOMEM;
3408}
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
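/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/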
3420static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3421{
3422 int i, j = 0, err = 0;
3423
3424 for (i = 0; i < adapter->num_tx_queues; i++) {
3425 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3426 if (!err)
3427 continue;
3428 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3429 goto err_setup_tx;
3430 }
3431
3432 for (j = 0; j < adapter->num_xdp_queues; j++) {
3433 err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]);
3434 if (!err)
3435 continue;
3436 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
3437 goto err_setup_tx;
3438 }
3439
3440 return 0;
3441err_setup_tx:
3442
3443 while (j--)
3444 ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
3445 while (i--)
3446 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3447
3448 return err;
3449}
3450
3451
3452
3453
3454
3455
3456
3457
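/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/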
3458int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
3459 struct ixgbevf_ring *rx_ring)
3460{
3461 int size;
3462
3463 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3464 rx_ring->rx_buffer_info = vmalloc(size);
3465 if (!rx_ring->rx_buffer_info)
3466 goto err;
3467
3468 u64_stats_init(&rx_ring->syncp);
3469
3470
3471 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3472 rx_ring->size = ALIGN(rx_ring->size, 4096);
3473
3474 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3475 &rx_ring->dma, GFP_KERNEL);
3476
3477 if (!rx_ring->desc)
3478 goto err;
3479
3480
3481 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
3482 rx_ring->queue_index) < 0)
3483 goto err;
3484
3485 rx_ring->xdp_prog = adapter->xdp_prog;
3486
3487 return 0;
3488err:
3489 vfree(rx_ring->rx_buffer_info);
3490 rx_ring->rx_buffer_info = NULL;
3491 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3492 return -ENOMEM;
3493}
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
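/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/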
3505static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3506{
3507 int i, err = 0;
3508
3509 for (i = 0; i < adapter->num_rx_queues; i++) {
3510 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
3511 if (!err)
3512 continue;
3513 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3514 goto err_setup_rx;
3515 }
3516
3517 return 0;
3518err_setup_rx:
3519
3520 while (i--)
3521 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3522 return err;
3523}
3524
3525
3526
3527
3528
3529
3530
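/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/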
3531void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3532{
3533 ixgbevf_clean_rx_ring(rx_ring);
3534
3535 rx_ring->xdp_prog = NULL;
3536 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
3537 vfree(rx_ring->rx_buffer_info);
3538 rx_ring->rx_buffer_info = NULL;
3539
3540 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3541 rx_ring->dma);
3542
3543 rx_ring->desc = NULL;
3544}
3545
3546
3547
3548
3549
3550
3551
3552static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3553{
3554 int i;
3555
3556 for (i = 0; i < adapter->num_rx_queues; i++)
3557 if (adapter->rx_ring[i]->desc)
3558 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3559}
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
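/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/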
3573int ixgbevf_open(struct net_device *netdev)
3574{
3575 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3576 struct ixgbe_hw *hw = &adapter->hw;
3577 int err;
3578
3579
3580
3581
3582
3583
3584
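	/* A previous failure to open the device because of a lack of
	 * available MSI-X vector resources may have reset
	 * num_msix_vectors to zero, in which case the interface
	 * cannot be brought up.
	 */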
3585 if (!adapter->num_msix_vectors)
3586 return -ENOMEM;
3587
3588 if (hw->adapter_stopped) {
3589 ixgbevf_reset(adapter);
3590
3591
3592
3593 if (hw->adapter_stopped) {
3594 err = IXGBE_ERR_MBX;
3595 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3596 goto err_setup_reset;
3597 }
3598 }
3599
3600
3601 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3602 return -EBUSY;
3603
3604 netif_carrier_off(netdev);
3605
3606
3607 err = ixgbevf_setup_all_tx_resources(adapter);
3608 if (err)
3609 goto err_setup_tx;
3610
3611
3612 err = ixgbevf_setup_all_rx_resources(adapter);
3613 if (err)
3614 goto err_setup_rx;
3615
3616 ixgbevf_configure(adapter);
3617
3618 err = ixgbevf_request_irq(adapter);
3619 if (err)
3620 goto err_req_irq;
3621
3622
3623 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
3624 if (err)
3625 goto err_set_queues;
3626
3627 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
3628 if (err)
3629 goto err_set_queues;
3630
3631 ixgbevf_up_complete(adapter);
3632
3633 return 0;
3634
3635err_set_queues:
3636 ixgbevf_free_irq(adapter);
3637err_req_irq:
3638 ixgbevf_free_all_rx_resources(adapter);
3639err_setup_rx:
3640 ixgbevf_free_all_tx_resources(adapter);
3641err_setup_tx:
3642 ixgbevf_reset(adapter);
3643err_setup_reset:
3644
3645 return err;
3646}
3647
3648
3649
3650
3651
3652
3653
3654
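/**
 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
 * @adapter: the private adapter struct
 *
 * This function should contain the necessary work common to both suspending
 * and closing of the device.
 **/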
3655static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
3656{
3657 ixgbevf_down(adapter);
3658 ixgbevf_free_irq(adapter);
3659 ixgbevf_free_all_tx_resources(adapter);
3660 ixgbevf_free_all_rx_resources(adapter);
3661}
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
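/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  All transmit and receive resources are freed.
 **/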
3674int ixgbevf_close(struct net_device *netdev)
3675{
3676 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3677
3678 if (netif_device_present(netdev))
3679 ixgbevf_close_suspend(adapter);
3680
3681 return 0;
3682}
3683
3684static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3685{
3686 struct net_device *dev = adapter->netdev;
3687
3688 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
3689 &adapter->state))
3690 return;
3691
3692
3693 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3694 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3695 return;
3696
3697
3698
3699
3700
3701 rtnl_lock();
3702
3703 if (netif_running(dev))
3704 ixgbevf_close(dev);
3705
3706 ixgbevf_clear_interrupt_scheme(adapter);
3707 ixgbevf_init_interrupt_scheme(adapter);
3708
3709 if (netif_running(dev))
3710 ixgbevf_open(dev);
3711
3712 rtnl_unlock();
3713}
3714
3715static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3716 u32 vlan_macip_lens, u32 fceof_saidx,
3717 u32 type_tucmd, u32 mss_l4len_idx)
3718{
3719 struct ixgbe_adv_tx_context_desc *context_desc;
3720 u16 i = tx_ring->next_to_use;
3721
3722 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3723
3724 i++;
3725 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3726
3727
3728 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3729
3730 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3731 context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
3732 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3733 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3734}
3735
3736static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3737 struct ixgbevf_tx_buffer *first,
3738 u8 *hdr_len,
3739 struct ixgbevf_ipsec_tx_data *itd)
3740{
3741 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
3742 struct sk_buff *skb = first->skb;
3743 union {
3744 struct iphdr *v4;
3745 struct ipv6hdr *v6;
3746 unsigned char *hdr;
3747 } ip;
3748 union {
3749 struct tcphdr *tcp;
3750 unsigned char *hdr;
3751 } l4;
3752 u32 paylen, l4_offset;
3753 u32 fceof_saidx = 0;
3754 int err;
3755
3756 if (skb->ip_summed != CHECKSUM_PARTIAL)
3757 return 0;
3758
3759 if (!skb_is_gso(skb))
3760 return 0;
3761
3762 err = skb_cow_head(skb, 0);
3763 if (err < 0)
3764 return err;
3765
3766 if (eth_p_mpls(first->protocol))
3767 ip.hdr = skb_inner_network_header(skb);
3768 else
3769 ip.hdr = skb_network_header(skb);
3770 l4.hdr = skb_checksum_start(skb);
3771
3772
3773 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3774
3775
3776 if (ip.v4->version == 4) {
3777 unsigned char *csum_start = skb_checksum_start(skb);
3778 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
3779 int len = csum_start - trans_start;
3780
3781
3782
3783
3784
3785 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
3786 csum_fold(csum_partial(trans_start,
3787 len, 0)) : 0;
3788 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3789
3790 ip.v4->tot_len = 0;
3791 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3792 IXGBE_TX_FLAGS_CSUM |
3793 IXGBE_TX_FLAGS_IPV4;
3794 } else {
3795 ip.v6->payload_len = 0;
3796 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3797 IXGBE_TX_FLAGS_CSUM;
3798 }
3799
3800
3801 l4_offset = l4.hdr - skb->data;
3802
3803
3804 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3805
3806
3807 paylen = skb->len - l4_offset;
3808 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
3809
3810
3811 first->gso_segs = skb_shinfo(skb)->gso_segs;
3812 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3813
3814
3815 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
3816 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3817 mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
3818
3819 fceof_saidx |= itd->pfsa;
3820 type_tucmd |= itd->flags | itd->trailer_len;
3821
3822
3823 vlan_macip_lens = l4.hdr - ip.hdr;
3824 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
3825 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3826
3827 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
3828 mss_l4len_idx);
3829
3830 return 1;
3831}
3832
3833static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
3834{
3835 unsigned int offset = 0;
3836
3837 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
3838
3839 return offset == skb_checksum_start_offset(skb);
3840}
3841
3842static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3843 struct ixgbevf_tx_buffer *first,
3844 struct ixgbevf_ipsec_tx_data *itd)
3845{
3846 struct sk_buff *skb = first->skb;
3847 u32 vlan_macip_lens = 0;
3848 u32 fceof_saidx = 0;
3849 u32 type_tucmd = 0;
3850
3851 if (skb->ip_summed != CHECKSUM_PARTIAL)
3852 goto no_csum;
3853
3854 switch (skb->csum_offset) {
3855 case offsetof(struct tcphdr, check):
3856 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
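		/* fall through */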
3857
3858 case offsetof(struct udphdr, check):
3859 break;
3860 case offsetof(struct sctphdr, checksum):
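		/* validate that this is actually an SCTP request */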
3861
3862 if (((first->protocol == htons(ETH_P_IP)) &&
3863 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
3864 ((first->protocol == htons(ETH_P_IPV6)) &&
3865 ixgbevf_ipv6_csum_is_sctp(skb))) {
3866 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3867 break;
3868 }
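		/* fall through */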
3869
3870 default:
3871 skb_checksum_help(skb);
3872 goto no_csum;
3873 }
3874
3875 if (first->protocol == htons(ETH_P_IP))
3876 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3877
3878
3879 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3880 vlan_macip_lens = skb_checksum_start_offset(skb) -
3881 skb_network_offset(skb);
3882no_csum:
3883
3884 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3885 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3886
3887 fceof_saidx |= itd->pfsa;
3888 type_tucmd |= itd->flags | itd->trailer_len;
3889
3890 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3891 fceof_saidx, type_tucmd, 0);
3892}
3893
3894static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3895{
3896
3897 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3898 IXGBE_ADVTXD_DCMD_IFCS |
3899 IXGBE_ADVTXD_DCMD_DEXT);
3900
3901
3902 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3903 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3904
3905
3906 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3907 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3908
3909 return cmd_type;
3910}
3911
3912static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3913 u32 tx_flags, unsigned int paylen)
3914{
3915 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3916
3917
3918 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3919 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3920
3921
3922 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3923 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3924
3925
3926 if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
3927 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);
3928
3929
3930 if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
3931 olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
3932
3933
3934
3935
3936 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3937
3938 tx_desc->read.olinfo_status = olinfo_status;
3939}
3940
3941static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3942 struct ixgbevf_tx_buffer *first,
3943 const u8 hdr_len)
3944{
3945 struct sk_buff *skb = first->skb;
3946 struct ixgbevf_tx_buffer *tx_buffer;
3947 union ixgbe_adv_tx_desc *tx_desc;
3948 struct skb_frag_struct *frag;
3949 dma_addr_t dma;
3950 unsigned int data_len, size;
3951 u32 tx_flags = first->tx_flags;
3952 __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3953 u16 i = tx_ring->next_to_use;
3954
3955 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3956
3957 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
3958
3959 size = skb_headlen(skb);
3960 data_len = skb->data_len;
3961
3962 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3963
3964 tx_buffer = first;
3965
3966 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3967 if (dma_mapping_error(tx_ring->dev, dma))
3968 goto dma_error;
3969
3970
3971 dma_unmap_len_set(tx_buffer, len, size);
3972 dma_unmap_addr_set(tx_buffer, dma, dma);
3973
3974 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3975
3976 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3977 tx_desc->read.cmd_type_len =
3978 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3979
3980 i++;
3981 tx_desc++;
3982 if (i == tx_ring->count) {
3983 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3984 i = 0;
3985 }
3986 tx_desc->read.olinfo_status = 0;
3987
3988 dma += IXGBE_MAX_DATA_PER_TXD;
3989 size -= IXGBE_MAX_DATA_PER_TXD;
3990
3991 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3992 }
3993
3994 if (likely(!data_len))
3995 break;
3996
3997 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
3998
3999 i++;
4000 tx_desc++;
4001 if (i == tx_ring->count) {
4002 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
4003 i = 0;
4004 }
4005 tx_desc->read.olinfo_status = 0;
4006
4007 size = skb_frag_size(frag);
4008 data_len -= size;
4009
4010 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
4011 DMA_TO_DEVICE);
4012
4013 tx_buffer = &tx_ring->tx_buffer_info[i];
4014 }
4015
4016
4017 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
4018 tx_desc->read.cmd_type_len = cmd_type;
4019
4020
4021 first->time_stamp = jiffies;
4022
4023 skb_tx_timestamp(skb);
4024
4025
4026
4027
4028
4029
4030
4031
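	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64.)
	 *
	 * This memory barrier also makes certain all of the status bits
	 * have been updated before next_to_watch is written.
	 */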
4032 wmb();
4033
4034
4035 first->next_to_watch = tx_desc;
4036
4037 i++;
4038 if (i == tx_ring->count)
4039 i = 0;
4040
4041 tx_ring->next_to_use = i;
4042
4043
4044 ixgbevf_write_tail(tx_ring, i);
4045
4046 return;
4047dma_error:
4048 dev_err(tx_ring->dev, "TX DMA map failed\n");
4049 tx_buffer = &tx_ring->tx_buffer_info[i];
4050
4051
4052 while (tx_buffer != first) {
4053 if (dma_unmap_len(tx_buffer, len))
4054 dma_unmap_page(tx_ring->dev,
4055 dma_unmap_addr(tx_buffer, dma),
4056 dma_unmap_len(tx_buffer, len),
4057 DMA_TO_DEVICE);
4058 dma_unmap_len_set(tx_buffer, len, 0);
4059
4060 if (i-- == 0)
4061 i += tx_ring->count;
4062 tx_buffer = &tx_ring->tx_buffer_info[i];
4063 }
4064
4065 if (dma_unmap_len(tx_buffer, len))
4066 dma_unmap_single(tx_ring->dev,
4067 dma_unmap_addr(tx_buffer, dma),
4068 dma_unmap_len(tx_buffer, len),
4069 DMA_TO_DEVICE);
4070 dma_unmap_len_set(tx_buffer, len, 0);
4071
4072 dev_kfree_skb_any(tx_buffer->skb);
4073 tx_buffer->skb = NULL;
4074
4075 tx_ring->next_to_use = i;
4076}
4077
4078static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4079{
4080 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
4081
4082
4083
4084
4085 smp_mb();
4086
4087
4088
4089
4090 if (likely(ixgbevf_desc_unused(tx_ring) < size))
4091 return -EBUSY;
4092
4093
4094 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
4095 ++tx_ring->tx_stats.restart_queue;
4096
4097 return 0;
4098}
4099
4100static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4101{
4102 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
4103 return 0;
4104 return __ixgbevf_maybe_stop_tx(tx_ring, size);
4105}
4106
4107static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
4108 struct ixgbevf_ring *tx_ring)
4109{
4110 struct ixgbevf_tx_buffer *first;
4111 int tso;
4112 u32 tx_flags = 0;
4113 u16 count = TXD_USE_COUNT(skb_headlen(skb));
4114 struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 };
4115#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4116 unsigned short f;
4117#endif
4118 u8 hdr_len = 0;
4119 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
4120
4121 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
4122 dev_kfree_skb_any(skb);
4123 return NETDEV_TX_OK;
4124 }
4125
4126
4127
4128
4129
4130
4131
4132#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4133 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
4134 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
4135#else
4136 count += skb_shinfo(skb)->nr_frags;
4137#endif
4138 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
4139 tx_ring->tx_stats.tx_busy++;
4140 return NETDEV_TX_BUSY;
4141 }
4142
4143
4144 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4145 first->skb = skb;
4146 first->bytecount = skb->len;
4147 first->gso_segs = 1;
4148
4149 if (skb_vlan_tag_present(skb)) {
4150 tx_flags |= skb_vlan_tag_get(skb);
4151 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
4152 tx_flags |= IXGBE_TX_FLAGS_VLAN;
4153 }
4154
4155
4156 first->tx_flags = tx_flags;
4157 first->protocol = vlan_get_protocol(skb);
4158
4159#ifdef CONFIG_IXGBEVF_IPSEC
4160 if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
4161 goto out_drop;
4162#endif
4163 tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
4164 if (tso < 0)
4165 goto out_drop;
4166 else if (!tso)
4167 ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
4168
4169 ixgbevf_tx_map(tx_ring, first, hdr_len);
4170
4171 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
4172
4173 return NETDEV_TX_OK;
4174
4175out_drop:
4176 dev_kfree_skb_any(first->skb);
4177 first->skb = NULL;
4178
4179 return NETDEV_TX_OK;
4180}
4181
static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb,
				      struct net_device *netdev)
4183{
4184 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4185 struct ixgbevf_ring *tx_ring;
4186
4187 if (skb->len <= 0) {
4188 dev_kfree_skb_any(skb);
4189 return NETDEV_TX_OK;
4190 }
4191
4192
4193
4194
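	/* The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */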
4195 if (skb->len < 17) {
4196 if (skb_padto(skb, 17))
4197 return NETDEV_TX_OK;
4198 skb->len = 17;
4199 }
4200
4201 tx_ring = adapter->tx_ring[skb->queue_mapping];
4202 return ixgbevf_xmit_frame_ring(skb, tx_ring);
4203}
4204
4205
4206
4207
4208
4209
4210
4211
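/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/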
4212static int ixgbevf_set_mac(struct net_device *netdev, void *p)
4213{
4214 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4215 struct ixgbe_hw *hw = &adapter->hw;
4216 struct sockaddr *addr = p;
4217 int err;
4218
4219 if (!is_valid_ether_addr(addr->sa_data))
4220 return -EADDRNOTAVAIL;
4221
4222 spin_lock_bh(&adapter->mbx_lock);
4223
4224 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
4225
4226 spin_unlock_bh(&adapter->mbx_lock);
4227
4228 if (err)
4229 return -EPERM;
4230
4231 ether_addr_copy(hw->mac.addr, addr->sa_data);
4232 ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
4233 ether_addr_copy(netdev->dev_addr, addr->sa_data);
4234
4235 return 0;
4236}
4237
4238
4239
4240
4241
4242
4243
4244
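/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/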
4245static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
4246{
4247 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4248 struct ixgbe_hw *hw = &adapter->hw;
4249 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4250 int ret;
4251
4252
4253 if (adapter->xdp_prog) {
4254 dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n");
4255 return -EPERM;
4256 }
4257
4258 spin_lock_bh(&adapter->mbx_lock);
4259
4260 ret = hw->mac.ops.set_rlpml(hw, max_frame);
4261 spin_unlock_bh(&adapter->mbx_lock);
4262 if (ret)
4263 return -EINVAL;
4264
4265 hw_dbg(hw, "changing MTU from %d to %d\n",
4266 netdev->mtu, new_mtu);
4267
4268
4269 netdev->mtu = new_mtu;
4270
4271 if (netif_running(netdev))
4272 ixgbevf_reinit_locked(adapter);
4273
4274 return 0;
4275}
4276
4277static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
4278{
4279 struct net_device *netdev = pci_get_drvdata(pdev);
4280 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4281#ifdef CONFIG_PM
4282 int retval = 0;
4283#endif
4284
4285 rtnl_lock();
4286 netif_device_detach(netdev);
4287
4288 if (netif_running(netdev))
4289 ixgbevf_close_suspend(adapter);
4290
4291 ixgbevf_clear_interrupt_scheme(adapter);
4292 rtnl_unlock();
4293
4294#ifdef CONFIG_PM
4295 retval = pci_save_state(pdev);
4296 if (retval)
4297 return retval;
4298
4299#endif
4300 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4301 pci_disable_device(pdev);
4302
4303 return 0;
4304}
4305
4306#ifdef CONFIG_PM
4307static int ixgbevf_resume(struct pci_dev *pdev)
4308{
4309 struct net_device *netdev = pci_get_drvdata(pdev);
4310 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int err;
4312
4313 pci_restore_state(pdev);
4314
4315
4316
4317 pci_save_state(pdev);
4318
4319 err = pci_enable_device_mem(pdev);
4320 if (err) {
4321 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
4322 return err;
4323 }
4324
4325 adapter->hw.hw_addr = adapter->io_addr;
4326 smp_mb__before_atomic();
4327 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4328 pci_set_master(pdev);
4329
4330 ixgbevf_reset(adapter);
4331
4332 rtnl_lock();
4333 err = ixgbevf_init_interrupt_scheme(adapter);
4334 if (!err && netif_running(netdev))
4335 err = ixgbevf_open(netdev);
4336 rtnl_unlock();
4337 if (err)
4338 return err;
4339
4340 netif_device_attach(netdev);
4341
4342 return err;
4343}
4344
4345#endif
4346static void ixgbevf_shutdown(struct pci_dev *pdev)
4347{
4348 ixgbevf_suspend(pdev, PMSG_SUSPEND);
4349}
4350
4351static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
4352 const struct ixgbevf_ring *ring)
4353{
4354 u64 bytes, packets;
4355 unsigned int start;
4356
4357 if (ring) {
4358 do {
4359 start = u64_stats_fetch_begin_irq(&ring->syncp);
4360 bytes = ring->stats.bytes;
4361 packets = ring->stats.packets;
4362 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4363 stats->tx_bytes += bytes;
4364 stats->tx_packets += packets;
4365 }
4366}
4367
4368static void ixgbevf_get_stats(struct net_device *netdev,
4369 struct rtnl_link_stats64 *stats)
4370{
4371 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4372 unsigned int start;
4373 u64 bytes, packets;
4374 const struct ixgbevf_ring *ring;
4375 int i;
4376
4377 ixgbevf_update_stats(adapter);
4378
4379 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
4380
4381 rcu_read_lock();
4382 for (i = 0; i < adapter->num_rx_queues; i++) {
4383 ring = adapter->rx_ring[i];
4384 do {
4385 start = u64_stats_fetch_begin_irq(&ring->syncp);
4386 bytes = ring->stats.bytes;
4387 packets = ring->stats.packets;
4388 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4389 stats->rx_bytes += bytes;
4390 stats->rx_packets += packets;
4391 }
4392
4393 for (i = 0; i < adapter->num_tx_queues; i++) {
4394 ring = adapter->tx_ring[i];
4395 ixgbevf_get_tx_ring_stats(stats, ring);
4396 }
4397
4398 for (i = 0; i < adapter->num_xdp_queues; i++) {
4399 ring = adapter->xdp_ring[i];
4400 ixgbevf_get_tx_ring_stats(stats, ring);
4401 }
4402 rcu_read_unlock();
4403}
4404
4405#define IXGBEVF_MAX_MAC_HDR_LEN 127
4406#define IXGBEVF_MAX_NETWORK_HDR_LEN 511
4407
4408static netdev_features_t
4409ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
4410 netdev_features_t features)
4411{
4412 unsigned int network_hdr_len, mac_hdr_len;
4413
4414
4415 mac_hdr_len = skb_network_header(skb) - skb->data;
4416 if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
4417 return features & ~(NETIF_F_HW_CSUM |
4418 NETIF_F_SCTP_CRC |
4419 NETIF_F_HW_VLAN_CTAG_TX |
4420 NETIF_F_TSO |
4421 NETIF_F_TSO6);
4422
4423 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
4424 if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
4425 return features & ~(NETIF_F_HW_CSUM |
4426 NETIF_F_SCTP_CRC |
4427 NETIF_F_TSO |
4428 NETIF_F_TSO6);
4429
4430
4431
4432
4433 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
4434 features &= ~NETIF_F_TSO;
4435
4436 return features;
4437}
4438
4439static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
4440{
4441 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4442 struct ixgbevf_adapter *adapter = netdev_priv(dev);
4443 struct bpf_prog *old_prog;
4444
4445
4446 for (i = 0; i < adapter->num_rx_queues; i++) {
4447 struct ixgbevf_ring *ring = adapter->rx_ring[i];
4448
4449 if (frame_size > ixgbevf_rx_bufsz(ring))
4450 return -EINVAL;
4451 }
4452
4453 old_prog = xchg(&adapter->xdp_prog, prog);
4454
4455
4456 if (!!prog != !!old_prog) {
4457
4458
4459
4460
4461 if (netif_running(dev))
4462 ixgbevf_close(dev);
4463
4464 ixgbevf_clear_interrupt_scheme(adapter);
4465 ixgbevf_init_interrupt_scheme(adapter);
4466
4467 if (netif_running(dev))
4468 ixgbevf_open(dev);
4469 } else {
4470 for (i = 0; i < adapter->num_rx_queues; i++)
4471 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
4472 }
4473
4474 if (old_prog)
4475 bpf_prog_put(old_prog);
4476
4477 return 0;
4478}
4479
4480static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4481{
4482 struct ixgbevf_adapter *adapter = netdev_priv(dev);
4483
4484 switch (xdp->command) {
4485 case XDP_SETUP_PROG:
4486 return ixgbevf_xdp_setup(dev, xdp->prog);
4487 case XDP_QUERY_PROG:
4488 xdp->prog_id = adapter->xdp_prog ?
4489 adapter->xdp_prog->aux->id : 0;
4490 return 0;
4491 default:
4492 return -EINVAL;
4493 }
4494}
4495
4496static const struct net_device_ops ixgbevf_netdev_ops = {
4497 .ndo_open = ixgbevf_open,
4498 .ndo_stop = ixgbevf_close,
4499 .ndo_start_xmit = ixgbevf_xmit_frame,
4500 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
4501 .ndo_get_stats64 = ixgbevf_get_stats,
4502 .ndo_validate_addr = eth_validate_addr,
4503 .ndo_set_mac_address = ixgbevf_set_mac,
4504 .ndo_change_mtu = ixgbevf_change_mtu,
4505 .ndo_tx_timeout = ixgbevf_tx_timeout,
4506 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
4507 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
4508 .ndo_features_check = ixgbevf_features_check,
4509 .ndo_bpf = ixgbevf_xdp,
4510};
4511
4512static void ixgbevf_assign_netdev_ops(struct net_device *dev)
4513{
4514 dev->netdev_ops = &ixgbevf_netdev_ops;
4515 ixgbevf_set_ethtool_ops(dev);
4516 dev->watchdog_timeo = 5 * HZ;
4517}
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
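/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/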
4530static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4531{
4532 struct net_device *netdev;
4533 struct ixgbevf_adapter *adapter = NULL;
4534 struct ixgbe_hw *hw = NULL;
4535 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
4536 int err, pci_using_dac;
4537 bool disable_dev = false;
4538
4539 err = pci_enable_device(pdev);
4540 if (err)
4541 return err;
4542
4543 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
4544 pci_using_dac = 1;
4545 } else {
4546 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4547 if (err) {
4548 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4549 goto err_dma;
4550 }
4551 pci_using_dac = 0;
4552 }
4553
4554 err = pci_request_regions(pdev, ixgbevf_driver_name);
4555 if (err) {
4556 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
4557 goto err_pci_reg;
4558 }
4559
4560 pci_set_master(pdev);
4561
4562 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
4563 MAX_TX_QUEUES);
4564 if (!netdev) {
4565 err = -ENOMEM;
4566 goto err_alloc_etherdev;
4567 }
4568
4569 SET_NETDEV_DEV(netdev, &pdev->dev);
4570
4571 adapter = netdev_priv(netdev);
4572
4573 adapter->netdev = netdev;
4574 adapter->pdev = pdev;
4575 hw = &adapter->hw;
4576 hw->back = adapter;
4577 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4578
4579
4580
4581
4582 pci_save_state(pdev);
4583
4584 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4585 pci_resource_len(pdev, 0));
4586 adapter->io_addr = hw->hw_addr;
4587 if (!hw->hw_addr) {
4588 err = -EIO;
4589 goto err_ioremap;
4590 }
4591
4592 ixgbevf_assign_netdev_ops(netdev);
4593
4594
4595 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
4596 hw->mac.type = ii->mac;
4597
4598 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
4599 sizeof(struct ixgbe_mbx_operations));
4600
4601
4602 err = ixgbevf_sw_init(adapter);
4603 if (err)
4604 goto err_sw_init;
4605
4606
4607 if (!is_valid_ether_addr(netdev->dev_addr)) {
4608 pr_err("invalid MAC address\n");
4609 err = -EIO;
4610 goto err_sw_init;
4611 }
4612
4613 netdev->hw_features = NETIF_F_SG |
4614 NETIF_F_TSO |
4615 NETIF_F_TSO6 |
4616 NETIF_F_RXCSUM |
4617 NETIF_F_HW_CSUM |
4618 NETIF_F_SCTP_CRC;
4619
4620#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
4621 NETIF_F_GSO_GRE_CSUM | \
4622 NETIF_F_GSO_IPXIP4 | \
4623 NETIF_F_GSO_IPXIP6 | \
4624 NETIF_F_GSO_UDP_TUNNEL | \
4625 NETIF_F_GSO_UDP_TUNNEL_CSUM)
4626
4627 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
4628 netdev->hw_features |= NETIF_F_GSO_PARTIAL |
4629 IXGBEVF_GSO_PARTIAL_FEATURES;
4630
4631 netdev->features = netdev->hw_features;
4632
4633 if (pci_using_dac)
4634 netdev->features |= NETIF_F_HIGHDMA;
4635
4636 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
4637 netdev->mpls_features |= NETIF_F_SG |
4638 NETIF_F_TSO |
4639 NETIF_F_TSO6 |
4640 NETIF_F_HW_CSUM;
4641 netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
4642 netdev->hw_enc_features |= netdev->vlan_features;
4643
4644
4645 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4646 NETIF_F_HW_VLAN_CTAG_RX |
4647 NETIF_F_HW_VLAN_CTAG_TX;
4648
4649 netdev->priv_flags |= IFF_UNICAST_FLT;
4650
4651
4652 netdev->min_mtu = ETH_MIN_MTU;
4653 switch (adapter->hw.api_version) {
4654 case ixgbe_mbox_api_11:
4655 case ixgbe_mbox_api_12:
4656 case ixgbe_mbox_api_13:
4657 case ixgbe_mbox_api_14:
4658 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4659 (ETH_HLEN + ETH_FCS_LEN);
4660 break;
4661 default:
4662 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
4663 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4664 (ETH_HLEN + ETH_FCS_LEN);
4665 else
4666 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
4667 break;
4668 }
4669
4670 if (IXGBE_REMOVED(hw->hw_addr)) {
4671 err = -EIO;
4672 goto err_sw_init;
4673 }
4674
4675 timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0);
4676
4677 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4678 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4679 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4680
4681 err = ixgbevf_init_interrupt_scheme(adapter);
4682 if (err)
4683 goto err_sw_init;
4684
4685 strcpy(netdev->name, "eth%d");
4686
4687 err = register_netdev(netdev);
4688 if (err)
4689 goto err_register;
4690
4691 pci_set_drvdata(pdev, netdev);
4692 netif_carrier_off(netdev);
4693 ixgbevf_init_ipsec_offload(adapter);
4694
4695 ixgbevf_init_last_counter_stats(adapter);
4696
4697
4698 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
4699 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4700
4701 switch (hw->mac.type) {
4702 case ixgbe_mac_X550_vf:
4703 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
4704 break;
4705 case ixgbe_mac_X540_vf:
4706 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
4707 break;
4708 case ixgbe_mac_82599_vf:
4709 default:
4710 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
4711 break;
4712 }
4713
4714 return 0;
4715
4716err_register:
4717 ixgbevf_clear_interrupt_scheme(adapter);
4718err_sw_init:
4719 ixgbevf_reset_interrupt_capability(adapter);
4720 iounmap(adapter->io_addr);
4721 kfree(adapter->rss_key);
4722err_ioremap:
4723 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4724 free_netdev(netdev);
4725err_alloc_etherdev:
4726 pci_release_regions(pdev);
4727err_pci_reg:
4728err_dma:
4729 if (!adapter || disable_dev)
4730 pci_disable_device(pdev);
4731 return err;
4732}
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
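/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/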
4743static void ixgbevf_remove(struct pci_dev *pdev)
4744{
4745 struct net_device *netdev = pci_get_drvdata(pdev);
4746 struct ixgbevf_adapter *adapter;
4747 bool disable_dev;
4748
4749 if (!netdev)
4750 return;
4751
4752 adapter = netdev_priv(netdev);
4753
4754 set_bit(__IXGBEVF_REMOVING, &adapter->state);
4755 cancel_work_sync(&adapter->service_task);
4756
4757 if (netdev->reg_state == NETREG_REGISTERED)
4758 unregister_netdev(netdev);
4759
4760 ixgbevf_stop_ipsec_offload(adapter);
4761 ixgbevf_clear_interrupt_scheme(adapter);
4762 ixgbevf_reset_interrupt_capability(adapter);
4763
4764 iounmap(adapter->io_addr);
4765 pci_release_regions(pdev);
4766
4767 hw_dbg(&adapter->hw, "Remove complete\n");
4768
4769 kfree(adapter->rss_key);
4770 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4771 free_netdev(netdev);
4772
4773 if (disable_dev)
4774 pci_disable_device(pdev);
4775}
4776
4777
4778
4779
4780
4781
4782
4783
4784
4785static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
4786 pci_channel_state_t state)
4787{
4788 struct net_device *netdev = pci_get_drvdata(pdev);
4789 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4790
4791 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4792 return PCI_ERS_RESULT_DISCONNECT;
4793
4794 rtnl_lock();
4795 netif_device_detach(netdev);
4796
4797 if (netif_running(netdev))
4798 ixgbevf_close_suspend(adapter);
4799
4800 if (state == pci_channel_io_perm_failure) {
4801 rtnl_unlock();
4802 return PCI_ERS_RESULT_DISCONNECT;
4803 }
4804
4805 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4806 pci_disable_device(pdev);
4807 rtnl_unlock();
4808
4809
4810 return PCI_ERS_RESULT_NEED_RESET;
4811}
4812
4813
4814
4815
4816
4817
4818
4819
4820static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
4821{
4822 struct net_device *netdev = pci_get_drvdata(pdev);
4823 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4824
4825 if (pci_enable_device_mem(pdev)) {
4826 dev_err(&pdev->dev,
4827 "Cannot re-enable PCI device after reset.\n");
4828 return PCI_ERS_RESULT_DISCONNECT;
4829 }
4830
4831 adapter->hw.hw_addr = adapter->io_addr;
4832 smp_mb__before_atomic();
4833 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4834 pci_set_master(pdev);
4835
4836 ixgbevf_reset(adapter);
4837
4838 return PCI_ERS_RESULT_RECOVERED;
4839}
4840
4841
4842
4843
4844
4845
4846
4847
4848
4849static void ixgbevf_io_resume(struct pci_dev *pdev)
4850{
4851 struct net_device *netdev = pci_get_drvdata(pdev);
4852
4853 rtnl_lock();
4854 if (netif_running(netdev))
4855 ixgbevf_open(netdev);
4856
4857 netif_device_attach(netdev);
4858 rtnl_unlock();
4859}
4860
4861
4862static const struct pci_error_handlers ixgbevf_err_handler = {
4863 .error_detected = ixgbevf_io_error_detected,
4864 .slot_reset = ixgbevf_io_slot_reset,
4865 .resume = ixgbevf_io_resume,
4866};
4867
4868static struct pci_driver ixgbevf_driver = {
4869 .name = ixgbevf_driver_name,
4870 .id_table = ixgbevf_pci_tbl,
4871 .probe = ixgbevf_probe,
4872 .remove = ixgbevf_remove,
4873#ifdef CONFIG_PM
4874
4875 .suspend = ixgbevf_suspend,
4876 .resume = ixgbevf_resume,
4877#endif
4878 .shutdown = ixgbevf_shutdown,
4879 .err_handler = &ixgbevf_err_handler
4880};
4881
4882
4883
4884
4885
4886
4887
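/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/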
4888static int __init ixgbevf_init_module(void)
4889{
4890 pr_info("%s - version %s\n", ixgbevf_driver_string,
4891 ixgbevf_driver_version);
4892
4893 pr_info("%s\n", ixgbevf_copyright);
4894 ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
4895 if (!ixgbevf_wq) {
4896 pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
4897 return -ENOMEM;
4898 }
4899
4900 return pci_register_driver(&ixgbevf_driver);
4901}
4902
4903module_init(ixgbevf_init_module);
4904
4905
4906
4907
4908
4909
4910
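/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/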
4911static void __exit ixgbevf_exit_module(void)
4912{
4913 pci_unregister_driver(&ixgbevf_driver);
4914 if (ixgbevf_wq) {
4915 destroy_workqueue(ixgbevf_wq);
4916 ixgbevf_wq = NULL;
4917 }
4918}
4919
4920#ifdef DEBUG
4921
4922
4923
4924
4925
4926char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4927{
4928 struct ixgbevf_adapter *adapter = hw->back;
4929
4930 return adapter->netdev->name;
4931}
4932
4933#endif
4934module_exit(ixgbevf_exit_module);
4935
4936
4937