// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <net/mpls.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/atomic.h>
#include <net/xfrm.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "4.1.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2018 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf]	= &ixgbevf_82599_vf_info,
	[board_82599_vf_hv]	= &ixgbevf_82599_vf_hv_info,
	[board_X540_vf]		= &ixgbevf_X540_vf_info,
	[board_X540_vf_hv]	= &ixgbevf_X540_vf_hv_info,
	[board_X550_vf]		= &ixgbevf_X550_vf_info,
	[board_X550_vf_hv]	= &ixgbevf_X550_vf_hv_info,
	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,
	[board_X550EM_x_vf_hv]	= &ixgbevf_X550EM_x_vf_hv_info,
	[board_x550em_a_vf]	= &ixgbevf_x550em_a_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct workqueue_struct *ixgbevf_wq;
92
93static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
94{
95 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
96 !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
97 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
98 queue_work(ixgbevf_wq, &adapter->service_task);
99}
100
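/* ixgbevf_service_event_complete - release the service-task "scheduled" slot
 * (with a barrier first) so the service task can be queued again
 */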
101static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
102{
103 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
104
105
106 smp_mb__before_atomic();
107 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
108}
109
110
111static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
112static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
113static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
114static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
115static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
116 struct ixgbevf_rx_buffer *old_buff);
117
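/* ixgbevf_remove_adapter - handle surprise removal: clear hw_addr so further
 * MMIO access is skipped and kick the service task if it is already set up
 */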
118static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
119{
120 struct ixgbevf_adapter *adapter = hw->back;
121
122 if (!hw->hw_addr)
123 return;
124 hw->hw_addr = NULL;
125 dev_err(&adapter->pdev->dev, "Adapter removed\n");
126 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
127 ixgbevf_service_event_schedule(adapter);
128}
129
130static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
131{
132 u32 value;
133
134
135
136
137
138
139
140 if (reg == IXGBE_VFSTATUS) {
141 ixgbevf_remove_adapter(hw);
142 return;
143 }
144 value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
145 if (value == IXGBE_FAILED_READ_REG)
146 ixgbevf_remove_adapter(hw);
147}
148
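/* ixgbevf_read_reg - read a VF register, treating a value of all ones as a
 * possible surprise removal and re-checking via the VFSTATUS register
 */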
149u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
150{
151 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
152 u32 value;
153
154 if (IXGBE_REMOVED(reg_addr))
155 return IXGBE_FAILED_READ_REG;
156 value = readl(reg_addr + reg);
157 if (unlikely(value == IXGBE_FAILED_READ_REG))
158 ixgbevf_check_remove(hw, reg);
159 return value;
160}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
169static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
170 u8 queue, u8 msix_vector)
171{
172 u32 ivar, index;
173 struct ixgbe_hw *hw = &adapter->hw;
174
175 if (direction == -1) {
176
177 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
178 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
179 ivar &= ~0xFF;
180 ivar |= msix_vector;
181 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
182 } else {
183
184 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
185 index = ((16 * (queue & 1)) + (8 * direction));
186 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
187 ivar &= ~(0xFF << index);
188 ivar |= (msix_vector << index);
189 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
190 }
191}
192
193static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
194{
195 return ring->stats.packets;
196}
197
198static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
199{
200 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
201 struct ixgbe_hw *hw = &adapter->hw;
202
203 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
204 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
205
206 if (head != tail)
207 return (head < tail) ?
208 tail - head : (tail + ring->count - head);
209
210 return 0;
211}
212
213static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
214{
215 u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
216 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
217 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
218
219 clear_check_for_tx_hang(tx_ring);
220
221
222
223
224
225
226 if ((tx_done_old == tx_done) && tx_pending) {
227
228 return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
229 &tx_ring->state);
230 }
231
232 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
233
234
235 tx_ring->tx_stats.tx_done_old = tx_done;
236
237 return false;
238}
239
240static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
241{
242
243 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
244 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
245 ixgbevf_service_event_schedule(adapter);
246 }
247}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang reported by the stack
 * @netdev: network interface device structure
 **/
253static void ixgbevf_tx_timeout(struct net_device *netdev)
254{
255 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
256
257 ixgbevf_tx_timeout_reset(adapter);
258}

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
266static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
267 struct ixgbevf_ring *tx_ring, int napi_budget)
268{
269 struct ixgbevf_adapter *adapter = q_vector->adapter;
270 struct ixgbevf_tx_buffer *tx_buffer;
271 union ixgbe_adv_tx_desc *tx_desc;
272 unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
273 unsigned int budget = tx_ring->count / 2;
274 unsigned int i = tx_ring->next_to_clean;
275
276 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
277 return true;
278
279 tx_buffer = &tx_ring->tx_buffer_info[i];
280 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
281 i -= tx_ring->count;
282
283 do {
284 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
285
286
287 if (!eop_desc)
288 break;
289
290
291 smp_rmb();
292
293
294 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
295 break;
296
297
298 tx_buffer->next_to_watch = NULL;
299
300
301 total_bytes += tx_buffer->bytecount;
302 total_packets += tx_buffer->gso_segs;
303 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
304 total_ipsec++;
305
306
307 if (ring_is_xdp(tx_ring))
308 page_frag_free(tx_buffer->data);
309 else
310 napi_consume_skb(tx_buffer->skb, napi_budget);
311
312
313 dma_unmap_single(tx_ring->dev,
314 dma_unmap_addr(tx_buffer, dma),
315 dma_unmap_len(tx_buffer, len),
316 DMA_TO_DEVICE);
317
318
319 dma_unmap_len_set(tx_buffer, len, 0);
320
321
322 while (tx_desc != eop_desc) {
323 tx_buffer++;
324 tx_desc++;
325 i++;
326 if (unlikely(!i)) {
327 i -= tx_ring->count;
328 tx_buffer = tx_ring->tx_buffer_info;
329 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
330 }
331
332
333 if (dma_unmap_len(tx_buffer, len)) {
334 dma_unmap_page(tx_ring->dev,
335 dma_unmap_addr(tx_buffer, dma),
336 dma_unmap_len(tx_buffer, len),
337 DMA_TO_DEVICE);
338 dma_unmap_len_set(tx_buffer, len, 0);
339 }
340 }
341
342
343 tx_buffer++;
344 tx_desc++;
345 i++;
346 if (unlikely(!i)) {
347 i -= tx_ring->count;
348 tx_buffer = tx_ring->tx_buffer_info;
349 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
350 }
351
352
353 prefetch(tx_desc);
354
355
356 budget--;
357 } while (likely(budget));
358
359 i += tx_ring->count;
360 tx_ring->next_to_clean = i;
361 u64_stats_update_begin(&tx_ring->syncp);
362 tx_ring->stats.bytes += total_bytes;
363 tx_ring->stats.packets += total_packets;
364 u64_stats_update_end(&tx_ring->syncp);
365 q_vector->tx.total_bytes += total_bytes;
366 q_vector->tx.total_packets += total_packets;
367 adapter->tx_ipsec += total_ipsec;
368
369 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
370 struct ixgbe_hw *hw = &adapter->hw;
371 union ixgbe_adv_tx_desc *eop_desc;
372
373 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
374
375 pr_err("Detected Tx Unit Hang%s\n"
376 " Tx Queue <%d>\n"
377 " TDH, TDT <%x>, <%x>\n"
378 " next_to_use <%x>\n"
379 " next_to_clean <%x>\n"
380 "tx_buffer_info[next_to_clean]\n"
381 " next_to_watch <%p>\n"
382 " eop_desc->wb.status <%x>\n"
383 " time_stamp <%lx>\n"
384 " jiffies <%lx>\n",
385 ring_is_xdp(tx_ring) ? " XDP" : "",
386 tx_ring->queue_index,
387 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
388 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
389 tx_ring->next_to_use, i,
390 eop_desc, (eop_desc ? eop_desc->wb.status : 0),
391 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
392
393 if (!ring_is_xdp(tx_ring))
394 netif_stop_subqueue(tx_ring->netdev,
395 tx_ring->queue_index);
396
397
398 ixgbevf_tx_timeout_reset(adapter);
399
400 return true;
401 }
402
403 if (ring_is_xdp(tx_ring))
404 return !!budget;
405
406#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
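	/* only restart the queue once enough descriptors for at least two
	 * worst-case frames have been reclaimed
	 */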
407 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
408 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
409
410
411
412 smp_mb();
413
414 if (__netif_subqueue_stopped(tx_ring->netdev,
415 tx_ring->queue_index) &&
416 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
417 netif_wake_subqueue(tx_ring->netdev,
418 tx_ring->queue_index);
419 ++tx_ring->tx_stats.restart_queue;
420 }
421 }
422
423 return !!budget;
424}

/**
 * ixgbevf_rx_skb - hand a completed packet up the stack via GRO
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
431static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
432 struct sk_buff *skb)
433{
434 napi_gro_receive(&q_vector->napi, skb);
435}
436
437#define IXGBE_RSS_L4_TYPES_MASK \
438 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
439 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
440 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
441 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
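/* RSS types in this mask hash over an L4 header as well, so the hash is
 * reported to the stack as PKT_HASH_TYPE_L4; anything else is L3 only.
 */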
442
443static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
444 union ixgbe_adv_rx_desc *rx_desc,
445 struct sk_buff *skb)
446{
447 u16 rss_type;
448
449 if (!(ring->netdev->features & NETIF_F_RXHASH))
450 return;
451
452 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
453 IXGBE_RXDADV_RSSTYPE_MASK;
454
455 if (!rss_type)
456 return;
457
458 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
459 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
460 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
461}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
469static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
470 union ixgbe_adv_rx_desc *rx_desc,
471 struct sk_buff *skb)
472{
473 skb_checksum_none_assert(skb);
474
475
476 if (!(ring->netdev->features & NETIF_F_RXCSUM))
477 return;
478
479
480 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
481 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
482 ring->rx_stats.csum_err++;
483 return;
484 }
485
486 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
487 return;
488
489 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
490 ring->rx_stats.csum_err++;
491 return;
492 }
493
494
495 skb->ip_summed = CHECKSUM_UNNECESSARY;
496}

/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and other fields
 * within the skb.
 **/
508static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
509 union ixgbe_adv_rx_desc *rx_desc,
510 struct sk_buff *skb)
511{
512 ixgbevf_rx_hash(rx_ring, rx_desc, skb);
513 ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
514
515 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
516 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
517 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
518
519 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
520 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
521 }
522
523 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
524 ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);
525
526 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
527}
528
529static
530struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
531 const unsigned int size)
532{
533 struct ixgbevf_rx_buffer *rx_buffer;
534
535 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
536 prefetchw(rx_buffer->page);
537
538
539 dma_sync_single_range_for_cpu(rx_ring->dev,
540 rx_buffer->dma,
541 rx_buffer->page_offset,
542 size,
543 DMA_FROM_DEVICE);
544
545 rx_buffer->pagecnt_bias--;
546
547 return rx_buffer;
548}
549
550static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
551 struct ixgbevf_rx_buffer *rx_buffer,
552 struct sk_buff *skb)
553{
554 if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
555
556 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
557 } else {
558 if (IS_ERR(skb))
559
560
561
562 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
563 ixgbevf_rx_pg_size(rx_ring),
564 DMA_FROM_DEVICE,
565 IXGBEVF_RX_DMA_ATTR);
566 __page_frag_cache_drain(rx_buffer->page,
567 rx_buffer->pagecnt_bias);
568 }
569
570
571 rx_buffer->page = NULL;
572}

/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean and prefetches the next descriptor.
 * If the buffer is an EOP buffer this function exits returning false,
 * otherwise it returns true indicating that the frame continues in the
 * following descriptor.
 **/
584static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
585 union ixgbe_adv_rx_desc *rx_desc)
586{
587 u32 ntc = rx_ring->next_to_clean + 1;
588
589
590 ntc = (ntc < rx_ring->count) ? ntc : 0;
591 rx_ring->next_to_clean = ntc;
592
593 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
594
595 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
596 return false;
597
598 return true;
599}
600
601static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
602{
603 return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
604}
605
606static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
607 struct ixgbevf_rx_buffer *bi)
608{
609 struct page *page = bi->page;
610 dma_addr_t dma;
611
612
613 if (likely(page))
614 return true;
615
616
617 page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
618 if (unlikely(!page)) {
619 rx_ring->rx_stats.alloc_rx_page_failed++;
620 return false;
621 }
622
623
624 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
625 ixgbevf_rx_pg_size(rx_ring),
626 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
627
628
629
630
631 if (dma_mapping_error(rx_ring->dev, dma)) {
632 __free_pages(page, ixgbevf_rx_pg_order(rx_ring));
633
634 rx_ring->rx_stats.alloc_rx_page_failed++;
635 return false;
636 }
637
638 bi->dma = dma;
639 bi->page = page;
640 bi->page_offset = ixgbevf_rx_offset(rx_ring);
641 bi->pagecnt_bias = 1;
642 rx_ring->rx_stats.alloc_rx_page++;
643
644 return true;
645}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
652static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
653 u16 cleaned_count)
654{
655 union ixgbe_adv_rx_desc *rx_desc;
656 struct ixgbevf_rx_buffer *bi;
657 unsigned int i = rx_ring->next_to_use;
658
659
660 if (!cleaned_count || !rx_ring->netdev)
661 return;
662
663 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
664 bi = &rx_ring->rx_buffer_info[i];
665 i -= rx_ring->count;
666
667 do {
668 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
669 break;
670
671
672 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
673 bi->page_offset,
674 ixgbevf_rx_bufsz(rx_ring),
675 DMA_FROM_DEVICE);
676
677
678
679
680 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
681
682 rx_desc++;
683 bi++;
684 i++;
685 if (unlikely(!i)) {
686 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
687 bi = rx_ring->rx_buffer_info;
688 i -= rx_ring->count;
689 }
690
691
692 rx_desc->wb.upper.length = 0;
693
694 cleaned_count--;
695 } while (cleaned_count);
696
697 i += rx_ring->count;
698
699 if (rx_ring->next_to_use != i) {
700
701 rx_ring->next_to_use = i;
702
703
704 rx_ring->next_to_alloc = i;
705
706
707
708
709
710
711 wmb();
712 ixgbevf_write_tail(rx_ring, i);
713 }
714}

/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
734static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
735 union ixgbe_adv_rx_desc *rx_desc,
736 struct sk_buff *skb)
737{
738
739 if (IS_ERR(skb))
740 return true;
741
742
743 if (unlikely(ixgbevf_test_staterr(rx_desc,
744 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
745 struct net_device *netdev = rx_ring->netdev;
746
747 if (!(netdev->features & NETIF_F_RXALL)) {
748 dev_kfree_skb_any(skb);
749 return true;
750 }
751 }
752
753
754 if (eth_skb_pad(skb))
755 return true;
756
757 return false;
758}

/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
767static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
768 struct ixgbevf_rx_buffer *old_buff)
769{
770 struct ixgbevf_rx_buffer *new_buff;
771 u16 nta = rx_ring->next_to_alloc;
772
773 new_buff = &rx_ring->rx_buffer_info[nta];
774
775
776 nta++;
777 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
778
779
780 new_buff->page = old_buff->page;
781 new_buff->dma = old_buff->dma;
782 new_buff->page_offset = old_buff->page_offset;
783 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
784}
785
786static inline bool ixgbevf_page_is_reserved(struct page *page)
787{
788 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
789}
790
791static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
792{
793 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
794 struct page *page = rx_buffer->page;
795
796
797 if (unlikely(ixgbevf_page_is_reserved(page)))
798 return false;
799
800#if (PAGE_SIZE < 8192)
801
802 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
803 return false;
804#else
805#define IXGBEVF_LAST_OFFSET \
806 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
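/* with pages of 8K or more, keep reusing the page until the next offset
 * would no longer leave room for a full 2K buffer plus skb overhead
 */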
807
808 if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
809 return false;
810
811#endif
812
813
814
815
816
817 if (unlikely(!pagecnt_bias)) {
818 page_ref_add(page, USHRT_MAX);
819 rx_buffer->pagecnt_bias = USHRT_MAX;
820 }
821
822 return true;
823}

/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 **/
834static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
835 struct ixgbevf_rx_buffer *rx_buffer,
836 struct sk_buff *skb,
837 unsigned int size)
838{
839#if (PAGE_SIZE < 8192)
840 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
841#else
842 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
843 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
844 SKB_DATA_ALIGN(size);
845#endif
846 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
847 rx_buffer->page_offset, size, truesize);
848#if (PAGE_SIZE < 8192)
849 rx_buffer->page_offset ^= truesize;
850#else
851 rx_buffer->page_offset += truesize;
852#endif
853}
854
855static
856struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
857 struct ixgbevf_rx_buffer *rx_buffer,
858 struct xdp_buff *xdp,
859 union ixgbe_adv_rx_desc *rx_desc)
860{
861 unsigned int size = xdp->data_end - xdp->data;
862#if (PAGE_SIZE < 8192)
863 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
864#else
865 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
866 xdp->data_hard_start);
867#endif
868 unsigned int headlen;
869 struct sk_buff *skb;
870
871
872 prefetch(xdp->data);
873#if L1_CACHE_BYTES < 128
874 prefetch(xdp->data + L1_CACHE_BYTES);
875#endif
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
893 if (unlikely(!skb))
894 return NULL;
895
896
897 headlen = size;
898 if (headlen > IXGBEVF_RX_HDR_SIZE)
899 headlen = eth_get_headlen(skb->dev, xdp->data,
900 IXGBEVF_RX_HDR_SIZE);
901
902
903 memcpy(__skb_put(skb, headlen), xdp->data,
904 ALIGN(headlen, sizeof(long)));
905
906
907 size -= headlen;
908 if (size) {
909 skb_add_rx_frag(skb, 0, rx_buffer->page,
910 (xdp->data + headlen) -
911 page_address(rx_buffer->page),
912 size, truesize);
913#if (PAGE_SIZE < 8192)
914 rx_buffer->page_offset ^= truesize;
915#else
916 rx_buffer->page_offset += truesize;
917#endif
918 } else {
919 rx_buffer->pagecnt_bias++;
920 }
921
922 return skb;
923}
924
925static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
926 u32 qmask)
927{
928 struct ixgbe_hw *hw = &adapter->hw;
929
930 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
931}
932
933static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
934 struct ixgbevf_rx_buffer *rx_buffer,
935 struct xdp_buff *xdp,
936 union ixgbe_adv_rx_desc *rx_desc)
937{
938 unsigned int metasize = xdp->data - xdp->data_meta;
939#if (PAGE_SIZE < 8192)
940 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
941#else
942 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
943 SKB_DATA_ALIGN(xdp->data_end -
944 xdp->data_hard_start);
945#endif
946 struct sk_buff *skb;
947
948
949
950
951
952
953 prefetch(xdp->data_meta);
954#if L1_CACHE_BYTES < 128
955 prefetch(xdp->data_meta + L1_CACHE_BYTES);
956#endif
957
958
959 skb = build_skb(xdp->data_hard_start, truesize);
960 if (unlikely(!skb))
961 return NULL;
962
963
964 skb_reserve(skb, xdp->data - xdp->data_hard_start);
965 __skb_put(skb, xdp->data_end - xdp->data);
966 if (metasize)
967 skb_metadata_set(skb, metasize);
968
969
970#if (PAGE_SIZE < 8192)
971 rx_buffer->page_offset ^= truesize;
972#else
973 rx_buffer->page_offset += truesize;
974#endif
975
976 return skb;
977}
978
979#define IXGBEVF_XDP_PASS 0
980#define IXGBEVF_XDP_CONSUMED 1
981#define IXGBEVF_XDP_TX 2
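/* verdicts returned by ixgbevf_run_xdp(): PASS hands the buffer to the stack,
 * CONSUMED drops it, TX queues it on the per-queue XDP Tx ring
 */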
982
983static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
984 struct xdp_buff *xdp)
985{
986 struct ixgbevf_tx_buffer *tx_buffer;
987 union ixgbe_adv_tx_desc *tx_desc;
988 u32 len, cmd_type;
989 dma_addr_t dma;
990 u16 i;
991
992 len = xdp->data_end - xdp->data;
993
994 if (unlikely(!ixgbevf_desc_unused(ring)))
995 return IXGBEVF_XDP_CONSUMED;
996
997 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
998 if (dma_mapping_error(ring->dev, dma))
999 return IXGBEVF_XDP_CONSUMED;
1000
1001
1002 i = ring->next_to_use;
1003 tx_buffer = &ring->tx_buffer_info[i];
1004
1005 dma_unmap_len_set(tx_buffer, len, len);
1006 dma_unmap_addr_set(tx_buffer, dma, dma);
1007 tx_buffer->data = xdp->data;
1008 tx_buffer->bytecount = len;
1009 tx_buffer->gso_segs = 1;
1010 tx_buffer->protocol = 0;
1011
1012
1013
1014
1015 if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
1016 struct ixgbe_adv_tx_context_desc *context_desc;
1017
1018 set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
1019
1020 context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
1021 context_desc->vlan_macip_lens =
1022 cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
1023 context_desc->fceof_saidx = 0;
1024 context_desc->type_tucmd_mlhl =
1025 cpu_to_le32(IXGBE_TXD_CMD_DEXT |
1026 IXGBE_ADVTXD_DTYP_CTXT);
1027 context_desc->mss_l4len_idx = 0;
1028
1029 i = 1;
1030 }
1031
1032
1033 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
1034 IXGBE_ADVTXD_DCMD_DEXT |
1035 IXGBE_ADVTXD_DCMD_IFCS;
1036 cmd_type |= len | IXGBE_TXD_CMD;
1037
1038 tx_desc = IXGBEVF_TX_DESC(ring, i);
1039 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1040
1041 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1042 tx_desc->read.olinfo_status =
1043 cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
1044 IXGBE_ADVTXD_CC);
1045
1046
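	/* make sure the buffer and descriptor writes above are visible
	 * before next_to_watch is set for the cleanup path
	 */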
1047 smp_wmb();
1048
1049
1050 i++;
1051 if (i == ring->count)
1052 i = 0;
1053
1054 tx_buffer->next_to_watch = tx_desc;
1055 ring->next_to_use = i;
1056
1057 return IXGBEVF_XDP_TX;
1058}
1059
1060static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
1061 struct ixgbevf_ring *rx_ring,
1062 struct xdp_buff *xdp)
1063{
1064 int result = IXGBEVF_XDP_PASS;
1065 struct ixgbevf_ring *xdp_ring;
1066 struct bpf_prog *xdp_prog;
1067 u32 act;
1068
1069 rcu_read_lock();
1070 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1071
1072 if (!xdp_prog)
1073 goto xdp_out;
1074
1075 act = bpf_prog_run_xdp(xdp_prog, xdp);
1076 switch (act) {
1077 case XDP_PASS:
1078 break;
1079 case XDP_TX:
1080 xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
1081 result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
1082 break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fall through -- handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBEVF_XDP_CONSUMED;
		break;
1092 }
1093xdp_out:
1094 rcu_read_unlock();
1095 return ERR_PTR(-result);
1096}
1097
1098static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
1099 struct ixgbevf_rx_buffer *rx_buffer,
1100 unsigned int size)
1101{
1102#if (PAGE_SIZE < 8192)
1103 unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
1104
1105 rx_buffer->page_offset ^= truesize;
1106#else
1107 unsigned int truesize = ring_uses_build_skb(rx_ring) ?
1108 SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
1109 SKB_DATA_ALIGN(size);
1110
1111 rx_buffer->page_offset += truesize;
1112#endif
1113}
1114
1115static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
1116 struct ixgbevf_ring *rx_ring,
1117 int budget)
1118{
1119 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1120 struct ixgbevf_adapter *adapter = q_vector->adapter;
1121 u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
1122 struct sk_buff *skb = rx_ring->skb;
1123 bool xdp_xmit = false;
1124 struct xdp_buff xdp;
1125
1126 xdp.rxq = &rx_ring->xdp_rxq;
1127
1128 while (likely(total_rx_packets < budget)) {
1129 struct ixgbevf_rx_buffer *rx_buffer;
1130 union ixgbe_adv_rx_desc *rx_desc;
1131 unsigned int size;
1132
1133
1134 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
1135 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
1136 cleaned_count = 0;
1137 }
1138
1139 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1140 size = le16_to_cpu(rx_desc->wb.upper.length);
1141 if (!size)
1142 break;
1143
1144
1145
1146
1147
1148 rmb();
1149
1150 rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
1151
1152
1153 if (!skb) {
1154 xdp.data = page_address(rx_buffer->page) +
1155 rx_buffer->page_offset;
1156 xdp.data_meta = xdp.data;
1157 xdp.data_hard_start = xdp.data -
1158 ixgbevf_rx_offset(rx_ring);
1159 xdp.data_end = xdp.data + size;
1160
1161 skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
1162 }
1163
1164 if (IS_ERR(skb)) {
1165 if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
1166 xdp_xmit = true;
1167 ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
1168 size);
1169 } else {
1170 rx_buffer->pagecnt_bias++;
1171 }
1172 total_rx_packets++;
1173 total_rx_bytes += size;
1174 } else if (skb) {
1175 ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1176 } else if (ring_uses_build_skb(rx_ring)) {
1177 skb = ixgbevf_build_skb(rx_ring, rx_buffer,
1178 &xdp, rx_desc);
1179 } else {
1180 skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
1181 &xdp, rx_desc);
1182 }
1183
1184
1185 if (!skb) {
1186 rx_ring->rx_stats.alloc_rx_buff_failed++;
1187 rx_buffer->pagecnt_bias++;
1188 break;
1189 }
1190
1191 ixgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
1192 cleaned_count++;
1193
1194
1195 if (ixgbevf_is_non_eop(rx_ring, rx_desc))
1196 continue;
1197
1198
1199 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
1200 skb = NULL;
1201 continue;
1202 }
1203
1204
1205 total_rx_bytes += skb->len;
1206
1207
1208
1209
1210 if ((skb->pkt_type == PACKET_BROADCAST ||
1211 skb->pkt_type == PACKET_MULTICAST) &&
1212 ether_addr_equal(rx_ring->netdev->dev_addr,
1213 eth_hdr(skb)->h_source)) {
1214 dev_kfree_skb_irq(skb);
1215 continue;
1216 }
1217
1218
1219 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
1220
1221 ixgbevf_rx_skb(q_vector, skb);
1222
1223
1224 skb = NULL;
1225
1226
1227 total_rx_packets++;
1228 }
1229
1230
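	/* place incomplete frames back on ring for completion */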
1231 rx_ring->skb = skb;
1232
1233 if (xdp_xmit) {
1234 struct ixgbevf_ring *xdp_ring =
1235 adapter->xdp_ring[rx_ring->queue_index];
1236
1237
1238
1239
1240 wmb();
1241 ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
1242 }
1243
1244 u64_stats_update_begin(&rx_ring->syncp);
1245 rx_ring->stats.packets += total_rx_packets;
1246 rx_ring->stats.bytes += total_rx_bytes;
1247 u64_stats_update_end(&rx_ring->syncp);
1248 q_vector->rx.total_packets += total_rx_packets;
1249 q_vector->rx.total_bytes += total_rx_bytes;
1250
1251 return total_rx_packets;
1252}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function cleans all Tx and Rx rings associated with a q_vector.
 **/
1262static int ixgbevf_poll(struct napi_struct *napi, int budget)
1263{
1264 struct ixgbevf_q_vector *q_vector =
1265 container_of(napi, struct ixgbevf_q_vector, napi);
1266 struct ixgbevf_adapter *adapter = q_vector->adapter;
1267 struct ixgbevf_ring *ring;
1268 int per_ring_budget, work_done = 0;
1269 bool clean_complete = true;
1270
1271 ixgbevf_for_each_ring(ring, q_vector->tx) {
1272 if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
1273 clean_complete = false;
1274 }
1275
1276 if (budget <= 0)
1277 return budget;
1278
1279
1280
1281
1282 if (q_vector->rx.count > 1)
1283 per_ring_budget = max(budget/q_vector->rx.count, 1);
1284 else
1285 per_ring_budget = budget;
1286
1287 ixgbevf_for_each_ring(ring, q_vector->rx) {
1288 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
1289 per_ring_budget);
1290 work_done += cleaned;
1291 if (cleaned >= per_ring_budget)
1292 clean_complete = false;
1293 }
1294
1295
1296 if (!clean_complete)
1297 return budget;
1298
1299
1300
1301
1302 if (likely(napi_complete_done(napi, work_done))) {
1303 if (adapter->rx_itr_setting == 1)
1304 ixgbevf_set_itr(q_vector);
1305 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
1306 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1307 ixgbevf_irq_enable_queues(adapter,
1308 BIT(q_vector->v_idx));
1309 }
1310
1311 return min(work_done, budget - 1);
1312}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
1318void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
1319{
1320 struct ixgbevf_adapter *adapter = q_vector->adapter;
1321 struct ixgbe_hw *hw = &adapter->hw;
1322 int v_idx = q_vector->v_idx;
1323 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1324
1325
1326
1327
1328 itr_reg |= IXGBE_EITR_CNT_WDIS;
1329
1330 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
1331}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
1340static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1341{
1342 struct ixgbevf_q_vector *q_vector;
1343 int q_vectors, v_idx;
1344
1345 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1346 adapter->eims_enable_mask = 0;
1347
1348
1349
1350
1351 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1352 struct ixgbevf_ring *ring;
1353
1354 q_vector = adapter->q_vector[v_idx];
1355
1356 ixgbevf_for_each_ring(ring, q_vector->rx)
1357 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1358
1359 ixgbevf_for_each_ring(ring, q_vector->tx)
1360 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1361
1362 if (q_vector->tx.ring && !q_vector->rx.ring) {
1363
1364 if (adapter->tx_itr_setting == 1)
1365 q_vector->itr = IXGBE_12K_ITR;
1366 else
1367 q_vector->itr = adapter->tx_itr_setting;
1368 } else {
1369
1370 if (adapter->rx_itr_setting == 1)
1371 q_vector->itr = IXGBE_20K_ITR;
1372 else
1373 q_vector->itr = adapter->rx_itr_setting;
1374 }
1375
1376
1377 adapter->eims_enable_mask |= BIT(v_idx);
1378
1379 ixgbevf_write_eitr(q_vector);
1380 }
1381
1382 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
1383
1384 adapter->eims_other = BIT(v_idx);
1385 adapter->eims_enable_mask |= adapter->eims_other;
1386}
1387
1388enum latency_range {
1389 lowest_latency = 0,
1390 low_latency = 1,
1391 bulk_latency = 2,
1392 latency_invalid = 255
1393};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
1408static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1409 struct ixgbevf_ring_container *ring_container)
1410{
1411 int bytes = ring_container->total_bytes;
1412 int packets = ring_container->total_packets;
1413 u32 timepassed_us;
1414 u64 bytes_perint;
1415 u8 itr_setting = ring_container->itr;
1416
1417 if (packets == 0)
1418 return;
1419
1420
1421
1422
1423
1424
1425
1426 timepassed_us = q_vector->itr >> 2;
1427 if (timepassed_us == 0)
1428 return;
1429
1430 bytes_perint = bytes / timepassed_us;
1431
1432 switch (itr_setting) {
1433 case lowest_latency:
1434 if (bytes_perint > 10)
1435 itr_setting = low_latency;
1436 break;
1437 case low_latency:
1438 if (bytes_perint > 20)
1439 itr_setting = bulk_latency;
1440 else if (bytes_perint <= 10)
1441 itr_setting = lowest_latency;
1442 break;
1443 case bulk_latency:
1444 if (bytes_perint <= 20)
1445 itr_setting = low_latency;
1446 break;
1447 }
1448
1449
1450 ring_container->total_bytes = 0;
1451 ring_container->total_packets = 0;
1452
1453
1454 ring_container->itr = itr_setting;
1455}
1456
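/* ixgbevf_set_itr - derive a new EITR value from the per-ring ITR estimates,
 * smooth it to avoid large jumps, and write it to hardware
 */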
1457static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
1458{
1459 u32 new_itr = q_vector->itr;
1460 u8 current_itr;
1461
1462 ixgbevf_update_itr(q_vector, &q_vector->tx);
1463 ixgbevf_update_itr(q_vector, &q_vector->rx);
1464
1465 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1466
1467 switch (current_itr) {
1468
1469 case lowest_latency:
1470 new_itr = IXGBE_100K_ITR;
1471 break;
1472 case low_latency:
1473 new_itr = IXGBE_20K_ITR;
1474 break;
1475 case bulk_latency:
1476 new_itr = IXGBE_12K_ITR;
1477 break;
1478 default:
1479 break;
1480 }
1481
1482 if (new_itr != q_vector->itr) {
1483
1484 new_itr = (10 * new_itr * q_vector->itr) /
1485 ((9 * new_itr) + q_vector->itr);
1486
1487
1488 q_vector->itr = new_itr;
1489
1490 ixgbevf_write_eitr(q_vector);
1491 }
1492}
1493
1494static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1495{
1496 struct ixgbevf_adapter *adapter = data;
1497 struct ixgbe_hw *hw = &adapter->hw;
1498
1499 hw->mac.get_link_status = 1;
1500
1501 ixgbevf_service_event_schedule(adapter);
1502
1503 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1504
1505 return IRQ_HANDLED;
1506}

/**
 * ixgbevf_msix_clean_rings - MSI-X queue interrupt handler
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
1513static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
1514{
1515 struct ixgbevf_q_vector *q_vector = data;
1516
1517
1518 if (q_vector->rx.ring || q_vector->tx.ring)
1519 napi_schedule_irqoff(&q_vector->napi);
1520
1521 return IRQ_HANDLED;
1522}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
1531static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1532{
1533 struct net_device *netdev = adapter->netdev;
1534 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1535 unsigned int ri = 0, ti = 0;
1536 int vector, err;
1537
1538 for (vector = 0; vector < q_vectors; vector++) {
1539 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1540 struct msix_entry *entry = &adapter->msix_entries[vector];
1541
1542 if (q_vector->tx.ring && q_vector->rx.ring) {
1543 snprintf(q_vector->name, sizeof(q_vector->name),
1544 "%s-TxRx-%u", netdev->name, ri++);
1545 ti++;
1546 } else if (q_vector->rx.ring) {
1547 snprintf(q_vector->name, sizeof(q_vector->name),
1548 "%s-rx-%u", netdev->name, ri++);
1549 } else if (q_vector->tx.ring) {
1550 snprintf(q_vector->name, sizeof(q_vector->name),
1551 "%s-tx-%u", netdev->name, ti++);
1552 } else {
1553
1554 continue;
1555 }
1556 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1557 q_vector->name, q_vector);
1558 if (err) {
1559 hw_dbg(&adapter->hw,
1560 "request_irq failed for MSIX interrupt Error: %d\n",
1561 err);
1562 goto free_queue_irqs;
1563 }
1564 }
1565
1566 err = request_irq(adapter->msix_entries[vector].vector,
1567 &ixgbevf_msix_other, 0, netdev->name, adapter);
1568 if (err) {
1569 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1570 err);
1571 goto free_queue_irqs;
1572 }
1573
1574 return 0;
1575
1576free_queue_irqs:
1577 while (vector) {
1578 vector--;
1579 free_irq(adapter->msix_entries[vector].vector,
1580 adapter->q_vector[vector]);
1581 }
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592 adapter->num_msix_vectors = 0;
1593 return err;
1594}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
1603static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1604{
1605 int err = ixgbevf_request_msix_irqs(adapter);
1606
1607 if (err)
1608 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
1609
1610 return err;
1611}
1612
1613static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1614{
1615 int i, q_vectors;
1616
1617 if (!adapter->msix_entries)
1618 return;
1619
1620 q_vectors = adapter->num_msix_vectors;
1621 i = q_vectors - 1;
1622
1623 free_irq(adapter->msix_entries[i].vector, adapter);
1624 i--;
1625
1626 for (; i >= 0; i--) {
1627
1628 if (!adapter->q_vector[i]->rx.ring &&
1629 !adapter->q_vector[i]->tx.ring)
1630 continue;
1631
1632 free_irq(adapter->msix_entries[i].vector,
1633 adapter->q_vector[i]);
1634 }
1635}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
1641static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1642{
1643 struct ixgbe_hw *hw = &adapter->hw;
1644 int i;
1645
1646 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1647 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1648 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1649
1650 IXGBE_WRITE_FLUSH(hw);
1651
1652 for (i = 0; i < adapter->num_msix_vectors; i++)
1653 synchronize_irq(adapter->msix_entries[i].vector);
1654}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
1660static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1661{
1662 struct ixgbe_hw *hw = &adapter->hw;
1663
1664 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1665 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1666 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1667}

/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
1676static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1677 struct ixgbevf_ring *ring)
1678{
1679 struct ixgbe_hw *hw = &adapter->hw;
1680 u64 tdba = ring->dma;
1681 int wait_loop = 10;
1682 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1683 u8 reg_idx = ring->reg_idx;
1684
1685
1686 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1687 IXGBE_WRITE_FLUSH(hw);
1688
1689 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1690 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1691 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1692 ring->count * sizeof(union ixgbe_adv_tx_desc));
1693
1694
1695 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1696 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1697
1698
1699 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1700 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1701 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1702
1703
1704 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1705 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1706 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1707
1708
1709 ring->next_to_clean = 0;
1710 ring->next_to_use = 0;
1711
1712
1713
1714
1715
1716 txdctl |= (8 << 16);
1717
1718
1719 txdctl |= (1u << 8) |
1720 32;
1721
1722
1723 memset(ring->tx_buffer_info, 0,
1724 sizeof(struct ixgbevf_tx_buffer) * ring->count);
1725
1726 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1727 clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
1728
1729 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1730
1731
1732 do {
1733 usleep_range(1000, 2000);
1734 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1735 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1736 if (!wait_loop)
1737 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
1738}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
1746static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1747{
1748 u32 i;
1749
1750
1751 for (i = 0; i < adapter->num_tx_queues; i++)
1752 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1753 for (i = 0; i < adapter->num_xdp_queues; i++)
1754 ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
1755}
1756
1757#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
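/* shift applied to the Rx header size to build the buffer-header-size
 * field of SRRCTL in ixgbevf_configure_srrctl() below
 */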
1758
1759static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
1760 struct ixgbevf_ring *ring, int index)
1761{
1762 struct ixgbe_hw *hw = &adapter->hw;
1763 u32 srrctl;
1764
1765 srrctl = IXGBE_SRRCTL_DROP_EN;
1766
1767 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1768 if (ring_uses_large_buffer(ring))
1769 srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1770 else
1771 srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1772 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1773
1774 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1775}
1776
1777static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1778{
1779 struct ixgbe_hw *hw = &adapter->hw;
1780
1781
1782 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1783 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1784 IXGBE_PSRTYPE_L2HDR;
1785
1786 if (adapter->num_rx_queues > 1)
1787 psrtype |= BIT(29);
1788
1789 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1790}
1791
1792#define IXGBEVF_MAX_RX_DESC_POLL 10
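/* maximum number of times the Rx queue enable/disable helpers below poll
 * RXDCTL before giving up and logging an error
 */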
1793static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1794 struct ixgbevf_ring *ring)
1795{
1796 struct ixgbe_hw *hw = &adapter->hw;
1797 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1798 u32 rxdctl;
1799 u8 reg_idx = ring->reg_idx;
1800
1801 if (IXGBE_REMOVED(hw->hw_addr))
1802 return;
1803 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1804 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1805
1806
1807 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1808
1809
1810 do {
1811 udelay(10);
1812 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1813 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1814
1815 if (!wait_loop)
1816 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1817 reg_idx);
1818}
1819
1820static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1821 struct ixgbevf_ring *ring)
1822{
1823 struct ixgbe_hw *hw = &adapter->hw;
1824 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1825 u32 rxdctl;
1826 u8 reg_idx = ring->reg_idx;
1827
1828 if (IXGBE_REMOVED(hw->hw_addr))
1829 return;
1830 do {
1831 usleep_range(1000, 2000);
1832 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1833 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1834
1835 if (!wait_loop)
1836 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1837 reg_idx);
1838}

/**
 * ixgbevf_init_rss_key - Initialize adapter RSS key
 * @adapter: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 **/
1846static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
1847{
1848 u32 *rss_key;
1849
1850 if (!adapter->rss_key) {
1851 rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
1852 if (unlikely(!rss_key))
1853 return -ENOMEM;
1854
1855 netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
1856 adapter->rss_key = rss_key;
1857 }
1858
1859 return 0;
1860}
1861
1862static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1863{
1864 struct ixgbe_hw *hw = &adapter->hw;
1865 u32 vfmrqc = 0, vfreta = 0;
1866 u16 rss_i = adapter->num_rx_queues;
1867 u8 i, j;
1868
1869
1870 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1871 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));
1872
1873 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
1874 if (j == rss_i)
1875 j = 0;
1876
1877 adapter->rss_indir_tbl[i] = j;
1878
1879 vfreta |= j << (i & 0x3) * 8;
1880 if ((i & 3) == 3) {
1881 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1882 vfreta = 0;
1883 }
1884 }
1885
1886
1887 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1888 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1889 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1890 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1891
1892 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1893
1894 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1895}
1896
1897static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1898 struct ixgbevf_ring *ring)
1899{
1900 struct ixgbe_hw *hw = &adapter->hw;
1901 union ixgbe_adv_rx_desc *rx_desc;
1902 u64 rdba = ring->dma;
1903 u32 rxdctl;
1904 u8 reg_idx = ring->reg_idx;
1905
1906
1907 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1908 ixgbevf_disable_rx_queue(adapter, ring);
1909
1910 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1911 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1912 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1913 ring->count * sizeof(union ixgbe_adv_rx_desc));
1914
1915#ifndef CONFIG_SPARC
1916
1917 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1918 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1919#else
1920 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1921 IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1922 IXGBE_DCA_RXCTRL_DATA_WRO_EN);
1923#endif
1924
1925
1926 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1927 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1928 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1929
1930
1931 memset(ring->rx_buffer_info, 0,
1932 sizeof(struct ixgbevf_rx_buffer) * ring->count);
1933
1934
1935 rx_desc = IXGBEVF_RX_DESC(ring, 0);
1936 rx_desc->wb.upper.length = 0;
1937
1938
1939 ring->next_to_clean = 0;
1940 ring->next_to_use = 0;
1941 ring->next_to_alloc = 0;
1942
1943 ixgbevf_configure_srrctl(adapter, ring, reg_idx);
1944
1945
1946 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
1947 rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
1948 IXGBE_RXDCTL_RLPML_EN);
1949
1950#if (PAGE_SIZE < 8192)
1951
1952 if (ring_uses_build_skb(ring) &&
1953 !ring_uses_large_buffer(ring))
1954 rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
1955 IXGBE_RXDCTL_RLPML_EN;
1956#endif
1957 }
1958
1959 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1960 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1961
1962 ixgbevf_rx_desc_queue_enable(adapter, ring);
1963 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1964}
1965
1966static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
1967 struct ixgbevf_ring *rx_ring)
1968{
1969 struct net_device *netdev = adapter->netdev;
1970 unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1971
1972
1973 clear_ring_build_skb_enabled(rx_ring);
1974 clear_ring_uses_large_buffer(rx_ring);
1975
1976 if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
1977 return;
1978
1979 set_ring_build_skb_enabled(rx_ring);
1980
1981 if (PAGE_SIZE < 8192) {
1982 if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
1983 return;
1984
1985 set_ring_uses_large_buffer(rx_ring);
1986 }
1987}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
1995static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1996{
1997 struct ixgbe_hw *hw = &adapter->hw;
1998 struct net_device *netdev = adapter->netdev;
1999 int i, ret;
2000
2001 ixgbevf_setup_psrtype(adapter);
2002 if (hw->mac.type >= ixgbe_mac_X550_vf)
2003 ixgbevf_setup_vfmrqc(adapter);
2004
2005 spin_lock_bh(&adapter->mbx_lock);
2006
2007 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
2008 spin_unlock_bh(&adapter->mbx_lock);
2009 if (ret)
2010 dev_err(&adapter->pdev->dev,
2011 "Failed to set MTU at %d\n", netdev->mtu);
2012
2013
2014
2015
2016 for (i = 0; i < adapter->num_rx_queues; i++) {
2017 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
2018
2019 ixgbevf_set_rx_buffer_len(adapter, rx_ring);
2020 ixgbevf_configure_rx_ring(adapter, rx_ring);
2021 }
2022}
2023
2024static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
2025 __be16 proto, u16 vid)
2026{
2027 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2028 struct ixgbe_hw *hw = &adapter->hw;
2029 int err;
2030
2031 spin_lock_bh(&adapter->mbx_lock);
2032
2033
2034 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
2035
2036 spin_unlock_bh(&adapter->mbx_lock);
2037
2038
2039 if (err == IXGBE_ERR_MBX)
2040 return -EIO;
2041
2042 if (err == IXGBE_ERR_INVALID_ARGUMENT)
2043 return -EACCES;
2044
2045 set_bit(vid, adapter->active_vlans);
2046
2047 return err;
2048}
2049
2050static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
2051 __be16 proto, u16 vid)
2052{
2053 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2054 struct ixgbe_hw *hw = &adapter->hw;
2055 int err;
2056
2057 spin_lock_bh(&adapter->mbx_lock);
2058
2059
2060 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
2061
2062 spin_unlock_bh(&adapter->mbx_lock);
2063
2064 clear_bit(vid, adapter->active_vlans);
2065
2066 return err;
2067}
2068
2069static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
2070{
2071 u16 vid;
2072
2073 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2074 ixgbevf_vlan_rx_add_vid(adapter->netdev,
2075 htons(ETH_P_8021Q), vid);
2076}
2077
2078static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
2079{
2080 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2081 struct ixgbe_hw *hw = &adapter->hw;
2082 int count = 0;
2083
2084 if ((netdev_uc_count(netdev)) > 10) {
2085 pr_err("Too many unicast filters - No Space\n");
2086 return -ENOSPC;
2087 }
2088
2089 if (!netdev_uc_empty(netdev)) {
2090 struct netdev_hw_addr *ha;
2091
2092 netdev_for_each_uc_addr(ha, netdev) {
2093 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
2094 udelay(200);
2095 }
2096 } else {
2097
2098
2099
2100 hw->mac.ops.set_uc_addr(hw, 0, NULL);
2101 }
2102
2103 return count;
2104}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and promiscuous mode.
 **/
2115static void ixgbevf_set_rx_mode(struct net_device *netdev)
2116{
2117 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2118 struct ixgbe_hw *hw = &adapter->hw;
2119 unsigned int flags = netdev->flags;
2120 int xcast_mode;
2121
2122
2123 if (flags & IFF_PROMISC)
2124 xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
2125 else if (flags & IFF_ALLMULTI)
2126 xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
2127 else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
2128 xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
2129 else
2130 xcast_mode = IXGBEVF_XCAST_MODE_NONE;
2131
2132 spin_lock_bh(&adapter->mbx_lock);
2133
2134 hw->mac.ops.update_xcast_mode(hw, xcast_mode);
2135
2136
2137 hw->mac.ops.update_mc_addr_list(hw, netdev);
2138
2139 ixgbevf_write_uc_addr_list(netdev);
2140
2141 spin_unlock_bh(&adapter->mbx_lock);
2142}
2143
2144static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
2145{
2146 int q_idx;
2147 struct ixgbevf_q_vector *q_vector;
2148 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2149
2150 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2151 q_vector = adapter->q_vector[q_idx];
2152 napi_enable(&q_vector->napi);
2153 }
2154}
2155
2156static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
2157{
2158 int q_idx;
2159 struct ixgbevf_q_vector *q_vector;
2160 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2161
2162 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2163 q_vector = adapter->q_vector[q_idx];
2164 napi_disable(&q_vector->napi);
2165 }
2166}
2167
2168static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
2169{
2170 struct ixgbe_hw *hw = &adapter->hw;
2171 unsigned int def_q = 0;
2172 unsigned int num_tcs = 0;
2173 unsigned int num_rx_queues = adapter->num_rx_queues;
2174 unsigned int num_tx_queues = adapter->num_tx_queues;
2175 int err;
2176
2177 spin_lock_bh(&adapter->mbx_lock);
2178
2179
2180 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2181
2182 spin_unlock_bh(&adapter->mbx_lock);
2183
2184 if (err)
2185 return err;
2186
2187 if (num_tcs > 1) {
2188
2189 num_tx_queues = 1;
2190
2191
2192 adapter->tx_ring[0]->reg_idx = def_q;
2193
2194
2195 num_rx_queues = num_tcs;
2196 }
2197
2198
2199 if ((adapter->num_rx_queues != num_rx_queues) ||
2200 (adapter->num_tx_queues != num_tx_queues)) {
2201
2202 hw->mbx.timeout = 0;
2203
2204
2205 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
2206 }
2207
2208 return 0;
2209}
2210
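/* ixgbevf_configure - apply DCB/queue layout, Rx mode, VLAN, IPsec and
 * ring configuration before the interface is brought up
 */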
2211static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
2212{
2213 ixgbevf_configure_dcb(adapter);
2214
2215 ixgbevf_set_rx_mode(adapter->netdev);
2216
2217 ixgbevf_restore_vlan(adapter);
2218 ixgbevf_ipsec_restore(adapter);
2219
2220 ixgbevf_configure_tx(adapter);
2221 ixgbevf_configure_rx(adapter);
2222}
2223
2224static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
2225{
2226
2227 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2228 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2229 adapter->stats.base_vfgprc;
2230 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2231 adapter->stats.base_vfgptc;
2232 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2233 adapter->stats.base_vfgorc;
2234 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2235 adapter->stats.base_vfgotc;
2236 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2237 adapter->stats.base_vfmprc;
2238 }
2239}
2240
2241static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2242{
2243 struct ixgbe_hw *hw = &adapter->hw;
2244
2245 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2246 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2247 adapter->stats.last_vfgorc |=
2248 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2249 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2250 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2251 adapter->stats.last_vfgotc |=
2252 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2253 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2254
2255 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2256 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2257 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2258 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2259 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2260}
2261
2262static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2263{
2264 struct ixgbe_hw *hw = &adapter->hw;
2265 int api[] = { ixgbe_mbox_api_14,
2266 ixgbe_mbox_api_13,
2267 ixgbe_mbox_api_12,
2268 ixgbe_mbox_api_11,
2269 ixgbe_mbox_api_10,
2270 ixgbe_mbox_api_unknown };
2271 int err, idx = 0;
2272
2273 spin_lock_bh(&adapter->mbx_lock);
2274
2275 while (api[idx] != ixgbe_mbox_api_unknown) {
2276 err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
2277 if (!err)
2278 break;
2279 idx++;
2280 }
2281
2282 spin_unlock_bh(&adapter->mbx_lock);
2283}
2284
2285static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2286{
2287 struct net_device *netdev = adapter->netdev;
2288 struct ixgbe_hw *hw = &adapter->hw;
2289
2290 ixgbevf_configure_msix(adapter);
2291
2292 spin_lock_bh(&adapter->mbx_lock);
2293
2294 if (is_valid_ether_addr(hw->mac.addr))
2295 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2296 else
2297 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2298
2299 spin_unlock_bh(&adapter->mbx_lock);
2300
2301 smp_mb__before_atomic();
2302 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2303 ixgbevf_napi_enable_all(adapter);
2304
2305
2306 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2307 ixgbevf_irq_enable(adapter);
2308
2309
2310 netif_tx_start_all_queues(netdev);
2311
2312 ixgbevf_save_reset_stats(adapter);
2313 ixgbevf_init_last_counter_stats(adapter);
2314
2315 hw->mac.get_link_status = 1;
2316 mod_timer(&adapter->service_timer, jiffies);
2317}
2318
2319void ixgbevf_up(struct ixgbevf_adapter *adapter)
2320{
2321 ixgbevf_configure(adapter);
2322
2323 ixgbevf_up_complete(adapter);
2324}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
2330static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2331{
2332 u16 i = rx_ring->next_to_clean;
2333
2334
2335 if (rx_ring->skb) {
2336 dev_kfree_skb(rx_ring->skb);
2337 rx_ring->skb = NULL;
2338 }
2339
2340
2341 while (i != rx_ring->next_to_alloc) {
2342 struct ixgbevf_rx_buffer *rx_buffer;
2343
2344 rx_buffer = &rx_ring->rx_buffer_info[i];
2345
2346
2347
2348
2349 dma_sync_single_range_for_cpu(rx_ring->dev,
2350 rx_buffer->dma,
2351 rx_buffer->page_offset,
2352 ixgbevf_rx_bufsz(rx_ring),
2353 DMA_FROM_DEVICE);
2354
2355
2356 dma_unmap_page_attrs(rx_ring->dev,
2357 rx_buffer->dma,
2358 ixgbevf_rx_pg_size(rx_ring),
2359 DMA_FROM_DEVICE,
2360 IXGBEVF_RX_DMA_ATTR);
2361
2362 __page_frag_cache_drain(rx_buffer->page,
2363 rx_buffer->pagecnt_bias);
2364
2365 i++;
2366 if (i == rx_ring->count)
2367 i = 0;
2368 }
2369
2370 rx_ring->next_to_alloc = 0;
2371 rx_ring->next_to_clean = 0;
2372 rx_ring->next_to_use = 0;
2373}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
2379static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2380{
2381 u16 i = tx_ring->next_to_clean;
2382 struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
2383
2384 while (i != tx_ring->next_to_use) {
2385 union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
2386
2387
2388 if (ring_is_xdp(tx_ring))
2389 page_frag_free(tx_buffer->data);
2390 else
2391 dev_kfree_skb_any(tx_buffer->skb);
2392
2393
2394 dma_unmap_single(tx_ring->dev,
2395 dma_unmap_addr(tx_buffer, dma),
2396 dma_unmap_len(tx_buffer, len),
2397 DMA_TO_DEVICE);
2398
2399
2400 eop_desc = tx_buffer->next_to_watch;
2401 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2402
2403
2404 while (tx_desc != eop_desc) {
2405 tx_buffer++;
2406 tx_desc++;
2407 i++;
2408 if (unlikely(i == tx_ring->count)) {
2409 i = 0;
2410 tx_buffer = tx_ring->tx_buffer_info;
2411 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
2412 }
2413
2414
2415 if (dma_unmap_len(tx_buffer, len))
2416 dma_unmap_page(tx_ring->dev,
2417 dma_unmap_addr(tx_buffer, dma),
2418 dma_unmap_len(tx_buffer, len),
2419 DMA_TO_DEVICE);
2420 }
2421
2422
2423 tx_buffer++;
2424 i++;
2425 if (unlikely(i == tx_ring->count)) {
2426 i = 0;
2427 tx_buffer = tx_ring->tx_buffer_info;
2428 }
2429 }
2430
2431
2432 tx_ring->next_to_use = 0;
2433 tx_ring->next_to_clean = 0;
2434
2435}
2436
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
2441static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2442{
2443 int i;
2444
2445 for (i = 0; i < adapter->num_rx_queues; i++)
2446 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2447}
2448
/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
2453static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2454{
2455 int i;
2456
2457 for (i = 0; i < adapter->num_tx_queues; i++)
2458 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2459 for (i = 0; i < adapter->num_xdp_queues; i++)
2460 ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
2461}
2462
2463void ixgbevf_down(struct ixgbevf_adapter *adapter)
2464{
2465 struct net_device *netdev = adapter->netdev;
2466 struct ixgbe_hw *hw = &adapter->hw;
2467 int i;
2468
 /* signal that we are down to the interrupt handler */
2470 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2471 return;
2472
2473
2474 for (i = 0; i < adapter->num_rx_queues; i++)
2475 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2476
2477 usleep_range(10000, 20000);
2478
2479 netif_tx_stop_all_queues(netdev);
2480
 /* call carrier off first to avoid false dev_watchdog timeouts */
2482 netif_carrier_off(netdev);
2483 netif_tx_disable(netdev);
2484
2485 ixgbevf_irq_disable(adapter);
2486
2487 ixgbevf_napi_disable_all(adapter);
2488
2489 del_timer_sync(&adapter->service_timer);
2490
 /* disable transmits in the hardware now that interrupts are off */
2492 for (i = 0; i < adapter->num_tx_queues; i++) {
2493 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2494
2495 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2496 IXGBE_TXDCTL_SWFLSH);
2497 }
2498
2499 for (i = 0; i < adapter->num_xdp_queues; i++) {
2500 u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
2501
2502 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2503 IXGBE_TXDCTL_SWFLSH);
2504 }
2505
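 /* skip the hardware reset if the PCI channel is offline */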
2506 if (!pci_channel_offline(adapter->pdev))
2507 ixgbevf_reset(adapter);
2508
2509 ixgbevf_clean_all_tx_rings(adapter);
2510 ixgbevf_clean_all_rx_rings(adapter);
2511}
2512
2513void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2514{
2515 WARN_ON(in_interrupt());
2516
2517 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2518 msleep(1);
2519
2520 ixgbevf_down(adapter);
2521 ixgbevf_up(adapter);
2522
2523 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2524}
2525
2526void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2527{
2528 struct ixgbe_hw *hw = &adapter->hw;
2529 struct net_device *netdev = adapter->netdev;
2530
2531 if (hw->mac.ops.reset_hw(hw)) {
2532 hw_dbg(hw, "PF still resetting\n");
2533 } else {
2534 hw->mac.ops.init_hw(hw);
2535 ixgbevf_negotiate_api(adapter);
2536 }
2537
2538 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2539 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2540 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2541 }
2542
2543 adapter->last_reset = jiffies;
2544}
2545
2546static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2547 int vectors)
2548{
2549 int vector_threshold;
2550
2551
2552
2553
2554
2555 vector_threshold = MIN_MSIX_COUNT;
2556
2557
2558
2559
2560
2561
2562 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2563 vector_threshold, vectors);
2564
2565 if (vectors < 0) {
2566 dev_err(&adapter->pdev->dev,
2567 "Unable to allocate MSI-X interrupts\n");
2568 kfree(adapter->msix_entries);
2569 adapter->msix_entries = NULL;
2570 return vectors;
2571 }
2572
2573
2574
2575
2576
2577 adapter->num_msix_vectors = vectors;
2578
2579 return 0;
2580}
2581
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 **/
2593static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2594{
2595 struct ixgbe_hw *hw = &adapter->hw;
2596 unsigned int def_q = 0;
2597 unsigned int num_tcs = 0;
2598 int err;
2599
2600
2601 adapter->num_rx_queues = 1;
2602 adapter->num_tx_queues = 1;
2603 adapter->num_xdp_queues = 0;
2604
2605 spin_lock_bh(&adapter->mbx_lock);
2606
2607
2608 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2609
2610 spin_unlock_bh(&adapter->mbx_lock);
2611
2612 if (err)
2613 return;
2614
2615
2616 if (num_tcs > 1) {
2617 adapter->num_rx_queues = num_tcs;
2618 } else {
2619 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2620
2621 switch (hw->api_version) {
2622 case ixgbe_mbox_api_11:
2623 case ixgbe_mbox_api_12:
2624 case ixgbe_mbox_api_13:
2625 case ixgbe_mbox_api_14:
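 /* XDP Tx rings come out of the same pool as the regular Tx
  * rings, so reduce the RSS queue count when the MAC cannot
  * supply extra Tx queues for XDP
  */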
2626 if (adapter->xdp_prog &&
2627 hw->mac.max_tx_queues == rss)
2628 rss = rss > 3 ? 2 : 1;
2629
2630 adapter->num_rx_queues = rss;
2631 adapter->num_tx_queues = rss;
2632 adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
2633 default:
2634 break;
2635 }
2636 }
2637}
2638
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
2646static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2647{
2648 int vector, v_budget;
2649
 /* It's easy to be greedy for MSI-X vectors, but it really
  * doesn't do us much good if we have a lot more vectors
  * than CPUs.  So be conservative and only ask for (roughly)
  * the same number of vectors as there are CPUs.
  * The default is to use pairs of vectors.
  */
2656 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2657 v_budget = min_t(int, v_budget, num_online_cpus());
2658 v_budget += NON_Q_VECTORS;
2659
2660 adapter->msix_entries = kcalloc(v_budget,
2661 sizeof(struct msix_entry), GFP_KERNEL);
2662 if (!adapter->msix_entries)
2663 return -ENOMEM;
2664
2665 for (vector = 0; vector < v_budget; vector++)
2666 adapter->msix_entries[vector].entry = vector;
2667
2668
2669
2670
2671
2672 return ixgbevf_acquire_msix_vectors(adapter, v_budget);
2673}
2674
2675static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
2676 struct ixgbevf_ring_container *head)
2677{
2678 ring->next = head->ring;
2679 head->ring = ring;
2680 head->count++;
2681}
2682
/**
 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 * @txr_count: number of Tx rings for q vector
 * @txr_idx: index of first Tx ring to assign
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: number of Rx rings for q vector
 * @rxr_idx: index of first Rx ring to assign
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
2696static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
2697 int txr_count, int txr_idx,
2698 int xdp_count, int xdp_idx,
2699 int rxr_count, int rxr_idx)
2700{
2701 struct ixgbevf_q_vector *q_vector;
2702 int reg_idx = txr_idx + xdp_idx;
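 /* Tx and XDP rings share the hardware Tx queue registers;
  * reg_idx walks that combined range across both ring types
  */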
2703 struct ixgbevf_ring *ring;
2704 int ring_count, size;
2705
2706 ring_count = txr_count + xdp_count + rxr_count;
2707 size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
2708
2709
2710 q_vector = kzalloc(size, GFP_KERNEL);
2711 if (!q_vector)
2712 return -ENOMEM;
2713
2714
2715 netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
2716
2717
2718 adapter->q_vector[v_idx] = q_vector;
2719 q_vector->adapter = adapter;
2720 q_vector->v_idx = v_idx;
2721
2722
2723 ring = q_vector->ring;
2724
2725 while (txr_count) {
2726
2727 ring->dev = &adapter->pdev->dev;
2728 ring->netdev = adapter->netdev;
2729
2730
2731 ring->q_vector = q_vector;
2732
2733
2734 ixgbevf_add_ring(ring, &q_vector->tx);
2735
2736
2737 ring->count = adapter->tx_ring_count;
2738 ring->queue_index = txr_idx;
2739 ring->reg_idx = reg_idx;
2740
2741
2742 adapter->tx_ring[txr_idx] = ring;
2743
2744
2745 txr_count--;
2746 txr_idx++;
2747 reg_idx++;
2748
2749
2750 ring++;
2751 }
2752
2753 while (xdp_count) {
2754
2755 ring->dev = &adapter->pdev->dev;
2756 ring->netdev = adapter->netdev;
2757
2758
2759 ring->q_vector = q_vector;
2760
2761
2762 ixgbevf_add_ring(ring, &q_vector->tx);
2763
2764
2765 ring->count = adapter->tx_ring_count;
2766 ring->queue_index = xdp_idx;
2767 ring->reg_idx = reg_idx;
2768 set_ring_xdp(ring);
2769
2770
2771 adapter->xdp_ring[xdp_idx] = ring;
2772
2773
2774 xdp_count--;
2775 xdp_idx++;
2776 reg_idx++;
2777
2778
2779 ring++;
2780 }
2781
2782 while (rxr_count) {
2783
2784 ring->dev = &adapter->pdev->dev;
2785 ring->netdev = adapter->netdev;
2786
2787
2788 ring->q_vector = q_vector;
2789
2790
2791 ixgbevf_add_ring(ring, &q_vector->rx);
2792
2793
2794 ring->count = adapter->rx_ring_count;
2795 ring->queue_index = rxr_idx;
2796 ring->reg_idx = rxr_idx;
2797
2798
2799 adapter->rx_ring[rxr_idx] = ring;
2800
2801
2802 rxr_count--;
2803 rxr_idx++;
2804
2805
2806 ring++;
2807 }
2808
2809 return 0;
2810}
2811
/**
 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure
 * @v_idx: index of vector in adapter struct
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
2821static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
2822{
2823 struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
2824 struct ixgbevf_ring *ring;
2825
2826 ixgbevf_for_each_ring(ring, q_vector->tx) {
2827 if (ring_is_xdp(ring))
2828 adapter->xdp_ring[ring->queue_index] = NULL;
2829 else
2830 adapter->tx_ring[ring->queue_index] = NULL;
2831 }
2832
2833 ixgbevf_for_each_ring(ring, q_vector->rx)
2834 adapter->rx_ring[ring->queue_index] = NULL;
2835
2836 adapter->q_vector[v_idx] = NULL;
2837 netif_napi_del(&q_vector->napi);
2838
2839
2840
2841
2842 kfree_rcu(q_vector, rcu);
2843}
2844
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
2852static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2853{
2854 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2855 int rxr_remaining = adapter->num_rx_queues;
2856 int txr_remaining = adapter->num_tx_queues;
2857 int xdp_remaining = adapter->num_xdp_queues;
2858 int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
2859 int err;
2860
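 /* if there are enough vectors, give each Rx ring its own vector
  * first; Tx and XDP rings are distributed over the remainder below
  */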
2861 if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
2862 for (; rxr_remaining; v_idx++, q_vectors--) {
2863 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2864
2865 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2866 0, 0, 0, 0, rqpv, rxr_idx);
2867 if (err)
2868 goto err_out;
2869
2870
2871 rxr_remaining -= rqpv;
2872 rxr_idx += rqpv;
2873 }
2874 }
2875
2876 for (; q_vectors; v_idx++, q_vectors--) {
2877 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
2878 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
2879 int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors);
2880
2881 err = ixgbevf_alloc_q_vector(adapter, v_idx,
2882 tqpv, txr_idx,
2883 xqpv, xdp_idx,
2884 rqpv, rxr_idx);
2885
2886 if (err)
2887 goto err_out;
2888
2889
2890 rxr_remaining -= rqpv;
2891 rxr_idx += rqpv;
2892 txr_remaining -= tqpv;
2893 txr_idx += tqpv;
2894 xdp_remaining -= xqpv;
2895 xdp_idx += xqpv;
2896 }
2897
2898 return 0;
2899
2900err_out:
2901 while (v_idx) {
2902 v_idx--;
2903 ixgbevf_free_q_vector(adapter, v_idx);
2904 }
2905
2906 return -ENOMEM;
2907}
2908
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
2917static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2918{
2919 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2920
2921 while (q_vectors) {
2922 q_vectors--;
2923 ixgbevf_free_q_vector(adapter, q_vectors);
2924 }
2925}
2926
/**
 * ixgbevf_reset_interrupt_capability - Reset MSI-X setup
 * @adapter: board private structure
 **/
2932static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2933{
2934 if (!adapter->msix_entries)
2935 return;
2936
2937 pci_disable_msix(adapter->pdev);
2938 kfree(adapter->msix_entries);
2939 adapter->msix_entries = NULL;
2940}
2941
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSI-X is supported and init
 * @adapter: board private structure to initialize
 **/
2947static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2948{
2949 int err;
2950
2951
2952 ixgbevf_set_num_queues(adapter);
2953
2954 err = ixgbevf_set_interrupt_capability(adapter);
2955 if (err) {
2956 hw_dbg(&adapter->hw,
2957 "Unable to setup interrupt capabilities\n");
2958 goto err_set_interrupt;
2959 }
2960
2961 err = ixgbevf_alloc_q_vectors(adapter);
2962 if (err) {
2963 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2964 goto err_alloc_q_vectors;
2965 }
2966
2967 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n",
2968 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
2969 adapter->num_rx_queues, adapter->num_tx_queues,
2970 adapter->num_xdp_queues);
2971
2972 set_bit(__IXGBEVF_DOWN, &adapter->state);
2973
2974 return 0;
2975err_alloc_q_vectors:
2976 ixgbevf_reset_interrupt_capability(adapter);
2977err_set_interrupt:
2978 return err;
2979}
2980
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the
 * structure to pre-load conditions.
 **/
2988static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2989{
2990 adapter->num_tx_queues = 0;
2991 adapter->num_xdp_queues = 0;
2992 adapter->num_rx_queues = 0;
2993
2994 ixgbevf_free_q_vectors(adapter);
2995 ixgbevf_reset_interrupt_capability(adapter);
2996}
2997
/**
 * ixgbevf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
3006static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
3007{
3008 struct ixgbe_hw *hw = &adapter->hw;
3009 struct pci_dev *pdev = adapter->pdev;
3010 struct net_device *netdev = adapter->netdev;
3011 int err;
3012
3013
3014 hw->vendor_id = pdev->vendor;
3015 hw->device_id = pdev->device;
3016 hw->revision_id = pdev->revision;
3017 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3018 hw->subsystem_device_id = pdev->subsystem_device;
3019
3020 hw->mbx.ops.init_params(hw);
3021
3022 if (hw->mac.type >= ixgbe_mac_X550_vf) {
3023 err = ixgbevf_init_rss_key(adapter);
3024 if (err)
3025 goto out;
3026 }
3027
 /* assume legacy case in which PF would only give VF 2 queues */
3029 hw->mac.max_tx_queues = 2;
3030 hw->mac.max_rx_queues = 2;
3031
3032
3033 spin_lock_init(&adapter->mbx_lock);
3034
3035 err = hw->mac.ops.reset_hw(hw);
3036 if (err) {
3037 dev_info(&pdev->dev,
3038 "PF still in reset state. Is the PF interface up?\n");
3039 } else {
3040 err = hw->mac.ops.init_hw(hw);
3041 if (err) {
3042 pr_err("init_hw failed: %d\n", err);
3043 goto out;
3044 }
3045 ixgbevf_negotiate_api(adapter);
3046 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
3047 if (err)
3048 dev_info(&pdev->dev, "Error reading MAC address\n");
3049 else if (is_zero_ether_addr(adapter->hw.mac.addr))
3050 dev_info(&pdev->dev,
3051 "MAC address not assigned by administrator.\n");
3052 ether_addr_copy(netdev->dev_addr, hw->mac.addr);
3053 }
3054
3055 if (!is_valid_ether_addr(netdev->dev_addr)) {
3056 dev_info(&pdev->dev, "Assigning random MAC address\n");
3057 eth_hw_addr_random(netdev);
3058 ether_addr_copy(hw->mac.addr, netdev->dev_addr);
3059 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
3060 }
3061
3062
3063 adapter->rx_itr_setting = 1;
3064 adapter->tx_itr_setting = 1;
3065
3066
3067 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
3068 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
3069
3070 set_bit(__IXGBEVF_DOWN, &adapter->state);
3071 return 0;
3072
3073out:
3074 return err;
3075}
3076
3077#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
3078 { \
3079 u32 current_counter = IXGBE_READ_REG(hw, reg); \
3080 if (current_counter < last_counter) \
3081 counter += 0x100000000LL; \
3082 last_counter = current_counter; \
3083 counter &= 0xFFFFFFFF00000000LL; \
3084 counter |= current_counter; \
3085 }
3086
3087#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
3088 { \
3089 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
3090 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
3091 u64 current_counter = (current_counter_msb << 32) | \
3092 current_counter_lsb; \
3093 if (current_counter < last_counter) \
3094 counter += 0x1000000000LL; \
3095 last_counter = current_counter; \
3096 counter &= 0xFFFFFFF000000000LL; \
3097 counter |= current_counter; \
3098 }
3099
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
3103void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
3104{
3105 struct ixgbe_hw *hw = &adapter->hw;
3106 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
3107 u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
3108 int i;
3109
3110 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3111 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3112 return;
3113
3114 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3115 adapter->stats.vfgprc);
3116 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3117 adapter->stats.vfgptc);
3118 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3119 adapter->stats.last_vfgorc,
3120 adapter->stats.vfgorc);
3121 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3122 adapter->stats.last_vfgotc,
3123 adapter->stats.vfgotc);
3124 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3125 adapter->stats.vfmprc);
3126
3127 for (i = 0; i < adapter->num_rx_queues; i++) {
3128 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
3129
3130 hw_csum_rx_error += rx_ring->rx_stats.csum_err;
3131 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
3132 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
3133 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
3134 }
3135
3136 adapter->hw_csum_rx_error = hw_csum_rx_error;
3137 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
3138 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
3139 adapter->alloc_rx_page = alloc_rx_page;
3140}
3141
/**
 * ixgbevf_service_timer - Timer Call-back
 * @t: pointer to timer_list struct
 **/
3146static void ixgbevf_service_timer(struct timer_list *t)
3147{
3148 struct ixgbevf_adapter *adapter = from_timer(adapter, t,
3149 service_timer);
3150
3151
3152 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
3153
3154 ixgbevf_service_event_schedule(adapter);
3155}
3156
3157static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
3158{
3159 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
3160 return;
3161
3162 rtnl_lock();
3163
3164 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3165 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
3166 test_bit(__IXGBEVF_RESETTING, &adapter->state)) {
3167 rtnl_unlock();
3168 return;
3169 }
3170
3171 adapter->tx_timeout_count++;
3172
3173 ixgbevf_reinit_locked(adapter);
3174 rtnl_unlock();
3175}
3176
/**
 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for Tx hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/
3186static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
3187{
3188 struct ixgbe_hw *hw = &adapter->hw;
3189 u32 eics = 0;
3190 int i;
3191
3192
3193 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3194 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3195 return;
3196
3197
3198 if (netif_carrier_ok(adapter->netdev)) {
3199 for (i = 0; i < adapter->num_tx_queues; i++)
3200 set_check_for_tx_hang(adapter->tx_ring[i]);
3201 for (i = 0; i < adapter->num_xdp_queues; i++)
3202 set_check_for_tx_hang(adapter->xdp_ring[i]);
3203 }
3204
3205
3206 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
3207 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
3208
3209 if (qv->rx.ring || qv->tx.ring)
3210 eics |= BIT(i);
3211 }
3212
 /* Cause software interrupt to ensure rings are cleaned */
3214 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
3215}
3216
/**
 * ixgbevf_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
3221static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
3222{
3223 struct ixgbe_hw *hw = &adapter->hw;
3224 u32 link_speed = adapter->link_speed;
3225 bool link_up = adapter->link_up;
3226 s32 err;
3227
3228 spin_lock_bh(&adapter->mbx_lock);
3229
3230 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3231
3232 spin_unlock_bh(&adapter->mbx_lock);
3233
 /* if check for link returns error we will need to reset */
3235 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
3236 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
3237 link_up = false;
3238 }
3239
3240 adapter->link_up = link_up;
3241 adapter->link_speed = link_speed;
3242}
3243
/**
 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
 *	print the link up message
 * @adapter: pointer to the device adapter structure
 **/
3249static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
3250{
3251 struct net_device *netdev = adapter->netdev;
3252
3253
3254 if (netif_carrier_ok(netdev))
3255 return;
3256
3257 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
3258 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
3259 "10 Gbps" :
3260 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
3261 "1 Gbps" :
3262 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
3263 "100 Mbps" :
3264 "unknown speed");
3265
3266 netif_carrier_on(netdev);
3267}
3268
/**
 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
 *	print the link down message
 * @adapter: pointer to the adapter structure
 **/
3274static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
3275{
3276 struct net_device *netdev = adapter->netdev;
3277
3278 adapter->link_speed = 0;
3279
3280
3281 if (!netif_carrier_ok(netdev))
3282 return;
3283
3284 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
3285
3286 netif_carrier_off(netdev);
3287}
3288
/**
 * ixgbevf_watchdog_subtask - worker thread to bring link up
 * @adapter: board private structure
 **/
3293static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
3294{
3295
3296 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3297 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3298 return;
3299
3300 ixgbevf_watchdog_update_link(adapter);
3301
3302 if (adapter->link_up)
3303 ixgbevf_watchdog_link_is_up(adapter);
3304 else
3305 ixgbevf_watchdog_link_is_down(adapter);
3306
3307 ixgbevf_update_stats(adapter);
3308}
3309
/**
 * ixgbevf_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
3314static void ixgbevf_service_task(struct work_struct *work)
3315{
3316 struct ixgbevf_adapter *adapter = container_of(work,
3317 struct ixgbevf_adapter,
3318 service_task);
3319 struct ixgbe_hw *hw = &adapter->hw;
3320
3321 if (IXGBE_REMOVED(hw->hw_addr)) {
3322 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
3323 rtnl_lock();
3324 ixgbevf_down(adapter);
3325 rtnl_unlock();
3326 }
3327 return;
3328 }
3329
3330 ixgbevf_queue_reset_subtask(adapter);
3331 ixgbevf_reset_subtask(adapter);
3332 ixgbevf_watchdog_subtask(adapter);
3333 ixgbevf_check_hang_subtask(adapter);
3334
3335 ixgbevf_service_event_complete(adapter);
3336}
3337
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
3344void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
3345{
3346 ixgbevf_clean_tx_ring(tx_ring);
3347
3348 vfree(tx_ring->tx_buffer_info);
3349 tx_ring->tx_buffer_info = NULL;
3350
3351
3352 if (!tx_ring->desc)
3353 return;
3354
3355 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
3356 tx_ring->dma);
3357
3358 tx_ring->desc = NULL;
3359}
3360
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
3367static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
3368{
3369 int i;
3370
3371 for (i = 0; i < adapter->num_tx_queues; i++)
3372 if (adapter->tx_ring[i]->desc)
3373 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3374 for (i = 0; i < adapter->num_xdp_queues; i++)
3375 if (adapter->xdp_ring[i]->desc)
3376 ixgbevf_free_tx_resources(adapter->xdp_ring[i]);
3377}
3378
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
3385int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
3386{
3387 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
3388 int size;
3389
3390 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
3391 tx_ring->tx_buffer_info = vmalloc(size);
3392 if (!tx_ring->tx_buffer_info)
3393 goto err;
3394
3395 u64_stats_init(&tx_ring->syncp);
3396
 /* round up to nearest 4K */
3398 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
3399 tx_ring->size = ALIGN(tx_ring->size, 4096);
3400
3401 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
3402 &tx_ring->dma, GFP_KERNEL);
3403 if (!tx_ring->desc)
3404 goto err;
3405
3406 return 0;
3407
3408err:
3409 vfree(tx_ring->tx_buffer_info);
3410 tx_ring->tx_buffer_info = NULL;
3411 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
3412 return -ENOMEM;
3413}
3414
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
3425static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3426{
3427 int i, j = 0, err = 0;
3428
3429 for (i = 0; i < adapter->num_tx_queues; i++) {
3430 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3431 if (!err)
3432 continue;
3433 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3434 goto err_setup_tx;
3435 }
3436
3437 for (j = 0; j < adapter->num_xdp_queues; j++) {
3438 err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]);
3439 if (!err)
3440 continue;
3441 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
3442 goto err_setup_tx;
3443 }
3444
3445 return 0;
3446err_setup_tx:
3447
3448 while (j--)
3449 ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
3450 while (i--)
3451 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
3452
3453 return err;
3454}
3455
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
3463int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
3464 struct ixgbevf_ring *rx_ring)
3465{
3466 int size;
3467
3468 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3469 rx_ring->rx_buffer_info = vmalloc(size);
3470 if (!rx_ring->rx_buffer_info)
3471 goto err;
3472
3473 u64_stats_init(&rx_ring->syncp);
3474
3475
3476 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3477 rx_ring->size = ALIGN(rx_ring->size, 4096);
3478
3479 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3480 &rx_ring->dma, GFP_KERNEL);
3481
3482 if (!rx_ring->desc)
3483 goto err;
3484
3485
3486 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
3487 rx_ring->queue_index) < 0)
3488 goto err;
3489
3490 rx_ring->xdp_prog = adapter->xdp_prog;
3491
3492 return 0;
3493err:
3494 vfree(rx_ring->rx_buffer_info);
3495 rx_ring->rx_buffer_info = NULL;
3496 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3497 return -ENOMEM;
3498}
3499
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
3510static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3511{
3512 int i, err = 0;
3513
3514 for (i = 0; i < adapter->num_rx_queues; i++) {
3515 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
3516 if (!err)
3517 continue;
3518 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3519 goto err_setup_rx;
3520 }
3521
3522 return 0;
3523err_setup_rx:
3524
3525 while (i--)
3526 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3527 return err;
3528}
3529
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
3536void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3537{
3538 ixgbevf_clean_rx_ring(rx_ring);
3539
3540 rx_ring->xdp_prog = NULL;
3541 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
3542 vfree(rx_ring->rx_buffer_info);
3543 rx_ring->rx_buffer_info = NULL;
3544
3545 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3546 rx_ring->dma);
3547
3548 rx_ring->desc = NULL;
3549}
3550
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
3557static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3558{
3559 int i;
3560
3561 for (i = 0; i < adapter->num_rx_queues; i++)
3562 if (adapter->rx_ring[i]->desc)
3563 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3564}
3565
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
3578int ixgbevf_open(struct net_device *netdev)
3579{
3580 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3581 struct ixgbe_hw *hw = &adapter->hw;
3582 int err;
3583
 /* A previous failure to open the device because of a lack of
  * available MSI-X vector resources may have reset the number
  * of MSI-X vectors to zero.  The only way to recover is to
  * unload/reload the driver and hope that the system has been
  * able to recover some MSI-X vector resources.
  */
3590 if (!adapter->num_msix_vectors)
3591 return -ENOMEM;
3592
3593 if (hw->adapter_stopped) {
3594 ixgbevf_reset(adapter);
3595
3596
3597
3598 if (hw->adapter_stopped) {
3599 err = IXGBE_ERR_MBX;
3600 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3601 goto err_setup_reset;
3602 }
3603 }
3604
3605
3606 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3607 return -EBUSY;
3608
3609 netif_carrier_off(netdev);
3610
3611
3612 err = ixgbevf_setup_all_tx_resources(adapter);
3613 if (err)
3614 goto err_setup_tx;
3615
3616
3617 err = ixgbevf_setup_all_rx_resources(adapter);
3618 if (err)
3619 goto err_setup_rx;
3620
3621 ixgbevf_configure(adapter);
3622
3623 err = ixgbevf_request_irq(adapter);
3624 if (err)
3625 goto err_req_irq;
3626
3627
3628 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
3629 if (err)
3630 goto err_set_queues;
3631
3632 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
3633 if (err)
3634 goto err_set_queues;
3635
3636 ixgbevf_up_complete(adapter);
3637
3638 return 0;
3639
3640err_set_queues:
3641 ixgbevf_free_irq(adapter);
3642err_req_irq:
3643 ixgbevf_free_all_rx_resources(adapter);
3644err_setup_rx:
3645 ixgbevf_free_all_tx_resources(adapter);
3646err_setup_tx:
3647 ixgbevf_reset(adapter);
3648err_setup_reset:
3649
3650 return err;
3651}
3652
/**
 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
 * @adapter: the private adapter struct
 *
 * This function should contain the necessary work common to both suspending
 * and closing of the device.
 **/
3660static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
3661{
3662 ixgbevf_down(adapter);
3663 ixgbevf_free_irq(adapter);
3664 ixgbevf_free_all_tx_resources(adapter);
3665 ixgbevf_free_all_rx_resources(adapter);
3666}
3667
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  All transmit and receive resources are freed
 * after the hardware is stopped.
 **/
3679int ixgbevf_close(struct net_device *netdev)
3680{
3681 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3682
3683 if (netif_device_present(netdev))
3684 ixgbevf_close_suspend(adapter);
3685
3686 return 0;
3687}
3688
3689static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3690{
3691 struct net_device *dev = adapter->netdev;
3692
3693 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
3694 &adapter->state))
3695 return;
3696
3697
3698 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3699 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3700 return;
3701
 /* Hardware has to reinitialize queues and interrupts to
  * match the new packet buffer alignment, and it is not
  * flexible enough to do this dynamically.
  */
3706 rtnl_lock();
3707
3708 if (netif_running(dev))
3709 ixgbevf_close(dev);
3710
3711 ixgbevf_clear_interrupt_scheme(adapter);
3712 ixgbevf_init_interrupt_scheme(adapter);
3713
3714 if (netif_running(dev))
3715 ixgbevf_open(dev);
3716
3717 rtnl_unlock();
3718}
3719
3720static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3721 u32 vlan_macip_lens, u32 fceof_saidx,
3722 u32 type_tucmd, u32 mss_l4len_idx)
3723{
3724 struct ixgbe_adv_tx_context_desc *context_desc;
3725 u16 i = tx_ring->next_to_use;
3726
3727 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3728
3729 i++;
3730 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 /* set bits to identify this as an advanced context descriptor */
3733 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3734
3735 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3736 context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
3737 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3738 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3739}
3740
3741static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3742 struct ixgbevf_tx_buffer *first,
3743 u8 *hdr_len,
3744 struct ixgbevf_ipsec_tx_data *itd)
3745{
3746 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
3747 struct sk_buff *skb = first->skb;
3748 union {
3749 struct iphdr *v4;
3750 struct ipv6hdr *v6;
3751 unsigned char *hdr;
3752 } ip;
3753 union {
3754 struct tcphdr *tcp;
3755 unsigned char *hdr;
3756 } l4;
3757 u32 paylen, l4_offset;
3758 u32 fceof_saidx = 0;
3759 int err;
3760
3761 if (skb->ip_summed != CHECKSUM_PARTIAL)
3762 return 0;
3763
3764 if (!skb_is_gso(skb))
3765 return 0;
3766
3767 err = skb_cow_head(skb, 0);
3768 if (err < 0)
3769 return err;
3770
3771 if (eth_p_mpls(first->protocol))
3772 ip.hdr = skb_inner_network_header(skb);
3773 else
3774 ip.hdr = skb_network_header(skb);
3775 l4.hdr = skb_checksum_start(skb);
3776
3777
3778 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3779
3780
3781 if (ip.v4->version == 4) {
3782 unsigned char *csum_start = skb_checksum_start(skb);
3783 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
3784 int len = csum_start - trans_start;
3785
 /* The IP header will have to cancel out any data that is
  * not a part of the outer IP header, so set it to a reverse
  * csum if needed, else initialize the check field to 0.
  */
3790 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
3791 csum_fold(csum_partial(trans_start,
3792 len, 0)) : 0;
3793 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3794
3795 ip.v4->tot_len = 0;
3796 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3797 IXGBE_TX_FLAGS_CSUM |
3798 IXGBE_TX_FLAGS_IPV4;
3799 } else {
3800 ip.v6->payload_len = 0;
3801 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3802 IXGBE_TX_FLAGS_CSUM;
3803 }
3804
3805
3806 l4_offset = l4.hdr - skb->data;
3807
3808
3809 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3810
3811
3812 paylen = skb->len - l4_offset;
3813 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
3814
3815
3816 first->gso_segs = skb_shinfo(skb)->gso_segs;
3817 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3818
3819
3820 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
3821 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3822 mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
3823
3824 fceof_saidx |= itd->pfsa;
3825 type_tucmd |= itd->flags | itd->trailer_len;
3826
3827
3828 vlan_macip_lens = l4.hdr - ip.hdr;
3829 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
3830 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3831
3832 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
3833 mss_l4len_idx);
3834
3835 return 1;
3836}
3837
3838static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
3839{
3840 unsigned int offset = 0;
3841
3842 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
3843
3844 return offset == skb_checksum_start_offset(skb);
3845}
3846
3847static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3848 struct ixgbevf_tx_buffer *first,
3849 struct ixgbevf_ipsec_tx_data *itd)
3850{
3851 struct sk_buff *skb = first->skb;
3852 u32 vlan_macip_lens = 0;
3853 u32 fceof_saidx = 0;
3854 u32 type_tucmd = 0;
3855
3856 if (skb->ip_summed != CHECKSUM_PARTIAL)
3857 goto no_csum;
3858
3859 switch (skb->csum_offset) {
3860 case offsetof(struct tcphdr, check):
3861 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 /* fall through */
3863 case offsetof(struct udphdr, check):
3864 break;
3865 case offsetof(struct sctphdr, checksum):
3866
3867 if (((first->protocol == htons(ETH_P_IP)) &&
3868 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
3869 ((first->protocol == htons(ETH_P_IPV6)) &&
3870 ixgbevf_ipv6_csum_is_sctp(skb))) {
3871 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3872 break;
3873 }
 /* fall through */
3875 default:
3876 skb_checksum_help(skb);
3877 goto no_csum;
3878 }
3879
3880 if (first->protocol == htons(ETH_P_IP))
3881 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3882
3883
3884 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3885 vlan_macip_lens = skb_checksum_start_offset(skb) -
3886 skb_network_offset(skb);
3887no_csum:
3888
3889 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3890 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3891
3892 fceof_saidx |= itd->pfsa;
3893 type_tucmd |= itd->flags | itd->trailer_len;
3894
3895 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3896 fceof_saidx, type_tucmd, 0);
3897}
3898
3899static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3900{
3901
3902 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3903 IXGBE_ADVTXD_DCMD_IFCS |
3904 IXGBE_ADVTXD_DCMD_DEXT);
3905
3906
3907 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3908 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3909
3910
3911 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3912 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3913
3914 return cmd_type;
3915}
3916
3917static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3918 u32 tx_flags, unsigned int paylen)
3919{
3920 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3921
3922
3923 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3924 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3925
3926
3927 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3928 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3929
3930
3931 if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
3932 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);
3933
3934
3935 if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
3936 olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
3937
 /* Check Context must be set if Tx switch is enabled, which it
  * always is for the case where virtual functions are running.
  */
3941 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3942
3943 tx_desc->read.olinfo_status = olinfo_status;
3944}
3945
3946static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3947 struct ixgbevf_tx_buffer *first,
3948 const u8 hdr_len)
3949{
3950 struct sk_buff *skb = first->skb;
3951 struct ixgbevf_tx_buffer *tx_buffer;
3952 union ixgbe_adv_tx_desc *tx_desc;
3953 struct skb_frag_struct *frag;
3954 dma_addr_t dma;
3955 unsigned int data_len, size;
3956 u32 tx_flags = first->tx_flags;
3957 __le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3958 u16 i = tx_ring->next_to_use;
3959
3960 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3961
3962 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
3963
3964 size = skb_headlen(skb);
3965 data_len = skb->data_len;
3966
3967 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3968
3969 tx_buffer = first;
3970
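 /* map the linear skb data first, then each page fragment; a buffer
  * larger than IXGBE_MAX_DATA_PER_TXD is split across descriptors
  */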
3971 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3972 if (dma_mapping_error(tx_ring->dev, dma))
3973 goto dma_error;
3974
3975
3976 dma_unmap_len_set(tx_buffer, len, size);
3977 dma_unmap_addr_set(tx_buffer, dma, dma);
3978
3979 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3980
3981 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3982 tx_desc->read.cmd_type_len =
3983 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3984
3985 i++;
3986 tx_desc++;
3987 if (i == tx_ring->count) {
3988 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3989 i = 0;
3990 }
3991 tx_desc->read.olinfo_status = 0;
3992
3993 dma += IXGBE_MAX_DATA_PER_TXD;
3994 size -= IXGBE_MAX_DATA_PER_TXD;
3995
3996 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3997 }
3998
3999 if (likely(!data_len))
4000 break;
4001
4002 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
4003
4004 i++;
4005 tx_desc++;
4006 if (i == tx_ring->count) {
4007 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
4008 i = 0;
4009 }
4010 tx_desc->read.olinfo_status = 0;
4011
4012 size = skb_frag_size(frag);
4013 data_len -= size;
4014
4015 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
4016 DMA_TO_DEVICE);
4017
4018 tx_buffer = &tx_ring->tx_buffer_info[i];
4019 }
4020
4021
4022 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
4023 tx_desc->read.cmd_type_len = cmd_type;
4024
4025
4026 first->time_stamp = jiffies;
4027
4028 skb_tx_timestamp(skb);
4029
 /* Force memory writes to complete before letting h/w know there
  * are new descriptors to fetch.  (Only applicable for weak-ordered
  * memory model archs, such as IA-64).
  *
  * We also need this memory barrier (wmb) to make certain all of the
  * status bits have been updated before next_to_watch is written.
  */
4037 wmb();
4038
4039
4040 first->next_to_watch = tx_desc;
4041
4042 i++;
4043 if (i == tx_ring->count)
4044 i = 0;
4045
4046 tx_ring->next_to_use = i;
4047
4048
4049 ixgbevf_write_tail(tx_ring, i);
4050
4051 return;
4052dma_error:
4053 dev_err(tx_ring->dev, "TX DMA map failed\n");
4054 tx_buffer = &tx_ring->tx_buffer_info[i];
4055
4056
4057 while (tx_buffer != first) {
4058 if (dma_unmap_len(tx_buffer, len))
4059 dma_unmap_page(tx_ring->dev,
4060 dma_unmap_addr(tx_buffer, dma),
4061 dma_unmap_len(tx_buffer, len),
4062 DMA_TO_DEVICE);
4063 dma_unmap_len_set(tx_buffer, len, 0);
4064
4065 if (i-- == 0)
4066 i += tx_ring->count;
4067 tx_buffer = &tx_ring->tx_buffer_info[i];
4068 }
4069
4070 if (dma_unmap_len(tx_buffer, len))
4071 dma_unmap_single(tx_ring->dev,
4072 dma_unmap_addr(tx_buffer, dma),
4073 dma_unmap_len(tx_buffer, len),
4074 DMA_TO_DEVICE);
4075 dma_unmap_len_set(tx_buffer, len, 0);
4076
4077 dev_kfree_skb_any(tx_buffer->skb);
4078 tx_buffer->skb = NULL;
4079
4080 tx_ring->next_to_use = i;
4081}
4082
4083static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4084{
4085 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
4086
 /* Memory barrier so the queue-stop above is visible before we
  * re-check the free descriptor count below.
  */
4090 smp_mb();
4091
 /* We need to check again in case another CPU has just
  * made room available.
  */
4095 if (likely(ixgbevf_desc_unused(tx_ring) < size))
4096 return -EBUSY;
4097
4098
4099 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
4100 ++tx_ring->tx_stats.restart_queue;
4101
4102 return 0;
4103}
4104
4105static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
4106{
4107 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
4108 return 0;
4109 return __ixgbevf_maybe_stop_tx(tx_ring, size);
4110}
4111
4112static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
4113 struct ixgbevf_ring *tx_ring)
4114{
4115 struct ixgbevf_tx_buffer *first;
4116 int tso;
4117 u32 tx_flags = 0;
4118 u16 count = TXD_USE_COUNT(skb_headlen(skb));
4119 struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 };
4120#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4121 unsigned short f;
4122#endif
4123 u8 hdr_len = 0;
4124 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
4125
4126 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
4127 dev_kfree_skb_any(skb);
4128 return NETDEV_TX_OK;
4129 }
4130
 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
  *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
  *       + 2 desc gap to keep tail from touching head,
  *       + 1 desc for context descriptor,
  * otherwise try again next time
  */
4137#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
4138 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
4139 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
4140#else
4141 count += skb_shinfo(skb)->nr_frags;
4142#endif
4143 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
4144 tx_ring->tx_stats.tx_busy++;
4145 return NETDEV_TX_BUSY;
4146 }
4147
4148
4149 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4150 first->skb = skb;
4151 first->bytecount = skb->len;
4152 first->gso_segs = 1;
4153
4154 if (skb_vlan_tag_present(skb)) {
4155 tx_flags |= skb_vlan_tag_get(skb);
4156 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
4157 tx_flags |= IXGBE_TX_FLAGS_VLAN;
4158 }
4159
4160
4161 first->tx_flags = tx_flags;
4162 first->protocol = vlan_get_protocol(skb);
4163
4164#ifdef CONFIG_IXGBEVF_IPSEC
4165 if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
4166 goto out_drop;
4167#endif
4168 tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
4169 if (tso < 0)
4170 goto out_drop;
4171 else if (!tso)
4172 ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
4173
4174 ixgbevf_tx_map(tx_ring, first, hdr_len);
4175
4176 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
4177
4178 return NETDEV_TX_OK;
4179
4180out_drop:
4181 dev_kfree_skb_any(first->skb);
4182 first->skb = NULL;
4183
4184 return NETDEV_TX_OK;
4185}
4186
4187static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4188{
4189 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4190 struct ixgbevf_ring *tx_ring;
4191
4192 if (skb->len <= 0) {
4193 dev_kfree_skb_any(skb);
4194 return NETDEV_TX_OK;
4195 }
4196
 /* The minimum packet size for olinfo paylen is 17 so pad the skb
  * in order to meet this minimum size requirement.
  */
4200 if (skb->len < 17) {
4201 if (skb_padto(skb, 17))
4202 return NETDEV_TX_OK;
4203 skb->len = 17;
4204 }
4205
4206 tx_ring = adapter->tx_ring[skb->queue_mapping];
4207 return ixgbevf_xmit_frame_ring(skb, tx_ring);
4208}
4209
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
4217static int ixgbevf_set_mac(struct net_device *netdev, void *p)
4218{
4219 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4220 struct ixgbe_hw *hw = &adapter->hw;
4221 struct sockaddr *addr = p;
4222 int err;
4223
4224 if (!is_valid_ether_addr(addr->sa_data))
4225 return -EADDRNOTAVAIL;
4226
4227 spin_lock_bh(&adapter->mbx_lock);
4228
4229 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
4230
4231 spin_unlock_bh(&adapter->mbx_lock);
4232
4233 if (err)
4234 return -EPERM;
4235
4236 ether_addr_copy(hw->mac.addr, addr->sa_data);
4237 ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
4238 ether_addr_copy(netdev->dev_addr, addr->sa_data);
4239
4240 return 0;
4241}
4242
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
4250static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
4251{
4252 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4253 struct ixgbe_hw *hw = &adapter->hw;
4254 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4255 int ret;
4256
4257
4258 if (adapter->xdp_prog) {
4259 dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n");
4260 return -EPERM;
4261 }
4262
4263 spin_lock_bh(&adapter->mbx_lock);
4264
4265 ret = hw->mac.ops.set_rlpml(hw, max_frame);
4266 spin_unlock_bh(&adapter->mbx_lock);
4267 if (ret)
4268 return -EINVAL;
4269
4270 hw_dbg(hw, "changing MTU from %d to %d\n",
4271 netdev->mtu, new_mtu);
4272
4273
4274 netdev->mtu = new_mtu;
4275
4276 if (netif_running(netdev))
4277 ixgbevf_reinit_locked(adapter);
4278
4279 return 0;
4280}
4281
4282static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
4283{
4284 struct net_device *netdev = pci_get_drvdata(pdev);
4285 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4286#ifdef CONFIG_PM
4287 int retval = 0;
4288#endif
4289
4290 rtnl_lock();
4291 netif_device_detach(netdev);
4292
4293 if (netif_running(netdev))
4294 ixgbevf_close_suspend(adapter);
4295
4296 ixgbevf_clear_interrupt_scheme(adapter);
4297 rtnl_unlock();
4298
4299#ifdef CONFIG_PM
4300 retval = pci_save_state(pdev);
4301 if (retval)
4302 return retval;
4303
4304#endif
4305 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4306 pci_disable_device(pdev);
4307
4308 return 0;
4309}
4310
4311#ifdef CONFIG_PM
4312static int ixgbevf_resume(struct pci_dev *pdev)
4313{
4314 struct net_device *netdev = pci_get_drvdata(pdev);
4315 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4316 u32 err;
4317
4318 pci_restore_state(pdev);
4319
4320
4321
4322 pci_save_state(pdev);
4323
4324 err = pci_enable_device_mem(pdev);
4325 if (err) {
4326 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
4327 return err;
4328 }
4329
4330 adapter->hw.hw_addr = adapter->io_addr;
4331 smp_mb__before_atomic();
4332 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4333 pci_set_master(pdev);
4334
4335 ixgbevf_reset(adapter);
4336
4337 rtnl_lock();
4338 err = ixgbevf_init_interrupt_scheme(adapter);
4339 if (!err && netif_running(netdev))
4340 err = ixgbevf_open(netdev);
4341 rtnl_unlock();
4342 if (err)
4343 return err;
4344
4345 netif_device_attach(netdev);
4346
4347 return err;
4348}
4349
4350#endif
4351static void ixgbevf_shutdown(struct pci_dev *pdev)
4352{
4353 ixgbevf_suspend(pdev, PMSG_SUSPEND);
4354}
4355
4356static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
4357 const struct ixgbevf_ring *ring)
4358{
4359 u64 bytes, packets;
4360 unsigned int start;
4361
4362 if (ring) {
4363 do {
4364 start = u64_stats_fetch_begin_irq(&ring->syncp);
4365 bytes = ring->stats.bytes;
4366 packets = ring->stats.packets;
4367 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4368 stats->tx_bytes += bytes;
4369 stats->tx_packets += packets;
4370 }
4371}
4372
4373static void ixgbevf_get_stats(struct net_device *netdev,
4374 struct rtnl_link_stats64 *stats)
4375{
4376 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4377 unsigned int start;
4378 u64 bytes, packets;
4379 const struct ixgbevf_ring *ring;
4380 int i;
4381
4382 ixgbevf_update_stats(adapter);
4383
4384 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
4385
4386 rcu_read_lock();
4387 for (i = 0; i < adapter->num_rx_queues; i++) {
4388 ring = adapter->rx_ring[i];
4389 do {
4390 start = u64_stats_fetch_begin_irq(&ring->syncp);
4391 bytes = ring->stats.bytes;
4392 packets = ring->stats.packets;
4393 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
4394 stats->rx_bytes += bytes;
4395 stats->rx_packets += packets;
4396 }
4397
4398 for (i = 0; i < adapter->num_tx_queues; i++) {
4399 ring = adapter->tx_ring[i];
4400 ixgbevf_get_tx_ring_stats(stats, ring);
4401 }
4402
4403 for (i = 0; i < adapter->num_xdp_queues; i++) {
4404 ring = adapter->xdp_ring[i];
4405 ixgbevf_get_tx_ring_stats(stats, ring);
4406 }
4407 rcu_read_unlock();
4408}
4409
4410#define IXGBEVF_MAX_MAC_HDR_LEN 127
4411#define IXGBEVF_MAX_NETWORK_HDR_LEN 511
4412
4413static netdev_features_t
4414ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
4415 netdev_features_t features)
4416{
4417 unsigned int network_hdr_len, mac_hdr_len;
4418
4419
4420 mac_hdr_len = skb_network_header(skb) - skb->data;
4421 if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
4422 return features & ~(NETIF_F_HW_CSUM |
4423 NETIF_F_SCTP_CRC |
4424 NETIF_F_HW_VLAN_CTAG_TX |
4425 NETIF_F_TSO |
4426 NETIF_F_TSO6);
4427
4428 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
4429 if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
4430 return features & ~(NETIF_F_HW_CSUM |
4431 NETIF_F_SCTP_CRC |
4432 NETIF_F_TSO |
4433 NETIF_F_TSO6);
4434
4435
4436
4437
4438 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
4439 features &= ~NETIF_F_TSO;
4440
4441 return features;
4442}
4443
4444static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
4445{
4446 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4447 struct ixgbevf_adapter *adapter = netdev_priv(dev);
4448 struct bpf_prog *old_prog;
4449
4450
4451 for (i = 0; i < adapter->num_rx_queues; i++) {
4452 struct ixgbevf_ring *ring = adapter->rx_ring[i];
4453
4454 if (frame_size > ixgbevf_rx_bufsz(ring))
4455 return -EINVAL;
4456 }
4457
4458 old_prog = xchg(&adapter->xdp_prog, prog);
4459
4460
4461 if (!!prog != !!old_prog) {
 /* Hardware has to reinitialize queues and interrupts to
  * match packet buffer alignment.  Unfortunately, the
  * hardware is not flexible enough to do this dynamically.
  */
4466 if (netif_running(dev))
4467 ixgbevf_close(dev);
4468
4469 ixgbevf_clear_interrupt_scheme(adapter);
4470 ixgbevf_init_interrupt_scheme(adapter);
4471
4472 if (netif_running(dev))
4473 ixgbevf_open(dev);
4474 } else {
4475 for (i = 0; i < adapter->num_rx_queues; i++)
4476 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
4477 }
4478
4479 if (old_prog)
4480 bpf_prog_put(old_prog);
4481
4482 return 0;
4483}
4484
4485static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4486{
4487 struct ixgbevf_adapter *adapter = netdev_priv(dev);
4488
4489 switch (xdp->command) {
4490 case XDP_SETUP_PROG:
4491 return ixgbevf_xdp_setup(dev, xdp->prog);
4492 case XDP_QUERY_PROG:
4493 xdp->prog_id = adapter->xdp_prog ?
4494 adapter->xdp_prog->aux->id : 0;
4495 return 0;
4496 default:
4497 return -EINVAL;
4498 }
4499}
4500
4501static const struct net_device_ops ixgbevf_netdev_ops = {
4502 .ndo_open = ixgbevf_open,
4503 .ndo_stop = ixgbevf_close,
4504 .ndo_start_xmit = ixgbevf_xmit_frame,
4505 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
4506 .ndo_get_stats64 = ixgbevf_get_stats,
4507 .ndo_validate_addr = eth_validate_addr,
4508 .ndo_set_mac_address = ixgbevf_set_mac,
4509 .ndo_change_mtu = ixgbevf_change_mtu,
4510 .ndo_tx_timeout = ixgbevf_tx_timeout,
4511 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
4512 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
4513 .ndo_features_check = ixgbevf_features_check,
4514 .ndo_bpf = ixgbevf_xdp,
4515};
4516
4517static void ixgbevf_assign_netdev_ops(struct net_device *dev)
4518{
4519 dev->netdev_ops = &ixgbevf_netdev_ops;
4520 ixgbevf_set_ethtool_ops(dev);
4521 dev->watchdog_timeo = 5 * HZ;
4522}
4523
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
4535static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4536{
4537 struct net_device *netdev;
4538 struct ixgbevf_adapter *adapter = NULL;
4539 struct ixgbe_hw *hw = NULL;
4540 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
4541 int err, pci_using_dac;
4542 bool disable_dev = false;
4543
4544 err = pci_enable_device(pdev);
4545 if (err)
4546 return err;
4547
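 /* prefer 64-bit DMA; fall back to a 32-bit mask if that fails */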
4548 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
4549 pci_using_dac = 1;
4550 } else {
4551 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4552 if (err) {
4553 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4554 goto err_dma;
4555 }
4556 pci_using_dac = 0;
4557 }
4558
4559 err = pci_request_regions(pdev, ixgbevf_driver_name);
4560 if (err) {
4561 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
4562 goto err_pci_reg;
4563 }
4564
4565 pci_set_master(pdev);
4566
4567 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
4568 MAX_TX_QUEUES);
4569 if (!netdev) {
4570 err = -ENOMEM;
4571 goto err_alloc_etherdev;
4572 }
4573
4574 SET_NETDEV_DEV(netdev, &pdev->dev);
4575
4576 adapter = netdev_priv(netdev);
4577
4578 adapter->netdev = netdev;
4579 adapter->pdev = pdev;
4580 hw = &adapter->hw;
4581 hw->back = adapter;
4582 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4583
4584
4585
4586
4587 pci_save_state(pdev);
4588
4589 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4590 pci_resource_len(pdev, 0));
4591 adapter->io_addr = hw->hw_addr;
4592 if (!hw->hw_addr) {
4593 err = -EIO;
4594 goto err_ioremap;
4595 }
4596
4597 ixgbevf_assign_netdev_ops(netdev);
4598
4599
4600 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
4601 hw->mac.type = ii->mac;
4602
4603 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
4604 sizeof(struct ixgbe_mbx_operations));
4605
4606
4607 err = ixgbevf_sw_init(adapter);
4608 if (err)
4609 goto err_sw_init;
4610
4611
4612 if (!is_valid_ether_addr(netdev->dev_addr)) {
4613 pr_err("invalid MAC address\n");
4614 err = -EIO;
4615 goto err_sw_init;
4616 }
4617
4618 netdev->hw_features = NETIF_F_SG |
4619 NETIF_F_TSO |
4620 NETIF_F_TSO6 |
4621 NETIF_F_RXCSUM |
4622 NETIF_F_HW_CSUM |
4623 NETIF_F_SCTP_CRC;
4624
4625#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
4626 NETIF_F_GSO_GRE_CSUM | \
4627 NETIF_F_GSO_IPXIP4 | \
4628 NETIF_F_GSO_IPXIP6 | \
4629 NETIF_F_GSO_UDP_TUNNEL | \
4630 NETIF_F_GSO_UDP_TUNNEL_CSUM)
4631
4632 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
4633 netdev->hw_features |= NETIF_F_GSO_PARTIAL |
4634 IXGBEVF_GSO_PARTIAL_FEATURES;
4635
4636 netdev->features = netdev->hw_features;
4637
4638 if (pci_using_dac)
4639 netdev->features |= NETIF_F_HIGHDMA;
4640
4641 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
4642 netdev->mpls_features |= NETIF_F_SG |
4643 NETIF_F_TSO |
4644 NETIF_F_TSO6 |
4645 NETIF_F_HW_CSUM;
4646 netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
4647 netdev->hw_enc_features |= netdev->vlan_features;
4648
4649
4650 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4651 NETIF_F_HW_VLAN_CTAG_RX |
4652 NETIF_F_HW_VLAN_CTAG_TX;
4653
4654 netdev->priv_flags |= IFF_UNICAST_FLT;
4655
4656
4657 netdev->min_mtu = ETH_MIN_MTU;
4658 switch (adapter->hw.api_version) {
4659 case ixgbe_mbox_api_11:
4660 case ixgbe_mbox_api_12:
4661 case ixgbe_mbox_api_13:
4662 case ixgbe_mbox_api_14:
4663 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4664 (ETH_HLEN + ETH_FCS_LEN);
4665 break;
4666 default:
4667 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
4668 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4669 (ETH_HLEN + ETH_FCS_LEN);
4670 else
4671 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
4672 break;
4673 }
4674
4675 if (IXGBE_REMOVED(hw->hw_addr)) {
4676 err = -EIO;
4677 goto err_sw_init;
4678 }
4679
4680 timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0);
4681
4682 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4683 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4684 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4685
4686 err = ixgbevf_init_interrupt_scheme(adapter);
4687 if (err)
4688 goto err_sw_init;
4689
4690 strcpy(netdev->name, "eth%d");
4691
4692 err = register_netdev(netdev);
4693 if (err)
4694 goto err_register;
4695
4696 pci_set_drvdata(pdev, netdev);
4697 netif_carrier_off(netdev);
4698 ixgbevf_init_ipsec_offload(adapter);
4699
4700 ixgbevf_init_last_counter_stats(adapter);
4701
4702
4703 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
4704 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4705
4706 switch (hw->mac.type) {
4707 case ixgbe_mac_X550_vf:
4708 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
4709 break;
4710 case ixgbe_mac_X540_vf:
4711 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
4712 break;
4713 case ixgbe_mac_82599_vf:
4714 default:
4715 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
4716 break;
4717 }
4718
4719 return 0;
4720
4721err_register:
4722 ixgbevf_clear_interrupt_scheme(adapter);
4723err_sw_init:
4724 ixgbevf_reset_interrupt_capability(adapter);
4725 iounmap(adapter->io_addr);
4726 kfree(adapter->rss_key);
4727err_ioremap:
4728 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4729 free_netdev(netdev);
4730err_alloc_etherdev:
4731 pci_release_regions(pdev);
4732err_pci_reg:
4733err_dma:
4734 if (!adapter || disable_dev)
4735 pci_disable_device(pdev);
4736 return err;
4737}
4738
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
4748static void ixgbevf_remove(struct pci_dev *pdev)
4749{
4750 struct net_device *netdev = pci_get_drvdata(pdev);
4751 struct ixgbevf_adapter *adapter;
4752 bool disable_dev;
4753
4754 if (!netdev)
4755 return;
4756
4757 adapter = netdev_priv(netdev);
4758
4759 set_bit(__IXGBEVF_REMOVING, &adapter->state);
4760 cancel_work_sync(&adapter->service_task);
4761
4762 if (netdev->reg_state == NETREG_REGISTERED)
4763 unregister_netdev(netdev);
4764
4765 ixgbevf_stop_ipsec_offload(adapter);
4766 ixgbevf_clear_interrupt_scheme(adapter);
4767 ixgbevf_reset_interrupt_capability(adapter);
4768
4769 iounmap(adapter->io_addr);
4770 pci_release_regions(pdev);
4771
4772 hw_dbg(&adapter->hw, "Remove complete\n");
4773
4774 kfree(adapter->rss_key);
4775 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4776 free_netdev(netdev);
4777
4778 if (disable_dev)
4779 pci_disable_device(pdev);
4780}
4781
/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
4790static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
4791 pci_channel_state_t state)
4792{
4793 struct net_device *netdev = pci_get_drvdata(pdev);
4794 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4795
4796 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4797 return PCI_ERS_RESULT_DISCONNECT;
4798
4799 rtnl_lock();
4800 netif_device_detach(netdev);
4801
4802 if (netif_running(netdev))
4803 ixgbevf_close_suspend(adapter);
4804
4805 if (state == pci_channel_io_perm_failure) {
4806 rtnl_unlock();
4807 return PCI_ERS_RESULT_DISCONNECT;
4808 }
4809
4810 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4811 pci_disable_device(pdev);
4812 rtnl_unlock();
4813
4814
4815 return PCI_ERS_RESULT_NEED_RESET;
4816}
4817
/**
 * ixgbevf_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.  Implementation
 * resembles the first half of the ixgbevf_resume routine.
 **/
4825static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
4826{
4827 struct net_device *netdev = pci_get_drvdata(pdev);
4828 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4829
4830 if (pci_enable_device_mem(pdev)) {
4831 dev_err(&pdev->dev,
4832 "Cannot re-enable PCI device after reset.\n");
4833 return PCI_ERS_RESULT_DISCONNECT;
4834 }
4835
4836 adapter->hw.hw_addr = adapter->io_addr;
4837 smp_mb__before_atomic();
4838 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4839 pci_set_master(pdev);
4840
4841 ixgbevf_reset(adapter);
4842
4843 return PCI_ERS_RESULT_RECOVERED;
4844}
4845
/**
 * ixgbevf_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.  Implementation resembles the
 * second half of the ixgbevf_resume routine.
 **/
4854static void ixgbevf_io_resume(struct pci_dev *pdev)
4855{
4856 struct net_device *netdev = pci_get_drvdata(pdev);
4857
4858 rtnl_lock();
4859 if (netif_running(netdev))
4860 ixgbevf_open(netdev);
4861
4862 netif_device_attach(netdev);
4863 rtnl_unlock();
4864}
4865
4866
4867static const struct pci_error_handlers ixgbevf_err_handler = {
4868 .error_detected = ixgbevf_io_error_detected,
4869 .slot_reset = ixgbevf_io_slot_reset,
4870 .resume = ixgbevf_io_resume,
4871};
4872
4873static struct pci_driver ixgbevf_driver = {
4874 .name = ixgbevf_driver_name,
4875 .id_table = ixgbevf_pci_tbl,
4876 .probe = ixgbevf_probe,
4877 .remove = ixgbevf_remove,
4878#ifdef CONFIG_PM
4879
4880 .suspend = ixgbevf_suspend,
4881 .resume = ixgbevf_resume,
4882#endif
4883 .shutdown = ixgbevf_shutdown,
4884 .err_handler = &ixgbevf_err_handler
4885};
4886
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is create the service workqueue and register
 * with the PCI subsystem.
 **/
4893static int __init ixgbevf_init_module(void)
4894{
4895 pr_info("%s - version %s\n", ixgbevf_driver_string,
4896 ixgbevf_driver_version);
4897
4898 pr_info("%s\n", ixgbevf_copyright);
4899 ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
4900 if (!ixgbevf_wq) {
4901 pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
4902 return -ENOMEM;
4903 }
4904
4905 return pci_register_driver(&ixgbevf_driver);
4906}
4907
4908module_init(ixgbevf_init_module);
4909
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
4916static void __exit ixgbevf_exit_module(void)
4917{
4918 pci_unregister_driver(&ixgbevf_driver);
4919 if (ixgbevf_wq) {
4920 destroy_workqueue(ixgbevf_wq);
4921 ixgbevf_wq = NULL;
4922 }
4923}
4924
4925#ifdef DEBUG
4926
4927
4928
4929
4930
4931char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4932{
4933 struct ixgbevf_adapter *adapter = hw->back;
4934
4935 return adapter->netdev->name;
4936}
4937
4938#endif
4939module_exit(ixgbevf_exit_module);
4940
4941
4942