// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx, pool;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			adapter->rx_ring[i]->netdev = adapter->netdev;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
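
/*
 * Worked example (illustrative, not from the hardware spec):
 * __ALIGN_MASK(x, m) expands to ((x + m) & ~m), so
 * __ALIGN_MASK(1, ~vmdq->mask) rounds 1 up to the pool stride, i.e. the
 * number of hardware queues per VMDq pool.  Assuming a 4-queue-per-pool
 * layout with vmdq->offset == 2 and tcs == 2, the first Rx ring is
 * cached at reg_idx 2 * 4 = 8; once (reg_idx & ~vmdq->mask) reaches 2
 * the loop rounds up to the next multiple of 4 and bumps the pool
 * counter, so successive rings land at reg_idx 8, 9, 12, 13, 16, ...
 */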

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
	default:
		break;
	}
}
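
/*
 * Sanity check of the shifts above (illustrative): with more than four
 * TCs, tc = 4 yields *rx = 4 << 4 = 64 and *tx = (4 + 2) << 4 = 96,
 * while tc = 6 yields *tx = (6 + 8) << 3 = 112, matching the 32/16/8
 * TxQs-per-TC split noted in the table comments.
 */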

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}
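
/*
 * Example of the resulting layout (illustrative): with num_tcs == 4 and
 * rss_i == 16 on an 82599, TC1 owns software rings 16..31, cached at Rx
 * register indices 32..47 (tc << 5) and Tx indices 64..79 (tc << 6).
 */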

#endif /* CONFIG_IXGBE_DCB */

/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}
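
/*
 * Illustrative example: assuming 4 queues per pool, vmdq->offset == 0
 * and rss->indices == 2, rings 0 and 1 stay in pool 0 at reg_idx 0 and
 * 1; at reg_idx 2 the (reg_idx & ~vmdq->mask) >= rss->indices test
 * fires, so the ring is pushed to reg_idx 4 in pool 1, leaving hardware
 * queues 2 and 3 unused.
 */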

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->reg_idx = i;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}
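
/*
 * Example (illustrative): in plain RSS mode with 8 Rx, 8 Tx and 4 XDP
 * rings, Rx and Tx rings map 1:1 to register indices 0..7 and the XDP
 * rings continue the Tx numbering at reg_idx 8..11.
 */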

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order the various feature calls is important.  It must start with
 * the "most" features enabled at the same time, then work its way down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	/* one XDP Tx ring per CPU when an XDP program is loaded */
	return adapter->xdp_prog ? nr_cpu_ids : 0;
}

#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues - Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}
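
/*
 * Worked sizing example (illustrative): with 8 TCs each pool spans 8
 * hardware queues, so at most 16 pools fit in the 128 queues; if 12
 * pools (including the VMDq offset) are claimed, the FCoE math above
 * leaves fcoe_i = 128 / 8 - 12 = 4 leftover pools for FCoE rings.
 */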

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif /* CONFIG_IXGBE_DCB */
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	/* To support macvlan offload we have to use num_tc to
	 * restrict the queues that can be used by the device.
	 * By doing this we can avoid reporting a false number of
	 * queues.
	 */
	if (vmdq_i > 1)
		netdev_set_num_tc(adapter->netdev, 1);

	/* populate TC0 for use by pool 0 */
	netdev_set_tc_queue(adapter->netdev, 0,
			    adapter->num_rx_queues_per_pool, 0);

	return true;
}
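
/*
 * Illustrative example: asking for 40 pools (offset included) selects
 * 64-pool mode, so each pool gets 2 queues and rss_i is clamped to 2;
 * the FCoE carve-out above would then be fcoe_i = 128 - 40 * 2 = 48
 * queues, before the fcoe->limit clamp is applied.
 */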

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per ring, and one Tx queue per ring.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the hardware
	 * allows for more MSI-X vectors than we have queues.
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors.
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is the minimum of
	 * max_q_vectors and the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}
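
/*
 * Worked example (illustrative): on a 12-CPU system with 16 Rx and
 * 16 Tx queues, vectors = min(max(16, 16), 12) + NON_Q_VECTORS is
 * requested; if the PCI core grants fewer (but at least MIN_MSIX_COUNT),
 * num_q_vectors becomes the granted count minus NON_Q_VECTORS, further
 * capped by adapter->max_q_vectors.
 */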

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
	head->next_update = jiffies + 1;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	int node = dev_to_node(&adapter->pdev->dev);
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int cpu = -1;
	int ring_count;
	u8 tcs = adapter->hw_tcs;

	ring_count = txr_count + rxr_count + xdp_count;

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			cpu = cpumask_local_spread(v_idx, node);
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
				GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->tx_ring[txr_idx], ring);

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);

		/* assign ring to adapter */
		WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
		else
			WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
	}

	ixgbe_for_each_ring(ring, q_vector->rx)
		WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);

	adapter->q_vector[v_idx] = NULL;
	__netif_napi_del(&q_vector->napi);

	/*
	 * after a call to __netif_napi_del() napi may still be used and
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err, i;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (adapter->rx_ring[i])
			adapter->rx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i])
			adapter->tx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		if (adapter->xdp_ring[i])
			adapter->xdp_ring[i]->ring_idx = i;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
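
/*
 * Distribution example (illustrative): with 9 q_vectors and 16 Rx/Tx
 * rings, v_idx 0 gets rqpv = DIV_ROUND_UP(16, 9) = 2 Rx rings, the next
 * vector DIV_ROUND_UP(14, 8) = 2, and so on; the last two vectors end
 * up with a single Rx ring each, so no vector's share differs from
 * another's by more than one ring.
 */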

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (adapter->hw_tcs > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->hw_tcs = 0;
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}
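
/*
 * Usage sketch (illustrative, mirroring the TSO path in ixgbe_main.c):
 * a caller builds the descriptor words first, then hands them off, e.g.
 *
 *	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
 *			  type_tucmd | IXGBE_ADVTXD_TUCMD_L4T_TCP,
 *			  mss_l4len_idx);
 *
 * relying on this helper to OR in IXGBE_TXD_CMD_DEXT |
 * IXGBE_ADVTXD_DTYP_CTXT and to wrap next_to_use at ring->count.
 */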