/* Intel 10 Gigabit PCI Express Linux driver */

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB

/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
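
/* Note on the pool math above: __ALIGN_MASK(1, ~vmdq->mask) evaluates to
 * the number of hardware queues per VMDq pool, and
 * __ALIGN_MASK(reg_idx, ~vmdq->mask) rounds reg_idx up to the start of
 * the next pool.  For example (assuming the 4-queues-per-pool mask
 * IXGBE_82599_VMDQ_4Q_MASK, 0x7C): __ALIGN_MASK(1, ~0x7C) = 4 and
 * __ALIGN_MASK(6, ~0x7C) = 8.
 */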

/**
 * ixgbe_get_first_reg_idx - Return first register index associated with ring
 * @adapter: pointer to adapter structure
 * @tc: traffic class index
 * @tx: returns the first Tx register index for this traffic class
 * @rx: returns the first Rx register index for this traffic class
 */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}
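
/* Resulting 82599 register layout with eight traffic classes: Rx blocks
 * start at 0, 16, 32, ..., 112 (16 Rx queues per TC), while Tx blocks
 * start at 0, 32, 64, 80, 96, 104, 112 and 120, i.e. 32 Tx queues each
 * for TC0/1, 16 for TC2/3 and 8 for TC4-7, which together account for
 * all 128 queues.
 */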

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}
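
/* Worked example: with four traffic classes and rss_i = 16 on 82599,
 * TC 1 owns rings 16-31, which ixgbe_get_first_reg_idx() maps to Tx
 * registers 64-79 and Rx registers 32-47.
 */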

#endif

/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  The
 * first FCoE ring may be left mapped as RSS; any remaining FCoE rings get
 * their own consecutive register indices.
 *
 **/
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->reg_idx = i;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order the various feature calls is important.  It must start with
 * the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0
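
/* These masks bound the queue index within an RSS set: e.g. with
 * IXGBE_RSS_8Q_MASK only the low three bits of a register index select
 * the RSS queue (up to eight queues), while IXGBE_RSS_DISABLED_MASK
 * collapses the set to a single queue.
 */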

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues - Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}
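
/* Sizing example: with 4 TCs and a VMDq pool limit of 16, the code above
 * keeps vmdq_i = 16 under the 4-queues-per-pool mask, allocates
 * 16 * 4 = 64 queue pairs, and leaves (128 / 4) - 16 = 16 FCoE queue
 * indices before the fcoe->limit clamp.
 */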

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}
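
/* Example: an 82599 netdev with 64 Tx queues and 8 TCs gives
 * rss_i = 64 / 8 = 8 under the 8-queues-per-TC mask (assuming the RSS
 * feature limit allows it), so each TC advertises 8 queues at offset
 * 8 * tc for a total of 64 queue pairs.
 */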

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		rss_i = 4;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}
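
/* Sizing example: 16 pools in the default 32-pool/4-queue mode consume
 * 16 * 4 = 64 of the 128 hardware queues, leaving fcoe_i = 64 candidate
 * FCoE queues before the fcoe->limit clamp above is applied.
 */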

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;
	f->mask = IXGBE_RSS_16Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;

	return true;
}
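
/* Example: with an RSS limit of 16, ATR sampling enabled and an FDIR
 * limit of (say) 64, rss_i grows to 64 and IXGBE_FLAG_FDIR_HASH_CAPABLE
 * is set unless perfect filters are configured, letting Flow Director
 * spread flows across all 64 queue pairs.
 */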

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts.  Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}
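
/* Example: 16 Tx and 16 Rx queues on an 8-CPU system request
 * min(16, 8) + NON_Q_VECTORS vectors.  pci_enable_msix_range() may grant
 * anything between MIN_MSIX_COUNT and that request; num_q_vectors then
 * becomes the grant minus NON_Q_VECTORS, capped by max_q_vectors.
 */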

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

#ifdef CONFIG_NET_RX_BUSY_POLL
	/* initialize busy poll */
	atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE);

#endif
	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				txr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				rxr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}
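
/* Layout note: the q_vector and its rings come from one allocation of
 * sizeof(struct ixgbe_q_vector) plus ring_count ring structures, with
 * q_vector->ring pointing at the trailing array; Tx rings are filled
 * first, then Rx rings.  Because ring indices advance by v_count, each
 * vector owns an interleaved stripe of the adapter's rings.
 */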

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
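
/* Distribution example: 24 Rx and 24 Tx rings over 16 vectors skip the
 * Rx-only loop (16 < 48), and the DIV_ROUND_UP() split hands vectors
 * 0-7 two Tx/Rx ring pairs each and vectors 8-15 one pair each.
 */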

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (netdev_get_num_tc(adapter->netdev) > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
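
/* The DEXT and DTYP_CTXT bits mark the slot as an advanced context
 * descriptor rather than a data descriptor; offload paths (TSO,
 * checksum offload, FCoE) write one of these ahead of the data
 * descriptors it parameterizes, with all fields in little-endian order.
 */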