// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2015 - 2020 Intel Corporation.
 */
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/numa.h>

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"

struct hfi1_affinity_node_list node_affinity = {
	.list = LIST_HEAD_INIT(node_affinity.list),
	.lock = __MUTEX_INITIALIZER(node_affinity.lock)
};

/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
	"SDMA",
	"RCVCTXT",
	"NETDEVCTXT",
	"GENERAL",
	"OTHER",
};

/* Per NUMA node count of HFI devices */
static unsigned int *hfi1_per_node_cntr;

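/* Reset a cpu_mask_set: clear both masks and the rollover generation count */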
static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
	cpumask_clear(&set->mask);
	cpumask_clear(&set->used);
	set->gen = 0;
}

/* Increment generation of CPU set if needed */
static void _cpu_mask_set_gen_inc(struct cpu_mask_set *set)
{
	if (cpumask_equal(&set->mask, &set->used)) {
		/*
		 * We've used up all the CPUs, bump up the generation
		 * and reset the 'used' map
		 */
		set->gen++;
		cpumask_clear(&set->used);
	}
}

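/*
 * Drop back one generation when the current one drains: the previous
 * generation had consumed every CPU, so restore 'used' to the full mask.
 */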
static void _cpu_mask_set_gen_dec(struct cpu_mask_set *set)
{
	if (cpumask_empty(&set->used) && set->gen) {
		set->gen--;
		cpumask_copy(&set->used, &set->mask);
	}
}

/* Get the first CPU from the mask and available mask */
static int cpu_mask_set_get_first(struct cpu_mask_set *set, cpumask_var_t diff)
{
	int cpu;

	if (!diff || !set)
		return -EINVAL;

	_cpu_mask_set_gen_inc(set);

	/* Find out CPUs left in CPU mask */
	cpumask_andnot(diff, &set->mask, &set->used);

	cpu = cpumask_first(diff);
	if (cpu >= nr_cpu_ids) /* empty */
		cpu = -EINVAL;
	else
		cpumask_set_cpu(cpu, &set->used);

	return cpu;
}

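/* Release a CPU back to the set and drop a generation if it was the last one */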
static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
{
	if (!set)
		return;

	cpumask_clear_cpu(cpu, &set->used);
	_cpu_mask_set_gen_dec(set);
}

/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
	int possible, curr_cpu, i, ht;

	cpumask_clear(&node_affinity.real_cpu_mask);

	/* Start with cpu online mask as the real cpu mask */
	cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);

	/*
	 * Remove HT cores from the real cpu mask.  Do this in two steps below.
	 */
	possible = cpumask_weight(&node_affinity.real_cpu_mask);
	ht = cpumask_weight(topology_sibling_cpumask(
			    cpumask_first(&node_affinity.real_cpu_mask)));
	/*
	 * Step 1.  Skip over the first N HT siblings and use them as the
	 * "real" cores.  Assumes that HT cores are not enumerated in
	 * succession (except in the single core case).
	 */
	curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
	for (i = 0; i < possible / ht; i++)
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	/*
	 * Step 2.  Remove the remaining HT siblings.  Use cpumask_next() to
	 * skip any gaps.
	 */
	for (; i < possible; i++) {
		cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	}
}

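/*
 * Count the HFI devices behind each NUMA node using the driver's PCI device
 * table and initialize the global affinity bookkeeping.
 */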
int node_affinity_init(void)
{
	int node;
	struct pci_dev *dev = NULL;
	const struct pci_device_id *ids = hfi1_pci_tbl;

	cpumask_clear(&node_affinity.proc.used);
	cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);

	node_affinity.proc.gen = 0;
	node_affinity.num_core_siblings =
		cpumask_weight(topology_sibling_cpumask(
			       cpumask_first(&node_affinity.proc.mask)));
	node_affinity.num_possible_nodes = num_possible_nodes();
	node_affinity.num_online_nodes = num_online_nodes();
	node_affinity.num_online_cpus = num_online_cpus();

	/*
	 * The real cpu mask is part of the affinity struct but it has to be
	 * initialized early. It is needed to calculate the number of user
	 * contexts in set_up_context_variables().
	 */
	init_real_cpu_mask();

	hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
				     sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
	if (!hfi1_per_node_cntr)
		return -ENOMEM;

	while (ids->vendor) {
		dev = NULL;
		while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
			node = pcibus_to_node(dev->bus);
			if (node < 0)
				goto out;

			hfi1_per_node_cntr[node]++;
		}
		ids++;
	}

	return 0;

out:
	/*
	 * Invalid PCI NUMA node information found, note it, and populate
	 * our database 1:1.
	 */
	pr_err("HFI: Invalid PCI NUMA node. Performance may be affected\n");
	pr_err("HFI: System BIOS may need to be upgraded\n");
	for (node = 0; node < node_affinity.num_possible_nodes; node++)
		hfi1_per_node_cntr[node] = 1;

	return 0;
}

static void node_affinity_destroy(struct hfi1_affinity_node *entry)
{
	free_percpu(entry->comp_vect_affinity);
	kfree(entry);
}

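/* Tear down every per-node affinity entry and the per-node device counters */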
void node_affinity_destroy_all(void)
{
	struct list_head *pos, *q;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	list_for_each_safe(pos, q, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node,
				   list);
		list_del(pos);
		node_affinity_destroy(entry);
	}
	mutex_unlock(&node_affinity.lock);
	kfree(hfi1_per_node_cntr);
}

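/* Allocate and minimally initialize an affinity entry for the given node */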
static struct hfi1_affinity_node *node_affinity_allocate(int node)
{
	struct hfi1_affinity_node *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;
	entry->node = node;
	entry->comp_vect_affinity = alloc_percpu(u16);
	INIT_LIST_HEAD(&entry->list);

	return entry;
}

/*
 * It appends an entry to the list.
 * It *must* be called with node_affinity.lock held.
 */
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{
	list_add_tail(&entry->list, &node_affinity.list);
}

/* It must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
	struct list_head *pos;
	struct hfi1_affinity_node *entry;

	list_for_each(pos, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node, list);
		if (entry->node == node)
			return entry;
	}

	return NULL;
}

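/*
 * Pick the least-loaded CPU in possible_cpumask according to the per-CPU
 * reference counts in comp_vect_affinity, and increment its count.
 */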
static int per_cpu_affinity_get(cpumask_var_t possible_cpumask,
				u16 __percpu *comp_vect_affinity)
{
	int curr_cpu;
	u16 cntr;
	u16 prev_cntr;
	int ret_cpu;

	if (!possible_cpumask) {
		ret_cpu = -EINVAL;
		goto fail;
	}

	if (!comp_vect_affinity) {
		ret_cpu = -EINVAL;
		goto fail;
	}

	ret_cpu = cpumask_first(possible_cpumask);
	if (ret_cpu >= nr_cpu_ids) {
		ret_cpu = -EINVAL;
		goto fail;
	}

	prev_cntr = *per_cpu_ptr(comp_vect_affinity, ret_cpu);
	for_each_cpu(curr_cpu, possible_cpumask) {
		cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);

		if (cntr < prev_cntr) {
			ret_cpu = curr_cpu;
			prev_cntr = cntr;
		}
	}

	*per_cpu_ptr(comp_vect_affinity, ret_cpu) += 1;

fail:
	return ret_cpu;
}

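/*
 * Find the most-loaded CPU in possible_cpumask, decrement its per-CPU
 * reference count, and return it.
 */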
static int per_cpu_affinity_put_max(cpumask_var_t possible_cpumask,
				    u16 __percpu *comp_vect_affinity)
{
	int curr_cpu;
	int max_cpu;
	u16 cntr;
	u16 prev_cntr;

	if (!possible_cpumask)
		return -EINVAL;

	if (!comp_vect_affinity)
		return -EINVAL;

	max_cpu = cpumask_first(possible_cpumask);
	if (max_cpu >= nr_cpu_ids)
		return -EINVAL;

	prev_cntr = *per_cpu_ptr(comp_vect_affinity, max_cpu);
	for_each_cpu(curr_cpu, possible_cpumask) {
		cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);

		if (cntr > prev_cntr) {
			max_cpu = curr_cpu;
			prev_cntr = cntr;
		}
	}

	*per_cpu_ptr(comp_vect_affinity, max_cpu) -= 1;

	return max_cpu;
}

/*
 * Non-interrupt CPUs are used first, then interrupt CPUs.
 * Two already allocated cpu masks must be passed.
 */
static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd,
				  struct hfi1_affinity_node *entry,
				  cpumask_var_t non_intr_cpus,
				  cpumask_var_t available_cpus)
	__must_hold(&node_affinity.lock)
{
	int cpu;
	struct cpu_mask_set *set = dd->comp_vect;

	lockdep_assert_held(&node_affinity.lock);
	if (!non_intr_cpus) {
		cpu = -1;
		goto fail;
	}

	if (!available_cpus) {
		cpu = -1;
		goto fail;
	}

	/* Available CPUs for pinning completion vectors */
	_cpu_mask_set_gen_inc(set);
	cpumask_andnot(available_cpus, &set->mask, &set->used);

	/* Available CPUs without SDMA engine interrupts */
	cpumask_andnot(non_intr_cpus, available_cpus,
		       &entry->def_intr.used);

	/* If there are non-interrupt CPUs available, use them first */
	if (!cpumask_empty(non_intr_cpus))
		cpu = cpumask_first(non_intr_cpus);
	else /* Otherwise, use interrupt CPUs */
		cpu = cpumask_first(available_cpus);

	if (cpu >= nr_cpu_ids) { /* empty */
		cpu = -1;
		goto fail;
	}
	cpumask_set_cpu(cpu, &set->used);

fail:
	return cpu;
}

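/* Return a completion vector CPU to the device's cpu_mask_set */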
static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu)
{
	struct cpu_mask_set *set = dd->comp_vect;

	if (cpu < 0)
		return;

	cpu_mask_set_put(set, cpu);
}

/* _dev_comp_vect_mappings_destroy() is reentrant */
static void _dev_comp_vect_mappings_destroy(struct hfi1_devdata *dd)
{
	int i, cpu;

	if (!dd->comp_vect_mappings)
		return;

	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
		cpu = dd->comp_vect_mappings[i];
		_dev_comp_vect_cpu_put(dd, cpu);
		dd->comp_vect_mappings[i] = -1;
		hfi1_cdbg(AFFINITY,
			  "[%s] Release CPU %d from completion vector %d",
			  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i);
	}

	kfree(dd->comp_vect_mappings);
	dd->comp_vect_mappings = NULL;
}

/*
 * Create the completion vector to CPU mappings for the device.
 * It assumes dd->comp_vect_possible_cpus is available.
 */
static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
					  struct hfi1_affinity_node *entry)
	__must_hold(&node_affinity.lock)
{
	int i, cpu, ret;
	cpumask_var_t non_intr_cpus;
	cpumask_var_t available_cpus;

	lockdep_assert_held(&node_affinity.lock);

	if (!zalloc_cpumask_var(&non_intr_cpus, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(&available_cpus, GFP_KERNEL)) {
		free_cpumask_var(non_intr_cpus);
		return -ENOMEM;
	}

	dd->comp_vect_mappings = kcalloc(dd->comp_vect_possible_cpus,
					 sizeof(*dd->comp_vect_mappings),
					 GFP_KERNEL);
	if (!dd->comp_vect_mappings) {
		ret = -ENOMEM;
		goto fail;
	}
	for (i = 0; i < dd->comp_vect_possible_cpus; i++)
		dd->comp_vect_mappings[i] = -1;

	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
		cpu = _dev_comp_vect_cpu_get(dd, entry, non_intr_cpus,
					     available_cpus);
		if (cpu < 0) {
			ret = -EINVAL;
			goto fail;
		}

		dd->comp_vect_mappings[i] = cpu;
		hfi1_cdbg(AFFINITY,
			  "[%s] Completion Vector %d -> CPU %d",
			  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
	}

	free_cpumask_var(available_cpus);
	free_cpumask_var(non_intr_cpus);
	return 0;

fail:
	free_cpumask_var(available_cpus);
	free_cpumask_var(non_intr_cpus);
	_dev_comp_vect_mappings_destroy(dd);

	return ret;
}

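/* Locked wrapper that builds the device's completion vector to CPU mappings */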
int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd)
{
	int ret;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	if (!entry) {
		ret = -EINVAL;
		goto unlock;
	}
	ret = _dev_comp_vect_mappings_create(dd, entry);
unlock:
	mutex_unlock(&node_affinity.lock);

	return ret;
}

void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd)
{
	_dev_comp_vect_mappings_destroy(dd);
}

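/* Translate a completion vector number into the CPU it was mapped to */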
int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect)
{
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);

	if (!dd->comp_vect_mappings)
		return -EINVAL;
	if (comp_vect >= dd->comp_vect_possible_cpus)
		return -EINVAL;

	return dd->comp_vect_mappings[comp_vect];
}

/*
 * It assumes dd->comp_vect_possible_cpus is available.
 */
static int _dev_comp_vect_cpu_mask_init(struct hfi1_devdata *dd,
					struct hfi1_affinity_node *entry,
					bool first_dev_init)
	__must_hold(&node_affinity.lock)
{
	int i, j, curr_cpu;
	int possible_cpus_comp_vect = 0;
	struct cpumask *dev_comp_vect_mask = &dd->comp_vect->mask;

	lockdep_assert_held(&node_affinity.lock);
	/*
	 * If there's only one CPU available for completion vectors, then
	 * there will only be one completion vector available. Otherwise,
	 * the number of completion vectors available will be the number of
	 * available CPUs divided by the number of devices in the
	 * local NUMA node.
	 */
	if (cpumask_weight(&entry->comp_vect_mask) == 1) {
		possible_cpus_comp_vect = 1;
		dd_dev_warn(dd,
			    "Number of kernel receive queues is too large for completion vector affinity to be effective\n");
	} else {
		possible_cpus_comp_vect +=
			cpumask_weight(&entry->comp_vect_mask) /
				       hfi1_per_node_cntr[dd->node];

		/*
		 * If the completion vector CPUs available don't divide
		 * evenly among devices, then the first device to be
		 * initialized gets an extra CPU.
		 */
		if (first_dev_init &&
		    cpumask_weight(&entry->comp_vect_mask) %
		    hfi1_per_node_cntr[dd->node] != 0)
			possible_cpus_comp_vect++;
	}

	dd->comp_vect_possible_cpus = possible_cpus_comp_vect;

	/* Mask in the completion vector CPUs into the computational mask */
	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
		curr_cpu = per_cpu_affinity_get(&entry->comp_vect_mask,
						entry->comp_vect_affinity);
		if (curr_cpu < 0)
			goto fail;

		cpumask_set_cpu(curr_cpu, dev_comp_vect_mask);
	}

	hfi1_cdbg(AFFINITY,
		  "[%s] Completion vector affinity CPU set(s) %*pbl",
		  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi),
		  cpumask_pr_args(dev_comp_vect_mask));

	return 0;

fail:
	for (j = 0; j < i; j++)
		per_cpu_affinity_put_max(&entry->comp_vect_mask,
					 entry->comp_vect_affinity);

	return curr_cpu;
}

/*
 * It assumes dd->comp_vect_possible_cpus is available.
 */
static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
					     struct hfi1_affinity_node *entry)
	__must_hold(&node_affinity.lock)
{
	int i, cpu;

	lockdep_assert_held(&node_affinity.lock);
	if (!dd->comp_vect_possible_cpus)
		return;

	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
		cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask,
					       entry->comp_vect_affinity);
		/* Clearing CPU in device completion vector cpu mask */
		if (cpu >= 0)
			cpumask_clear_cpu(cpu, &dd->comp_vect->mask);
	}

	dd->comp_vect_possible_cpus = 0;
}

/*
 * Interrupt affinity.
 *
 * non-rcv avail gets a default mask that
 * starts as possible cpus with threads reset
 * and each rcv avail reset.
 *
 * rcv avail gets node relative 1 wrapping back
 * to the node relative 1 as necessary.
 *
 */
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
	int node = pcibus_to_node(dd->pcidev->bus);
	struct hfi1_affinity_node *entry;
	const struct cpumask *local_mask;
	int curr_cpu, possible, i, ret;
	bool new_entry = false;

	/*
	 * If the BIOS does not have the NUMA node information set, select
	 * NUMA 0 so we get consistent performance.
	 */
	if (node < 0) {
		dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
		node = 0;
	}
	dd->node = node;

	local_mask = cpumask_of_node(dd->node);
	if (cpumask_first(local_mask) >= nr_cpu_ids)
		local_mask = topology_core_cpumask(0);

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	/*
	 * If this is the first time this NUMA node's affinity is used,
	 * create an entry in the global affinity structure and initialize it.
	 */
	if (!entry) {
		entry = node_affinity_allocate(node);
		if (!entry) {
			dd_dev_err(dd,
				   "Unable to allocate global affinity node\n");
			ret = -ENOMEM;
			goto fail;
		}
		new_entry = true;

		init_cpu_mask_set(&entry->def_intr);
		init_cpu_mask_set(&entry->rcv_intr);
		cpumask_clear(&entry->comp_vect_mask);
		cpumask_clear(&entry->general_intr_mask);

		cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
			    local_mask);

		/* fill in the receive list */
		possible = cpumask_weight(&entry->def_intr.mask);
		curr_cpu = cpumask_first(&entry->def_intr.mask);

		if (possible == 1) {
			/* only one CPU, everyone will use it */
			cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
		} else {
			/*
			 * The general/control context will be the first CPU in
			 * the default list, so it is removed from the default
			 * list and added to the general interrupt list.
			 */
			cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
			curr_cpu = cpumask_next(curr_cpu,
						&entry->def_intr.mask);

			/*
			 * Remove the remaining kernel receive queues from
			 * the default list and add them to the receive list.
			 */
			for (i = 0;
			     i < (dd->n_krcv_queues - 1) *
				  hfi1_per_node_cntr[dd->node];
			     i++) {
				cpumask_clear_cpu(curr_cpu,
						  &entry->def_intr.mask);
				cpumask_set_cpu(curr_cpu,
						&entry->rcv_intr.mask);
				curr_cpu = cpumask_next(curr_cpu,
							&entry->def_intr.mask);
				if (curr_cpu >= nr_cpu_ids)
					break;
			}

			/*
			 * If there ends up being 0 CPU cores leftover for SDMA
			 * engines, use the same CPU cores as general/control
			 * context.
			 */
			if (cpumask_weight(&entry->def_intr.mask) == 0)
				cpumask_copy(&entry->def_intr.mask,
					     &entry->general_intr_mask);
		}

		/* Determine completion vector CPUs for the entire node */
		cpumask_and(&entry->comp_vect_mask,
			    &node_affinity.real_cpu_mask, local_mask);
		cpumask_andnot(&entry->comp_vect_mask,
			       &entry->comp_vect_mask,
			       &entry->rcv_intr.mask);
		cpumask_andnot(&entry->comp_vect_mask,
			       &entry->comp_vect_mask,
			       &entry->general_intr_mask);

		/*
		 * If there ends up being 0 CPU cores leftover for completion
		 * vectors, use the same CPU core as the general/control
		 * context.
		 */
		if (cpumask_weight(&entry->comp_vect_mask) == 0)
			cpumask_copy(&entry->comp_vect_mask,
				     &entry->general_intr_mask);
	}

	ret = _dev_comp_vect_cpu_mask_init(dd, entry, new_entry);
	if (ret < 0)
		goto fail;

	if (new_entry)
		node_affinity_add_tail(entry);

	mutex_unlock(&node_affinity.lock);

	return 0;

fail:
	if (new_entry)
		node_affinity_destroy(entry);
	mutex_unlock(&node_affinity.lock);
	return ret;
}

void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
{
	struct hfi1_affinity_node *entry;

	if (dd->node < 0)
		return;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	if (!entry)
		goto unlock;

	/*
	 * Free device completion vector CPUs to be used by future
	 * completion vectors
	 */
	_dev_comp_vect_cpu_mask_clean_up(dd, entry);
unlock:
	mutex_unlock(&node_affinity.lock);
	dd->node = NUMA_NO_NODE;
}

/*
 * Function updates the irq affinity hint for msix after it has been changed
 * by the user using the /proc/irq interface. This function only accepts
 * one cpu in the mask.
 */
static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
{
	struct sdma_engine *sde = msix->arg;
	struct hfi1_devdata *dd = sde->dd;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set;
	int i, old_cpu;

	if (cpu > num_online_cpus() || cpu == sde->cpu)
		return;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	if (!entry)
		goto unlock;

	old_cpu = sde->cpu;
	sde->cpu = cpu;
	cpumask_clear(&msix->mask);
	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n",
		   msix->irq, irq_type_names[msix->type],
		   sde->this_idx, cpu);
	irq_set_affinity_hint(msix->irq, &msix->mask);

	/*
	 * Account for the new cpu in the def_intr set and remove the old
	 * cpu, unless another SDMA engine IRQ is still pinned to it.
	 */
	set = &entry->def_intr;
	cpumask_set_cpu(cpu, &set->mask);
	cpumask_set_cpu(cpu, &set->used);
	for (i = 0; i < dd->msix_info.max_requested; i++) {
		struct hfi1_msix_entry *other_msix;

		other_msix = &dd->msix_info.msix_entries[i];
		if (other_msix->type != IRQ_SDMA || other_msix == msix)
			continue;

		if (cpumask_test_cpu(old_cpu, &other_msix->mask))
			goto unlock;
	}
	cpumask_clear_cpu(old_cpu, &set->mask);
	cpumask_clear_cpu(old_cpu, &set->used);
unlock:
	mutex_unlock(&node_affinity.lock);
}

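/* Affinity notifier callback, invoked when the IRQ affinity mask changes */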
static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	int cpu = cpumask_first(mask);
	struct hfi1_msix_entry *msix = container_of(notify,
						    struct hfi1_msix_entry,
						    notify);

	/* Only one CPU configuration supported currently */
	hfi1_update_sdma_affinity(msix, cpu);
}

static void hfi1_irq_notifier_release(struct kref *ref)
{
	/*
	 * This is required by affinity notifier. We don't have anything to
	 * free here.
	 */
}

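/* Register for affinity change notifications on an SDMA engine IRQ */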
static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	struct irq_affinity_notify *notify = &msix->notify;

	notify->irq = msix->irq;
	notify->notify = hfi1_irq_notifier_notify;
	notify->release = hfi1_irq_notifier_release;

	if (irq_set_affinity_notifier(notify->irq, notify))
		pr_err("Failed to register sdma irq affinity notifier for irq %d\n",
		       notify->irq);
}

static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	struct irq_affinity_notify *notify = &msix->notify;

	if (irq_set_affinity_notifier(notify->irq, NULL))
		pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n",
		       notify->irq);
}

/*
 * Function sets the irq affinity for msix.
 * It *must* be called with node_affinity.lock held.
 */
static int get_irq_affinity(struct hfi1_devdata *dd,
			    struct hfi1_msix_entry *msix)
{
	cpumask_var_t diff;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set = NULL;
	struct sdma_engine *sde = NULL;
	struct hfi1_ctxtdata *rcd = NULL;
	char extra[64];
	int cpu = -1;

	extra[0] = '\0';
	cpumask_clear(&msix->mask);

	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		sde = (struct sdma_engine *)msix->arg;
		scnprintf(extra, 64, "engine %u", sde->this_idx);
		set = &entry->def_intr;
		break;
	case IRQ_GENERAL:
		cpu = cpumask_first(&entry->general_intr_mask);
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			cpu = cpumask_first(&entry->general_intr_mask);
		else
			set = &entry->rcv_intr;
		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
		break;
	case IRQ_NETDEVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		set = &entry->def_intr;
		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
		break;
	default:
		dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
		return -EINVAL;
	}

	/*
	 * The general and control contexts are placed on a particular
	 * CPU, which is set above. Skip accounting for it. Everything else
	 * finds its CPU here.
	 */
	if (cpu == -1 && set) {
		if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
			return -ENOMEM;

		cpu = cpu_mask_set_get_first(set, diff);
		if (cpu < 0) {
			free_cpumask_var(diff);
			dd_dev_err(dd, "Failure to obtain CPU for IRQ\n");
			return cpu;
		}

		free_cpumask_var(diff);
	}

	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n",
		    msix->irq, irq_type_names[msix->type],
		    extra, cpu);
	irq_set_affinity_hint(msix->irq, &msix->mask);

	if (msix->type == IRQ_SDMA) {
		sde->cpu = cpu;
		hfi1_setup_sdma_notifier(msix);
	}

	return 0;
}

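/* Locked wrapper around get_irq_affinity() */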
int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
	int ret;

	mutex_lock(&node_affinity.lock);
	ret = get_irq_affinity(dd, msix);
	mutex_unlock(&node_affinity.lock);
	return ret;
}

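/*
 * Undo get_irq_affinity(): return the IRQ's CPUs to the proper mask set
 * and clear the affinity hint.
 */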
void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
			   struct hfi1_msix_entry *msix)
{
	struct cpu_mask_set *set = NULL;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		set = &entry->def_intr;
		hfi1_cleanup_sdma_notifier(msix);
		break;
	case IRQ_GENERAL:
		/* Don't do accounting for general contexts */
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		/* Don't do accounting for control contexts */
		if (rcd->ctxt != HFI1_CTRL_CTXT)
			set = &entry->rcv_intr;
		break;
	case IRQ_NETDEVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		set = &entry->def_intr;
		break;
	default:
		mutex_unlock(&node_affinity.lock);
		return;
	}

	if (set) {
		cpumask_andnot(&set->used, &set->used, &msix->mask);
		_cpu_mask_set_gen_dec(set);
	}

	irq_set_affinity_hint(msix->irq, NULL);
	cpumask_clear(&msix->mask);
	mutex_unlock(&node_affinity.lock);
}

/* Find the hw thread mask for the given hw thread number */
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
				struct hfi1_affinity_node_list *affinity)
{
	int possible, curr_cpu, i;
	uint num_cores_per_socket = node_affinity.num_online_cpus /
					affinity->num_core_siblings /
						node_affinity.num_online_nodes;

	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
	if (affinity->num_core_siblings > 0) {
		/* Removing other siblings not needed for now */
		possible = cpumask_weight(hw_thread_mask);
		curr_cpu = cpumask_first(hw_thread_mask);
		for (i = 0;
		     i < num_cores_per_socket * node_affinity.num_online_nodes;
		     i++)
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);

		for (; i < possible; i++) {
			cpumask_clear_cpu(curr_cpu, hw_thread_mask);
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
		}

		/* Identifying correct hw threads within physical cores */
		cpumask_shift_left(hw_thread_mask, hw_thread_mask,
				   num_cores_per_socket *
				   node_affinity.num_online_nodes *
				   hw_thread_no);
	}
}

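/*
 * Recommend a CPU on which a user process should run, preferring CPUs on
 * the device's NUMA node that are not busy servicing interrupts. Returns
 * -1 if no recommendation is made.
 */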
int hfi1_get_proc_affinity(int node)
{
	int cpu = -1, ret, i;
	struct hfi1_affinity_node *entry;
	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
	const struct cpumask *node_mask,
		*proc_mask = current->cpus_ptr;
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	/*
	 * check whether process/context affinity has a
	 * non-default policy and non-full CPU affinity mask,
	 * in which case we do not modify it.
	 */
	if (current->nr_cpus_allowed == 1) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		/*
		 * Mark the pre-set CPU as used. This is atomic so we don't
		 * need the lock.
		 */
		cpu = cpumask_first(proc_mask);
		cpumask_set_cpu(cpu, &set->used);
		goto done;
	} else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		goto done;
	}

	/*
	 * The process does not have a preset CPU affinity, so find one to
	 * recommend using the following algorithm:
	 *
	 * For each user process that is opening a context on HFI Y:
	 *  a) If all cores are filled, reinitialize the bitmask
	 *  b) Fill real cores first, then HT cores (First set of HT
	 *     cores on all physical cores, then second set of HT core,
	 *     and, so on) in the following order:
	 *
	 *     1. Same NUMA node as HFI Y and not running an IRQ
	 *        handler
	 *     2. Same NUMA node as HFI Y and running an IRQ handler
	 *     3. Different NUMA node to HFI Y and not running an IRQ
	 *        handler
	 *     4. Different NUMA node to HFI Y and running an IRQ
	 *        handler
	 *  c) Mark core as filled in the bitmask. As user processes are
	 *     done, clear cores marked as filled and redo the search.
	 */

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		goto done;
	ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
	if (!ret)
		goto free_diff;
	ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
	if (!ret)
		goto free_hw_thread_mask;
	ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
	if (!ret)
		goto free_available_mask;

	mutex_lock(&affinity->lock);
	/*
	 * If we've used all available HW threads, clear the mask and start
	 * overloading.
	 */
	_cpu_mask_set_gen_inc(set);

	/*
	 * If the NUMA node has CPUs used by interrupt handlers, include them
	 * in the interrupt handler mask.
	 */
	entry = node_affinity_lookup(node);
	if (entry) {
		cpumask_copy(intrs_mask, (entry->def_intr.gen ?
					  &entry->def_intr.mask :
					  &entry->def_intr.used));
		cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
						    &entry->rcv_intr.mask :
						    &entry->rcv_intr.used));
		cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
	}
	hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
		  cpumask_pr_args(intrs_mask));

	cpumask_copy(hw_thread_mask, &set->mask);

	/*
	 * If HT cores are enabled, identify which HW threads within the
	 * physical cores should be used.
	 */
	if (affinity->num_core_siblings > 0) {
		for (i = 0; i < affinity->num_core_siblings; i++) {
			find_hw_thread_mask(i, hw_thread_mask, affinity);

			/*
			 * If there's at least one available core for this HW
			 * thread number, stop looking for a core.
			 *
			 * diff will always be not empty at least once in this
			 * loop as the used mask gets reset when
			 * (set->mask == set->used) before this loop.
			 */
			cpumask_andnot(diff, hw_thread_mask, &set->used);
			if (!cpumask_empty(diff))
				break;
		}
	}
	hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
		  cpumask_pr_args(hw_thread_mask));

	node_mask = cpumask_of_node(node);
	hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
		  cpumask_pr_args(node_mask));

	/* Get cpumask of available CPUs on preferred NUMA */
	cpumask_and(available_mask, hw_thread_mask, node_mask);
	cpumask_andnot(available_mask, available_mask, &set->used);
	hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
		  cpumask_pr_args(available_mask));

	/*
	 * At first, we don't want to place processes on the same
	 * CPUs as interrupt handlers. Then, CPUs running interrupt
	 * handlers are used.
	 *
	 * 1) If diff is not empty, then there are CPUs not running
	 *    interrupt handlers available, so diff gets copied
	 *    over to available_mask.
	 * 2) If diff is empty, then all CPUs not running interrupt
	 *    handlers are taken, so available_mask contains all
	 *    available CPUs running interrupt handlers.
	 * 3) If available_mask is empty, then all CPUs on the
	 *    preferred NUMA node are taken, so other NUMA nodes are
	 *    used for process assignments using the same method as
	 *    the preferred NUMA node.
	 */
	cpumask_andnot(diff, available_mask, intrs_mask);
	if (!cpumask_empty(diff))
		cpumask_copy(available_mask, diff);

	/* If we don't have CPUs on the preferred node, use other NUMA nodes */
	if (cpumask_empty(available_mask)) {
		cpumask_andnot(available_mask, hw_thread_mask, &set->used);
		/* Excluding preferred NUMA cores */
		cpumask_andnot(available_mask, available_mask, node_mask);
		hfi1_cdbg(PROC,
			  "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
			  cpumask_pr_args(available_mask));

		/*
		 * At first, we don't want to place processes on the same
		 * CPUs as interrupt handlers.
		 */
		cpumask_andnot(diff, available_mask, intrs_mask);
		if (!cpumask_empty(diff))
			cpumask_copy(available_mask, diff);
	}
	hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
		  cpumask_pr_args(available_mask));

	cpu = cpumask_first(available_mask);
	if (cpu >= nr_cpu_ids) /* empty */
		cpu = -1;
	else
		cpumask_set_cpu(cpu, &set->used);

	mutex_unlock(&affinity->lock);
	hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);

	free_cpumask_var(intrs_mask);
free_available_mask:
	free_cpumask_var(available_mask);
free_hw_thread_mask:
	free_cpumask_var(hw_thread_mask);
free_diff:
	free_cpumask_var(diff);
done:
	return cpu;
}

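/* Release a CPU previously handed out by hfi1_get_proc_affinity() */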
void hfi1_put_proc_affinity(int cpu)
{
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	if (cpu < 0)
		return;

	mutex_lock(&affinity->lock);
	cpu_mask_set_put(set, cpu);
	hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
	mutex_unlock(&affinity->lock);
}