/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 */
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"

struct hfi1_affinity_node_list node_affinity = {
	.list = LIST_HEAD_INIT(node_affinity.list),
	.lock = __MUTEX_INITIALIZER(node_affinity.lock)
};

/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
	"SDMA",
	"RCVCTXT",
	"GENERAL",
	"OTHER",
};

/* Per NUMA node count of HFI devices */
static unsigned int *hfi1_per_node_cntr;

static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
	cpumask_clear(&set->mask);
	cpumask_clear(&set->used);
	set->gen = 0;
}
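
/*
 * A note on struct cpu_mask_set bookkeeping (illustrative summary, not
 * upstream documentation): 'mask' is the pool of candidate CPUs, 'used'
 * tracks which of them have been handed out, and 'gen' counts how many
 * times the pool has been fully consumed and recycled. For example, with
 * mask = 0-3, four allocations fill 'used'; the fifth bumps 'gen' and
 * clears 'used', so CPUs are reused (overloaded) in round-robin order.
 */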

/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
	int possible, curr_cpu, i, ht;

	cpumask_clear(&node_affinity.real_cpu_mask);

	/* Start with cpu online mask as the real cpu mask */
	cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);

	/*
	 * Remove HT cores from the real cpu mask. Do this in the two
	 * steps below.
	 */
	possible = cpumask_weight(&node_affinity.real_cpu_mask);
	ht = cpumask_weight(topology_sibling_cpumask(
				cpumask_first(&node_affinity.real_cpu_mask)));
	/*
	 * Step 1. Skip over the first N HT siblings and use them as the
	 * "real" cores. Assumes that HT cores are not enumerated in
	 * succession (except in the single core case).
	 */
	curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
	for (i = 0; i < possible / ht; i++)
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	/*
	 * Step 2. Remove the remaining HT siblings. Use cpumask_next() to
	 * skip any gaps.
	 */
	for (; i < possible; i++) {
		cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	}
}
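
/*
 * Worked example (illustrative; assumes the common x86 enumeration where
 * all first HT threads precede all second HT threads): with 16 online
 * CPUs and ht = 2, possible / ht = 8, so step 1 leaves curr_cpu at CPU 8
 * and step 2 clears CPUs 8-15, leaving real_cpu_mask = 0-7. On a topology
 * that interleaves siblings (CPUs 0 and 1 on the same core), the "not
 * enumerated in succession" assumption above would not hold.
 */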

int node_affinity_init(void)
{
	int node;
	struct pci_dev *dev = NULL;
	const struct pci_device_id *ids = hfi1_pci_tbl;

	cpumask_clear(&node_affinity.proc.used);
	cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);

	node_affinity.proc.gen = 0;
	node_affinity.num_core_siblings =
		cpumask_weight(topology_sibling_cpumask(
			cpumask_first(&node_affinity.proc.mask)
			));
	node_affinity.num_possible_nodes = num_possible_nodes();
	node_affinity.num_online_nodes = num_online_nodes();
	node_affinity.num_online_cpus = num_online_cpus();

	/*
	 * The real cpu mask is part of the affinity structure but it has to
	 * be initialized early. It is needed to calculate the number of user
	 * contexts in set_up_context_variables().
	 */
	init_real_cpu_mask();

	hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
				     sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
	if (!hfi1_per_node_cntr)
		return -ENOMEM;

	while (ids->vendor) {
		dev = NULL;
		while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
			node = pcibus_to_node(dev->bus);
			if (node < 0)
				node = numa_node_id();

			hfi1_per_node_cntr[node]++;
		}
		ids++;
	}

	return 0;
}
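
/*
 * For illustration: hfi1_per_node_cntr ends up holding the number of HFI
 * devices attached to each NUMA node, e.g. two HFIs on node 0 and one on
 * node 1 give cntr[0] = 2 and cntr[1] = 1. hfi1_dev_affinity_init() below
 * multiplies the per-device kernel receive queue count by this value so
 * that all devices on a node get dedicated receive CPUs up front.
 */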

void node_affinity_destroy(void)
{
	struct list_head *pos, *q;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	list_for_each_safe(pos, q, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node,
				   list);
		list_del(pos);
		kfree(entry);
	}
	mutex_unlock(&node_affinity.lock);
	kfree(hfi1_per_node_cntr);
}

static struct hfi1_affinity_node *node_affinity_allocate(int node)
{
	struct hfi1_affinity_node *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;
	entry->node = node;
	INIT_LIST_HEAD(&entry->list);

	return entry;
}

/*
 * It appends an entry to the list.
 * It *must* be called with node_affinity.lock held.
 */
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{
	list_add_tail(&entry->list, &node_affinity.list);
}

/* It must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
	struct list_head *pos;
	struct hfi1_affinity_node *entry;

	list_for_each(pos, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node, list);
		if (entry->node == node)
			return entry;
	}

	return NULL;
}

/*
 * Interrupt affinity layout.
 *
 * The general/control context gets the first CPU of the device's NUMA
 * node. Kernel receive contexts get the next CPUs, one each, and the
 * remaining CPUs on the node form the default pool used by the SDMA
 * engines. If the node has only one usable CPU, all three groups share
 * that one CPU.
 */
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
	int node = pcibus_to_node(dd->pcidev->bus);
	struct hfi1_affinity_node *entry;
	const struct cpumask *local_mask;
	int curr_cpu, possible, i;

	if (node < 0)
		node = numa_node_id();
	dd->node = node;

	local_mask = cpumask_of_node(dd->node);
	if (cpumask_first(local_mask) >= nr_cpu_ids)
		local_mask = topology_core_cpumask(0);

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	/*
	 * If this is the first time this NUMA node's affinity is used,
	 * create an entry in the global affinity structure and initialize it.
	 */
	if (!entry) {
		entry = node_affinity_allocate(node);
		if (!entry) {
			dd_dev_err(dd,
				   "Unable to allocate global affinity node\n");
			mutex_unlock(&node_affinity.lock);
			return -ENOMEM;
		}
		init_cpu_mask_set(&entry->def_intr);
		init_cpu_mask_set(&entry->rcv_intr);
		cpumask_clear(&entry->general_intr_mask);

		/* Use the "real" (non-HT) cpu mask of this node as default */
		cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
			    local_mask);

		/* fill in the receive list */
		possible = cpumask_weight(&entry->def_intr.mask);
		curr_cpu = cpumask_first(&entry->def_intr.mask);

		if (possible == 1) {
			/* only one CPU, everyone will use it */
			cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
		} else {
			/*
			 * The general/control context will be the first CPU
			 * in the default list, so it is removed from the
			 * default list and added to the general interrupt
			 * list.
			 */
			cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
			curr_cpu = cpumask_next(curr_cpu,
						&entry->def_intr.mask);

			/*
			 * Remove the remaining kernel receive queues from
			 * the default list and add them to the receive list.
			 */
			for (i = 0;
			     i < (dd->n_krcv_queues - 1) *
				  hfi1_per_node_cntr[dd->node];
			     i++) {
				cpumask_clear_cpu(curr_cpu,
						  &entry->def_intr.mask);
				cpumask_set_cpu(curr_cpu,
						&entry->rcv_intr.mask);
				curr_cpu = cpumask_next(curr_cpu,
							&entry->def_intr.mask);
				if (curr_cpu >= nr_cpu_ids)
					break;
			}

			/*
			 * If there end up being 0 CPU cores leftover for SDMA
			 * engines, use the same CPU core as the general/control
			 * context.
			 */
			if (cpumask_weight(&entry->def_intr.mask) == 0)
				cpumask_copy(&entry->def_intr.mask,
					     &entry->general_intr_mask);
		}

		node_affinity_add_tail(entry);
	}
	mutex_unlock(&node_affinity.lock);
	return 0;
}
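
/*
 * Worked example (hypothetical numbers): a node with real CPUs 0-7, one
 * HFI on the node, and dd->n_krcv_queues = 3. CPU 0 becomes the
 * general/control interrupt CPU, (3 - 1) * 1 = 2 receive contexts take
 * CPUs 1-2, and CPUs 3-7 remain in def_intr for the SDMA engines. With
 * only one real CPU on the node, all three masks collapse onto that CPU.
 */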

/*
 * Function updates the irq affinity hint for msix after it has been changed
 * by the user using the /proc/irq interface. This function only accepts
 * one cpu in the mask.
 */
static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
{
	struct sdma_engine *sde = msix->arg;
	struct hfi1_devdata *dd = sde->dd;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set;
	int i, old_cpu;

	if (cpu > num_online_cpus() || cpu == sde->cpu)
		return;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	if (!entry)
		goto unlock;

	old_cpu = sde->cpu;
	sde->cpu = cpu;
	cpumask_clear(&msix->mask);
	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n",
		   msix->irq, irq_type_names[msix->type],
		   sde->this_idx, cpu);
	irq_set_affinity_hint(msix->irq, &msix->mask);

	/*
	 * Account for the new cpu in the def_intr set and release the old
	 * cpu, but only if no other SDMA engine's IRQ on this device is
	 * still pinned to it.
	 */
	set = &entry->def_intr;
	cpumask_set_cpu(cpu, &set->mask);
	cpumask_set_cpu(cpu, &set->used);
	for (i = 0; i < dd->num_msix_entries; i++) {
		struct hfi1_msix_entry *other_msix;

		other_msix = &dd->msix_entries[i];
		if (other_msix->type != IRQ_SDMA || other_msix == msix)
			continue;

		if (cpumask_test_cpu(old_cpu, &other_msix->mask))
			goto unlock;
	}
	cpumask_clear_cpu(old_cpu, &set->mask);
	cpumask_clear_cpu(old_cpu, &set->used);
unlock:
	mutex_unlock(&node_affinity.lock);
}
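
/*
 * Illustrative trigger (the IRQ number is hypothetical): writing a
 * single-CPU mask for an SDMA vector, e.g.
 *
 *	echo 4 > /proc/irq/97/smp_affinity
 *
 * lands in hfi1_irq_notifier_notify() below, which takes only the first
 * CPU of the new mask and calls hfi1_update_sdma_affinity() to repin the
 * engine and fix up the accounting.
 */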

static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	int cpu = cpumask_first(mask);
	struct hfi1_msix_entry *msix = container_of(notify,
						    struct hfi1_msix_entry,
						    notify);

	/* Only one CPU configuration supported currently */
	hfi1_update_sdma_affinity(msix, cpu);
}

static void hfi1_irq_notifier_release(struct kref *ref)
{
	/*
	 * This is required by affinity notifier. We don't have anything to
	 * free here.
	 */
}

static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	struct irq_affinity_notify *notify = &msix->notify;

	notify->irq = msix->irq;
	notify->notify = hfi1_irq_notifier_notify;
	notify->release = hfi1_irq_notifier_release;

	if (irq_set_affinity_notifier(notify->irq, notify))
		pr_err("Failed to register sdma irq affinity notifier for irq %d\n",
		       notify->irq);
}

static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	struct irq_affinity_notify *notify = &msix->notify;

	if (irq_set_affinity_notifier(notify->irq, NULL))
		pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n",
		       notify->irq);
}

/*
 * Function sets the irq affinity for msix.
 * It *must* be called with node_affinity.lock held.
 */
static int get_irq_affinity(struct hfi1_devdata *dd,
			    struct hfi1_msix_entry *msix)
{
	cpumask_var_t diff;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set = NULL;
	struct sdma_engine *sde = NULL;
	struct hfi1_ctxtdata *rcd = NULL;
	char extra[64];
	int cpu = -1;

	extra[0] = '\0';
	cpumask_clear(&msix->mask);

	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		sde = (struct sdma_engine *)msix->arg;
		scnprintf(extra, 64, "engine %u", sde->this_idx);
		set = &entry->def_intr;
		break;
	case IRQ_GENERAL:
		cpu = cpumask_first(&entry->general_intr_mask);
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			cpu = cpumask_first(&entry->general_intr_mask);
		else
			set = &entry->rcv_intr;
		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
		break;
	default:
		dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
		return -EINVAL;
	}

	/*
	 * The general and control contexts are placed on a particular
	 * CPU, which is set above. Skip accounting for it. Everything else
	 * finds its CPU here.
	 */
	if (cpu == -1 && set) {
		if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
			return -ENOMEM;

		if (cpumask_equal(&set->mask, &set->used)) {
			/*
			 * We've used up all the CPUs, bump up the generation
			 * and reset the 'used' map
			 */
			set->gen++;
			cpumask_clear(&set->used);
		}
		cpumask_andnot(diff, &set->mask, &set->used);
		cpu = cpumask_first(diff);
		cpumask_set_cpu(cpu, &set->used);

		free_cpumask_var(diff);
	}

	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n",
		    msix->irq, irq_type_names[msix->type],
		    extra, cpu);
	irq_set_affinity_hint(msix->irq, &msix->mask);

	if (msix->type == IRQ_SDMA) {
		sde->cpu = cpu;
		hfi1_setup_sdma_notifier(msix);
	}

	return 0;
}
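
/*
 * Generation counting, by example (hypothetical allocation order): with
 * def_intr.mask = 3-7, five SDMA vectors take CPUs 3,4,5,6,7 and fill
 * 'used'. A sixth vector finds mask == used, bumps gen to 1, clears
 * 'used', and is handed CPU 3 again. hfi1_put_irq_affinity() below undoes
 * one step: when 'used' drains to empty while gen > 0, it decrements gen
 * and refills 'used' so the earlier, overlapped assignments stay
 * accounted for.
 */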

int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
	int ret;

	mutex_lock(&node_affinity.lock);
	ret = get_irq_affinity(dd, msix);
	mutex_unlock(&node_affinity.lock);
	return ret;
}

void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
			   struct hfi1_msix_entry *msix)
{
	struct cpu_mask_set *set = NULL;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		set = &entry->def_intr;
		hfi1_cleanup_sdma_notifier(msix);
		break;
	case IRQ_GENERAL:
		/* Don't do accounting for general contexts */
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		/* Don't do accounting for control contexts */
		if (rcd->ctxt != HFI1_CTRL_CTXT)
			set = &entry->rcv_intr;
		break;
	default:
		mutex_unlock(&node_affinity.lock);
		return;
	}

	if (set) {
		cpumask_andnot(&set->used, &set->used, &msix->mask);
		if (cpumask_empty(&set->used) && set->gen) {
			set->gen--;
			cpumask_copy(&set->used, &set->mask);
		}
	}

	irq_set_affinity_hint(msix->irq, NULL);
	cpumask_clear(&msix->mask);
	mutex_unlock(&node_affinity.lock);
}
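
/*
 * Typical pairing (illustrative call sequence, not a verbatim call site):
 * request_irq() is followed by hfi1_get_irq_affinity() to pin and account
 * for the vector, and teardown calls hfi1_put_irq_affinity() before
 * free_irq() so the affinity hint and notifier are removed first.
 */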

/* Build the mask of CPUs belonging to hardware thread 'hw_thread_no' */
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
				struct hfi1_affinity_node_list *affinity)
{
	int possible, curr_cpu, i;
	uint num_cores_per_socket = node_affinity.num_online_cpus /
					affinity->num_core_siblings /
						node_affinity.num_online_nodes;

	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
	if (affinity->num_core_siblings > 0) {
		/* Removing other siblings not needed for now */
		possible = cpumask_weight(hw_thread_mask);
		curr_cpu = cpumask_first(hw_thread_mask);
		for (i = 0;
		     i < num_cores_per_socket * node_affinity.num_online_nodes;
		     i++)
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);

		for (; i < possible; i++) {
			cpumask_clear_cpu(curr_cpu, hw_thread_mask);
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
		}

		/* Identifying correct HW threads within physical cores */
		cpumask_shift_left(hw_thread_mask, hw_thread_mask,
				   num_cores_per_socket *
				   node_affinity.num_online_nodes *
				   hw_thread_no);
	}
}
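
/*
 * Worked example (hypothetical topology): 32 online CPUs, 2 HT siblings
 * per core, and 2 online NUMA nodes give num_cores_per_socket = 8. The
 * loops above first trim the copy of proc.mask down to the 16 first-thread
 * CPUs (0-15); for hw_thread_no = 1, the shift left by 8 * 2 * 1 = 16 then
 * selects CPUs 16-31, i.e. the second HT thread of every physical core.
 * As in init_real_cpu_mask(), this assumes siblings are enumerated in
 * consecutive blocks rather than interleaved.
 */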

int hfi1_get_proc_affinity(int node)
{
	int cpu = -1, ret, i;
	struct hfi1_affinity_node *entry;
	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
	const struct cpumask *node_mask,
		*proc_mask = &current->cpus_allowed;
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	/*
	 * Check if the process's affinity has already been restricted,
	 * e.g. by the user or by a previous context. If it is pinned to a
	 * single CPU, or to fewer CPUs than the full process pool, honor it.
	 */
	if (cpumask_weight(proc_mask) == 1) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		/*
		 * Mark the pre-set CPU as used. This is atomic so we don't
		 * need the lock.
		 */
		cpu = cpumask_first(proc_mask);
		cpumask_set_cpu(cpu, &set->used);
		goto done;
	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		goto done;
	}

	/*
	 * The process does not have a preset CPU affinity, so find one to
	 * recommend using the following algorithm.
	 *
	 * For each user process that is opening a context on HFI Y:
	 *  a) If all cores are filled, reinitialize the bitmask
	 *  b) Fill real cores first, then HT cores (first set of HT
	 *     cores on all physical cores, then second set of HT cores,
	 *     and so on) in the following order:
	 *
	 *     1. Same NUMA node as HFI Y and not running an IRQ
	 *        handler
	 *     2. Same NUMA node as HFI Y and running an IRQ handler
	 *     3. Different NUMA node to HFI Y and not running an IRQ
	 *        handler
	 *     4. Different NUMA node to HFI Y and running an IRQ
	 *        handler
	 *  c) Mark core as filled in the bitmask. As user processes are
	 *     done, clear cores that have been filled from the bitmask.
	 */

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		goto done;
	ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
	if (!ret)
		goto free_diff;
	ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
	if (!ret)
		goto free_hw_thread_mask;
	ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
	if (!ret)
		goto free_available_mask;

	mutex_lock(&affinity->lock);
	/*
	 * If we've used all available HW threads, clear the mask and start
	 * overloading.
	 */
	if (cpumask_equal(&set->mask, &set->used)) {
		set->gen++;
		cpumask_clear(&set->used);
	}

	/*
	 * If a NUMA node has CPUs used by interrupt handlers, include them
	 * in the interrupt handler mask.
	 */
	entry = node_affinity_lookup(node);
	if (entry) {
		cpumask_copy(intrs_mask, (entry->def_intr.gen ?
					  &entry->def_intr.mask :
					  &entry->def_intr.used));
		cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
						    &entry->rcv_intr.mask :
						    &entry->rcv_intr.used));
		cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
	}
	hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
		  cpumask_pr_args(intrs_mask));

	cpumask_copy(hw_thread_mask, &set->mask);

	/*
	 * If HT cores are enabled, identify which HW threads within the
	 * physical cores should be used.
	 */
	if (affinity->num_core_siblings > 0) {
		for (i = 0; i < affinity->num_core_siblings; i++) {
			find_hw_thread_mask(i, hw_thread_mask, affinity);

			/*
			 * If there's at least one available core for this HW
			 * thread number, stop looking for a core.
			 *
			 * diff will always be not empty at least once in this
			 * loop as the used mask gets reset when
			 * (set->mask == set->used) before this loop.
			 */
			cpumask_andnot(diff, hw_thread_mask, &set->used);
			if (!cpumask_empty(diff))
				break;
		}
	}
	hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
		  cpumask_pr_args(hw_thread_mask));

	node_mask = cpumask_of_node(node);
	hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
		  cpumask_pr_args(node_mask));

	/* Get cpumask of available CPUs on preferred NUMA */
	cpumask_and(available_mask, hw_thread_mask, node_mask);
	cpumask_andnot(available_mask, available_mask, &set->used);
	hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
		  cpumask_pr_args(available_mask));

	/*
	 * At first, we don't want to place processes on the same
	 * CPUs as interrupt handlers. Then, CPUs running interrupt
	 * handlers are used.
	 *
	 * 1) If diff is not empty, then there are CPUs not running
	 *    interrupt handlers available, so diff gets copied
	 *    over to available_mask.
	 * 2) If diff is empty, then all CPUs not running interrupt
	 *    handlers are already used, so available_mask is left
	 *    unchanged.
	 */
	cpumask_andnot(diff, available_mask, intrs_mask);
	if (!cpumask_empty(diff))
		cpumask_copy(available_mask, diff);

	/* If we don't have CPUs on the preferred node, use other NUMA nodes */
	if (cpumask_empty(available_mask)) {
		cpumask_andnot(available_mask, hw_thread_mask, &set->used);
		/* Excluding preferred NUMA cores */
		cpumask_andnot(available_mask, available_mask, node_mask);
		hfi1_cdbg(PROC,
			  "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
			  cpumask_pr_args(available_mask));

		/*
		 * At first, we don't want to place processes on the same
		 * CPUs as interrupt handlers.
		 */
		cpumask_andnot(diff, available_mask, intrs_mask);
		if (!cpumask_empty(diff))
			cpumask_copy(available_mask, diff);
	}
	hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
		  cpumask_pr_args(available_mask));

	cpu = cpumask_first(available_mask);
	if (cpu >= nr_cpu_ids) /* empty */
		cpu = -1;
	else
		cpumask_set_cpu(cpu, &set->used);

	mutex_unlock(&affinity->lock);
	hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);

	free_cpumask_var(intrs_mask);
free_available_mask:
	free_cpumask_var(available_mask);
free_hw_thread_mask:
	free_cpumask_var(hw_thread_mask);
free_diff:
	free_cpumask_var(diff);
done:
	return cpu;
}
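
/*
 * Illustrative caller pattern (a sketch; the field name comes from this
 * driver's file_ops but the sequence is simplified): when a user context
 * is opened, the driver records the recommendation and releases it on
 * close.
 *
 *	fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
 *	...
 *	hfi1_put_proc_affinity(fd->rec_cpu_num);
 *
 * hfi1_put_proc_affinity() below tolerates cpu < 0, so the close path can
 * pass the value back unconditionally.
 */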

void hfi1_put_proc_affinity(int cpu)
{
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	if (cpu < 0)
		return;

	mutex_lock(&affinity->lock);
	cpumask_clear_cpu(cpu, &set->used);
	hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
	if (cpumask_empty(&set->used) && set->gen) {
		set->gen--;
		cpumask_copy(&set->used, &set->mask);
	}
	mutex_unlock(&affinity->lock);
}