// SPDX-License-Identifier: GPL-2.0
/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Moved from arch/x86/kernel/apic/io_apic.c; hierarchical irqdomain
 * support added later.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

struct apic_chip_data {
        struct irq_cfg          hw_irq_cfg;
        unsigned int            vector;
        unsigned int            prev_vector;
        unsigned int            cpu;
        unsigned int            prev_cpu;
        unsigned int            irq;
        struct hlist_node       clist;
        unsigned int            move_in_progress        : 1,
                                is_managed              : 1,
                                can_reserve             : 1,
                                has_reserved            : 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif

void lock_vector_lock(void)
{
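        /*
         * Taken so that the set of online CPUs cannot change during
         * vector search and assignment.
         */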
        raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
        raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
                         const struct cpumask *mask)
{
        memset(info, 0, sizeof(*info));
        info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
        if (src)
                *dst = *src;
        else
                memset(dst, 0, sizeof(*dst));
}

static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
        if (!irqd)
                return NULL;

        while (irqd->parent_data)
                irqd = irqd->parent_data;

        return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
        return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
        struct apic_chip_data *apicd;

        apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
        if (apicd)
                INIT_HLIST_NODE(&apicd->clist);
        return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
        kfree(apicd);
}

static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
                                unsigned int cpu)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        lockdep_assert_held(&vector_lock);

        apicd->hw_irq_cfg.vector = vector;
        apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
        irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
        trace_vector_config(irqd->irq, vector, cpu,
                            apicd->hw_irq_cfg.dest_apicid);
}

static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
                               unsigned int newcpu)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        struct irq_desc *desc = irq_data_to_desc(irqd);
        bool managed = irqd_affinity_is_managed(irqd);

        lockdep_assert_held(&vector_lock);

        trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
                            apicd->cpu);

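        /*
         * If there is no vector associated, or the associated vector is
         * the shutdown vector (which keeps PCI/MSI shutdown mode working),
         * then there is nothing to release. Clear out prev_vector for
         * this and the offlined-target case below.
         */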
        apicd->prev_vector = 0;
        if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
                goto setnew;

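        /*
         * If the target CPU of the previous vector is online, then mark
         * the vector as move in progress and store it for cleanup when
         * the first interrupt on the new vector arrives. If the target
         * CPU is offline then the regular release mechanism via the
         * cleanup vector is not possible and the vector can be freed
         * immediately in the underlying matrix allocator.
         */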
        if (cpu_online(apicd->cpu)) {
                apicd->move_in_progress = true;
                apicd->prev_vector = apicd->vector;
                apicd->prev_cpu = apicd->cpu;
                WARN_ON_ONCE(apicd->cpu == newcpu);
        } else {
                irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
                                managed);
        }

setnew:
        apicd->vector = newvec;
        apicd->cpu = newcpu;
        BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
        per_cpu(vector_irq, newcpu)[newvec] = desc;
}

static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
        unsigned int cpu = cpumask_first(cpu_online_mask);

        apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

static int reserve_managed_vector(struct irq_data *irqd)
{
        const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&vector_lock, flags);
        apicd->is_managed = true;
        ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        trace_vector_reserve_managed(irqd->irq, ret);
        return ret;
}

static void reserve_irq_vector_locked(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        irq_matrix_reserve(vector_matrix);
        apicd->can_reserve = true;
        apicd->has_reserved = true;
        irqd_set_can_reserve(irqd);
        trace_vector_reserve(irqd->irq, 0);
        vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        reserve_irq_vector_locked(irqd);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return 0;
}

static int
assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        bool resvd = apicd->has_reserved;
        unsigned int cpu = apicd->cpu;
        int vector = apicd->vector;

        lockdep_assert_held(&vector_lock);

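        /*
         * If the current target CPU is online and in the new requested
         * affinity mask, there is no point in moving the interrupt from
         * one CPU to another.
         */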
        if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
                return 0;

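        /*
         * Careful here. @apicd might either have move_in_progress set or
         * be enqueued for cleanup. Assigning a new vector would either
         * leave a stale vector on some CPU around or in case of a pending
         * cleanup race with the cleanup code and corrupt the hlist.
         */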
        if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
                return -EBUSY;

        vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
        trace_vector_alloc(irqd->irq, vector, resvd, vector);
        if (vector < 0)
                return vector;
        apic_update_vector(irqd, vector, cpu);
        apic_update_irq_cfg(irqd, vector, cpu);

        return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&vector_lock, flags);
        cpumask_and(vector_searchmask, dest, cpu_online_mask);
        ret = assign_vector_locked(irqd, vector_searchmask);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return ret;
}

static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
        /* Get the affinity mask - either irq_default_affinity or (user) set */
        const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
        int node = irq_data_get_node(irqd);

        if (node != NUMA_NO_NODE) {
                /* Try the intersection of @affmsk and node mask */
                cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
                if (!assign_vector_locked(irqd, vector_searchmask))
                        return 0;
        }

        /* Try the full affinity mask */
        cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
        if (!assign_vector_locked(irqd, vector_searchmask))
                return 0;

        if (node != NUMA_NO_NODE) {
                /* Try the node mask */
                if (!assign_vector_locked(irqd, cpumask_of_node(node)))
                        return 0;
        }

        /* Try the full online mask */
        return assign_vector_locked(irqd, cpu_online_mask);
}

static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
        if (irqd_affinity_is_managed(irqd))
                return reserve_managed_vector(irqd);
        if (info->mask)
                return assign_irq_vector(irqd, info->mask);
        /*
         * Make only a global reservation with no guarantee. A real vector
         * is associated at activation time.
         */
        return reserve_irq_vector(irqd);
}

static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
        const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        int vector, cpu;

        cpumask_and(vector_searchmask, dest, affmsk);

        /* set_affinity might call here for nothing */
        if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
                return 0;
        vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
                                          &cpu);
        trace_vector_alloc_managed(irqd->irq, vector, vector);
        if (vector < 0)
                return vector;
        apic_update_vector(irqd, vector, cpu);
        apic_update_irq_cfg(irqd, vector, cpu);
        return 0;
}

static void clear_irq_vector(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        bool managed = irqd_affinity_is_managed(irqd);
        unsigned int vector = apicd->vector;

        lockdep_assert_held(&vector_lock);

        if (!vector)
                return;

        trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
                           apicd->prev_cpu);

        per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
        irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
        apicd->vector = 0;

        /* Clean up the left-overs of a (not yet completed) move, if any */
        vector = apicd->prev_vector;
        if (!vector)
                return;

        per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
        irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
        apicd->prev_vector = 0;
        apicd->move_in_progress = 0;
        hlist_del_init(&apicd->clist);
}

static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;

        trace_vector_deactivate(irqd->irq, apicd->is_managed,
                                apicd->can_reserve, false);

        /* Regular fixed assigned interrupt */
        if (!apicd->is_managed && !apicd->can_reserve)
                return;
        /* If the interrupt has a global reservation, nothing to do */
        if (apicd->has_reserved)
                return;

        raw_spin_lock_irqsave(&vector_lock, flags);
        clear_irq_vector(irqd);
        if (apicd->can_reserve)
                reserve_irq_vector_locked(irqd);
        else
                vector_assign_managed_shutdown(irqd);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static int activate_reserved(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        int ret;

        ret = assign_irq_vector_any_locked(irqd);
        if (!ret) {
                apicd->has_reserved = false;
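                /*
                 * Core might have disabled reservation mode after
                 * allocating the irq descriptor. Ideally this should
                 * happen before allocation time, but that would require
                 * completely convoluted ways of transporting that
                 * information.
                 */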
                if (!irqd_can_reserve(irqd))
                        apicd->can_reserve = false;
        }

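        /*
         * Check to ensure that the effective affinity mask is a subset
         * of the user-supplied affinity mask, and warn the user if it
         * is not.
         */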
        if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
                            irq_data_get_affinity_mask(irqd))) {
                pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
                        irqd->irq);
        }

        return ret;
}

static int activate_managed(struct irq_data *irqd)
{
        const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
        int ret;

        cpumask_and(vector_searchmask, dest, cpu_online_mask);
        if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
                /* Something in the core code broke! Survive gracefully */
                pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
                return -EINVAL;
        }

        ret = assign_managed_vector(irqd, vector_searchmask);
        /*
         * This should not happen. The vector reservation got buggered.
         * Handle it gracefully.
         */
        if (WARN_ON_ONCE(ret < 0)) {
                pr_err("Managed startup irq %u, no vector available\n",
                       irqd->irq);
        }
        return ret;
}

static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
                               bool reserve)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;
        int ret = 0;

        trace_vector_activate(irqd->irq, apicd->is_managed,
                              apicd->can_reserve, reserve);

        raw_spin_lock_irqsave(&vector_lock, flags);
        if (!apicd->can_reserve && !apicd->is_managed)
                assign_irq_vector_any_locked(irqd);
        else if (reserve || irqd_is_managed_and_shutdown(irqd))
                vector_assign_managed_shutdown(irqd);
        else if (apicd->is_managed)
                ret = activate_managed(irqd);
        else if (apicd->has_reserved)
                ret = activate_reserved(irqd);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return ret;
}

static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
        const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        trace_vector_teardown(irqd->irq, apicd->is_managed,
                              apicd->has_reserved);

        if (apicd->has_reserved)
                irq_matrix_remove_reserved(vector_matrix);
        if (apicd->is_managed)
                irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
                                 unsigned int virq, unsigned int nr_irqs)
{
        struct apic_chip_data *apicd;
        struct irq_data *irqd;
        unsigned long flags;
        int i;

        for (i = 0; i < nr_irqs; i++) {
                irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
                if (irqd && irqd->chip_data) {
                        raw_spin_lock_irqsave(&vector_lock, flags);
                        clear_irq_vector(irqd);
                        vector_free_reserved_and_managed(irqd);
                        apicd = irqd->chip_data;
                        irq_domain_reset_irq_data(irqd);
                        raw_spin_unlock_irqrestore(&vector_lock, flags);
                        free_apic_chip_data(apicd);
                }
        }
}

static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
                                    struct apic_chip_data *apicd)
{
        unsigned long flags;
        bool realloc = false;

        apicd->vector = ISA_IRQ_VECTOR(virq);
        apicd->cpu = 0;

        raw_spin_lock_irqsave(&vector_lock, flags);
        /*
         * If the interrupt is activated, then it must stay at this vector
         * position. That's usually the timer interrupt (0).
         */
        if (irqd_is_activated(irqd)) {
                trace_vector_setup(virq, true, 0);
                apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
        } else {
                /* Release the vector */
                apicd->can_reserve = true;
                irqd_set_can_reserve(irqd);
                clear_irq_vector(irqd);
                realloc = true;
        }
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return realloc;
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
                                 unsigned int nr_irqs, void *arg)
{
        struct irq_alloc_info *info = arg;
        struct apic_chip_data *apicd;
        struct irq_data *irqd;
        int i, err, node;

        if (disable_apic)
                return -ENXIO;

        /* Currently vector allocator can't guarantee contiguous allocations */
        if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
                return -ENOSYS;

        for (i = 0; i < nr_irqs; i++) {
                irqd = irq_domain_get_irq_data(domain, virq + i);
                BUG_ON(!irqd);
                node = irq_data_get_node(irqd);
                WARN_ON_ONCE(irqd->chip_data);
                apicd = alloc_apic_chip_data(node);
                if (!apicd) {
                        err = -ENOMEM;
                        goto error;
                }

                apicd->irq = virq + i;
                irqd->chip = &lapic_controller;
                irqd->chip_data = apicd;
                irqd->hwirq = virq + i;
                irqd_set_single_target(irqd);

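                /*
                 * Prevent that any of these interrupts is invoked in
                 * non interrupt context via e.g. generic_handle_irq()
                 * as that can corrupt the affinity move logic.
                 */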
                irqd_set_handle_enforce_irqctx(irqd);

                /* Don't invoke affinity setter on deactivated interrupts */
                irqd_set_affinity_on_activate(irqd);

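                /*
                 * Legacy vectors are already assigned when the IOAPIC
                 * takes them over. They stay on the same vector. This is
                 * required for check_timer() to work correctly as it
                 * might switch back to legacy mode. Only update the
                 * hardware config.
                 */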
                if (info->flags & X86_IRQ_ALLOC_LEGACY) {
                        if (!vector_configure_legacy(virq + i, irqd, apicd))
                                continue;
                }

                err = assign_irq_vector_policy(irqd, info);
                trace_vector_setup(virq + i, false, err);
                if (err) {
                        irqd->chip_data = NULL;
                        free_apic_chip_data(apicd);
                        goto error;
                }
        }

        return 0;

error:
        x86_vector_free_irqs(domain, virq, i);
        return err;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
                                  struct irq_data *irqd, int ind)
{
        struct apic_chip_data apicd;
        unsigned long flags;
        int irq;

        if (!irqd) {
                irq_matrix_debug_show(m, vector_matrix, ind);
                return;
        }

        irq = irqd->irq;
        if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
                seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
                seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
                return;
        }

        if (!irqd->chip_data) {
                seq_printf(m, "%*sVector: Not assigned\n", ind, "");
                return;
        }

        raw_spin_lock_irqsave(&vector_lock, flags);
        memcpy(&apicd, irqd->chip_data, sizeof(apicd));
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
        seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
        if (apicd.prev_vector) {
                seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
                seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
        }
        seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
        seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
        seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
        seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
        seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif

static const struct irq_domain_ops x86_vector_domain_ops = {
        .alloc          = x86_vector_alloc_irqs,
        .free           = x86_vector_free_irqs,
        .activate       = x86_vector_activate,
        .deactivate     = x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
        .debug_show     = x86_vector_debug_show,
#endif
};

int __init arch_probe_nr_irqs(void)
{
        int nr;

        if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
                nr_irqs = NR_VECTORS * nr_cpu_ids;

        nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
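        /*
         * Additional headroom for MSI(-X) and other dynamically
         * allocated interrupts.
         */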
        if (gsi_top <= NR_IRQS_LEGACY)
                nr += 8 * nr_cpu_ids;
        else
                nr += gsi_top * 16;
#endif
        if (nr < nr_irqs)
                nr_irqs = nr;

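        /*
         * We don't know if the PIC is present at this point, so we need
         * to do probe() to get the right number of legacy IRQs.
         */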
        return legacy_pic->probe();
}

void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
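        /*
         * Use assign_system here so it won't get accounted as allocated
         * and movable in the cpu hotplug check and it prevents managed
         * irq reservation from touching it.
         */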
        irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

void __init lapic_assign_system_vectors(void)
{
        unsigned int i, vector = 0;

        for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
                irq_matrix_assign_system(vector_matrix, vector, false);

        if (nr_legacy_irqs() > 1)
                lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

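        /* System vectors are reserved; online the matrix for the boot CPU */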
        irq_matrix_online(vector_matrix);

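        /* Mark the preallocated legacy interrupts */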
        for (i = 0; i < nr_legacy_irqs(); i++) {
                if (i != PIC_CASCADE_IR)
                        irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
        }
}

int __init arch_early_irq_init(void)
{
        struct fwnode_handle *fn;

        fn = irq_domain_alloc_named_fwnode("VECTOR");
        BUG_ON(!fn);
        x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
                                                   NULL);
        BUG_ON(x86_vector_domain == NULL);
        irq_set_default_host(x86_vector_domain);

        BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
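        /*
         * Allocate the vector matrix allocator data structure and limit
         * the search area to the usable external vector space.
         */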
        vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
                                         FIRST_SYSTEM_VECTOR);
        BUG_ON(!vector_matrix);

        return arch_early_ioapic_init();
}

#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
        int isairq = vector - ISA_IRQ_VECTOR(0);

        /* Check whether the irq is in the legacy space */
        if (isairq < 0 || isairq >= nr_legacy_irqs())
                return VECTOR_UNUSED;
        /* Check whether the irq is handled by the IOAPIC */
        if (test_bit(isairq, &io_apic_irqs))
                return VECTOR_UNUSED;
        return irq_to_desc(isairq);
}

/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
        unsigned int vector;

        lockdep_assert_held(&vector_lock);

        /* Online the vector matrix array for this CPU */
        irq_matrix_online(vector_matrix);

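        /*
         * The interrupt affinity logic never targets interrupts to
         * offline CPUs. The exception are the legacy PIC interrupts. In
         * general they are only targeted to CPU0, but depending on the
         * platform they can be distributed to any online CPU in
         * hardware. The kernel has no influence on that. So all active
         * legacy vectors must be installed on all CPUs, which is why
         * the per-CPU vector_irq array is populated for them here.
         */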
        for (vector = 0; vector < NR_VECTORS; vector++)
                this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

void lapic_offline(void)
{
        lock_vector_lock();
        irq_matrix_offline(vector_matrix);
        unlock_vector_lock();
}

static int apic_set_affinity(struct irq_data *irqd,
                             const struct cpumask *dest, bool force)
{
        int err;

        if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
                return -EIO;

        raw_spin_lock(&vector_lock);
        cpumask_and(vector_searchmask, dest, cpu_online_mask);
        if (irqd_affinity_is_managed(irqd))
                err = assign_managed_vector(irqd, vector_searchmask);
        else
                err = assign_vector_locked(irqd, vector_searchmask);
        raw_spin_unlock(&vector_lock);
        return err ? err : IRQ_SET_MASK_OK;
}

#else
# define apic_set_affinity      NULL
#endif

static int apic_retrigger_irq(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        apic->send_IPI(apicd->cpu, apicd->vector);
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        return 1;
}

void apic_ack_irq(struct irq_data *irqd)
{
        irq_move_irq(irqd);
        ack_APIC_irq();
}

void apic_ack_edge(struct irq_data *irqd)
{
        irq_complete_move(irqd_cfg(irqd));
        apic_ack_irq(irqd);
}

static struct irq_chip lapic_controller = {
        .name                   = "APIC",
        .irq_ack                = apic_ack_edge,
        .irq_set_affinity       = apic_set_affinity,
        .irq_compose_msi_msg    = x86_vector_msi_compose_msg,
        .irq_retrigger          = apic_retrigger_irq,
};

#ifdef CONFIG_SMP

static void free_moved_vector(struct apic_chip_data *apicd)
{
        unsigned int vector = apicd->prev_vector;
        unsigned int cpu = apicd->prev_cpu;
        bool managed = apicd->is_managed;

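        /*
         * Managed interrupts are usually not migrated away from an
         * online CPU, but CPU isolation 'managed_irq' can make that
         * happen:
         * 1) Activation does not take the isolation into account to
         *    keep the code simple.
         * 2) Migration away from an isolated CPU can happen when a
         *    non-isolated CPU which is in the calculated affinity mask
         *    comes online.
         */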
        trace_vector_free_moved(apicd->irq, cpu, vector, managed);
        irq_matrix_free(vector_matrix, cpu, vector, managed);
        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
        hlist_del_init(&apicd->clist);
        apicd->prev_vector = 0;
        apicd->move_in_progress = 0;
}

DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
{
        struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
        struct apic_chip_data *apicd;
        struct hlist_node *tmp;

        ack_APIC_irq();

        raw_spin_lock(&vector_lock);

        hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
                unsigned int irr, vector = apicd->prev_vector;

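                /*
                 * Paranoia: Check if the vector that needs to be cleaned
                 * up is registered at the APIC's IRR. If so, then this
                 * is not the best time to clean it up. Clean it up in
                 * the next cleanup cycle by sending another
                 * IRQ_MOVE_CLEANUP_VECTOR to this CPU.
                 */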
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr & (1U << (vector % 32))) {
                        apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
                        continue;
                }
                free_moved_vector(apicd);
        }

        raw_spin_unlock(&vector_lock);
}

static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
        unsigned int cpu;

        raw_spin_lock(&vector_lock);
        apicd->move_in_progress = 0;
        cpu = apicd->prev_cpu;
        if (cpu_online(cpu)) {
                hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
                apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
        } else {
                apicd->prev_vector = 0;
        }
        raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
        struct apic_chip_data *apicd;

        apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
        if (apicd->move_in_progress)
                __send_cleanup_vector(apicd);
}

void irq_complete_move(struct irq_cfg *cfg)
{
        struct apic_chip_data *apicd;

        apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
        if (likely(!apicd->move_in_progress))
                return;

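        /*
         * If the interrupt arrived on the new target CPU, cleanup the
         * vector on the old target CPU. A vector check is not required
         * because an interrupt can never move from one vector to another
         * on the same CPU.
         */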
        if (apicd->cpu == smp_processor_id())
                __send_cleanup_vector(apicd);
}

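/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */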
void irq_force_complete_move(struct irq_desc *desc)
{
        struct apic_chip_data *apicd;
        struct irq_data *irqd;
        unsigned int vector;

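        /*
         * The function is called for all descriptors regardless of
         * which irqdomain they belong to. For example if an IRQ is
         * provided by an irq_chip as part of a GPIO driver, the chip
         * data for that descriptor is specific to the irq_chip in
         * question.
         *
         * Check first that the chip_data is what we expect
         * (apic_chip_data) before touching it any further.
         */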
        irqd = irq_domain_get_irq_data(x86_vector_domain,
                                       irq_desc_get_irq(desc));
        if (!irqd)
                return;

        raw_spin_lock(&vector_lock);
        apicd = apic_chip_data(irqd);
        if (!apicd)
                goto unlock;

        /*
         * If prev_vector is empty, no action required.
         */
        vector = apicd->prev_vector;
        if (!vector)
                goto unlock;

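        /*
         * This is tricky. If the cleanup of the old vector has not been
         * done yet, then the following can happen:
         *
         * 1) The interrupt is in move_in_progress state. That means no
         *    interrupt has arrived on the new target CPU since the move
         *    was initiated, so the cleanup for the old vector was never
         *    triggered.
         *
         * 2) The interrupt has fired on the new vector, but the cleanup
         *    IPIs have not been processed yet.
         */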
        if (apicd->move_in_progress) {
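                /*
                 * In theory there is a race when the interrupt is raised
                 * on the old vector right before the new vector becomes
                 * effective in the interrupt controller. If the old
                 * target CPU cannot handle it before the old vector is
                 * cleaned up, it ends up as a spurious interrupt.
                 *
                 * In the CPU unplug case this is a non-issue because the
                 * affinity update happens in stop machine context where
                 * all other CPUs loop with interrupts disabled, so the
                 * interrupt cannot be in service on the outgoing CPU.
                 * Warn so misbehaviour can be diagnosed, then force the
                 * cleanup below.
                 */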
                pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
                        irqd->irq, vector);
        }
        free_moved_vector(apicd);
unlock:
        raw_spin_unlock(&vector_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
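/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent that the actual interrupt move will run out of vectors.
 */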
int lapic_can_unplug_cpu(void)
{
        unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
        int ret = 0;

        raw_spin_lock(&vector_lock);
        tomove = irq_matrix_allocated(vector_matrix);
        avl = irq_matrix_available(vector_matrix, true);
        if (avl < tomove) {
                pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
                        cpu, tomove, avl);
                ret = -ENOSPC;
                goto out;
        }
        rsvd = irq_matrix_reserved(vector_matrix);
        if (avl < rsvd) {
                pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
                        rsvd, avl);
        }
out:
        raw_spin_unlock(&vector_lock);
        return ret;
}
#endif
#endif

static void __init print_APIC_field(int base)
{
        int i;

        printk(KERN_DEBUG);

        for (i = 0; i < 8; i++)
                pr_cont("%08x", apic_read(base + i*0x10));

        pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
        unsigned int i, v, ver, maxlvt;
        u64 icr;

        pr_debug("printing local APIC contents on CPU#%d/%d:\n",
                 smp_processor_id(), hard_smp_processor_id());
        v = apic_read(APIC_ID);
        pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
        v = apic_read(APIC_LVR);
        pr_info("... APIC VERSION: %08x\n", v);
        ver = GET_APIC_VERSION(v);
        maxlvt = lapic_get_maxlvt();

        v = apic_read(APIC_TASKPRI);
        pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

        /* !82489DX */
        if (APIC_INTEGRATED(ver)) {
                if (!APIC_XAPIC(ver)) {
                        v = apic_read(APIC_ARBPRI);
                        pr_debug("... APIC ARBPRI: %08x (%02x)\n",
                                 v, v & APIC_ARBPRI_MASK);
                }
                v = apic_read(APIC_PROCPRI);
                pr_debug("... APIC PROCPRI: %08x\n", v);
        }

        /*
         * Remote read supported only in the 82489DX and local APIC for
         * P5, P6 and Xeon
         */
        if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
                v = apic_read(APIC_RRR);
                pr_debug("... APIC RRR: %08x\n", v);
        }

        v = apic_read(APIC_LDR);
        pr_debug("... APIC LDR: %08x\n", v);
        if (!x2apic_enabled()) {
                v = apic_read(APIC_DFR);
                pr_debug("... APIC DFR: %08x\n", v);
        }
        v = apic_read(APIC_SPIV);
        pr_debug("... APIC SPIV: %08x\n", v);

        pr_debug("... APIC ISR field:\n");
        print_APIC_field(APIC_ISR);
        pr_debug("... APIC TMR field:\n");
        print_APIC_field(APIC_TMR);
        pr_debug("... APIC IRR field:\n");
        print_APIC_field(APIC_IRR);

        /* !82489DX */
        if (APIC_INTEGRATED(ver)) {
                /* Due to the Pentium erratum 3AP. */
                if (maxlvt > 3)
                        apic_write(APIC_ESR, 0);

                v = apic_read(APIC_ESR);
                pr_debug("... APIC ESR: %08x\n", v);
        }

        icr = apic_icr_read();
        pr_debug("... APIC ICR: %08x\n", (u32)icr);
        pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

        v = apic_read(APIC_LVTT);
        pr_debug("... APIC LVTT: %08x\n", v);

        if (maxlvt > 3) {
                /* PC is LVT#4. */
                v = apic_read(APIC_LVTPC);
                pr_debug("... APIC LVTPC: %08x\n", v);
        }
        v = apic_read(APIC_LVT0);
        pr_debug("... APIC LVT0: %08x\n", v);
        v = apic_read(APIC_LVT1);
        pr_debug("... APIC LVT1: %08x\n", v);

        if (maxlvt > 2) {
                /* ERR is LVT#3. */
                v = apic_read(APIC_LVTERR);
                pr_debug("... APIC LVTERR: %08x\n", v);
        }

        v = apic_read(APIC_TMICT);
        pr_debug("... APIC TMICT: %08x\n", v);
        v = apic_read(APIC_TMCCT);
        pr_debug("... APIC TMCCT: %08x\n", v);
        v = apic_read(APIC_TDCR);
        pr_debug("... APIC TDCR: %08x\n", v);

        if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
                v = apic_read(APIC_EFEAT);
                maxlvt = (v >> 16) & 0xff;
                pr_debug("... APIC EFEAT: %08x\n", v);
                v = apic_read(APIC_ECTRL);
                pr_debug("... APIC ECTRL: %08x\n", v);
                for (i = 0; i < maxlvt; i++) {
                        v = apic_read(APIC_EILVTn(i));
                        pr_debug("... APIC EILVT%d: %08x\n", i, v);
                }
        }
        pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
        int cpu;

        if (!maxcpu)
                return;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu >= maxcpu)
                        break;
                smp_call_function_single(cpu, print_local_APIC, NULL, 1);
        }
        preempt_enable();
}

static void __init print_PIC(void)
{
        unsigned int v;
        unsigned long flags;

        if (!nr_legacy_irqs())
                return;

        pr_debug("\nprinting PIC contents\n");

        raw_spin_lock_irqsave(&i8259A_lock, flags);

        v = inb(0xa1) << 8 | inb(0x21);
        pr_debug("... PIC IMR: %04x\n", v);

        v = inb(0xa0) << 8 | inb(0x20);
        pr_debug("... PIC IRR: %04x\n", v);

        /* Select the ISR via OCW3, read it, then switch back to IRR */
        outb(0x0b, 0xa0);
        outb(0x0b, 0x20);
        v = inb(0xa0) << 8 | inb(0x20);
        outb(0x0a, 0xa0);
        outb(0x0a, 0x20);

        raw_spin_unlock_irqrestore(&i8259A_lock, flags);

        pr_debug("... PIC ISR: %04x\n", v);

        v = inb(0x4d1) << 8 | inb(0x4d0);
        pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
        int num = -1;

        if (strcmp(arg, "all") == 0) {
                show_lapic = CONFIG_NR_CPUS;
        } else {
                get_option(&arg, &num);
                if (num >= 0)
                        show_lapic = num;
        }

        return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
        if (apic_verbosity == APIC_QUIET)
                return 0;

        print_PIC();

        /* Don't print out if the APIC is not there */
        if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
                return 0;

        print_local_APICs(show_lapic);
        print_IO_APICs();

        return 0;
}

late_initcall(print_ICs);