// SPDX-License-Identifier: GPL-2.0
/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ingo Molnar, Hajnalka Szabo
 *      Moved from arch/x86/kernel/apic/io_apic.c.
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc
 *      Convert to hierarchical irqdomain
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

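/*
 * Per interrupt (vector domain) bookkeeping: the current and previous
 * vector/CPU pair, the cleanup list node used when a moved vector is
 * freed on the old CPU, and the reservation/management state bits.
 */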
struct apic_chip_data {
        struct irq_cfg          hw_irq_cfg;
        unsigned int            vector;
        unsigned int            prev_vector;
        unsigned int            cpu;
        unsigned int            prev_cpu;
        unsigned int            irq;
        struct hlist_node       clist;
        unsigned int            move_in_progress        : 1,
                                is_managed              : 1,
                                can_reserve             : 1,
                                has_reserved            : 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif

void lock_vector_lock(void)
{
        /*
         * Used so that the set of online CPUs and the per-CPU vector
         * tables do not change during vector assignment.
         */
        raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
        raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
                         const struct cpumask *mask)
{
        memset(info, 0, sizeof(*info));
        info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
        if (src)
                *dst = *src;
        else
                memset(dst, 0, sizeof(*dst));
}

static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
        if (!irqd)
                return NULL;

        while (irqd->parent_data)
                irqd = irqd->parent_data;

        return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
        return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
        struct apic_chip_data *apicd;

        apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
        if (apicd)
                INIT_HLIST_NODE(&apicd->clist);
        return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
        kfree(apicd);
}

static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
                                unsigned int cpu)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        lockdep_assert_held(&vector_lock);

        apicd->hw_irq_cfg.vector = vector;
        apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
        irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
        trace_vector_config(irqd->irq, vector, cpu,
                            apicd->hw_irq_cfg.dest_apicid);
}

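/*
 * Update the per interrupt data after a new vector/CPU pair has been
 * allocated in the matrix: stash the old pair for cleanup (or free it
 * right away if its CPU is offline) and install the irq descriptor in
 * the new CPU's vector_irq table. Caller holds vector_lock.
 */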
static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
                               unsigned int newcpu)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        struct irq_desc *desc = irq_data_to_desc(irqd);
        bool managed = irqd_affinity_is_managed(irqd);

        lockdep_assert_held(&vector_lock);

        trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
                            apicd->cpu);

        /*
         * If there is no vector associated or if the associated vector is
         * the shutdown vector, which is associated to make PCI/MSI
         * shutdown mode work, then there is nothing to release. Clear out
         * prev_vector and move_in_progress and restart the search.
         */
        apicd->prev_vector = 0;
        if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
                goto setnew;

        /*
         * If the target CPU of the previous vector is online, then mark
         * the vector as move in progress and store it for cleanup when
         * the first interrupt on the new vector arrives. If the target
         * CPU is offline then the regular release mechanism via the
         * cleanup vector is not possible and the vector can be
         * immediately freed in the underlying matrix allocator.
         */
        if (cpu_online(apicd->cpu)) {
                apicd->move_in_progress = true;
                apicd->prev_vector = apicd->vector;
                apicd->prev_cpu = apicd->cpu;
                WARN_ON_ONCE(apicd->cpu == newcpu);
        } else {
                irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
                                managed);
        }

setnew:
        apicd->vector = newvec;
        apicd->cpu = newcpu;
        BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
        per_cpu(vector_irq, newcpu)[newvec] = desc;
}

static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
        unsigned int cpu = cpumask_first(cpu_online_mask);

        apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

static int reserve_managed_vector(struct irq_data *irqd)
{
        const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&vector_lock, flags);
        apicd->is_managed = true;
        ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        trace_vector_reserve_managed(irqd->irq, ret);
        return ret;
}

static void reserve_irq_vector_locked(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        irq_matrix_reserve(vector_matrix);
        apicd->can_reserve = true;
        apicd->has_reserved = true;
        irqd_set_can_reserve(irqd);
        trace_vector_reserve(irqd->irq, 0);
        vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        reserve_irq_vector_locked(irqd);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return 0;
}

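/*
 * Try to allocate a vector for @irqd from the CPUs in @dest. Reuses the
 * current assignment if it is still valid; refuses to reassign while a
 * previous move is still pending cleanup. Caller holds vector_lock.
 */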
static int
assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        bool resvd = apicd->has_reserved;
        unsigned int cpu = apicd->cpu;
        int vector = apicd->vector;

        lockdep_assert_held(&vector_lock);

        /*
         * If the current target CPU is online and in the new requested
         * affinity mask, there is no point in moving the interrupt from
         * one CPU to another.
         */
        if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
                return 0;

        /*
         * Careful here. @apicd might either have move_in_progress set or
         * be enqueued for cleanup. Assigning a new vector would either
         * leave a stale vector on some CPU around or in case of a
         * pending cleanup corrupt the hlist.
         */
        if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
                return -EBUSY;

        vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
        trace_vector_alloc(irqd->irq, vector, resvd, vector);
        if (vector < 0)
                return vector;
        apic_update_vector(irqd, vector, cpu);
        apic_update_irq_cfg(irqd, vector, cpu);

        return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&vector_lock, flags);
        cpumask_and(vector_searchmask, dest, cpu_online_mask);
        ret = assign_vector_locked(irqd, vector_searchmask);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return ret;
}

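/*
 * Pick a target for @irqd when no mask was supplied by the caller. The
 * search widens step by step: affinity mask restricted to the home NUMA
 * node, then the full (online) affinity mask, then the full node mask
 * and finally all online CPUs.
 */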
static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
        /* Get the affinity mask - either irq_default_affinity or (user) set */
        const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
        int node = irq_data_get_node(irqd);

        if (node != NUMA_NO_NODE) {
                /* Try the intersection of the affinity and node masks */
                cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
                if (!assign_vector_locked(irqd, vector_searchmask))
                        return 0;
        }

        /* Try the full affinity mask */
        cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
        if (!assign_vector_locked(irqd, vector_searchmask))
                return 0;

        if (node != NUMA_NO_NODE) {
                /* Try the full node mask */
                if (!assign_vector_locked(irqd, cpumask_of_node(node)))
                        return 0;
        }

        /* Try the full online mask */
        return assign_vector_locked(irqd, cpu_online_mask);
}

static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
        if (irqd_affinity_is_managed(irqd))
                return reserve_managed_vector(irqd);
        if (info->mask)
                return assign_irq_vector(irqd, info->mask);
        /*
         * Make only a global reservation with no guarantee. A real
         * vector is associated at activation time.
         */
        return reserve_irq_vector(irqd);
}

static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
        const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        int vector, cpu;

        cpumask_and(vector_searchmask, dest, affmsk);

        /* set_affinity might call here for nothing */
        if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
                return 0;
        vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
                                          &cpu);
        trace_vector_alloc_managed(irqd->irq, vector, vector);
        if (vector < 0)
                return vector;
        apic_update_vector(irqd, vector, cpu);
        apic_update_irq_cfg(irqd, vector, cpu);
        return 0;
}

static void clear_irq_vector(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        bool managed = irqd_affinity_is_managed(irqd);
        unsigned int vector = apicd->vector;

        lockdep_assert_held(&vector_lock);

        if (!vector)
                return;

        trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
                           apicd->prev_cpu);

        per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
        irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
        apicd->vector = 0;

        /* Clean up the leftovers of a (not yet completed) move */
        vector = apicd->prev_vector;
        if (!vector)
                return;

        per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
        irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
        apicd->prev_vector = 0;
        apicd->move_in_progress = 0;
        hlist_del_init(&apicd->clist);
}

static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;

        trace_vector_deactivate(irqd->irq, apicd->is_managed,
                                apicd->can_reserve, false);

        /* Regular fixed assigned interrupt */
        if (!apicd->is_managed && !apicd->can_reserve)
                return;
        /* If the interrupt has a global reservation, nothing to do */
        if (apicd->has_reserved)
                return;

        raw_spin_lock_irqsave(&vector_lock, flags);
        clear_irq_vector(irqd);
        if (apicd->can_reserve)
                reserve_irq_vector_locked(irqd);
        else
                vector_assign_managed_shutdown(irqd);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static int activate_reserved(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        int ret;

        ret = assign_irq_vector_any_locked(irqd);
        if (!ret) {
                apicd->has_reserved = false;
                /*
                 * Core might have disabled reservation mode after
                 * allocating the irq descriptor. Ideally this should
                 * happen before allocation time, but that would require
                 * completely convoluted ways of transporting that
                 * information.
                 */
                if (!irqd_can_reserve(irqd))
                        apicd->can_reserve = false;
        }

        /*
         * Check to ensure that the effective affinity mask is a subset
         * of the user supplied affinity mask, and warn the user if it
         * is not.
         */
        if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
                            irq_data_get_affinity_mask(irqd))) {
                pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
                        irqd->irq);
        }

        return ret;
}

static int activate_managed(struct irq_data *irqd)
{
        const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
        int ret;

        cpumask_and(vector_searchmask, dest, cpu_online_mask);
        if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
                /* Something in the core code broke! Survive gracefully */
                pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
                return -EINVAL;
        }

        ret = assign_managed_vector(irqd, vector_searchmask);
        /*
         * This should not happen. The vector reservation got buggered.
         * Handle it gracefully.
         */
        if (WARN_ON_ONCE(ret < 0)) {
                pr_err("Managed startup irq %u, no vector available\n",
                       irqd->irq);
        }
        return ret;
}

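/*
 * Activation callback of the vector domain. Depending on the state of
 * the interrupt this either installs a real vector, keeps the managed
 * shutdown placeholder or converts a reservation into a real vector.
 */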
static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
                               bool reserve)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;
        int ret = 0;

        trace_vector_activate(irqd->irq, apicd->is_managed,
                              apicd->can_reserve, reserve);

        raw_spin_lock_irqsave(&vector_lock, flags);
        if (!apicd->can_reserve && !apicd->is_managed)
                assign_irq_vector_any_locked(irqd);
        else if (reserve || irqd_is_managed_and_shutdown(irqd))
                vector_assign_managed_shutdown(irqd);
        else if (apicd->is_managed)
                ret = activate_managed(irqd);
        else if (apicd->has_reserved)
                ret = activate_reserved(irqd);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return ret;
}

static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
        const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
        struct apic_chip_data *apicd = apic_chip_data(irqd);

        trace_vector_teardown(irqd->irq, apicd->is_managed,
                              apicd->has_reserved);

        if (apicd->has_reserved)
                irq_matrix_remove_reserved(vector_matrix);
        if (apicd->is_managed)
                irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
                                 unsigned int virq, unsigned int nr_irqs)
{
        struct apic_chip_data *apicd;
        struct irq_data *irqd;
        unsigned long flags;
        int i;

        for (i = 0; i < nr_irqs; i++) {
                irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
                if (irqd && irqd->chip_data) {
                        raw_spin_lock_irqsave(&vector_lock, flags);
                        clear_irq_vector(irqd);
                        vector_free_reserved_and_managed(irqd);
                        apicd = irqd->chip_data;
                        irq_domain_reset_irq_data(irqd);
                        raw_spin_unlock_irqrestore(&vector_lock, flags);
                        free_apic_chip_data(apicd);
                }
        }
}

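/*
 * The ISA/legacy PIC interrupts come with a fixed vector assignment
 * (ISA_IRQ_VECTOR(irq)) on CPU 0. Set up the chip data accordingly and
 * tell the caller whether a vector has to be (re)allocated through the
 * regular allocation path.
 */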
static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
                                    struct apic_chip_data *apicd)
{
        unsigned long flags;
        bool realloc = false;

        apicd->vector = ISA_IRQ_VECTOR(virq);
        apicd->cpu = 0;

        raw_spin_lock_irqsave(&vector_lock, flags);
        /*
         * If the interrupt is activated, then it must stay at this vector
         * position. That's usually the timer interrupt (0).
         */
        if (irqd_is_activated(irqd)) {
                trace_vector_setup(virq, true, 0);
                apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
        } else {
                /* Release the vector */
                apicd->can_reserve = true;
                irqd_set_can_reserve(irqd);
                clear_irq_vector(irqd);
                realloc = true;
        }
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return realloc;
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
                                 unsigned int nr_irqs, void *arg)
{
        struct irq_alloc_info *info = arg;
        struct apic_chip_data *apicd;
        struct irq_data *irqd;
        int i, err, node;

        if (disable_apic)
                return -ENXIO;

        /* Currently vector allocator can't guarantee contiguous allocations */
        if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
                return -ENOSYS;

        /*
         * Catch any attempt to touch the cascade interrupt on a PIC
         * equipped system.
         */
        if (WARN_ON_ONCE(info->flags & X86_IRQ_ALLOC_LEGACY &&
                         virq == PIC_CASCADE_IR))
                return -EINVAL;

        for (i = 0; i < nr_irqs; i++) {
                irqd = irq_domain_get_irq_data(domain, virq + i);
                BUG_ON(!irqd);
                node = irq_data_get_node(irqd);
                WARN_ON_ONCE(irqd->chip_data);
                apicd = alloc_apic_chip_data(node);
                if (!apicd) {
                        err = -ENOMEM;
                        goto error;
                }

                apicd->irq = virq + i;
                irqd->chip = &lapic_controller;
                irqd->chip_data = apicd;
                irqd->hwirq = virq + i;
                irqd_set_single_target(irqd);
                /*
                 * Prevent that any of these interrupts is invoked in
                 * non interrupt context via e.g. generic_handle_irq()
                 * as that can corrupt the affinity move state.
                 */
                irqd_set_handle_enforce_irqctx(irqd);

                /* Don't invoke affinity setter on deactivated interrupts */
                irqd_set_affinity_on_activate(irqd);

                /*
                 * Legacy vectors are already assigned when the IOAPIC
                 * takes them over. They stay on the same vector. This is
                 * required for check_timer() to work correctly as it
                 * might switch back to legacy mode. Only update the
                 * hardware config.
                 */
                if (info->flags & X86_IRQ_ALLOC_LEGACY) {
                        if (!vector_configure_legacy(virq + i, irqd, apicd))
                                continue;
                }

                err = assign_irq_vector_policy(irqd, info);
                trace_vector_setup(virq + i, false, err);
                if (err) {
                        irqd->chip_data = NULL;
                        free_apic_chip_data(apicd);
                        goto error;
                }
        }

        return 0;

error:
        x86_vector_free_irqs(domain, virq, i);
        return err;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
                                  struct irq_data *irqd, int ind)
{
        struct apic_chip_data apicd;
        unsigned long flags;
        int irq;

        if (!irqd) {
                irq_matrix_debug_show(m, vector_matrix, ind);
                return;
        }

        irq = irqd->irq;
        if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
                seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
                seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
                return;
        }

        if (!irqd->chip_data) {
                seq_printf(m, "%*sVector: Not assigned\n", ind, "");
                return;
        }

        raw_spin_lock_irqsave(&vector_lock, flags);
        memcpy(&apicd, irqd->chip_data, sizeof(apicd));
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
        seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
        if (apicd.prev_vector) {
                seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
                seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
        }
        seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
        seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
        seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
        seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
        seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif

int x86_fwspec_is_ioapic(struct irq_fwspec *fwspec)
{
        if (fwspec->param_count != 1)
                return 0;

        if (is_fwnode_irqchip(fwspec->fwnode)) {
                const char *fwname = fwnode_get_name(fwspec->fwnode);

                return fwname && !strncmp(fwname, "IO-APIC-", 8) &&
                        simple_strtol(fwname+8, NULL, 10) == fwspec->param[0];
        }
        return to_of_node(fwspec->fwnode) &&
               of_device_is_compatible(to_of_node(fwspec->fwnode),
                                       "intel,ce4100-ioapic");
}

int x86_fwspec_is_hpet(struct irq_fwspec *fwspec)
{
        if (fwspec->param_count != 1)
                return 0;

        if (is_fwnode_irqchip(fwspec->fwnode)) {
                const char *fwname = fwnode_get_name(fwspec->fwnode);

                return fwname && !strncmp(fwname, "HPET-MSI-", 9) &&
                        simple_strtol(fwname+9, NULL, 10) == fwspec->param[0];
        }
        return 0;
}

static int x86_vector_select(struct irq_domain *d, struct irq_fwspec *fwspec,
                             enum irq_domain_bus_token bus_token)
{
        /*
         * HPET and I/OAPIC cannot be parented in the vector domain
         * if IRQ remapping is enabled. APIC IDs above 15 bits are
         * only permitted if IRQ remapping is enabled, so check that.
         */
        if (apic->apic_id_valid(32768))
                return 0;

        return x86_fwspec_is_ioapic(fwspec) || x86_fwspec_is_hpet(fwspec);
}

static const struct irq_domain_ops x86_vector_domain_ops = {
        .select         = x86_vector_select,
        .alloc          = x86_vector_alloc_irqs,
        .free           = x86_vector_free_irqs,
        .activate       = x86_vector_activate,
        .deactivate     = x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
        .debug_show     = x86_vector_debug_show,
#endif
};

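/*
 * Upper bound heuristic for the number of interrupt descriptors: legacy
 * plus GSI interrupts plus a per-CPU budget for dynamically allocated
 * (MSI) interrupts. The real number of legacy IRQs is only known after
 * probing the PIC.
 */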
int __init arch_probe_nr_irqs(void)
{
        int nr;

        if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
                nr_irqs = NR_VECTORS * nr_cpu_ids;

        nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
        /*
         * for MSI and HT dyn irq
         */
        if (gsi_top <= NR_IRQS_LEGACY)
                nr += 8 * nr_cpu_ids;
        else
                nr += gsi_top * 16;
#endif
        if (nr < nr_irqs)
                nr_irqs = nr;

        /*
         * We don't know if PIC is present at this point so we need to do
         * probe() to get the right number of legacy IRQs.
         */
        return legacy_pic->probe();
}

void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
        /*
         * Use assign system here so it won't get accounted as allocated
         * and movable in the cpu hotplug check and it prevents managed
         * irq reservation from touching it.
         */
        irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

void __init lapic_update_legacy_vectors(void)
{
        unsigned int i;

        if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
                return;

        /*
         * If the IO/APIC is disabled via config, kernel command line or
         * lack of enumeration then all legacy interrupts are routed
         * through the PIC. Make sure that they are marked as legacy
         * vectors. PIC_CASCADE_IR has already been marked in
         * lapic_assign_system_vectors().
         */
        for (i = 0; i < nr_legacy_irqs(); i++) {
                if (i != PIC_CASCADE_IR)
                        lapic_assign_legacy_vector(i, true);
        }
}

void __init lapic_assign_system_vectors(void)
{
        unsigned int i, vector = 0;

        for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
                irq_matrix_assign_system(vector_matrix, vector, false);

        if (nr_legacy_irqs() > 1)
                lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

        /* System vectors are reserved, online it */
        irq_matrix_online(vector_matrix);

        /* Mark the preallocated legacy interrupts */
        for (i = 0; i < nr_legacy_irqs(); i++) {
                /*
                 * Don't touch the cascade interrupt. It's unusable
                 * on PIC equipped machines. See the large comment
                 * in the IO/APIC code.
                 */
                if (i != PIC_CASCADE_IR)
                        irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
        }
}

int __init arch_early_irq_init(void)
{
        struct fwnode_handle *fn;

        fn = irq_domain_alloc_named_fwnode("VECTOR");
        BUG_ON(!fn);
        x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
                                                   NULL);
        BUG_ON(x86_vector_domain == NULL);
        irq_set_default_host(x86_vector_domain);

        BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

        /*
         * Allocate the vector matrix allocator data structure and limit
         * the search area.
         */
        vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
                                         FIRST_SYSTEM_VECTOR);
        BUG_ON(!vector_matrix);

        return arch_early_ioapic_init();
}

#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
        int isairq = vector - ISA_IRQ_VECTOR(0);

        /* Check whether the irq is in the legacy space */
        if (isairq < 0 || isairq >= nr_legacy_irqs())
                return VECTOR_UNUSED;
        /* Check whether the irq is handled by the IOAPIC */
        if (test_bit(isairq, &io_apic_irqs))
                return VECTOR_UNUSED;
        return irq_to_desc(isairq);
}

/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
        unsigned int vector;

        lockdep_assert_held(&vector_lock);

        /* Online the vector matrix array for this CPU */
        irq_matrix_online(vector_matrix);

        /*
         * The interrupt affinity logic never targets interrupts to
         * offline CPUs. The exception are the legacy PIC interrupts. In
         * general they are only targeted to CPU0, but depending on the
         * platform they can be distributed to any online CPU in
         * hardware. The kernel has no influence on that. So all active
         * legacy vectors must be installed on all CPUs. The non legacy
         * interrupts are managed by the vector matrix code.
         */
        for (vector = 0; vector < NR_VECTORS; vector++)
                this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

void lapic_offline(void)
{
        lock_vector_lock();
        irq_matrix_offline(vector_matrix);
        unlock_vector_lock();
}

static int apic_set_affinity(struct irq_data *irqd,
                             const struct cpumask *dest, bool force)
{
        int err;

        if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
                return -EIO;

        raw_spin_lock(&vector_lock);
        cpumask_and(vector_searchmask, dest, cpu_online_mask);
        if (irqd_affinity_is_managed(irqd))
                err = assign_managed_vector(irqd, vector_searchmask);
        else
                err = assign_vector_locked(irqd, vector_searchmask);
        raw_spin_unlock(&vector_lock);
        return err ? err : IRQ_SET_MASK_OK;
}

#else
# define apic_set_affinity      NULL
#endif

static int apic_retrigger_irq(struct irq_data *irqd)
{
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        unsigned long flags;

        raw_spin_lock_irqsave(&vector_lock, flags);
        apic->send_IPI(apicd->cpu, apicd->vector);
        raw_spin_unlock_irqrestore(&vector_lock, flags);

        return 1;
}

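/*
 * Acknowledge an interrupt at the local APIC. irq_move_irq() handles a
 * pending affinity change first, if the interrupt is marked for move.
 */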
void apic_ack_irq(struct irq_data *irqd)
{
        irq_move_irq(irqd);
        ack_APIC_irq();
}

void apic_ack_edge(struct irq_data *irqd)
{
        irq_complete_move(irqd_cfg(irqd));
        apic_ack_irq(irqd);
}

static void x86_vector_msi_compose_msg(struct irq_data *data,
                                       struct msi_msg *msg)
{
        __irq_msi_compose_msg(irqd_cfg(data), msg, false);
}

static struct irq_chip lapic_controller = {
        .name                   = "APIC",
        .irq_ack                = apic_ack_edge,
        .irq_set_affinity       = apic_set_affinity,
        .irq_compose_msi_msg    = x86_vector_msi_compose_msg,
        .irq_retrigger          = apic_retrigger_irq,
};

#ifdef CONFIG_SMP

static void free_moved_vector(struct apic_chip_data *apicd)
{
        unsigned int vector = apicd->prev_vector;
        unsigned int cpu = apicd->prev_cpu;
        bool managed = apicd->is_managed;

        /*
         * Managed interrupts are usually not migrated away
         * from an online CPU, but CPU isolation 'managed_irq'
         * can make that happen.
         * 1) Activation does not take the isolation into account
         *    to keep the code simple
         * 2) Migration away from an isolated CPU can happen when
         *    a non-isolated CPU which is in the calculated
         *    affinity mask comes online.
         */
        trace_vector_free_moved(apicd->irq, cpu, vector, managed);
        irq_matrix_free(vector_matrix, cpu, vector, managed);
        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
        hlist_del_init(&apicd->clist);
        apicd->prev_vector = 0;
        apicd->move_in_progress = 0;
}

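/*
 * Cleanup IPI handler: free the previous vectors of all interrupts
 * queued on this CPU's cleanup list, unless the old vector is still
 * pending in IRR, in which case another cleanup IPI is sent to retry
 * later.
 */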
DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
{
        struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
        struct apic_chip_data *apicd;
        struct hlist_node *tmp;

        ack_APIC_irq();
        /* Prevent vectors vanishing under us */
        raw_spin_lock(&vector_lock);

        hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
                unsigned int irr, vector = apicd->prev_vector;

                /*
                 * Paranoia: Check if the vector that needs to be cleaned
                 * up is registered at the APICs IRR. If so, then this is
                 * not the best time to clean it up. Clean it up in the
                 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
                 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
                 * priority external vector, so on return from this
                 * interrupt the device interrupt will happen first.
                 */
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr & (1U << (vector % 32))) {
                        apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
                        continue;
                }
                free_moved_vector(apicd);
        }

        raw_spin_unlock(&vector_lock);
}

static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
        unsigned int cpu;

        raw_spin_lock(&vector_lock);
        apicd->move_in_progress = 0;
        cpu = apicd->prev_cpu;
        if (cpu_online(cpu)) {
                hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
                apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
        } else {
                apicd->prev_vector = 0;
        }
        raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
        struct apic_chip_data *apicd;

        apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
        if (apicd->move_in_progress)
                __send_cleanup_vector(apicd);
}

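/*
 * Called from the interrupt entry path of moved interrupts. Once the
 * first interrupt after the move arrives on the new target CPU, the old
 * vector can safely be released via the cleanup IPI.
 */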
void irq_complete_move(struct irq_cfg *cfg)
{
        struct apic_chip_data *apicd;

        apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
        if (likely(!apicd->move_in_progress))
                return;

        /*
         * If the interrupt arrived on the new target CPU, cleanup the
         * vector on the old target CPU. A vector check is not required
         * because an interrupt can never move from one vector to another
         * on the same CPU.
         */
        if (apicd->cpu == smp_processor_id())
                __send_cleanup_vector(apicd);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
        struct apic_chip_data *apicd;
        struct irq_data *irqd;
        unsigned int vector;

        /*
         * The function is called for all descriptors regardless of which
         * irqdomain they belong to. For example if an IRQ is provided by
         * an irq_chip as part of a GPIO driver, the chip data for that
         * descriptor is specific to the irq_chip in question.
         *
         * Check first that the chip_data is what we expect
         * (apic_chip_data) before touching it any further.
         */
        irqd = irq_domain_get_irq_data(x86_vector_domain,
                                       irq_desc_get_irq(desc));
        if (!irqd)
                return;

        raw_spin_lock(&vector_lock);
        apicd = apic_chip_data(irqd);
        if (!apicd)
                goto unlock;

        /*
         * If prev_vector is empty, no action required.
         */
        vector = apicd->prev_vector;
        if (!vector)
                goto unlock;

        /*
         * This is tricky. If the cleanup of the old vector has not been
         * done yet, then the interrupt might have been raised on the old
         * vector just before the affinity update took effect. Freeing
         * the old vector prematurely turns such an in-flight interrupt
         * into a spurious one and, in the worst case, leaves the
         * interrupt line stale.
         */
        if (apicd->move_in_progress) {
                /*
                 * In the CPU hotplug case this should not be an issue:
                 * all CPUs loop with interrupts enabled in stop machine
                 * before they rendezvous with interrupts disabled, so a
                 * pending interrupt on the old vector is serviced before
                 * the old vector is cleaned up here. Only a delivery
                 * delayed beyond the point where the outgoing CPU
                 * disables interrupts could expose this, which is a
                 * theoretical race at best.
                 *
                 * There is nothing that can be done about it at this
                 * point without refactoring the whole fixup_irq()
                 * business completely. Print at least the irq number and
                 * the old vector number, so the necessary information is
                 * available when a problem in that area arises.
                 */
                pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
                        irqd->irq, vector);
        }
        free_moved_vector(apicd);
unlock:
        raw_spin_unlock(&vector_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent that the actual interrupt move will run out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
        unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
        int ret = 0;

        raw_spin_lock(&vector_lock);
        tomove = irq_matrix_allocated(vector_matrix);
        avl = irq_matrix_available(vector_matrix, true);
        if (avl < tomove) {
                pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
                        cpu, tomove, avl);
                ret = -ENOSPC;
                goto out;
        }
        rsvd = irq_matrix_reserved(vector_matrix);
        if (avl < rsvd) {
                pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
                        rsvd, avl);
        }
out:
        raw_spin_unlock(&vector_lock);
        return ret;
}
#endif
#endif

static void __init print_APIC_field(int base)
{
        int i;

        printk(KERN_DEBUG);

        for (i = 0; i < 8; i++)
                pr_cont("%08x", apic_read(base + i*0x10));

        pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
        unsigned int i, v, ver, maxlvt;
        u64 icr;

        pr_debug("printing local APIC contents on CPU#%d/%d:\n",
                 smp_processor_id(), hard_smp_processor_id());
        v = apic_read(APIC_ID);
        pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
        v = apic_read(APIC_LVR);
        pr_info("... APIC VERSION: %08x\n", v);
        ver = GET_APIC_VERSION(v);
        maxlvt = lapic_get_maxlvt();

        v = apic_read(APIC_TASKPRI);
        pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

        /* !82489DX */
        if (APIC_INTEGRATED(ver)) {
                if (!APIC_XAPIC(ver)) {
                        v = apic_read(APIC_ARBPRI);
                        pr_debug("... APIC ARBPRI: %08x (%02x)\n",
                                 v, v & APIC_ARBPRI_MASK);
                }
                v = apic_read(APIC_PROCPRI);
                pr_debug("... APIC PROCPRI: %08x\n", v);
        }

        /*
         * Remote read supported only in the 82489DX and local APIC for
         * Pentium processors.
         */
        if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
                v = apic_read(APIC_RRR);
                pr_debug("... APIC RRR: %08x\n", v);
        }

        v = apic_read(APIC_LDR);
        pr_debug("... APIC LDR: %08x\n", v);
        if (!x2apic_enabled()) {
                v = apic_read(APIC_DFR);
                pr_debug("... APIC DFR: %08x\n", v);
        }
        v = apic_read(APIC_SPIV);
        pr_debug("... APIC SPIV: %08x\n", v);

        pr_debug("... APIC ISR field:\n");
        print_APIC_field(APIC_ISR);
        pr_debug("... APIC TMR field:\n");
        print_APIC_field(APIC_TMR);
        pr_debug("... APIC IRR field:\n");
        print_APIC_field(APIC_IRR);

        /* !82489DX */
        if (APIC_INTEGRATED(ver)) {
                /* Due to the Pentium erratum 3AP. */
                if (maxlvt > 3)
                        apic_write(APIC_ESR, 0);

                v = apic_read(APIC_ESR);
                pr_debug("... APIC ESR: %08x\n", v);
        }

        icr = apic_icr_read();
        pr_debug("... APIC ICR: %08x\n", (u32)icr);
        pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

        v = apic_read(APIC_LVTT);
        pr_debug("... APIC LVTT: %08x\n", v);

        if (maxlvt > 3) {
                /* PC is LVT#4. */
                v = apic_read(APIC_LVTPC);
                pr_debug("... APIC LVTPC: %08x\n", v);
        }
        v = apic_read(APIC_LVT0);
        pr_debug("... APIC LVT0: %08x\n", v);
        v = apic_read(APIC_LVT1);
        pr_debug("... APIC LVT1: %08x\n", v);

        if (maxlvt > 2) {
                /* ERR is LVT#3. */
                v = apic_read(APIC_LVTERR);
                pr_debug("... APIC LVTERR: %08x\n", v);
        }

        v = apic_read(APIC_TMICT);
        pr_debug("... APIC TMICT: %08x\n", v);
        v = apic_read(APIC_TMCCT);
        pr_debug("... APIC TMCCT: %08x\n", v);
        v = apic_read(APIC_TDCR);
        pr_debug("... APIC TDCR: %08x\n", v);

        if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
                v = apic_read(APIC_EFEAT);
                maxlvt = (v >> 16) & 0xff;
                pr_debug("... APIC EFEAT: %08x\n", v);
                v = apic_read(APIC_ECTRL);
                pr_debug("... APIC ECTRL: %08x\n", v);
                for (i = 0; i < maxlvt; i++) {
                        v = apic_read(APIC_EILVTn(i));
                        pr_debug("... APIC EILVT%d: %08x\n", i, v);
                }
        }
        pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
        int cpu;

        if (!maxcpu)
                return;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu >= maxcpu)
                        break;
                smp_call_function_single(cpu, print_local_APIC, NULL, 1);
        }
        preempt_enable();
}

static void __init print_PIC(void)
{
        unsigned int v;
        unsigned long flags;

        if (!nr_legacy_irqs())
                return;

        pr_debug("\nprinting PIC contents\n");

        raw_spin_lock_irqsave(&i8259A_lock, flags);

        v = inb(0xa1) << 8 | inb(0x21);
        pr_debug("... PIC IMR: %04x\n", v);

        v = inb(0xa0) << 8 | inb(0x20);
        pr_debug("... PIC IRR: %04x\n", v);

        /* Read ISR via OCW3 (0x0b selects ISR), then switch back to IRR */
        outb(0x0b, 0xa0);
        outb(0x0b, 0x20);
        v = inb(0xa0) << 8 | inb(0x20);
        outb(0x0a, 0xa0);
        outb(0x0a, 0x20);

        raw_spin_unlock_irqrestore(&i8259A_lock, flags);

        pr_debug("... PIC ISR: %04x\n", v);

        v = inb(PIC_ELCR2) << 8 | inb(PIC_ELCR1);
        pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
        int num = -1;

        if (strcmp(arg, "all") == 0) {
                show_lapic = CONFIG_NR_CPUS;
        } else {
                get_option(&arg, &num);
                if (num >= 0)
                        show_lapic = num;
        }

        return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
        if (apic_verbosity == APIC_QUIET)
                return 0;

        print_PIC();

        /* Don't print out if the APIC is not there */
        if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
                return 0;

        print_local_APICs(show_lapic);
        print_IO_APICs();

        return 0;
}

late_initcall(print_ICs);