// SPDX-License-Identifier: GPL-2.0
/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc
 *	Add support of hierarchical irqdomain
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

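/*
 * Per-interrupt chip data for the vector domain. @vector/@cpu describe the
 * currently installed target; @prev_vector/@prev_cpu hold the old slot
 * while an affinity change is in flight (see apic_update_vector() and
 * free_moved_vector()); @clist links the entry into the per-CPU cleanup
 * list which is processed by the IRQ move cleanup vector.
 */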
struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;
	unsigned int		vector;
	unsigned int		prev_vector;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	unsigned int		irq;
	struct hlist_node	clist;
	unsigned int		move_in_progress	: 1,
				is_managed		: 1,
				can_reserve		: 1,
				has_reserved		: 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif

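/*
 * vector_lock serializes vector allocation against CPU hotplug. Callers in
 * the hotplug path bracket the critical section with the two helpers below,
 * roughly like this (illustrative sketch, not a verbatim caller):
 *
 *	lock_vector_lock();
 *	set_cpu_online(smp_processor_id(), true);
 *	lapic_online();
 *	unlock_vector_lock();
 */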
void lock_vector_lock(void)
{
	/*
	 * Used so that the set of online CPUs cannot change during an
	 * assign_irq_vector() call.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;

	while (irqd->parent_data)
		irqd = irqd->parent_data;

	return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);
	return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}

static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	lockdep_assert_held(&vector_lock);

	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	trace_vector_config(irqd->irq, vector, cpu,
			    apicd->hw_irq_cfg.dest_apicid);
}

static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
			       unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	struct irq_desc *desc = irq_data_to_desc(irqd);
	bool managed = irqd_affinity_is_managed(irqd);

	lockdep_assert_held(&vector_lock);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/*
	 * If there is no vector associated or if the associated vector is
	 * the shutdown vector, which is associated to make PCI/MSI
	 * shutdown mode work, then there is nothing to release. Clear out
	 * prev_vector first to ensure that the below update disarms any
	 * pending cleanup of the old vector.
	 */
	apicd->prev_vector = 0;
	if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
		goto setnew;
	/*
	 * If the target CPU of the previous vector is online, then mark
	 * the vector as move in progress and store it for cleanup when the
	 * first interrupt on the new vector arrives. If the target CPU is
	 * offline then the regular release mechanism via the cleanup
	 * vector is not possible and the vector can be immediately freed
	 * in the underlying matrix allocator.
	 */
	if (cpu_online(apicd->cpu)) {
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
		WARN_ON_ONCE(apicd->cpu == newcpu);
	} else {
		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
				managed);
	}

setnew:
	apicd->vector = newvec;
	apicd->cpu = newcpu;
	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
	per_cpu(vector_irq, newcpu)[newvec] = desc;
}

static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

static int reserve_managed_vector(struct irq_data *irqd)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apicd->is_managed = true;
	ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	trace_vector_reserve_managed(irqd->irq, ret);
	return ret;
}

static void reserve_irq_vector_locked(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	irq_matrix_reserve(vector_matrix);
	apicd->can_reserve = true;
	apicd->has_reserved = true;
	irqd_set_can_reserve(irqd);
	trace_vector_reserve(irqd->irq, 0);
	vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	reserve_irq_vector_locked(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return 0;
}

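/*
 * Allocate a vector for @irqd from the matrix allocator, restricted to
 * @dest. Must be called with vector_lock held; it is a no-op if the
 * current target CPU is already usable and fails with -EBUSY while a
 * previous move is still pending cleanup.
 */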
static int
assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool resvd = apicd->has_reserved;
	unsigned int cpu = apicd->cpu;
	int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/*
	 * If the current target CPU is online and in the new requested
	 * affinity mask, there is no point in moving the interrupt from
	 * one CPU to another.
	 */
	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
		return 0;

	/*
	 * Careful here. @apicd might either have move_in_progress set or
	 * be enqueued for cleanup. Assigning a new vector would either
	 * leave a stale vector on some CPU around or in case of a pending
	 * cleanup corrupt the hlist.
	 */
	if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
		return -EBUSY;

	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
	trace_vector_alloc(irqd->irq, vector, resvd, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);

	return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	ret = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

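/*
 * Vector search with NUMA-aware fallback. The attempts below go from most
 * to least specific: the node mask intersected with the affinity mask,
 * then the full affinity mask, then the full node mask and finally any
 * online CPU.
 */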
static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
	/* Get the affinity mask - either irq_default_affinity or (user) set */
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	int node = irq_data_get_node(irqd);

	if (node != NUMA_NO_NODE) {
		/* Try the intersection of @affmsk and node mask */
		cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
		if (!assign_vector_locked(irqd, vector_searchmask))
			return 0;
	}

	/* Try the full affinity mask */
	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;

	if (node != NUMA_NO_NODE) {
		/* Try the full node mask */
		if (!assign_vector_locked(irqd, cpumask_of_node(node)))
			return 0;
	}

	/* Try the full online mask */
	return assign_vector_locked(irqd, cpu_online_mask);
}

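/*
 * Allocation policy at interrupt setup time: managed interrupts get a
 * guaranteed reservation on their affinity mask, an explicitly supplied
 * target mask is honored immediately, and everything else only takes a
 * global, non-guaranteed reservation until activation.
 */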
static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
	if (irqd_affinity_is_managed(irqd))
		return reserve_managed_vector(irqd);
	if (info->mask)
		return assign_irq_vector(irqd, info->mask);
	/*
	 * Make only a global reservation with no guarantee. A real vector
	 * is associated at activation time.
	 */
	return reserve_irq_vector(irqd);
}

static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector, cpu;

	cpumask_and(vector_searchmask, dest, affmsk);

	/* set_affinity might call here for nothing */
	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
		return 0;
	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
					  &cpu);
	trace_vector_alloc_managed(irqd->irq, vector, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);
	return 0;
}

static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* Clean up the vector move, if pending */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}

static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	trace_vector_deactivate(irqd->irq, apicd->is_managed,
				apicd->can_reserve, false);

	/* Regular fixed assigned interrupt */
	if (!apicd->is_managed && !apicd->can_reserve)
		return;
	/* If the interrupt has a global reservation, nothing to do */
	if (apicd->has_reserved)
		return;

	raw_spin_lock_irqsave(&vector_lock, flags);
	clear_irq_vector(irqd);
	if (apicd->can_reserve)
		reserve_irq_vector_locked(irqd);
	else
		vector_assign_managed_shutdown(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static int activate_reserved(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int ret;

	ret = assign_irq_vector_any_locked(irqd);
	if (!ret) {
		apicd->has_reserved = false;
		/*
		 * Core might have disabled reservation mode after
		 * allocating the irq descriptor. Ideally this should be
		 * handled somewhere else, but it's simpler to handle it
		 * here where the complete state is available.
		 */
		if (!irqd_can_reserve(irqd))
			apicd->can_reserve = false;
	}

	/*
	 * Check to ensure that the effective affinity mask is a subset
	 * of the user supplied affinity mask, and warn the user if it
	 * is not.
	 */
	if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
			    irq_data_get_affinity_mask(irqd))) {
		pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
			irqd->irq);
	}

	return ret;
}

static int activate_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	int ret;

	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
		/* Something in the core code broke! Survive gracefully */
		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
		return -EINVAL;
	}

	ret = assign_managed_vector(irqd, vector_searchmask);
	/*
	 * This should not happen. The vector reservation got buggered.
	 * Handle it gracefully.
	 */
	if (WARN_ON_ONCE(ret < 0)) {
		pr_err("Managed startup irq %u, no vector available\n",
		       irqd->irq);
	}
	return ret;
}

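/*
 * Activation entry point of the vector domain. Regular interrupts get a
 * real vector immediately; an early activation request or a managed
 * interrupt in shutdown mode is parked on the shutdown vector; otherwise
 * the managed or reserved activation paths above do the real allocation.
 */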
static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
			       bool reserve)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret = 0;

	trace_vector_activate(irqd->irq, apicd->is_managed,
			      apicd->can_reserve, reserve);

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (!apicd->can_reserve && !apicd->is_managed)
		assign_irq_vector_any_locked(irqd);
	else if (reserve || irqd_is_managed_and_shutdown(irqd))
		vector_assign_managed_shutdown(irqd);
	else if (apicd->is_managed)
		ret = activate_managed(irqd);
	else if (apicd->has_reserved)
		ret = activate_reserved(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	trace_vector_teardown(irqd->irq, apicd->is_managed,
			      apicd->has_reserved);

	if (apicd->has_reserved)
		irq_matrix_remove_reserved(vector_matrix);
	if (apicd->is_managed)
		irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irqd && irqd->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(irqd);
			vector_free_reserved_and_managed(irqd);
			apicd = irqd->chip_data;
			irq_domain_reset_irq_data(irqd);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apicd);
		}
	}
}

static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
				    struct apic_chip_data *apicd)
{
	unsigned long flags;
	bool realloc = false;

	apicd->vector = ISA_IRQ_VECTOR(virq);
	apicd->cpu = 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	/*
	 * If the interrupt is activated, then it must stay at this vector
	 * position. That's usually the timer interrupt (0).
	 */
	if (irqd_is_activated(irqd)) {
		trace_vector_setup(virq, true, 0);
		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	} else {
		/* Release the vector */
		apicd->can_reserve = true;
		irqd_set_can_reserve(irqd);
		clear_irq_vector(irqd);
		realloc = true;
	}
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return realloc;
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		apicd->irq = virq + i;
		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);
		/*
		 * Prevent that any of these interrupts is invoked in
		 * non interrupt context via e.g. generic_handle_irq()
		 * as that can corrupt the affinity move state.
		 */
		irqd_set_handle_enforce_irqctx(irqd);

		/* Don't invoke affinity setter on deactivated interrupts */
		irqd_set_affinity_on_activate(irqd);

		/*
		 * Legacy vectors are already assigned when the IOAPIC
		 * takes them over. They stay on the same vector. This is
		 * required for check_timer() to work correctly as it
		 * might switch back to legacy mode. Only update the
		 * hardware config.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			if (!vector_configure_legacy(virq + i, irqd, apicd))
				continue;
		}

		err = assign_irq_vector_policy(irqd, info);
		trace_vector_setup(virq + i, false, err);
		if (err) {
			irqd->chip_data = NULL;
			free_apic_chip_data(apicd);
			goto error;
		}
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i);
	return err;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
				  struct irq_data *irqd, int ind)
{
	struct apic_chip_data apicd;
	unsigned long flags;
	int irq;

	if (!irqd) {
		irq_matrix_debug_show(m, vector_matrix, ind);
		return;
	}

	irq = irqd->irq;
	if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
		seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
		seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
		return;
	}

	if (!irqd->chip_data) {
		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
		return;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	memcpy(&apicd, irqd->chip_data, sizeof(apicd));
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
	if (apicd.prev_vector) {
		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
	}
	seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
	seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
	seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
	seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
	seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif

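/*
 * Firmware node matchers: the IOAPIC and HPET MSI fwnodes are named
 * "IO-APIC-<id>" and "HPET-MSI-<id>" respectively, so matching compares
 * the name prefix and the decoded index against the fwspec parameter.
 */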
int x86_fwspec_is_ioapic(struct irq_fwspec *fwspec)
{
	if (fwspec->param_count != 1)
		return 0;

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		const char *fwname = fwnode_get_name(fwspec->fwnode);

		return fwname && !strncmp(fwname, "IO-APIC-", 8) &&
			simple_strtol(fwname+8, NULL, 10) == fwspec->param[0];
	}
	return to_of_node(fwspec->fwnode) &&
	       of_device_is_compatible(to_of_node(fwspec->fwnode),
				       "intel,ce4100-ioapic");
}

int x86_fwspec_is_hpet(struct irq_fwspec *fwspec)
{
	if (fwspec->param_count != 1)
		return 0;

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		const char *fwname = fwnode_get_name(fwspec->fwnode);

		return fwname && !strncmp(fwname, "HPET-MSI-", 9) &&
			simple_strtol(fwname+9, NULL, 10) == fwspec->param[0];
	}
	return 0;
}

static int x86_vector_select(struct irq_domain *d, struct irq_fwspec *fwspec,
			     enum irq_domain_bus_token bus_token)
{
	/*
	 * HPET and I/OAPIC cannot be parented in the vector domain
	 * if IRQ remapping is enabled. APIC IDs above 15 bits are
	 * only permitted if IRQ remapping is enabled, so check that.
	 */
	if (apic->apic_id_valid(32768))
		return 0;

	return x86_fwspec_is_ioapic(fwspec) || x86_fwspec_is_hpet(fwspec);
}

static const struct irq_domain_ops x86_vector_domain_ops = {
	.select		= x86_vector_select,
	.alloc		= x86_vector_alloc_irqs,
	.free		= x86_vector_free_irqs,
	.activate	= x86_vector_activate,
	.deactivate	= x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= x86_vector_debug_show,
#endif
};

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if PIC is present at this point so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
	/*
	 * Use assign system here so it won't get accounted as allocated
	 * and movable in the cpu hotplug check and it prevents managed
	 * irq reservation from touching it.
	 */
	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

void __init lapic_assign_system_vectors(void)
{
	unsigned int i, vector = 0;

	for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
		irq_matrix_assign_system(vector_matrix, vector, false);

	if (nr_legacy_irqs() > 1)
		lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

	/* System vectors are reserved, bring up the vector matrix */
	irq_matrix_online(vector_matrix);

	/* Mark the preallocated legacy interrupts */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		if (i != PIC_CASCADE_IR)
			irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
	}
}

int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_host(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

	/*
	 * Allocate the vector matrix allocator data structure and limit
	 * the search area to the external vector space.
	 */
	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
					 FIRST_SYSTEM_VECTOR);
	BUG_ON(!vector_matrix);

	return arch_early_ioapic_init();
}

#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
	int isairq = vector - ISA_IRQ_VECTOR(0);

	/* Check whether the irq is in the legacy space */
	if (isairq < 0 || isairq >= nr_legacy_irqs())
		return VECTOR_UNUSED;
	/* Check whether the irq is handled by the IOAPIC */
	if (test_bit(isairq, &io_apic_irqs))
		return VECTOR_UNUSED;
	return irq_to_desc(isairq);
}

/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);

	/* Online the vector matrix array for this CPU */
	irq_matrix_online(vector_matrix);

	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exception are the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. The non PIC IOAPIC case is
	 * handled in setup_local_APIC().
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

void lapic_offline(void)
{
	lock_vector_lock();
	irq_matrix_offline(vector_matrix);
	unlock_vector_lock();
}

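/*
 * irq_chip callback to move an interrupt to a new target CPU set. Must
 * only be invoked for activated interrupts; returns IRQ_SET_MASK_OK on
 * success so the core code updates the affinity mask accordingly.
 */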
static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	int err;

	if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
		return -EIO;

	raw_spin_lock(&vector_lock);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (irqd_affinity_is_managed(irqd))
		err = assign_managed_vector(irqd, vector_searchmask);
	else
		err = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock(&vector_lock);
	return err ? err : IRQ_SET_MASK_OK;
}

#else
# define apic_set_affinity	NULL
#endif

static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

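/*
 * Acknowledge an interrupt at the local APIC. A pending interrupt move is
 * handled by irq_move_irq() before the APIC write; the edge variant below
 * additionally completes an in-flight vector move via irq_complete_move().
 */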
void apic_ack_irq(struct irq_data *irqd)
{
	irq_move_irq(irqd);
	ack_APIC_irq();
}

void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	apic_ack_irq(irqd);
}

static void x86_vector_msi_compose_msg(struct irq_data *data,
				       struct msi_msg *msg)
{
	__irq_msi_compose_msg(irqd_cfg(data), msg, false);
}

static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_compose_msi_msg	= x86_vector_msi_compose_msg,
	.irq_retrigger		= apic_retrigger_irq,
};

#ifdef CONFIG_SMP

static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/*
	 * Managed interrupts are usually not migrated away from an online
	 * CPU, but CPU isolation ('managed_irq') can make that happen:
	 * 1) Activation does not take the isolation into account to keep
	 *    the logic simple.
	 * 2) Migration away from an isolated CPU can happen when a
	 *    non-isolated CPU which is in the calculated affinity mask
	 *    comes online.
	 */
	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}

DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
{
	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;

	ack_APIC_irq();
	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
		unsigned int irr, vector = apicd->prev_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered at the APICs IRR. If so, then this is
		 * not the best time to clean it up. Clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
		 * priority external vector, so on return from this
		 * interrupt the device interrupt will happen first.
		 */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1U << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			continue;
		}
		free_moved_vector(apicd);
	}

	raw_spin_unlock(&vector_lock);
}

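/*
 * Queue the interrupt's chip data on the cleanup list of the previous
 * target CPU and kick IRQ_MOVE_CLEANUP_VECTOR there. If the previous
 * target CPU went offline in the meantime, there is nothing to clean up
 * and prev_vector is simply reset.
 */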
static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
	unsigned int cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	cpu = apicd->prev_cpu;
	if (cpu_online(cpu)) {
		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		apicd->prev_vector = 0;
	}
	raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__send_cleanup_vector(apicd);
}

void irq_complete_move(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	/*
	 * If the interrupt arrived on the new target CPU, cleanup the
	 * vector on the old target CPU. A vector check is not required
	 * because an interrupt can never move from one vector to another
	 * on the same CPU.
	 */
	if (apicd->cpu == smp_processor_id())
		__send_cleanup_vector(apicd);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned int vector;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqd = irq_domain_get_irq_data(x86_vector_domain,
				       irq_desc_get_irq(desc));
	if (!irqd)
		return;

	raw_spin_lock(&vector_lock);
	apicd = apic_chip_data(irqd);
	if (!apicd)
		goto unlock;

	/*
	 * If prev_vector is empty, no action required.
	 */
	vector = apicd->prev_vector;
	if (!vector)
		goto unlock;

	/*
	 * This is tricky. If the cleanup of the old vector has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 *
	 * 1) The interrupt is in move_in_progress state. That means that
	 *    we have not seen an interrupt since the affinity was set, so
	 *    the cleanup vector has not yet been sent.
	 *
	 * 2) The interrupt has been queued on the cleanup list, but the
	 *    cleanup vector has not been processed on the old target CPU.
	 *
	 * In both cases the old vector can be released here because the
	 * outgoing CPU cannot receive the interrupt anymore.
	 */
	if (apicd->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before
		 *			      the update is effective,
		 *			      i.e. it's raised on the old
		 *			      vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all
		 * cpus loop first with interrupts enabled in stop machine,
		 * so the old vector is not yet cleaned up when the
		 * interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target cpu disables interrupts
		 * in stop machine. That's theoretically possible, but close
		 * to impossible in practice.
		 *
		 * Anyway, there is nothing we can do about that at this
		 * point w/o refactoring the whole fixup_irq() business
		 * completely. We print at least the irq number and the old
		 * vector number, so we have the necessary information when
		 * a problem in that area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqd->irq, vector);
	}
	free_moved_vector(apicd);
unlock:
	raw_spin_unlock(&vector_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent that the actual interrupt move will run out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
	int ret = 0;

	raw_spin_lock(&vector_lock);
	tomove = irq_matrix_allocated(vector_matrix);
	avl = irq_matrix_available(vector_matrix, true);
	if (avl < tomove) {
		pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
			cpu, tomove, avl);
		ret = -ENOSPC;
		goto out;
	}
	rsvd = irq_matrix_reserved(vector_matrix);
	if (avl < rsvd) {
		pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
			rsvd, avl);
	}
out:
	raw_spin_unlock(&vector_lock);
	return ret;
}
#endif
#endif

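/*
 * Dump one 256-bit APIC register set (ISR/TMR/IRR): eight 32-bit words
 * spaced 0x10 apart.
 */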
static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	/* OCW3 0x0b selects the ISR for reading, 0x0a switches back to IRR */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);