// SPDX-License-Identifier: GPL-2.0
/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 */
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>

struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;
	unsigned int		vector;
	unsigned int		prev_vector;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	unsigned int		irq;
	struct hlist_node	clist;
	unsigned int		move_in_progress	: 1,
				is_managed		: 1,
				can_reserve		: 1,
				has_reserved		: 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif

void lock_vector_lock(void)
{
	/*
	 * Used so that the set of online CPUs does not change during an
	 * assign_irq_vector() operation.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

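/*
 * Walk down to the bottom of the irqdomain hierarchy, where the vector
 * domain lives, and hand back its chip data. Callers may therefore pass
 * irq_data from any level of the hierarchy.
 */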
static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;

	while (irqd->parent_data)
		irqd = irqd->parent_data;

	return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);
	return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}

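/*
 * Write the new vector/target CPU pair into the hardware interrupt
 * configuration, update the effective affinity mask and emit a trace
 * event. Must be called with vector_lock held.
 */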
static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	lockdep_assert_held(&vector_lock);

	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	trace_vector_config(irqd->irq, vector, cpu,
			    apicd->hw_irq_cfg.dest_apicid);
}

static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
			       unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	struct irq_desc *desc = irq_data_to_desc(irqd);
	bool managed = irqd_affinity_is_managed(irqd);

	lockdep_assert_held(&vector_lock);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/*
	 * If there is no vector associated or if the associated vector is
	 * the shutdown vector, which is associated to make PCI/MSI
	 * shutdown mode work, then there is nothing to release. Clear out
	 * prev_vector first to prevent free_moved_vector() from trying to
	 * release the vector.
	 */
	apicd->prev_vector = 0;
	if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
		goto setnew;

	/*
	 * If the target CPU of the previous vector is online, then mark
	 * the vector as move in progress and store it for cleanup when the
	 * first interrupt on the new vector arrives. If the target CPU is
	 * offline then the regular release mechanism via the cleanup
	 * vector is not possible and the vector can be immediately freed
	 * in the underlying matrix allocator.
	 */
	if (cpu_online(apicd->cpu)) {
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
	} else {
		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
				managed);
	}

setnew:
	apicd->vector = newvec;
	apicd->cpu = newcpu;
	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
	per_cpu(vector_irq, newcpu)[newvec] = desc;
}

static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}

static int reserve_managed_vector(struct irq_data *irqd)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apicd->is_managed = true;
	ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	trace_vector_reserve_managed(irqd->irq, ret);
	return ret;
}

static void reserve_irq_vector_locked(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	irq_matrix_reserve(vector_matrix);
	apicd->can_reserve = true;
	apicd->has_reserved = true;
	irqd_set_can_reserve(irqd);
	trace_vector_reserve(irqd->irq, 0);
	vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	reserve_irq_vector_locked(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return 0;
}

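/*
 * Allocate a vector if necessary. The current vector is reused when the
 * target CPU is still online and part of the requested destination mask;
 * otherwise the matrix allocator picks a new vector/CPU pair.
 */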
static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool resvd = apicd->has_reserved;
	unsigned int cpu = apicd->cpu;
	int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/*
	 * If the current target CPU is online and in the new requested
	 * affinity mask, there is no point in moving the interrupt from
	 * one CPU to another.
	 */
	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
		return 0;

	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
	if (vector > 0)
		apic_update_vector(irqd, vector, cpu);
	trace_vector_alloc(irqd->irq, vector, resvd, vector);
	return vector;
}

static int assign_vector_locked(struct irq_data *irqd,
				const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector = allocate_vector(irqd, dest);

	if (vector < 0)
		return vector;

	apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	return 0;
}

static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	ret = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

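/*
 * Fallback search order: CPUs on the home NUMA node which are in the
 * affinity mask, then all CPUs on that node, then the online CPUs in the
 * affinity mask and finally any online CPU.
 */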
static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
	/* Get the affinity mask - either irq_default_affinity or (user) set */
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	int node = irq_data_get_node(irqd);

	if (node == NUMA_NO_NODE)
		goto all;

	/* Try the intersection of the node mask and the affinity mask */
	cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;

	/* Try the node mask */
	if (!assign_vector_locked(irqd, cpumask_of_node(node)))
		return 0;
all:
	/* Try the full affinity mask */
	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;

	/* Try the full online mask */
	return assign_vector_locked(irqd, cpu_online_mask);
}

static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
	if (irqd_affinity_is_managed(irqd))
		return reserve_managed_vector(irqd);
	if (info->mask)
		return assign_irq_vector(irqd, info->mask);

	/*
	 * Make only a global reservation with no guarantee. A real vector
	 * is associated at activation time.
	 */
	return reserve_irq_vector(irqd);
}

static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector, cpu;

	cpumask_and(vector_searchmask, vector_searchmask, affmsk);
	cpu = cpumask_first(vector_searchmask);
	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* set_affinity might call here for nothing */
	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
		return 0;
	vector = irq_matrix_alloc_managed(vector_matrix, cpu);
	trace_vector_alloc_managed(irqd->irq, vector, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);
	return 0;
}

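/*
 * Release the vector, and a pending previous vector if a move was in
 * flight, back to the matrix allocator. Must be called with vector_lock
 * held.
 */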
static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED;
	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* Clean up the move in progress, if there was one */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED;
	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}

static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	trace_vector_deactivate(irqd->irq, apicd->is_managed,
				apicd->can_reserve, false);

	/* Regular fixed assigned interrupt */
	if (!apicd->is_managed && !apicd->can_reserve)
		return;
	/* If the interrupt still has only the global reservation, nothing to do */
	if (apicd->has_reserved)
		return;

	raw_spin_lock_irqsave(&vector_lock, flags);
	clear_irq_vector(irqd);
	if (apicd->can_reserve)
		reserve_irq_vector_locked(irqd);
	else
		vector_assign_managed_shutdown(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static int activate_reserved(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int ret;

	ret = assign_irq_vector_any_locked(irqd);
	if (!ret) {
		apicd->has_reserved = false;
		/*
		 * Core code might have disabled reservation mode after
		 * allocating the irq descriptor. In that case the
		 * interrupt must keep its vector assignment, so clear
		 * can_reserve to prevent a later deactivation from
		 * putting it back into reserved mode.
		 */
		if (!irqd_can_reserve(irqd))
			apicd->can_reserve = false;
	}
	return ret;
}

static int activate_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	int ret;

	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
		/* Something in the core code broke! Survive gracefully */
		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
		return -EINVAL;
	}

	ret = assign_managed_vector(irqd, vector_searchmask);
	/*
	 * This should not happen. The vector reservation got buggered.
	 * Handle it gracefully.
	 */
	if (WARN_ON_ONCE(ret < 0)) {
		pr_err("Managed startup irq %u, no vector available\n",
		       irqd->irq);
	}
	return ret;
}

static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
			       bool reserve)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret = 0;

	trace_vector_activate(irqd->irq, apicd->is_managed,
			      apicd->can_reserve, reserve);

	/* Nothing to do for fixed assigned vectors */
	if (!apicd->can_reserve && !apicd->is_managed)
		return 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (reserve || irqd_is_managed_and_shutdown(irqd))
		vector_assign_managed_shutdown(irqd);
	else if (apicd->is_managed)
		ret = activate_managed(irqd);
	else if (apicd->has_reserved)
		ret = activate_reserved(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	trace_vector_teardown(irqd->irq, apicd->is_managed,
			      apicd->has_reserved);

	if (apicd->has_reserved)
		irq_matrix_remove_reserved(vector_matrix);
	if (apicd->is_managed)
		irq_matrix_remove_managed(vector_matrix, dest);
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irqd && irqd->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(irqd);
			vector_free_reserved_and_managed(irqd);
			apicd = irqd->chip_data;
			irq_domain_reset_irq_data(irqd);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apicd);
		}
	}
}

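/*
 * Legacy PIC interrupts have a statically assigned vector
 * (ISA_IRQ_VECTOR(irq)) on CPU 0. An already activated interrupt keeps
 * that position; an inactive one is released so that the caller can
 * reallocate it through the regular vector assignment.
 */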
static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
				    struct apic_chip_data *apicd)
{
	unsigned long flags;
	bool realloc = false;

	apicd->vector = ISA_IRQ_VECTOR(virq);
	apicd->cpu = 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	/*
	 * If the interrupt is activated, then it must stay at this vector
	 * position. That's usually the timer interrupt (0).
	 */
	if (irqd_is_activated(irqd)) {
		trace_vector_setup(virq, true, 0);
		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	} else {
		/* Release the vector */
		apicd->can_reserve = true;
		irqd_set_can_reserve(irqd);
		clear_irq_vector(irqd);
		realloc = true;
	}
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return realloc;
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		apicd->irq = virq + i;
		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);
		/*
		 * Legacy vectors are already assigned when the IOAPIC
		 * takes them over. They stay on the same vector. This is
		 * required for check_timer() to work correctly as it might
		 * switch back to legacy mode. Only update the hardware
		 * config.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			if (!vector_configure_legacy(virq + i, irqd, apicd))
				continue;
		}

		err = assign_irq_vector_policy(irqd, info);
		trace_vector_setup(virq + i, false, err);
		if (err) {
			irqd->chip_data = NULL;
			free_apic_chip_data(apicd);
			goto error;
		}
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i);
	return err;
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
				  struct irq_data *irqd, int ind)
{
	unsigned int cpu, vector, prev_cpu, prev_vector;
	struct apic_chip_data *apicd;
	unsigned long flags;
	int irq;

	if (!irqd) {
		irq_matrix_debug_show(m, vector_matrix, ind);
		return;
	}

	irq = irqd->irq;
	if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
		seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
		seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
		return;
	}

	apicd = irqd->chip_data;
	if (!apicd) {
		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
		return;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = apicd->cpu;
	vector = apicd->vector;
	prev_cpu = apicd->prev_cpu;
	prev_vector = apicd->prev_vector;
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	seq_printf(m, "%*sVector: %5u\n", ind, "", vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", cpu);
	if (prev_vector) {
		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", prev_vector);
		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", prev_cpu);
	}
}
#endif

static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc		= x86_vector_alloc_irqs,
	.free		= x86_vector_free_irqs,
	.activate	= x86_vector_activate,
	.deactivate	= x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= x86_vector_debug_show,
#endif
};

int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if the PIC is present at this point, so we need to
	 * do probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
	/*
	 * Use assign system here so it won't get accounted as allocated
	 * and movable in the cpu hotplug check and it prevents managed
	 * irq reservation from touching it.
	 */
	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}

void __init lapic_assign_system_vectors(void)
{
	unsigned int i, vector = 0;

	for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
		irq_matrix_assign_system(vector_matrix, vector, false);

	if (nr_legacy_irqs() > 1)
		lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

	/* System vectors are reserved, bring up the vector matrix */
	irq_matrix_online(vector_matrix);

	/* Mark the preallocated legacy interrupts */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		if (i != PIC_CASCADE_IR)
			irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
	}
}

int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_domain_free_fwnode(fn);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

	/*
	 * Allocate the vector matrix allocator data structure and limit the
	 * search area.
	 */
	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
					 FIRST_SYSTEM_VECTOR);
	BUG_ON(!vector_matrix);

	return arch_early_ioapic_init();
}

#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
	int isairq = vector - ISA_IRQ_VECTOR(0);

	/* Check whether the irq is in the legacy space */
	if (isairq < 0 || isairq >= nr_legacy_irqs())
		return VECTOR_UNUSED;
	/* Check whether the irq is handled by the IOAPIC */
	if (test_bit(isairq, &io_apic_irqs))
		return VECTOR_UNUSED;
	return irq_to_desc(isairq);
}

/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);

	/* Online the vector matrix array for this CPU */
	irq_matrix_online(vector_matrix);

	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exception are the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. The non legacy interrupts are
	 * only associated to a single CPU.
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}

void lapic_offline(void)
{
	lock_vector_lock();
	irq_matrix_offline(vector_matrix);
	unlock_vector_lock();
}

static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int err;

	/*
	 * Core code can call here for inactive interrupts. For inactive
	 * interrupts which use managed or reservation mode there is no
	 * point in going through the vector assignment right now as the
	 * activation will assign a vector which fits the destination
	 * cpumask. Let the core code store the destination mask and be
	 * done with it.
	 */
	if (!irqd_is_activated(irqd) &&
	    (apicd->is_managed || apicd->can_reserve))
		return IRQ_SET_MASK_OK;

	raw_spin_lock(&vector_lock);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (irqd_affinity_is_managed(irqd))
		err = assign_managed_vector(irqd, vector_searchmask);
	else
		err = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock(&vector_lock);
	return err ? err : IRQ_SET_MASK_OK;
}

#else
# define apic_set_affinity	NULL
#endif

static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	irq_move_irq(irqd);
	ack_APIC_irq();
}

static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};

#ifdef CONFIG_SMP

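/*
 * Release the previous vector of an interrupt whose move to a new
 * vector/CPU has completed, and reset the move state.
 */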
static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/*
	 * This should never happen. Managed interrupts are not
	 * migrated except on CPU down, which does not involve the
	 * cleanup vector. But try to keep the accounting correct
	 * nevertheless.
	 */
	WARN_ON_ONCE(managed);

	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}

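/*
 * Cleanup IPI handler: walks this CPU's cleanup list and releases the old
 * vector of every interrupt whose move away from this CPU has completed.
 */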
asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;

	entering_ack_irq();
	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
		unsigned int irr, vector = apicd->prev_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered at the APICs IRR. If so, then this is
		 * not the best time to clean it up. Clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
		 * priority external vector, so on return from this
		 * interrupt the device interrupt will happen first.
		 */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1U << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			continue;
		}
		free_moved_vector(apicd);
	}

	raw_spin_unlock(&vector_lock);
	exiting_irq();
}

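/*
 * Hand the interrupt over to the cleanup list of its previous target CPU
 * and kick that CPU with the cleanup IPI. If the previous CPU is offline,
 * there is nothing to clean up remotely; just discard the move state.
 */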
static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
	unsigned int cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	cpu = apicd->prev_cpu;
	if (cpu_online(cpu)) {
		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		apicd->prev_vector = 0;
	}
	raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__send_cleanup_vector(apicd);
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	if (vector == apicd->vector && apicd->cpu == smp_processor_id())
		__send_cleanup_vector(apicd);
}

void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned int vector;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqd = irq_domain_get_irq_data(x86_vector_domain,
				       irq_desc_get_irq(desc));
	if (!irqd)
		return;

	raw_spin_lock(&vector_lock);
	apicd = apic_chip_data(irqd);
	if (!apicd)
		goto unlock;

	/*
	 * If prev_vector is empty, no action required.
	 */
	vector = apicd->prev_vector;
	if (!vector)
		goto unlock;

	/*
	 * This is tricky. If the cleanup of the old vector has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 *
	 * 1) The interrupt is in move_in_progress state. That means that
	 *    we have not seen an interrupt since the io_apic was
	 *    reprogrammed to the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup
	 *    IPIs have not been processed yet or it has fired on the old
	 *    vector and the cleanup IPI which brought it here has not
	 *    been processed yet.
	 */
	if (apicd->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before
		 * the update is effective, i.e. it's raised on the old
		 * vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all
		 * cpus loop first with interrupts enabled in stop machine,
		 * so the old vector is not yet cleaned up when the
		 * interrupt fires.
		 *
		 * So the only way to run into this issue is when the
		 * delivery of the interrupt on the apic/system bus would
		 * be delayed beyond the point where the target cpu
		 * disables interrupts in stop machine. I doubt that it can
		 * happen, but at least there is a theoretical chance.
		 *
		 * Anyway, there is nothing we can do about that at this
		 * point w/o refactoring the whole fixup_irq() business
		 * completely. We print at least the irq number and the old
		 * vector number, so we have the necessary information when
		 * a problem in that area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqd->irq, vector);
	}
	free_moved_vector(apicd);
unlock:
	raw_spin_unlock(&vector_lock);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent that the actual interrupt move will run out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
	int ret = 0;

	raw_spin_lock(&vector_lock);
	tomove = irq_matrix_allocated(vector_matrix);
	avl = irq_matrix_available(vector_matrix, true);
	if (avl < tomove) {
		pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
			cpu, tomove, avl);
		ret = -ENOSPC;
		goto out;
	}
	rsvd = irq_matrix_reserved(vector_matrix);
	if (avl < rsvd) {
		pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
			rsvd, avl);
	}
out:
	raw_spin_unlock(&vector_lock);
	return ret;
}
#endif
#endif

static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i * 0x10));

	pr_cont("\n");
}

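/*
 * Dump the register state of the local APIC on the calling CPU at
 * KERN_DEBUG level. Meant to be run on each CPU via
 * smp_call_function_single().
 */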
static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

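/*
 * Dump the i8259 PIC state: interrupt mask, request and in-service
 * registers plus the edge/level control registers.
 */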
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	/* Select and read the In-Service Register (OCW3), then switch back */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* Don't print out if the APIC is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);