1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/mm.h>
35#include <linux/interrupt.h>
36#include <linux/irq.h>
37#include <linux/init.h>
38#include <linux/delay.h>
39#include <linux/sched.h>
40#include <linux/pci.h>
41#include <linux/mc146818rtc.h>
42#include <linux/compiler.h>
43#include <linux/acpi.h>
44#include <linux/export.h>
45#include <linux/syscore_ops.h>
46#include <linux/freezer.h>
47#include <linux/kthread.h>
48#include <linux/jiffies.h>
49#include <linux/slab.h>
50#include <linux/memblock.h>
51#include <linux/msi.h>
52
53#include <asm/irqdomain.h>
54#include <asm/io.h>
55#include <asm/smp.h>
56#include <asm/cpu.h>
57#include <asm/desc.h>
58#include <asm/proto.h>
59#include <asm/acpi.h>
60#include <asm/dma.h>
61#include <asm/timer.h>
62#include <asm/time.h>
63#include <asm/i8259.h>
64#include <asm/setup.h>
65#include <asm/irq_remapping.h>
66#include <asm/hw_irq.h>
67#include <asm/apic.h>
68
/* Iterator helpers over IO-APICs, their pins, and an irq's pin routes. */
#define for_each_ioapic(idx) \
	for ((idx) = 0; (idx) < nr_ioapics; (idx)++)
#define for_each_ioapic_reverse(idx) \
	for ((idx) = nr_ioapics - 1; (idx) >= 0; (idx)--)
#define for_each_pin(idx, pin) \
	for ((pin) = 0; (pin) < ioapics[(idx)].nr_registers; (pin)++)
#define for_each_ioapic_pin(idx, pin) \
	for_each_ioapic((idx)) \
		for_each_pin((idx), (pin))
#define for_each_irq_pin(entry, head) \
	list_for_each_entry(entry, &head, list)

/* Serializes raw IO-APIC register access; see also ioapic_zap_locks(). */
static DEFINE_RAW_SPINLOCK(ioapic_lock);
/* Serializes irq mapping/unmapping bookkeeping on sleepable paths. */
static DEFINE_MUTEX(ioapic_mutex);
/* First irq number used for dynamically mapped (non-legacy) GSIs. */
static unsigned int ioapic_dynirq_base;
/* Non-zero once IO-APIC initialization has completed. */
static int ioapic_initialized;

/* One (apic, pin) route of an irq; linked off mp_chip_data::irq_2_pin. */
struct irq_pin_list {
	struct list_head list;
	int apic, pin;
};

/* Per-irq IO-APIC state, installed as irq chip data. */
struct mp_chip_data {
	struct list_head irq_2_pin;		/* all (apic, pin) routes */
	struct IO_APIC_route_entry entry;	/* shadow of the programmed RTE */
	bool is_level;				/* level (true) vs edge trigger */
	bool active_low;			/* polarity */
	bool isa_irq;				/* legacy ISA irq: never freed */
	u32 count;				/* mapping refcount, see mp_unmap_irq() */
};

/* GSI (global system interrupt) range covered by one IO-APIC, inclusive. */
struct mp_ioapic_gsi {
	u32 gsi_base;
	u32 gsi_end;
};

static struct ioapic {
	/*
	 * # of IRQ routing registers (pins) on this IO-APIC.
	 */
	int nr_registers;
	/*
	 * Per-pin save area for suspend/resume, see save_ioapic_entries().
	 */
	struct IO_APIC_route_entry *saved_registers;
	/* I/O APIC config from the MP/ACPI tables */
	struct mpc_ioapic mp_config;
	/* IO APIC gsi routing info */
	struct mp_ioapic_gsi gsi_config;
	struct ioapic_domain_cfg irqdomain_cfg;
	struct irq_domain *irqdomain;
	struct resource *iomem_res;
} ioapics[MAX_IO_APICS];

#define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver
124
/* MP-table APIC id of IO-APIC @ioapic_idx. */
int mpc_ioapic_id(int ioapic_idx)
{
	return ioapics[ioapic_idx].mp_config.apicid;
}

/* Physical MMIO base address of IO-APIC @ioapic_idx. */
unsigned int mpc_ioapic_addr(int ioapic_idx)
{
	return ioapics[ioapic_idx].mp_config.apicaddr;
}

/* GSI routing info (base/end) for IO-APIC @ioapic_idx. */
static inline struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx)
{
	return &ioapics[ioapic_idx].gsi_config;
}

/* Number of pins; the GSI range is inclusive at both ends. */
static inline int mp_ioapic_pin_count(int ioapic)
{
	struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);

	return gsi_cfg->gsi_end - gsi_cfg->gsi_base + 1;
}

/* Translate a pin on @ioapic to its global system interrupt number. */
static inline u32 mp_pin_to_gsi(int ioapic, int pin)
{
	return mp_ioapic_gsi_routing(ioapic)->gsi_base + pin;
}

/* Is @irq within the legacy (PIC) irq range? */
static inline bool mp_is_legacy_irq(int irq)
{
	return irq >= 0 && irq < nr_legacy_irqs();
}

static inline struct irq_domain *mp_ioapic_irqdomain(int ioapic)
{
	return ioapics[ioapic].irqdomain;
}

/* # of IO-APICs registered in ioapics[]. */
int nr_ioapics;

/* One past the highest GSI number currently known. */
u32 gsi_top;

/* MP interrupt source entries gathered from the MP/ACPI tables. */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of valid entries in mp_irqs[]. */
int mp_irq_entries;

#ifdef CONFIG_EISA
/* Bus type per MP bus id; consulted by eisa_irq_is_level(). */
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

/* Bit set => the MP bus with that id is not a PCI bus. */
DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

/* Set (e.g. by "noapic") to skip IO-APIC setup entirely. */
int skip_ioapic_setup;
180
181
182
183
/*
 * Disable IO-APIC usage: turn off the PCI IRQ routing quirks and mark
 * IO-APIC setup to be skipped.
 */
void disable_ioapic_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

/* Handle the "noapic" early boot parameter. */
static int __init parse_noapic(char *str)
{
	/* The option value is ignored; presence alone disables the IO-APIC. */
	disable_ioapic_support();
	return 0;
}
early_param("noapic", parse_noapic);
200
201
202void mp_save_irq(struct mpc_intsrc *m)
203{
204 int i;
205
206 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
207 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
208 m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
209 m->srcbusirq, m->dstapic, m->dstirq);
210
211 for (i = 0; i < mp_irq_entries; i++) {
212 if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
213 return;
214 }
215
216 memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
217 if (++mp_irq_entries == MAX_IRQ_SOURCES)
218 panic("Max # of irq sources exceeded!!\n");
219}
220
221static void alloc_ioapic_saved_registers(int idx)
222{
223 size_t size;
224
225 if (ioapics[idx].saved_registers)
226 return;
227
228 size = sizeof(struct IO_APIC_route_entry) * ioapics[idx].nr_registers;
229 ioapics[idx].saved_registers = kzalloc(size, GFP_KERNEL);
230 if (!ioapics[idx].saved_registers)
231 pr_err("IOAPIC %d: suspend/resume impossible!\n", idx);
232}
233
234static void free_ioapic_saved_registers(int idx)
235{
236 kfree(ioapics[idx].saved_registers);
237 ioapics[idx].saved_registers = NULL;
238}
239
/* Early boot: set up io_apic_irqs and the per-IOAPIC save areas. */
int __init arch_early_ioapic_init(void)
{
	int i;

	/* Without legacy PIC irqs every irq may go through the IO-APIC. */
	if (!nr_legacy_irqs())
		io_apic_irqs = ~0UL;

	for_each_ioapic(i)
		alloc_ioapic_saved_registers(i);

	return 0;
}

/*
 * MMIO register window of an IO-APIC: a register number is written to
 * 'index' and then accessed through 'data'. The 'eoi' register is only
 * used for version >= 0x20 parts (see __eoi_ioapic_pin()).
 */
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};

/* Virtual address of IO-APIC @idx through its fixmap slot. */
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mpc_ioapic_addr(idx) & ~PAGE_MASK);
}
266
/* Write @vector to the EOI register of IO-APIC @apic. */
static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}

/* Read register @reg: select it via the index window, read the data window. */
unsigned int native_io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

/* Write @value to register @reg. Caller must hold ioapic_lock. */
static void io_apic_write(unsigned int apic, unsigned int reg,
			  unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/* Read both 32-bit halves of a routing entry. Caller holds ioapic_lock. */
static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
{
	struct IO_APIC_route_entry entry;

	entry.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	entry.w2 = io_apic_read(apic, 0x11 + 2 * pin);

	return entry;
}

/* Locked wrapper around __ioapic_read_entry(). */
static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	struct IO_APIC_route_entry entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	entry = __ioapic_read_entry(apic, pin);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return entry;
}

/*
 * Write a routing entry. The high word (w2, destination) is written
 * before the low word (w1, which carries the mask bit — see
 * ioapic_mask_entry()), so the entry never points at a stale
 * destination while unmasked. Caller must hold ioapic_lock.
 */
static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	io_apic_write(apic, 0x11 + 2*pin, e.w2);
	io_apic_write(apic, 0x10 + 2*pin, e.w1);
}

/* Locked wrapper around __ioapic_write_entry(). */
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * Disable a pin by writing an all-zero entry with only the mask bit
 * set. Note the reversed write order versus __ioapic_write_entry():
 * the low word (mask bit) goes first, so the pin is masked before the
 * destination is cleared.
 */
static void ioapic_mask_entry(int apic, int pin)
{
	struct IO_APIC_route_entry e = { .masked = true };
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, e.w1);
	io_apic_write(apic, 0x11 + 2*pin, e.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
347
348
349
350
351
352
/*
 * Record that this irq is routed through (apic, pin). Duplicate pairs
 * are silently ignored. Returns 0 on success, -ENOMEM when the list
 * entry cannot be allocated (GFP_ATOMIC: may run with irqs disabled).
 */
static int __add_pin_to_irq_node(struct mp_chip_data *data,
				 int node, int apic, int pin)
{
	struct irq_pin_list *entry;

	/* Don't allow duplicate (apic, pin) pairs. */
	for_each_irq_pin(entry, data->irq_2_pin)
		if (entry->apic == apic && entry->pin == pin)
			return 0;

	entry = kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node);
	if (!entry) {
		pr_err("can not alloc irq_pin_list (%d,%d,%d)\n",
		       node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;
	list_add_tail(&entry->list, &data->irq_2_pin);

	return 0;
}

/* Remove the (apic, pin) route from this irq, if present. */
static void __remove_pin_from_irq(struct mp_chip_data *data, int apic, int pin)
{
	struct irq_pin_list *tmp, *entry;

	list_for_each_entry_safe(entry, tmp, &data->irq_2_pin, list)
		if (entry->apic == apic && entry->pin == pin) {
			list_del(&entry->list);
			kfree(entry);
			return;
		}
}
387
/* As __add_pin_to_irq_node(), but an allocation failure is fatal. */
static void add_pin_to_irq_node(struct mp_chip_data *data,
				int node, int apic, int pin)
{
	int ret = __add_pin_to_irq_node(data, node, apic, pin);

	if (ret)
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}
394
395
396
397
/*
 * Retarget an existing (oldapic, oldpin) route of this irq to
 * (newapic, newpin); if the old route is not found, add the new one.
 */
static void __init replace_pin_at_irq_node(struct mp_chip_data *data, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, data->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* Pairs are unique, so the first match is the only one. */
			return;
		}
	}

	/* Old (apic, pin) didn't exist, so just add the new one. */
	add_pin_to_irq_node(data, node, newapic, newpin);
}

/*
 * Set the mask bit in the shadow entry and write its low word out to
 * every (apic, pin) this irq is routed through; @final, if non-NULL,
 * runs after each hardware write (used to flush posted writes).
 * Caller must hold ioapic_lock.
 */
static void io_apic_modify_irq(struct mp_chip_data *data, bool masked,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	data->entry.masked = masked;

	for_each_irq_pin(entry, data->irq_2_pin) {
		io_apic_write(entry->apic, 0x10 + 2 * entry->pin, data->entry.w1);
		if (final)
			final(entry);
	}
}

/*
 * Flush preceding posted MMIO writes to this IO-APIC with a dummy
 * read of its data window.
 */
static void io_apic_sync(struct irq_pin_list *entry)
{
	struct io_apic __iomem *io_apic;

	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

/* Mask the irq on all of its routed pins, flushing each write. */
static void mask_ioapic_irq(struct irq_data *irq_data)
{
	struct mp_chip_data *data = irq_data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_modify_irq(data, true, &io_apic_sync);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/* Unmask helper; no post-write flush. Caller must hold ioapic_lock. */
static void __unmask_ioapic(struct mp_chip_data *data)
{
	io_apic_modify_irq(data, false, NULL);
}

/* Locked counterpart of mask_ioapic_irq() for unmasking. */
static void unmask_ioapic_irq(struct irq_data *irq_data)
{
	struct mp_chip_data *data = irq_data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_ioapic(data);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
/*
 * EOI an IO-APIC pin for @vector. IO-APICs of version >= 0x20 have a
 * real EOI register. For older parts, emulate it by briefly rewriting
 * the RTE as masked + edge-triggered and then restoring it — used by
 * clear_IO_APIC_pin() below to clear a set remote-IRR bit.
 * Caller must hold ioapic_lock.
 */
static void __eoi_ioapic_pin(int apic, int pin, int vector)
{
	if (mpc_ioapic_ver(apic) >= 0x20) {
		io_apic_eoi(apic, vector);
	} else {
		struct IO_APIC_route_entry entry, entry1;

		entry = entry1 = __ioapic_read_entry(apic, pin);

		/*
		 * Mask the entry and change the trigger mode to edge.
		 */
		entry1.masked = true;
		entry1.is_level = false;

		__ioapic_write_entry(apic, pin, entry1);

		/*
		 * Restore the previous (level-triggered) entry.
		 */
		__ioapic_write_entry(apic, pin, entry);
	}
}

/* EOI @vector on every pin this irq is routed through. */
static void eoi_ioapic_pin(int vector, struct mp_chip_data *data)
{
	unsigned long flags;
	struct irq_pin_list *entry;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, data->irq_2_pin)
		__eoi_ioapic_pin(entry->apic, entry->pin, vector);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/* Bring a pin into a known-quiet state: masked, no pending remote-IRR. */
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode so an SMI pin is never touched. */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == APIC_DELIVERY_MODE_SMI)
		return;

	/*
	 * Make sure the entry is masked, then re-read it so the irr
	 * check below reflects the post-mask hardware state.
	 */
	if (!entry.masked) {
		entry.masked = true;
		ioapic_write_entry(apic, pin, entry);
		entry = ioapic_read_entry(apic, pin);
	}

	if (entry.irr) {
		unsigned long flags;

		/*
		 * Remote IRR is only meaningful for level-triggered
		 * entries; force level so the EOI below can clear it.
		 */
		if (!entry.is_level) {
			entry.is_level = true;
			ioapic_write_entry(apic, pin, entry);
		}
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		__eoi_ioapic_pin(apic, pin, entry.vector);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	}

	/*
	 * Clear the rest of the RTE (masked, everything else zero) and
	 * verify that remote-IRR really went away.
	 */
	ioapic_mask_entry(apic, pin);
	entry = ioapic_read_entry(apic, pin);
	if (entry.irr)
		pr_err("Unable to reset IRR for apic: %d, pin :%d\n",
		       mpc_ioapic_id(apic), pin);
}
565
566void clear_IO_APIC (void)
567{
568 int apic, pin;
569
570 for_each_ioapic_pin(apic, pin)
571 clear_IO_APIC_pin(apic, pin);
572}
573
574#ifdef CONFIG_X86_32
575
576
577
578
579
/*
 * "pirq=..." boot option: lets the user specify the IRQ wired to each
 * PIRQ pin, working around broken MP-BIOS tables.
 */
#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	/* ints[0] is the number of values get_options() parsed. */
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * Stored in reverse order: PIRQ0 lands in the last slot.
		 * Consumed by pin_2_irq() as pirq_entries[pin-16].
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
610#endif
611
612
613
614
615int save_ioapic_entries(void)
616{
617 int apic, pin;
618 int err = 0;
619
620 for_each_ioapic(apic) {
621 if (!ioapics[apic].saved_registers) {
622 err = -ENOMEM;
623 continue;
624 }
625
626 for_each_pin(apic, pin)
627 ioapics[apic].saved_registers[pin] =
628 ioapic_read_entry(apic, pin);
629 }
630
631 return err;
632}
633
634
635
636
637void mask_ioapic_entries(void)
638{
639 int apic, pin;
640
641 for_each_ioapic(apic) {
642 if (!ioapics[apic].saved_registers)
643 continue;
644
645 for_each_pin(apic, pin) {
646 struct IO_APIC_route_entry entry;
647
648 entry = ioapics[apic].saved_registers[pin];
649 if (!entry.masked) {
650 entry.masked = true;
651 ioapic_write_entry(apic, pin, entry);
652 }
653 }
654 }
655}
656
657
658
659
660int restore_ioapic_entries(void)
661{
662 int apic, pin;
663
664 for_each_ioapic(apic) {
665 if (!ioapics[apic].saved_registers)
666 continue;
667
668 for_each_pin(apic, pin)
669 ioapic_write_entry(apic, pin,
670 ioapics[apic].saved_registers[pin]);
671 }
672 return 0;
673}
674
675
676
677
678static int find_irq_entry(int ioapic_idx, int pin, int type)
679{
680 int i;
681
682 for (i = 0; i < mp_irq_entries; i++)
683 if (mp_irqs[i].irqtype == type &&
684 (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) ||
685 mp_irqs[i].dstapic == MP_APIC_ALL) &&
686 mp_irqs[i].dstirq == pin)
687 return i;
688
689 return -1;
690}
691
692
693
694
/*
 * Find the IO-APIC pin that ISA irq @irq of @type is connected to, or
 * -1 when no non-PCI MP entry matches.
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			/* Destination pin of the matching entry. */
			return mp_irqs[i].dstirq;
	}
	return -1;
}

/*
 * Find the ioapics[] index that ISA irq @irq of @type is routed to,
 * or -1 when no entry or no registered IO-APIC matches.
 */
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}

	if (i < mp_irq_entries) {
		int ioapic_idx;

		/* Translate the destination APIC id to an ioapics[] index. */
		for_each_ioapic(ioapic_idx)
			if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic)
				return ioapic_idx;
	}

	return -1;
}

/* Polarity of MP-table entry @idx: true = active low. */
static bool irq_active_low(int idx)
{
	int bus = mp_irqs[idx].srcbus;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & MP_IRQPOL_MASK) {
	case MP_IRQPOL_DEFAULT:
		/*
		 * Bus-conforming default: PCI is active low, ISA is
		 * active high.
		 */
		return !test_bit(bus, mp_bus_not_pci);
	case MP_IRQPOL_ACTIVE_HIGH:
		return false;
	case MP_IRQPOL_RESERVED:
		pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
		fallthrough;
	case MP_IRQPOL_ACTIVE_LOW:
	default: /* default covers values outside the enum range */
		return true;
	}
}
759
760#ifdef CONFIG_EISA
761
762
763
/*
 * Read the EISA Edge/Level Control Register: one bit per legacy irq,
 * set = level triggered. Irqs outside the legacy range get edge
 * (false) plus a warning about the broken MP table.
 */
static bool EISA_ELCR(unsigned int irq)
{
	if (irq < nr_legacy_irqs()) {
		unsigned int port = PIC_ELCR1 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return false;
}

/*
 * Trigger mode of entry @idx for bus @bus: EISA buses take it from
 * the ELCR, PCI/ISA use the caller-provided default @level. Unknown
 * bus types default to level with a warning.
 */
static bool eisa_irq_is_level(int idx, int bus, bool level)
{
	switch (mp_bus_id_to_type[bus]) {
	case MP_BUS_PCI:
	case MP_BUS_ISA:
		return level;
	case MP_BUS_EISA:
		return EISA_ELCR(mp_irqs[idx].srcbusirq);
	}
	pr_warn("IOAPIC: Invalid srcbus: %d defaulting to level\n", bus);
	return true;
}
793#else
/*
 * !CONFIG_EISA stub: with no ELCR to consult, the caller's default
 * trigger mode is returned unchanged. Return type changed from int to
 * bool for consistency with the CONFIG_EISA implementation above.
 */
static inline bool eisa_irq_is_level(int idx, int bus, bool level)
{
	return level;
}
798#endif
799
/* Trigger mode of MP-table entry @idx: true = level triggered. */
static bool irq_is_level(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	bool level;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch (mp_irqs[idx].irqflag & MP_IRQTRIG_MASK) {
	case MP_IRQTRIG_DEFAULT:
		/*
		 * Bus-conforming default: PCI is level, ISA is edge;
		 * EISA is then refined via the ELCR.
		 */
		level = !test_bit(bus, mp_bus_not_pci);
		return eisa_irq_is_level(idx, bus, level);
	case MP_IRQTRIG_EDGE:
		return false;
	case MP_IRQTRIG_RESERVED:
		pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
		fallthrough;
	case MP_IRQTRIG_LEVEL:
	default: /* default covers values outside the enum range */
		return true;
	}
}

/*
 * Look up trigger/polarity for @gsi from the MP interrupt source
 * entries. Returns 0 and fills *trigger/*polarity, or -1 when the
 * IO-APIC, pin or table entry cannot be found.
 */
static int __acpi_get_override_irq(u32 gsi, bool *trigger, bool *polarity)
{
	int ioapic, pin, idx;

	if (skip_ioapic_setup)
		return -1;

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0)
		return -1;

	pin = mp_find_ioapic_pin(ioapic, gsi);
	if (pin < 0)
		return -1;

	idx = find_irq_entry(ioapic, pin, mp_INT);
	if (idx < 0)
		return -1;

	*trigger = irq_is_level(idx);
	*polarity = irq_active_low(idx);
	return 0;
}

#ifdef CONFIG_ACPI
/*
 * int-typed wrapper for ACPI callers. The outputs are zeroed first,
 * so writing through the (bool *) casts only sets the low byte of
 * each int. NOTE(review): relies on sizeof(bool) == 1 and
 * little-endian layout — fine on x86, but worth knowing.
 */
int acpi_get_override_irq(u32 gsi, int *is_level, int *active_low)
{
	*is_level = *active_low = 0;
	return __acpi_get_override_irq(gsi, (bool *)is_level,
				       (bool *)active_low);
}
#endif
860
/* Initialize @info for an IO-APIC irq allocation with explicit attributes. */
void ioapic_set_alloc_attr(struct irq_alloc_info *info, int node,
			   int trigger, int polarity)
{
	init_irq_alloc_info(info, NULL);
	info->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
	info->ioapic.node = node;
	info->ioapic.is_level = trigger;
	info->ioapic.active_low = polarity;
	info->ioapic.valid = 1;
}

/*
 * Build allocation info for (ioapic_idx, pin): trigger/polarity come
 * from @src when valid, else from the MP/ACPI tables, else fall back
 * to level/active-low (the PCI defaults).
 */
static void ioapic_copy_alloc_attr(struct irq_alloc_info *dst,
				   struct irq_alloc_info *src,
				   u32 gsi, int ioapic_idx, int pin)
{
	bool level, pol_low;

	copy_irq_alloc_info(dst, src);
	dst->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
	dst->devid = mpc_ioapic_id(ioapic_idx);
	dst->ioapic.pin = pin;
	dst->ioapic.valid = 1;
	if (src && src->ioapic.valid) {
		dst->ioapic.node = src->ioapic.node;
		dst->ioapic.is_level = src->ioapic.is_level;
		dst->ioapic.active_low = src->ioapic.active_low;
	} else {
		dst->ioapic.node = NUMA_NO_NODE;
		if (__acpi_get_override_irq(gsi, &level, &pol_low) >= 0) {
			dst->ioapic.is_level = level;
			dst->ioapic.active_low = pol_low;
		} else {
			/*
			 * PCI interrupts are always active low level
			 * triggered.
			 */
			dst->ioapic.is_level = true;
			dst->ioapic.active_low = true;
		}
	}
}
902
903static int ioapic_alloc_attr_node(struct irq_alloc_info *info)
904{
905 return (info && info->ioapic.valid) ? info->ioapic.node : NUMA_NO_NODE;
906}
907
908static void mp_register_handler(unsigned int irq, bool level)
909{
910 irq_flow_handler_t hdl;
911 bool fasteoi;
912
913 if (level) {
914 irq_set_status_flags(irq, IRQ_LEVEL);
915 fasteoi = true;
916 } else {
917 irq_clear_status_flags(irq, IRQ_LEVEL);
918 fasteoi = false;
919 }
920
921 hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
922 __irq_set_handler(irq, hdl, 0, fasteoi ? "fasteoi" : "edge");
923}
924
/*
 * Check whether the trigger/polarity in @info matches what @irq is
 * already programmed with. A legacy irq with a single pin mapping is
 * updated in place instead of failing, since legacy IRQs start out
 * with default attributes that ACPI overrides may later correct.
 */
static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
{
	struct mp_chip_data *data = irq_get_chip_data(irq);

	/*
	 * Legacy irq with exactly one mapping: adopt the caller's
	 * attributes (switching flow handler if the trigger changed)
	 * rather than rejecting the request.
	 */
	if (irq < nr_legacy_irqs() && data->count == 1) {
		if (info->ioapic.is_level != data->is_level)
			mp_register_handler(irq, info->ioapic.is_level);
		data->entry.is_level = data->is_level = info->ioapic.is_level;
		data->entry.active_low = data->active_low = info->ioapic.active_low;
	}

	return data->is_level == info->ioapic.is_level &&
	       data->active_low == info->ioapic.active_low;
}

/*
 * Allocate an irq for (ioapic, gsi) honoring the domain's policy:
 * legacy domains keep gsi == irq inside the ISA range, strict domains
 * use gsi == irq everywhere, dynamic domains let the core choose
 * (irq stays -1).
 */
static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi,
				 struct irq_alloc_info *info)
{
	bool legacy = false;
	int irq = -1;
	int type = ioapics[ioapic].irqdomain_cfg.type;

	switch (type) {
	case IOAPIC_DOMAIN_LEGACY:
		/*
		 * Pin irq to gsi until initialization is complete or for
		 * gsis beyond the legacy range; ISA-range gsis afterwards
		 * get a dynamically allocated number.
		 */
		if (!ioapic_initialized || gsi >= nr_legacy_irqs())
			irq = gsi;
		legacy = mp_is_legacy_irq(irq);
		break;
	case IOAPIC_DOMAIN_STRICT:
		irq = gsi;
		break;
	case IOAPIC_DOMAIN_DYNAMIC:
		break;
	default:
		WARN(1, "ioapic: unknown irqdomain type %d\n", type);
		return -1;
	}

	return __irq_domain_alloc_irqs(domain, irq, 1,
				       ioapic_alloc_attr_node(info),
				       info, legacy, NULL);
}
976
977
978
979
980
981
982
983
984
985
986
/*
 * Map an ISA irq onto (ioapic, pin). An ISA irq may be routed through
 * several IO-APIC pins while an irqdomain only supports a 1:1
 * pin<->irq mapping: if the irq already exists in the domain, just
 * record the additional pin (attributes must match); otherwise
 * allocate it. Returns the irq number or a negative error.
 */
static int alloc_isa_irq_from_domain(struct irq_domain *domain,
				     int irq, int ioapic, int pin,
				     struct irq_alloc_info *info)
{
	struct mp_chip_data *data;
	struct irq_data *irq_data = irq_get_irq_data(irq);
	int node = ioapic_alloc_attr_node(info);

	/*
	 * An existing parent_data means the irq is already wired up:
	 * only add this pin to its route list.
	 */
	if (irq_data && irq_data->parent_data) {
		if (!mp_check_pin_attr(irq, info))
			return -EBUSY;
		if (__add_pin_to_irq_node(irq_data->chip_data, node, ioapic,
					  info->ioapic.pin))
			return -ENOMEM;
	} else {
		info->flags |= X86_IRQ_ALLOC_LEGACY;
		irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true,
					      NULL);
		if (irq >= 0) {
			irq_data = irq_domain_get_irq_data(domain, irq);
			data = irq_data->chip_data;
			/* ISA irqs are permanent; mp_unmap_irq() skips them. */
			data->isa_irq = true;
		}
	}

	return irq;
}

/*
 * Map (ioapic, pin) to a Linux irq number. Without IOAPIC_MAP_ALLOC
 * only an existing mapping is looked up; with it a mapping is created
 * as needed and its reference count raised. @idx is the mp_irqs[]
 * entry index (or -1) and is used to detect legacy ISA irqs.
 */
static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
			     unsigned int flags, struct irq_alloc_info *info)
{
	int irq;
	bool legacy = false;
	struct irq_alloc_info tmp;
	struct mp_chip_data *data;
	struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);

	if (!domain)
		return -ENOSYS;

	if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
		legacy = mp_is_legacy_irq(irq);
		/*
		 * IRQ2 is the i8259 cascade irq and must never be handed
		 * out as a device interrupt.
		 */
		if (legacy && irq == PIC_CASCADE_IR)
			return -EINVAL;
	}

	mutex_lock(&ioapic_mutex);
	if (!(flags & IOAPIC_MAP_ALLOC)) {
		/* Lookup only; legacy irqs keep their srcbusirq number. */
		if (!legacy) {
			irq = irq_find_mapping(domain, pin);
			if (irq == 0)
				irq = -ENOENT;
		}
	} else {
		ioapic_copy_alloc_attr(&tmp, info, gsi, ioapic, pin);
		if (legacy)
			irq = alloc_isa_irq_from_domain(domain, irq,
							ioapic, pin, &tmp);
		else if ((irq = irq_find_mapping(domain, pin)) == 0)
			irq = alloc_irq_from_domain(domain, ioapic, gsi, &tmp);
		else if (!mp_check_pin_attr(irq, &tmp))
			irq = -EBUSY;
		if (irq >= 0) {
			data = irq_get_chip_data(irq);
			/* Reference dropped by mp_unmap_irq(). */
			data->count++;
		}
	}
	mutex_unlock(&ioapic_mutex);

	return irq;
}
1072
/* Resolve mp_irqs[idx] routed through (ioapic, pin) to an irq number. */
static int pin_2_irq(int idx, int ioapic, int pin, unsigned int flags)
{
	u32 gsi = mp_pin_to_gsi(ioapic, pin);

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		pr_err("broken BIOS or MPTABLE parser, ayiee!!\n");

#ifdef CONFIG_X86_32
	/*
	 * Honor user-configured "pirq=" overrides for pins 16-23
	 * (stored reversed, see ioapic_pirq_setup()).
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				int irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
				return irq;
			}
		}
	}
#endif

	return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags, NULL);
}

/*
 * Map a GSI to an irq number. With IOAPIC_MAP_CHECK the GSI must have
 * a matching MP table entry. See mp_map_pin_to_irq() for the other
 * @flags semantics.
 */
int mp_map_gsi_to_irq(u32 gsi, unsigned int flags, struct irq_alloc_info *info)
{
	int ioapic, pin, idx;

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0)
		return -ENODEV;

	pin = mp_find_ioapic_pin(ioapic, gsi);
	idx = find_irq_entry(ioapic, pin, mp_INT);
	if ((flags & IOAPIC_MAP_CHECK) && idx < 0)
		return -ENODEV;

	return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags, info);
}

/*
 * Drop one reference taken by mp_map_pin_to_irq(IOAPIC_MAP_ALLOC) and
 * free the irq when the count hits zero. ISA irqs are permanent and
 * never freed.
 */
void mp_unmap_irq(int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	struct mp_chip_data *data;

	if (!irq_data || !irq_data->domain)
		return;

	data = irq_data->chip_data;
	if (!data || data->isa_irq)
		return;

	mutex_lock(&ioapic_mutex);
	if (--data->count == 0)
		irq_domain_free_irqs(irq, 1);
	mutex_unlock(&ioapic_mutex);
}
1139
1140
1141
1142
1143
/*
 * Find a specific PCI IRQ entry for (bus, slot, pin) and return its
 * irq (allocating a mapping if needed), or -1. Exported (not __init)
 * because modules may need it.
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int irq, i, best_ioapic = -1, best_idx = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;
		int ioapic_idx, found = 0;

		/* srcbusirq encodes the slot in bits 2-6, INTx pin in bits 0-1. */
		if (bus != lbus || mp_irqs[i].irqtype != mp_INT ||
		    slot != ((mp_irqs[i].srcbusirq >> 2) & 0x1f))
			continue;

		/* The destination IO-APIC must actually be registered. */
		for_each_ioapic(ioapic_idx)
			if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL) {
				found = 1;
				break;
			}
		if (!found)
			continue;

		/* Skip ISA IRQs. */
		irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq, 0);
		if (irq > 0 && !IO_APIC_IRQ(irq))
			continue;

		if (pin == (mp_irqs[i].srcbusirq & 3)) {
			best_idx = i;
			best_ioapic = ioapic_idx;
			goto out;
		}

		/*
		 * No exact INTx-pin match yet: remember the first
		 * bus/slot match as a fuzzy fallback for broken tables.
		 */
		if (best_idx < 0) {
			best_idx = i;
			best_ioapic = ioapic_idx;
		}
	}
	if (best_idx < 0)
		return -1;

out:
	return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
			 IOAPIC_MAP_ALLOC);
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
1202
/* The IO-APIC irq chips; defined further down, referenced by print_IO_APICs(). */
static struct irq_chip ioapic_chip, ioapic_ir_chip;

/* Boot time: create irq mappings for every connected IO-APIC pin. */
static void __init setup_IO_APIC_irqs(void)
{
	unsigned int ioapic, pin;
	int idx;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for_each_ioapic_pin(ioapic, pin) {
		idx = find_irq_entry(ioapic, pin, mp_INT);
		if (idx < 0)
			apic_printk(APIC_VERBOSE,
				    KERN_DEBUG " apic %d pin %d not connected\n",
				    mpc_ioapic_id(ioapic), pin);
		else
			/* Only the first IO-APIC's pins are pre-allocated. */
			pin_2_irq(idx, ioapic, pin,
				  ioapic ? 0 : IOAPIC_MAP_ALLOC);
	}
}

/*
 * Re-initialize ioapic_lock unconditionally. NOTE(review): only safe
 * when no other CPU can hold the lock (crash/NMI style paths) —
 * confirm against callers.
 */
void ioapic_zap_locks(void)
{
	raw_spin_lock_init(&ioapic_lock);
}
1228
/*
 * Dump all routing entries of @apic. @nr_entries is the highest valid
 * entry index (reg_01.bits.entries), hence the inclusive loop bound.
 */
static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
{
	struct IO_APIC_route_entry entry;
	char buf[256];
	int i;

	printk(KERN_DEBUG "IOAPIC %d:\n", apic);
	for (i = 0; i <= nr_entries; i++) {
		entry = ioapic_read_entry(apic, i);
		snprintf(buf, sizeof(buf),
			 " pin%02x, %s, %s, %s, V(%02X), IRR(%1d), S(%1d)",
			 i,
			 entry.masked ? "disabled" : "enabled ",
			 entry.is_level ? "level" : "edge ",
			 entry.active_low ? "low " : "high",
			 entry.vector, entry.irr, entry.delivery_status);
		/* Remapped-format entries carry an IRTE index, not a destination. */
		if (entry.ir_format) {
			printk(KERN_DEBUG "%s, remapped, I(%04X), Z(%X)\n",
			       buf,
			       (entry.ir_index_15 << 15) | entry.ir_index_0_14,
			       entry.ir_zero);
		} else {
			printk(KERN_DEBUG "%s, %s, D(%02X%02X), M(%1d)\n", buf,
			       entry.dest_mode_logical ? "logical " : "physical",
			       entry.virt_destid_8_14, entry.destid_0_7,
			       entry.delivery_mode);
		}
	}
}
1258
1259static void __init print_IO_APIC(int ioapic_idx)
1260{
1261 union IO_APIC_reg_00 reg_00;
1262 union IO_APIC_reg_01 reg_01;
1263 union IO_APIC_reg_02 reg_02;
1264 union IO_APIC_reg_03 reg_03;
1265 unsigned long flags;
1266
1267 raw_spin_lock_irqsave(&ioapic_lock, flags);
1268 reg_00.raw = io_apic_read(ioapic_idx, 0);
1269 reg_01.raw = io_apic_read(ioapic_idx, 1);
1270 if (reg_01.bits.version >= 0x10)
1271 reg_02.raw = io_apic_read(ioapic_idx, 2);
1272 if (reg_01.bits.version >= 0x20)
1273 reg_03.raw = io_apic_read(ioapic_idx, 3);
1274 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1275
1276 printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
1277 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1278 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1279 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1280 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1281
1282 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01);
1283 printk(KERN_DEBUG "....... : max redirection entries: %02X\n",
1284 reg_01.bits.entries);
1285
1286 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1287 printk(KERN_DEBUG "....... : IO APIC version: %02X\n",
1288 reg_01.bits.version);
1289
1290
1291
1292
1293
1294
1295 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1296 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1297 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1298 }
1299
1300
1301
1302
1303
1304
1305 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1306 reg_03.raw != reg_01.raw) {
1307 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1308 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1309 }
1310
1311 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1312 io_apic_print_entries(ioapic_idx, reg_01.bits.entries);
1313}
1314
/* Dump all IO-APICs and the irq -> (apic, pin) routing lists. */
void __init print_IO_APICs(void)
{
	int ioapic_idx;
	unsigned int irq;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for_each_ioapic(ioapic_idx)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mpc_ioapic_id(ioapic_idx),
		       ioapics[ioapic_idx].nr_registers);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for_each_ioapic(ioapic_idx)
		print_IO_APIC(ioapic_idx);

	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_active_irq(irq) {
		struct irq_pin_list *entry;
		struct irq_chip *chip;
		struct mp_chip_data *data;

		/* Only irqs owned by one of the IO-APIC chips are relevant. */
		chip = irq_get_chip(irq);
		if (chip != &ioapic_chip && chip != &ioapic_ir_chip)
			continue;
		data = irq_get_chip_data(irq);
		if (!data)
			continue;
		if (list_empty(&data->irq_2_pin))
			continue;

		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, data->irq_2_pin)
			pr_cont("-> %d:%d", entry->apic, entry->pin);
		pr_cont("\n");
	}

	printk(KERN_INFO ".................................... done.\n");
}
1358
1359
/* Where, if anywhere, the i8259 (ExtINT) is connected to an IO-APIC. */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

/* Locate the i8259 connection and quiesce all IO-APIC pins at boot. */
void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic, pin;

	if (skip_ioapic_setup)
		nr_ioapics = 0;

	if (!nr_legacy_irqs() || !nr_ioapics)
		return;

	/* Scan all pins for one left in ExtINT mode by the firmware. */
	for_each_ioapic_pin(apic, pin) {
		struct IO_APIC_route_entry entry = ioapic_read_entry(apic, pin);

		/*
		 * An enabled ExtINT entry marks the pin where the i8259
		 * is connected.
		 */
		if (!entry.masked &&
		    entry.delivery_mode == APIC_DELIVERY_MODE_EXTINT) {
			ioapic_i8259.apic = apic;
			ioapic_i8259.pin = pin;
			goto found_i8259;
		}
	}
 found_i8259:
	/*
	 * Cross-check the hardware scan against the ExtINT connection
	 * the MP table reports.
	 */
	i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table when the hardware has nothing set up. */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain when hardware and MP table disagree. */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup.
	 */
	clear_IO_APIC();
}
1413
/*
 * Restore legacy (PIC/virtual-wire) interrupt delivery so a kexec'ed
 * kernel or the reboot path finds interrupts in the boot-time state.
 */
void native_restore_boot_irq_mode(void)
{
	/*
	 * If the i8259 is routed through an IO-APIC pin, put that pin
	 * back into virtual-wire (ExtINT) mode so PIC interrupts are
	 * delivered to the boot CPU again.
	 */
	if (ioapic_i8259.pin != -1) {
		struct IO_APIC_route_entry entry;
		u32 apic_id = read_apic_id();

		memset(&entry, 0, sizeof(entry));
		entry.masked = false;			/* unmasked */
		entry.is_level = false;			/* edge triggered */
		entry.active_low = false;		/* active high */
		entry.dest_mode_logical = false;	/* physical destination */
		entry.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
		entry.destid_0_7 = apic_id & 0xFF;
		entry.virt_destid_8_14 = apic_id >> 8;

		/*
		 * Add it to the IO-APIC irq-routing table.
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	if (boot_cpu_has(X86_FEATURE_APIC) || apic_from_smp_config())
		disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}
1443
1444void restore_boot_irq_mode(void)
1445{
1446 if (!nr_legacy_irqs())
1447 return;
1448
1449 x86_apic_ops.restore();
1450}
1451
1452#ifdef CONFIG_X86_32
1453
1454
1455
1456
1457
1458
/*
 * Program each IO-APIC's physical ID register with the ID from the MP
 * table, fixing up BIOS-reported IDs that are out of range or collide
 * with CPU APIC IDs, and patching the MP IRQ routing entries when an
 * ID had to be changed.
 */
void __init setup_ioapic_ids_from_mpc_nocheck(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int ioapic_idx;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for_each_ioapic(ioapic_idx) {
		/* Read the register 0 value */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(ioapic_idx, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mpc_ioapic_id(ioapic_idx);

		if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				ioapic_idx, mpc_ioapic_id(ioapic_idx));
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					    mpc_ioapic_id(ioapic_idx))) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				ioapic_idx, mpc_ioapic_id(ioapic_idx));
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			ioapics[ioapic_idx].mp_config.apicid = i;
		} else {
			physid_mask_t tmp;
			apic->apicid_to_cpu_present(mpc_ioapic_id(ioapic_idx),
						    &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mpc_ioapic_id(ioapic_idx));
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table if the ID
		 * changed.
		 */
		if (old_id != mpc_ioapic_id(ioapic_idx))
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mpc_ioapic_id(ioapic_idx);

		/*
		 * Update the ID register according to the right value from
		 * the MPC table if they are different.
		 */
		if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID)
			continue;

		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mpc_ioapic_id(ioapic_idx));

		reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic_idx, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check: re-read and verify the write stuck.
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(ioapic_idx, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx))
			pr_cont("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}
1559
/*
 * Conditional wrapper around setup_ioapic_ids_from_mpc_nocheck():
 * skipped when ACPI already configured the IO-APICs, and on non-Intel
 * or xAPIC systems where the MPC ID check is not meaningful.
 */
void __init setup_ioapic_ids_from_mpc(void)
{
	/* ACPI has already set up the IO-APIC IDs. */
	if (acpi_ioapic)
		return;
	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(boot_cpu_apic_version))
		return;
	setup_ioapic_ids_from_mpc_nocheck();
}
1574#endif
1575
/* Set by the "no_timer_check" boot option: skip the timer IRQ probe. */
int no_timer_check __initdata;

/* Parser for "no_timer_check"; always accepts. */
static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
1584
1585static void __init delay_with_tsc(void)
1586{
1587 unsigned long long start, now;
1588 unsigned long end = jiffies + 4;
1589
1590 start = rdtsc();
1591
1592
1593
1594
1595
1596
1597
1598 do {
1599 rep_nop();
1600 now = rdtsc();
1601 } while ((now - start) < 40000000000ULL / HZ &&
1602 time_before_eq(jiffies, end));
1603}
1604
1605static void __init delay_without_tsc(void)
1606{
1607 unsigned long end = jiffies + 4;
1608 int band = 1;
1609
1610
1611
1612
1613
1614
1615
1616
1617 do {
1618 __delay(((1U << band++) * 10000000UL) / HZ);
1619 } while (band < 12 && time_before_eq(jiffies, end));
1620}
1621
1622
1623
1624
1625
1626
1627
1628
1629
/*
 * Probe whether the timer interrupt is actually being delivered: spin
 * with interrupts enabled for about four ticks and check that jiffies
 * advanced.  Returns 1 when the timer works (or when the check is
 * disabled with "no_timer_check").
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;

	if (no_timer_check)
		return 1;

	local_irq_enable();
	if (boot_cpu_has(X86_FEATURE_TSC))
		delay_with_tsc();
	else
		delay_without_tsc();

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */
	local_irq_disable();

	/* Did jiffies advance past the threshold? */
	return time_after(jiffies, t1 + 4);
}
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
/*
 * irq_startup() callback: unmask the IO-APIC pin.  For legacy IRQs the
 * 8259 line is masked first and any interrupt already pending there is
 * reported back, so it is not lost across the handover to the IO-APIC.
 */
static unsigned int startup_ioapic_irq(struct irq_data *data)
{
	int was_pending = 0, irq = data->irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < nr_legacy_irqs()) {
		legacy_pic->mask(irq);
		if (legacy_pic->irq_pending(irq))
			was_pending = 1;
	}
	__unmask_ioapic(data->chip_data);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}
1695
/* Count of level IRQs acked while the local APIC TMR bit was already clear. */
atomic_t irq_mis_count;
1697
1698#ifdef CONFIG_GENERIC_PENDING_IRQ
/*
 * Check whether any IO-APIC pin wired to this interrupt still has its
 * remote-IRR bit set, i.e. a level interrupt was delivered but the EOI
 * has not reached the IO-APIC yet.
 */
static bool io_apic_level_ack_pending(struct mp_chip_data *data)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, data->irq_2_pin) {
		struct IO_APIC_route_entry e;
		int pin;

		pin = entry->pin;
		/* Low word of the routing entry carries the IRR bit. */
		e.w1 = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (e.irr) {
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}
1721
1722static inline bool ioapic_prepare_move(struct irq_data *data)
1723{
1724
1725 if (unlikely(irqd_is_setaffinity_pending(data))) {
1726 if (!irqd_irq_masked(data))
1727 mask_ioapic_irq(data);
1728 return true;
1729 }
1730 return false;
1731}
1732
/* Complete a pending affinity move prepared by ioapic_prepare_move(). */
static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
{
	if (unlikely(moveit)) {
		/*
		 * Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic.  This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ack has actually been delivered.
		 *
		 * NOTE(review): the Remote IRR bit is presumably not
		 * fully trustworthy on all chipsets, hence the
		 * conservative "skip the move this time" behaviour when
		 * it is still set — the move will be retried on a later
		 * ack.
		 */
		if (!io_apic_level_ack_pending(data->chip_data))
			irq_move_masked_irq(data);

		/* If the core still wants the irq masked, leave it masked. */
		if (!irqd_irq_masked(data))
			unmask_ioapic_irq(data);
	}
}
1769#else
static inline bool ioapic_prepare_move(struct irq_data *data)
{
	/* Without GENERIC_PENDING_IRQ there is never a deferred move. */
	return false;
}
static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
{
	/* Nothing to finish when moves cannot be pending. */
}
1777#endif
1778
/*
 * EOI handler for level-triggered IO-APIC interrupts, including the
 * workaround for IO-APICs that lose the level semantics (see below).
 */
static void ioapic_ack_level(struct irq_data *irq_data)
{
	struct irq_cfg *cfg = irqd_cfg(irq_data);
	unsigned long v;
	bool moveit;
	int i;

	irq_complete_move(cfg);
	moveit = ioapic_prepare_move(irq_data);

	/*
	 * It appears there is an erratum which affects at least version 0x11
	 * of I/O APIC (that's the 82093AA and cores integrated into various
	 * chipsets).  Under certain conditions a level-triggered interrupt
	 * is erroneously delivered as edge-triggered one but the respective
	 * IRR bit gets set nevertheless.  As a result the I/O unit expects
	 * an EOI message but it will never arrive and further interrupts
	 * are blocked from the source.
	 *
	 * The workaround: if the vector's bit in the local APIC TMR is
	 * clear (i.e. the interrupt arrived looking edge-triggered) for
	 * what we know is a level interrupt, simulate the EOI manually by
	 * writing to the IO-APIC pin (eoi_ioapic_pin() below).
	 *
	 * Read the TMR bit for this vector before acking.
	 */
	i = cfg->vector;
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

	/*
	 * We must acknowledge the irq before we move it or the acknowledge
	 * will not propagate properly.
	 */
	ack_APIC_irq();

	/*
	 * Tail end of clearing the remote IRR bit (either by delivering the
	 * EOI message via the local APIC broadcast, or by simulating it
	 * with a directed EOI to the IO-APIC pin when the TMR bit says the
	 * interrupt was seen as edge-triggered).
	 */
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);
		eoi_ioapic_pin(cfg->vector, irq_data->chip_data);
	}

	ioapic_finish_move(irq_data, moveit);
}
1844
/* EOI handler for level IO-APIC interrupts under interrupt remapping. */
static void ioapic_ir_ack_level(struct irq_data *irq_data)
{
	struct mp_chip_data *data = irq_data->chip_data;

	/*
	 * Intr-remapping uses the pin number as the virtual vector
	 * in the RTE; the real vector lives in the remapping table
	 * entry.  Hence for the IO-APIC EOI we use the RTE's vector
	 * field (data->entry.vector), not cfg->vector.
	 */
	apic_ack_irq(irq_data);
	eoi_ioapic_pin(data->entry.vector, data);
}
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
/*
 * The IO-APIC is essentially a device that turns pin events into MSI
 * messages.  Ask the parent domain to compose the MSI for this irq,
 * then copy the relevant message fields into the routing table entry.
 */
static void ioapic_setup_msg_from_msi(struct irq_data *irq_data,
				      struct IO_APIC_route_entry *entry)
{
	struct msi_msg msg;

	/* Let the parent (vector / remapping) domain compose the message. */
	irq_chip_compose_msi_msg(irq_data, &msg);

	/*
	 * The fields below map 1:1 from the composed MSI message onto
	 * the RTE, both with and without interrupt remapping.
	 */
	entry->vector = msg.arch_data.vector;
	/* Delivery mode (fixed, lowest priority, ...). */
	entry->delivery_mode = msg.arch_data.delivery_mode;
	/* Logical vs. physical destination mode. */
	entry->dest_mode_logical = msg.arch_addr_lo.dest_mode_logical;
	/* Set when the message goes through a DMAR remapping table. */
	entry->ir_format = msg.arch_addr_lo.dmar_format;
	/*
	 * In the remapped case the address low bits carry the index
	 * into the interrupt remapping table rather than a destination
	 * APIC ID.
	 */
	entry->ir_index_0_14 = msg.arch_addr_lo.dmar_index_0_14;
}
1907
/*
 * Rebuild the routing entry from the parent domain's MSI message and
 * write it to every IO-APIC pin wired to this interrupt.  Called with
 * ioapic_lock held (see ioapic_set_affinity()).
 */
static void ioapic_configure_entry(struct irq_data *irqd)
{
	struct mp_chip_data *mpd = irqd->chip_data;
	struct irq_pin_list *entry;

	ioapic_setup_msg_from_msi(irqd, &mpd->entry);

	for_each_irq_pin(entry, mpd->irq_2_pin)
		__ioapic_write_entry(entry->apic, entry->pin, mpd->entry);
}
1918
/*
 * Change this interrupt's CPU affinity: delegate vector assignment to
 * the parent domain, then (unless the parent says nothing changed)
 * reprogram the IO-APIC routing entries under ioapic_lock.
 */
static int ioapic_set_affinity(struct irq_data *irq_data,
			       const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	unsigned long flags;
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	/* IRQ_SET_MASK_OK_DONE means the parent already did everything. */
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE)
		ioapic_configure_entry(irq_data);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return ret;
}
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
/*
 * Report IRQCHIP_STATE_ACTIVE for this interrupt: active when any of
 * its IO-APIC pins has a level interrupt in flight (remote IRR set).
 */
static int ioapic_irq_get_chip_state(struct irq_data *irqd,
				     enum irqchip_irq_state which,
				     bool *state)
{
	struct mp_chip_data *mcd = irqd->chip_data;
	struct IO_APIC_route_entry rentry;
	struct irq_pin_list *p;

	if (which != IRQCHIP_STATE_ACTIVE)
		return -EINVAL;

	*state = false;
	raw_spin_lock(&ioapic_lock);
	for_each_irq_pin(p, mcd->irq_2_pin) {
		rentry = __ioapic_read_entry(p->apic, p->pin);
		/*
		 * The remote IRR bit is only meaningful for level
		 * triggered interrupts; check both bits together.
		 */
		if (rentry.irr && rentry.is_level) {
			*state = true;
			break;
		}
	}
	raw_spin_unlock(&ioapic_lock);
	return 0;
}
1978
/* irq_chip for IO-APIC interrupts without interrupt remapping. */
static struct irq_chip ioapic_chip __read_mostly = {
	.name = "IO-APIC",
	.irq_startup = startup_ioapic_irq,
	.irq_mask = mask_ioapic_irq,
	.irq_unmask = unmask_ioapic_irq,
	.irq_ack = irq_chip_ack_parent,
	.irq_eoi = ioapic_ack_level,
	.irq_set_affinity = ioapic_set_affinity,
	.irq_retrigger = irq_chip_retrigger_hierarchy,
	.irq_get_irqchip_state = ioapic_irq_get_chip_state,
	.flags = IRQCHIP_SKIP_SET_WAKE |
		 IRQCHIP_AFFINITY_PRE_STARTUP,
};
1992
/* irq_chip for IO-APIC interrupts routed through interrupt remapping. */
static struct irq_chip ioapic_ir_chip __read_mostly = {
	.name = "IR-IO-APIC",
	.irq_startup = startup_ioapic_irq,
	.irq_mask = mask_ioapic_irq,
	.irq_unmask = unmask_ioapic_irq,
	.irq_ack = irq_chip_ack_parent,
	.irq_eoi = ioapic_ir_ack_level,
	.irq_set_affinity = ioapic_set_affinity,
	.irq_retrigger = irq_chip_retrigger_hierarchy,
	.irq_get_irqchip_state = ioapic_irq_get_chip_state,
	.flags = IRQCHIP_SKIP_SET_WAKE |
		 IRQCHIP_AFFINITY_PRE_STARTUP,
};
2006
/*
 * For every active IRQ that should be handled by the IO-APIC but has
 * no vector assigned, fall back to the legacy 8259 (for ISA IRQs) or
 * disable the interrupt entirely.
 */
static inline void init_IO_APIC_traps(void)
{
	struct irq_cfg *cfg;
	unsigned int irq;

	for_each_active_irq(irq) {
		cfg = irq_cfg(irq);
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < nr_legacy_irqs())
				legacy_pic->make_irq(irq);
			else
				/* Strange. Oh, well.. */
				irq_set_chip(irq, &no_irq_chip);
		}
	}
}
2028
2029
2030
2031
2032
2033static void mask_lapic_irq(struct irq_data *data)
2034{
2035 unsigned long v;
2036
2037 v = apic_read(APIC_LVT0);
2038 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2039}
2040
2041static void unmask_lapic_irq(struct irq_data *data)
2042{
2043 unsigned long v;
2044
2045 v = apic_read(APIC_LVT0);
2046 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2047}
2048
/* Plain local APIC EOI for interrupts delivered through LVT0. */
static void ack_lapic_irq(struct irq_data *data)
{
	ack_APIC_irq();
}

/* Minimal irq_chip used when the timer runs as a local APIC virtual wire. */
static struct irq_chip lapic_chip __read_mostly = {
	.name = "local-APIC",
	.irq_mask = mask_lapic_irq,
	.irq_unmask = unmask_lapic_irq,
	.irq_ack = ack_lapic_irq,
};
2060
/*
 * Install lapic_chip for @irq and handle it as edge-triggered (used by
 * check_timer() when falling back to virtual-wire timer delivery).
 */
static void lapic_register_intr(int irq)
{
	/* Clear IRQ_LEVEL so the edge flow handler is consistent. */
	irq_clear_status_flags(irq, IRQ_LEVEL);
	irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}
2067
2068
2069
2070
2071
2072
2073
2074
/*
 * This looks a bit hackish, but it's an attempt to "unlock" chipset
 * ExtINT routing glue by generating RTC (IRQ8) interrupt activity: the
 * RTC pin is temporarily switched to ExtINT mode, the RTC is made to
 * fire periodic interrupts for a while, and then everything is restored.
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;
	u32 apic_id;

	/* Locate the IO-APIC pin carrying ISA IRQ8 (the RTC). */
	pin = find_isa_irq_pin(8, mp_INT);
	if (pin == -1) {
		WARN_ON_ONCE(1);
		return;
	}
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1) {
		WARN_ON_ONCE(1);
		return;
	}

	/* Save the current RTC routing entry and clear the pin. */
	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	/* Build a temporary ExtINT entry aimed at this CPU. */
	apic_id = hard_smp_processor_id();
	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode_logical = true;
	entry1.masked = false;
	entry1.destid_0_7 = apic_id & 0xFF;
	entry1.virt_destid_8_14 = apic_id >> 8;
	entry1.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
	entry1.active_low = entry0.active_low;
	entry1.is_level = false;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	/* Enable RTC periodic interrupts via the CMOS registers. */
	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	/* Let a bounded number of RTC periodic interrupts fire. */
	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	/* Restore the RTC and the original IO-APIC routing entry. */
	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}
2129
/* Set by "disable_timer_pin_1": mask IO-APIC pin 1 after the timer check. */
static int disable_timer_pin_1 __initdata;

/* early_param handler for "disable_timer_pin_1". */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 0;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);
2138
/*
 * Allocate the Linux IRQ for the legacy timer (ISA IRQ 0) on the given
 * IO-APIC pin through its irqdomain.  Returns the IRQ number, or -1
 * when the IO-APIC has no irqdomain.
 */
static int mp_alloc_timer_irq(int ioapic, int pin)
{
	int irq = -1;
	struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);

	if (domain) {
		struct irq_alloc_info info;

		ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 0, 0);
		info.devid = mpc_ioapic_id(ioapic);
		info.ioapic.pin = pin;
		mutex_lock(&ioapic_mutex);
		irq = alloc_isa_irq_from_domain(domain, 0, ioapic, pin, &info);
		mutex_unlock(&ioapic_mutex);
	}

	return irq;
}
2157
2158
2159
2160
2161
2162
2163
2164
2165
/*
 * Probe all plausible ways of delivering the timer interrupt (IRQ0):
 * through the IO-APIC pin reported by the MP table, through the 8259
 * cascade pin, as a local APIC virtual wire, and finally as a raw
 * ExtINT.  This code is deliberately paranoid — it has to cope with a
 * wide range of board and BIOS bugs.  Panics if nothing works.
 */
static inline void __init check_timer(void)
{
	struct irq_data *irq_data = irq_get_irq_data(0);
	struct mp_chip_data *data = irq_data->chip_data;
	struct irq_cfg *cfg = irqd_cfg(irq_data);
	int node = cpu_to_node(0);
	int apic1, pin1, apic2, pin2;
	int no_pin1 = 0;

	if (!global_clock_event)
		return;

	local_irq_disable();

	/*
	 * Get/set the timer IRQ vector:
	 */
	legacy_pic->mask(0);

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.  Mask the
	 * LVT0 ExtINT entry and reinit the 8259 accordingly.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	legacy_pic->init(1);

	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input.  So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
		panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC");
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/* Ok, does IRQ0 through the IOAPIC work? */
		if (no_pin1) {
			mp_alloc_timer_irq(apic1, pin1);
		} else {
			/*
			 * For edge trigger the pin is already unmasked,
			 * so we only need to unmask if it is
			 * level-triggered.
			 */
			int idx = find_irq_entry(apic1, pin1, mp_INT);

			if (idx != -1 && irq_is_level(idx))
				unmask_ioapic_irq(irq_get_irq_data(0));
		}
		irq_domain_deactivate_irq(irq_data);
		irq_domain_activate_irq(irq_data, false);
		if (timer_irq_works()) {
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
		panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC");
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * Legacy devices should be connected to IO APIC #0.
		 * Move the timer to the 8259 cascade pin and retry.
		 */
		replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
		irq_domain_deactivate_irq(irq_data);
		irq_domain_activate_irq(irq_data, false);
		legacy_pic->unmask(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		legacy_pic->mask(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	legacy_pic->unmask(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	legacy_pic->mask(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	legacy_pic->init(0);
	legacy_pic->make_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);
	legacy_pic->unmask(0);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	if (apic_is_x2apic_enabled())
		apic_printk(APIC_QUIET, KERN_INFO
			    "Perhaps problem with the pre-enabled x2apic mode\n"
			    "Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
	panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
		"report. Then try booting with the 'noapic' option.\n");
out:
	local_irq_enable();
}
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
/* Bit for the 8259 cascade IRQ, which must never be driven by the IO-APIC. */
#define PIC_IRQS (1UL << PIC_CASCADE_IR)
2332
/*
 * Create the hierarchical irqdomain for IO-APIC @ioapic, parented on
 * the matching vector/remapping domain.  Returns 0 on success (also
 * when the domain type is marked invalid) or a negative errno.
 */
static int mp_irqdomain_create(int ioapic)
{
	struct irq_domain *parent;
	int hwirqs = mp_ioapic_pin_count(ioapic);
	struct ioapic *ip = &ioapics[ioapic];
	struct ioapic_domain_cfg *cfg = &ip->irqdomain_cfg;
	struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);
	struct fwnode_handle *fn;
	struct irq_fwspec fwspec;

	if (cfg->type == IOAPIC_DOMAIN_INVALID)
		return 0;

	/* Reuse the device-tree fwnode when there is one, else allocate. */
	if (cfg->dev) {
		fn = of_node_to_fwnode(cfg->dev);
	} else {
		fn = irq_domain_alloc_named_id_fwnode("IO-APIC", mpc_ioapic_id(ioapic));
		if (!fn)
			return -ENOMEM;
	}

	fwspec.fwnode = fn;
	fwspec.param_count = 1;
	fwspec.param[0] = mpc_ioapic_id(ioapic);

	parent = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_ANY);
	if (!parent) {
		if (!cfg->dev)
			irq_domain_free_fwnode(fn);
		return -ENODEV;
	}

	ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
						 (void *)(long)ioapic);

	if (!ip->irqdomain) {
		/* Release fw handle if it was allocated above */
		if (!cfg->dev)
			irq_domain_free_fwnode(fn);
		return -ENOMEM;
	}

	ip->irqdomain->parent = parent;

	/*
	 * Legacy/strict domains pre-assign the GSI range, so dynamic
	 * IRQ allocation must start above it.
	 */
	if (cfg->type == IOAPIC_DOMAIN_LEGACY ||
	    cfg->type == IOAPIC_DOMAIN_STRICT)
		ioapic_dynirq_base = max(ioapic_dynirq_base,
					 gsi_cfg->gsi_end + 1);

	return 0;
}
2385
2386static void ioapic_destroy_irqdomain(int idx)
2387{
2388 struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg;
2389 struct fwnode_handle *fn = ioapics[idx].irqdomain->fwnode;
2390
2391 if (ioapics[idx].irqdomain) {
2392 irq_domain_remove(ioapics[idx].irqdomain);
2393 if (!cfg->dev)
2394 irq_domain_free_fwnode(fn);
2395 ioapics[idx].irqdomain = NULL;
2396 }
2397}
2398
/*
 * Main IO-APIC bring-up: create irqdomains, program APIC IDs, set up
 * all IRQ routing entries, and verify the timer interrupt works.
 */
void __init setup_IO_APIC(void)
{
	int ioapic;

	if (skip_ioapic_setup || !nr_ioapics)
		return;

	/* All IRQs except the 8259 cascade go through the IO-APIC. */
	io_apic_irqs = nr_legacy_irqs() ? ~PIC_IRQS : ~0UL;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
	for_each_ioapic(ioapic)
		BUG_ON(mp_irqdomain_create(ioapic));

	/*
	 * Set up IO-APIC IRQ routing.
	 */
	x86_init.mpparse.setup_ioapic_ids();

	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	if (nr_legacy_irqs())
		check_timer();

	ioapic_initialized = 1;
}
2425
2426static void resume_ioapic_id(int ioapic_idx)
2427{
2428 unsigned long flags;
2429 union IO_APIC_reg_00 reg_00;
2430
2431 raw_spin_lock_irqsave(&ioapic_lock, flags);
2432 reg_00.raw = io_apic_read(ioapic_idx, 0);
2433 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) {
2434 reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
2435 io_apic_write(ioapic_idx, 0, reg_00.raw);
2436 }
2437 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2438}
2439
2440static void ioapic_resume(void)
2441{
2442 int ioapic_idx;
2443
2444 for_each_ioapic_reverse(ioapic_idx)
2445 resume_ioapic_id(ioapic_idx);
2446
2447 restore_ioapic_entries();
2448}
2449
/* Suspend/resume hooks: save routing entries on suspend, restore on resume. */
static struct syscore_ops ioapic_syscore_ops = {
	.suspend = save_ioapic_entries,
	.resume = ioapic_resume,
};

/* Register the syscore ops at device-initcall time. */
static int __init ioapic_init_ops(void)
{
	register_syscore_ops(&ioapic_syscore_ops);

	return 0;
}

device_initcall(ioapic_init_ops);
2463
/* Return the number of redirection entries (pins) of IO-APIC @ioapic. */
static int io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	/*
	 * The register returns the maximum redirection index, which
	 * is one less than the total number of redirection entries.
	 */
	return reg_01.bits.entries + 1;
}
2479
/* Lowest IRQ number available for dynamic allocation on this arch. */
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
	/*
	 * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
	 * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
	 */
	if (!ioapic_initialized)
		return gsi_top;

	/*
	 * ioapic_dynirq_base can legitimately be 0 (e.g. no legacy/strict
	 * domains registered); 0 is not a valid dynamic IRQ, so fall back
	 * to @from in that case.
	 */
	return ioapic_dynirq_base ? : from;
}
2494
2495#ifdef CONFIG_X86_32
/*
 * Negotiate a unique APIC ID for IO-APIC @ioapic on 32-bit systems
 * where IO-APIC IDs share the (limited) physical APIC ID space with
 * CPUs.  Returns the ID actually programmed, or -1 when the hardware
 * refused the new ID.
 */
static int io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * The P4 platform supports up to 256 APIC IDs on two independent
	 * APIC buses, so checking for used IDs is only meaningful per bus.
	 * Build the CPU-occupied ID map lazily on first use; subsequent
	 * calls accumulate the IO-APIC IDs assigned so far.
	 */
	if (physids_empty(apic_id_map))
		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
			"%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * Every APIC in a system must have a unique ID or we get lots of
	 * nice 'stuck on smp_invalidate_needed IPI wait' messages.
	 */
	if (apic->check_apicid_used(&apic_id_map, apic_id)) {
		/* Requested ID is taken: find the first free one. */
		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!apic->check_apicid_used(&apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
			"trying %d\n", ioapic, apic_id, i);

		apic_id = i;
	}

	/* Mark the chosen ID as used. */
	apic->apicid_to_cpu_present(apic_id, &tmp);
	physids_or(apic_id_map, apic_id_map, tmp);

	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/* Sanity check: did the write stick? */
		if (reg_00.bits.ID != apic_id) {
			pr_err("IOAPIC[%d]: Unable to change apic_id!\n",
			       ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}
2570
2571static u8 io_apic_unique_id(int idx, u8 id)
2572{
2573 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
2574 !APIC_XAPIC(boot_cpu_apic_version))
2575 return io_apic_get_unique_id(idx, id);
2576 else
2577 return id;
2578}
2579#else
/*
 * 64-bit variant: pick a unique IO-APIC ID.  Prefer @id, then the ID
 * already in the hardware register, then the first free 8-bit value,
 * reprogramming the chip when necessary.
 */
static u8 io_apic_unique_id(int idx, u8 id)
{
	union IO_APIC_reg_00 reg_00;
	DECLARE_BITMAP(used, 256);
	unsigned long flags;
	u8 new_id;
	int i;

	/* Collect the IDs of all already-registered IO-APICs. */
	bitmap_zero(used, 256);
	for_each_ioapic(i)
		__set_bit(mpc_ioapic_id(i), used);

	/* Hand out the requested id if available */
	if (!test_bit(id, used))
		return id;

	/*
	 * Read the current id from the ioapic and keep it if
	 * available.
	 */
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(idx, 0);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	new_id = reg_00.bits.ID;
	if (!test_bit(new_id, used)) {
		apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Using reg apic_id %d instead of %d\n",
			 idx, new_id, id);
		return new_id;
	}

	/*
	 * Get a free id and write it to the ioapic.
	 */
	new_id = find_first_zero_bit(used, 256);
	reg_00.bits.ID = new_id;
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(idx, 0, reg_00.raw);
	reg_00.raw = io_apic_read(idx, 0);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	/* Sanity check */
	BUG_ON(reg_00.bits.ID != new_id);

	return new_id;
}
2625#endif
2626
2627static int io_apic_get_version(int ioapic)
2628{
2629 union IO_APIC_reg_01 reg_01;
2630 unsigned long flags;
2631
2632 raw_spin_lock_irqsave(&ioapic_lock, flags);
2633 reg_01.raw = io_apic_read(ioapic, 1);
2634 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2635
2636 return reg_01.bits.version;
2637}
2638
2639
2640
2641
2642
/* Enough for the "IOAPIC %u" resource name plus the terminating NUL. */
#define IOAPIC_RESOURCE_NAME_SIZE 11

/* Per-IO-APIC MMIO resources, allocated by ioapic_setup_resources(). */
static struct resource *ioapic_resources;
2646
/*
 * Allocate one struct resource (plus name storage) per IO-APIC in a
 * single memblock chunk and attach them to the ioapics[] entries.
 * Returns the resource array, or NULL when there are no IO-APICs.
 */
static struct resource * __init ioapic_setup_resources(void)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics == 0)
		return NULL;

	/* One resource + one name buffer per IO-APIC. */
	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = memblock_alloc(n, SMP_CACHE_BYTES);
	if (!mem)
		panic("%s: Failed to allocate %lu bytes\n", __func__, n);
	res = (void *)mem;

	/* Name buffers follow the resource array in the same chunk. */
	mem += sizeof(struct resource) * nr_ioapics;

	for_each_ioapic(i) {
		res[i].name = mem;
		res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
		mem += IOAPIC_RESOURCE_NAME_SIZE;
		ioapics[i].iomem_res = &res[i];
	}

	ioapic_resources = res;

	return res;
}
2679
/*
 * Map every IO-APIC's MMIO window into the fixmap area (uncached) and
 * record the physical ranges in the resource array.  On 32-bit, a bogus
 * zero address in the MP table disables IO-APIC support entirely and a
 * dummy page is mapped instead.
 */
void __init io_apic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources();
	for_each_ioapic(i) {
		if (smp_found_config) {
			ioapic_phys = mpc_ioapic_addr(i);
#ifdef CONFIG_X86_32
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
#endif
		} else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
			/* Map a harmless zeroed page instead. */
			ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE,
								    PAGE_SIZE);
			if (!ioapic_phys)
				panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
				      __func__, PAGE_SIZE, PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
			__fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
			ioapic_phys);
		idx++;

		ioapic_res->start = ioapic_phys;
		ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
		ioapic_res++;
	}
}
2723
2724void __init ioapic_insert_resources(void)
2725{
2726 int i;
2727 struct resource *r = ioapic_resources;
2728
2729 if (!r) {
2730 if (nr_ioapics > 0)
2731 printk(KERN_ERR
2732 "IO APIC resources couldn't be allocated.\n");
2733 return;
2734 }
2735
2736 for_each_ioapic(i) {
2737 insert_resource(&iomem_resource, r);
2738 r++;
2739 }
2740}
2741
2742int mp_find_ioapic(u32 gsi)
2743{
2744 int i;
2745
2746 if (nr_ioapics == 0)
2747 return -1;
2748
2749
2750 for_each_ioapic(i) {
2751 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i);
2752 if (gsi >= gsi_cfg->gsi_base && gsi <= gsi_cfg->gsi_end)
2753 return i;
2754 }
2755
2756 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
2757 return -1;
2758}
2759
/*
 * Translate a GSI into a pin number on IO-APIC @ioapic.  Returns the
 * pin index, or -1 (with a WARN) for an invalid ioapic index or a GSI
 * above the IO-APIC's range.
 */
int mp_find_ioapic_pin(int ioapic, u32 gsi)
{
	struct mp_ioapic_gsi *gsi_cfg;

	if (WARN_ON(ioapic < 0))
		return -1;

	gsi_cfg = mp_ioapic_gsi_routing(ioapic);
	if (WARN_ON(gsi > gsi_cfg->gsi_end))
		return -1;

	return gsi - gsi_cfg->gsi_base;
}
2773
/*
 * Detect a non-functional IO-APIC: reading all-ones from the first
 * three registers indicates nothing is responding at that address.
 * Returns 1 when the IO-APIC should be skipped, 0 when it looks sane.
 */
static int bad_ioapic_register(int idx)
{
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;

	reg_00.raw = io_apic_read(idx, 0);
	reg_01.raw = io_apic_read(idx, 1);
	reg_02.raw = io_apic_read(idx, 2);

	if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) {
		pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n",
			mpc_ioapic_addr(idx));
		return 1;
	}

	return 0;
}
2792
2793static int find_free_ioapic_entry(void)
2794{
2795 int idx;
2796
2797 for (idx = 0; idx < MAX_IO_APICS; idx++)
2798 if (ioapics[idx].nr_registers == 0)
2799 return idx;
2800
2801 return MAX_IO_APICS;
2802}
2803
2804
2805
2806
2807
2808
2809
2810
/**
 * mp_register_ioapic - Register a new IOAPIC device
 * @id:		hardware IOAPIC ID
 * @address:	physical address of the IOAPIC registers
 * @gsi_base:	base of GSI range associated with the IOAPIC
 * @cfg:	configuration information for the irqdomain
 *
 * Validates the MMIO address, assigns a unique APIC ID, checks the GSI
 * range for conflicts and (when hot-added after boot) creates the
 * irqdomain.  Returns 0 on success, a negative errno on failure.
 */
int mp_register_ioapic(int id, u32 address, u32 gsi_base,
		       struct ioapic_domain_cfg *cfg)
{
	/* Hotplug path only after the boot-time IO-APICs are initialized. */
	bool hotplug = !!ioapic_initialized;
	struct mp_ioapic_gsi *gsi_cfg;
	int idx, ioapic, entries;
	u32 gsi_end;

	if (!address) {
		pr_warn("Bogus (zero) I/O APIC address found, skipping!\n");
		return -EINVAL;
	}
	/* Reject duplicate registrations at the same MMIO address. */
	for_each_ioapic(ioapic)
		if (ioapics[ioapic].mp_config.apicaddr == address) {
			pr_warn("address 0x%x conflicts with IOAPIC%d\n",
				address, ioapic);
			return -EEXIST;
		}

	idx = find_free_ioapic_entry();
	if (idx >= MAX_IO_APICS) {
		pr_warn("Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
			MAX_IO_APICS, idx);
		return -ENOSPC;
	}

	ioapics[idx].mp_config.type = MP_IOAPIC;
	ioapics[idx].mp_config.flags = MPC_APIC_USABLE;
	ioapics[idx].mp_config.apicaddr = address;

	/* Map the registers and make sure the chip actually responds. */
	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
	if (bad_ioapic_register(idx)) {
		clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
		return -ENODEV;
	}

	ioapics[idx].mp_config.apicid = io_apic_unique_id(idx, id);
	ioapics[idx].mp_config.apicver = io_apic_get_version(idx);

	/*
	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
	 */
	entries = io_apic_get_redir_entries(idx);
	gsi_end = gsi_base + entries - 1;
	for_each_ioapic(ioapic) {
		gsi_cfg = mp_ioapic_gsi_routing(ioapic);
		if ((gsi_base >= gsi_cfg->gsi_base &&
		     gsi_base <= gsi_cfg->gsi_end) ||
		    (gsi_end >= gsi_cfg->gsi_base &&
		     gsi_end <= gsi_cfg->gsi_end)) {
			pr_warn("GSI range [%u-%u] for new IOAPIC conflicts with GSI[%u-%u]\n",
				gsi_base, gsi_end,
				gsi_cfg->gsi_base, gsi_cfg->gsi_end);
			clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
			return -ENOSPC;
		}
	}
	gsi_cfg = mp_ioapic_gsi_routing(idx);
	gsi_cfg->gsi_base = gsi_base;
	gsi_cfg->gsi_end = gsi_end;

	ioapics[idx].irqdomain = NULL;
	ioapics[idx].irqdomain_cfg = *cfg;

	/*
	 * If mp_register_ioapic() is called during early boot stage when
	 * walking ACPI/DT tables, it's too early to create the irqdomain;
	 * that is deferred to setup_IO_APIC().  Only hot-added IO-APICs
	 * get their domain and saved-register buffer created here.
	 */
	if (hotplug) {
		if (mp_irqdomain_create(idx)) {
			clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
			return -ENOMEM;
		}
		alloc_ioapic_saved_registers(idx);
	}

	if (gsi_cfg->gsi_end >= gsi_top)
		gsi_top = gsi_cfg->gsi_end + 1;
	if (nr_ioapics <= idx)
		nr_ioapics = idx + 1;

	/* Set nr_registers to mark entry present */
	ioapics[idx].nr_registers = entries;

	pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
		idx, mpc_ioapic_id(idx),
		mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
		gsi_cfg->gsi_base, gsi_cfg->gsi_end);

	return 0;
}
2904
2905int mp_unregister_ioapic(u32 gsi_base)
2906{
2907 int ioapic, pin;
2908 int found = 0;
2909
2910 for_each_ioapic(ioapic)
2911 if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) {
2912 found = 1;
2913 break;
2914 }
2915 if (!found) {
2916 pr_warn("can't find IOAPIC for GSI %d\n", gsi_base);
2917 return -ENODEV;
2918 }
2919
2920 for_each_pin(ioapic, pin) {
2921 u32 gsi = mp_pin_to_gsi(ioapic, pin);
2922 int irq = mp_map_gsi_to_irq(gsi, 0, NULL);
2923 struct mp_chip_data *data;
2924
2925 if (irq >= 0) {
2926 data = irq_get_chip_data(irq);
2927 if (data && data->count) {
2928 pr_warn("pin%d on IOAPIC%d is still in use.\n",
2929 pin, ioapic);
2930 return -EBUSY;
2931 }
2932 }
2933 }
2934
2935
2936 ioapics[ioapic].nr_registers = 0;
2937 ioapic_destroy_irqdomain(ioapic);
2938 free_ioapic_saved_registers(ioapic);
2939 if (ioapics[ioapic].iomem_res)
2940 release_resource(ioapics[ioapic].iomem_res);
2941 clear_fixmap(FIX_IO_APIC_BASE_0 + ioapic);
2942 memset(&ioapics[ioapic], 0, sizeof(ioapics[ioapic]));
2943
2944 return 0;
2945}
2946
2947int mp_ioapic_registered(u32 gsi_base)
2948{
2949 int ioapic;
2950
2951 for_each_ioapic(ioapic)
2952 if (ioapics[ioapic].gsi_config.gsi_base == gsi_base)
2953 return 1;
2954
2955 return 0;
2956}
2957
2958static void mp_irqdomain_get_attr(u32 gsi, struct mp_chip_data *data,
2959 struct irq_alloc_info *info)
2960{
2961 if (info && info->ioapic.valid) {
2962 data->is_level = info->ioapic.is_level;
2963 data->active_low = info->ioapic.active_low;
2964 } else if (__acpi_get_override_irq(gsi, &data->is_level,
2965 &data->active_low) < 0) {
2966
2967 data->is_level = true;
2968 data->active_low = true;
2969 }
2970}
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984static void mp_preconfigure_entry(struct mp_chip_data *data)
2985{
2986 struct IO_APIC_route_entry *entry = &data->entry;
2987
2988 memset(entry, 0, sizeof(*entry));
2989 entry->is_level = data->is_level;
2990 entry->active_low = data->active_low;
2991
2992
2993
2994
2995 entry->masked = data->is_level;
2996}
2997
/*
 * irq_domain .alloc callback: allocate one interrupt for an IOAPIC pin.
 *
 * @domain:  the IOAPIC's irqdomain (host_data holds the IOAPIC index)
 * @virq:    Linux irq number to bind
 * @nr_irqs: must be 1 - IOAPIC pins are allocated individually
 * @arg:     struct irq_alloc_info carrying the target pin and attributes
 *
 * Returns 0 on success; -EINVAL on bad arguments, -EEXIST if the pin is
 * already mapped, -ENOMEM on allocation failure, or the parent domain's
 * error.  On success the irq_data is wired to a fresh mp_chip_data and the
 * routing entry is preconfigured (but not yet written to hardware - that
 * happens at activate time).
 */
int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
		       unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct mp_chip_data *data;
	struct irq_data *irq_data;
	int ret, ioapic, pin;
	unsigned long flags;

	if (!info || nr_irqs > 1)
		return -EINVAL;
	irq_data = irq_domain_get_irq_data(domain, virq);
	if (!irq_data)
		return -EINVAL;

	ioapic = mp_irqdomain_ioapic_idx(domain);
	pin = info->ioapic.pin;
	/* Each pin may be mapped only once. */
	if (irq_find_mapping(domain, (irq_hw_number_t)pin) > 0)
		return -EEXIST;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Allocate the vector from the parent domain before wiring chip data. */
	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
	if (ret < 0) {
		kfree(data);
		return ret;
	}

	INIT_LIST_HEAD(&data->irq_2_pin);
	irq_data->hwirq = info->ioapic.pin;
	/* Pick the remapping-aware chip when behind an IR parent domain. */
	irq_data->chip = (domain->parent == x86_vector_domain) ?
		&ioapic_chip : &ioapic_ir_chip;
	irq_data->chip_data = data;
	mp_irqdomain_get_attr(mp_pin_to_gsi(ioapic, pin), data, info);

	add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);

	mp_preconfigure_entry(data);
	mp_register_handler(virq, data->is_level);

	/* Legacy ISA irqs start masked at the PIC as well. */
	local_irq_save(flags);
	if (virq < nr_legacy_irqs())
		legacy_pic->mask(virq);
	local_irq_restore(flags);

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Preconfigured routing entry (%d-%d -> IRQ %d Level:%i ActiveLow:%i)\n",
		    ioapic, mpc_ioapic_id(ioapic), pin, virq,
		    data->is_level, data->active_low);
	return 0;
}
3051
3052void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
3053 unsigned int nr_irqs)
3054{
3055 struct irq_data *irq_data;
3056 struct mp_chip_data *data;
3057
3058 BUG_ON(nr_irqs != 1);
3059 irq_data = irq_domain_get_irq_data(domain, virq);
3060 if (irq_data && irq_data->chip_data) {
3061 data = irq_data->chip_data;
3062 __remove_pin_from_irq(data, mp_irqdomain_ioapic_idx(domain),
3063 (int)irq_data->hwirq);
3064 WARN_ON(!list_empty(&data->irq_2_pin));
3065 kfree(irq_data->chip_data);
3066 }
3067 irq_domain_free_irqs_top(domain, virq, nr_irqs);
3068}
3069
/*
 * irq_domain .activate callback: write the preconfigured routing entry to
 * the IOAPIC pin, under ioapic_lock with interrupts disabled.  Always
 * succeeds (returns 0).
 */
int mp_irqdomain_activate(struct irq_domain *domain,
			  struct irq_data *irq_data, bool reserve)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	ioapic_configure_entry(irq_data);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	return 0;
}
3080
3081void mp_irqdomain_deactivate(struct irq_domain *domain,
3082 struct irq_data *irq_data)
3083{
3084
3085 ioapic_mask_entry(mp_irqdomain_ioapic_idx(domain),
3086 (int)irq_data->hwirq);
3087}
3088
/*
 * Recover the IOAPIC index from an IOAPIC irqdomain; the index is stored
 * as an integer in host_data when the domain is created.
 */
int mp_irqdomain_ioapic_idx(struct irq_domain *domain)
{
	return (int)(long)domain->host_data;
}
3093
/* irq_domain callbacks for IOAPIC-backed interrupt domains. */
const struct irq_domain_ops mp_ioapic_irqdomain_ops = {
	.alloc		= mp_irqdomain_alloc,
	.free		= mp_irqdomain_free,
	.activate	= mp_irqdomain_activate,
	.deactivate	= mp_irqdomain_deactivate,
};
3100