// SPDX-License-Identifier: GPL-2.0-only
/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/msi.h>

#include <asm/irqdomain.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/time.h>
#include <asm/i8259.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>

#define	for_each_ioapic(idx)		\
	for ((idx) = 0; (idx) < nr_ioapics; (idx)++)
#define	for_each_ioapic_reverse(idx)	\
	for ((idx) = nr_ioapics - 1; (idx) >= 0; (idx)--)
#define	for_each_pin(idx, pin)		\
	for ((pin) = 0; (pin) < ioapics[(idx)].nr_registers; (pin)++)
#define	for_each_ioapic_pin(idx, pin)	\
	for_each_ioapic((idx))		\
		for_each_pin((idx), (pin))
#define for_each_irq_pin(entry, head) \
	list_for_each_entry(entry, &head, list)

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_MUTEX(ioapic_mutex);
static unsigned int ioapic_dynirq_base;
static int ioapic_initialized;

struct irq_pin_list {
	struct list_head list;
	int apic, pin;
};

struct mp_chip_data {
	struct list_head		irq_2_pin;
	struct IO_APIC_route_entry	entry;
	bool				is_level;
	bool				active_low;
	bool				isa_irq;
	u32				count;
};

struct mp_ioapic_gsi {
	u32 gsi_base;
	u32 gsi_end;
};

static struct ioapic {
	/*
	 * # of IRQ routing registers
	 */
	int nr_registers;
	/*
	 * Saved state during suspend/resume, or while enabling intr-remapping
	 */
	struct IO_APIC_route_entry *saved_registers;
	/* I/O APIC config */
	struct mpc_ioapic mp_config;
	/* IO APIC gsi routing info */
	struct mp_ioapic_gsi gsi_config;
	struct ioapic_domain_cfg irqdomain_cfg;
	struct irq_domain *irqdomain;
	struct resource *iomem_res;
} ioapics[MAX_IO_APICS];
122
123#define mpc_ioapic_ver(ioapic_idx) ioapics[ioapic_idx].mp_config.apicver
124
125int mpc_ioapic_id(int ioapic_idx)
126{
127 return ioapics[ioapic_idx].mp_config.apicid;
128}
129
130unsigned int mpc_ioapic_addr(int ioapic_idx)
131{
132 return ioapics[ioapic_idx].mp_config.apicaddr;
133}
134
135static inline struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx)
136{
137 return &ioapics[ioapic_idx].gsi_config;
138}
139
140static inline int mp_ioapic_pin_count(int ioapic)
141{
142 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);
143
144 return gsi_cfg->gsi_end - gsi_cfg->gsi_base + 1;
145}
146
147static inline u32 mp_pin_to_gsi(int ioapic, int pin)
148{
149 return mp_ioapic_gsi_routing(ioapic)->gsi_base + pin;
150}
151
152static inline bool mp_is_legacy_irq(int irq)
153{
154 return irq >= 0 && irq < nr_legacy_irqs();
155}
156
157static inline struct irq_domain *mp_ioapic_irqdomain(int ioapic)
158{
159 return ioapics[ioapic].irqdomain;
160}
161
int nr_ioapics;

/* The one past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

#ifdef CONFIG_EISA
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

/**
 * disable_ioapic_support() - disables ioapic support at runtime
 */
void disable_ioapic_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	disable_ioapic_support();
	return 0;
}
early_param("noapic", parse_noapic);

/* Will be called in mpparse/ACPI codes for saving IRQ info */
202void mp_save_irq(struct mpc_intsrc *m)
203{
204 int i;
205
206 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
207 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
208 m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
209 m->srcbusirq, m->dstapic, m->dstirq);
210
211 for (i = 0; i < mp_irq_entries; i++) {
212 if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
213 return;
214 }
215
216 memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
217 if (++mp_irq_entries == MAX_IRQ_SOURCES)
218 panic("Max # of irq sources exceeded!!\n");
219}
220
221static void alloc_ioapic_saved_registers(int idx)
222{
223 size_t size;
224
225 if (ioapics[idx].saved_registers)
226 return;
227
228 size = sizeof(struct IO_APIC_route_entry) * ioapics[idx].nr_registers;
229 ioapics[idx].saved_registers = kzalloc(size, GFP_KERNEL);
230 if (!ioapics[idx].saved_registers)
231 pr_err("IOAPIC %d: suspend/resume impossible!\n", idx);
232}
233
234static void free_ioapic_saved_registers(int idx)
235{
236 kfree(ioapics[idx].saved_registers);
237 ioapics[idx].saved_registers = NULL;
238}
239
240int __init arch_early_ioapic_init(void)
241{
242 int i;
243
244 if (!nr_legacy_irqs())
245 io_apic_irqs = ~0UL;
246
247 for_each_ioapic(i)
248 alloc_ioapic_saved_registers(i);
249
250 return 0;
251}
252
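/*
 * Memory mapped IO-APIC register window: the chip is programmed
 * indirectly by writing a register number to the index register at
 * offset 0x00 and then accessing the data window at offset 0x10.
 * Version 0x20+ parts also provide an EOI register at offset 0x40.
 * The unused[] padding below encodes exactly those offsets.
 */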
253struct io_apic {
254 unsigned int index;
255 unsigned int unused[3];
256 unsigned int data;
257 unsigned int unused2[11];
258 unsigned int eoi;
259};
260
261static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
262{
263 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
264 + (mpc_ioapic_addr(idx) & ~PAGE_MASK);
265}
266
267static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
268{
269 struct io_apic __iomem *io_apic = io_apic_base(apic);
270 writel(vector, &io_apic->eoi);
271}
272
273unsigned int native_io_apic_read(unsigned int apic, unsigned int reg)
274{
275 struct io_apic __iomem *io_apic = io_apic_base(apic);
276 writel(reg, &io_apic->index);
277 return readl(&io_apic->data);
278}
279
280static void io_apic_write(unsigned int apic, unsigned int reg,
281 unsigned int value)
282{
283 struct io_apic __iomem *io_apic = io_apic_base(apic);
284
285 writel(reg, &io_apic->index);
286 writel(value, &io_apic->data);
287}
288
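/*
 * A redirection table entry (RTE) is 64 bits wide and is accessed as two
 * 32-bit registers: 0x10 + 2*pin holds the low word (vector, delivery
 * mode, mask, trigger, polarity), 0x11 + 2*pin the high word (destination).
 * The __ioapic_* helpers below expect ioapic_lock to be held by the caller.
 */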
289static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
290{
291 struct IO_APIC_route_entry entry;
292
293 entry.w1 = io_apic_read(apic, 0x10 + 2 * pin);
294 entry.w2 = io_apic_read(apic, 0x11 + 2 * pin);
295
296 return entry;
297}
298
299static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
300{
301 struct IO_APIC_route_entry entry;
302 unsigned long flags;
303
304 raw_spin_lock_irqsave(&ioapic_lock, flags);
305 entry = __ioapic_read_entry(apic, pin);
306 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
307
308 return entry;
309}
310
/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	io_apic_write(apic, 0x11 + 2*pin, e.w2);
	io_apic_write(apic, 0x10 + 2*pin, e.w1);
}
322
323static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
324{
325 unsigned long flags;
326
327 raw_spin_lock_irqsave(&ioapic_lock, flags);
328 __ioapic_write_entry(apic, pin, e);
329 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
330}
331
/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	struct IO_APIC_route_entry e = { .masked = true };
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, e.w1);
	io_apic_write(apic, 0x11 + 2*pin, e.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
347
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int __add_pin_to_irq_node(struct mp_chip_data *data,
				 int node, int apic, int pin)
{
	struct irq_pin_list *entry;

	/* don't allow duplicates */
	for_each_irq_pin(entry, data->irq_2_pin)
		if (entry->apic == apic && entry->pin == pin)
			return 0;

	entry = kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node);
	if (!entry) {
		pr_err("can not alloc irq_pin_list (%d,%d,%d)\n",
		       node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;
	list_add_tail(&entry->list, &data->irq_2_pin);

	return 0;
}
375
376static void __remove_pin_from_irq(struct mp_chip_data *data, int apic, int pin)
377{
378 struct irq_pin_list *tmp, *entry;
379
380 list_for_each_entry_safe(entry, tmp, &data->irq_2_pin, list)
381 if (entry->apic == apic && entry->pin == pin) {
382 list_del(&entry->list);
383 kfree(entry);
384 return;
385 }
386}
387
388static void add_pin_to_irq_node(struct mp_chip_data *data,
389 int node, int apic, int pin)
390{
391 if (__add_pin_to_irq_node(data, node, apic, pin))
392 panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
393}
394
/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct mp_chip_data *data, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, data->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right? */
			return;
		}
	}

	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(data, node, newapic, newpin);
}
416
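/*
 * Update the mask bit of the cached routing entry and write the low word
 * out to every IO-APIC pin that feeds this interrupt.  An optional @final
 * callback (e.g. io_apic_sync()) can be used to flush the posted write.
 * Must be called with ioapic_lock held.
 */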
417static void io_apic_modify_irq(struct mp_chip_data *data, bool masked,
418 void (*final)(struct irq_pin_list *entry))
419{
420 struct irq_pin_list *entry;
421
422 data->entry.masked = masked;
423
424 for_each_irq_pin(entry, data->irq_2_pin) {
425 io_apic_write(entry->apic, 0x10 + 2 * entry->pin, data->entry.w1);
426 if (final)
427 final(entry);
428 }
429}
430
static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;

	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}
442
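/*
 * Masking uses io_apic_sync() as the final step so that the dummy read
 * back from the IO-APIC guarantees the mask has taken effect before the
 * function returns.
 */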
443static void mask_ioapic_irq(struct irq_data *irq_data)
444{
445 struct mp_chip_data *data = irq_data->chip_data;
446 unsigned long flags;
447
448 raw_spin_lock_irqsave(&ioapic_lock, flags);
449 io_apic_modify_irq(data, true, &io_apic_sync);
450 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
451}
452
453static void __unmask_ioapic(struct mp_chip_data *data)
454{
455 io_apic_modify_irq(data, false, NULL);
456}
457
458static void unmask_ioapic_irq(struct irq_data *irq_data)
459{
460 struct mp_chip_data *data = irq_data->chip_data;
461 unsigned long flags;
462
463 raw_spin_lock_irqsave(&ioapic_lock, flags);
464 __unmask_ioapic(data);
465 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
466}
467
/*
 * IO-APIC versions below 0x20 don't support EOI register.
 * For the record, here is the information about various versions:
 *     0Xh     82489DX
 *     1Xh     I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
 *     2Xh     I/O(x)APIC which is PCI 2.2 Compliant
 *     30h-FFh Reserved
 *
 * Some of the Intel ICH Specs (ICH2 to ICH5) documents the io-apic
 * version as 0x2. This is an error with documentation and these ICH chips
 * use io-apic's of version 0x20.
 *
 * For IO-APIC's with EOI register, we use that to do an explicit EOI.
 * Otherwise, we simulate the EOI message manually by changing the trigger
 * mode to edge and then back to level, with RTE being masked during this.
 */
static void __eoi_ioapic_pin(int apic, int pin, int vector)
{
	if (mpc_ioapic_ver(apic) >= 0x20) {
		io_apic_eoi(apic, vector);
	} else {
		struct IO_APIC_route_entry entry, entry1;

		entry = entry1 = __ioapic_read_entry(apic, pin);

		/*
		 * Mask the entry and change the trigger mode to edge.
		 */
		entry1.masked = true;
		entry1.is_level = false;

		__ioapic_write_entry(apic, pin, entry1);

		/*
		 * Restore the previous level triggered entry.
		 */
		__ioapic_write_entry(apic, pin, entry);
	}
}
507
508static void eoi_ioapic_pin(int vector, struct mp_chip_data *data)
509{
510 unsigned long flags;
511 struct irq_pin_list *entry;
512
513 raw_spin_lock_irqsave(&ioapic_lock, flags);
514 for_each_irq_pin(entry, data->irq_2_pin)
515 __eoi_ioapic_pin(entry->apic, entry->pin, vector);
516 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
517}
518
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == APIC_DELIVERY_MODE_SMI)
		return;

	/*
	 * Make sure the entry is masked and re-read the contents to check
	 * if it is a level triggered pin and if the remote-IRR is set.
	 */
	if (!entry.masked) {
		entry.masked = true;
		ioapic_write_entry(apic, pin, entry);
		entry = ioapic_read_entry(apic, pin);
	}

	if (entry.irr) {
		unsigned long flags;

		/*
		 * Make sure the trigger mode is set to level. Explicit EOI
		 * doesn't clear the remote-IRR if the trigger mode is not
		 * set to level.
		 */
		if (!entry.is_level) {
			entry.is_level = true;
			ioapic_write_entry(apic, pin, entry);
		}
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		__eoi_ioapic_pin(apic, pin, entry.vector);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	}

	/*
	 * Clear the rest of the bits in the IO-APIC RTE except for the mask
	 * bit.
	 */
	ioapic_mask_entry(apic, pin);
	entry = ioapic_read_entry(apic, pin);
	if (entry.irr)
		pr_err("Unable to reset IRR for apic: %d, pin :%d\n",
		       mpc_ioapic_id(apic), pin);
}
565
566void clear_IO_APIC (void)
567{
568 int apic, pin;
569
570 for_each_ioapic_pin(apic, pin)
571 clear_IO_APIC_pin(apic, pin);
572}
573
574#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

580#define MAX_PIRQS 8
581static int pirq_entries[MAX_PIRQS] = {
582 [0 ... MAX_PIRQS - 1] = -1
583};
584
585static int __init ioapic_pirq_setup(char *str)
586{
587 int i, max;
588 int ints[MAX_PIRQS+1];
589
590 get_options(str, ARRAY_SIZE(ints), ints);
591
592 apic_printk(APIC_VERBOSE, KERN_INFO
593 "PIRQ redirection, working around broken MP-BIOS.\n");
594 max = MAX_PIRQS;
595 if (ints[0] < MAX_PIRQS)
596 max = ints[0];
597
598 for (i = 0; i < max; i++) {
599 apic_printk(APIC_VERBOSE, KERN_DEBUG
600 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
604 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
605 }
606 return 1;
607}
608
609__setup("pirq=", ioapic_pirq_setup);
610#endif

/*
 * Saves all the IO-APIC RTE's
 */
615int save_ioapic_entries(void)
616{
617 int apic, pin;
618 int err = 0;
619
620 for_each_ioapic(apic) {
621 if (!ioapics[apic].saved_registers) {
622 err = -ENOMEM;
623 continue;
624 }
625
626 for_each_pin(apic, pin)
627 ioapics[apic].saved_registers[pin] =
628 ioapic_read_entry(apic, pin);
629 }
630
631 return err;
632}
633
/*
 * Mask all IO APIC entries.
 */
637void mask_ioapic_entries(void)
638{
639 int apic, pin;
640
641 for_each_ioapic(apic) {
642 if (!ioapics[apic].saved_registers)
643 continue;
644
645 for_each_pin(apic, pin) {
646 struct IO_APIC_route_entry entry;
647
648 entry = ioapics[apic].saved_registers[pin];
649 if (!entry.masked) {
650 entry.masked = true;
651 ioapic_write_entry(apic, pin, entry);
652 }
653 }
654 }
655}
656
/*
 * Restore IO APIC entries which was saved in the ioapic structure.
 */
660int restore_ioapic_entries(void)
661{
662 int apic, pin;
663
664 for_each_ioapic(apic) {
665 if (!ioapics[apic].saved_registers)
666 continue;
667
668 for_each_pin(apic, pin)
669 ioapic_write_entry(apic, pin,
670 ioapics[apic].saved_registers[pin]);
671 }
672 return 0;
673}
674
/*
 * Find the IRQ entry number of a certain pin.
 */
678static int find_irq_entry(int ioapic_idx, int pin, int type)
679{
680 int i;
681
682 for (i = 0; i < mp_irq_entries; i++)
683 if (mp_irqs[i].irqtype == type &&
684 (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) ||
685 mp_irqs[i].dstapic == MP_APIC_ALL) &&
686 mp_irqs[i].dstirq == pin)
687 return i;
688
689 return -1;
690}
691
/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
695static int __init find_isa_irq_pin(int irq, int type)
696{
697 int i;
698
699 for (i = 0; i < mp_irq_entries; i++) {
700 int lbus = mp_irqs[i].srcbus;
701
702 if (test_bit(lbus, mp_bus_not_pci) &&
703 (mp_irqs[i].irqtype == type) &&
704 (mp_irqs[i].srcbusirq == irq))
705
706 return mp_irqs[i].dstirq;
707 }
708 return -1;
709}
710
711static int __init find_isa_irq_apic(int irq, int type)
712{
713 int i;
714
715 for (i = 0; i < mp_irq_entries; i++) {
716 int lbus = mp_irqs[i].srcbus;
717
718 if (test_bit(lbus, mp_bus_not_pci) &&
719 (mp_irqs[i].irqtype == type) &&
720 (mp_irqs[i].srcbusirq == irq))
721 break;
722 }
723
724 if (i < mp_irq_entries) {
725 int ioapic_idx;
726
727 for_each_ioapic(ioapic_idx)
728 if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic)
729 return ioapic_idx;
730 }
731
732 return -1;
733}
734
735static bool irq_active_low(int idx)
736{
737 int bus = mp_irqs[idx].srcbus;
738
	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
742 switch (mp_irqs[idx].irqflag & MP_IRQPOL_MASK) {
743 case MP_IRQPOL_DEFAULT:
		/*
		 * Conforms to spec, ie. bus-type dependent polarity.  PCI
		 * defaults to low active. [E]ISA defaults to high active.
		 */
748 return !test_bit(bus, mp_bus_not_pci);
749 case MP_IRQPOL_ACTIVE_HIGH:
750 return false;
751 case MP_IRQPOL_RESERVED:
752 pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
753 fallthrough;
754 case MP_IRQPOL_ACTIVE_LOW:
755 default:
756 return true;
757 }
758}
759
760#ifdef CONFIG_EISA
/*
 * EISA Edge/Level control register, ELCR
 */
764static bool EISA_ELCR(unsigned int irq)
765{
766 if (irq < nr_legacy_irqs()) {
767 unsigned int port = 0x4d0 + (irq >> 3);
768 return (inb(port) >> (irq & 7)) & 1;
769 }
770 apic_printk(APIC_VERBOSE, KERN_INFO
771 "Broken MPtable reports ISA irq %d\n", irq);
772 return false;
773}

/*
 * EISA interrupts are always active high and can be edge or level
 * triggered depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must be
 * read in from the ELCR.
 */
781static bool eisa_irq_is_level(int idx, int bus, bool level)
782{
783 switch (mp_bus_id_to_type[bus]) {
784 case MP_BUS_PCI:
785 case MP_BUS_ISA:
786 return level;
787 case MP_BUS_EISA:
788 return EISA_ELCR(mp_irqs[idx].srcbusirq);
789 }
790 pr_warn("IOAPIC: Invalid srcbus: %d defaulting to level\n", bus);
791 return true;
792}
793#else
static inline bool eisa_irq_is_level(int idx, int bus, bool level)
795{
796 return level;
797}
798#endif
799
800static bool irq_is_level(int idx)
801{
802 int bus = mp_irqs[idx].srcbus;
803 bool level;
804
	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
808 switch (mp_irqs[idx].irqflag & MP_IRQTRIG_MASK) {
809 case MP_IRQTRIG_DEFAULT:
		/*
		 * Conforms to spec, ie. bus-type dependent trigger
		 * mode. PCI defaults to edge, ISA to level.
		 */
814 level = !test_bit(bus, mp_bus_not_pci);
		/* Take EISA into account */
816 return eisa_irq_is_level(idx, bus, level);
817 case MP_IRQTRIG_EDGE:
818 return false;
819 case MP_IRQTRIG_RESERVED:
820 pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
821 fallthrough;
822 case MP_IRQTRIG_LEVEL:
823 default:
824 return true;
825 }
826}
827
828static int __acpi_get_override_irq(u32 gsi, bool *trigger, bool *polarity)
829{
830 int ioapic, pin, idx;
831
832 if (skip_ioapic_setup)
833 return -1;
834
835 ioapic = mp_find_ioapic(gsi);
836 if (ioapic < 0)
837 return -1;
838
839 pin = mp_find_ioapic_pin(ioapic, gsi);
840 if (pin < 0)
841 return -1;
842
843 idx = find_irq_entry(ioapic, pin, mp_INT);
844 if (idx < 0)
845 return -1;
846
847 *trigger = irq_is_level(idx);
848 *polarity = irq_active_low(idx);
849 return 0;
850}
851
852#ifdef CONFIG_ACPI
853int acpi_get_override_irq(u32 gsi, int *is_level, int *active_low)
854{
855 *is_level = *active_low = 0;
856 return __acpi_get_override_irq(gsi, (bool *)is_level,
857 (bool *)active_low);
858}
859#endif
860
861void ioapic_set_alloc_attr(struct irq_alloc_info *info, int node,
862 int trigger, int polarity)
863{
864 init_irq_alloc_info(info, NULL);
865 info->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
866 info->ioapic.node = node;
867 info->ioapic.is_level = trigger;
868 info->ioapic.active_low = polarity;
869 info->ioapic.valid = 1;
870}
871
872static void ioapic_copy_alloc_attr(struct irq_alloc_info *dst,
873 struct irq_alloc_info *src,
874 u32 gsi, int ioapic_idx, int pin)
875{
876 bool level, pol_low;
877
878 copy_irq_alloc_info(dst, src);
879 dst->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
880 dst->devid = mpc_ioapic_id(ioapic_idx);
881 dst->ioapic.pin = pin;
882 dst->ioapic.valid = 1;
883 if (src && src->ioapic.valid) {
884 dst->ioapic.node = src->ioapic.node;
885 dst->ioapic.is_level = src->ioapic.is_level;
886 dst->ioapic.active_low = src->ioapic.active_low;
887 } else {
888 dst->ioapic.node = NUMA_NO_NODE;
889 if (__acpi_get_override_irq(gsi, &level, &pol_low) >= 0) {
890 dst->ioapic.is_level = level;
891 dst->ioapic.active_low = pol_low;
892 } else {
			/*
			 * PCI interrupts are always active low level
			 * triggered.
			 */
897 dst->ioapic.is_level = true;
898 dst->ioapic.active_low = true;
899 }
900 }
901}
902
903static int ioapic_alloc_attr_node(struct irq_alloc_info *info)
904{
905 return (info && info->ioapic.valid) ? info->ioapic.node : NUMA_NO_NODE;
906}
907
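/*
 * Select the flow handler for a pin: level triggered pins get the
 * fasteoi handler (EOI after the handler has run), edge triggered pins
 * the edge handler.  The IRQ_LEVEL status flag is kept in sync as well.
 */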
908static void mp_register_handler(unsigned int irq, bool level)
909{
910 irq_flow_handler_t hdl;
911 bool fasteoi;
912
913 if (level) {
914 irq_set_status_flags(irq, IRQ_LEVEL);
915 fasteoi = true;
916 } else {
917 irq_clear_status_flags(irq, IRQ_LEVEL);
918 fasteoi = false;
919 }
920
921 hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
922 __irq_set_handler(irq, hdl, 0, fasteoi ? "fasteoi" : "edge");
923}
924
925static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
926{
927 struct mp_chip_data *data = irq_get_chip_data(irq);

	/*
	 * setup_IO_APIC_irqs() programs all legacy IRQs with default trigger
	 * and polarity attributes. So allow the first user to reprogram the
	 * pin with real trigger and polarity attributes.
	 */
934 if (irq < nr_legacy_irqs() && data->count == 1) {
935 if (info->ioapic.is_level != data->is_level)
936 mp_register_handler(irq, info->ioapic.is_level);
937 data->entry.is_level = data->is_level = info->ioapic.is_level;
938 data->entry.active_low = data->active_low = info->ioapic.active_low;
939 }
940
941 return data->is_level == info->ioapic.is_level &&
942 data->active_low == info->ioapic.active_low;
943}
944
945static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi,
946 struct irq_alloc_info *info)
947{
948 bool legacy = false;
949 int irq = -1;
950 int type = ioapics[ioapic].irqdomain_cfg.type;
951
952 switch (type) {
953 case IOAPIC_DOMAIN_LEGACY:
		/*
		 * Dynamically allocate IRQ number for non-ISA IRQs in the first
		 * 16 GSIs on some weird platforms.
		 */
958 if (!ioapic_initialized || gsi >= nr_legacy_irqs())
959 irq = gsi;
960 legacy = mp_is_legacy_irq(irq);
961 break;
962 case IOAPIC_DOMAIN_STRICT:
963 irq = gsi;
964 break;
965 case IOAPIC_DOMAIN_DYNAMIC:
966 break;
967 default:
968 WARN(1, "ioapic: unknown irqdomain type %d\n", type);
969 return -1;
970 }
971
972 return __irq_domain_alloc_irqs(domain, irq, 1,
973 ioapic_alloc_attr_node(info),
974 info, legacy, NULL);
975}

/*
 * Need special handling for ISA IRQs because there may be multiple IOAPIC
 * pins sharing the same ISA IRQ number and irqdomain.  The ISA IRQ is
 * allocated only once; additional IO-APIC pins that route to the same
 * legacy IRQ are simply chained to the existing interrupt via the
 * irq_2_pin list, provided their trigger/polarity attributes match.
 */
987static int alloc_isa_irq_from_domain(struct irq_domain *domain,
988 int irq, int ioapic, int pin,
989 struct irq_alloc_info *info)
990{
991 struct mp_chip_data *data;
992 struct irq_data *irq_data = irq_get_irq_data(irq);
993 int node = ioapic_alloc_attr_node(info);
994
	/*
	 * Legacy ISA IRQ has already been allocated, just add pin to
	 * the pin list associated with this IRQ and program the IOAPIC
	 * entry.
	 */
1000 if (irq_data && irq_data->parent_data) {
1001 if (!mp_check_pin_attr(irq, info))
1002 return -EBUSY;
1003 if (__add_pin_to_irq_node(irq_data->chip_data, node, ioapic,
1004 info->ioapic.pin))
1005 return -ENOMEM;
1006 } else {
1007 info->flags |= X86_IRQ_ALLOC_LEGACY;
1008 irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true,
1009 NULL);
1010 if (irq >= 0) {
1011 irq_data = irq_domain_get_irq_data(domain, irq);
1012 data = irq_data->chip_data;
1013 data->isa_irq = true;
1014 }
1015 }
1016
1017 return irq;
1018}
1019
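/*
 * Central pin -> Linux IRQ mapping.  Pins on ISA-like buses reuse the
 * legacy IRQ number from the MP table, everything else goes through the
 * IO-APIC irqdomain.  With IOAPIC_MAP_ALLOC a mapping is created on
 * demand and reference counted via mp_chip_data::count.
 */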
1020static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
1021 unsigned int flags, struct irq_alloc_info *info)
1022{
1023 int irq;
1024 bool legacy = false;
1025 struct irq_alloc_info tmp;
1026 struct mp_chip_data *data;
1027 struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);
1028
1029 if (!domain)
1030 return -ENOSYS;
1031
1032 if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {
1033 irq = mp_irqs[idx].srcbusirq;
1034 legacy = mp_is_legacy_irq(irq);

		/*
		 * IRQ2 is the cascade line on systems with a legacy PIC and
		 * can therefore not be used as a device interrupt, so refuse
		 * to map it.
		 */
1043 if (legacy && irq == PIC_CASCADE_IR)
1044 return -EINVAL;
1045 }
1046
1047 mutex_lock(&ioapic_mutex);
1048 if (!(flags & IOAPIC_MAP_ALLOC)) {
1049 if (!legacy) {
1050 irq = irq_find_mapping(domain, pin);
1051 if (irq == 0)
1052 irq = -ENOENT;
1053 }
1054 } else {
1055 ioapic_copy_alloc_attr(&tmp, info, gsi, ioapic, pin);
1056 if (legacy)
1057 irq = alloc_isa_irq_from_domain(domain, irq,
1058 ioapic, pin, &tmp);
1059 else if ((irq = irq_find_mapping(domain, pin)) == 0)
1060 irq = alloc_irq_from_domain(domain, ioapic, gsi, &tmp);
1061 else if (!mp_check_pin_attr(irq, &tmp))
1062 irq = -EBUSY;
1063 if (irq >= 0) {
1064 data = irq_get_chip_data(irq);
1065 data->count++;
1066 }
1067 }
1068 mutex_unlock(&ioapic_mutex);
1069
1070 return irq;
1071}
1072
1073static int pin_2_irq(int idx, int ioapic, int pin, unsigned int flags)
1074{
1075 u32 gsi = mp_pin_to_gsi(ioapic, pin);

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
1080 if (mp_irqs[idx].dstirq != pin)
1081 pr_err("broken BIOS or MPTABLE parser, ayiee!!\n");
1082
1083#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
1087 if ((pin >= 16) && (pin <= 23)) {
1088 if (pirq_entries[pin-16] != -1) {
1089 if (!pirq_entries[pin-16]) {
1090 apic_printk(APIC_VERBOSE, KERN_DEBUG
1091 "disabling PIRQ%d\n", pin-16);
1092 } else {
1093 int irq = pirq_entries[pin-16];
1094 apic_printk(APIC_VERBOSE, KERN_DEBUG
1095 "using PIRQ%d -> IRQ %d\n",
1096 pin-16, irq);
1097 return irq;
1098 }
1099 }
1100 }
1101#endif
1102
1103 return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags, NULL);
1104}
1105
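/*
 * Translate a GSI to a Linux IRQ: find the IO-APIC and pin serving the
 * GSI and hand off to mp_map_pin_to_irq().  IOAPIC_MAP_CHECK requires a
 * matching MP table entry to exist.
 */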
1106int mp_map_gsi_to_irq(u32 gsi, unsigned int flags, struct irq_alloc_info *info)
1107{
1108 int ioapic, pin, idx;
1109
1110 ioapic = mp_find_ioapic(gsi);
1111 if (ioapic < 0)
1112 return -ENODEV;
1113
1114 pin = mp_find_ioapic_pin(ioapic, gsi);
1115 idx = find_irq_entry(ioapic, pin, mp_INT);
1116 if ((flags & IOAPIC_MAP_CHECK) && idx < 0)
1117 return -ENODEV;
1118
1119 return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags, info);
1120}
1121
1122void mp_unmap_irq(int irq)
1123{
1124 struct irq_data *irq_data = irq_get_irq_data(irq);
1125 struct mp_chip_data *data;
1126
1127 if (!irq_data || !irq_data->domain)
1128 return;
1129
1130 data = irq_data->chip_data;
1131 if (!data || data->isa_irq)
1132 return;
1133
1134 mutex_lock(&ioapic_mutex);
1135 if (--data->count == 0)
1136 irq_domain_free_irqs(irq, 1);
1137 mutex_unlock(&ioapic_mutex);
1138}
1139

/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
1144int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
1145{
1146 int irq, i, best_ioapic = -1, best_idx = -1;
1147
1148 apic_printk(APIC_DEBUG,
1149 "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
1150 bus, slot, pin);
1151 if (test_bit(bus, mp_bus_not_pci)) {
1152 apic_printk(APIC_VERBOSE,
1153 "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
1154 return -1;
1155 }
1156
1157 for (i = 0; i < mp_irq_entries; i++) {
1158 int lbus = mp_irqs[i].srcbus;
1159 int ioapic_idx, found = 0;
1160
1161 if (bus != lbus || mp_irqs[i].irqtype != mp_INT ||
1162 slot != ((mp_irqs[i].srcbusirq >> 2) & 0x1f))
1163 continue;
1164
1165 for_each_ioapic(ioapic_idx)
1166 if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic ||
1167 mp_irqs[i].dstapic == MP_APIC_ALL) {
1168 found = 1;
1169 break;
1170 }
1171 if (!found)
1172 continue;
1173
		/* Skip ISA IRQs */
1175 irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq, 0);
1176 if (irq > 0 && !IO_APIC_IRQ(irq))
1177 continue;
1178
1179 if (pin == (mp_irqs[i].srcbusirq & 3)) {
1180 best_idx = i;
1181 best_ioapic = ioapic_idx;
1182 goto out;
1183 }
1184
		/*
		 * Use the first all-but-pin matching entry as a
		 * best-guess fuzzy result for broken mptables.
		 */
1189 if (best_idx < 0) {
1190 best_idx = i;
1191 best_ioapic = ioapic_idx;
1192 }
1193 }
1194 if (best_idx < 0)
1195 return -1;
1196
1197out:
1198 return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
1199 IOAPIC_MAP_ALLOC);
1200}
1201EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
1202
1203static struct irq_chip ioapic_chip, ioapic_ir_chip;
1204
1205static void __init setup_IO_APIC_irqs(void)
1206{
1207 unsigned int ioapic, pin;
1208 int idx;
1209
1210 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1211
1212 for_each_ioapic_pin(ioapic, pin) {
1213 idx = find_irq_entry(ioapic, pin, mp_INT);
1214 if (idx < 0)
1215 apic_printk(APIC_VERBOSE,
1216 KERN_DEBUG " apic %d pin %d not connected\n",
1217 mpc_ioapic_id(ioapic), pin);
1218 else
1219 pin_2_irq(idx, ioapic, pin,
1220 ioapic ? 0 : IOAPIC_MAP_ALLOC);
1221 }
1222}
1223
1224void ioapic_zap_locks(void)
1225{
1226 raw_spin_lock_init(&ioapic_lock);
1227}
1228
1229static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
1230{
1231 struct IO_APIC_route_entry entry;
1232 char buf[256];
1233 int i;
1234
1235 printk(KERN_DEBUG "IOAPIC %d:\n", apic);
1236 for (i = 0; i <= nr_entries; i++) {
1237 entry = ioapic_read_entry(apic, i);
1238 snprintf(buf, sizeof(buf),
1239 " pin%02x, %s, %s, %s, V(%02X), IRR(%1d), S(%1d)",
1240 i,
1241 entry.masked ? "disabled" : "enabled ",
1242 entry.is_level ? "level" : "edge ",
1243 entry.active_low ? "low " : "high",
1244 entry.vector, entry.irr, entry.delivery_status);
1245 if (entry.ir_format) {
1246 printk(KERN_DEBUG "%s, remapped, I(%04X), Z(%X)\n",
1247 buf,
1248 (entry.ir_index_15 << 15) | entry.ir_index_0_14,
1249 entry.ir_zero);
1250 } else {
1251 printk(KERN_DEBUG "%s, %s, D(%02X%02X), M(%1d)\n", buf,
1252 entry.dest_mode_logical ? "logical " : "physical",
1253 entry.virt_destid_8_14, entry.destid_0_7,
1254 entry.delivery_mode);
1255 }
1256 }
1257}
1258
1259static void __init print_IO_APIC(int ioapic_idx)
1260{
1261 union IO_APIC_reg_00 reg_00;
1262 union IO_APIC_reg_01 reg_01;
1263 union IO_APIC_reg_02 reg_02;
1264 union IO_APIC_reg_03 reg_03;
1265 unsigned long flags;
1266
1267 raw_spin_lock_irqsave(&ioapic_lock, flags);
1268 reg_00.raw = io_apic_read(ioapic_idx, 0);
1269 reg_01.raw = io_apic_read(ioapic_idx, 1);
1270 if (reg_01.bits.version >= 0x10)
1271 reg_02.raw = io_apic_read(ioapic_idx, 2);
1272 if (reg_01.bits.version >= 0x20)
1273 reg_03.raw = io_apic_read(ioapic_idx, 3);
1274 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1275
1276 printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
1277 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1278 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1279 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1280 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1281
	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1283 printk(KERN_DEBUG "....... : max redirection entries: %02X\n",
1284 reg_01.bits.entries);
1285
1286 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1287 printk(KERN_DEBUG "....... : IO APIC version: %02X\n",
1288 reg_01.bits.version);
1289
	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
	 * but the value of reg_02 is read as the previous read register
	 * value, so ignore it if reg_02 == reg_01.
	 */
1295 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1296 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1297 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1298 }
1299
	/*
	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
	 * or reg_03, but the value of reg_0[23] is read as the previous read
	 * register value, so ignore it if reg_03 == reg_0[12].
	 */
1305 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1306 reg_03.raw != reg_01.raw) {
1307 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1308 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1309 }
1310
1311 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1312 io_apic_print_entries(ioapic_idx, reg_01.bits.entries);
1313}
1314
1315void __init print_IO_APICs(void)
1316{
1317 int ioapic_idx;
1318 unsigned int irq;
1319
1320 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1321 for_each_ioapic(ioapic_idx)
1322 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1323 mpc_ioapic_id(ioapic_idx),
1324 ioapics[ioapic_idx].nr_registers);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
1330 printk(KERN_INFO "testing the IO APIC.......................\n");
1331
1332 for_each_ioapic(ioapic_idx)
1333 print_IO_APIC(ioapic_idx);
1334
1335 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1336 for_each_active_irq(irq) {
1337 struct irq_pin_list *entry;
1338 struct irq_chip *chip;
1339 struct mp_chip_data *data;
1340
1341 chip = irq_get_chip(irq);
1342 if (chip != &ioapic_chip && chip != &ioapic_ir_chip)
1343 continue;
1344 data = irq_get_chip_data(irq);
1345 if (!data)
1346 continue;
1347 if (list_empty(&data->irq_2_pin))
1348 continue;
1349
1350 printk(KERN_DEBUG "IRQ%d ", irq);
1351 for_each_irq_pin(entry, data->irq_2_pin)
1352 pr_cont("-> %d:%d", entry->apic, entry->pin);
1353 pr_cont("\n");
1354 }
1355
1356 printk(KERN_INFO ".................................... done.\n");
1357}
1358
/* Where if anywhere is the i8259 connect in external int mode */
1360static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
1361
1362void __init enable_IO_APIC(void)
1363{
1364 int i8259_apic, i8259_pin;
1365 int apic, pin;
1366
1367 if (skip_ioapic_setup)
1368 nr_ioapics = 0;
1369
1370 if (!nr_legacy_irqs() || !nr_ioapics)
1371 return;
1372
1373 for_each_ioapic_pin(apic, pin) {
		/* See if any of the pins is in ExtINT mode */
1375 struct IO_APIC_route_entry entry = ioapic_read_entry(apic, pin);
1376
		/* If the interrupt line is enabled and in ExtInt mode
		 * I have found the pin where the i8259 is connected.
		 */
1380 if (!entry.masked &&
1381 entry.delivery_mode == APIC_DELIVERY_MODE_EXTINT) {
1382 ioapic_i8259.apic = apic;
1383 ioapic_i8259.pin = pin;
1384 goto found_i8259;
1385 }
1386 }
1387 found_i8259:
	/*
	 * Check what the MP table reports for the ExtINT connection.  If
	 * the scan above did not find a pin in ExtINT mode, fall back to
	 * the i8259 routing advertised by the MP table.
	 */
1393 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1394 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1395
1396 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1397 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1398 ioapic_i8259.pin = i8259_pin;
1399 ioapic_i8259.apic = i8259_apic;
1400 }
1401
1402 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1403 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1404 {
1405 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1406 }
1407
	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
1411 clear_IO_APIC();
1412}
1413
1414void native_restore_boot_irq_mode(void)
1415{
	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 */
1421 if (ioapic_i8259.pin != -1) {
1422 struct IO_APIC_route_entry entry;
1423 u32 apic_id = read_apic_id();
1424
1425 memset(&entry, 0, sizeof(entry));
1426 entry.masked = false;
1427 entry.is_level = false;
1428 entry.active_low = false;
1429 entry.dest_mode_logical = false;
1430 entry.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
1431 entry.destid_0_7 = apic_id & 0xFF;
1432 entry.virt_destid_8_14 = apic_id >> 8;
1433
		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
1437 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
1438 }
1439
1440 if (boot_cpu_has(X86_FEATURE_APIC) || apic_from_smp_config())
1441 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1442}
1443
1444void restore_boot_irq_mode(void)
1445{
1446 if (!nr_legacy_irqs())
1447 return;
1448
1449 x86_apic_ops.restore();
1450}
1451
1452#ifdef CONFIG_X86_32
/*
 * Set the IO-APIC physical IDs based on the values
 * stored in the MPC table.
 */
1459void __init setup_ioapic_ids_from_mpc_nocheck(void)
1460{
1461 union IO_APIC_reg_00 reg_00;
1462 physid_mask_t phys_id_present_map;
1463 int ioapic_idx;
1464 int i;
1465 unsigned char old_id;
1466 unsigned long flags;
1467
1468
1469
1470
1471
1472 apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);
1473
1474
1475
1476
1477 for_each_ioapic(ioapic_idx) {
1478
1479 raw_spin_lock_irqsave(&ioapic_lock, flags);
1480 reg_00.raw = io_apic_read(ioapic_idx, 0);
1481 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1482
1483 old_id = mpc_ioapic_id(ioapic_idx);
1484
1485 if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) {
1486 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
1487 ioapic_idx, mpc_ioapic_id(ioapic_idx));
1488 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1489 reg_00.bits.ID);
1490 ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID;
1491 }
1492
1493
1494
1495
1496
1497
1498 if (apic->check_apicid_used(&phys_id_present_map,
1499 mpc_ioapic_id(ioapic_idx))) {
1500 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
1501 ioapic_idx, mpc_ioapic_id(ioapic_idx));
1502 for (i = 0; i < get_physical_broadcast(); i++)
1503 if (!physid_isset(i, phys_id_present_map))
1504 break;
1505 if (i >= get_physical_broadcast())
1506 panic("Max APIC ID exceeded!\n");
1507 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1508 i);
1509 physid_set(i, phys_id_present_map);
1510 ioapics[ioapic_idx].mp_config.apicid = i;
1511 } else {
1512 physid_mask_t tmp;
1513 apic->apicid_to_cpu_present(mpc_ioapic_id(ioapic_idx),
1514 &tmp);
1515 apic_printk(APIC_VERBOSE, "Setting %d in the "
1516 "phys_id_present_map\n",
1517 mpc_ioapic_id(ioapic_idx));
1518 physids_or(phys_id_present_map, phys_id_present_map, tmp);
1519 }
1520
1521
1522
1523
1524
1525 if (old_id != mpc_ioapic_id(ioapic_idx))
1526 for (i = 0; i < mp_irq_entries; i++)
1527 if (mp_irqs[i].dstapic == old_id)
1528 mp_irqs[i].dstapic
1529 = mpc_ioapic_id(ioapic_idx);
1530
1531
1532
1533
1534
1535 if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID)
1536 continue;
1537
1538 apic_printk(APIC_VERBOSE, KERN_INFO
1539 "...changing IO-APIC physical APIC ID to %d ...",
1540 mpc_ioapic_id(ioapic_idx));
1541
1542 reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
1543 raw_spin_lock_irqsave(&ioapic_lock, flags);
1544 io_apic_write(ioapic_idx, 0, reg_00.raw);
1545 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1546
1547
1548
1549
1550 raw_spin_lock_irqsave(&ioapic_lock, flags);
1551 reg_00.raw = io_apic_read(ioapic_idx, 0);
1552 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1553 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx))
1554 pr_cont("could not set ID!\n");
1555 else
1556 apic_printk(APIC_VERBOSE, " ok.\n");
1557 }
1558}
1559
1560void __init setup_ioapic_ids_from_mpc(void)
1561{
1562
1563 if (acpi_ioapic)
1564 return;
1565
1566
1567
1568
1569 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1570 || APIC_XAPIC(boot_cpu_apic_version))
1571 return;
1572 setup_ioapic_ids_from_mpc_nocheck();
1573}
1574#endif
1575
1576int no_timer_check __initdata;
1577
1578static int __init notimercheck(char *s)
1579{
1580 no_timer_check = 1;
1581 return 1;
1582}
1583__setup("no_timer_check", notimercheck);
1584
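/*
 * Busy-wait helpers for timer_irq_works(): spin for roughly four timer
 * ticks, bounded either by the TSC or by an exponentially growing
 * __delay() loop when no TSC is available.
 */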
1585static void __init delay_with_tsc(void)
1586{
1587 unsigned long long start, now;
1588 unsigned long end = jiffies + 4;
1589
1590 start = rdtsc();
1591
1592
1593
1594
1595
1596
1597
1598 do {
1599 rep_nop();
1600 now = rdtsc();
1601 } while ((now - start) < 40000000000ULL / HZ &&
1602 time_before_eq(jiffies, end));
1603}
1604
1605static void __init delay_without_tsc(void)
1606{
1607 unsigned long end = jiffies + 4;
1608 int band = 1;
1609
1610
1611
1612
1613
1614
1615
1616
1617 do {
1618 __delay(((1U << band++) * 10000000UL) / HZ);
1619 } while (band < 12 && time_before_eq(jiffies, end));
1620}
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630static int __init timer_irq_works(void)
1631{
1632 unsigned long t1 = jiffies;
1633
1634 if (no_timer_check)
1635 return 1;
1636
1637 local_irq_enable();
1638 if (boot_cpu_has(X86_FEATURE_TSC))
1639 delay_with_tsc();
1640 else
1641 delay_without_tsc();
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651 local_irq_disable();
1652
1653
1654 return time_after(jiffies, t1 + 4);
1655}

/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up a edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * return 1 to indicate that is was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */
1679static unsigned int startup_ioapic_irq(struct irq_data *data)
1680{
1681 int was_pending = 0, irq = data->irq;
1682 unsigned long flags;
1683
1684 raw_spin_lock_irqsave(&ioapic_lock, flags);
1685 if (irq < nr_legacy_irqs()) {
1686 legacy_pic->mask(irq);
1687 if (legacy_pic->irq_pending(irq))
1688 was_pending = 1;
1689 }
1690 __unmask_ioapic(data->chip_data);
1691 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1692
1693 return was_pending;
1694}
1695
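/*
 * Number of level triggered interrupts seen with the local APIC TMR bit
 * clear, i.e. cases where the directed EOI fixup in ioapic_ack_level()
 * had to be applied.
 */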
1696atomic_t irq_mis_count;
1697
1698#ifdef CONFIG_GENERIC_PENDING_IRQ
1699static bool io_apic_level_ack_pending(struct mp_chip_data *data)
1700{
1701 struct irq_pin_list *entry;
1702 unsigned long flags;
1703
1704 raw_spin_lock_irqsave(&ioapic_lock, flags);
1705 for_each_irq_pin(entry, data->irq_2_pin) {
1706 struct IO_APIC_route_entry e;
1707 int pin;
1708
1709 pin = entry->pin;
1710 e.w1 = io_apic_read(entry->apic, 0x10 + pin*2);
1711
1712 if (e.irr) {
1713 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1714 return true;
1715 }
1716 }
1717 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1718
1719 return false;
1720}
1721
1722static inline bool ioapic_prepare_move(struct irq_data *data)
1723{
1724
1725 if (unlikely(irqd_is_setaffinity_pending(data))) {
1726 if (!irqd_irq_masked(data))
1727 mask_ioapic_irq(data);
1728 return true;
1729 }
1730 return false;
1731}
1732
1733static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
1734{
1735 if (unlikely(moveit)) {
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762 if (!io_apic_level_ack_pending(data->chip_data))
1763 irq_move_masked_irq(data);
1764
1765 if (!irqd_irq_masked(data))
1766 unmask_ioapic_irq(data);
1767 }
1768}
1769#else
1770static inline bool ioapic_prepare_move(struct irq_data *data)
1771{
1772 return false;
1773}
1774static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
1775{
1776}
1777#endif
1778
1779static void ioapic_ack_level(struct irq_data *irq_data)
1780{
1781 struct irq_cfg *cfg = irqd_cfg(irq_data);
1782 unsigned long v;
1783 bool moveit;
1784 int i;
1785
1786 irq_complete_move(cfg);
1787 moveit = ioapic_prepare_move(irq_data);

	/*
	 * There is an old IO-APIC erratum (82093AA and various chipset
	 * integrated IO-APICs): under rare conditions a level triggered
	 * interrupt is delivered as if it were edge triggered, so the
	 * local APIC's TMR bit for the vector is not set even though the
	 * IO-APIC has latched the remote-IRR bit and is waiting for an
	 * EOI.  Since an EOI for an edge interrupt never reaches the
	 * IO-APIC, the line would be blocked forever.
	 *
	 * The same situation arises when a level triggered interrupt is
	 * forwarded via IPI from an offlined CPU by fixup_irqs().
	 *
	 * Work around it by sampling the TMR bit before issuing the local
	 * APIC EOI and, if it turns out to be clear, performing a directed
	 * EOI on the IO-APIC pin (see __eoi_ioapic_pin()).
	 */
	i = cfg->vector;
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

	/* Ack the interrupt in the local APIC */
	ack_APIC_irq();

	/*
	 * Tail end of the erratum workaround: if the TMR bit was not set
	 * for this level triggered interrupt, clear the remote-IRR with a
	 * directed EOI and account the event in irq_mis_count.
	 */
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);
		eoi_ioapic_pin(cfg->vector, irq_data->chip_data);
	}

	ioapic_finish_move(irq_data, moveit);
}
1844
1845static void ioapic_ir_ack_level(struct irq_data *irq_data)
1846{
1847 struct mp_chip_data *data = irq_data->chip_data;

	/*
	 * Intr-remapping uses pin number as the virtual vector
	 * in the RTE. Actual vector is programmed in
	 * intr-remapping table entry. Hence for the io-apic
	 * EOI we use the pin number.
	 */
1855 apic_ack_irq(irq_data);
1856 eoi_ioapic_pin(data->entry.vector, data);
1857}
1858
/*
 * The I/OAPIC is just a device for generating MSI messages from legacy
 * interrupt pins. Various fields of the RTE translate into bits of the
 * resulting MSI which had to be put in the MSI message with some care.
 *
 * ioapic_setup_msg_from_msi() lets the parent interrupt domain (the
 * vector domain or the interrupt remapping domain) compose the MSI
 * message and then scatters the relevant bits back into the IO-APIC
 * routing entry.
 */
1875static void ioapic_setup_msg_from_msi(struct irq_data *irq_data,
1876 struct IO_APIC_route_entry *entry)
1877{
1878 struct msi_msg msg;
1879
1880
1881 irq_chip_compose_msi_msg(irq_data, &msg);
1882
1883
1884
1885
1886
1887
1888 entry->vector = msg.arch_data.vector;
1889
1890 entry->delivery_mode = msg.arch_data.delivery_mode;
1891
1892 entry->dest_mode_logical = msg.arch_addr_lo.dest_mode_logical;
1893
1894 entry->ir_format = msg.arch_addr_lo.dmar_format;
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905 entry->ir_index_0_14 = msg.arch_addr_lo.dmar_index_0_14;
1906}
1907
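/*
 * Write the routing entry composed from the parent domain's MSI message
 * to every pin chained to this interrupt.  Called with ioapic_lock held.
 */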
1908static void ioapic_configure_entry(struct irq_data *irqd)
1909{
1910 struct mp_chip_data *mpd = irqd->chip_data;
1911 struct irq_pin_list *entry;
1912
1913 ioapic_setup_msg_from_msi(irqd, &mpd->entry);
1914
1915 for_each_irq_pin(entry, mpd->irq_2_pin)
1916 __ioapic_write_entry(entry->apic, entry->pin, mpd->entry);
1917}
1918
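/*
 * Affinity changes are performed by the parent (vector) domain first;
 * unless the parent signalled IRQ_SET_MASK_OK_DONE, the new vector and
 * destination are then propagated into the IO-APIC routing entries.
 */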
1919static int ioapic_set_affinity(struct irq_data *irq_data,
1920 const struct cpumask *mask, bool force)
1921{
1922 struct irq_data *parent = irq_data->parent_data;
1923 unsigned long flags;
1924 int ret;
1925
1926 ret = parent->chip->irq_set_affinity(parent, mask, force);
1927 raw_spin_lock_irqsave(&ioapic_lock, flags);
1928 if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE)
1929 ioapic_configure_entry(irq_data);
1930 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1931
1932 return ret;
1933}
1934
/*
 * The IRQCHIP_STATE_ACTIVE query below reports whether an interrupt is
 * still "in flight" at the IO-APIC: for a level triggered pin the
 * remote-IRR bit stays set from the moment the message was sent until
 * the EOI arrives.  This is used by the interrupt core (e.g. on
 * shutdown/free) to avoid tearing down an interrupt which the target
 * CPU has not serviced yet.
 */
1949static int ioapic_irq_get_chip_state(struct irq_data *irqd,
1950 enum irqchip_irq_state which,
1951 bool *state)
1952{
1953 struct mp_chip_data *mcd = irqd->chip_data;
1954 struct IO_APIC_route_entry rentry;
1955 struct irq_pin_list *p;
1956
1957 if (which != IRQCHIP_STATE_ACTIVE)
1958 return -EINVAL;
1959
1960 *state = false;
1961 raw_spin_lock(&ioapic_lock);
1962 for_each_irq_pin(p, mcd->irq_2_pin) {
1963 rentry = __ioapic_read_entry(p->apic, p->pin);
		/*
		 * The remote IRR is only valid in level trigger mode. Its
		 * meaning is undefined for edge triggered interrupts and
		 * irrelevant because the IO-APIC treats them as fire and
		 * forget.
		 */
1970 if (rentry.irr && rentry.is_level) {
1971 *state = true;
1972 break;
1973 }
1974 }
1975 raw_spin_unlock(&ioapic_lock);
1976 return 0;
1977}
1978
1979static struct irq_chip ioapic_chip __read_mostly = {
1980 .name = "IO-APIC",
1981 .irq_startup = startup_ioapic_irq,
1982 .irq_mask = mask_ioapic_irq,
1983 .irq_unmask = unmask_ioapic_irq,
1984 .irq_ack = irq_chip_ack_parent,
1985 .irq_eoi = ioapic_ack_level,
1986 .irq_set_affinity = ioapic_set_affinity,
1987 .irq_retrigger = irq_chip_retrigger_hierarchy,
1988 .irq_get_irqchip_state = ioapic_irq_get_chip_state,
1989 .flags = IRQCHIP_SKIP_SET_WAKE,
1990};
1991
1992static struct irq_chip ioapic_ir_chip __read_mostly = {
1993 .name = "IR-IO-APIC",
1994 .irq_startup = startup_ioapic_irq,
1995 .irq_mask = mask_ioapic_irq,
1996 .irq_unmask = unmask_ioapic_irq,
1997 .irq_ack = irq_chip_ack_parent,
1998 .irq_eoi = ioapic_ir_ack_level,
1999 .irq_set_affinity = ioapic_set_affinity,
2000 .irq_retrigger = irq_chip_retrigger_hierarchy,
2001 .irq_get_irqchip_state = ioapic_irq_get_chip_state,
2002 .flags = IRQCHIP_SKIP_SET_WAKE,
2003};
2004
2005static inline void init_IO_APIC_traps(void)
2006{
2007 struct irq_cfg *cfg;
2008 unsigned int irq;
2009
2010 for_each_active_irq(irq) {
2011 cfg = irq_cfg(irq);
2012 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
2018 if (irq < nr_legacy_irqs())
2019 legacy_pic->make_irq(irq);
2020 else
				/* Strange. Oh, well.. */
2022 irq_set_chip(irq, &no_irq_chip);
2023 }
2024 }
2025}
2026

/*
 * The local APIC irq-chip implementation:
 */
2031static void mask_lapic_irq(struct irq_data *data)
2032{
2033 unsigned long v;
2034
2035 v = apic_read(APIC_LVT0);
2036 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2037}
2038
2039static void unmask_lapic_irq(struct irq_data *data)
2040{
2041 unsigned long v;
2042
2043 v = apic_read(APIC_LVT0);
2044 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2045}
2046
2047static void ack_lapic_irq(struct irq_data *data)
2048{
2049 ack_APIC_irq();
2050}
2051
2052static struct irq_chip lapic_chip __read_mostly = {
2053 .name = "local-APIC",
2054 .irq_mask = mask_lapic_irq,
2055 .irq_unmask = unmask_lapic_irq,
2056 .irq_ack = ack_lapic_irq,
2057};
2058
2059static void lapic_register_intr(int irq)
2060{
2061 irq_clear_status_flags(irq, IRQ_LEVEL);
2062 irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2063 "edge");
2064}
2065
/*
 * This looks a bit hackish but it's about the only one way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 */
2073static inline void __init unlock_ExtINT_logic(void)
2074{
2075 int apic, pin, i;
2076 struct IO_APIC_route_entry entry0, entry1;
2077 unsigned char save_control, save_freq_select;
2078 u32 apic_id;
2079
2080 pin = find_isa_irq_pin(8, mp_INT);
2081 if (pin == -1) {
2082 WARN_ON_ONCE(1);
2083 return;
2084 }
2085 apic = find_isa_irq_apic(8, mp_INT);
2086 if (apic == -1) {
2087 WARN_ON_ONCE(1);
2088 return;
2089 }
2090
2091 entry0 = ioapic_read_entry(apic, pin);
2092 clear_IO_APIC_pin(apic, pin);
2093
2094 apic_id = hard_smp_processor_id();
2095 memset(&entry1, 0, sizeof(entry1));
2096
2097 entry1.dest_mode_logical = true;
2098 entry1.masked = false;
2099 entry1.destid_0_7 = apic_id & 0xFF;
2100 entry1.virt_destid_8_14 = apic_id >> 8;
2101 entry1.delivery_mode = APIC_DELIVERY_MODE_EXTINT;
2102 entry1.active_low = entry0.active_low;
2103 entry1.is_level = false;
2104 entry1.vector = 0;
2105
2106 ioapic_write_entry(apic, pin, entry1);
2107
2108 save_control = CMOS_READ(RTC_CONTROL);
2109 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2110 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2111 RTC_FREQ_SELECT);
2112 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2113
2114 i = 100;
2115 while (i-- > 0) {
2116 mdelay(10);
2117 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2118 i -= 10;
2119 }
2120
2121 CMOS_WRITE(save_control, RTC_CONTROL);
2122 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2123 clear_IO_APIC_pin(apic, pin);
2124
2125 ioapic_write_entry(apic, pin, entry0);
2126}
2127
2128static int disable_timer_pin_1 __initdata;
2129
2130static int __init disable_timer_pin_setup(char *arg)
2131{
2132 disable_timer_pin_1 = 1;
2133 return 0;
2134}
2135early_param("disable_timer_pin_1", disable_timer_pin_setup);
2136
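/*
 * Allocate Linux IRQ 0 for the given timer pin directly; used by
 * check_timer() when the MP table has no usable entry for the PIT
 * interrupt.
 */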
2137static int mp_alloc_timer_irq(int ioapic, int pin)
2138{
2139 int irq = -1;
2140 struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);
2141
2142 if (domain) {
2143 struct irq_alloc_info info;
2144
2145 ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 0, 0);
2146 info.devid = mpc_ioapic_id(ioapic);
2147 info.ioapic.pin = pin;
2148 mutex_lock(&ioapic_mutex);
2149 irq = alloc_isa_irq_from_domain(domain, 0, ioapic, pin, &info);
2150 mutex_unlock(&ioapic_mutex);
2151 }
2152
2153 return irq;
2154}
2155
/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  The MP table frequently lies
 * about how the PIT (IRQ 0) is wired up, so check_timer() probes the
 * possible routings one after another until the timer interrupt is seen
 * to tick.
 */
2164static inline void __init check_timer(void)
2165{
2166 struct irq_data *irq_data = irq_get_irq_data(0);
2167 struct mp_chip_data *data = irq_data->chip_data;
2168 struct irq_cfg *cfg = irqd_cfg(irq_data);
2169 int node = cpu_to_node(0);
2170 int apic1, pin1, apic2, pin2;
2171 int no_pin1 = 0;
2172
2173 if (!global_clock_event)
2174 return;
2175
2176 local_irq_disable();
2177
2178
2179
2180
2181 legacy_pic->mask(0);
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2193 legacy_pic->init(1);
2194
2195 pin1 = find_isa_irq_pin(0, mp_INT);
2196 apic1 = find_isa_irq_apic(0, mp_INT);
2197 pin2 = ioapic_i8259.pin;
2198 apic2 = ioapic_i8259.apic;
2199
2200 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2201 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2202 cfg->vector, apic1, pin1, apic2, pin2);
2203
2204
2205
2206
2207
2208
2209
2210
2211 if (pin1 == -1) {
2212 panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC");
2213 pin1 = pin2;
2214 apic1 = apic2;
2215 no_pin1 = 1;
2216 } else if (pin2 == -1) {
2217 pin2 = pin1;
2218 apic2 = apic1;
2219 }
2220
2221 if (pin1 != -1) {
2222
2223 if (no_pin1) {
2224 mp_alloc_timer_irq(apic1, pin1);
2225 } else {
2226
2227
2228
2229
2230
2231 int idx = find_irq_entry(apic1, pin1, mp_INT);
2232
2233 if (idx != -1 && irq_is_level(idx))
2234 unmask_ioapic_irq(irq_get_irq_data(0));
2235 }
2236 irq_domain_deactivate_irq(irq_data);
2237 irq_domain_activate_irq(irq_data, false);
2238 if (timer_irq_works()) {
2239 if (disable_timer_pin_1 > 0)
2240 clear_IO_APIC_pin(0, pin1);
2241 goto out;
2242 }
2243 panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC");
2244 clear_IO_APIC_pin(apic1, pin1);
2245 if (!no_pin1)
2246 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2247 "8254 timer not connected to IO-APIC\n");
2248
2249 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2250 "(IRQ0) through the 8259A ...\n");
2251 apic_printk(APIC_QUIET, KERN_INFO
2252 "..... (found apic %d pin %d) ...\n", apic2, pin2);
2253
2254
2255
2256 replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
2257 irq_domain_deactivate_irq(irq_data);
2258 irq_domain_activate_irq(irq_data, false);
2259 legacy_pic->unmask(0);
2260 if (timer_irq_works()) {
2261 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
2262 goto out;
2263 }
2264
2265
2266
2267 legacy_pic->mask(0);
2268 clear_IO_APIC_pin(apic2, pin2);
2269 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
2270 }
2271
2272 apic_printk(APIC_QUIET, KERN_INFO
2273 "...trying to set up timer as Virtual Wire IRQ...\n");
2274
2275 lapic_register_intr(0);
2276 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);
2277 legacy_pic->unmask(0);
2278
2279 if (timer_irq_works()) {
2280 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2281 goto out;
2282 }
2283 legacy_pic->mask(0);
2284 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
2285 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
2286
2287 apic_printk(APIC_QUIET, KERN_INFO
2288 "...trying to set up timer as ExtINT IRQ...\n");
2289
2290 legacy_pic->init(0);
2291 legacy_pic->make_irq(0);
2292 apic_write(APIC_LVT0, APIC_DM_EXTINT);
2293 legacy_pic->unmask(0);
2294
2295 unlock_ExtINT_logic();
2296
2297 if (timer_irq_works()) {
2298 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
2299 goto out;
2300 }
2301 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
2302 if (apic_is_x2apic_enabled())
2303 apic_printk(APIC_QUIET, KERN_INFO
2304 "Perhaps problem with the pre-enabled x2apic mode\n"
2305 "Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
2306 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
2307 "report. Then try booting with the 'noapic' option.\n");
2308out:
2309 local_irq_enable();
2310}
2311
/*
 * IRQ 2 is the cascade input of the primary i8259 and is therefore not
 * usable as a device interrupt when a legacy PIC is present.  PIC_IRQS
 * marks it so that io_apic_irqs excludes it in the legacy case (see
 * setup_IO_APIC()).
 */
2329#define PIC_IRQS (1UL << PIC_CASCADE_IR)
2330
2331static int mp_irqdomain_create(int ioapic)
2332{
2333 struct irq_domain *parent;
2334 int hwirqs = mp_ioapic_pin_count(ioapic);
2335 struct ioapic *ip = &ioapics[ioapic];
2336 struct ioapic_domain_cfg *cfg = &ip->irqdomain_cfg;
2337 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);
2338 struct fwnode_handle *fn;
2339 struct irq_fwspec fwspec;
2340
2341 if (cfg->type == IOAPIC_DOMAIN_INVALID)
2342 return 0;
2343
2344
2345 if (cfg->dev) {
2346 fn = of_node_to_fwnode(cfg->dev);
2347 } else {
2348 fn = irq_domain_alloc_named_id_fwnode("IO-APIC", mpc_ioapic_id(ioapic));
2349 if (!fn)
2350 return -ENOMEM;
2351 }
2352
2353 fwspec.fwnode = fn;
2354 fwspec.param_count = 1;
2355 fwspec.param[0] = mpc_ioapic_id(ioapic);
2356
2357 parent = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_ANY);
2358 if (!parent) {
2359 if (!cfg->dev)
2360 irq_domain_free_fwnode(fn);
2361 return -ENODEV;
2362 }
2363
2364 ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
2365 (void *)(long)ioapic);
2366
2367 if (!ip->irqdomain) {
2368
2369 if (!cfg->dev)
2370 irq_domain_free_fwnode(fn);
2371 return -ENOMEM;
2372 }
2373
2374 ip->irqdomain->parent = parent;
2375
2376 if (cfg->type == IOAPIC_DOMAIN_LEGACY ||
2377 cfg->type == IOAPIC_DOMAIN_STRICT)
2378 ioapic_dynirq_base = max(ioapic_dynirq_base,
2379 gsi_cfg->gsi_end + 1);
2380
2381 return 0;
2382}
2383
2384static void ioapic_destroy_irqdomain(int idx)
2385{
2386 struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg;
2387 struct fwnode_handle *fn = ioapics[idx].irqdomain->fwnode;
2388
2389 if (ioapics[idx].irqdomain) {
2390 irq_domain_remove(ioapics[idx].irqdomain);
2391 if (!cfg->dev)
2392 irq_domain_free_fwnode(fn);
2393 ioapics[idx].irqdomain = NULL;
2394 }
2395}
2396
2397void __init setup_IO_APIC(void)
2398{
2399 int ioapic;
2400
2401 if (skip_ioapic_setup || !nr_ioapics)
2402 return;
2403
2404 io_apic_irqs = nr_legacy_irqs() ? ~PIC_IRQS : ~0UL;
2405
2406 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2407 for_each_ioapic(ioapic)
2408 BUG_ON(mp_irqdomain_create(ioapic));
2409
	/*
	 * Set up IO-APIC IRQ routing.
	 */
2413 x86_init.mpparse.setup_ioapic_ids();
2414
2415 sync_Arb_IDs();
2416 setup_IO_APIC_irqs();
2417 init_IO_APIC_traps();
2418 if (nr_legacy_irqs())
2419 check_timer();
2420
2421 ioapic_initialized = 1;
2422}
2423
2424static void resume_ioapic_id(int ioapic_idx)
2425{
2426 unsigned long flags;
2427 union IO_APIC_reg_00 reg_00;
2428
2429 raw_spin_lock_irqsave(&ioapic_lock, flags);
2430 reg_00.raw = io_apic_read(ioapic_idx, 0);
2431 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) {
2432 reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
2433 io_apic_write(ioapic_idx, 0, reg_00.raw);
2434 }
2435 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2436}
2437
2438static void ioapic_resume(void)
2439{
2440 int ioapic_idx;
2441
2442 for_each_ioapic_reverse(ioapic_idx)
2443 resume_ioapic_id(ioapic_idx);
2444
2445 restore_ioapic_entries();
2446}
2447
2448static struct syscore_ops ioapic_syscore_ops = {
2449 .suspend = save_ioapic_entries,
2450 .resume = ioapic_resume,
2451};
2452
2453static int __init ioapic_init_ops(void)
2454{
2455 register_syscore_ops(&ioapic_syscore_ops);
2456
2457 return 0;
2458}
2459
2460device_initcall(ioapic_init_ops);
2461
2462static int io_apic_get_redir_entries(int ioapic)
2463{
2464 union IO_APIC_reg_01 reg_01;
2465 unsigned long flags;
2466
2467 raw_spin_lock_irqsave(&ioapic_lock, flags);
2468 reg_01.raw = io_apic_read(ioapic, 1);
2469 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2470
	/* The register returns the maximum index redir index
	 * supported, which is one less than the total number of redir
	 * entries.
	 */
2475 return reg_01.bits.entries + 1;
2476}
2477
2478unsigned int arch_dynirq_lower_bound(unsigned int from)
2479{
2480
2481
2482
2483
2484 if (!ioapic_initialized)
2485 return gsi_top;
2486
2487
2488
2489
2490 return ioapic_dynirq_base ? : from;
2491}
2492
2493#ifdef CONFIG_X86_32
2494static int io_apic_get_unique_id(int ioapic, int apic_id)
2495{
2496 union IO_APIC_reg_00 reg_00;
2497 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
2498 physid_mask_t tmp;
2499 unsigned long flags;
2500 int i = 0;
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511 if (physids_empty(apic_id_map))
2512 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
2513
2514 raw_spin_lock_irqsave(&ioapic_lock, flags);
2515 reg_00.raw = io_apic_read(ioapic, 0);
2516 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2517
2518 if (apic_id >= get_physical_broadcast()) {
2519 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
2520 "%d\n", ioapic, apic_id, reg_00.bits.ID);
2521 apic_id = reg_00.bits.ID;
2522 }
2523
2524
2525
2526
2527
2528 if (apic->check_apicid_used(&apic_id_map, apic_id)) {
2529
2530 for (i = 0; i < get_physical_broadcast(); i++) {
2531 if (!apic->check_apicid_used(&apic_id_map, i))
2532 break;
2533 }
2534
2535 if (i == get_physical_broadcast())
2536 panic("Max apic_id exceeded!\n");
2537
2538 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
2539 "trying %d\n", ioapic, apic_id, i);
2540
2541 apic_id = i;
2542 }
2543
2544 apic->apicid_to_cpu_present(apic_id, &tmp);
2545 physids_or(apic_id_map, apic_id_map, tmp);
2546
2547 if (reg_00.bits.ID != apic_id) {
2548 reg_00.bits.ID = apic_id;
2549
2550 raw_spin_lock_irqsave(&ioapic_lock, flags);
2551 io_apic_write(ioapic, 0, reg_00.raw);
2552 reg_00.raw = io_apic_read(ioapic, 0);
2553 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2554
2555
2556 if (reg_00.bits.ID != apic_id) {
2557 pr_err("IOAPIC[%d]: Unable to change apic_id!\n",
2558 ioapic);
2559 return -1;
2560 }
2561 }
2562
2563 apic_printk(APIC_VERBOSE, KERN_INFO
2564 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
2565
2566 return apic_id;
2567}
2568
static u8 io_apic_unique_id(int idx, u8 id)
{
        if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
            !APIC_XAPIC(boot_cpu_apic_version))
                return io_apic_get_unique_id(idx, id);
        else
                return id;
}
#else
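/*
 * 64-bit variant: prefer the caller-supplied ID, then the ID already
 * programmed into the chip, and finally the first ID not used by any
 * other IOAPIC.
 */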
static u8 io_apic_unique_id(int idx, u8 id)
{
        union IO_APIC_reg_00 reg_00;
        DECLARE_BITMAP(used, 256);
        unsigned long flags;
        u8 new_id;
        int i;

        bitmap_zero(used, 256);
        for_each_ioapic(i)
                __set_bit(mpc_ioapic_id(i), used);

        if (!test_bit(id, used))
                return id;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(idx, 0);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
        new_id = reg_00.bits.ID;
        if (!test_bit(new_id, used)) {
                apic_printk(APIC_VERBOSE, KERN_INFO
                            "IOAPIC[%d]: Using reg apic_id %d instead of %d\n",
                            idx, new_id, id);
                return new_id;
        }

        new_id = find_first_zero_bit(used, 256);
        reg_00.bits.ID = new_id;
        raw_spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(idx, 0, reg_00.raw);
        reg_00.raw = io_apic_read(idx, 0);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);

        BUG_ON(reg_00.bits.ID != new_id);

        return new_id;
}
#endif

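/* Read the IOAPIC version field from register 1. */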
static int io_apic_get_version(int ioapic)
{
        union IO_APIC_reg_01 reg_01;
        unsigned long flags;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        reg_01.raw = io_apic_read(ioapic, 1);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);

        return reg_01.bits.version;
}

#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

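/*
 * Allocate one struct resource per IOAPIC plus storage for the "IOAPIC %u"
 * resource names in a single memblock allocation. The start/end addresses
 * are filled in later by io_apic_init_mappings().
 */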
static struct resource * __init ioapic_setup_resources(void)
{
        unsigned long n;
        struct resource *res;
        char *mem;
        int i;

        if (nr_ioapics == 0)
                return NULL;

        n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
        n *= nr_ioapics;

        mem = memblock_alloc(n, SMP_CACHE_BYTES);
        if (!mem)
                panic("%s: Failed to allocate %lu bytes\n", __func__, n);
        res = (void *)mem;

        mem += sizeof(struct resource) * nr_ioapics;

        for_each_ioapic(i) {
                res[i].name = mem;
                res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
                mem += IOAPIC_RESOURCE_NAME_SIZE;
                ioapics[i].iomem_res = &res[i];
        }

        ioapic_resources = res;

        return res;
}

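/*
 * Map each IOAPIC's register window into the fixmap area. When no MP/ACPI
 * configuration was found (or the reported address is bogus on 32-bit),
 * map a freshly allocated page instead so that reads do not fault.
 */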
void __init io_apic_init_mappings(void)
{
        unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
        struct resource *ioapic_res;
        int i;

        ioapic_res = ioapic_setup_resources();
        for_each_ioapic(i) {
                if (smp_found_config) {
                        ioapic_phys = mpc_ioapic_addr(i);
#ifdef CONFIG_X86_32
                        if (!ioapic_phys) {
                                printk(KERN_ERR
                                       "WARNING: bogus zero IO-APIC "
                                       "address found in MPTABLE, "
                                       "disabling IO/APIC support!\n");
                                smp_found_config = 0;
                                skip_ioapic_setup = 1;
                                goto fake_ioapic_page;
                        }
#endif
                } else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
                        ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE,
                                                                    PAGE_SIZE);
                        if (!ioapic_phys)
                                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                                      __func__, PAGE_SIZE, PAGE_SIZE);
                        ioapic_phys = __pa(ioapic_phys);
                }
                set_fixmap_nocache(idx, ioapic_phys);
                apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
                            __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
                            ioapic_phys);
                idx++;

                ioapic_res->start = ioapic_phys;
                ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
                ioapic_res++;
        }
}

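/* Insert the IOAPIC register windows into the iomem resource tree. */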
void __init ioapic_insert_resources(void)
{
        int i;
        struct resource *r = ioapic_resources;

        if (!r) {
                if (nr_ioapics > 0)
                        printk(KERN_ERR
                               "IO APIC resources couldn't be allocated.\n");
                return;
        }

        for_each_ioapic(i) {
                insert_resource(&iomem_resource, r);
                r++;
        }
}

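/* Return the index of the IOAPIC that handles the given GSI, or -1. */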
int mp_find_ioapic(u32 gsi)
{
        int i;

        if (nr_ioapics == 0)
                return -1;

        for_each_ioapic(i) {
                struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i);
                if (gsi >= gsi_cfg->gsi_base && gsi <= gsi_cfg->gsi_end)
                        return i;
        }

        printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
        return -1;
}

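/* Convert a GSI to a pin number relative to the given IOAPIC's GSI base. */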
int mp_find_ioapic_pin(int ioapic, u32 gsi)
{
        struct mp_ioapic_gsi *gsi_cfg;

        if (WARN_ON(ioapic < 0))
                return -1;

        gsi_cfg = mp_ioapic_gsi_routing(ioapic);
        if (WARN_ON(gsi > gsi_cfg->gsi_end))
                return -1;

        return gsi - gsi_cfg->gsi_base;
}

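/*
 * A non-functional (or absent) IOAPIC reads back all ones in its first
 * three registers; detect that so such an entry can be skipped.
 */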
static int bad_ioapic_register(int idx)
{
        union IO_APIC_reg_00 reg_00;
        union IO_APIC_reg_01 reg_01;
        union IO_APIC_reg_02 reg_02;

        reg_00.raw = io_apic_read(idx, 0);
        reg_01.raw = io_apic_read(idx, 1);
        reg_02.raw = io_apic_read(idx, 2);

        if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) {
                pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n",
                        mpc_ioapic_addr(idx));
                return 1;
        }

        return 0;
}

/* Find an unused slot in ioapics[], or MAX_IO_APICS if none is free. */
static int find_free_ioapic_entry(void)
{
        int idx;

        for (idx = 0; idx < MAX_IO_APICS; idx++)
                if (ioapics[idx].nr_registers == 0)
                        return idx;

        return MAX_IO_APICS;
}

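/*
 * Illustrative usage sketch (not taken from this file): platform discovery
 * code (e.g. the ACPI MADT parser) is expected to register each I/O APIC
 * roughly as follows; the exact caller and the cfg field values shown here
 * are assumptions that depend on the platform:
 *
 *        struct ioapic_domain_cfg cfg = {
 *                .type = IOAPIC_DOMAIN_DYNAMIC,
 *                .ops  = &mp_ioapic_irqdomain_ops,
 *        };
 *
 *        mp_register_ioapic(ioapic_id, phys_addr, gsi_base, &cfg);
 */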
/**
 * mp_register_ioapic - Register a new IOAPIC with the core
 * @id:         hardware IOAPIC ID
 * @address:    physical address of the IOAPIC register window
 * @gsi_base:   first GSI handled by this IOAPIC
 * @cfg:        irqdomain configuration for the IOAPIC
 *
 * Validates the address and GSI range, assigns a unique APIC ID, records
 * the configuration in ioapics[] and, for a hot-added IOAPIC, creates the
 * irqdomain. Returns 0 on success or a negative error code.
 */
int mp_register_ioapic(int id, u32 address, u32 gsi_base,
                       struct ioapic_domain_cfg *cfg)
{
        bool hotplug = !!ioapic_initialized;
        struct mp_ioapic_gsi *gsi_cfg;
        int idx, ioapic, entries;
        u32 gsi_end;

        if (!address) {
                pr_warn("Bogus (zero) I/O APIC address found, skipping!\n");
                return -EINVAL;
        }
        for_each_ioapic(ioapic)
                if (ioapics[ioapic].mp_config.apicaddr == address) {
                        pr_warn("address 0x%x conflicts with IOAPIC%d\n",
                                address, ioapic);
                        return -EEXIST;
                }

        idx = find_free_ioapic_entry();
        if (idx >= MAX_IO_APICS) {
                pr_warn("Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
                        MAX_IO_APICS, idx);
                return -ENOSPC;
        }

        ioapics[idx].mp_config.type = MP_IOAPIC;
        ioapics[idx].mp_config.flags = MPC_APIC_USABLE;
        ioapics[idx].mp_config.apicaddr = address;

        set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
        if (bad_ioapic_register(idx)) {
                clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
                return -ENODEV;
        }

        ioapics[idx].mp_config.apicid = io_apic_unique_id(idx, id);
        ioapics[idx].mp_config.apicver = io_apic_get_version(idx);

        /*
         * The GSI range claimed by the new IOAPIC must not overlap the
         * range of any IOAPIC that is already registered.
         */
        entries = io_apic_get_redir_entries(idx);
        gsi_end = gsi_base + entries - 1;
        for_each_ioapic(ioapic) {
                gsi_cfg = mp_ioapic_gsi_routing(ioapic);
                if ((gsi_base >= gsi_cfg->gsi_base &&
                     gsi_base <= gsi_cfg->gsi_end) ||
                    (gsi_end >= gsi_cfg->gsi_base &&
                     gsi_end <= gsi_cfg->gsi_end)) {
                        pr_warn("GSI range [%u-%u] for new IOAPIC conflicts with GSI[%u-%u]\n",
                                gsi_base, gsi_end,
                                gsi_cfg->gsi_base, gsi_cfg->gsi_end);
                        clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
                        return -ENOSPC;
                }
        }
        gsi_cfg = mp_ioapic_gsi_routing(idx);
        gsi_cfg->gsi_base = gsi_base;
        gsi_cfg->gsi_end = gsi_end;

        ioapics[idx].irqdomain = NULL;
        ioapics[idx].irqdomain_cfg = *cfg;

        /*
         * During early boot the irqdomain is created later; only a
         * hot-added IOAPIC needs its irqdomain and register save area
         * set up right here.
         */
        if (hotplug) {
                if (mp_irqdomain_create(idx)) {
                        clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
                        return -ENOMEM;
                }
                alloc_ioapic_saved_registers(idx);
        }

        if (gsi_cfg->gsi_end >= gsi_top)
                gsi_top = gsi_cfg->gsi_end + 1;
        if (nr_ioapics <= idx)
                nr_ioapics = idx + 1;

        /* A non-zero nr_registers marks the ioapics[] slot as in use. */
        ioapics[idx].nr_registers = entries;

        pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
                idx, mpc_ioapic_id(idx),
                mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
                gsi_cfg->gsi_base, gsi_cfg->gsi_end);

        return 0;
}

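/*
 * Tear down a previously registered IOAPIC identified by its GSI base,
 * provided none of its pins is still in use.
 */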
int mp_unregister_ioapic(u32 gsi_base)
{
        int ioapic, pin;
        int found = 0;

        for_each_ioapic(ioapic)
                if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) {
                        found = 1;
                        break;
                }
        if (!found) {
                pr_warn("can't find IOAPIC for GSI %d\n", gsi_base);
                return -ENODEV;
        }

        for_each_pin(ioapic, pin) {
                u32 gsi = mp_pin_to_gsi(ioapic, pin);
                int irq = mp_map_gsi_to_irq(gsi, 0, NULL);
                struct mp_chip_data *data;

                if (irq >= 0) {
                        data = irq_get_chip_data(irq);
                        if (data && data->count) {
                                pr_warn("pin%d on IOAPIC%d is still in use.\n",
                                        pin, ioapic);
                                return -EBUSY;
                        }
                }
        }

        /* Mark the slot free and release everything associated with it. */
        ioapics[ioapic].nr_registers = 0;
        ioapic_destroy_irqdomain(ioapic);
        free_ioapic_saved_registers(ioapic);
        if (ioapics[ioapic].iomem_res)
                release_resource(ioapics[ioapic].iomem_res);
        clear_fixmap(FIX_IO_APIC_BASE_0 + ioapic);
        memset(&ioapics[ioapic], 0, sizeof(ioapics[ioapic]));

        return 0;
}

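/* Return 1 if an IOAPIC with the given GSI base is registered, else 0. */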
int mp_ioapic_registered(u32 gsi_base)
{
        int ioapic;

        for_each_ioapic(ioapic)
                if (ioapics[ioapic].gsi_config.gsi_base == gsi_base)
                        return 1;

        return 0;
}

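/*
 * Determine trigger mode and polarity for a GSI: prefer the attributes
 * supplied by the caller, then any ACPI interrupt override, and finally
 * default to level triggered, active low.
 */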
static void mp_irqdomain_get_attr(u32 gsi, struct mp_chip_data *data,
                                  struct irq_alloc_info *info)
{
        if (info && info->ioapic.valid) {
                data->is_level = info->ioapic.is_level;
                data->active_low = info->ioapic.active_low;
        } else if (__acpi_get_override_irq(gsi, &data->is_level,
                                           &data->active_low) < 0) {
                data->is_level = true;
                data->active_low = true;
        }
}
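/*
 * Preset the routing entry with trigger mode and polarity. Vector and
 * destination are filled in later, when the interrupt is activated.
 */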
static void mp_preconfigure_entry(struct mp_chip_data *data)
{
        struct IO_APIC_route_entry *entry = &data->entry;

        memset(entry, 0, sizeof(*entry));
        entry->is_level = data->is_level;
        entry->active_low = data->active_low;

        /*
         * Start level triggered entries masked; they are unmasked once the
         * interrupt has been fully set up.
         */
        entry->masked = data->is_level;
}

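/*
 * Allocate and preconfigure a single IOAPIC interrupt: map the pin to a
 * Linux irq, record trigger/polarity, set up the handler, and keep legacy
 * PIC interrupts masked.
 */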
int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
                       unsigned int nr_irqs, void *arg)
{
        struct irq_alloc_info *info = arg;
        struct mp_chip_data *data;
        struct irq_data *irq_data;
        int ret, ioapic, pin;
        unsigned long flags;

        if (!info || nr_irqs > 1)
                return -EINVAL;
        irq_data = irq_domain_get_irq_data(domain, virq);
        if (!irq_data)
                return -EINVAL;

        ioapic = mp_irqdomain_ioapic_idx(domain);
        pin = info->ioapic.pin;
        if (irq_find_mapping(domain, (irq_hw_number_t)pin) > 0)
                return -EEXIST;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
        if (ret < 0) {
                kfree(data);
                return ret;
        }

        INIT_LIST_HEAD(&data->irq_2_pin);
        irq_data->hwirq = info->ioapic.pin;
        irq_data->chip = (domain->parent == x86_vector_domain) ?
                &ioapic_chip : &ioapic_ir_chip;
        irq_data->chip_data = data;
        mp_irqdomain_get_attr(mp_pin_to_gsi(ioapic, pin), data, info);

        add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);

        mp_preconfigure_entry(data);
        mp_register_handler(virq, data->is_level);

        local_irq_save(flags);
        if (virq < nr_legacy_irqs())
                legacy_pic->mask(virq);
        local_irq_restore(flags);

        apic_printk(APIC_VERBOSE, KERN_DEBUG
                    "IOAPIC[%d]: Preconfigured routing entry (%d-%d -> IRQ %d Level:%i ActiveLow:%i)\n",
                    ioapic, mpc_ioapic_id(ioapic), pin, virq,
                    data->is_level, data->active_low);
        return 0;
}

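/* Undo mp_irqdomain_alloc(): unlink the pin and free the chip data. */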
void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
                       unsigned int nr_irqs)
{
        struct irq_data *irq_data;
        struct mp_chip_data *data;

        BUG_ON(nr_irqs != 1);
        irq_data = irq_domain_get_irq_data(domain, virq);
        if (irq_data && irq_data->chip_data) {
                data = irq_data->chip_data;
                __remove_pin_from_irq(data, mp_irqdomain_ioapic_idx(domain),
                                      (int)irq_data->hwirq);
                WARN_ON(!list_empty(&data->irq_2_pin));
                kfree(irq_data->chip_data);
        }
        irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

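/*
 * Activation writes the preconfigured routing entry (now including vector
 * and destination) into the IOAPIC; deactivation simply masks the pin.
 */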
int mp_irqdomain_activate(struct irq_domain *domain,
                          struct irq_data *irq_data, bool reserve)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&ioapic_lock, flags);
        ioapic_configure_entry(irq_data);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
        return 0;
}

void mp_irqdomain_deactivate(struct irq_domain *domain,
                             struct irq_data *irq_data)
{
        /* The pin stays unused until it is activated again; just mask it. */
        ioapic_mask_entry(mp_irqdomain_ioapic_idx(domain),
                          (int)irq_data->hwirq);
}

int mp_irqdomain_ioapic_idx(struct irq_domain *domain)
{
        return (int)(long)domain->host_data;
}

const struct irq_domain_ops mp_ioapic_irqdomain_ops = {
        .alloc = mp_irqdomain_alloc,
        .free = mp_irqdomain_free,
        .activate = mp_irqdomain_activate,
        .deactivate = mp_irqdomain_deactivate,
};