1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/mm.h>
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/sched.h>
28#include <linux/pci.h>
29#include <linux/mc146818rtc.h>
30#include <linux/compiler.h>
31#include <linux/acpi.h>
32#include <linux/module.h>
33#include <linux/syscore_ops.h>
34#include <linux/msi.h>
35#include <linux/htirq.h>
36#include <linux/freezer.h>
37#include <linux/kthread.h>
38#include <linux/jiffies.h>
39#include <linux/slab.h>
40#ifdef CONFIG_ACPI
41#include <acpi/acpi_bus.h>
42#endif
43#include <linux/bootmem.h>
44#include <linux/dmar.h>
45#include <linux/hpet.h>
46
47#include <asm/idle.h>
48#include <asm/io.h>
49#include <asm/smp.h>
50#include <asm/cpu.h>
51#include <asm/desc.h>
52#include <asm/proto.h>
53#include <asm/acpi.h>
54#include <asm/dma.h>
55#include <asm/timer.h>
56#include <asm/i8259.h>
57#include <asm/msidef.h>
58#include <asm/hypertransport.h>
59#include <asm/setup.h>
60#include <asm/irq_remapping.h>
61#include <asm/hpet.h>
62#include <asm/hw_irq.h>
63
64#include <asm/apic.h>
65
/* Debug-dump helpers are only called from __init code. */
#define __apicdebuginit(type) static type __init

/* Walk the singly-linked irq_pin_list hanging off an irq_cfg. */
#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)
70
#ifdef CONFIG_IRQ_REMAP
static void irq_remap_modify_chip_defaults(struct irq_chip *chip);
static inline bool irq_remapped(struct irq_cfg *cfg)
{
	/* An irq is remapped iff an IOMMU has been bound to its cfg. */
	return cfg->irq_2_iommu.iommu != NULL;
}
#else
/* Without CONFIG_IRQ_REMAP these degenerate to no-ops. */
static inline bool irq_remapped(struct irq_cfg *cfg)
{
	return false;
}
static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
{
}
#endif
86
87
88
89
90
/*
 * Is the SiS APIC rmw bug present?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);

static struct ioapic {
	/*
	 * # of IRQ routing registers
	 */
	int nr_registers;
	/*
	 * Saved state during suspend/resume, or while enabling intr-remap.
	 */
	struct IO_APIC_route_entry *saved_registers;
	/* I/O APIC config */
	struct mpc_ioapic mp_config;
	/* IO APIC gsi routing info */
	struct mp_ioapic_gsi gsi_config;
	/* Pins already programmed (one bit per possible IOAPIC pin). */
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} ioapics[MAX_IO_APICS];
111
/* APIC version register value of IO-APIC 'ioapic_idx' (from the MP config). */
#define mpc_ioapic_ver(ioapic_idx)	ioapics[ioapic_idx].mp_config.apicver
113
114int mpc_ioapic_id(int ioapic_idx)
115{
116 return ioapics[ioapic_idx].mp_config.apicid;
117}
118
119unsigned int mpc_ioapic_addr(int ioapic_idx)
120{
121 return ioapics[ioapic_idx].mp_config.apicaddr;
122}
123
124struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx)
125{
126 return &ioapics[ioapic_idx].gsi_config;
127}
128
int nr_ioapics;

/* The one past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

/* Number of GSI-backed irqs; starts at the legacy irq count. */
static int nr_irqs_gsi = NR_IRQS_LEGACY;

#ifdef CONFIG_EISA
/* Bus type (ISA/EISA/PCI) per MP bus id; only needed for EISA ELCR lookup. */
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

/* Bit set for each MP bus id that is NOT a PCI bus. */
DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

/* Non-zero when IO-APIC setup must be skipped entirely. */
int skip_ioapic_setup;
150
151
152
153
/**
 * disable_ioapic_support() - disable ioapic support at runtime
 */
void disable_ioapic_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}
162
/* "noapic" boot parameter: disable IO-APIC usage. */
static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	disable_ioapic_support();
	return 0;
}
early_param("noapic", parse_noapic);
170
171static int io_apic_setup_irq_pin(unsigned int irq, int node,
172 struct io_apic_irq_attr *attr);
173
174
/*
 * Record an MP IRQ source entry in mp_irqs[], silently dropping exact
 * duplicates.  Panics if the fixed-size table overflows.
 */
void mp_save_irq(struct mpc_intsrc *m)
{
	int i;

	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
		m->srcbusirq, m->dstapic, m->dstirq);

	/* Already present? */
	for (i = 0; i < mp_irq_entries; i++) {
		if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
			return;
	}

	memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}
193
/* One node per (IO-APIC, pin) an irq is routed to; chained off irq_cfg. */
struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};
198
199static struct irq_pin_list *alloc_irq_pin_list(int node)
200{
201 return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
202}
203
204
205
/* Static irq_cfg storage for the legacy (ISA) interrupts. */
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
207
/*
 * Early irq init: set up the statically allocated irq_cfg's for the
 * legacy irqs and the per-IOAPIC RTE save areas.
 */
int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	int count, node, i;

	if (!legacy_pic->nr_legacy_irqs)
		io_apic_irqs = ~0UL;

	/* Allocate save areas used by suspend/resume (see save_ioapic_entries). */
	for (i = 0; i < nr_ioapics; i++) {
		ioapics[i].saved_registers =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				ioapics[i].nr_registers, GFP_KERNEL);
		if (!ioapics[i].saved_registers)
			pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
	}

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
	node = cpu_to_node(0);

	/* Make sure the legacy interrupts are marked in the bitmap */
	irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);

	for (i = 0; i < count; i++) {
		irq_set_chip_data(i, &cfg[i]);
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
		/*
		 * For legacy IRQ's, start with assigning irq0 to irq15 to
		 * IRQ0_VECTOR to IRQ15_VECTOR for all cpu's.
		 */
		if (i < legacy_pic->nr_legacy_irqs) {
			cfg[i].vector = IRQ0_VECTOR + i;
			cpumask_set_cpu(0, cfg[i].domain);
		}
	}

	return 0;
}
247
/* Fetch the irq_cfg stored as chip data for 'irq' (NULL if none). */
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	return cfg;
}
252
/*
 * Allocate a zeroed irq_cfg plus its two cpumasks on 'node'.
 * Returns NULL on any allocation failure, releasing partial allocations
 * via the goto cleanup chain.
 */
static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}
271
/* Detach 'cfg' from irq 'at' and free it together with its cpumasks. */
static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
	if (!cfg)
		return;
	/* Clear the chip data first so nobody can look the cfg up again. */
	irq_set_chip_data(at, NULL);
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}
281
/*
 * Allocate the irq descriptor at 'at' (if not already present) and attach
 * a fresh irq_cfg.  If the descriptor already exists with a cfg, reuse it.
 */
static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		/* Descriptor exists: reuse its cfg if one is attached. */
		cfg = irq_get_chip_data(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(at, node);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}
302
/* Allocate the first free irq descriptor at or above 'from'. */
static int alloc_irq_from(unsigned int from, int node)
{
	return irq_alloc_desc_from(from, node);
}
307
/* Release both the irq_cfg and the descriptor of irq 'at'. */
static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
{
	free_irq_cfg(at, cfg);
	irq_free_desc(at);
}
313
314
/*
 * MMIO register window of an IO-APIC: index register at offset 0x00,
 * data window at 0x10 and the EOI register at 0x40 -- the 'unused'
 * pads provide the spacing between them.
 */
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};
322
/* Virtual address of IO-APIC 'idx': its fixmap slot plus the sub-page offset. */
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mpc_ioapic_addr(idx) & ~PAGE_MASK);
}
328
/* Write 'vector' to the EOI register of IO-APIC 'apic'. */
static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}
334
/* Indirect register read: select 'reg' via the index window, read data. */
unsigned int native_io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}
341
/* Indirect register write: select 'reg' via the index window, write data. */
void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
349
350
351
352
353
354
355
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
364
/* View of an RTE either as two 32-bit register words or as the route entry. */
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};
369
/* Read both halves of RTE 'pin'; caller must hold ioapic_lock. */
static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;

	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);

	return eu.entry;
}
379
380static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
381{
382 union entry_union eu;
383 unsigned long flags;
384
385 raw_spin_lock_irqsave(&ioapic_lock, flags);
386 eu.entry = __ioapic_read_entry(apic, pin);
387 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
388
389 return eu.entry;
390}
391
392
393
394
395
396
397
/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first!  If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.  Caller must hold ioapic_lock.
 */
static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}
406
/* Locked wrapper around __ioapic_write_entry(). */
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
415
416
417
418
419
420
/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high word (which contains the destination).
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
431
432
433
434
435
436
/*
 * Append an (apic, pin) pair to the irq's pin list, unless it is already
 * present.  Returns 0 on success, -ENOMEM if the list node allocation fails.
 */
static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	/* Don't allow duplicates; remember the tail for the append. */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;
		last = &entry->next;
	}

	entry = alloc_irq_pin_list(node);
	if (!entry) {
		pr_err("can not alloc irq_pin_list (%d,%d,%d)\n",
		       node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	*last = entry;
	return 0;
}
461
/* Panicking wrapper: pin-list allocation failure at this point is fatal. */
static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	int ret = __add_pin_to_irq_node(cfg, node, apic, pin);

	if (ret)
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}
467
468
469
470
/*
 * Reroute an IRQ to a different pin: rewrite the matching list entry in
 * place, or add the new pin if the old one was not on the list.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right? */
			return;
		}
	}

	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
}
489
/*
 * Read-modify-write one RTE low word: AND with 'mask_and', OR in 'mask_or',
 * then run the optional 'final' hook (e.g. a posting read).
 * Caller must hold ioapic_lock.
 */
static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}
504
/* Apply __io_apic_modify_irq() to every pin the irq is routed to. */
static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}
514
/*
 * Synchronize the IO-APIC and the CPU by doing
 * a dummy read from the IO-APIC
 */
static void io_apic_sync(struct irq_pin_list *entry)
{
	struct io_apic __iomem *io_apic;

	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}
526
/* Set the mask bit on all pins of 'cfg', with a posting read to sync. */
static void mask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
535
536static void mask_ioapic_irq(struct irq_data *data)
537{
538 mask_ioapic(data->chip_data);
539}
540
/* Clear the mask bit on all pins of 'cfg'; caller holds ioapic_lock. */
static void __unmask_ioapic(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}
545
/* Locked wrapper around __unmask_ioapic(). */
static void unmask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_ioapic(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
554
555static void unmask_ioapic_irq(struct irq_data *data)
556{
557 unmask_ioapic(data->chip_data);
558}
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
/*
 * EOI one IO-APIC pin.  IO-APIC versions >= 0x20 have an EOI register;
 * older parts need the mask/edge/restore trick to clear remote IRR.
 * Caller must hold ioapic_lock.
 */
static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg)
{
	if (mpc_ioapic_ver(apic) >= 0x20) {
		/*
		 * Intr-remapping uses pin number as the virtual vector
		 * in the RTE.  Actual vector is programmed in the
		 * intr-remapping table entry, hence for the io-apic
		 * EOI we use the pin number.
		 */
		if (cfg && irq_remapped(cfg))
			io_apic_eoi(apic, pin);
		else
			io_apic_eoi(apic, vector);
	} else {
		struct IO_APIC_route_entry entry, entry1;

		entry = entry1 = __ioapic_read_entry(apic, pin);

		/*
		 * Mask the entry and change the trigger mode to edge.
		 */
		entry1.mask = 1;
		entry1.trigger = IOAPIC_EDGE;

		__ioapic_write_entry(apic, pin, entry1);

		/*
		 * Restore the previous level triggered entry.
		 */
		__ioapic_write_entry(apic, pin, entry);
	}
}
608
/* EOI every pin the irq is routed to, under ioapic_lock. */
static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin)
		__eoi_ioapic_pin(entry->apic, entry->pin, cfg->vector, cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
619
/*
 * Quiesce one IO-APIC pin: mask it, clear a pending remote IRR if any,
 * and finally wipe the routing entry.  SMI pins are left alone.
 */
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;

	/*
	 * Make sure the entry is masked and re-read the contents to check
	 * if it is a level triggered pin and if the remote-IRR is set.
	 */
	if (!entry.mask) {
		entry.mask = 1;
		ioapic_write_entry(apic, pin, entry);
		entry = ioapic_read_entry(apic, pin);
	}

	if (entry.irr) {
		unsigned long flags;

		/*
		 * Make sure the trigger mode is set to level. Explicit EOI
		 * doesn't clear the remote-IRR if the trigger mode is not
		 * set to level.
		 */
		if (!entry.trigger) {
			entry.trigger = IOAPIC_LEVEL;
			ioapic_write_entry(apic, pin, entry);
		}

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		__eoi_ioapic_pin(apic, pin, entry.vector, NULL);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	}

	/*
	 * Clear the rest of the bits in the IO-APIC RTE.
	 */
	ioapic_mask_entry(apic, pin);
	entry = ioapic_read_entry(apic, pin);
	if (entry.irr)
		pr_err("Unable to reset IRR for apic: %d, pin :%d\n",
		       mpc_ioapic_id(apic), pin);
}
667
668static void clear_IO_APIC (void)
669{
670 int apic, pin;
671
672 for (apic = 0; apic < nr_ioapics; apic++)
673 for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
674 clear_IO_APIC_pin(apic, pin);
675}
676
677#ifdef CONFIG_X86_32
678
679
680
681
682
/* "pirq=" command-line overrides; -1 means "no override for this PIRQ". */
#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};
687
/* Parse "pirq=n,n,..." used to work around broken MP-BIOS PIRQ routing. */
static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
		    "PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
			    "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
713#endif
714
715
716
717
/*
 * Saves all the IO-APIC RTE's into the per-IOAPIC save areas.
 * Returns -ENOMEM if any IOAPIC has no save area (others are still saved).
 */
int save_ioapic_entries(void)
{
	int apic, pin;
	int err = 0;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers) {
			err = -ENOMEM;
			continue;
		}

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
			ioapics[apic].saved_registers[pin] =
				ioapic_read_entry(apic, pin);
	}

	return err;
}
736
737
738
739
/*
 * Mask all IO APIC entries, based on the previously saved copies
 * (pins whose saved copy is already masked are skipped).
 */
void mask_ioapic_entries(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapics[apic].saved_registers)
			continue;

		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			struct IO_APIC_route_entry entry;

			entry = ioapics[apic].saved_registers[pin];
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}
759
760
761
762
763int restore_ioapic_entries(void)
764{
765 int apic, pin;
766
767 for (apic = 0; apic < nr_ioapics; apic++) {
768 if (!ioapics[apic].saved_registers)
769 continue;
770
771 for (pin = 0; pin < ioapics[apic].nr_registers; pin++)
772 ioapic_write_entry(apic, pin,
773 ioapics[apic].saved_registers[pin]);
774 }
775 return 0;
776}
777
778
779
780
781static int find_irq_entry(int ioapic_idx, int pin, int type)
782{
783 int i;
784
785 for (i = 0; i < mp_irq_entries; i++)
786 if (mp_irqs[i].irqtype == type &&
787 (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) ||
788 mp_irqs[i].dstapic == MP_APIC_ALL) &&
789 mp_irqs[i].dstirq == pin)
790 return i;
791
792 return -1;
793}
794
795
796
797
798static int __init find_isa_irq_pin(int irq, int type)
799{
800 int i;
801
802 for (i = 0; i < mp_irq_entries; i++) {
803 int lbus = mp_irqs[i].srcbus;
804
805 if (test_bit(lbus, mp_bus_not_pci) &&
806 (mp_irqs[i].irqtype == type) &&
807 (mp_irqs[i].srcbusirq == irq))
808
809 return mp_irqs[i].dstirq;
810 }
811 return -1;
812}
813
/* Find the IO-APIC index serving ISA interrupt 'irq' of 'type', or -1. */
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	/* Locate the matching MP IRQ source entry on a non-PCI bus. */
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}

	/* Map its destination APIC id back to our ioapics[] index. */
	if (i < mp_irq_entries) {
		int ioapic_idx;

		for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
			if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic)
				return ioapic_idx;
	}

	return -1;
}
837
838#ifdef CONFIG_EISA
839
840
841
/*
 * EISA Edge/Level control register, ELCR: one bit per legacy irq,
 * read from I/O ports 0x4d0/0x4d1.
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < legacy_pic->nr_legacy_irqs) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
		    "Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}
852
853#endif
854
855
856
857
/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */
#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR. */
#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */
#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)
874
/* Decode the polarity bits of mp_irqs[idx].irqflag (0=high, 1=low active). */
static int irq_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & 3)
	{
	case 0: /* conforms, ie. bus-type dependent polarity */
		if (test_bit(bus, mp_bus_not_pci))
			polarity = default_ISA_polarity(idx);
		else
			polarity = default_PCI_polarity(idx);
		break;
	case 1: /* high active */
	{
		polarity = 0;
		break;
	}
	case 2: /* reserved */
	{
		pr_warn("broken BIOS!!\n");
		polarity = 1;
		break;
	}
	case 3: /* low active */
	{
		polarity = 1;
		break;
	}
	default: /* invalid */
	{
		pr_warn("broken BIOS!!\n");
		polarity = 1;
		break;
	}
	}
	return polarity;
}
916
/* Decode the trigger bits of mp_irqs[idx].irqflag (0=edge, 1=level). */
static int irq_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag>>2) & 3)
	{
	case 0: /* conforms, ie. bus-type dependent */
		if (test_bit(bus, mp_bus_not_pci))
			trigger = default_ISA_trigger(idx);
		else
			trigger = default_PCI_trigger(idx);
#ifdef CONFIG_EISA
		switch (mp_bus_id_to_type[bus]) {
		case MP_BUS_ISA: /* ISA pin */
		{
			/* set before the switch */
			break;
		}
		case MP_BUS_EISA: /* EISA pin */
		{
			trigger = default_EISA_trigger(idx);
			break;
		}
		case MP_BUS_PCI: /* PCI pin */
		{
			/* set before the switch */
			break;
		}
		default: /* invalid */
		{
			pr_warn("broken BIOS!!\n");
			trigger = 1;
			break;
		}
		}
#endif
		break;
	case 1: /* edge */
	{
		trigger = 0;
		break;
	}
	case 2: /* reserved */
	{
		pr_warn("broken BIOS!!\n");
		trigger = 1;
		break;
	}
	case 3: /* level */
	{
		trigger = 1;
		break;
	}
	default: /* invalid */
	{
		pr_warn("broken BIOS!!\n");
		trigger = 0;
		break;
	}
	}
	return trigger;
}
983
/* Map an (mp_irqs index, ioapic, pin) triple to a Linux irq number. */
static int pin_2_irq(int idx, int apic, int pin)
{
	int irq;
	int bus = mp_irqs[idx].srcbus;
	struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(apic);

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		pr_err("broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
	} else {
		u32 gsi = gsi_cfg->gsi_base + pin;

		/* Low GSIs collide with the legacy irq numbers and are
		 * therefore remapped above gsi_top. */
		if (gsi >= NR_IRQS_LEGACY)
			irq = gsi;
		else
			irq = gsi_top + gsi;
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}
1028
1029
1030
1031
1032
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules.
 * Returns the irq number (filling in *irq_attr), or -1 if not found;
 * an entry matching bus/slot but not the exact pin is kept as best guess.
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
			       struct io_apic_irq_attr *irq_attr)
{
	int ioapic_idx, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
			if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		/* PCI device irqs have irqtype 0; srcbusirq encodes slot/pin. */
		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq);

			if (!(ioapic_idx || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
				set_io_apic_irq_attr(irq_attr, ioapic_idx,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				return irq;
			}
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0) {
				set_io_apic_irq_attr(irq_attr, ioapic_idx,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
1086
void lock_vector_lock(void)
{
	/* Used to the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}
1094
/* Counterpart of lock_vector_lock(). */
void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
1099
/*
 * Assign an interrupt vector for 'irq' on the cpus in 'mask'.
 * Caller must hold vector_lock.  Returns 0 on success, -EBUSY if a
 * move is already in flight, -ENOSPC when no vector is free, -ENOMEM
 * on cpumask allocation failure.
 */
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	cpumask_clear(cfg->old_domain);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask, mask);

		if (cpumask_subset(tmp_mask, cfg->domain)) {
			err = 0;
			if (cpumask_equal(tmp_mask, cfg->domain))
				break;
			/*
			 * New cpumask using the vector is a proper subset of
			 * the current in use mask. So cleanup the vector
			 * allocation for the members that are not used anymore.
			 */
			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
			cfg->move_in_progress = 1;
			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
			break;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		/* Wrapped around without finding a free vector on this cpu set. */
		if (unlikely(current_vector == vector)) {
			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
			continue;
		}

		if (test_bit(vector, used_vectors))
			goto next;

		/* The vector must be free on every cpu in the domain. */
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (cfg->vector) {
			cfg->move_in_progress = 1;
			cpumask_copy(cfg->old_domain, cfg->domain);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);
	return err;
}
1188
1189int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1190{
1191 int err;
1192 unsigned long flags;
1193
1194 raw_spin_lock_irqsave(&vector_lock, flags);
1195 err = __assign_irq_vector(irq, cfg, mask);
1196 raw_spin_unlock_irqrestore(&vector_lock, flags);
1197 return err;
1198}
1199
/*
 * Release the vector of 'irq' on all cpus of its domain, and clean up
 * any leftover per-cpu mappings from an in-flight move.
 * Caller must hold vector_lock.
 */
static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	/* Scrub the old domain's vector table entries for this irq. */
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
}
1226
void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we setup our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the inuse vectors */
	for_each_active_irq(irq) {
		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		/*
		 * If it is a legacy IRQ handled by the legacy PIC, this cpu
		 * will be part of the irq_cfg's domain.
		 */
		if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
			cpumask_set_cpu(cpu, cfg->domain);

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
	raw_spin_unlock(&vector_lock);
}
1268
1269static struct irq_chip ioapic_chip;
1270
#ifdef CONFIG_X86_32
/* Look the irq's trigger mode up in the MP table entries (0 = edge). */
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
/* On 64 bit, every PCI irq is treated as level triggered. */
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif
1294
/*
 * Install the ioapic irq_chip and flow handler for 'irq': level-triggered
 * irqs get the fasteoi handler, edge-triggered ones the edge handler.
 * Remapped irqs additionally get the remap chip defaults.
 */
static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
				 unsigned long trigger)
{
	struct irq_chip *chip = &ioapic_chip;
	irq_flow_handler_t hdl;
	bool fasteoi;

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL) {
		irq_set_status_flags(irq, IRQ_LEVEL);
		fasteoi = true;
	} else {
		irq_clear_status_flags(irq, IRQ_LEVEL);
		fasteoi = false;
	}

	if (irq_remapped(cfg)) {
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
		irq_remap_modify_chip_defaults(chip);
		fasteoi = trigger != 0;
	}

	hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
	irq_set_chip_and_handler_name(irq, chip, hdl,
				      fasteoi ? "fasteoi" : "edge");
}
1321
/*
 * Fill in an IO-APIC routing entry for 'irq'; delegates to the
 * interrupt-remapping code when remapping is enabled.
 */
static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
			      unsigned int destination, int vector,
			      struct io_apic_irq_attr *attr)
{
	if (irq_remapping_enabled)
		return setup_ioapic_remapped_entry(irq, entry, destination,
						   vector, attr);

	memset(entry, 0, sizeof(*entry));

	entry->delivery_mode = apic->irq_delivery_mode;
	entry->dest_mode     = apic->irq_dest_mode;
	entry->dest	     = destination;
	entry->vector	     = vector;
	entry->mask	     = 0;	/* enable IRQ */
	entry->trigger	     = attr->trigger;
	entry->polarity	     = attr->polarity;

	/*
	 * Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}
1349
/*
 * Program one IO-APIC pin for 'irq': assign a vector, build the RTE,
 * register the irq_chip/handler and finally write the entry to hardware.
 */
static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
			     struct io_apic_irq_attr *attr)
{
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (!IO_APIC_IRQ(irq))
		return;

	/*
	 * For legacy irqs, cfg->domain starts with cpu 0. Now that IO-APIC
	 * can handle this irq and the apic driver is finalised, widen the
	 * domain to the full vector allocation domain.
	 */
	if (irq < legacy_pic->nr_legacy_irqs &&
	    cpumask_equal(cfg->domain, cpumask_of(0)))
		apic->vector_allocation_domain(0, cfg->domain,
					       apic->target_cpus());

	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
		return;

	if (apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(),
					 &dest)) {
		pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
		__clear_irq_vector(irq, cfg);

		return;
	}

	apic_printk(APIC_VERBOSE,KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i Dest:%d)\n",
		    attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin,
		    cfg->vector, irq, attr->trigger, attr->polarity, dest);

	if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) {
		pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
		__clear_irq_vector(irq, cfg);

		return;
	}

	ioapic_register_intr(irq, cfg, attr->trigger);
	/* Keep legacy irqs masked at the PIC while the IO-APIC drives them. */
	if (irq < legacy_pic->nr_legacy_irqs)
		legacy_pic->mask(irq);

	ioapic_write_entry(attr->ioapic, attr->ioapic_pin, entry);
}
1401
1402static bool __init io_apic_pin_not_connected(int idx, int ioapic_idx, int pin)
1403{
1404 if (idx != -1)
1405 return false;
1406
1407 apic_printk(APIC_VERBOSE, KERN_DEBUG " apic %d pin %d not connected\n",
1408 mpc_ioapic_id(ioapic_idx), pin);
1409 return true;
1410}
1411
/* Set up every connected pin of one IO-APIC. */
static void __init __io_apic_setup_irqs(unsigned int ioapic_idx)
{
	int idx, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;
	unsigned int pin, irq;

	for (pin = 0; pin < ioapics[ioapic_idx].nr_registers; pin++) {
		idx = find_irq_entry(ioapic_idx, pin, mp_INT);
		if (io_apic_pin_not_connected(idx, ioapic_idx, pin))
			continue;

		irq = pin_2_irq(idx, ioapic_idx, pin);

		/* Only handle ISA-range irqs on secondary IO-APICs here. */
		if ((ioapic_idx > 0) && (irq > 16))
			continue;

		/*
		 * Skip the timer IRQ if there's a quirk handler
		 * installed and if it returns 1:
		 */
		if (apic->multi_timer_check &&
		    apic->multi_timer_check(ioapic_idx, irq))
			continue;

		set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx),
				     irq_polarity(idx));

		io_apic_setup_irq_pin(irq, node, &attr);
	}
}
1442
1443static void __init setup_IO_APIC_irqs(void)
1444{
1445 unsigned int ioapic_idx;
1446
1447 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1448
1449 for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
1450 __io_apic_setup_irqs(ioapic_idx);
1451}
1452
1453
1454
1455
1456
1457
/*
 * For ACPI systems: set up an IO-APIC pin for a GSI that was not covered
 * by the normal setup_IO_APIC_irqs() pass (non-legacy, non-first IOAPIC).
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
	int ioapic_idx = 0, pin, idx, irq, node = cpu_to_node(0);
	struct io_apic_irq_attr attr;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	ioapic_idx = mp_find_ioapic(gsi);
	if (ioapic_idx < 0)
		return;

	pin = mp_find_ioapic_pin(ioapic_idx, gsi);
	idx = find_irq_entry(ioapic_idx, pin, mp_INT);
	if (idx == -1)
		return;

	irq = pin_2_irq(idx, ioapic_idx, pin);

	/* Only handle the non legacy irqs on secondary ioapics */
	if (ioapic_idx == 0 || irq < NR_IRQS_LEGACY)
		return;

	set_io_apic_irq_attr(&attr, ioapic_idx, pin, irq_trigger(idx),
			     irq_polarity(idx));

	io_apic_setup_irq_pin_once(irq, node, &attr);
}
1486
1487
1488
1489
/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
					unsigned int pin, int vector)
{
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (irq_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(),
						  apic->target_cpus(), &dest)))
		dest = BAD_APICID;

	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* don't mask IRQ for edge */
	entry.dest = dest;
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
				      "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(ioapic_idx, pin, entry);
}
1529
1530__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
1531{
1532 int i;
1533 union IO_APIC_reg_00 reg_00;
1534 union IO_APIC_reg_01 reg_01;
1535 union IO_APIC_reg_02 reg_02;
1536 union IO_APIC_reg_03 reg_03;
1537 unsigned long flags;
1538
1539 raw_spin_lock_irqsave(&ioapic_lock, flags);
1540 reg_00.raw = io_apic_read(ioapic_idx, 0);
1541 reg_01.raw = io_apic_read(ioapic_idx, 1);
1542 if (reg_01.bits.version >= 0x10)
1543 reg_02.raw = io_apic_read(ioapic_idx, 2);
1544 if (reg_01.bits.version >= 0x20)
1545 reg_03.raw = io_apic_read(ioapic_idx, 3);
1546 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1547
1548 printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
1549 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1550 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1551 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1552 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1553
1554 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01);
1555 printk(KERN_DEBUG "....... : max redirection entries: %02X\n",
1556 reg_01.bits.entries);
1557
1558 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1559 printk(KERN_DEBUG "....... : IO APIC version: %02X\n",
1560 reg_01.bits.version);
1561
1562
1563
1564
1565
1566
1567 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1568 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1569 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1570 }
1571
1572
1573
1574
1575
1576
1577 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1578 reg_03.raw != reg_01.raw) {
1579 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1580 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1581 }
1582
1583 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1584
1585 if (irq_remapping_enabled) {
1586 printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR"
1587 " Pol Stat Indx2 Zero Vect:\n");
1588 } else {
1589 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1590 " Stat Dmod Deli Vect:\n");
1591 }
1592
1593 for (i = 0; i <= reg_01.bits.entries; i++) {
1594 if (irq_remapping_enabled) {
1595 struct IO_APIC_route_entry entry;
1596 struct IR_IO_APIC_route_entry *ir_entry;
1597
1598 entry = ioapic_read_entry(ioapic_idx, i);
1599 ir_entry = (struct IR_IO_APIC_route_entry *) &entry;
1600 printk(KERN_DEBUG " %02x %04X ",
1601 i,
1602 ir_entry->index
1603 );
1604 pr_cont("%1d %1d %1d %1d %1d "
1605 "%1d %1d %X %02X\n",
1606 ir_entry->format,
1607 ir_entry->mask,
1608 ir_entry->trigger,
1609 ir_entry->irr,
1610 ir_entry->polarity,
1611 ir_entry->delivery_status,
1612 ir_entry->index2,
1613 ir_entry->zero,
1614 ir_entry->vector
1615 );
1616 } else {
1617 struct IO_APIC_route_entry entry;
1618
1619 entry = ioapic_read_entry(ioapic_idx, i);
1620 printk(KERN_DEBUG " %02x %02X ",
1621 i,
1622 entry.dest
1623 );
1624 pr_cont("%1d %1d %1d %1d %1d "
1625 "%1d %1d %02X\n",
1626 entry.mask,
1627 entry.trigger,
1628 entry.irr,
1629 entry.polarity,
1630 entry.delivery_status,
1631 entry.dest_mode,
1632 entry.delivery_mode,
1633 entry.vector
1634 );
1635 }
1636 }
1637}
1638
/*
 * Dump every IO-APIC plus the IRQ -> (apic, pin) mappings of all active
 * interrupts routed through the IO-APIC chip.
 */
__apicdebuginit(void) print_IO_APICs(void)
{
	int ioapic_idx;
	struct irq_cfg *cfg;
	unsigned int irq;
	struct irq_chip *chip;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mpc_ioapic_id(ioapic_idx),
		       ioapics[ioapic_idx].nr_registers);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++)
		print_IO_APIC(ioapic_idx);

	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_active_irq(irq) {
		struct irq_pin_list *entry;

		/* Skip interrupts not handled by the IO-APIC chip */
		chip = irq_get_chip(irq);
		if (chip != &ioapic_chip)
			continue;

		cfg = irq_get_chip_data(irq);
		if (!cfg)
			continue;
		entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, cfg->irq_2_pin)
			pr_cont("-> %d:%d", entry->apic, entry->pin);
		pr_cont("\n");
	}

	printk(KERN_INFO ".................................... done.\n");
}
1683
1684__apicdebuginit(void) print_APIC_field(int base)
1685{
1686 int i;
1687
1688 printk(KERN_DEBUG);
1689
1690 for (i = 0; i < 8; i++)
1691 pr_cont("%08x", apic_read(base + i*0x10));
1692
1693 pr_cont("\n");
1694}
1695
/*
 * Dump the current CPU's local APIC registers to the kernel log.
 * Runs on the target CPU via smp_call_function_single(); 'dummy' is
 * the unused IPI argument.
 */
__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* Integrated (non-82489DX) APIC only */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			/* ARBPRI was dropped in the xAPIC architecture */
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		/* DFR does not exist in x2APIC mode */
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {
		/* !82489DX: due to a Pentium erratum, ESR must be written
		   before being read (except on P6-class with >3 LVTs) */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;	/* number of extended LVT entries */
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}
1797
1798__apicdebuginit(void) print_local_APICs(int maxcpu)
1799{
1800 int cpu;
1801
1802 if (!maxcpu)
1803 return;
1804
1805 preempt_disable();
1806 for_each_online_cpu(cpu) {
1807 if (cpu >= maxcpu)
1808 break;
1809 smp_call_function_single(cpu, print_local_APIC, NULL, 1);
1810 }
1811 preempt_enable();
1812}
1813
/*
 * Dump the legacy 8259A PIC state: mask, request, in-service and
 * edge/level-control registers.
 */
__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	/* Interrupt mask registers: slave (0xa1) in the high byte */
	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);

	/* Default OCW3 read mode returns the IRR */
	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);

	/* OCW3 0x0b: switch both PICs to ISR reads ... */
	outb(0x0b,0xa0);
	outb(0x0b,0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	/* ... and OCW3 0x0a: restore IRR read mode */
	outb(0x0a,0xa0);
	outb(0x0a,0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);

	/* ELCR (edge/level control) ports on EISA/PCI chipsets */
	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}
1845
1846static int __initdata show_lapic = 1;
1847static __init int setup_show_lapic(char *arg)
1848{
1849 int num = -1;
1850
1851 if (strcmp(arg, "all") == 0) {
1852 show_lapic = CONFIG_NR_CPUS;
1853 } else {
1854 get_option(&arg, &num);
1855 if (num >= 0)
1856 show_lapic = num;
1857 }
1858
1859 return 1;
1860}
1861__setup("show_lapic=", setup_show_lapic);
1862
/*
 * Late-initcall debug dump of all interrupt controllers (PIC, local
 * APICs, IO-APICs), gated on "apic=..." verbosity.
 */
__apicdebuginit(int) print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);
1881
1882
1883
/* Where (if anywhere) the i8259 is connected to an IO-APIC: {-1,-1} = none */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	for(apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < ioapics[apic].nr_registers; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see what if the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 probably is not connected the ioapic but give the
	 * mptable a chance anyway.
	 */
	i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}
1937
1938
1939
1940
/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!legacy_pic->nr_legacy_irqs)
		return;

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 *
	 * With interrupt-remapping, for now we will use virtual wire A
	 * mode, as virtual wire B is little complex (need to configure
	 * both IOAPIC RTE as well as interrupt-remapping table entry).
	 * As this gets called during crash dump, keep this simple for
	 * now.
	 */
	if (ioapic_i8259.pin != -1 && !irq_remapping_enabled) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask = 0;			/* Enabled */
		entry.trigger = 0;		/* Edge */
		entry.irr = 0;
		entry.polarity = 0;		/* High */
		entry.delivery_status = 0;
		entry.dest_mode = 0;		/* Physical */
		entry.delivery_mode = dest_ExtINT;	/* ExtInt */
		entry.vector = 0;
		entry.dest = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	/*
	 * Use virtual wire A mode when interrupt remapping is enabled.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(!irq_remapping_enabled &&
				ioapic_i8259.pin != -1);
}
1988
1989#ifdef CONFIG_X86_32
1990
1991
1992
1993
1994
1995
/*
 * Rewrite each IO-APIC's physical ID to match the ID the MP table
 * assigned it, fixing up clashes with CPU APIC IDs and out-of-range
 * BIOS values along the way.  X86_32 only (see the enclosing #ifdef).
 */
void __init setup_ioapic_ids_from_mpc_nocheck(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int ioapic_idx;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		/* Read the register 0 value */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(ioapic_idx, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mpc_ioapic_id(ioapic_idx);

		if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				ioapic_idx, mpc_ioapic_id(ioapic_idx));
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					    mpc_ioapic_id(ioapic_idx))) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				ioapic_idx, mpc_ioapic_id(ioapic_idx));
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			ioapics[ioapic_idx].mp_config.apicid = i;
		} else {
			physid_mask_t tmp;
			apic->apicid_to_cpu_present(mpc_ioapic_id(ioapic_idx),
						    &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mpc_ioapic_id(ioapic_idx));
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mpc_ioapic_id(ioapic_idx))
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mpc_ioapic_id(ioapic_idx);

		/*
		 * Update the ID register according to the right value
		 * from the MPC table if they are different.
		 */
		if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID)
			continue;

		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mpc_ioapic_id(ioapic_idx));

		reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic_idx, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(ioapic_idx, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx))
			pr_cont("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}
2096
2097void __init setup_ioapic_ids_from_mpc(void)
2098{
2099
2100 if (acpi_ioapic)
2101 return;
2102
2103
2104
2105
2106 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2107 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
2108 return;
2109 setup_ioapic_ids_from_mpc_nocheck();
2110}
2111#endif
2112
/* "no_timer_check" boot option: skip the timer-IRQ sanity test */
int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
2121
2122
2123
2124
2125
2126
2127
2128
2129
/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ.  We do the following to work around the situation:
 * verify that the timer IRQ actually ticks by busy-waiting ~10 jiffies
 * worth of time with interrupts enabled and checking that jiffies moved.
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glitch wouldn't hurt. We wait ~4 jiffies.
	 * jiffies wrap? No, this is boot-time code, but use
	 * time_after() to be safe anyway.
	 */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
/*
 * Start up an IO-APIC irq: for legacy IRQs, first mask the 8259A copy
 * and record whether an interrupt was already pending there, then
 * unmask the IO-APIC pin.  Returns 1 if an interrupt was pending.
 */
static unsigned int startup_ioapic_irq(struct irq_data *data)
{
	int was_pending = 0, irq = data->irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < legacy_pic->nr_legacy_irqs) {
		legacy_pic->mask(irq);
		if (legacy_pic->irq_pending(irq))
			was_pending = 1;
	}
	__unmask_ioapic(data->chip_data);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}
2197
/*
 * Re-trigger an irq by sending an IPI with its vector to the first CPU
 * in the irq's vector domain.
 */
static int ioapic_retrigger_irq(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned long flags;

	/* vector_lock guards cfg->domain/vector against concurrent reassignment */
	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219#ifdef CONFIG_SMP
/*
 * After an irq migration completed, ask all CPUs in the old vector
 * domain to release the stale vector (via IRQ_MOVE_CLEANUP_VECTOR).
 */
void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		/* No memory for a temporary mask: IPI each old CPU one by one */
		unsigned int i;
		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}
2235
/*
 * Handler for IRQ_MOVE_CLEANUP_VECTOR: free every vector on this CPU
 * that belongs to an irq which has finished migrating away.
 */
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	irq_enter();
	exit_idle();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __this_cpu_read(vector_irq[vector]);

		if (irq == -1)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		if (!cfg)
			continue;

		raw_spin_lock(&desc->lock);

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (cfg->move_in_progress)
			goto unlock;

		/* This vector is still the irq's live vector here: keep it */
		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Clean it up in the next
		 * attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], -1);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	irq_exit();
}
2294
/*
 * If an irq migration is pending and this CPU now services the irq on
 * its new vector, kick off cleanup of the old vector domain.
 */
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;

	if (likely(!cfg->move_in_progress))
		return;

	me = smp_processor_id();

	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
		send_cleanup_vector(cfg);
}
2307
/* Complete a pending move using the vector of the interrupt being serviced
 * (~orig_ax holds the vector number pushed on the stack at entry). */
static void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

/* Force-complete a pending move for 'irq', e.g. when a CPU goes offline. */
void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	if (!cfg)
		return;

	__irq_complete_move(cfg, cfg->vector);
}
2322#else
2323static inline void irq_complete_move(struct irq_cfg *cfg) { }
2324#endif
2325
/*
 * Rewrite every RTE of this irq with the new destination and vector.
 * Caller must hold ioapic_lock.
 */
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
	int apic, pin;
	struct irq_pin_list *entry;
	u8 vector = cfg->vector;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;

		apic = entry->apic;
		pin = entry->pin;
		/*
		 * With interrupt-remapping, destination information comes
		 * from the interrupt-remapping table entry, so don't touch
		 * the high RTE word (destination) in that case.
		 */
		if (!irq_remapped(cfg))
			io_apic_write(apic, 0x11 + pin*2, dest);
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		/* io_apic_modify() so the SiS-bug read-before-write is honoured */
		io_apic_modify(apic, 0x10 + pin*2, reg);
	}
}
2349
2350
2351
2352
2353
2354
/*
 * Either sets data->affinity to a valid value and returns the
 * cpu_mask_to_apicid of it in *dest_id, or returns an error and
 * leaves data->affinity untouched.
 */
int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  unsigned int *dest_id)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int irq = data->irq;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -1;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
	if (err) {
		/* Roll back to the previous affinity on failure */
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	cpumask_copy(data->affinity, mask);

	return 0;
}
2383
/*
 * irq_chip ->irq_set_affinity callback: pick the new vector/destination
 * and reprogram the RTEs under ioapic_lock.
 */
static int
ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	unsigned int dest, irq = data->irq;
	unsigned long flags;
	int ret;

	if (!config_enabled(CONFIG_SMP))
		return -1;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	ret = __ioapic_set_affinity(data, mask, &dest);
	if (!ret) {
		/* Only the high 8 bits are valid. */
		dest = SET_APIC_LOGICAL_ID(dest);
		__target_IO_APIC_irq(irq, dest, data->chip_data);
		ret = IRQ_SET_MASK_OK_NOCOPY;
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	return ret;
}
2406
/* Ack an edge-triggered IO-APIC irq: finish any pending migration,
 * move the irq if requested, then EOI the local APIC. */
static void ack_apic_edge(struct irq_data *data)
{
	irq_complete_move(data->chip_data);
	irq_move_irq(data);
	ack_APIC_irq();
}

/* Count of level irqs seen as edge by the APIC (chipset erratum) */
atomic_t irq_mis_count;
2415
2416#ifdef CONFIG_GENERIC_PENDING_IRQ
/*
 * Return true if any RTE of this irq still has Remote IRR set,
 * i.e. the level-triggered EOI has not yet reached the IO-APIC.
 */
static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}
2439
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
{
	/* If we are moving the irq we need to mask it */
	if (unlikely(irqd_is_setaffinity_pending(data))) {
		mask_ioapic(cfg);
		return true;
	}
	return false;
}
2449
static inline void ioapic_irqd_unmask(struct irq_data *data,
				      struct irq_cfg *cfg, bool masked)
{
	if (unlikely(masked)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic.  This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ack has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware
		 * bug and you can go talk to the chipset vendor about it.
		 */
		if (!io_apic_level_ack_pending(cfg))
			irq_move_masked_irq(data);
		unmask_ioapic(cfg);
	}
}
2485#else
/* !CONFIG_GENERIC_PENDING_IRQ: no deferred affinity moves, nothing to mask */
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
{
	return false;
}
static inline void ioapic_irqd_unmask(struct irq_data *data,
				      struct irq_cfg *cfg, bool masked)
{
}
2494#endif
2495
/*
 * Ack a level-triggered IO-APIC irq, working around the 82093AA
 * erratum where a masked level interrupt is erroneously delivered
 * as edge-triggered (so no Remote IRR gets set and the line jams).
 */
static void ack_apic_level(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;
	int i, irq = data->irq;
	unsigned long v;
	bool masked;

	irq_complete_move(cfg);
	masked = ioapic_irqd_mask(data, cfg);

	/*
	 * It appears there is an erratum which affects at least version
	 * 0x11 of the 82093AA IO-APIC (that's the one used on older
	 * boards): if a level-triggered interrupt input is masked while
	 * asserted, it is delivered as edge-triggered.  In that case
	 * the local APIC's TMR bit for the vector stays clear even
	 * though the source is level.  We sample the TMR bit before
	 * the ack: if it is clear for a level irq, the ack will not be
	 * propagated to the IO-APIC and we must EOI it by hand below.
	 */
	i = cfg->vector;
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/*
	 * Tail end of the erratum workaround: if the TMR bit was clear
	 * (interrupt was seen as edge), explicitly EOI the IO-APIC entry
	 * so the Remote IRR / line state gets released.
	 */
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);

		eoi_ioapic_irq(irq, cfg);
	}

	ioapic_irqd_unmask(data, cfg, masked);
}
2562
2563#ifdef CONFIG_IRQ_REMAP
/* With interrupt remapping, edge irqs need only a local APIC EOI. */
static void ir_ack_apic_edge(struct irq_data *data)
{
	ack_APIC_irq();
}

/* Level irqs additionally need an explicit IO-APIC EOI. */
static void ir_ack_apic_level(struct irq_data *data)
{
	ack_APIC_irq();
	eoi_ioapic_irq(data->irq, data->chip_data);
}

/* Prefix the chip name with "IR-" in /proc/interrupts etc. */
static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
{
	seq_printf(p, " IR-%s", data->chip->name);
}

/* Retarget an irq_chip's callbacks for operation behind an IOMMU. */
static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
{
	chip->irq_print_chip = ir_print_prefix;
	chip->irq_ack = ir_ack_apic_edge;
	chip->irq_eoi = ir_ack_apic_level;

	chip->irq_set_affinity = set_remapped_irq_affinity;
}
2588#endif
2589
/* The irq_chip for interrupts routed through the IO-APIC. */
static struct irq_chip ioapic_chip __read_mostly = {
	.name = "IO-APIC",
	.irq_startup = startup_ioapic_irq,
	.irq_mask = mask_ioapic_irq,
	.irq_unmask = unmask_ioapic_irq,
	.irq_ack = ack_apic_edge,
	.irq_eoi = ack_apic_level,
	.irq_set_affinity = ioapic_set_affinity,
	.irq_retrigger = ioapic_retrigger_irq,
};
2600
static inline void init_IO_APIC_traps(void)
{
	struct irq_cfg *cfg;
	unsigned int irq;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_active_irq(irq) {
		cfg = irq_get_chip_data(irq);
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < legacy_pic->nr_legacy_irqs)
				legacy_pic->make_irq(irq);
			else
				/* Strange. Oh, well.. */
				irq_set_chip(irq, &no_irq_chip);
		}
	}
}
2633
2634
2635
2636
2637
/* Mask the local-APIC LVT0 (timer "virtual wire") interrupt. */
static void mask_lapic_irq(struct irq_data *data)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

/* Unmask the local-APIC LVT0 interrupt. */
static void unmask_lapic_irq(struct irq_data *data)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void ack_lapic_irq(struct irq_data *data)
{
	ack_APIC_irq();
}

/* irq_chip used when the timer is delivered directly via LVT0. */
static struct irq_chip lapic_chip __read_mostly = {
	.name = "local-APIC",
	.irq_mask = mask_lapic_irq,
	.irq_unmask = unmask_lapic_irq,
	.irq_ack = ack_lapic_irq,
};
2665
/* Route 'irq' through the local-APIC chip as an edge interrupt. */
static void lapic_register_intr(int irq)
{
	irq_clear_status_flags(irq, IRQ_LEVEL);
	irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}
2672
2673
2674
2675
2676
2677
2678
2679
/*
 * This looks a bit hackish but it's about the only one way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	/* The RTC irq (ISA IRQ8) pin is our INTA source */
	pin = find_isa_irq_pin(8, mp_INT);
	if (pin == -1) {
		WARN_ON_ONCE(1);
		return;
	}
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1) {
		WARN_ON_ONCE(1);
		return;
	}

	/* Save the pin's entry, then point it at this CPU in ExtINT mode */
	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	/* Program the RTC to fire periodic interrupts ... */
	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	/* ... and wait up to ~1s for some of them to arrive */
	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	/* Restore the RTC and the original routing entry */
	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}
2731
/* "disable_timer_pin_1" boot option: clear IO-APIC pin 1 after the
 * timer check succeeds (workaround for some broken boards). */
static int disable_timer_pin_1 __initdata;

static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 0;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);
2740
/* Set when the timer ended up being routed via the 8259A */
int timer_through_8259 __initdata;

/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  Most of the factory defaults
 * were close to the ones that caused problems.  Try the IO-APIC pin the
 * MP table reports, then the ExtINT pin, then virtual wire via LVT0,
 * then ExtINT through the 8259A -- and panic only if nothing ticks.
 */
static inline void __init check_timer(void)
{
	struct irq_cfg *cfg = irq_get_chip_data(0);
	int node = cpu_to_node(0);
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	int no_pin1 = 0;

	local_irq_save(flags);

	/*
	 * get/set the timer IRQ vector:
	 */
	legacy_pic->mask(0);
	assign_irq_vector(0, cfg, apic->target_cpus());

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.  Also
	 * timer interrupts need to be acknowledged manually in
	 * the 8259A for the i82489DX when using the NMI
	 * watchdog as that APIC treats NMIs as level-triggered.
	 * The AEOI mode will finish them in the 8259A
	 * automatically.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	legacy_pic->init(1);

	pin1 = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2 = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * cascade interrupt to the timer interrupt pin, although
	 * the cascade interrupt is not connected to the timer slot.
	 * Substitute the ExtINT pin if no timer pin was reported,
	 * and vice versa, so both probes below have something to try.
	 */
	if (pin1 == -1) {
		if (irq_remapping_enabled)
			panic("BIOS bug: timer not connected to IO-APIC");
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq_node(cfg, node, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		} else {
			/*
			 * for edge trigger, the IO-APIC RTE is already set
			 * up by setup_IO_APIC_irqs(); just unmask if the
			 * MP table says the pin is level-triggered.
			 */
			int idx;
			idx = find_irq_entry(apic1, pin1, mp_INT);
			if (idx != -1 && irq_trigger(idx))
				unmask_ioapic(cfg);
		}
		if (timer_irq_works()) {
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
		if (irq_remapping_enabled)
			panic("timer doesn't work through Interrupt-remapped IO-APIC");
		local_irq_disable();
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		legacy_pic->unmask(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			timer_through_8259 = 1;
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		local_irq_disable();
		legacy_pic->mask(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	legacy_pic->unmask(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	legacy_pic->mask(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	legacy_pic->init(0);
	legacy_pic->make_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	if (x2apic_preenabled)
		apic_printk(APIC_QUIET, KERN_INFO
			    "Perhaps problem with the pre-enabled x2apic mode\n"
			    "Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
	panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
		"report. Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919#define PIC_IRQS (1UL << PIC_CASCADE_IR)
2920
/*
 * Set up IO-APIC interrupt routing and traps, then (when a legacy PIC
 * is present) verify the timer interrupt actually arrives.
 */
void __init setup_IO_APIC(void)
{
	/*
	 * With legacy 8259A IRQs present, exclude the cascade pin from
	 * the set of IRQs the IO-APIC may handle; otherwise claim all.
	 */
	io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");

	/* Let the platform (re)assign IO-APIC IDs before programming. */
	x86_init.mpparse.setup_ioapic_ids();

	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	/* The timer check is only meaningful with legacy PIC IRQs. */
	if (legacy_pic->nr_legacy_irqs)
		check_timer();
}
2941
2942
2943
2944
2945
2946
2947static int __init io_apic_bug_finalize(void)
2948{
2949 if (sis_apic_bug == -1)
2950 sis_apic_bug = 0;
2951 return 0;
2952}
2953
2954late_initcall(io_apic_bug_finalize);
2955
2956static void resume_ioapic_id(int ioapic_idx)
2957{
2958 unsigned long flags;
2959 union IO_APIC_reg_00 reg_00;
2960
2961 raw_spin_lock_irqsave(&ioapic_lock, flags);
2962 reg_00.raw = io_apic_read(ioapic_idx, 0);
2963 if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) {
2964 reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
2965 io_apic_write(ioapic_idx, 0, reg_00.raw);
2966 }
2967 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2968}
2969
2970static void ioapic_resume(void)
2971{
2972 int ioapic_idx;
2973
2974 for (ioapic_idx = nr_ioapics - 1; ioapic_idx >= 0; ioapic_idx--)
2975 resume_ioapic_id(ioapic_idx);
2976
2977 restore_ioapic_entries();
2978}
2979
/* Suspend saves the routing entries; resume restores IDs and entries. */
static struct syscore_ops ioapic_syscore_ops = {
	.suspend = save_ioapic_entries,
	.resume = ioapic_resume,
};
2984
/* Register the IO-APIC suspend/resume hooks at device-init time. */
static int __init ioapic_init_ops(void)
{
	register_syscore_ops(&ioapic_syscore_ops);

	return 0;
}

device_initcall(ioapic_init_ops);
2993
2994
2995
2996
/*
 * Dynamically allocate an irq number at or above @from (never below
 * nr_irqs_gsi, which is reserved for GSIs), attach an irq_cfg and
 * assign a vector.  Returns the irq number, or 0 on failure.
 */
unsigned int create_irq_nr(unsigned int from, int node)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int ret = 0;
	int irq;

	if (from < nr_irqs_gsi)
		from = nr_irqs_gsi;

	irq = alloc_irq_from(from, node);
	if (irq < 0)
		return 0;
	cfg = alloc_irq_cfg(irq, node);
	if (!cfg) {
		free_irq_at(irq, NULL);
		return 0;
	}

	/* Vector assignment must happen under vector_lock. */
	raw_spin_lock_irqsave(&vector_lock, flags);
	if (!__assign_irq_vector(irq, cfg, apic->target_cpus()))
		ret = irq;	/* success: report the irq number */
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	if (ret) {
		irq_set_chip_data(irq, cfg);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	} else {
		/* No vector available: roll back the allocation. */
		free_irq_at(irq, cfg);
	}
	return ret;
}
3029
3030int create_irq(void)
3031{
3032 int node = cpu_to_node(0);
3033 unsigned int irq_want;
3034 int irq;
3035
3036 irq_want = nr_irqs_gsi;
3037 irq = create_irq_nr(irq_want, node);
3038
3039 if (irq == 0)
3040 irq = -1;
3041
3042 return irq;
3043}
3044
/*
 * Tear down an irq created by create_irq_nr(): block further use,
 * release any remapping state, free its vector and descriptor.
 */
void destroy_irq(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long flags;

	/* Prevent new request_irq()/probing on this irq. */
	irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);

	if (irq_remapped(cfg))
		free_remapped_irq(irq);
	/* Vector teardown must happen under vector_lock. */
	raw_spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq, cfg);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	free_irq_at(irq, cfg);
}
3059
3060
3061
3062
3063#ifdef CONFIG_PCI_MSI
/*
 * Compose the MSI address/data message for @irq: assign a vector,
 * resolve the destination APIC id, and fill in @msg.  When interrupt
 * remapping is active the message is built by the remapping layer
 * instead.  Returns 0 on success or a negative errno.
 */
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
			   struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	int err;
	unsigned dest;

	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (err)
		return err;

	/* Pick a destination APIC id within the vector's domain. */
	err = apic->cpu_mask_to_apicid_and(cfg->domain,
					   apic->target_cpus(), &dest);
	if (err)
		return err;

	if (irq_remapped(cfg)) {
		/* Remapping hardware builds the message format itself. */
		compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id);
		return err;
	}

	/* x2APIC ids can exceed 8 bits; the extra bits go in address_hi. */
	if (x2apic_enabled())
		msg->address_hi = MSI_ADDR_BASE_HI |
				  MSI_ADDR_EXT_DEST_ID(dest);
	else
		msg->address_hi = MSI_ADDR_BASE_HI;

	msg->address_lo =
		MSI_ADDR_BASE_LO |
		((apic->irq_dest_mode == 0) ?
			MSI_ADDR_DEST_MODE_PHYSICAL:
			MSI_ADDR_DEST_MODE_LOGICAL) |
		((apic->irq_delivery_mode != dest_LowestPrio) ?
			MSI_ADDR_REDIRECTION_CPU:
			MSI_ADDR_REDIRECTION_LOWPRI) |
		MSI_ADDR_DEST_ID(dest);

	/* MSIs are always edge-triggered. */
	msg->data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		((apic->irq_delivery_mode != dest_LowestPrio) ?
			MSI_DATA_DELIVERY_FIXED:
			MSI_DATA_DELIVERY_LOWPRI) |
		MSI_DATA_VECTOR(cfg->vector);

	return err;
}
3115
3116static int
3117msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
3118{
3119 struct irq_cfg *cfg = data->chip_data;
3120 struct msi_msg msg;
3121 unsigned int dest;
3122
3123 if (__ioapic_set_affinity(data, mask, &dest))
3124 return -1;
3125
3126 __get_cached_msi_msg(data->msi_desc, &msg);
3127
3128 msg.data &= ~MSI_DATA_VECTOR_MASK;
3129 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3130 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3131 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3132
3133 __write_msi_msg(data->msi_desc, &msg);
3134
3135 return IRQ_SET_MASK_OK_NOCOPY;
3136}
3137
3138
3139
3140
3141
/*
 * irq_chip for PCI MSI/MSI-X interrupts: masked/unmasked through the
 * device's MSI registers, acked as an APIC edge interrupt.
 */
static struct irq_chip msi_chip = {
	.name = "PCI-MSI",
	.irq_unmask = unmask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_ack = ack_apic_edge,
	.irq_set_affinity = msi_set_affinity,
	.irq_retrigger = ioapic_retrigger_irq,
};
3150
/*
 * Bind one MSI descriptor to @irq: compose and write the message,
 * then install the edge handler.  Returns 0 or a negative errno.
 */
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
{
	struct irq_chip *chip = &msi_chip;
	struct msi_msg msg;
	int ret;

	ret = msi_compose_msg(dev, irq, &msg, -1);
	if (ret < 0)
		return ret;

	irq_set_msi_desc(irq, msidesc);
	write_msi_msg(irq, &msg);

	/* Remapped irqs use the remapping-aware chip callbacks. */
	if (irq_remapped(irq_get_chip_data(irq))) {
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
		irq_remap_modify_chip_defaults(chip);
	}

	irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);

	return 0;
}
3175
/*
 * Allocate and set up irqs for every MSI descriptor of @dev.  When
 * interrupt remapping is enabled, the first descriptor allocates the
 * remapping slots for all @nvec vectors and later ones attach to it
 * by sub-handle.  Returns 0 on success, 1 when multi-MSI is requested
 * (not supported here), or a negative errno.
 */
int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int node, ret, sub_handle, index = 0;
	unsigned int irq, irq_want;
	struct msi_desc *msidesc;

	/* Multiple MSI (nvec > 1) is not handled by this path. */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	node = dev_to_node(&dev->dev);
	irq_want = nr_irqs_gsi;
	sub_handle = 0;
	list_for_each_entry(msidesc, &dev->msi_list, list) {
		irq = create_irq_nr(irq_want, node);
		if (irq == 0)
			return -1;
		irq_want = irq + 1;
		if (!irq_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/* First entry: reserve remap slots for all vectors. */
			index = msi_alloc_remapped_irq(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			ret = msi_setup_remapped_irq(dev, irq, index,
						     sub_handle);
			if (ret < 0)
				goto error;
		}
no_ir:
		ret = setup_msi_irq(dev, msidesc, irq);
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;

error:
	/*
	 * NOTE(review): only the irq from the failing iteration is
	 * destroyed here; irqs created in earlier iterations appear to
	 * remain allocated on this path — verify the caller's teardown
	 * covers them.
	 */
	destroy_irq(irq);
	return ret;
}
3225
/* Arch hook for MSI teardown: release the dynamically created irq. */
void native_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
3230
3231#ifdef CONFIG_DMAR_TABLE
3232static int
3233dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
3234 bool force)
3235{
3236 struct irq_cfg *cfg = data->chip_data;
3237 unsigned int dest, irq = data->irq;
3238 struct msi_msg msg;
3239
3240 if (__ioapic_set_affinity(data, mask, &dest))
3241 return -1;
3242
3243 dmar_msi_read(irq, &msg);
3244
3245 msg.data &= ~MSI_DATA_VECTOR_MASK;
3246 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3247 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3248 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3249 msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
3250
3251 dmar_msi_write(irq, &msg);
3252
3253 return IRQ_SET_MASK_OK_NOCOPY;
3254}
3255
/* irq_chip for the DMAR (IOMMU fault reporting) MSI. */
static struct irq_chip dmar_msi_type = {
	.name = "DMAR_MSI",
	.irq_unmask = dmar_msi_unmask,
	.irq_mask = dmar_msi_mask,
	.irq_ack = ack_apic_edge,
	.irq_set_affinity = dmar_msi_set_affinity,
	.irq_retrigger = ioapic_retrigger_irq,
};
3264
3265int arch_setup_dmar_msi(unsigned int irq)
3266{
3267 int ret;
3268 struct msi_msg msg;
3269
3270 ret = msi_compose_msg(NULL, irq, &msg, -1);
3271 if (ret < 0)
3272 return ret;
3273 dmar_msi_write(irq, &msg);
3274 irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3275 "edge");
3276 return 0;
3277}
3278#endif
3279
3280#ifdef CONFIG_HPET_TIMER
3281
3282static int hpet_msi_set_affinity(struct irq_data *data,
3283 const struct cpumask *mask, bool force)
3284{
3285 struct irq_cfg *cfg = data->chip_data;
3286 struct msi_msg msg;
3287 unsigned int dest;
3288
3289 if (__ioapic_set_affinity(data, mask, &dest))
3290 return -1;
3291
3292 hpet_msi_read(data->handler_data, &msg);
3293
3294 msg.data &= ~MSI_DATA_VECTOR_MASK;
3295 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3296 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3297 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3298
3299 hpet_msi_write(data->handler_data, &msg);
3300
3301 return IRQ_SET_MASK_OK_NOCOPY;
3302}
3303
/* irq_chip for HPET comparator MSIs. */
static struct irq_chip hpet_msi_type = {
	.name = "HPET_MSI",
	.irq_unmask = hpet_msi_unmask,
	.irq_mask = hpet_msi_mask,
	.irq_ack = ack_apic_edge,
	.irq_set_affinity = hpet_msi_set_affinity,
	.irq_retrigger = ioapic_retrigger_irq,
};
3312
/*
 * Set up the MSI for HPET timer block @id on @irq: allocate remapping
 * state when enabled, compose and program the message, and install
 * the edge handler.  Returns 0, -1, or a negative errno.
 */
int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct irq_chip *chip = &hpet_msi_type;
	struct msi_msg msg;
	int ret;

	if (irq_remapping_enabled) {
		if (!setup_hpet_msi_remapped(irq, id))
			return -1;
	}

	ret = msi_compose_msg(NULL, irq, &msg, id);
	if (ret < 0)
		return ret;

	hpet_msi_write(irq_get_handler_data(irq), &msg);
	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
	/* Remapped irqs use the remapping-aware chip callbacks. */
	if (irq_remapped(irq_get_chip_data(irq)))
		irq_remap_modify_chip_defaults(chip);

	irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
	return 0;
}
3336#endif
3337
3338#endif
3339
3340
3341
3342#ifdef CONFIG_HT_IRQ
3343
3344static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3345{
3346 struct ht_irq_msg msg;
3347 fetch_ht_irq_msg(irq, &msg);
3348
3349 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3350 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3351
3352 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3353 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3354
3355 write_ht_irq_msg(irq, &msg);
3356}
3357
3358static int
3359ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
3360{
3361 struct irq_cfg *cfg = data->chip_data;
3362 unsigned int dest;
3363
3364 if (__ioapic_set_affinity(data, mask, &dest))
3365 return -1;
3366
3367 target_ht_irq(data->irq, dest, cfg->vector);
3368 return IRQ_SET_MASK_OK_NOCOPY;
3369}
3370
/* irq_chip for HyperTransport interrupts. */
static struct irq_chip ht_irq_chip = {
	.name = "PCI-HT",
	.irq_mask = mask_ht_irq,
	.irq_unmask = unmask_ht_irq,
	.irq_ack = ack_apic_edge,
	.irq_set_affinity = ht_set_affinity,
	.irq_retrigger = ioapic_retrigger_irq,
};
3379
/*
 * Set up a HyperTransport irq: assign a vector, resolve the
 * destination APIC id, compose the HT irq message (created masked)
 * and install the edge handler.  Returns 0 or a negative errno.
 */
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	struct ht_irq_msg msg;
	unsigned dest;
	int err;

	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain,
					   apic->target_cpus(), &dest);
	if (err)
		return err;

	msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

	/* Edge-triggered, initially masked; delivery mode mirrors the
	 * APIC driver's configuration. */
	msg.address_lo =
		HT_IRQ_LOW_BASE |
		HT_IRQ_LOW_DEST_ID(dest) |
		HT_IRQ_LOW_VECTOR(cfg->vector) |
		((apic->irq_dest_mode == 0) ?
			HT_IRQ_LOW_DM_PHYSICAL :
			HT_IRQ_LOW_DM_LOGICAL) |
		HT_IRQ_LOW_RQEOI_EDGE |
		((apic->irq_delivery_mode != dest_LowestPrio) ?
			HT_IRQ_LOW_MT_FIXED :
			HT_IRQ_LOW_MT_ARBITRATED) |
		HT_IRQ_LOW_IRQ_MASKED;

	write_ht_irq_msg(irq, &msg);

	irq_set_chip_and_handler_name(irq, &ht_irq_chip,
				      handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);

	return 0;
}
3424#endif
3425
3426static int
3427io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
3428{
3429 struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
3430 int ret;
3431
3432 if (!cfg)
3433 return -EINVAL;
3434 ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin);
3435 if (!ret)
3436 setup_ioapic_irq(irq, cfg, attr);
3437 return ret;
3438}
3439
/*
 * Like io_apic_setup_irq_pin(), but programs each IO-APIC pin at most
 * once; repeat calls for an already-programmed pin return 0.
 */
int io_apic_setup_irq_pin_once(unsigned int irq, int node,
			       struct io_apic_irq_attr *attr)
{
	unsigned int ioapic_idx = attr->ioapic, pin = attr->ioapic_pin;
	int ret;

	/* Skip pins that were already set up. */
	if (test_bit(pin, ioapics[ioapic_idx].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mpc_ioapic_id(ioapic_idx), pin);
		return 0;
	}
	ret = io_apic_setup_irq_pin(irq, node, attr);
	if (!ret)
		set_bit(pin, ioapics[ioapic_idx].pin_programmed);
	return ret;
}
3457
3458static int __init io_apic_get_redir_entries(int ioapic)
3459{
3460 union IO_APIC_reg_01 reg_01;
3461 unsigned long flags;
3462
3463 raw_spin_lock_irqsave(&ioapic_lock, flags);
3464 reg_01.raw = io_apic_read(ioapic, 1);
3465 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3466
3467
3468
3469
3470
3471 return reg_01.bits.entries + 1;
3472}
3473
3474static void __init probe_nr_irqs_gsi(void)
3475{
3476 int nr;
3477
3478 nr = gsi_top + NR_IRQS_LEGACY;
3479 if (nr > nr_irqs_gsi)
3480 nr_irqs_gsi = nr;
3481
3482 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
3483}
3484
/* Accessor for the number of irq numbers reserved for GSIs. */
int get_nr_irqs_gsi(void)
{
	return nr_irqs_gsi;
}
3489
/*
 * Size the irq space: cap nr_irqs at the total vector count across
 * all CPUs, then shrink it to the estimated demand (GSIs plus a
 * per-CPU allowance, plus MSI/HT headroom when configured).
 * Returns the number of preallocated legacy irqs.
 */
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = nr_irqs_gsi + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/* Extra headroom for dynamically created MSI/HT irqs. */
	nr += nr_irqs_gsi * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	return NR_IRQS_LEGACY;
}
3509
3510int io_apic_set_pci_routing(struct device *dev, int irq,
3511 struct io_apic_irq_attr *irq_attr)
3512{
3513 int node;
3514
3515 if (!IO_APIC_IRQ(irq)) {
3516 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3517 irq_attr->ioapic);
3518 return -EINVAL;
3519 }
3520
3521 node = dev ? dev_to_node(dev) : cpu_to_node(0);
3522
3523 return io_apic_setup_irq_pin_once(irq, node, irq_attr);
3524}
3525
3526#ifdef CONFIG_X86_32
/*
 * Find (and program) a unique APIC id for an IO-APIC, avoiding ids
 * already used by CPUs or previously assigned IO-APICs.  Returns the
 * id actually in use, or -1 if the hardware refused the change.
 */
static int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * Build the in-use id map on first call.  The map is static,
	 * so ids reserved by earlier calls (via physids_or below)
	 * remain visible to later ones.
	 */
	if (physids_empty(apic_id_map))
		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	/* Out-of-range request: fall back to the hardware's current id. */
	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
			"%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * If the requested id is already taken, scan for the lowest
	 * free one; panic if every id below broadcast is in use.
	 */
	if (apic->check_apicid_used(&apic_id_map, apic_id)) {

		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!apic->check_apicid_used(&apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
			"trying %d\n", ioapic, apic_id, i);

		apic_id = i;
	}

	/* Reserve the chosen id in the static map. */
	apic->apicid_to_cpu_present(apic_id, &tmp);
	physids_or(apic_id_map, apic_id_map, tmp);

	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/* Read back to confirm the hardware accepted the id. */
		if (reg_00.bits.ID != apic_id) {
			pr_err("IOAPIC[%d]: Unable to change apic_id!\n",
			       ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}
3601
3602static u8 __init io_apic_unique_id(u8 id)
3603{
3604 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
3605 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
3606 return io_apic_get_unique_id(nr_ioapics, id);
3607 else
3608 return id;
3609}
3610#else
3611static u8 __init io_apic_unique_id(u8 id)
3612{
3613 int i;
3614 DECLARE_BITMAP(used, 256);
3615
3616 bitmap_zero(used, 256);
3617 for (i = 0; i < nr_ioapics; i++) {
3618 __set_bit(mpc_ioapic_id(i), used);
3619 }
3620 if (!test_bit(id, used))
3621 return id;
3622 return find_first_zero_bit(used, 256);
3623}
3624#endif
3625
3626static int __init io_apic_get_version(int ioapic)
3627{
3628 union IO_APIC_reg_01 reg_01;
3629 unsigned long flags;
3630
3631 raw_spin_lock_irqsave(&ioapic_lock, flags);
3632 reg_01.raw = io_apic_read(ioapic, 1);
3633 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3634
3635 return reg_01.bits.version;
3636}
3637
3638int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
3639{
3640 int ioapic, pin, idx;
3641
3642 if (skip_ioapic_setup)
3643 return -1;
3644
3645 ioapic = mp_find_ioapic(gsi);
3646 if (ioapic < 0)
3647 return -1;
3648
3649 pin = mp_find_ioapic_pin(ioapic, gsi);
3650 if (pin < 0)
3651 return -1;
3652
3653 idx = find_irq_entry(ioapic, pin, mp_INT);
3654 if (idx < 0)
3655 return -1;
3656
3657 *trigger = irq_trigger(idx);
3658 *polarity = irq_polarity(idx);
3659 return 0;
3660}
3661
3662
3663
3664
3665
3666
3667#ifdef CONFIG_SMP
/*
 * After SMP bring-up, point every programmed IO-APIC irq at its final
 * destination CPUs, preserving affinities that were set earlier.
 */
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	const struct cpumask *mask;
	struct irq_data *idata;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
		for (pin = 0; pin < ioapics[ioapic].nr_registers; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);

			/* On secondary IO-APICs only retarget irqs in
			 * the low (legacy) range. */
			if ((ioapic > 0) && (irq > 16))
				continue;

			idata = irq_get_irq_data(irq);

			/*
			 * Honour an affinity that was set earlier, and
			 * irqs excluded from balancing; otherwise use
			 * the APIC driver's default target cpus.
			 */
			if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata))
				mask = idata->affinity;
			else
				mask = apic->target_cpus();

			if (irq_remapping_enabled)
				set_remapped_irq_affinity(idata, mask, false);
			else
				ioapic_set_affinity(idata, mask, false);
		}

}
3704#endif
3705
/* Room for "IOAPIC NNN" plus the terminating NUL. */
#define IOAPIC_RESOURCE_NAME_SIZE 11

/* Memory resources describing the IO-APIC register windows. */
static struct resource *ioapic_resources;
3709
/*
 * Bootmem-allocate one resource per IO-APIC.  A single allocation
 * holds the resource array followed by the name strings (one
 * IOAPIC_RESOURCE_NAME_SIZE slot each).  Returns the array, or NULL
 * when there are no IO-APICs.
 */
static struct resource * __init ioapic_setup_resources(int nr_ioapics)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	/* Name strings live directly after the resource array. */
	mem += sizeof(struct resource) * nr_ioapics;

	for (i = 0; i < nr_ioapics; i++) {
		res[i].name = mem;
		res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
		mem += IOAPIC_RESOURCE_NAME_SIZE;
	}

	ioapic_resources = res;

	return res;
}
3739
/*
 * Map every IO-APIC's register window into the fixmap area and fill
 * in its resource range.  Without a valid MP-table address a dummy
 * page is mapped instead so reads don't fault.
 */
void __init native_io_apic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources(nr_ioapics);
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mpc_ioapic_addr(i);
#ifdef CONFIG_X86_32
			/* A zero address in the MP-table is bogus:
			 * disable IO-APIC support and map a dummy page. */
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
#endif
		} else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
			/* No config: back the fixmap with a fresh page. */
			ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
			__fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
			ioapic_phys);
		idx++;

		ioapic_res->start = ioapic_phys;
		ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
		ioapic_res++;
	}

	probe_nr_irqs_gsi();
}
3781
3782void __init ioapic_insert_resources(void)
3783{
3784 int i;
3785 struct resource *r = ioapic_resources;
3786
3787 if (!r) {
3788 if (nr_ioapics > 0)
3789 printk(KERN_ERR
3790 "IO APIC resources couldn't be allocated.\n");
3791 return;
3792 }
3793
3794 for (i = 0; i < nr_ioapics; i++) {
3795 insert_resource(&iomem_resource, r);
3796 r++;
3797 }
3798}
3799
3800int mp_find_ioapic(u32 gsi)
3801{
3802 int i = 0;
3803
3804 if (nr_ioapics == 0)
3805 return -1;
3806
3807
3808 for (i = 0; i < nr_ioapics; i++) {
3809 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i);
3810 if ((gsi >= gsi_cfg->gsi_base)
3811 && (gsi <= gsi_cfg->gsi_end))
3812 return i;
3813 }
3814
3815 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
3816 return -1;
3817}
3818
/*
 * Convert a GSI to the pin number on @ioapic.  Warns and returns -1
 * for an invalid ioapic index or a GSI beyond its range.
 */
int mp_find_ioapic_pin(int ioapic, u32 gsi)
{
	struct mp_ioapic_gsi *gsi_cfg;

	if (WARN_ON(ioapic == -1))
		return -1;

	gsi_cfg = mp_ioapic_gsi_routing(ioapic);
	if (WARN_ON(gsi > gsi_cfg->gsi_end))
		return -1;

	/* Pins are numbered relative to the IO-APIC's GSI base. */
	return gsi - gsi_cfg->gsi_base;
}
3832
/*
 * Sanity-check a new IO-APIC registration: reject it (returning 1)
 * when the IO-APIC table is full or the address is zero.
 */
static __init int bad_ioapic(unsigned long address)
{
	if (nr_ioapics >= MAX_IO_APICS) {
		pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
			MAX_IO_APICS, nr_ioapics);
		return 1;
	}
	if (!address) {
		pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n");
		return 1;
	}
	return 0;
}
3846
/*
 * Probe the first three registers of a freshly mapped IO-APIC.
 * All-ones in every register means nothing decoded the access
 * (no real device there); return 1 to reject the registration.
 */
static __init int bad_ioapic_register(int idx)
{
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;

	reg_00.raw = io_apic_read(idx, 0);
	reg_01.raw = io_apic_read(idx, 1);
	reg_02.raw = io_apic_read(idx, 2);

	if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) {
		pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n",
			mpc_ioapic_addr(idx));
		return 1;
	}

	return 0;
}
3865
/*
 * Register a new IO-APIC discovered via ACPI/MP tables: map its
 * registers, validate it, assign a unique APIC id, and record its
 * GSI range.  Silently skips invalid or non-responding devices.
 */
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
	int idx = 0;
	int entries;
	struct mp_ioapic_gsi *gsi_cfg;

	if (bad_ioapic(address))
		return;

	idx = nr_ioapics;

	ioapics[idx].mp_config.type = MP_IOAPIC;
	ioapics[idx].mp_config.flags = MPC_APIC_USABLE;
	ioapics[idx].mp_config.apicaddr = address;

	/* Map the register window before touching the hardware. */
	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);

	if (bad_ioapic_register(idx)) {
		clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
		return;
	}

	ioapics[idx].mp_config.apicid = io_apic_unique_id(id);
	ioapics[idx].mp_config.apicver = io_apic_get_version(idx);

	/*
	 * Record the GSI range this IO-APIC serves; the end is
	 * inclusive (base + number of redirection entries - 1).
	 */
	entries = io_apic_get_redir_entries(idx);
	gsi_cfg = mp_ioapic_gsi_routing(idx);
	gsi_cfg->gsi_base = gsi_base;
	gsi_cfg->gsi_end = gsi_base + entries - 1;

	/* Remember how many redirection entries this IO-APIC has. */
	ioapics[idx].nr_registers = entries;

	/* Track the highest GSI seen so far (exclusive upper bound). */
	if (gsi_cfg->gsi_end >= gsi_top)
		gsi_top = gsi_cfg->gsi_end + 1;

	pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
		idx, mpc_ioapic_id(idx),
		mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
		gsi_cfg->gsi_base, gsi_cfg->gsi_end);

	nr_ioapics++;
}
3915
3916
/*
 * Early setup of the system timer irq (IRQ0) through the IO-APIC,
 * using all-zero attributes (IO-APIC 0, pin 0) as an edge interrupt.
 */
void __init pre_init_apic_IRQ0(void)
{
	struct io_apic_irq_attr attr = { 0, 0, 0, 0 };

	printk(KERN_INFO "Early APIC setup for system timer0\n");
#ifndef CONFIG_SMP
	/* UP: mark the boot CPU present before the local APIC init. */
	physid_set_mask_of_physid(boot_cpu_physical_apicid,
				  &phys_cpu_present_map);
#endif
	setup_local_APIC();

	io_apic_setup_irq_pin(0, 0, &attr);
	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
				      "edge");
}
3932