1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/mm.h>
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/sched.h>
28#include <linux/pci.h>
29#include <linux/mc146818rtc.h>
30#include <linux/compiler.h>
31#include <linux/acpi.h>
32#include <linux/module.h>
33#include <linux/sysdev.h>
34#include <linux/msi.h>
35#include <linux/htirq.h>
36#include <linux/freezer.h>
37#include <linux/kthread.h>
38#include <linux/jiffies.h>
39#include <linux/slab.h>
40#ifdef CONFIG_ACPI
41#include <acpi/acpi_bus.h>
42#endif
43#include <linux/bootmem.h>
44#include <linux/dmar.h>
45#include <linux/hpet.h>
46
47#include <asm/idle.h>
48#include <asm/io.h>
49#include <asm/smp.h>
50#include <asm/cpu.h>
51#include <asm/desc.h>
52#include <asm/proto.h>
53#include <asm/acpi.h>
54#include <asm/dma.h>
55#include <asm/timer.h>
56#include <asm/i8259.h>
57#include <asm/msidef.h>
58#include <asm/hypertransport.h>
59#include <asm/setup.h>
60#include <asm/irq_remapping.h>
61#include <asm/hpet.h>
62#include <asm/hw_irq.h>
63
64#include <asm/apic.h>
65
/* Debug helpers compiled only for boot-time dumps; discarded after init. */
#define __apicdebuginit(type) static type __init
/* Walk the singly linked (apic, pin) list hanging off an irq_cfg. */
#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)
69
70
71
72
73
/*
 * Is the SiS APIC read-modify-write bug present?
 * -1 = don't know yet, probed during IO-APIC setup.
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);

/* # of redirection-table registers per IO-APIC, read from reg 01 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries (id, address, ...) from the MP table / ACPI MADT */
struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;

/* GSI base per IO-APIC, used by pin_2_irq() to map pin -> global irq */
struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];

/* One past the highest GSI number used */
u32 gsi_top;

/* MP interrupt-source entries saved by mp_save_irq() */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of valid entries in mp_irqs[] */
int mp_irq_entries;

/* number of GSI-capable irqs; defaults to the legacy PIC range */
static int nr_irqs_gsi = NR_IRQS_LEGACY;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

/* bit set => bus is ISA/EISA/MCA (i.e. not PCI) */
DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

/* set by "noapic" / arch_disable_smp_support() to skip IO-APIC setup */
int skip_ioapic_setup;
110
/*
 * Disable IO-APIC based interrupt routing (and the PCI quirks that
 * reroute interrupts to it); the system falls back to the legacy PIC.
 */
void arch_disable_smp_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

/* "noapic" boot parameter: disable IO-APIC usage entirely. */
static int __init parse_noapic(char *str)
{
	arch_disable_smp_support();
	return 0;
}
early_param("noapic", parse_noapic);
127
128
/*
 * Record one MP-table interrupt-source entry in mp_irqs[], skipping
 * exact duplicates.  Panics if MAX_IRQ_SOURCES is exceeded, since the
 * routing tables would otherwise be silently truncated.
 */
void mp_save_irq(struct mpc_intsrc *m)
{
	int i;

	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
		m->srcbusirq, m->dstapic, m->dstirq);

	/* Drop entries that are byte-for-byte identical to a saved one. */
	for (i = 0; i < mp_irq_entries; i++) {
		if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
			return;
	}

	memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}
147
/* One (IO-APIC, pin) pair feeding an irq; chained when an irq has
 * several pins routed to it. */
struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

/* Allocate a zeroed irq_pin_list node on the given NUMA node. */
static struct irq_pin_list *alloc_irq_pin_list(int node)
{
	return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
}
157
158
159
/* Statically bootstrapped irq_cfg's: only the legacy range when sparse
 * irqs are on (the rest are kmalloc'ed on demand), else the full table. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
#else
static struct irq_cfg irq_cfgx[NR_IRQS];
#endif

/*
 * Early init: attach the static irq_cfg's as chip data, allocate their
 * cpumasks and pre-assign vectors for the legacy PIC irqs on CPU 0.
 */
int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	int count, node, i;

	if (!legacy_pic->nr_legacy_irqs) {
		nr_irqs_gsi = 0;
		io_apic_irqs = ~0UL;
	}

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
	node = cpu_to_node(0);

	/* Mark the legacy irq descriptors as in use. */
	irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs);

	for (i = 0; i < count; i++) {
		set_irq_chip_data(i, &cfg[i]);
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
		/*
		 * For legacy IRQs, keep the historic IRQ0_VECTOR..IRQ15_VECTOR
		 * assignment, initially targeting CPU 0 only.
		 */
		if (i < legacy_pic->nr_legacy_irqs) {
			cfg[i].vector = IRQ0_VECTOR + i;
			cpumask_set_cpu(0, cfg[i].domain);
		}
	}

	return 0;
}
199
#ifdef CONFIG_SPARSE_IRQ
/* Sparse irqs: irq_cfg lives in the descriptor's chip data. */
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	return get_irq_chip_data(irq);
}

/*
 * Allocate an irq_cfg plus its two cpumasks on @node.
 * Returns NULL on any allocation failure; nothing is leaked.
 */
static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}

/* Detach @cfg from irq @at and release it and its cpumasks. */
static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
	if (!cfg)
		return;
	set_irq_chip_data(at, NULL);
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}

#else

/* Non-sparse: irq_cfg's are slots in the static irq_cfgx[] table. */
struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq < nr_irqs ? irq_cfgx + irq : NULL;
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	return irq_cfgx + irq;
}

/* Static table entries are never freed. */
static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { }

#endif
250
/*
 * Ensure irq @at has a descriptor and an irq_cfg; returns the cfg or
 * NULL.  If the descriptor already exists (-EEXIST) and carries a cfg,
 * that cfg is reused; otherwise a fresh cfg is attached, and on
 * allocation failure the freshly allocated descriptor is released.
 */
static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = get_irq_chip_data(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(at, node);
	if (cfg)
		set_irq_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}

/* Allocate the first free irq descriptor at or above @from on @node. */
static int alloc_irq_from(unsigned int from, int node)
{
	return irq_alloc_desc_from(from, node);
}

/* Release both the irq_cfg and the descriptor for irq @at. */
static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
{
	free_irq_cfg(at, cfg);
	irq_free_desc(at);
}
282
/*
 * Memory-mapped IO-APIC register window: an index register at offset
 * 0x00 selects the register, reads/writes go through the data register
 * at offset 0x10; EOI register at offset 0x40.
 */
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};

/* Virtual base of IO-APIC @idx, via its fixmap slot plus the
 * sub-page offset of its physical address. */
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
}

/* Directed EOI: tell IO-APIC @apic that @vector has been serviced. */
static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}

/* Indexed register read: select @reg, then read the data window. */
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

/* Indexed register write: select @reg, then write @value. */
static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/*
 * Re-write a value for a read-modify-write cycle where a prior
 * io_apic_read() already set up the index register.  Buggy SiS
 * IO-APICs need the index rewritten before every data access.
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
331
/*
 * Return true if any pin routed to this irq still has its Remote-IRR
 * bit set, i.e. a level-triggered interrupt is awaiting EOI.
 */
static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		/* 0x10 + 2*pin: low dword of the pin's redirection entry */
		reg = io_apic_read(entry->apic, 0x10 + pin*2);

		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}
354
/* View a 64-bit redirection-table entry as two 32-bit register words. */
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

/* Read the 64-bit redirection entry of (apic, pin) under ioapic_lock. */
static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	return eu.entry;
}

/*
 * Write a redirection entry; caller holds ioapic_lock.  The high word
 * (destination) is written before the low word (mask/vector) so the
 * entry is never live with a stale destination.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

/* Locked wrapper around __ioapic_write_entry(). */
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
394
395
396
397
398
399
/*
 * Mask a redirection entry by writing a fresh entry with only the mask
 * bit set.  The low word (which carries the mask bit) is written first
 * so the pin is masked before the high word is cleared.
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
410
411
412
413
414
415
/*
 * Append (apic, pin) to the irq's pin list unless it is already there.
 * Returns 0 on success (including the duplicate case), -ENOMEM if the
 * list node cannot be allocated.
 */
static int
__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	/* Find the tail, bailing out if the pair is already present. */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;
		last = &entry->next;
	}

	entry = alloc_irq_pin_list(node);
	if (!entry) {
		printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
				node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	*last = entry;
	return 0;
}

/* Same as above but treats allocation failure as fatal (boot path). */
static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	if (__add_pin_to_irq_node(cfg, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}
447
448
449
450
/*
 * Reroute an irq from (oldapic, oldpin) to (newapic, newpin): rewrite
 * the matching pin-list entry in place, or append the new pair if the
 * old one was never registered.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every irq has at most one old (apic, pin) entry */
			return;
		}
	}

	/* old (apic, pin) not found: just add the new pair */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
}
469
/*
 * Read-modify-write the low word of one pin's redirection entry:
 * new = (old & mask_and) | mask_or.  @final, if non-NULL, runs after
 * the write (e.g. a dummy read to flush posted writes).
 * Caller holds ioapic_lock.
 */
static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}

/* Apply the same read-modify-write to every pin routed to this irq. */
static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}

/* Mask the pin and switch it to edge trigger (clears level bit). */
static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
			     IO_APIC_REDIR_MASKED, NULL);
}

/* Switch the pin back to level trigger and unmask it. */
static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
			     IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}
506
/*
 * Synchronize the IO-APIC and the CPU by doing a dummy read from the
 * IO-APIC data window, forcing any posted write to complete.
 */
static void io_apic_sync(struct irq_pin_list *entry)
{
	struct io_apic __iomem *io_apic;
	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

/* Mask all pins of @cfg, synced so the mask is visible on return. */
static void mask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/* irq_chip ->mask callback. */
static void mask_ioapic_irq(struct irq_data *data)
{
	mask_ioapic(data->chip_data);
}

/* Unmask all pins of @cfg; caller holds ioapic_lock. */
static void __unmask_ioapic(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

/* Locked wrapper around __unmask_ioapic(). */
static void unmask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_ioapic(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/* irq_chip ->unmask callback. */
static void unmask_ioapic_irq(struct irq_data *data)
{
	unmask_ioapic(data->chip_data);
}
550
/*
 * Disable one IO-APIC pin by masking its redirection entry.  Entries
 * programmed for SMI delivery are left alone — the BIOS may rely on
 * them.
 */
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;

	/* Disable it in the IO-APIC irq-routing table: */
	ioapic_mask_entry(apic, pin);
}

/* Mask every pin of every IO-APIC (except SMI pins). */
static void clear_IO_APIC (void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}
573
#ifdef CONFIG_X86_32
/*
 * Support for the "pirq=" boot parameter: a manual PIRQ -> IRQ mapping
 * used to work around broken MP-BIOS interrupt routing on 32-bit
 * systems.  -1 means "no override for this PIRQ".
 */
#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};

/* Parse "pirq=irq0,irq1,..." into pirq_entries[] (stored reversed:
 * the first value on the command line maps the highest PIRQ). */
static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	/* ints[0] is the number of values actually parsed */
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif
611
612struct IO_APIC_route_entry **alloc_ioapic_entries(void)
613{
614 int apic;
615 struct IO_APIC_route_entry **ioapic_entries;
616
617 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
618 GFP_KERNEL);
619 if (!ioapic_entries)
620 return 0;
621
622 for (apic = 0; apic < nr_ioapics; apic++) {
623 ioapic_entries[apic] =
624 kzalloc(sizeof(struct IO_APIC_route_entry) *
625 nr_ioapic_registers[apic], GFP_KERNEL);
626 if (!ioapic_entries[apic])
627 goto nomem;
628 }
629
630 return ioapic_entries;
631
632nomem:
633 while (--apic >= 0)
634 kfree(ioapic_entries[apic]);
635 kfree(ioapic_entries);
636
637 return 0;
638}
639
640
641
642
/*
 * Snapshot every IO-APIC redirection entry into the buffers allocated
 * by alloc_ioapic_entries().  Returns 0, or -ENOMEM if any buffer is
 * missing.
 */
int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic, pin;

	if (!ioapic_entries)
		return -ENOMEM;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_entries[apic])
			return -ENOMEM;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_entries[apic][pin] =
				ioapic_read_entry(apic, pin);
	}

	return 0;
}
661
662
663
664
/*
 * Mask every pin that the saved snapshot shows as unmasked.  Works on
 * the saved copies so already-masked pins are not rewritten.
 */
void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic, pin;

	if (!ioapic_entries)
		return;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_entries[apic])
			break;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

			entry = ioapic_entries[apic][pin];
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}
687
688
689
690
/*
 * Write the saved snapshot back into the IO-APICs, undoing
 * mask_IO_APIC_setup().  Returns 0, or -ENOMEM if a buffer is missing.
 */
int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic, pin;

	if (!ioapic_entries)
		return -ENOMEM;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_entries[apic])
			return -ENOMEM;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
					   ioapic_entries[apic][pin]);
	}
	return 0;
}
708
709void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
710{
711 int apic;
712
713 for (apic = 0; apic < nr_ioapics; apic++)
714 kfree(ioapic_entries[apic]);
715
716 kfree(ioapic_entries);
717}
718
719
720
721
722static int find_irq_entry(int apic, int pin, int type)
723{
724 int i;
725
726 for (i = 0; i < mp_irq_entries; i++)
727 if (mp_irqs[i].irqtype == type &&
728 (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
729 mp_irqs[i].dstapic == MP_APIC_ALL) &&
730 mp_irqs[i].dstirq == pin)
731 return i;
732
733 return -1;
734}
735
736
737
738
/*
 * Find the IO-APIC pin that a legacy ISA irq of the given MP entry
 * @type is wired to.  Returns the destination pin, or -1 if unknown.
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))

			return mp_irqs[i].dstirq;
	}
	return -1;
}

/*
 * Find which IO-APIC (index into mp_ioapics[]) a legacy ISA irq of the
 * given MP entry @type is routed to.  Returns -1 if not found.
 */
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		/* translate the entry's destination APIC id to an index */
		for(apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
				return apic;
		}
	}

	return -1;
}
777
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR: one bit per irq, 1 = level
 * triggered.  Only meaningful for the legacy irq range.
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < legacy_pic->nr_legacy_irqs) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}

#endif

/*
 * ISA interrupts are always edge triggered and active high.
 * (trigger: 0 = edge, 1 = level; polarity: 0 = high, 1 = low)
 */
#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/*
 * EISA interrupts: trigger mode comes from the ELCR register,
 * polarity is active high like ISA.
 */
#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always level triggered and active low. */
#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are level triggered, polarity as ISA. */
#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
820
/*
 * Decode the polarity of an mp_irqs[] entry from MP-table irqflag
 * bits 0-1 (0 = bus default, 1 = active high, 3 = active low;
 * 2 is reserved).  Returns 0 = active high, 1 = active low.
 */
static int MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & 3)
	{
		case 0: /* conforms to spec, ie. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}
862
/*
 * Decode the trigger mode of an mp_irqs[] entry from MP-table irqflag
 * bits 2-3 (0 = bus default, 1 = edge, 3 = level; 2 is reserved).
 * Returns 0 = edge, 1 = level.
 */
static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag>>2) & 3)
	{
		case 0: /* conforms to spec, ie. bus-type dependent trigger */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
			/* EISA/MCA buses have their own defaults */
			switch (mp_bus_id_to_type[bus]) {
				case MP_BUS_ISA: /* ISA pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_EISA: /* EISA pin */
				{
					trigger = default_EISA_trigger(idx);
					break;
				}
				case MP_BUS_PCI: /* PCI pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_MCA: /* MCA pin */
				{
					trigger = default_MCA_trigger(idx);
					break;
				}
				default:
				{
					printk(KERN_WARNING "broken BIOS!!\n");
					trigger = 1;
					break;
				}
			}
#endif
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}
934
/* Thin wrappers kept as the file-local names used by the setup code. */
static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}
944
/*
 * Map an mp_irqs[] entry @idx arriving on (apic, pin) to a Linux irq
 * number.  ISA-style buses keep their source bus irq 1:1; PCI pins use
 * the GSI.  GSIs that would collide with the legacy 0..15 range are
 * remapped above gsi_top so legacy irqs keep their identity mapping.
 */
static int pin_2_irq(int idx, int apic, int pin)
{
	int irq;
	int bus = mp_irqs[idx].srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
	} else {
		u32 gsi = mp_gsi_routing[apic].gsi_base + pin;

		if (gsi >= NR_IRQS_LEGACY)
			irq = gsi;
		else
			irq = gsi_top + gsi;
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded:
	 * pins 16..23 can be overridden via the "pirq=" parameter.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}
988
989
990
991
992
/*
 * Find the irq (and fill in @irq_attr) for a PCI device's (bus, slot,
 * pin).  An exact pin match wins; otherwise the first entry matching
 * bus and slot is returned as a best guess (multi-function devices may
 * share one MP entry).  Returns the irq, or -1 if nothing matches.
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
				struct io_apic_irq_attr *irq_attr)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		/* find the IO-APIC this entry targets */
		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		/* irqtype 0 == mp_INT; srcbusirq encodes slot<<2 | pin */
		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				return irq;
			}
			/*
			 * Use the first matching entry as a best guess
			 * if the exact pin was not found.
			 */
			if (best_guess < 0) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
1046
/*
 * Held by CPU hotplug code around online-mask changes so that vector
 * assignments see a consistent set of online CPUs.
 */
void lock_vector_lock(void)
{
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
1059
/*
 * Allocate an interrupt vector for @irq targeting a CPU in @mask.
 * Caller holds vector_lock.
 *
 * Vectors are handed out stride-8 so that consecutive irqs land in
 * different priority classes; when the space above first_system_vector
 * is exhausted, the search wraps to the next offset within the stride.
 * If the irq already has a vector that serves some still-online CPU in
 * @mask, that assignment is kept.  Returns 0 on success, -EBUSY while
 * a previous move is still in flight, -ENOSPC or -ENOMEM on failure.
 */
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 8;
	unsigned int old_vector;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	/* Keep the current vector if it still serves an online target. */
	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_and(tmp_mask, mask, cpu_online_mask);
		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
		if (!cpumask_empty(tmp_mask)) {
			free_cpumask_var(tmp_mask);
			return 0;
		}
	}

	/* Only try and allocate irqs on cpus that are present. */
	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		int new_cpu;
		int vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* Out of vectors at this offset: try the next one. */
			offset = (offset + 1) % 8;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}
		/* Wrapped all the way around: no free vector on this cpu. */
		if (unlikely(current_vector == vector))
			continue;

		if (test_bit(vector, used_vectors))
			goto next;

		/* The vector must be free on every cpu in the domain. */
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			/* begin migration away from the old domain */
			cfg->move_in_progress = 1;
			cpumask_copy(cfg->old_domain, cfg->domain);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);
	return err;
}
1139
/* Locked wrapper around __assign_irq_vector(). */
int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}
1150
/*
 * Release the vector assigned to @irq: clear the per-cpu vector tables
 * for the current domain, and — if a migration was still in progress —
 * also scrub the stale entries left in the old domain.
 * Caller holds vector_lock; BUGs if no vector was assigned.
 */
static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	/* Scan the old domain for any vector still pointing at this irq. */
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
								vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
}
1177
/*
 * Populate @cpu's vector_irq[] table from the irq_cfg domains when the
 * CPU comes online, and invalidate any table slots whose irq no longer
 * targets this CPU.  Called with vector_lock taken here; interrupts
 * are expected to be disabled by the caller.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;
	struct irq_cfg *cfg;

	raw_spin_lock(&vector_lock);
	/* Mark the inuse vectors */
	for_each_active_irq(irq) {
		cfg = get_irq_chip_data(irq);
		if (!cfg)
			continue;
		/*
		 * Legacy PIC irqs are handled on every CPU; make sure
		 * this CPU is part of their domain.
		 */
		if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
			cpumask_set_cpu(cpu, cfg->domain);

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
	raw_spin_unlock(&vector_lock);
}
1219
/* irq_chips defined later in this file; ir_* is the interrupt-remapped
 * variant. */
static struct irq_chip ioapic_chip;
static struct irq_chip ir_ioapic_chip;

/* trigger values passed to ioapic_register_intr() */
#define IOAPIC_AUTO     -1
#define IOAPIC_EDGE     0
#define IOAPIC_LEVEL    1

#ifdef CONFIG_X86_32
/*
 * 32-bit: look the irq up in the MP table to decide edge vs level;
 * nonexistent irqs default to edge (0).
 */
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
/* 64-bit: all IO-APIC irqs are treated as level triggered here. */
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif
1250
/*
 * Attach the right irq_chip and flow handler for an IO-APIC irq:
 * fasteoi for level-triggered, edge handler otherwise, using the
 * interrupt-remapping chip variant when remapping is active.
 */
static void ioapic_register_intr(unsigned int irq, unsigned long trigger)
{
	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
		irq_set_status_flags(irq, IRQ_LEVEL);
	else
		irq_clear_status_flags(irq, IRQ_LEVEL);

	if (irq_remapped(get_irq_chip_data(irq))) {
		/* remapped irqs can be moved in process context */
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
		if (trigger)
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_fasteoi_irq,
						     "fasteoi");
		else
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_edge_irq, "edge");
		return;
	}

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq,
					      "fasteoi");
	else
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
}
1281
/*
 * Fill in a redirection-table entry for @irq.  With interrupt
 * remapping the entry is rebuilt in IR format (the IRTE holds vector
 * and destination; the RTE carries the IRTE index); otherwise the
 * vector/destination go straight into the RTE.  Level-triggered
 * entries start masked until the handler is in place.
 */
static int setup_ioapic_entry(int apic_id, int irq,
			      struct IO_APIC_route_entry *entry,
			      unsigned int destination, int trigger,
			      int polarity, int vector, int pin)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry,0,sizeof(*entry));

	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic_id);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n", apic_id);

		prepare_irte(&irte, vector, destination);

		/* Set source-id of interrupt request */
		set_ioapic_sid(&irte, apic_id);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
		/*
		 * IO-APIC RTE will be configured with virtual vector.
		 * irq handler will do the explicit EOI to the io-apic.
		 */
		ir_entry->vector = pin;
	} else {
		entry->delivery_mode = apic->irq_delivery_mode;
		entry->dest_mode = apic->irq_dest_mode;
		entry->dest = destination;
		entry->vector = vector;
	}

	entry->mask = 0;				/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;

	/*
	 * Mask level triggered irqs until the handler is set up:
	 * they may fire on a spuriously floating line otherwise.
	 */
	if (trigger)
		entry->mask = 1;
	return 0;
}
1340
/*
 * Program one (apic, pin) for @irq: allocate a vector, build the
 * redirection entry, install the flow handler and finally write the
 * entry to the IO-APIC.
 */
static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq,
			     struct irq_cfg *cfg, int trigger, int polarity)
{
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (!IO_APIC_IRQ(irq))
		return;
	/*
	 * For legacy irqs, cfg->domain starts with cpu 0; rebuild the
	 * full allocation domain before assigning the vector so the
	 * first assignment succeeds.
	 */
	if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
		apic->vector_allocation_domain(0, cfg->domain);

	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
		return;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

	apic_printk(APIC_VERBOSE,KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
		    irq, trigger, polarity);


	if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
			       dest, trigger, polarity, cfg->vector, pin)) {
		printk("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
		       mp_ioapics[apic_id].apicid, pin);
		__clear_irq_vector(irq, cfg);
		return;
	}

	ioapic_register_intr(irq, trigger);
	/* keep the legacy PIC from delivering this irq in parallel */
	if (irq < legacy_pic->nr_legacy_irqs)
		legacy_pic->mask(irq);

	ioapic_write_entry(apic_id, pin, entry);
}
1383
/* Per-IO-APIC bitmap of pins already programmed (used by the
 * setup_IO_APIC_irq_extra() late path to avoid reprogramming). */
static struct {
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} mp_ioapic_routing[MAX_IO_APICS];

/*
 * Walk every IO-APIC pin that has an mp_INT routing entry and program
 * it.  Pins without an entry are only logged ("not connected").
 */
static void __init setup_IO_APIC_irqs(void)
{
	int apic_id, pin, idx, irq, notcon = 0;
	int node = cpu_to_node(0);
	struct irq_cfg *cfg;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
	for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
		idx = find_irq_entry(apic_id, pin, mp_INT);
		if (idx == -1) {
			/* accumulate unconnected pins into one log line */
			if (!notcon) {
				notcon = 1;
				apic_printk(APIC_VERBOSE,
					KERN_DEBUG " %d-%d",
					mp_ioapics[apic_id].apicid, pin);
			} else
				apic_printk(APIC_VERBOSE, " %d-%d",
					mp_ioapics[apic_id].apicid, pin);
			continue;
		}
		if (notcon) {
			apic_printk(APIC_VERBOSE,
				" (apicid-pin) not connected\n");
			notcon = 0;
		}

		irq = pin_2_irq(idx, apic_id, pin);

		/* only the first IO-APIC handles irqs above the legacy range here */
		if ((apic_id > 0) && (irq > 16))
			continue;

		/*
		 * Skip the timer IRQ if it is faked on a secondary
		 * IO-APIC (platform-specific multi-timer check).
		 */
		if (apic->multi_timer_check &&
		    apic->multi_timer_check(apic_id, irq))
			continue;

		cfg = alloc_irq_and_cfg_at(irq, node);
		if (!cfg)
			continue;

		add_pin_to_irq_node(cfg, node, apic_id, pin);
		/*
		 * don't mark it in pin_programmed, so later acpi could
		 * set it correctly when irq < 16
		 */
		setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx),
				  irq_polarity(idx));
	}

	if (notcon)
		apic_printk(APIC_VERBOSE,
			" (apicid-pin) not connected\n");
}
1446
1447
1448
1449
1450
1451
/*
 * Late setup for a pin that was skipped by setup_IO_APIC_irqs() (a
 * non-legacy irq on a secondary IO-APIC), keyed by GSI.  Uses the
 * pin_programmed bitmap so the pin is only programmed once.
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
	int apic_id = 0, pin, idx, irq, node = cpu_to_node(0);
	struct irq_cfg *cfg;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	apic_id = mp_find_ioapic(gsi);
	if (apic_id < 0)
		return;

	pin = mp_find_ioapic_pin(apic_id, gsi);
	idx = find_irq_entry(apic_id, pin, mp_INT);
	if (idx == -1)
		return;

	irq = pin_2_irq(idx, apic_id, pin);

	/* Only handle the non legacy irqs on secondary ioapics */
	if (apic_id == 0 || irq < NR_IRQS_LEGACY)
		return;

	cfg = alloc_irq_and_cfg_at(irq, node);
	if (!cfg)
		return;

	add_pin_to_irq_node(cfg, node, apic_id, pin);

	if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mp_ioapics[apic_id].apicid, pin);
		return;
	}
	set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);

	setup_ioapic_irq(apic_id, pin, irq, cfg,
			irq_trigger(idx), irq_polarity(idx));
}
1491
1492
1493
1494
/*
 * Hard-wire IRQ0 (the PIT timer) to an IO-APIC pin as an unmasked,
 * edge-triggered, active-high interrupt delivered with 'vector'.
 * Used by check_timer() while probing timer routing.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	/* With interrupt remapping the timer pin is handled elsewhere. */
	if (intr_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * Deliver the timer irq to the CPUs the apic driver targets,
	 * using the driver's destination and delivery modes.
	 */
	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* unmasked */
	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;		/* active high */
	entry.trigger = 0;		/* edge */
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic_id, pin, entry);
}
1528
1529
1530__apicdebuginit(void) print_IO_APIC(void)
1531{
1532 int apic, i;
1533 union IO_APIC_reg_00 reg_00;
1534 union IO_APIC_reg_01 reg_01;
1535 union IO_APIC_reg_02 reg_02;
1536 union IO_APIC_reg_03 reg_03;
1537 unsigned long flags;
1538 struct irq_cfg *cfg;
1539 unsigned int irq;
1540
1541 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1542 for (i = 0; i < nr_ioapics; i++)
1543 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
1544 mp_ioapics[i].apicid, nr_ioapic_registers[i]);
1545
1546
1547
1548
1549
1550 printk(KERN_INFO "testing the IO APIC.......................\n");
1551
1552 for (apic = 0; apic < nr_ioapics; apic++) {
1553
1554 raw_spin_lock_irqsave(&ioapic_lock, flags);
1555 reg_00.raw = io_apic_read(apic, 0);
1556 reg_01.raw = io_apic_read(apic, 1);
1557 if (reg_01.bits.version >= 0x10)
1558 reg_02.raw = io_apic_read(apic, 2);
1559 if (reg_01.bits.version >= 0x20)
1560 reg_03.raw = io_apic_read(apic, 3);
1561 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1562
1563 printk("\n");
1564 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
1565 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1566 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1567 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1568 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1569
1570 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01);
1571 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1572
1573 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1574 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1575
1576
1577
1578
1579
1580
1581 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1582 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1583 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1584 }
1585
1586
1587
1588
1589
1590
1591 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1592 reg_03.raw != reg_01.raw) {
1593 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1594 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1595 }
1596
1597 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1598
1599 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1600 " Stat Dmod Deli Vect:\n");
1601
1602 for (i = 0; i <= reg_01.bits.entries; i++) {
1603 struct IO_APIC_route_entry entry;
1604
1605 entry = ioapic_read_entry(apic, i);
1606
1607 printk(KERN_DEBUG " %02x %03X ",
1608 i,
1609 entry.dest
1610 );
1611
1612 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1613 entry.mask,
1614 entry.trigger,
1615 entry.irr,
1616 entry.polarity,
1617 entry.delivery_status,
1618 entry.dest_mode,
1619 entry.delivery_mode,
1620 entry.vector
1621 );
1622 }
1623 }
1624 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1625 for_each_active_irq(irq) {
1626 struct irq_pin_list *entry;
1627
1628 cfg = get_irq_chip_data(irq);
1629 if (!cfg)
1630 continue;
1631 entry = cfg->irq_2_pin;
1632 if (!entry)
1633 continue;
1634 printk(KERN_DEBUG "IRQ%d ", irq);
1635 for_each_irq_pin(entry, cfg->irq_2_pin)
1636 printk("-> %d:%d", entry->apic, entry->pin);
1637 printk("\n");
1638 }
1639
1640 printk(KERN_INFO ".................................... done.\n");
1641
1642 return;
1643}
1644
1645__apicdebuginit(void) print_APIC_field(int base)
1646{
1647 int i;
1648
1649 printk(KERN_DEBUG);
1650
1651 for (i = 0; i < 8; i++)
1652 printk(KERN_CONT "%08x", apic_read(base + i*0x10));
1653
1654 printk(KERN_CONT "\n");
1655}
1656
/*
 * Dump the registers of the local APIC of the CPU this runs on;
 * invoked per-CPU via smp_call_function_single() from
 * print_local_APICs().
 */
__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* ARBPRI exists only on integrated, pre-xAPIC parts. */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
				v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote-read register: only on the non-integrated 82489DX, or
	 * on parts reporting exactly 4 LVT entries (maxlvt == 3).
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		/* DFR is not accessible in x2APIC mode. */
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {
		/* Write ESR before reading it to latch the current value. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* Performance-counter LVT entry. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* Error LVT entry. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

	/* Extended APIC registers (AMD): dump the extended LVT entries. */
	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;	/* extended LVT count */
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	printk("\n");
}
1758
1759__apicdebuginit(void) print_local_APICs(int maxcpu)
1760{
1761 int cpu;
1762
1763 if (!maxcpu)
1764 return;
1765
1766 preempt_disable();
1767 for_each_online_cpu(cpu) {
1768 if (cpu >= maxcpu)
1769 break;
1770 smp_call_function_single(cpu, print_local_APIC, NULL, 1);
1771 }
1772 preempt_enable();
1773}
1774
/* Dump the 8259A master/slave registers: IMR, IRR, ISR and the ELCR. */
__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	/* Mask registers: slave in the high byte, master in the low. */
	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);

	/* OCW3 0x0b: select ISR for the next read; 0x0a: back to IRR. */
	outb(0x0b,0xa0);
	outb(0x0b,0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a,0xa0);
	outb(0x0a,0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);

	/* Edge/level control register (two 8-bit ports). */
	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}
1806
1807static int __initdata show_lapic = 1;
1808static __init int setup_show_lapic(char *arg)
1809{
1810 int num = -1;
1811
1812 if (strcmp(arg, "all") == 0) {
1813 show_lapic = CONFIG_NR_CPUS;
1814 } else {
1815 get_option(&arg, &num);
1816 if (num >= 0)
1817 show_lapic = num;
1818 }
1819
1820 return 1;
1821}
1822__setup("show_lapic=", setup_show_lapic);
1823
1824__apicdebuginit(int) print_ICs(void)
1825{
1826 if (apic_verbosity == APIC_QUIET)
1827 return 0;
1828
1829 print_PIC();
1830
1831
1832 if (!cpu_has_apic && !apic_from_smp_config())
1833 return 0;
1834
1835 print_local_APICs(show_lapic);
1836 print_IO_APIC();
1837
1838 return 0;
1839}
1840
1841fs_initcall(print_ICs);
1842
1843
1844
/*
 * Where the i8259 PIC's output is wired into an IO-APIC (if anywhere);
 * stays -1/-1 until enable_IO_APIC() discovers the connection.
 */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic;

	/* Nothing to do on systems without a legacy PIC. */
	if (!legacy_pic->nr_legacy_irqs)
		return;

	for(apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin = pin;
				goto found_i8259;
			}
		}
	}
	found_i8259:
	/*
	 * Look to see what the MP table reported for the ExtINT wiring.
	 * If we could not find the appropriate pin by looking at the
	 * IO-APICs, the i8259 is probably not connected to one, but give
	 * the MP table a chance anyway.
	 */
	i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is set up in the hardware. */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree. */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
		(i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}
1898
1899
1900
1901
/*
 * Not an __init, needed by the reboot/kexec code.  Clears all IO-APIC
 * entries and, when the i8259 is routed through an IO-APIC, puts that
 * pin into virtual-wire (ExtINT) mode so legacy interrupts still work.
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!legacy_pic->nr_legacy_irqs)
		return;

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 *
	 * With interrupt-remapping, for now we will use virtual wire A
	 * mode, as virtual wire B is little complex (need to configure
	 * both IOAPIC RTE as well as interrupt-remapping table entry).
	 * As this gets called during crash dump, keep this simple for
	 * now.
	 */
	if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask = 0;	/* Enabled */
		entry.trigger = 0;	/* Edge */
		entry.irr = 0;
		entry.polarity = 0;	/* High */
		entry.delivery_status = 0;
		entry.dest_mode = 0;	/* Physical */
		entry.delivery_mode = dest_ExtINT;	/* ExtInt */
		entry.vector = 0;
		entry.dest = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	/*
	 * Use virtual wire A mode when interrupt remapping is enabled.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(!intr_remapping_enabled &&
				ioapic_i8259.pin != -1);
}
1949
1950#ifdef CONFIG_X86_32
1951
1952
1953
1954
1955
1956
/*
 * Force the IO-APIC ID registers to the values recorded in the MP
 * table, resolving collisions with already-present APIC IDs on the
 * way.  Only called for old (pre-xAPIC) Intel parts — see
 * setup_ioapic_ids_from_mpc().
 */
void __init setup_ioapic_ids_from_mpc_nocheck(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int apic_id;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/*
	 * Seed the present-ID map with the CPU APIC ids so IO-APIC ids
	 * can't collide with them.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {

		/* Read the register 0 value (contains the current ID). */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mp_ioapics[apic_id].apicid;

		/* An out-of-range MPC id: fall back to the hardware id. */
		if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			mp_ioapics[apic_id].apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free?  If not, pick the
		 * first unused physical id instead.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					mp_ioapics[apic_id].apicid)) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				apic_id, mp_ioapics[apic_id].apicid);
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			mp_ioapics[apic_id].apicid = i;
		} else {
			physid_mask_t tmp;
			apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mp_ioapics[apic_id].apicid);
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mp_ioapics[apic_id].apicid)
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mp_ioapics[apic_id].apicid;

		/*
		 * Update the ID register according to the right value
		 * from the MPC table if they are different.
		 */
		if (mp_ioapics[apic_id].apicid == reg_00.bits.ID)
			continue;

		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mp_ioapics[apic_id].apicid);

		reg_00.bits.ID = mp_ioapics[apic_id].apicid;
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic_id, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check: read it back and verify the write stuck.
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic_id, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
			printk("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}
2057
2058void __init setup_ioapic_ids_from_mpc(void)
2059{
2060
2061 if (acpi_ioapic)
2062 return;
2063
2064
2065
2066
2067 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2068 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
2069 return;
2070 setup_ioapic_ids_from_mpc_nocheck();
2071}
2072#endif
2073
2074int no_timer_check __initdata;
2075
2076static int __init notimercheck(char *s)
2077{
2078 no_timer_check = 1;
2079 return 1;
2080}
2081__setup("no_timer_check", notimercheck);
2082
2083
2084
2085
2086
2087
2088
2089
2090
/*
 * Probe whether timer interrupts are actually being delivered: enable
 * irqs, wait roughly ten ticks worth of wall time, and see whether
 * jiffies advanced.  Returns 1 when the timer works (or checking is
 * disabled via "no_timer_check").
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* time_after() also copes with a jiffies wrap. */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
/*
 * ->irq_startup() for IO-APIC irqs: for legacy irqs first mask the
 * 8259 copy and remember whether an interrupt was already pending
 * there, then unmask the IO-APIC pin.  Returns the pending status so
 * the core can replay a lost edge.
 */
static unsigned int startup_ioapic_irq(struct irq_data *data)
{
	int was_pending = 0, irq = data->irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < legacy_pic->nr_legacy_irqs) {
		legacy_pic->mask(irq);
		if (legacy_pic->irq_pending(irq))
			was_pending = 1;
	}
	__unmask_ioapic(data->chip_data);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}
2158
2159static int ioapic_retrigger_irq(struct irq_data *data)
2160{
2161 struct irq_cfg *cfg = data->chip_data;
2162 unsigned long flags;
2163
2164 raw_spin_lock_irqsave(&vector_lock, flags);
2165 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2166 raw_spin_unlock_irqrestore(&vector_lock, flags);
2167
2168 return 1;
2169}
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180#ifdef CONFIG_SMP
/*
 * IRQ migration: IPI every online CPU in the irq's old vector domain
 * with IRQ_MOVE_CLEANUP_VECTOR so the stale vector gets released, then
 * clear move_in_progress.  Falls back to one IPI per CPU when a
 * temporary cpumask cannot be allocated atomically.
 */
void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;
		/* No memory for a mask: IPI the old CPUs one by one. */
		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}
2196
/*
 * Reprogram every IO-APIC pin of 'irq' with the new destination and
 * vector.  Callers in this file invoke it with ioapic_lock held.
 */
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
	int apic, pin;
	struct irq_pin_list *entry;
	u8 vector = cfg->vector;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;

		apic = entry->apic;
		pin = entry->pin;
		/*
		 * With interrupt-remapping, destination information comes
		 * from the interrupt-remapping table entry, so the RTE's
		 * destination field is left untouched.
		 */
		if (!irq_remapped(cfg))
			io_apic_write(apic, 0x11 + pin*2, dest);
		/* Read-modify-write the low RTE word to install the vector. */
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, 0x10 + pin*2, reg);
	}
}
2220
2221
2222
2223
2224
2225
/*
 * Assign a new vector/domain for 'mask' and update data->affinity.
 * On success *dest_id receives the APIC destination to program and 0
 * is returned; -1 on failure.  Callers in this file hold ioapic_lock.
 */
int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  unsigned int *dest_id)
{
	struct irq_cfg *cfg = data->chip_data;

	/* The requested mask must contain at least one online CPU. */
	if (!cpumask_intersects(mask, cpu_online_mask))
		return -1;

	if (assign_irq_vector(data->irq, data->chip_data, mask))
		return -1;

	cpumask_copy(data->affinity, mask);

	*dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain);
	return 0;
}
2242
/*
 * ->irq_set_affinity() for plain (non-remapped) IO-APIC irqs: pick a
 * new vector/destination under ioapic_lock and rewrite the RTEs.
 */
static int
ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	unsigned int dest, irq = data->irq;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	ret = __ioapic_set_affinity(data, mask, &dest);
	if (!ret) {
		/* Only the high 8 bits are valid. */
		dest = SET_APIC_LOGICAL_ID(dest);
		__target_IO_APIC_irq(irq, dest, data->chip_data);
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	return ret;
}
2261
2262#ifdef CONFIG_INTR_REMAP
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE and a flush of
 * the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with
 * the updated vector information), by using a virtual vector (io-apic
 * pin number).  The real vector used to interrupt the cpu comes from
 * the interrupt-remapping table entry.
 */
static int
ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		       bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	if (assign_irq_vector(irq, cfg, mask))
		return -EBUSY;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically update the IRTE with the new destination and vector.
	 */
	modify_irte(irq, &irte);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}
2308
2309#else
/* !CONFIG_INTR_REMAP stub: nothing to migrate, report success. */
static inline int
ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		       bool force)
{
	return 0;
}
2316#endif
2317
/*
 * Handler for IRQ_MOVE_CLEANUP_VECTOR: scan this CPU's vector table
 * and release vectors whose irqs have completed a migration away from
 * this CPU.
 */
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __this_cpu_read(vector_irq[vector]);

		if (irq == -1)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		raw_spin_lock(&desc->lock);

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (cfg->move_in_progress)
			goto unlock;

		/* Still the irq's current vector on this CPU: keep it. */
		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Clean it up in the next
		 * attempt by sending another IRQ_MOVE_CLEANUP_VECTOR to
		 * myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], -1);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	irq_exit();
}
2373
2374static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
2375{
2376 unsigned me;
2377
2378 if (likely(!cfg->move_in_progress))
2379 return;
2380
2381 me = smp_processor_id();
2382
2383 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2384 send_cleanup_vector(cfg);
2385}
2386
/*
 * Complete a pending migration using the vector of the interrupt being
 * serviced right now; the entry code stores the complemented vector in
 * orig_ax, hence the bitwise NOT here.
 */
static void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
2391
2392void irq_force_complete_move(int irq)
2393{
2394 struct irq_cfg *cfg = get_irq_chip_data(irq);
2395
2396 if (!cfg)
2397 return;
2398
2399 __irq_complete_move(cfg, cfg->vector);
2400}
2401#else
static inline void irq_complete_move(struct irq_cfg *cfg) { } /* !CONFIG_SMP: irqs never migrate */
2403#endif
2404
/*
 * ->irq_ack() for edge-triggered IO-APIC irqs: finish any pending
 * vector migration, migrate the irq if requested, then EOI the local
 * APIC.
 */
static void ack_apic_edge(struct irq_data *data)
{
	irq_complete_move(data->chip_data);
	move_native_irq(data->irq);
	ack_APIC_irq();
}
2411
/* Count of level irqs the TMR workaround had to EOI by hand. */
atomic_t irq_mis_count;

/*
 * Deliver an EOI to the IO-APIC pins of 'irq'.  IO-APICs of version
 * 0x20 and later have a directed EOI register; older parts are EOI'd
 * indirectly by flipping the pin to edge mode (masked) and back to
 * level (unmasked).
 */
static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (mp_ioapics[entry->apic].apicver >= 0x20) {
			/*
			 * Intr-remapping uses the pin number as the virtual
			 * vector in the RTE; the real vector is programmed
			 * in the intr-remapping table entry.  Hence for the
			 * io-apic EOI we use the pin number.
			 */
			if (irq_remapped(cfg))
				io_apic_eoi(entry->apic, entry->pin);
			else
				io_apic_eoi(entry->apic, cfg->vector);
		} else {
			__mask_and_edge_IO_APIC_irq(entry);
			__unmask_and_level_IO_APIC_irq(entry);
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
2455
/*
 * ->irq_eoi() for level-triggered IO-APIC irqs, including the classic
 * version-0x11 IO-APIC erratum workaround and masked migration.
 */
static void ack_apic_level(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;
	int i, do_unmask_irq = 0, irq = data->irq;
	unsigned long v;

	irq_complete_move(cfg);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* If we are moving the irq we need to mask it */
	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_ioapic(cfg);
	}
#endif

	/*
	 * It appears there is an erratum which affects at least version
	 * 0x11 of the I/O-APIC (that's the 82093AA and cores integrated
	 * into various chipsets).  Under certain conditions a level-
	 * triggered interrupt is erroneously delivered as an edge-
	 * triggered one, but the respective IRR bit gets set
	 * nevertheless.  As a result the I/O unit expects an EOI message
	 * that never arrives and further interrupts from the source are
	 * blocked.
	 *
	 * The workaround is to simulate the EOI message manually: when
	 * the local APIC's TMR shows edge for what we know is a level
	 * interrupt, toggle the pin's trigger mode (masked edge, then
	 * unmasked level) in eoi_ioapic_irq() below.
	 *
	 * CPU-offline irq forwarding (fixup_irqs() via IPIs) produces
	 * the same edge-in-TMR symptom for level irqs, so this path
	 * also handles that case.
	 */
	i = cfg->vector;
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

	/*
	 * We must acknowledge the irq before we move it or the
	 * acknowledge will not propagate properly.
	 */
	ack_APIC_irq();

	/*
	 * Tail end of the version-0x11 workaround: if the TMR bit for
	 * our vector was clear (edge), clear the remote-IRR by hand via
	 * eoi_ioapic_irq() and count the occurrence.
	 */
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);

		eoi_ioapic_irq(irq, cfg);
	}

	/* Now we can move and re-enable the irq. */
	if (unlikely(do_unmask_irq)) {
		/*
		 * Only migrate the irq once the level ack has actually
		 * reached the IO-APIC: if we reprogram the vector while
		 * Remote-IRR is still set, the irq would never fire
		 * again.  Reading the Remote-IRR bit (in
		 * io_apic_level_ack_pending()) both flushes outstanding
		 * writes/acks to the IO-APIC from this CPU and tells us
		 * whether the ack was delivered; if it is still pending
		 * we skip the move this time and retry on the next
		 * interrupt.
		 */
		if (!io_apic_level_ack_pending(cfg))
			move_masked_irq(irq);
		unmask_ioapic(cfg);
	}
}
2558
2559#ifdef CONFIG_INTR_REMAP
/* Edge irq under interrupt remapping: a local APIC EOI is sufficient. */
static void ir_ack_apic_edge(struct irq_data *data)
{
	ack_APIC_irq();
}

/* Level irq under interrupt remapping: local EOI plus an IO-APIC EOI. */
static void ir_ack_apic_level(struct irq_data *data)
{
	ack_APIC_irq();
	eoi_ioapic_irq(data->irq, data->chip_data);
}
2570#endif
2571
/* irq_chip for IO-APIC routed interrupts (no interrupt remapping). */
static struct irq_chip ioapic_chip __read_mostly = {
	.name = "IO-APIC",
	.irq_startup = startup_ioapic_irq,
	.irq_mask = mask_ioapic_irq,
	.irq_unmask = unmask_ioapic_irq,
	.irq_ack = ack_apic_edge,	/* edge-triggered */
	.irq_eoi = ack_apic_level,	/* level-triggered */
#ifdef CONFIG_SMP
	.irq_set_affinity = ioapic_set_affinity,
#endif
	.irq_retrigger = ioapic_retrigger_irq,
};
2584
/* irq_chip for IO-APIC interrupts routed through interrupt remapping. */
static struct irq_chip ir_ioapic_chip __read_mostly = {
	.name = "IR-IO-APIC",
	.irq_startup = startup_ioapic_irq,
	.irq_mask = mask_ioapic_irq,
	.irq_unmask = unmask_ioapic_irq,
#ifdef CONFIG_INTR_REMAP
	.irq_ack = ir_ack_apic_edge,	/* edge-triggered */
	.irq_eoi = ir_ack_apic_level,	/* level-triggered */
#ifdef CONFIG_SMP
	.irq_set_affinity = ir_ioapic_set_affinity,
#endif
#endif
	.irq_retrigger = ioapic_retrigger_irq,
};
2599
/*
 * Catch irqs the IO-APIC setup left without a vector: route legacy
 * ones back to the 8259 and disable the rest.
 */
static inline void init_IO_APIC_traps(void)
{
	struct irq_cfg *cfg;
	unsigned int irq;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_active_irq(irq) {
		cfg = get_irq_chip_data(irq);
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < legacy_pic->nr_legacy_irqs)
				legacy_pic->make_irq(irq);
			else
				/* Strange. Oh, well.. */
				set_irq_chip(irq, &no_irq_chip);
		}
	}
}
2632
2633
2634
2635
2636
2637static void mask_lapic_irq(struct irq_data *data)
2638{
2639 unsigned long v;
2640
2641 v = apic_read(APIC_LVT0);
2642 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2643}
2644
2645static void unmask_lapic_irq(struct irq_data *data)
2646{
2647 unsigned long v;
2648
2649 v = apic_read(APIC_LVT0);
2650 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2651}
2652
/* Plain local-APIC EOI; nothing chip-specific to do for LVT0 irqs. */
static void ack_lapic_irq(struct irq_data *data)
{
	ack_APIC_irq();
}
2657
/* irq_chip used when an irq (the timer) is driven via the local APIC LVT0. */
static struct irq_chip lapic_chip __read_mostly = {
	.name = "local-APIC",
	.irq_mask = mask_lapic_irq,
	.irq_unmask = unmask_lapic_irq,
	.irq_ack = ack_lapic_irq,
};
2664
/*
 * Hand 'irq' over to the local-APIC chip as an edge interrupt,
 * clearing any IRQ_LEVEL status left by earlier IO-APIC setup.
 */
static void lapic_register_intr(int irq)
{
	irq_clear_status_flags(irq, IRQ_LEVEL);
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}
2671
2672
2673
2674
2675
2676
2677
2678
/*
 * This looks a bit hackish but it's about the only way of sending a
 * few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need these cycles
 * as some boards have glue logic that keeps the 8259A interrupt line
 * asserted until INTA.
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	/* The RTC (ISA irq 8) is used as the interrupt source. */
	pin = find_isa_irq_pin(8, mp_INT);
	if (pin == -1) {
		WARN_ON_ONCE(1);
		return;
	}
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1) {
		WARN_ON_ONCE(1);
		return;
	}

	/* Save the original RTC routing and temporarily disconnect it. */
	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	/* Route the RTC pin to this CPU in ExtINT (8259-passthrough) mode. */
	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmasked */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	/* Make the RTC generate periodic interrupts for a while. */
	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	i = 100;
	while (i-- > 0) {
		mdelay(10);
		/* Each observed periodic-interrupt flag shortens the wait. */
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	/* Restore the RTC and the original IO-APIC routing. */
	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}
2730
2731static int disable_timer_pin_1 __initdata;
2732
2733static int __init disable_timer_pin_setup(char *arg)
2734{
2735 disable_timer_pin_1 = 1;
2736 return 0;
2737}
2738early_param("disable_timer_pin_1", disable_timer_pin_setup);
2739
/* Set when the timer ended up being routed through the 8259A. */
int timer_through_8259 __initdata;

/*
 * This code may look a bit paranoid, but it's supposed to cooperate
 * with a wide range of boards and BIOS bugs.  Fortunately only the
 * timer IRQ is so screwy that we have to manually set it up: try the
 * direct IO-APIC pin, the 8259-routed pin, the local APIC virtual
 * wire, and finally raw ExtINT, panicking if nothing works.
 */
static inline void __init check_timer(void)
{
	struct irq_cfg *cfg = get_irq_chip_data(0);
	int node = cpu_to_node(0);
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	int no_pin1 = 0;

	local_irq_save(flags);

	/*
	 * get/set the timer IRQ vector:
	 */
	legacy_pic->mask(0);
	assign_irq_vector(0, cfg, apic->target_cpus());

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	legacy_pic->init(1);

	pin1 = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2 = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless about the 8259A and report that
	 * IRQ0 is connected to the timer pin of some other IO-APIC input,
	 * or not at all.  If pin1 is missing, fall back to the 8259-routed
	 * pin (pin2); if pin2 is missing, reuse pin1 for the second try.
	 */
	if (pin1 == -1) {
		if (intr_remapping_enabled)
			panic("BIOS bug: timer not connected to IO-APIC");
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq_node(cfg, node, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		} else {
			/*
			 * For edge trigger, setup_ioapic_irq() already
			 * left it unmasked, so we only need to unmask
			 * a level-triggered timer pin (if one exists).
			 */
			int idx;
			idx = find_irq_entry(apic1, pin1, mp_INT);
			if (idx != -1 && irq_trigger(idx))
				unmask_ioapic(cfg);
		}
		if (timer_irq_works()) {
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
		if (intr_remapping_enabled)
			panic("timer doesn't work through Interrupt-remapped IO-APIC");
		local_irq_disable();
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		legacy_pic->unmask(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			timer_through_8259 = 1;
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		local_irq_disable();
		legacy_pic->mask(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	legacy_pic->unmask(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	legacy_pic->mask(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	legacy_pic->init(0);
	legacy_pic->make_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
		"report. Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
/* The cascade irq (IRQ2) is never routed through the IO-APIC path. */
#define PIC_IRQS (1UL << PIC_CASCADE_IR)

/* Top-level IO-APIC initialisation, run once at boot. */
void __init setup_IO_APIC(void)
{
	/*
	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
	 */
	io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
	/*
	 * Set up IO-APIC IRQ routing.
	 */
	x86_init.mpparse.setup_ioapic_ids();

	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	if (legacy_pic->nr_legacy_irqs)
		check_timer();
}
2936
2937
2938
2939
2940
2941
2942static int __init io_apic_bug_finalize(void)
2943{
2944 if (sis_apic_bug == -1)
2945 sis_apic_bug = 0;
2946 return 0;
2947}
2948
2949late_initcall(io_apic_bug_finalize);
2950
/*
 * Per-IOAPIC suspend/resume state: the sysdev plus a trailing
 * variable-length array holding one saved routing entry per IO-APIC
 * register (sized at allocation time in ioapic_init_sysfs()).
 */
struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];	/* zero-length array idiom */
};
/* One saved-state record per IO-APIC, indexed by IO-APIC number. */
static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
2956
2957static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
2958{
2959 struct IO_APIC_route_entry *entry;
2960 struct sysfs_ioapic_data *data;
2961 int i;
2962
2963 data = container_of(dev, struct sysfs_ioapic_data, dev);
2964 entry = data->entry;
2965 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ )
2966 *entry = ioapic_read_entry(dev->id, i);
2967
2968 return 0;
2969}
2970
/*
 * Resume hook: re-program the IO-APIC ID register (firmware may have
 * reset it) and write back every redirection entry saved at suspend.
 */
static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

	/* Fix up the APIC ID (register 0) only if it no longer matches. */
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	/* Entries are restored outside the ID-fixup critical section;
	 * NOTE(review): presumably ioapic_write_entry() locks internally -
	 * confirm against its definition. */
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}
2994
/* sysdev class tying the suspend/resume hooks above to "ioapic" devices. */
static struct sysdev_class ioapic_sysdev_class = {
	.name = "ioapic",
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};
3000
/*
 * Register one sysdev per IO-APIC so its routing entries are saved and
 * restored across suspend/resume.  A per-IOAPIC allocation or
 * registration failure only disables suspend/resume support for that
 * IO-APIC; boot continues.
 */
static int __init ioapic_init_sysfs(void)
{
	struct sys_device * dev;
	int i, size, error;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++ ) {
		/* Header plus one route entry per register (flexible array). */
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);
3034
3035
3036
3037
/*
 * Dynamic irq allocation: allocate an irq number at or above @from
 * (clamped above the GSI range), attach an irq_cfg on @node and assign
 * it a vector.  Returns the irq number, or 0 on failure (0 is never a
 * dynamically allocated irq).
 */
unsigned int create_irq_nr(unsigned int from, int node)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int ret = 0;
	int irq;

	/* Dynamic irqs live strictly above the GSI/legacy range. */
	if (from < nr_irqs_gsi)
		from = nr_irqs_gsi;

	irq = alloc_irq_from(from, node);
	if (irq < 0)
		return 0;
	cfg = alloc_irq_cfg(irq, node);
	if (!cfg) {
		free_irq_at(irq, NULL);
		return 0;
	}

	/* Vector assignment is serialized by vector_lock. */
	raw_spin_lock_irqsave(&vector_lock, flags);
	if (!__assign_irq_vector(irq, cfg, apic->target_cpus()))
		ret = irq;
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	if (ret) {
		set_irq_chip_data(irq, cfg);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	} else {
		/* No vector available: release the descriptor and cfg. */
		free_irq_at(irq, cfg);
	}
	return ret;
}
3070
3071int create_irq(void)
3072{
3073 int node = cpu_to_node(0);
3074 unsigned int irq_want;
3075 int irq;
3076
3077 irq_want = nr_irqs_gsi;
3078 irq = create_irq_nr(irq_want, node);
3079
3080 if (irq == 0)
3081 irq = -1;
3082
3083 return irq;
3084}
3085
/*
 * Tear down an irq created by create_irq_nr(): mark it unrequestable,
 * drop its interrupt-remapping entry (if any), release its vector and
 * finally free the descriptor and cfg.
 */
void destroy_irq(unsigned int irq)
{
	struct irq_cfg *cfg = get_irq_chip_data(irq);
	unsigned long flags;

	irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);

	if (irq_remapped(cfg))
		free_irte(irq);
	/* Vector teardown is serialized by vector_lock. */
	raw_spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq, cfg);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	free_irq_at(irq, cfg);
}
3100
3101
3102
3103
3104#ifdef CONFIG_PCI_MSI
/*
 * Build the MSI address/data payload for @irq, assigning it a vector
 * first.  With interrupt remapping active the message encodes an IRTE
 * index; otherwise it encodes destination APIC ID, delivery and dest
 * mode directly.  @pdev selects the source-id for remapping; when NULL,
 * @hpet_id is used instead (HPET-originated MSI).
 * Returns 0 on success, -ENXIO with the APIC disabled, or the vector
 * assignment error.
 */
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
			   struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	int err;
	unsigned dest;

	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (err)
		return err;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

	if (irq_remapped(get_irq_chip_data(irq))) {
		struct irte irte;
		int ir_index;
		u16 sub_handle;

		ir_index = map_irq_to_irte_handle(irq, &sub_handle);
		BUG_ON(ir_index == -1);

		prepare_irte(&irte, cfg->vector, dest);

		/* Set the source-id of the interrupt request. */
		if (pdev)
			set_msi_sid(&irte, pdev);
		else
			set_hpet_sid(&irte, hpet_id);

		modify_irte(irq, &irte);

		/* Remapped format: data carries the sub-handle, address
		 * carries the IRTE index split across two fields. */
		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(ir_index) |
				  MSI_ADDR_IR_INDEX2(ir_index);
	} else {
		/* x2APIC IDs may exceed 8 bits; the extra bits go into
		 * the extended destination field of address_hi. */
		if (x2apic_enabled())
			msg->address_hi = MSI_ADDR_BASE_HI |
					  MSI_ADDR_EXT_DEST_ID(dest);
		else
			msg->address_hi = MSI_ADDR_BASE_HI;

		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((apic->irq_dest_mode == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL:
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU:
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED:
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(cfg->vector);
	}
	return err;
}
3173
3174#ifdef CONFIG_SMP
/*
 * Retarget a plain (non-remapped) MSI: pick a new vector/destination,
 * then patch only the vector and destination-ID fields of the cached
 * MSI message and write it back to the device.
 * Returns 0 on success, -1 if no vector could be assigned.
 */
static int
msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	struct msi_msg msg;
	unsigned int dest;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	__get_cached_msi_msg(data->msi_desc, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	__write_msi_msg(data->msi_desc, &msg);

	return 0;
}
3196#ifdef CONFIG_INTR_REMAP
3197
3198
3199
3200
/*
 * Retarget a remapped MSI: instead of rewriting the device's MSI
 * message, update the vector/destination in its interrupt-remapping
 * table entry.  Returns 0 on success, -1 on failure.
 */
static int
ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;

	if (get_irte(irq, &irte))
		return -1;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/* Atomically update the IRTE with the new destination/vector. */
	modify_irte(irq, &irte);

	/* If a vector migration is in flight, flush the old vector on
	 * the previous CPUs so it can be reused. */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return 0;
}
3233
3234#endif
3235#endif
3236
3237
3238
3239
3240
/*
 * IRQ chip for PCI MSI/MSI-X without interrupt remapping.
 * MSIs are edge-triggered; mask/unmask act at the PCI function.
 */
static struct irq_chip msi_chip = {
	.name = "PCI-MSI",
	.irq_unmask = unmask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_ack = ack_apic_edge,
#ifdef CONFIG_SMP
	.irq_set_affinity = msi_set_affinity,
#endif
	.irq_retrigger = ioapic_retrigger_irq,
};
3251
/*
 * IRQ chip for PCI MSI/MSI-X routed through interrupt remapping:
 * ack and affinity go via the remapping hardware when available.
 */
static struct irq_chip msi_ir_chip = {
	.name = "IR-PCI-MSI",
	.irq_unmask = unmask_msi_irq,
	.irq_mask = mask_msi_irq,
#ifdef CONFIG_INTR_REMAP
	.irq_ack = ir_ack_apic_edge,
#ifdef CONFIG_SMP
	.irq_set_affinity = ir_msi_set_affinity,
#endif
#endif
	.irq_retrigger = ioapic_retrigger_irq,
};
3264
3265
3266
3267
3268
3269
3270static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3271{
3272 struct intel_iommu *iommu;
3273 int index;
3274
3275 iommu = map_dev_to_ir(dev);
3276 if (!iommu) {
3277 printk(KERN_ERR
3278 "Unable to map PCI %s to iommu\n", pci_name(dev));
3279 return -ENOENT;
3280 }
3281
3282 index = alloc_irte(iommu, irq, nvec);
3283 if (index < 0) {
3284 printk(KERN_ERR
3285 "Unable to allocate %d IRTE for PCI %s\n", nvec,
3286 pci_name(dev));
3287 return -ENOSPC;
3288 }
3289 return index;
3290}
3291
/*
 * Bind one MSI descriptor to @irq: compose and write the MSI message,
 * then install the plain or remapped MSI irq_chip as appropriate.
 * Returns 0 on success or the msi_compose_msg() error.
 */
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
{
	struct msi_msg msg;
	int ret;

	ret = msi_compose_msg(dev, irq, &msg, -1);
	if (ret < 0)
		return ret;

	set_irq_msi(irq, msidesc);
	write_msi_msg(irq, &msg);

	if (irq_remapped(get_irq_chip_data(irq))) {
		/* Remapped irqs can be migrated from process context. */
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
	} else
		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);

	return 0;
}
3314
/*
 * arch hook: allocate and set up irqs for all MSI descriptors of @dev.
 * With interrupt remapping, the first descriptor allocates a block of
 * IRTEs and later ones reuse it via sub-handles.
 * Returns 0 on success, 1 when multi-MSI is requested (unsupported
 * here), or a negative error.
 */
int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int node, ret, sub_handle, index = 0;
	unsigned int irq, irq_want;
	struct msi_desc *msidesc;
	struct intel_iommu *iommu = NULL;

	/* Multiple-message MSI (nvec > 1) is not supported; MSI-X is. */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	node = dev_to_node(&dev->dev);
	irq_want = nr_irqs_gsi;
	sub_handle = 0;
	list_for_each_entry(msidesc, &dev->msi_list, list) {
		irq = create_irq_nr(irq_want, node);
		if (irq == 0)
			return -1;
		irq_want = irq + 1;
		if (!intr_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/* First entry allocates nvec IRTEs in one block. */
			index = msi_alloc_irte(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			iommu = map_dev_to_ir(dev);
			if (!iommu) {
				ret = -ENOENT;
				goto error;
			}
			/* Subsequent entries share the block via sub-handles. */
			set_irte_irq(irq, iommu, index, sub_handle);
		}
no_ir:
		ret = setup_msi_irq(dev, msidesc, irq);
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;

error:
	/* NOTE(review): only the current irq is destroyed here; irqs
	 * created in earlier loop iterations are not released. */
	destroy_irq(irq);
	return ret;
}
3372
/* arch hook: release one MSI irq allocated by native_setup_msi_irqs(). */
void native_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
3377
3378#if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
3379#ifdef CONFIG_SMP
/*
 * Retarget a DMAR (IOMMU fault-reporting) MSI: reassign the vector,
 * then patch vector and destination-ID into the message read back from
 * the DMAR unit and write it out again.
 */
static int
dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct msi_msg msg;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
	/* Also refresh the extended destination bits (x2APIC IDs). */
	msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);

	dmar_msi_write(irq, &msg);

	return 0;
}
3403
3404#endif
3405
/* IRQ chip for the DMAR unit's fault-reporting MSI (edge-triggered). */
static struct irq_chip dmar_msi_type = {
	.name = "DMAR_MSI",
	.irq_unmask = dmar_msi_unmask,
	.irq_mask = dmar_msi_mask,
	.irq_ack = ack_apic_edge,
#ifdef CONFIG_SMP
	.irq_set_affinity = dmar_msi_set_affinity,
#endif
	.irq_retrigger = ioapic_retrigger_irq,
};
3416
3417int arch_setup_dmar_msi(unsigned int irq)
3418{
3419 int ret;
3420 struct msi_msg msg;
3421
3422 ret = msi_compose_msg(NULL, irq, &msg, -1);
3423 if (ret < 0)
3424 return ret;
3425 dmar_msi_write(irq, &msg);
3426 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3427 "edge");
3428 return 0;
3429}
3430#endif
3431
3432#ifdef CONFIG_HPET_TIMER
3433
3434#ifdef CONFIG_SMP
/*
 * Retarget an HPET comparator's MSI: reassign the vector, then patch
 * vector and destination-ID into the message read from the HPET and
 * write it back.  handler_data carries the per-timer HPET handle.
 */
static int hpet_msi_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	struct msi_msg msg;
	unsigned int dest;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	hpet_msi_read(data->handler_data, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	hpet_msi_write(data->handler_data, &msg);

	return 0;
}
3456
3457#endif
3458
/* IRQ chip for HPET MSI when routed through interrupt remapping. */
static struct irq_chip ir_hpet_msi_type = {
	.name = "IR-HPET_MSI",
	.irq_unmask = hpet_msi_unmask,
	.irq_mask = hpet_msi_mask,
#ifdef CONFIG_INTR_REMAP
	.irq_ack = ir_ack_apic_edge,
#ifdef CONFIG_SMP
	.irq_set_affinity = ir_msi_set_affinity,
#endif
#endif
	.irq_retrigger = ioapic_retrigger_irq,
};
3471
/* IRQ chip for HPET MSI without interrupt remapping. */
static struct irq_chip hpet_msi_type = {
	.name = "HPET_MSI",
	.irq_unmask = hpet_msi_unmask,
	.irq_mask = hpet_msi_mask,
	.irq_ack = ack_apic_edge,
#ifdef CONFIG_SMP
	.irq_set_affinity = hpet_msi_set_affinity,
#endif
	.irq_retrigger = ioapic_retrigger_irq,
};
3482
/*
 * Set up an MSI for HPET timer block @id on @irq: allocate an IRTE
 * when remapping is enabled, compose and write the message, and attach
 * the plain or remapped HPET irq_chip.  Returns 0 on success, -1 or
 * the compose error on failure.
 */
int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct msi_msg msg;
	int ret;

	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_hpet_to_ir(id);
		int index;

		if (!iommu)
			return -1;

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			return -1;
	}

	ret = msi_compose_msg(NULL, irq, &msg, id);
	if (ret < 0)
		return ret;

	hpet_msi_write(get_irq_data(irq), &msg);
	/* HPET MSIs may be migrated from process context. */
	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
	if (irq_remapped(get_irq_chip_data(irq)))
		set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
					      handle_edge_irq, "edge");
	else
		set_irq_chip_and_handler_name(irq, &hpet_msi_type,
					      handle_edge_irq, "edge");

	return 0;
}
3515#endif
3516
3517#endif
3518
3519
3520
3521#ifdef CONFIG_HT_IRQ
3522
3523#ifdef CONFIG_SMP
3524
3525static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3526{
3527 struct ht_irq_msg msg;
3528 fetch_ht_irq_msg(irq, &msg);
3529
3530 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3531 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3532
3533 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3534 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3535
3536 write_ht_irq_msg(irq, &msg);
3537}
3538
/*
 * Affinity callback for HyperTransport irqs: assign a new vector and
 * destination, then rewrite the HT irq message accordingly.
 */
static int
ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	target_ht_irq(data->irq, dest, cfg->vector);
	return 0;
}
3551
3552#endif
3553
/* IRQ chip for HyperTransport interrupts (edge-triggered). */
static struct irq_chip ht_irq_chip = {
	.name = "PCI-HT",
	.irq_mask = mask_ht_irq,
	.irq_unmask = unmask_ht_irq,
	.irq_ack = ack_apic_edge,
#ifdef CONFIG_SMP
	.irq_set_affinity = ht_set_affinity,
#endif
	.irq_retrigger = ioapic_retrigger_irq,
};
3564
/*
 * Set up a HyperTransport irq for @dev: assign a vector, compose the
 * HT irq message (created masked) and attach the HT irq_chip.
 * Returns 0 on success, -ENXIO with the APIC disabled, or the vector
 * assignment error.
 */
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;

	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		dest = apic->cpu_mask_to_apicid_and(cfg->domain,
						    apic->target_cpus());

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		/* Edge-triggered, initially masked; delivery and dest
		 * mode mirror the APIC driver's configuration. */
		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((apic->irq_dest_mode == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((apic->irq_delivery_mode != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");

		dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
	}
	return err;
}
3606#endif
3607
/*
 * Return the number of redirection-table entries of @ioapic.  The
 * hardware register reports the index of the last entry, i.e.
 * (count - 1), hence the +1.
 */
int __init io_apic_get_redir_entries (int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries + 1;
}
3623
3624static void __init probe_nr_irqs_gsi(void)
3625{
3626 int nr;
3627
3628 nr = gsi_top + NR_IRQS_LEGACY;
3629 if (nr > nr_irqs_gsi)
3630 nr_irqs_gsi = nr;
3631
3632 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
3633}
3634
/* Accessor: first irq number above the GSI (IO-APIC pin) range. */
int get_nr_irqs_gsi(void)
{
	return nr_irqs_gsi;
}
3639
3640#ifdef CONFIG_SPARSE_IRQ
/*
 * Size the sparse irq space: cap nr_irqs at total vector capacity,
 * then shrink it to a heuristic budget of GSIs plus per-CPU slack
 * (and extra headroom for MSI/HT irqs when configured).
 * Returns the number of irqs to preallocate (the legacy range).
 */
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	/* Heuristic: all GSIs plus 8 dynamic irqs per CPU. */
	nr = nr_irqs_gsi + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/* Extra room for MSI and HyperTransport irqs. */
	nr += nr_irqs_gsi * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	return NR_IRQS_LEGACY;
}
3660#endif
3661
/*
 * Program one IO-APIC pin for @irq using the trigger/polarity in
 * @irq_attr.  Allocates the irq descriptor/cfg on @dev's node (boot
 * node when @dev is NULL).  Returns -EINVAL for an irq outside the
 * IO-APIC range; other failures also return 0 after logging.
 */
static int __io_apic_set_pci_routing(struct device *dev, int irq,
				     struct io_apic_irq_attr *irq_attr)
{
	struct irq_cfg *cfg;
	int node;
	int ioapic, pin;
	int trigger, polarity;

	ioapic = irq_attr->ioapic;
	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			ioapic);
		return -EINVAL;
	}

	if (dev)
		node = dev_to_node(dev);
	else
		node = cpu_to_node(0);

	cfg = alloc_irq_and_cfg_at(irq, node);
	if (!cfg)
		return 0;

	pin = irq_attr->ioapic_pin;
	trigger = irq_attr->trigger;
	polarity = irq_attr->polarity;

	/*
	 * Legacy irqs already have their pin recorded during early
	 * setup; only non-legacy irqs need the pin added here.
	 */
	if (irq >= legacy_pic->nr_legacy_irqs) {
		if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) {
			printk(KERN_INFO "can not add pin %d for irq %d\n",
				pin, irq);
			return 0;
		}
	}

	setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity);

	return 0;
}
3705
/*
 * Public wrapper around __io_apic_set_pci_routing() that programs each
 * IO-APIC pin at most once: a per-pin "programmed" bitmap makes repeat
 * calls for the same pin a silent no-op (returns 0).
 */
int io_apic_set_pci_routing(struct device *dev, int irq,
			    struct io_apic_irq_attr *irq_attr)
{
	int ioapic, pin;

	ioapic = irq_attr->ioapic;
	pin = irq_attr->ioapic_pin;
	if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mp_ioapics[ioapic].apicid, pin);
		return 0;
	}
	set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed);

	return __io_apic_set_pci_routing(dev, irq, irq_attr);
}
3726
/*
 * Pick a unique APIC id for a newly registered IO-APIC.
 * 32-bit: defer to io_apic_get_unique_id() on pre-xAPIC Intel parts,
 * otherwise trust the firmware-supplied @id.
 * 64-bit: keep @id if unused among registered IO-APICs, else take the
 * first free id out of the 256-entry space.
 */
u8 __init io_apic_unique_id(u8 id)
{
#ifdef CONFIG_X86_32
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return io_apic_get_unique_id(nr_ioapics, id);
	else
		return id;
#else
	int i;
	DECLARE_BITMAP(used, 256);

	bitmap_zero(used, 256);
	for (i = 0; i < nr_ioapics; i++) {
		struct mpc_ioapic *ia = &mp_ioapics[i];
		__set_bit(ia->apicid, used);
	}
	if (!test_bit(id, used))
		return id;
	return find_first_zero_bit(used, 256);
#endif
}
3749
3750#ifdef CONFIG_X86_32
/*
 * 32-bit only: choose a physical APIC id for @ioapic that collides
 * neither with CPUs nor with already-assigned IO-APICs, then program
 * it into the chip and verify the write took effect.
 * Returns the assigned id, or -1 if the chip refused the new id.
 * Panics when the whole physical id space is exhausted.
 */
int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * Seed the in-use map from the present CPUs on first call;
	 * later calls accumulate the ids handed out below.
	 */
	if (physids_empty(apic_id_map))
		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	/* Ids at/above the broadcast id are invalid; fall back to the
	 * id currently programmed in the chip. */
	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
			"%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/* On collision, scan for the first free physical id. */
	if (apic->check_apicid_used(&apic_id_map, apic_id)) {

		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!apic->check_apicid_used(&apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
			"trying %d\n", ioapic, apic_id, i);

		apic_id = i;
	}

	/* Mark the chosen id as used. */
	apic->apicid_to_cpu_present(apic_id, &tmp);
	physids_or(apic_id_map, apic_id_map, tmp);

	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/* Read back to make sure the chip accepted the new id. */
		if (reg_00.bits.ID != apic_id) {
			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}
3824#endif
3825
/* Read the version field from @ioapic's register 1. */
int __init io_apic_get_version(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.version;
}
3837
/*
 * Look up the MP-table trigger/polarity override for @gsi.
 * Fills *trigger and *polarity and returns 0 on success; returns -1
 * when IO-APICs are disabled or the GSI has no matching entry.
 */
int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
{
	int ioapic, pin, idx;

	if (skip_ioapic_setup)
		return -1;

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0)
		return -1;

	pin = mp_find_ioapic_pin(ioapic, gsi);
	if (pin < 0)
		return -1;

	idx = find_irq_entry(ioapic, pin, mp_INT);
	if (idx < 0)
		return -1;

	*trigger = irq_trigger(idx);
	*polarity = irq_polarity(idx);
	return 0;
}
3861
3862
3863
3864
3865
3866
3867#ifdef CONFIG_SMP
/*
 * Late boot: point every programmed IO-APIC pin at its final CPU
 * destination - either the affinity already requested for the irq or
 * the APIC driver's default target mask.
 */
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	struct irq_desc *desc;
	const struct cpumask *mask;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
	for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
		irq_entry = find_irq_entry(ioapic, pin, mp_INT);
		if (irq_entry == -1)
			continue;
		irq = pin_2_irq(irq_entry, ioapic, pin);

		/* Skip non-legacy irqs on secondary IO-APICs. */
		if ((ioapic > 0) && (irq > 16))
			continue;

		desc = irq_to_desc(irq);

		/*
		 * Honour an affinity that was set explicitly or pinned
		 * (no-balance); otherwise use the default target mask.
		 */
		if (desc->status &
		    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
			mask = desc->irq_data.affinity;
		else
			mask = apic->target_cpus();

		if (intr_remapping_enabled)
			ir_ioapic_set_affinity(&desc->irq_data, mask, false);
		else
			ioapic_set_affinity(&desc->irq_data, mask, false);
	}

}
3905#endif
3906
/* Room for "IOAPIC %u" plus NUL (supports up to 2-digit indices). */
#define IOAPIC_RESOURCE_NAME_SIZE 11

/* Bootmem-allocated array of iomem resources, one per IO-APIC. */
static struct resource *ioapic_resources;

/*
 * Allocate the resource array plus its name strings in one bootmem
 * block: nr_ioapics resources followed by nr_ioapics name buffers.
 * Returns the array, or NULL when there are no IO-APICs.
 */
static struct resource * __init ioapic_setup_resources(int nr_ioapics)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	/* Name strings live directly after the resource structs. */
	mem += sizeof(struct resource) * nr_ioapics;

	for (i = 0; i < nr_ioapics; i++) {
		res[i].name = mem;
		res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
		mem += IOAPIC_RESOURCE_NAME_SIZE;
	}

	ioapic_resources = res;

	return res;
}
3940
/*
 * Map every IO-APIC's registers into fixmap space and record its
 * iomem resource.  On 32-bit, a bogus zero MP-table address disables
 * IO-APIC support and maps a scratch page instead so reads don't
 * fault.  Finally size the GSI irq range.
 */
void __init ioapic_and_gsi_init(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources(nr_ioapics);
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].apicaddr;
#ifdef CONFIG_X86_32
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
#endif
		} else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
			/* No real IO-APIC: map a dummy page. */
			ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
			__fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
			ioapic_phys);
		idx++;

		ioapic_res->start = ioapic_phys;
		ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
		ioapic_res++;
	}

	probe_nr_irqs_gsi();
}
3982
3983void __init ioapic_insert_resources(void)
3984{
3985 int i;
3986 struct resource *r = ioapic_resources;
3987
3988 if (!r) {
3989 if (nr_ioapics > 0)
3990 printk(KERN_ERR
3991 "IO APIC resources couldn't be allocated.\n");
3992 return;
3993 }
3994
3995 for (i = 0; i < nr_ioapics; i++) {
3996 insert_resource(&iomem_resource, r);
3997 r++;
3998 }
3999}
4000
4001int mp_find_ioapic(u32 gsi)
4002{
4003 int i = 0;
4004
4005 if (nr_ioapics == 0)
4006 return -1;
4007
4008
4009 for (i = 0; i < nr_ioapics; i++) {
4010 if ((gsi >= mp_gsi_routing[i].gsi_base)
4011 && (gsi <= mp_gsi_routing[i].gsi_end))
4012 return i;
4013 }
4014
4015 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
4016 return -1;
4017}
4018
/*
 * Convert @gsi to a pin number on @ioapic.  Warns and returns -1 for
 * an invalid IO-APIC index or a GSI beyond the IO-APIC's range.
 */
int mp_find_ioapic_pin(int ioapic, u32 gsi)
{
	if (WARN_ON(ioapic == -1))
		return -1;
	if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end))
		return -1;

	return gsi - mp_gsi_routing[ioapic].gsi_base;
}
4028
4029static int bad_ioapic(unsigned long address)
4030{
4031 if (nr_ioapics >= MAX_IO_APICS) {
4032 printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded "
4033 "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
4034 return 1;
4035 }
4036 if (!address) {
4037 printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
4038 " found in table, skipping!\n");
4039 return 1;
4040 }
4041 return 0;
4042}
4043
4044void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
4045{
4046 int idx = 0;
4047 int entries;
4048
4049 if (bad_ioapic(address))
4050 return;
4051
4052 idx = nr_ioapics;
4053
4054 mp_ioapics[idx].type = MP_IOAPIC;
4055 mp_ioapics[idx].flags = MPC_APIC_USABLE;
4056 mp_ioapics[idx].apicaddr = address;
4057
4058 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
4059 mp_ioapics[idx].apicid = io_apic_unique_id(id);
4060 mp_ioapics[idx].apicver = io_apic_get_version(idx);
4061
4062
4063
4064
4065
4066 entries = io_apic_get_redir_entries(idx);
4067 mp_gsi_routing[idx].gsi_base = gsi_base;
4068 mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1;
4069
4070
4071
4072
4073 nr_ioapic_registers[idx] = entries;
4074
4075 if (mp_gsi_routing[idx].gsi_end >= gsi_top)
4076 gsi_top = mp_gsi_routing[idx].gsi_end + 1;
4077
4078 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
4079 "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
4080 mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
4081 mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end);
4082
4083 nr_ioapics++;
4084}
4085
4086
/*
 * Early setup of IO-APIC pin 0 as the system timer interrupt (irq 0),
 * before the normal IO-APIC initialization runs.  Used by platforms
 * that need the timer routed through the IO-APIC from the start.
 */
void __init pre_init_apic_IRQ0(void)
{
	struct irq_cfg *cfg;

	printk(KERN_INFO "Early APIC setup for system timer0\n");
#ifndef CONFIG_SMP
	/* UP: record the boot CPU in the present map ourselves. */
	physid_set_mask_of_physid(boot_cpu_physical_apicid,
					 &phys_cpu_present_map);
#endif
	/* Allocate descriptor and cfg for irq 0 on node 0. */
	cfg = alloc_irq_and_cfg_at(0, 0);

	setup_local_APIC();

	/* Route IO-APIC 0, pin 0 to irq 0, edge/high (0, 0). */
	add_pin_to_irq_node(cfg, 0, 0, 0);
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	setup_ioapic_irq(0, 0, 0, cfg, 0, 0);
}
4106