1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/mm.h>
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/sched.h>
28#include <linux/pci.h>
29#include <linux/mc146818rtc.h>
30#include <linux/compiler.h>
31#include <linux/acpi.h>
32#include <linux/module.h>
33#include <linux/syscore_ops.h>
34#include <linux/irqdomain.h>
35#include <linux/msi.h>
36#include <linux/htirq.h>
37#include <linux/freezer.h>
38#include <linux/kthread.h>
39#include <linux/jiffies.h>
40#include <linux/slab.h>
41#include <linux/bootmem.h>
42#include <linux/dmar.h>
43#include <linux/hpet.h>
44
45#include <asm/idle.h>
46#include <asm/io.h>
47#include <asm/smp.h>
48#include <asm/cpu.h>
49#include <asm/desc.h>
50#include <asm/proto.h>
51#include <asm/acpi.h>
52#include <asm/dma.h>
53#include <asm/timer.h>
54#include <asm/i8259.h>
55#include <asm/msidef.h>
56#include <asm/hypertransport.h>
57#include <asm/setup.h>
58#include <asm/irq_remapping.h>
59#include <asm/hpet.h>
60#include <asm/hw_irq.h>
61
62#include <asm/apic.h>
63
#define __apicdebuginit(type) static type __init

/* Iterate over all registered I/O APICs / over every pin of one I/O APIC. */
#define	for_each_ioapic(idx)		\
	for ((idx) = 0; (idx) < nr_ioapics; (idx)++)
#define	for_each_ioapic_reverse(idx)	\
	for ((idx) = nr_ioapics - 1; (idx) >= 0; (idx)--)
#define	for_each_pin(idx, pin)		\
	for ((pin) = 0; (pin) < ioapics[(idx)].nr_registers; (pin)++)
#define	for_each_ioapic_pin(idx, pin)	\
	for_each_ioapic((idx))		\
		for_each_pin((idx), (pin))

/* Walk the singly-linked (apic, pin) list attached to an irq_cfg. */
#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)
78
79
80
81
82
83int sis_apic_bug = -1;
84
85static DEFINE_RAW_SPINLOCK(ioapic_lock);
86static DEFINE_RAW_SPINLOCK(vector_lock);
87static DEFINE_MUTEX(ioapic_mutex);
88static unsigned int ioapic_dynirq_base;
89static int ioapic_initialized;
90
91struct mp_pin_info {
92 int trigger;
93 int polarity;
94 int node;
95 int set;
96 u32 count;
97};
98
/* Per-IOAPIC state, indexed by ioapic number. */
static struct ioapic {
	/*
	 * # of IRQ routing registers
	 */
	int nr_registers;
	/*
	 * Saved state during suspend/resume, or while enabling intr-remap.
	 */
	struct IO_APIC_route_entry *saved_registers;
	/* I/O APIC config as reported by the MP table / ACPI */
	struct mpc_ioapic mp_config;
	/* IO APIC gsi routing info */
	struct mp_ioapic_gsi gsi_config;
	struct ioapic_domain_cfg irqdomain_cfg;
	struct irq_domain *irqdomain;
	struct mp_pin_info *pin_info;
} ioapics[MAX_IO_APICS];

#define mpc_ioapic_ver(ioapic_idx)	ioapics[ioapic_idx].mp_config.apicver
118
119int mpc_ioapic_id(int ioapic_idx)
120{
121 return ioapics[ioapic_idx].mp_config.apicid;
122}
123
124unsigned int mpc_ioapic_addr(int ioapic_idx)
125{
126 return ioapics[ioapic_idx].mp_config.apicaddr;
127}
128
129struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx)
130{
131 return &ioapics[ioapic_idx].gsi_config;
132}
133
134static inline int mp_ioapic_pin_count(int ioapic)
135{
136 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);
137
138 return gsi_cfg->gsi_end - gsi_cfg->gsi_base + 1;
139}
140
141u32 mp_pin_to_gsi(int ioapic, int pin)
142{
143 return mp_ioapic_gsi_routing(ioapic)->gsi_base + pin;
144}
145
146
147
148
149
150
/*
 * Should this (ioapic, irq) pair be initialized at boot time?
 * Only relevant when legacy (PIC) IRQs exist: then the first I/O APIC
 * and all legacy IRQ numbers are set up early.
 */
static inline int mp_init_irq_at_boot(int ioapic, int irq)
{
	if (!nr_legacy_irqs())
		return 0;
	if (ioapic == 0)
		return 1;

	return irq >= 0 && irq < nr_legacy_irqs();
}
158
159static inline struct mp_pin_info *mp_pin_info(int ioapic_idx, int pin)
160{
161 return ioapics[ioapic_idx].pin_info + pin;
162}
163
164static inline struct irq_domain *mp_ioapic_irqdomain(int ioapic)
165{
166 return ioapics[ioapic].irqdomain;
167}
168
int nr_ioapics;

/* The one past the highest gsi number used */
u32 gsi_top;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

#ifdef CONFIG_EISA
/* Bus type (ISA/EISA/PCI) per MP bus id, needed for ELCR lookups. */
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;
188
189
190
191void disable_ioapic_support(void)
192{
193#ifdef CONFIG_PCI
194 noioapicquirk = 1;
195 noioapicreroute = -1;
196#endif
197 skip_ioapic_setup = 1;
198}
199
static int __init parse_noapic(char *str)
{
	/* "noapic" boot parameter: skip all IO-APIC setup */
	disable_ioapic_support();
	return 0;
}
early_param("noapic", parse_noapic);
207
208static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
209
210
/*
 * Record an MP interrupt-source entry in mp_irqs[].  Exact duplicates are
 * silently ignored; overflowing the fixed-size table is fatal.
 */
void mp_save_irq(struct mpc_intsrc *m)
{
	int i;

	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
		m->srcbusirq, m->dstapic, m->dstirq);

	/* Skip entries we already have */
	for (i = 0; i < mp_irq_entries; i++) {
		if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
			return;
	}

	memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}
229
230struct irq_pin_list {
231 int apic, pin;
232 struct irq_pin_list *next;
233};
234
235static struct irq_pin_list *alloc_irq_pin_list(int node)
236{
237 return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
238}
239
240int __init arch_early_irq_init(void)
241{
242 struct irq_cfg *cfg;
243 int i, node = cpu_to_node(0);
244
245 if (!nr_legacy_irqs())
246 io_apic_irqs = ~0UL;
247
248 for_each_ioapic(i) {
249 ioapics[i].saved_registers =
250 kzalloc(sizeof(struct IO_APIC_route_entry) *
251 ioapics[i].nr_registers, GFP_KERNEL);
252 if (!ioapics[i].saved_registers)
253 pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
254 }
255
256
257
258
259
260 for (i = 0; i < nr_legacy_irqs(); i++) {
261 cfg = alloc_irq_and_cfg_at(i, node);
262 cfg->vector = IRQ0_VECTOR + i;
263 cpumask_setall(cfg->domain);
264 }
265
266 return 0;
267}
268
/* Per-irq vector/domain configuration, stored as the irq's chip data. */
static inline struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq_get_chip_data(irq);
}
273
/*
 * Allocate a zeroed irq_cfg plus its two cpumasks on @node.
 * Returns NULL on any allocation failure (partial allocations are undone).
 * @irq is currently unused here.
 */
static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}
292
/* Release @cfg and detach it from irq @at; safe to call with cfg == NULL. */
static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
	if (!cfg)
		return;
	irq_set_chip_data(at, NULL);
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}
302
/*
 * Ensure an irq descriptor and irq_cfg exist for irq @at on @node.
 * Reuses an already-installed cfg when the descriptor pre-exists;
 * frees the freshly allocated descriptor again on cfg allocation failure.
 */
static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_cfg(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(at, node);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}
323
/*
 * MMIO register layout of an I/O APIC: an index/data register pair for
 * indirect register access, plus an EOI register (version >= 0x20 parts).
 */
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};

/* Virtual address of I/O APIC @idx, via its boot-time fixmap slot. */
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mpc_ioapic_addr(idx) & ~PAGE_MASK);
}
337
/* Directed EOI: ack @vector on I/O APIC @apic (needs apicver >= 0x20). */
void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}
343
/* Indirect register read: select @reg via the index register, read data. */
unsigned int native_io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}
350
/* Indirect register write: select @reg, then store @value. */
void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
358
359
360
361
362
363
364
365void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
366{
367 struct io_apic __iomem *io_apic = io_apic_base(apic);
368
369 if (sis_apic_bug)
370 writel(reg, &io_apic->index);
371 writel(value, &io_apic->data);
372}
373
/* View a 64-bit redirection-table entry as two 32-bit register words. */
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

/* Read RTE @pin of @apic; caller must hold ioapic_lock. */
static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;

	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);

	return eu.entry;
}
388
389static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
390{
391 union entry_union eu;
392 unsigned long flags;
393
394 raw_spin_lock_irqsave(&ioapic_lock, flags);
395 eu.entry = __ioapic_read_entry(apic, pin);
396 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
397
398 return eu.entry;
399}
400
401
402
403
404
405
406
407static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
408{
409 union entry_union eu = {{0, 0}};
410
411 eu.entry = e;
412 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
413 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
414}
415
/* Locked wrapper around __ioapic_write_entry(). */
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
424
425
426
427
428
429
430static void ioapic_mask_entry(int apic, int pin)
431{
432 unsigned long flags;
433 union entry_union eu = { .entry.mask = 1 };
434
435 raw_spin_lock_irqsave(&ioapic_lock, flags);
436 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
437 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
438 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
439}
440
441
442
443
444
445
446static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
447{
448 struct irq_pin_list **last, *entry;
449
450
451 last = &cfg->irq_2_pin;
452 for_each_irq_pin(entry, cfg->irq_2_pin) {
453 if (entry->apic == apic && entry->pin == pin)
454 return 0;
455 last = &entry->next;
456 }
457
458 entry = alloc_irq_pin_list(node);
459 if (!entry) {
460 pr_err("can not alloc irq_pin_list (%d,%d,%d)\n",
461 node, apic, pin);
462 return -ENOMEM;
463 }
464 entry->apic = apic;
465 entry->pin = pin;
466
467 *last = entry;
468 return 0;
469}
470
/* Unlink and free the (apic, pin) node from @cfg's pin list, if present. */
static void __remove_pin_from_irq(struct irq_cfg *cfg, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin)
		if (entry->apic == apic && entry->pin == pin) {
			*last = entry->next;
			kfree(entry);
			return;
		} else {
			last = &entry->next;
		}
}
485
/* Like __add_pin_to_irq_node(), but an allocation failure is fatal. */
static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	if (__add_pin_to_irq_node(cfg, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}
491
492
493
494
495static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
496 int oldapic, int oldpin,
497 int newapic, int newpin)
498{
499 struct irq_pin_list *entry;
500
501 for_each_irq_pin(entry, cfg->irq_2_pin) {
502 if (entry->apic == oldapic && entry->pin == oldpin) {
503 entry->apic = newapic;
504 entry->pin = newpin;
505
506 return;
507 }
508 }
509
510
511 add_pin_to_irq_node(cfg, node, newapic, newpin);
512}
513
/*
 * Read-modify-write the low RTE word of one pin:
 * new = (old & mask_and) | mask_or; then call @final (e.g. a posted-write
 * flush) if given.  Caller must hold ioapic_lock.
 */
static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}
528
529static void io_apic_modify_irq(struct irq_cfg *cfg,
530 int mask_and, int mask_or,
531 void (*final)(struct irq_pin_list *entry))
532{
533 struct irq_pin_list *entry;
534
535 for_each_irq_pin(entry, cfg->irq_2_pin)
536 __io_apic_modify_irq(entry, mask_and, mask_or, final);
537}
538
static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC: flushes any posted write so the
	 * preceding register update has really reached the chip.
	 */
	struct io_apic __iomem *io_apic;

	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}
550
/* Set the mask bit on every pin of @cfg and flush with a dummy read. */
static void mask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
559
560static void mask_ioapic_irq(struct irq_data *data)
561{
562 mask_ioapic(data->chip_data);
563}
564
/* Clear the mask bit on every pin of @cfg; caller holds ioapic_lock. */
static void __unmask_ioapic(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}
569
/* Locked wrapper around __unmask_ioapic(). */
static void unmask_ioapic(struct irq_cfg *cfg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_ioapic(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
578
579static void unmask_ioapic_irq(struct irq_data *data)
580{
581 unmask_ioapic(data->chip_data);
582}
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600void native_eoi_ioapic_pin(int apic, int pin, int vector)
601{
602 if (mpc_ioapic_ver(apic) >= 0x20) {
603 io_apic_eoi(apic, vector);
604 } else {
605 struct IO_APIC_route_entry entry, entry1;
606
607 entry = entry1 = __ioapic_read_entry(apic, pin);
608
609
610
611
612 entry1.mask = 1;
613 entry1.trigger = IOAPIC_EDGE;
614
615 __ioapic_write_entry(apic, pin, entry1);
616
617
618
619
620 __ioapic_write_entry(apic, pin, entry);
621 }
622}
623
/* EOI every IO-APIC pin routed to @irq, using cfg->vector. */
void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin)
		x86_io_apic_ops.eoi_ioapic_pin(entry->apic, entry->pin,
					       cfg->vector);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
635
/*
 * Quiesce one IO-APIC pin: mask it, clear any pending remote-IRR, then
 * wipe the routing entry (ioapic_mask_entry leaves only the mask bit set).
 */
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;

	/*
	 * Make sure the entry is masked and re-read the current
	 * configuration so a possibly set remote-IRR becomes visible.
	 */
	if (!entry.mask) {
		entry.mask = 1;
		ioapic_write_entry(apic, pin, entry);
		entry = ioapic_read_entry(apic, pin);
	}

	if (entry.irr) {
		unsigned long flags;

		/*
		 * Make sure the trigger mode is set to level. Explicit EOI
		 * doesn't clear the remote-IRR if the trigger mode is edge.
		 */
		if (!entry.trigger) {
			entry.trigger = IOAPIC_LEVEL;
			ioapic_write_entry(apic, pin, entry);
		}

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		x86_io_apic_ops.eoi_ioapic_pin(apic, pin, entry.vector);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	}

	/*
	 * Clear the rest of the bits in the IO-APIC RTE except for the mask
	 * bit, then verify that the remote-IRR really went away.
	 */
	ioapic_mask_entry(apic, pin);
	entry = ioapic_read_entry(apic, pin);
	if (entry.irr)
		pr_err("Unable to reset IRR for apic: %d, pin :%d\n",
		       mpc_ioapic_id(apic), pin);
}
683
684static void clear_IO_APIC (void)
685{
686 int apic, pin;
687
688 for_each_ioapic_pin(apic, pin)
689 clear_IO_APIC_pin(apic, pin);
690}
691
692#ifdef CONFIG_X86_32
693
694
695
696
697
698#define MAX_PIRQS 8
699static int pirq_entries[MAX_PIRQS] = {
700 [0 ... MAX_PIRQS - 1] = -1
701};
702
/*
 * "pirq=" boot parameter parser: a list of IRQ numbers, one per PIRQ
 * line.  Note the entries are stored in reverse order (PIRQ0 goes to the
 * last slot), matching how pin_2_irq() indexes pirq_entries[].
 */
static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}
726
727__setup("pirq=", ioapic_pirq_setup);
728#endif
729
730
731
732
733int save_ioapic_entries(void)
734{
735 int apic, pin;
736 int err = 0;
737
738 for_each_ioapic(apic) {
739 if (!ioapics[apic].saved_registers) {
740 err = -ENOMEM;
741 continue;
742 }
743
744 for_each_pin(apic, pin)
745 ioapics[apic].saved_registers[pin] =
746 ioapic_read_entry(apic, pin);
747 }
748
749 return err;
750}
751
752
753
754
755void mask_ioapic_entries(void)
756{
757 int apic, pin;
758
759 for_each_ioapic(apic) {
760 if (!ioapics[apic].saved_registers)
761 continue;
762
763 for_each_pin(apic, pin) {
764 struct IO_APIC_route_entry entry;
765
766 entry = ioapics[apic].saved_registers[pin];
767 if (!entry.mask) {
768 entry.mask = 1;
769 ioapic_write_entry(apic, pin, entry);
770 }
771 }
772 }
773}
774
775
776
777
778int restore_ioapic_entries(void)
779{
780 int apic, pin;
781
782 for_each_ioapic(apic) {
783 if (!ioapics[apic].saved_registers)
784 continue;
785
786 for_each_pin(apic, pin)
787 ioapic_write_entry(apic, pin,
788 ioapics[apic].saved_registers[pin]);
789 }
790 return 0;
791}
792
793
794
795
796static int find_irq_entry(int ioapic_idx, int pin, int type)
797{
798 int i;
799
800 for (i = 0; i < mp_irq_entries; i++)
801 if (mp_irqs[i].irqtype == type &&
802 (mp_irqs[i].dstapic == mpc_ioapic_id(ioapic_idx) ||
803 mp_irqs[i].dstapic == MP_APIC_ALL) &&
804 mp_irqs[i].dstirq == pin)
805 return i;
806
807 return -1;
808}
809
810
811
812
813static int __init find_isa_irq_pin(int irq, int type)
814{
815 int i;
816
817 for (i = 0; i < mp_irq_entries; i++) {
818 int lbus = mp_irqs[i].srcbus;
819
820 if (test_bit(lbus, mp_bus_not_pci) &&
821 (mp_irqs[i].irqtype == type) &&
822 (mp_irqs[i].srcbusirq == irq))
823
824 return mp_irqs[i].dstirq;
825 }
826 return -1;
827}
828
/* Like find_isa_irq_pin(), but returns the index of the target IOAPIC. */
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}

	if (i < mp_irq_entries) {
		int ioapic_idx;

		for_each_ioapic(ioapic_idx)
			if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic)
				return ioapic_idx;
	}

	return -1;
}
852
853#ifdef CONFIG_EISA
854
855
856
/*
 * EISA Edge/Level control register, ELCR: one bit per legacy IRQ,
 * 1 = level triggered.  Ports 0x4d0/0x4d1 cover IRQ0-7 / IRQ8-15.
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < nr_legacy_irqs()) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}
867
868#endif
869
870
871
872
873#define default_ISA_trigger(idx) (0)
874#define default_ISA_polarity(idx) (0)
875
876
877
878
879
880
881#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq))
882#define default_EISA_polarity(idx) default_ISA_polarity(idx)
883
884
885
886
887#define default_PCI_trigger(idx) (1)
888#define default_PCI_polarity(idx) (1)
889
static int irq_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (low active or high active)
	 * from bits 0-1 of the MP table irqflag field:
	 */
	switch (mp_irqs[idx].irqflag & 3)
	{
	case 0: /* conforms, ie. bus-type dependent polarity */
		if (test_bit(bus, mp_bus_not_pci))
			polarity = default_ISA_polarity(idx);
		else
			polarity = default_PCI_polarity(idx);
		break;
	case 1: /* high active */
	{
		polarity = 0;
		break;
	}
	case 2: /* reserved */
	{
		pr_warn("broken BIOS!!\n");
		polarity = 1;
		break;
	}
	case 3: /* low active */
	{
		polarity = 1;
		break;
	}
	default: /* invalid */
	{
		pr_warn("broken BIOS!!\n");
		polarity = 1;
		break;
	}
	}
	return polarity;
}
931
static int irq_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive)
	 * from bits 2-3 of the MP table irqflag field:
	 */
	switch ((mp_irqs[idx].irqflag>>2) & 3)
	{
	case 0: /* conforms, ie. bus-type dependent trigger mode */
		if (test_bit(bus, mp_bus_not_pci))
			trigger = default_ISA_trigger(idx);
		else
			trigger = default_PCI_trigger(idx);
#ifdef CONFIG_EISA
		switch (mp_bus_id_to_type[bus]) {
		case MP_BUS_ISA: /* ISA pin, default already chosen above */
		{
			/* set before the switch */
			break;
		}
		case MP_BUS_EISA: /* EISA pin: read the ELCR */
		{
			trigger = default_EISA_trigger(idx);
			break;
		}
		case MP_BUS_PCI: /* PCI pin, default already chosen above */
		{
			/* set before the switch */
			break;
		}
		default:
		{
			pr_warn("broken BIOS!!\n");
			trigger = 1;
			break;
		}
		}
#endif
		break;
	case 1: /* edge */
	{
		trigger = 0;
		break;
	}
	case 2: /* reserved */
	{
		pr_warn("broken BIOS!!\n");
		trigger = 1;
		break;
	}
	case 3: /* level */
	{
		trigger = 1;
		break;
	}
	default: /* invalid */
	{
		pr_warn("broken BIOS!!\n");
		trigger = 0;
		break;
	}
	}
	return trigger;
}
998
/*
 * Allocate an IRQ number for (@domain, @gsi, @pin) according to the
 * domain's mapping policy.  Returns the irq number or -1.
 */
static int alloc_irq_from_domain(struct irq_domain *domain, u32 gsi, int pin)
{
	int irq = -1;
	int ioapic = (int)(long)domain->host_data;
	int type = ioapics[ioapic].irqdomain_cfg.type;

	switch (type) {
	case IOAPIC_DOMAIN_LEGACY:
		/*
		 * For legacy domains, GSIs inside the legacy range use the
		 * dynamic pin->irq mapping; GSIs above it keep the 1:1
		 * irq == gsi relationship via a strict mapping.
		 */
		if (gsi < nr_legacy_irqs())
			irq = irq_create_mapping(domain, pin);
		else if (irq_create_strict_mappings(domain, gsi, pin, 1) == 0)
			irq = gsi;
		break;
	case IOAPIC_DOMAIN_STRICT:
		/* always irq == gsi */
		if (irq_create_strict_mappings(domain, gsi, pin, 1) == 0)
			irq = gsi;
		break;
	case IOAPIC_DOMAIN_DYNAMIC:
		irq = irq_create_mapping(domain, pin);
		break;
	default:
		WARN(1, "ioapic: unknown irqdomain type %d\n", type);
		break;
	}

	return irq > 0 ? irq : -1;
}
1030
/*
 * Map (ioapic, pin) to an IRQ number, optionally allocating the mapping
 * (IOAPIC_MAP_ALLOC) and refcounting it in the per-pin info.
 * Returns the irq number or -1.
 */
static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
			     unsigned int flags)
{
	int irq;
	struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);
	struct mp_pin_info *info = mp_pin_info(ioapic, pin);

	if (!domain)
		return -1;

	mutex_lock(&ioapic_mutex);

	/*
	 * Don't use irqdomain to manage ISA IRQs because there may be
	 * multiple IOAPIC pins sharing the same ISA IRQ number and
	 * irqdomain only supports 1:1 mapping between IOAPIC pin and
	 * IRQ number. A typical IOAPIC has 24 pins, pin 0-15 are used
	 * for legacy IRQs and pin 16-23 are used for PCI IRQs (PIRQ A-H).
	 * When ACPI is disabled, only legacy IRQ numbers (IRQ0-15) are
	 * available, and some BIOSes may use MP Interrupt Source records
	 * to override IRQ numbers for PIRQs instead of reprogramming
	 * the interrupt routing logic. Thus there may be multiple pins
	 * sharing the same legacy IRQ number when ACPI is disabled.
	 */
	if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
		if (flags & IOAPIC_MAP_ALLOC) {
			if (info->count == 0 &&
			    mp_irqdomain_map(domain, irq, pin) != 0)
				irq = -1;

			/* special handling for timer IRQ0 */
			if (irq == 0)
				info->count++;
		}
	} else {
		irq = irq_find_mapping(domain, pin);
		if (irq <= 0 && (flags & IOAPIC_MAP_ALLOC))
			irq = alloc_irq_from_domain(domain, gsi, pin);
	}

	if (flags & IOAPIC_MAP_ALLOC) {
		/* map the legacy pin on first use */
		if (irq < nr_legacy_irqs() && info->count == 1 &&
		    mp_irqdomain_map(domain, irq, pin) != 0)
			irq = -1;

		if (irq > 0)
			info->count++;
		else if (info->count == 0)
			info->set = 0;
	}

	mutex_unlock(&ioapic_mutex);

	return irq > 0 ? irq : -1;
}
1088
/* Resolve the IRQ number for MP entry @idx on (@ioapic, @pin). */
static int pin_2_irq(int idx, int ioapic, int pin, unsigned int flags)
{
	u32 gsi = mp_pin_to_gsi(ioapic, pin);

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		pr_err("broken BIOS or MPTABLE parser, ayiee!!\n");

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				int irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
				return irq;
			}
		}
	}
#endif

	return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags);
}
1121
/*
 * Map a GSI to an IRQ number.  With IOAPIC_MAP_CHECK the GSI must have a
 * matching MP interrupt-source entry.  Returns the irq or -1.
 */
int mp_map_gsi_to_irq(u32 gsi, unsigned int flags)
{
	int ioapic, pin, idx;

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0)
		return -1;

	pin = mp_find_ioapic_pin(ioapic, gsi);
	idx = find_irq_entry(ioapic, pin, mp_INT);
	if ((flags & IOAPIC_MAP_CHECK) && idx < 0)
		return -1;

	return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags);
}
1137
/*
 * Drop one reference on @irq's pin mapping; dispose of the mapping when
 * the refcount hits zero.  Counterpart of mp_map_pin_to_irq().
 */
void mp_unmap_irq(int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct mp_pin_info *info;
	int ioapic, pin;

	if (!data || !data->domain)
		return;

	ioapic = (int)(long)data->domain->host_data;
	pin = (int)data->hwirq;
	info = mp_pin_info(ioapic, pin);

	mutex_lock(&ioapic_mutex);
	if (--info->count == 0) {
		info->set = 0;
		/* legacy domain irqs keep their descriptor, only unmap */
		if (irq < nr_legacy_irqs() &&
		    ioapics[ioapic].irqdomain_cfg.type == IOAPIC_DOMAIN_LEGACY)
			mp_irqdomain_unmap(data->domain, irq);
		else
			irq_dispose_mapping(irq);
	}
	mutex_unlock(&ioapic_mutex);
}
1162
1163
1164
1165
1166
1167int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
1168 struct io_apic_irq_attr *irq_attr)
1169{
1170 int irq, i, best_ioapic = -1, best_idx = -1;
1171
1172 apic_printk(APIC_DEBUG,
1173 "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
1174 bus, slot, pin);
1175 if (test_bit(bus, mp_bus_not_pci)) {
1176 apic_printk(APIC_VERBOSE,
1177 "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
1178 return -1;
1179 }
1180
1181 for (i = 0; i < mp_irq_entries; i++) {
1182 int lbus = mp_irqs[i].srcbus;
1183 int ioapic_idx, found = 0;
1184
1185 if (bus != lbus || mp_irqs[i].irqtype != mp_INT ||
1186 slot != ((mp_irqs[i].srcbusirq >> 2) & 0x1f))
1187 continue;
1188
1189 for_each_ioapic(ioapic_idx)
1190 if (mpc_ioapic_id(ioapic_idx) == mp_irqs[i].dstapic ||
1191 mp_irqs[i].dstapic == MP_APIC_ALL) {
1192 found = 1;
1193 break;
1194 }
1195 if (!found)
1196 continue;
1197
1198
1199 irq = pin_2_irq(i, ioapic_idx, mp_irqs[i].dstirq, 0);
1200 if (irq > 0 && !IO_APIC_IRQ(irq))
1201 continue;
1202
1203 if (pin == (mp_irqs[i].srcbusirq & 3)) {
1204 best_idx = i;
1205 best_ioapic = ioapic_idx;
1206 goto out;
1207 }
1208
1209
1210
1211
1212
1213 if (best_idx < 0) {
1214 best_idx = i;
1215 best_ioapic = ioapic_idx;
1216 }
1217 }
1218 if (best_idx < 0)
1219 return -1;
1220
1221out:
1222 irq = pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
1223 IOAPIC_MAP_ALLOC);
1224 if (irq > 0)
1225 set_io_apic_irq_attr(irq_attr, best_ioapic,
1226 mp_irqs[best_idx].dstirq,
1227 irq_trigger(best_idx),
1228 irq_polarity(best_idx));
1229 return irq;
1230}
1231EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
1232
void lock_vector_lock(void)
{
	/*
	 * Used so that the set of online cpus does not change during
	 * assign_irq_vector() (e.g. across CPU hotplug).
	 */
	raw_spin_lock(&vector_lock);
}
1240
/* Counterpart of lock_vector_lock(). */
void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
1245
/*
 * Assign a free vector for @irq deliverable on the CPUs in @mask; caller
 * must hold vector_lock.  Returns 0 on success, -EBUSY while a prior move
 * is still in flight, -ENOSPC when no vector is free, -ENOMEM on
 * allocation failure.
 */
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	cpumask_clear(cfg->old_domain);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask, mask);

		if (cpumask_subset(tmp_mask, cfg->domain)) {
			err = 0;
			if (cpumask_equal(tmp_mask, cfg->domain))
				break;
			/*
			 * New cpumask using the vector is a proper subset of
			 * the current in use mask. So cleanup the vector
			 * allocation for the members that are not used anymore.
			 */
			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
			break;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		/* wrapped around: this domain is exhausted, try other cpus */
		if (unlikely(current_vector == vector)) {
			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
			continue;
		}

		if (test_bit(vector, used_vectors))
			goto next;

		/* vector must be free on every cpu of the domain */
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
			if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED)
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (cfg->vector) {
			cpumask_copy(cfg->old_domain, cfg->domain);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);
	return err;
}
1337
1338int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1339{
1340 int err;
1341 unsigned long flags;
1342
1343 raw_spin_lock_irqsave(&vector_lock, flags);
1344 err = __assign_irq_vector(irq, cfg, mask);
1345 raw_spin_unlock_irqrestore(&vector_lock, flags);
1346 return err;
1347}
1348
/*
 * Release @irq's vector on every cpu of its domain (and, if a move was in
 * flight, of its old domain).  Caller must hold vector_lock.
 */
static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	/* also scrub the stale entries left on the old domain's cpus */
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
			break;
		}
	}
	cfg->move_in_progress = 0;
}
1374
void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we setup our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the inuse vectors */
	for_each_active_irq(irq) {
		cfg = irq_cfg(irq);
		if (!cfg)
			continue;

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq <= VECTOR_UNDEFINED)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
	}
	raw_spin_unlock(&vector_lock);
}
1410
1411static struct irq_chip ioapic_chip;
1412
#ifdef CONFIG_X86_32
/*
 * 32-bit: look up the MP-table trigger mode for @irq by scanning every
 * IOAPIC pin; unknown irqs default to edge.
 */
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for_each_ioapic_pin(apic, pin) {
		idx = find_irq_entry(apic, pin, mp_INT);
		if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin, 0)))
			return irq_trigger(idx);
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
/* 64-bit: all IO-APIC interrupts are treated as level triggered here. */
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif
1434
/*
 * Install the ioapic irq_chip and flow handler for @irq: fasteoi for
 * level triggered interrupts, edge handler otherwise.  Interrupt
 * remapping may substitute its own chip via setup_remapped_irq().
 */
static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
				 unsigned long trigger)
{
	struct irq_chip *chip = &ioapic_chip;
	irq_flow_handler_t hdl;
	bool fasteoi;

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL) {
		irq_set_status_flags(irq, IRQ_LEVEL);
		fasteoi = true;
	} else {
		irq_clear_status_flags(irq, IRQ_LEVEL);
		fasteoi = false;
	}

	/* remapped irqs choose the handler purely by trigger mode */
	if (setup_remapped_irq(irq, cfg, chip))
		fasteoi = trigger != 0;

	hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
	irq_set_chip_and_handler_name(irq, chip, hdl,
				      fasteoi ? "fasteoi" : "edge");
}
1458
/*
 * Fill in an IO-APIC routing entry for @irq from the given destination,
 * vector and pin attributes.  Always returns 0.
 */
int native_setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
			      unsigned int destination, int vector,
			      struct io_apic_irq_attr *attr)
{
	memset(entry, 0, sizeof(*entry));

	entry->delivery_mode = apic->irq_delivery_mode;
	entry->dest_mode     = apic->irq_dest_mode;
	entry->dest	     = destination;
	entry->vector	     = vector;
	entry->mask	     = 0;			/* enable IRQ */
	entry->trigger	     = attr->trigger;
	entry->polarity	     = attr->polarity;

	/*
	 * Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}
1482
/*
 * Program one IO-APIC pin for @irq: assign a vector, compute the
 * destination APIC id, build and write the routing entry, and register
 * the irq chip/handler.  Errors undo the vector assignment and bail.
 */
static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
			     struct io_apic_irq_attr *attr)
{
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (!IO_APIC_IRQ(irq))
		return;

	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
		return;

	if (apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(),
					 &dest)) {
		pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
		__clear_irq_vector(irq, cfg);

		return;
	}

	apic_printk(APIC_VERBOSE,KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i Dest:%d)\n",
		    attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin,
		    cfg->vector, irq, attr->trigger, attr->polarity, dest);

	if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) {
		pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
		__clear_irq_vector(irq, cfg);

		return;
	}

	ioapic_register_intr(irq, cfg, attr->trigger);
	/* keep legacy irqs behind the PIC until the IO-APIC takes over */
	if (irq < nr_legacy_irqs())
		legacy_pic->mask(irq);

	ioapic_write_entry(attr->ioapic, attr->ioapic_pin, entry);
}
1524
1525static void __init setup_IO_APIC_irqs(void)
1526{
1527 unsigned int ioapic, pin;
1528 int idx;
1529
1530 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1531
1532 for_each_ioapic_pin(ioapic, pin) {
1533 idx = find_irq_entry(ioapic, pin, mp_INT);
1534 if (idx < 0)
1535 apic_printk(APIC_VERBOSE,
1536 KERN_DEBUG " apic %d pin %d not connected\n",
1537 mpc_ioapic_id(ioapic), pin);
1538 else
1539 pin_2_irq(idx, ioapic, pin,
1540 ioapic ? 0 : IOAPIC_MAP_ALLOC);
1541 }
1542}
1543
1544
1545
1546
/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 * Hand-builds an edge-triggered, active-high redirection entry for the
 * 8254 timer during the check_timer() dance, bypassing the normal
 * setup_ioapic_irq() path.
 */
static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
					unsigned int pin, int vector)
{
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ to the boot CPU
	 * set; fall back to BAD_APICID if no online target exists.
	 */
	if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(),
						  apic->target_cpus(), &dest)))
		dest = BAD_APICID;

	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* unmask IRQ now */
	entry.dest = dest;
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;		/* active high */
	entry.trigger = 0;		/* edge */
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
				      "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(ioapic_idx, pin, entry);
}
1583
/*
 * Dump the redirection table of IO-APIC @apic in plain (non-remapped)
 * format.  @nr_entries is the highest valid entry INDEX (the IO-APIC
 * version register reports count-1), hence the inclusive <= loop bound.
 */
void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
{
	int i;

	pr_debug(" NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect:\n");

	for (i = 0; i <= nr_entries; i++) {
		struct IO_APIC_route_entry entry;

		entry = ioapic_read_entry(apic, i);

		pr_debug(" %02x %02X ", i, entry.dest);
		pr_cont("%1d %1d %1d %1d %1d "
			"%1d %1d %02X\n",
			entry.mask,
			entry.trigger,
			entry.irr,
			entry.polarity,
			entry.delivery_status,
			entry.dest_mode,
			entry.delivery_mode,
			entry.vector);
	}
}
1608
/*
 * Dump the redirection table of IO-APIC @apic, decoding each entry in
 * the Intel interrupt-remapping format.  As in the native variant,
 * @nr_entries is the highest valid index, so the loop is inclusive.
 */
void intel_ir_io_apic_print_entries(unsigned int apic,
				    unsigned int nr_entries)
{
	int i;

	pr_debug(" NR Indx Fmt Mask Trig IRR Pol Stat Indx2 Zero Vect:\n");

	for (i = 0; i <= nr_entries; i++) {
		struct IR_IO_APIC_route_entry *ir_entry;
		struct IO_APIC_route_entry entry;

		entry = ioapic_read_entry(apic, i);

		/* Reinterpret the raw 64-bit entry as a remapped-format one. */
		ir_entry = (struct IR_IO_APIC_route_entry *)&entry;

		pr_debug(" %02x %04X ", i, ir_entry->index);
		pr_cont("%1d   %1d    %1d    %1d   %1d   "
			"%1d    %1d     %X    %02X\n",
			ir_entry->format,
			ir_entry->mask,
			ir_entry->trigger,
			ir_entry->irr,
			ir_entry->polarity,
			ir_entry->delivery_status,
			ir_entry->index2,
			ir_entry->zero,
			ir_entry->vector);
	}
}
1638
/*
 * Re-initialize ioapic_lock unconditionally.  NOTE(review): presumably
 * used on crash/emergency paths where the lock may be held by a wedged
 * CPU and must be broken so the IO-APIC can still be accessed - caller
 * context is not visible here; confirm against callers.
 */
void ioapic_zap_locks(void)
{
	raw_spin_lock_init(&ioapic_lock);
}
1643
1644__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
1645{
1646 union IO_APIC_reg_00 reg_00;
1647 union IO_APIC_reg_01 reg_01;
1648 union IO_APIC_reg_02 reg_02;
1649 union IO_APIC_reg_03 reg_03;
1650 unsigned long flags;
1651
1652 raw_spin_lock_irqsave(&ioapic_lock, flags);
1653 reg_00.raw = io_apic_read(ioapic_idx, 0);
1654 reg_01.raw = io_apic_read(ioapic_idx, 1);
1655 if (reg_01.bits.version >= 0x10)
1656 reg_02.raw = io_apic_read(ioapic_idx, 2);
1657 if (reg_01.bits.version >= 0x20)
1658 reg_03.raw = io_apic_read(ioapic_idx, 3);
1659 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1660
1661 printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
1662 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1663 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1664 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1665 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1666
1667 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01);
1668 printk(KERN_DEBUG "....... : max redirection entries: %02X\n",
1669 reg_01.bits.entries);
1670
1671 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1672 printk(KERN_DEBUG "....... : IO APIC version: %02X\n",
1673 reg_01.bits.version);
1674
1675
1676
1677
1678
1679
1680 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1681 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1682 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1683 }
1684
1685
1686
1687
1688
1689
1690 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1691 reg_03.raw != reg_01.raw) {
1692 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1693 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1694 }
1695
1696 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1697
1698 x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries);
1699}
1700
/*
 * Debug dump of all IO-APICs plus the irq -> (apic, pin) mappings for
 * every active irq that is driven by the IO-APIC chip.
 */
__apicdebuginit(void) print_IO_APICs(void)
{
	int ioapic_idx;
	struct irq_cfg *cfg;
	unsigned int irq;
	struct irq_chip *chip;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for_each_ioapic(ioapic_idx)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mpc_ioapic_id(ioapic_idx),
		       ioapics[ioapic_idx].nr_registers);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for_each_ioapic(ioapic_idx)
		print_IO_APIC(ioapic_idx);

	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_active_irq(irq) {
		struct irq_pin_list *entry;

		/* Skip irqs handled by other chips (MSI, HPET, ...). */
		chip = irq_get_chip(irq);
		if (chip != &ioapic_chip)
			continue;

		cfg = irq_cfg(irq);
		if (!cfg)
			continue;
		entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, cfg->irq_2_pin)
			pr_cont("-> %d:%d", entry->apic, entry->pin);
		pr_cont("\n");
	}

	printk(KERN_INFO ".................................... done.\n");
}
1745
1746__apicdebuginit(void) print_APIC_field(int base)
1747{
1748 int i;
1749
1750 printk(KERN_DEBUG);
1751
1752 for (i = 0; i < 8; i++)
1753 pr_cont("%08x", apic_read(base + i*0x10));
1754
1755 pr_cont("\n");
1756}
1757
/*
 * Dump the local APIC registers of the CPU this runs on.  Intended to
 * be invoked via smp_call_function_single(); @dummy is unused.
 * Register availability is gated on the APIC version (integrated vs.
 * external 82489DX, xAPIC vs. older) and on the number of LVT entries.
 */
__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* ARBPRI exists on integrated APICs but not in xAPIC mode. */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors (maxlvt == 3).
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	/* DFR does not exist in x2apic mode. */
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP, ESR must be primed. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

	/* AMD extended APIC: dump the extra LVT entries as well. */
	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}
1859
/*
 * Dump the local APIC of every online CPU with an id below @maxcpu.
 * Runs print_local_APIC() on each target CPU via IPI; preemption is
 * disabled so the cpu iteration stays stable.
 */
__apicdebuginit(void) print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}
1875
/*
 * Dump the state of the legacy 8259A PIC pair: interrupt mask, request
 * and in-service registers, plus the ELCR (edge/level control) ports.
 * Master PIC is at ports 0x20/0x21, slave at 0xa0/0xa1.
 */
__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	/* OCW3 0x0b selects the ISR for reading, 0x0a restores the IRR. */
	outb(0x0b,0xa0);
	outb(0x0b,0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a,0xa0);
	outb(0x0a,0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	/* ELCR: one bit per irq, set = level triggered. */
	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}
1907
/* Number of CPUs whose local APIC gets dumped by print_ICs(); default 1. */
static int __initdata show_lapic = 1;
/* Parse "show_lapic=N" or "show_lapic=all" from the kernel command line. */
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);
1924
/*
 * Late-init debug dump of all interrupt controllers (PIC, local APICs,
 * IO-APICs).  Skipped entirely unless apic=debug/verbose was given.
 */
__apicdebuginit(int) print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}
1941
1942late_initcall(print_ICs);
1943
1944
1945
/* Where (if anywhere) the 8259A's ExtINT output enters an IO-APIC; -1/-1 = not found. */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

/*
 * Locate the 8259A routing (by scanning hardware and by consulting the
 * MP table) and record it in ioapic_i8259, then clear all IO-APIC
 * entries so we start from a known state.
 */
void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic, pin;

	if (!nr_legacy_irqs())
		return;

	for_each_ioapic_pin(apic, pin) {
		/* See if any of the pins is in ExtINT mode */
		struct IO_APIC_route_entry entry = ioapic_read_entry(apic, pin);

		/*
		 * An unmasked ExtINT entry means the BIOS wired the
		 * 8259A's output through this IO-APIC pin.
		 */
		if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
			ioapic_i8259.apic = apic;
			ioapic_i8259.pin = pin;
			goto found_i8259;
		}
	}
 found_i8259:
	/*
	 * Cross-check with what the MP table claims about the ExtINT
	 * routing (IRQ0 entry of type mp_ExtINT).
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);

	/* Trust the MP table if nothing was found in hardware. */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}
1995
/*
 * Tear down IO-APIC delivery on shutdown/kexec.  If the 8259A was
 * routed through an IO-APIC pin, put that pin back into virtual-wire
 * ExtINT mode pointing at the BSP so legacy interrupts keep working,
 * then restore the BSP's local APIC.
 */
void native_disable_io_apic(void)
{
	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 */
	if (ioapic_i8259.pin != -1) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(ioapic_i8259.pin != -1);

}
2027
2028
2029
2030
/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!nr_legacy_irqs())
		return;

	/* Platform-specific teardown (e.g. restore virtual-wire mode). */
	x86_io_apic_ops.disable();
}
2043
2044#ifdef CONFIG_X86_32
2045
2046
2047
2048
2049
2050
/*
 * Validate and, where necessary, rewrite each IO-APIC's physical id so
 * that it matches the (fixed-up) MP-table id and does not collide with
 * any CPU's APIC id.  Old 32-bit hardware needs unique ids on the APIC
 * bus; BIOS bugs in this area are common and repaired here.
 */
void __init setup_ioapic_ids_from_mpc_nocheck(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int ioapic_idx;
	int i;
	unsigned char old_id;
	unsigned long flags;

	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for_each_ioapic(ioapic_idx) {
		/* Read the register 0 value */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(ioapic_idx, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mpc_ioapic_id(ioapic_idx);

		/* An out-of-range MP-table id: fall back to the hardware id. */
		if (mpc_ioapic_id(ioapic_idx) >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				ioapic_idx, mpc_ioapic_id(ioapic_idx));
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (apic->check_apicid_used(&phys_id_present_map,
					    mpc_ioapic_id(ioapic_idx))) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				ioapic_idx, mpc_ioapic_id(ioapic_idx));
			/* Grab the first id that no APIC is using. */
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			ioapics[ioapic_idx].mp_config.apicid = i;
		} else {
			physid_mask_t tmp;
			apic->apicid_to_cpu_present(mpc_ioapic_id(ioapic_idx),
						    &tmp);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mpc_ioapic_id(ioapic_idx));
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}

		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mpc_ioapic_id(ioapic_idx))
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].dstapic == old_id)
					mp_irqs[i].dstapic
						= mpc_ioapic_id(ioapic_idx);

		/*
		 * Update the ID register according to the right value
		 * from the MPC table if they are different.
		 */
		if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID)
			continue;

		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mpc_ioapic_id(ioapic_idx));

		reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic_idx, 0, reg_00.raw);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(ioapic_idx, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx))
			pr_cont("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}
2151
/*
 * Wrapper around setup_ioapic_ids_from_mpc_nocheck(): skip the id
 * fix-ups entirely when ACPI provides the IO-APIC enumeration, or when
 * the platform (non-Intel or xAPIC-capable) doesn't need unique
 * APIC-bus ids.
 */
void __init setup_ioapic_ids_from_mpc(void)
{

	if (acpi_ioapic)
		return;
	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	setup_ioapic_ids_from_mpc_nocheck();
}
2166#endif
2167
/* When set, timer_irq_works() is skipped and assumed to succeed. */
int no_timer_check __initdata;

/* Handle the "no_timer_check" boot parameter. */
static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
2176
2177
2178
2179
2180
2181
2182
2183
2184
/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ.  So do a sanity check: let ~10ms of wall time
 * pass with interrupts enabled and verify that jiffies advanced, i.e.
 * that timer interrupts are actually being delivered.
 * Returns 1 if the timer works, 0 otherwise.
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten timer ticks' worth of time elapse. */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
/*
 * Startup hook for IO-APIC irqs: for legacy irqs, mask the 8259A line
 * and remember whether an interrupt was already pending there, then
 * unmask the IO-APIC entry.  Returns 1 if an interrupt was pending in
 * the PIC (so the core can replay it), 0 otherwise.
 */
static unsigned int startup_ioapic_irq(struct irq_data *data)
{
	int was_pending = 0, irq = data->irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < nr_legacy_irqs()) {
		legacy_pic->mask(irq);
		if (legacy_pic->irq_pending(irq))
			was_pending = 1;
	}
	__unmask_ioapic(data->chip_data);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}
2252
/*
 * Re-trigger an irq in software by sending its vector as an IPI to the
 * first online CPU in the irq's vector domain.  Always reports success.
 */
static int ioapic_retrigger_irq(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned long flags;
	int cpu;

	/* vector_lock keeps cfg->domain/vector stable while we pick a CPU. */
	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276#ifdef CONFIG_SMP
/*
 * Tell every online CPU in the irq's OLD vector domain to release the
 * stale vector by sending it IRQ_MOVE_CLEANUP_VECTOR, then mark the
 * migration as finished.  Falls back to per-CPU IPIs if a temporary
 * cpumask cannot be allocated (atomic context).
 */
void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;
		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}
2292
/*
 * Handler for IRQ_MOVE_CLEANUP_VECTOR: walk this CPU's vector table and
 * free every vector that belonged to an irq which has since been moved
 * to a different vector/CPU.  A vector with a still-pending interrupt
 * (IRR bit set) is not freed yet; instead the cleanup IPI is re-sent to
 * ourselves so we retry after the pending interrupt is handled.
 */
asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	irq_enter();
	exit_idle();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __this_cpu_read(vector_irq[vector]);

		if (irq <= VECTOR_UNDEFINED)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		if (!cfg)
			continue;

		raw_spin_lock(&desc->lock);

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (cfg->move_in_progress)
			goto unlock;

		/* Still the irq's current vector on this CPU - keep it. */
		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr  & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	irq_exit();
}
2351
2352static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
2353{
2354 unsigned me;
2355
2356 if (likely(!cfg->move_in_progress))
2357 return;
2358
2359 me = smp_processor_id();
2360
2361 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2362 send_cleanup_vector(cfg);
2363}
2364
/* Complete a pending move using the vector of the interrupt being acked
 * (~orig_ax holds the vector number pushed by the irq entry code). */
static void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
2369
/*
 * Force the completion of a pending vector move for @irq, pretending
 * the new vector has fired (used e.g. when the irq cannot fire anymore).
 */
void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	if (!cfg)
		return;

	__irq_complete_move(cfg, cfg->vector);
}
2379#else
/* !CONFIG_SMP: vectors never migrate, nothing to complete. */
static inline void irq_complete_move(struct irq_cfg *cfg) { }
2381#endif
2382
/*
 * Rewrite every redirection entry of @irq with a new destination and
 * the irq's current vector.  Redirection entry N occupies registers
 * 0x10+2N (low word: vector etc.) and 0x11+2N (high word: destination).
 * Caller must hold ioapic_lock.
 */
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
	int apic, pin;
	struct irq_pin_list *entry;
	u8 vector = cfg->vector;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;

		apic = entry->apic;
		pin = entry->pin;

		io_apic_write(apic, 0x11 + pin*2, dest);
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, 0x10 + pin*2, reg);
	}
}
2402
2403
2404
2405
2406
2407
/*
 * Either sets data->affinity to a new value, or returns an error. Caller
 * must hold ioapic_lock and re-program the IO-APIC with *dest_id on
 * success.  If the apicid lookup fails after a vector was assigned, the
 * previous affinity's vector is restored.
 */
int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  unsigned int *dest_id)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int irq = data->irq;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, &dest_id[0]);
	if (err) {
		/* Roll back to the previous affinity's vector. */
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	cpumask_copy(data->affinity, mask);

	return 0;
}
2436
2437
/*
 * irq_set_affinity callback for plain (non-remapped) IO-APIC irqs:
 * allocate a vector on the new CPU set and re-program every pin of the
 * irq under ioapic_lock.  Returns IRQ_SET_MASK_OK_NOCOPY on success.
 */
int native_ioapic_set_affinity(struct irq_data *data,
			       const struct cpumask *mask,
			       bool force)
{
	unsigned int dest, irq = data->irq;
	unsigned long flags;
	int ret;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	ret = __ioapic_set_affinity(data, mask, &dest);
	if (!ret) {
		/* Only the high 8 bits are valid. */
		dest = SET_APIC_LOGICAL_ID(dest);
		__target_IO_APIC_irq(irq, dest, data->chip_data);
		ret = IRQ_SET_MASK_OK_NOCOPY;
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	return ret;
}
2460
/*
 * Ack handler for edge-triggered IO-APIC irqs: finish any pending
 * vector migration, perform a deferred affinity change, then EOI the
 * local APIC.
 */
static void ack_apic_edge(struct irq_data *data)
{
	irq_complete_move(data->chip_data);
	irq_move_irq(data);
	ack_APIC_irq();
}
2467
/* Incremented by ack_apic_level() when a level irq's TMR bit is unexpectedly clear. */
atomic_t irq_mis_count;
2469
2470#ifdef CONFIG_GENERIC_PENDING_IRQ
/*
 * Return true if any pin of the irq still has its Remote-IRR bit set,
 * i.e. a level-triggered interrupt has been delivered but not yet
 * EOI-ed at the IO-APIC.
 */
static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}
2493
/*
 * If an affinity change is pending for this irq, mask it at the IO-APIC
 * so the move can be performed safely from the ack path.  Returns
 * whether the irq was masked (to be undone by ioapic_irqd_unmask()).
 */
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
{
	/* If we are moving the irq we need to mask it. */
	if (unlikely(irqd_is_setaffinity_pending(data))) {
		mask_ioapic(cfg);
		return true;
	}
	return false;
}
2503
/*
 * Counterpart of ioapic_irqd_mask(): if the irq was masked for a
 * pending affinity change, perform the move now (while masked) and
 * unmask again.
 */
static inline void ioapic_irqd_unmask(struct irq_data *data,
				      struct irq_cfg *cfg, bool masked)
{
	if (unlikely(masked)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic.  This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ack has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		if (!io_apic_level_ack_pending(cfg))
			irq_move_masked_irq(data);
		unmask_ioapic(cfg);
	}
}
2539#else
/* !CONFIG_GENERIC_PENDING_IRQ: affinity changes happen inline, no masking dance. */
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
{
	return false;
}
static inline void ioapic_irqd_unmask(struct irq_data *data,
				      struct irq_cfg *cfg, bool masked)
{
}
2548#endif
2549
/*
 * EOI handler for level-triggered IO-APIC irqs, including the workaround
 * for IO-APICs that mis-report level interrupts as edge (e.g. older
 * SiS chipsets): if the local APIC's TMR bit for the vector is clear,
 * the EOI broadcast never reaches the IO-APIC and Remote-IRR must be
 * cleared by hand via eoi_ioapic_irq().
 */
static void ack_apic_level(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;
	int i, irq = data->irq;
	unsigned long v;
	bool masked;

	irq_complete_move(cfg);
	masked = ioapic_irqd_mask(data, cfg);

	/*
	 * It appears there is an erratum which affects at least version 0x11
	 * of I/O APIC (that's the 82093AA and cores integrated into various
	 * chipsets).  Under certain conditions a level-triggered interrupt is
	 * erroneously delivered as edge-triggered one but the respective IRR
	 * bit gets set nevertheless.  As a result the I/O unit expects an EOI
	 * message but it will never arrive and further interrupts are blocked
	 * from the source.  The exact reason is so far unknown, but the
	 * phenomenon was observed when two consecutive interrupt requests
	 * from a given source get delivered to the same CPU and the source is
	 * temporarily disabled in between.
	 *
	 * A workaround is to simulate an EOI message manually.  We achieve it
	 * by setting the trigger mode to edge and then to level when the edge
	 * trigger mode gets detected in the TMR of a local APIC for a
	 * level-triggered interrupt.  We mask the source for the time of the
	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
	 * The idea is from Manfred Spraul.  --macro
	 *
	 * Also in the case when cpu goes offline, fixup_irqs() will forward
	 * any unhandled interrupt on the offlined cpu to the new cpu
	 * destination that is handling the corresponding interrupt. This
	 * interrupt forwarding is done via IPI's. Hence, in this case also
	 * level-triggered io-apic interrupt will be seen as an edge
	 * interrupt in the IRR. And we can't rely on the cpu's EOI
	 * to be broadcasted to the IO-APIC's which will clear the remoteIRR
	 * corresponding to the level-triggered interrupt. Hence on IO-APIC's
	 * supporting EOI register, we do an explicit EOI to clear the
	 * remote IRR and on IO-APIC's which don't have an EOI register,
	 * we use the above logic (mask+edge followed by unmask+level) from
	 * Manfred Spraul to clear the remote IRR.
	 */
	i = cfg->vector;
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/*
	 * Tail end of clearing remote IRR bit (either by delivering the EOI
	 * message via io-apic EOI register write or simulating it using
	 * mask+edge followed by unmask+level logic) manually when the
	 * level triggered interrupt is seen as the edge triggered interrupt
	 * at the cpu.
	 */
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);

		eoi_ioapic_irq(irq, cfg);
	}

	ioapic_irqd_unmask(data, cfg, masked);
}
2616
/* irq_chip for interrupts routed through an IO-APIC.  Edge irqs use
 * irq_ack (ack_apic_edge), level irqs use irq_eoi (ack_apic_level). */
static struct irq_chip ioapic_chip __read_mostly = {
	.name			= "IO-APIC",
	.irq_startup		= startup_ioapic_irq,
	.irq_mask		= mask_ioapic_irq,
	.irq_unmask		= unmask_ioapic_irq,
	.irq_ack		= ack_apic_edge,
	.irq_eoi		= ack_apic_level,
	.irq_set_affinity	= native_ioapic_set_affinity,
	.irq_retrigger		= ioapic_retrigger_irq,
};
2627
/*
 * For every active irq that should be IO-APIC driven but got no vector
 * assigned, fall back: legacy irqs (< 16) are handed back to the 8259A,
 * everything else is disabled via no_irq_chip.
 */
static inline void init_IO_APIC_traps(void)
{
	struct irq_cfg *cfg;
	unsigned int irq;

	for_each_active_irq(irq) {
		cfg = irq_cfg(irq);
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < nr_legacy_irqs())
				legacy_pic->make_irq(irq);
			else
				/* Strange. Oh, well.. */
				irq_set_chip(irq, &no_irq_chip);
		}
	}
}
2649
2650
2651
2652
2653
2654static void mask_lapic_irq(struct irq_data *data)
2655{
2656 unsigned long v;
2657
2658 v = apic_read(APIC_LVT0);
2659 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2660}
2661
2662static void unmask_lapic_irq(struct irq_data *data)
2663{
2664 unsigned long v;
2665
2666 v = apic_read(APIC_LVT0);
2667 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2668}
2669
/* Plain local APIC EOI - no IO-APIC involvement. */
static void ack_lapic_irq(struct irq_data *data)
{
	ack_APIC_irq();
}
2674
/* irq_chip used when the timer irq is wired directly to the local APIC
 * (virtual-wire fallback in check_timer()). */
static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.irq_mask	= mask_lapic_irq,
	.irq_unmask	= unmask_lapic_irq,
	.irq_ack	= ack_lapic_irq,
};
2681
/* Hand @irq to the local-APIC chip as an edge-triggered interrupt. */
static void lapic_register_intr(int irq)
{
	irq_clear_status_flags(irq, IRQ_LEVEL);
	irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}
2688
2689
2690
2691
2692
2693
2694
2695
/*
 * This looks a bit hackish but it's about the only one way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 *
 * Temporarily reprograms the RTC pin (ISA IRQ8) to ExtINT mode, kicks
 * the RTC's periodic interrupt at 1 kHz for up to ~100 pulses, then
 * restores both the RTC and the IO-APIC entry.
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	pin = find_isa_irq_pin(8, mp_INT);
	if (pin == -1) {
		WARN_ON_ONCE(1);
		return;
	}
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1) {
		WARN_ON_ONCE(1);
		return;
	}

	/* Save the current entry so it can be restored afterwards. */
	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	/* Unmasked, physical mode, ExtINT to this CPU, edge, same polarity. */
	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	/* Enable the RTC periodic interrupt at rate 0x6 (1.024 kHz). */
	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	/* Poll for periodic-interrupt flags; each hit shortens the wait. */
	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	/* Restore the RTC and the original IO-APIC entry. */
	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}
2747
/* When set via "disable_timer_pin_1", clear IO-APIC pin 1 after the
 * timer check succeeds (works around boards with noisy pin 1). */
static int disable_timer_pin_1 __initdata;

/* early_param handler for "disable_timer_pin_1". */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 0;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);
2756
2757
2758
2759
2760
2761
2762
2763
2764
/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  It probes the 8254 timer (IRQ0)
 * through every plausible delivery path until one actually produces
 * ticks: the MP-table IO-APIC pin, the 8259A's IO-APIC pin, the local
 * APIC virtual wire and finally plain ExtINT through the 8259A.  If
 * nothing works, the kernel panics.
 */
static inline void __init check_timer(void)
{
	struct irq_cfg *cfg = irq_cfg(0);
	int node = cpu_to_node(0);
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	int no_pin1 = 0;

	local_irq_save(flags);

	/*
	 * get/set the timer IRQ vector:
	 */
	legacy_pic->mask(0);
	assign_irq_vector(0, cfg, apic->target_cpus());

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	legacy_pic->init(1);

	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input.  So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
		panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC");
		pin1 = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2 = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq_node(cfg, node, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		} else {
			/*
			 * for edge trigger, setup_ioapic_irq already
			 * leave it unmasked.
			 * so only need to unmask if it is level-trigger
			 */
			int idx;
			idx = find_irq_entry(apic1, pin1, mp_INT);
			if (idx != -1 && irq_trigger(idx))
				unmask_ioapic(cfg);
		}
		if (timer_irq_works()) {
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
		panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC");
		local_irq_disable();
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		/* Second attempt: timer through the 8259A's IO-APIC pin. */
		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		legacy_pic->unmask(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		local_irq_disable();
		legacy_pic->mask(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	/* Third attempt: local APIC virtual wire (LVT0 fixed delivery). */
	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	legacy_pic->unmask(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	legacy_pic->mask(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	/* Last attempt: classic ExtINT through the 8259A. */
	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	legacy_pic->init(0);
	legacy_pic->make_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	if (x2apic_preenabled)
		apic_printk(APIC_QUIET, KERN_INFO
			    "Perhaps problem with the pre-enabled x2apic mode\n"
			    "Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
		"report.  Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
/* Bit mask with only the 8259 cascade IRQ set; setup_IO_APIC() excludes it from io_apic_irqs. */
#define PIC_IRQS	(1UL << PIC_CASCADE_IR)
2931
2932static int mp_irqdomain_create(int ioapic)
2933{
2934 size_t size;
2935 int hwirqs = mp_ioapic_pin_count(ioapic);
2936 struct ioapic *ip = &ioapics[ioapic];
2937 struct ioapic_domain_cfg *cfg = &ip->irqdomain_cfg;
2938 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);
2939
2940 size = sizeof(struct mp_pin_info) * mp_ioapic_pin_count(ioapic);
2941 ip->pin_info = kzalloc(size, GFP_KERNEL);
2942 if (!ip->pin_info)
2943 return -ENOMEM;
2944
2945 if (cfg->type == IOAPIC_DOMAIN_INVALID)
2946 return 0;
2947
2948 ip->irqdomain = irq_domain_add_linear(cfg->dev, hwirqs, cfg->ops,
2949 (void *)(long)ioapic);
2950 if(!ip->irqdomain) {
2951 kfree(ip->pin_info);
2952 ip->pin_info = NULL;
2953 return -ENOMEM;
2954 }
2955
2956 if (cfg->type == IOAPIC_DOMAIN_LEGACY ||
2957 cfg->type == IOAPIC_DOMAIN_STRICT)
2958 ioapic_dynirq_base = max(ioapic_dynirq_base,
2959 gsi_cfg->gsi_end + 1);
2960
2961 if (gsi_cfg->gsi_base == 0)
2962 irq_set_default_host(ip->irqdomain);
2963
2964 return 0;
2965}
2966
void __init setup_IO_APIC(void)
{
	int ioapic;

	/*
	 * With a legacy PIC present, route everything but the 8259 cascade
	 * line through the IO-APIC; otherwise claim all irq lines.
	 */
	io_apic_irqs = nr_legacy_irqs() ? ~PIC_IRQS : ~0UL;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
	/* An irqdomain per IO-APIC is mandatory; failure here is fatal. */
	for_each_ioapic(ioapic)
		BUG_ON(mp_irqdomain_create(ioapic));

	/*
	 * Set up IO-APIC IDs via the platform mpparse hook, then sync the
	 * arbitration IDs before programming the redirection entries.
	 */
	x86_init.mpparse.setup_ioapic_ids();

	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	if (nr_legacy_irqs())
		check_timer();

	/* From here on arch_dynirq_lower_bound() uses ioapic_dynirq_base. */
	ioapic_initialized = 1;
}
2993
2994
2995
2996
2997
2998
/*
 * If nothing set sis_apic_bug by late-init time, treat the bug as absent
 * (-1 is the "unknown" initializer visible at the top of this file).
 */
static int __init io_apic_bug_finalize(void)
{
	if (sis_apic_bug == -1)
		sis_apic_bug = 0;
	return 0;
}

late_initcall(io_apic_bug_finalize);
3007
/* Restore one IO-APIC's ID register if it no longer matches the MP table. */
static void resume_ioapic_id(int ioapic_idx)
{
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic_idx, 0);
	/* Only write back when the hardware ID differs from the MP-table one. */
	if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) {
		reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
		io_apic_write(ioapic_idx, 0, reg_00.raw);
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
3021
/* syscore resume hook: rewrite IO-APIC IDs and the saved RTEs after suspend. */
static void ioapic_resume(void)
{
	int ioapic_idx;

	for_each_ioapic_reverse(ioapic_idx)
		resume_ioapic_id(ioapic_idx);

	restore_ioapic_entries();
}
3031
static struct syscore_ops ioapic_syscore_ops = {
	.suspend = save_ioapic_entries,	/* snapshot RTEs before suspend */
	.resume = ioapic_resume,	/* replay IDs + RTEs on resume */
};

/* Register the suspend/resume hooks above at device-initcall time. */
static int __init ioapic_init_ops(void)
{
	register_syscore_ops(&ioapic_syscore_ops);

	return 0;
}

device_initcall(ioapic_init_ops);
3045
3046
3047
3048
3049int arch_setup_hwirq(unsigned int irq, int node)
3050{
3051 struct irq_cfg *cfg;
3052 unsigned long flags;
3053 int ret;
3054
3055 cfg = alloc_irq_cfg(irq, node);
3056 if (!cfg)
3057 return -ENOMEM;
3058
3059 raw_spin_lock_irqsave(&vector_lock, flags);
3060 ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
3061 raw_spin_unlock_irqrestore(&vector_lock, flags);
3062
3063 if (!ret)
3064 irq_set_chip_data(irq, cfg);
3065 else
3066 free_irq_cfg(irq, cfg);
3067 return ret;
3068}
3069
/*
 * Undo arch_setup_hwirq(): release any remapping resources, clear the
 * vector under vector_lock, then free the irq_cfg.
 */
void arch_teardown_hwirq(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long flags;

	free_remapped_irq(irq);
	raw_spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq, cfg);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	free_irq_cfg(irq, cfg);
}
3081
3082
3083
3084
/*
 * Compose an MSI message (address/data pair) targeting APIC id @dest
 * with the vector currently assigned to @irq.  @pdev and @hpet_id are
 * unused here but are part of the compose hook's signature.
 */
void native_compose_msi_msg(struct pci_dev *pdev,
			    unsigned int irq, unsigned int dest,
			    struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	msg->address_hi = MSI_ADDR_BASE_HI;

	/* With x2APIC the extended destination bits go in the high word. */
	if (x2apic_enabled())
		msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);

	/* Destination mode and redirection hint track the apic driver. */
	msg->address_lo =
		MSI_ADDR_BASE_LO |
		((apic->irq_dest_mode == 0) ?
			MSI_ADDR_DEST_MODE_PHYSICAL:
			MSI_ADDR_DEST_MODE_LOGICAL) |
		((apic->irq_delivery_mode != dest_LowestPrio) ?
			MSI_ADDR_REDIRECTION_CPU:
			MSI_ADDR_REDIRECTION_LOWPRI) |
		MSI_ADDR_DEST_ID(dest);

	/* Edge-triggered, level asserted, delivery mode per apic driver. */
	msg->data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		((apic->irq_delivery_mode != dest_LowestPrio) ?
			MSI_DATA_DELIVERY_FIXED:
			MSI_DATA_DELIVERY_LOWPRI) |
		MSI_DATA_VECTOR(cfg->vector);
}
3114
3115#ifdef CONFIG_PCI_MSI
/*
 * Assign a vector to @irq and compose the matching MSI message via the
 * x86_msi.compose_msi_msg hook.  Returns 0 or a negative errno
 * (-ENXIO when the local APIC is disabled).
 */
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
			   struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	int err;
	unsigned dest;

	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (err)
		return err;

	/* Resolve the vector's cpu domain to a destination APIC id. */
	err = apic->cpu_mask_to_apicid_and(cfg->domain,
					   apic->target_cpus(), &dest);
	if (err)
		return err;

	x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);

	return 0;
}
3140
/*
 * irq_chip affinity callback for PCI MSI: retarget the vector via the
 * generic IO-APIC helper, then patch the cached MSI message with the
 * new vector/destination and write it back to the device.
 */
static int
msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	struct msi_msg msg;
	unsigned int dest;
	int ret;

	ret = __ioapic_set_affinity(data, mask, &dest);
	if (ret)
		return ret;

	__get_cached_msi_msg(data->msi_desc, &msg);

	/* Replace only the vector and destination-id fields. */
	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	__write_msi_msg(data->msi_desc, &msg);

	/* Affinity mask already updated by __ioapic_set_affinity(). */
	return IRQ_SET_MASK_OK_NOCOPY;
}
3164
3165
3166
3167
3168
/* irq_chip for PCI MSI/MSI-X interrupts (edge-triggered). */
static struct irq_chip msi_chip = {
	.name			= "PCI-MSI",
	.irq_unmask		= unmask_msi_irq,
	.irq_mask		= mask_msi_irq,
	.irq_ack		= ack_apic_edge,
	.irq_set_affinity	= msi_set_affinity,
	.irq_retrigger		= ioapic_retrigger_irq,
};
3177
/*
 * Wire up one MSI/MSI-X interrupt: compose the message, bind the msi
 * descriptor, and install the edge-flow handler.  Returns 0 or a
 * negative errno from msi_compose_msg().
 */
int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
		  unsigned int irq_base, unsigned int irq_offset)
{
	struct irq_chip *chip = &msi_chip;
	struct msi_msg msg;
	unsigned int irq = irq_base + irq_offset;
	int ret;

	ret = msi_compose_msg(dev, irq, &msg, -1);
	if (ret < 0)
		return ret;

	irq_set_msi_desc_off(irq_base, irq_offset, msidesc);

	/*
	 * Only write the message for offset 0; presumably the remaining
	 * irqs of a multi-vector block share it — confirm against the
	 * MSI core before relying on this.
	 */
	if (!irq_offset)
		write_msi_msg(irq, &msg);

	/* May substitute the remapping-aware irq_chip for msi_chip. */
	setup_remapped_irq(irq, irq_cfg(irq), chip);

	irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);

	return 0;
}
3207
/*
 * Default x86 MSI setup: allocate a hwirq and wire an MSI message for
 * every descriptor on @dev.  Returns 0, a negative errno, or 1 when
 * multi-vector MSI was requested (unsupported here).
 */
int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *msidesc;
	unsigned int irq;
	int node, ret;

	/* Multiple-vector MSI is not supported; returning 1 signals that. */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	node = dev_to_node(&dev->dev);

	list_for_each_entry(msidesc, &dev->msi_list, list) {
		irq = irq_alloc_hwirq(node);
		if (!irq)
			return -ENOSPC;

		ret = setup_msi_irq(dev, msidesc, irq, 0);
		if (ret < 0) {
			/*
			 * Free only this irq; NOTE(review): irqs set up in
			 * earlier iterations appear to be left for the
			 * caller to tear down — confirm against the MSI core.
			 */
			irq_free_hwirq(irq);
			return ret;
		}

	}
	return 0;
}
3234
/* Default MSI teardown: just release the hardware irq number. */
void native_teardown_msi_irq(unsigned int irq)
{
	irq_free_hwirq(irq);
}
3239
3240#ifdef CONFIG_DMAR_TABLE
/*
 * Affinity callback for the DMAR (interrupt-remapping fault) MSI:
 * retarget the vector, then rewrite the message registers — including
 * the extended destination id in address_hi — via dmar_msi_read/write.
 */
static int
dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct msi_msg msg;
	int ret;

	ret = __ioapic_set_affinity(data, mask, &dest);
	if (ret)
		return ret;

	dmar_msi_read(irq, &msg);

	/* Replace vector, destination id, and the extended id. */
	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
	msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);

	dmar_msi_write(irq, &msg);

	return IRQ_SET_MASK_OK_NOCOPY;
}
3266
/* irq_chip for the DMAR unit's MSI (edge-triggered). */
static struct irq_chip dmar_msi_type = {
	.name			= "DMAR_MSI",
	.irq_unmask		= dmar_msi_unmask,
	.irq_mask		= dmar_msi_mask,
	.irq_ack		= ack_apic_edge,
	.irq_set_affinity	= dmar_msi_set_affinity,
	.irq_retrigger		= ioapic_retrigger_irq,
};
3275
3276int arch_setup_dmar_msi(unsigned int irq)
3277{
3278 int ret;
3279 struct msi_msg msg;
3280
3281 ret = msi_compose_msg(NULL, irq, &msg, -1);
3282 if (ret < 0)
3283 return ret;
3284 dmar_msi_write(irq, &msg);
3285 irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3286 "edge");
3287 return 0;
3288}
3289#endif
3290
3291#ifdef CONFIG_HPET_TIMER
3292
/*
 * Affinity callback for an HPET comparator MSI: retarget the vector,
 * then patch the message read from/written to the HPET via the
 * per-channel handler data.
 */
static int hpet_msi_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	struct msi_msg msg;
	unsigned int dest;
	int ret;

	ret = __ioapic_set_affinity(data, mask, &dest);
	if (ret)
		return ret;

	hpet_msi_read(data->handler_data, &msg);

	/* Replace only the vector and destination-id fields. */
	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	hpet_msi_write(data->handler_data, &msg);

	return IRQ_SET_MASK_OK_NOCOPY;
}
3316
/* irq_chip for HPET comparator MSIs (edge-triggered). */
static struct irq_chip hpet_msi_type = {
	.name			= "HPET_MSI",
	.irq_unmask		= hpet_msi_unmask,
	.irq_mask		= hpet_msi_mask,
	.irq_ack		= ack_apic_edge,
	.irq_set_affinity	= hpet_msi_set_affinity,
	.irq_retrigger		= ioapic_retrigger_irq,
};
3325
/*
 * Compose and program the MSI for HPET channel @id on @irq, mark the
 * irq as movable from process context, and install the edge handler.
 */
int default_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct irq_chip *chip = &hpet_msi_type;
	struct msi_msg msg;
	int ret;

	ret = msi_compose_msg(NULL, irq, &msg, id);
	if (ret < 0)
		return ret;

	hpet_msi_write(irq_get_handler_data(irq), &msg);
	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
	/* May substitute the remapping-aware irq_chip. */
	setup_remapped_irq(irq, irq_cfg(irq), chip);

	irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
	return 0;
}
3343#endif
3344
3345#endif
3346
3347
3348
3349#ifdef CONFIG_HT_IRQ
3350
3351static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3352{
3353 struct ht_irq_msg msg;
3354 fetch_ht_irq_msg(irq, &msg);
3355
3356 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3357 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3358
3359 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3360 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3361
3362 write_ht_irq_msg(irq, &msg);
3363}
3364
/*
 * Affinity callback for HyperTransport irqs: retarget the vector via
 * the generic helper, then rewrite the HT message accordingly.
 */
static int
ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest;
	int ret;

	ret = __ioapic_set_affinity(data, mask, &dest);
	if (ret)
		return ret;

	target_ht_irq(data->irq, dest, cfg->vector);
	return IRQ_SET_MASK_OK_NOCOPY;
}
3379
/* irq_chip for HyperTransport irqs (edge-triggered). */
static struct irq_chip ht_irq_chip = {
	.name			= "PCI-HT",
	.irq_mask		= mask_ht_irq,
	.irq_unmask		= unmask_ht_irq,
	.irq_ack		= ack_apic_edge,
	.irq_set_affinity	= ht_set_affinity,
	.irq_retrigger		= ioapic_retrigger_irq,
};
3388
/*
 * Allocate a vector for a HyperTransport irq on @dev, compose the HT
 * message (initially masked) and install the edge-flow handler.
 * Returns 0 or a negative errno.
 */
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	struct ht_irq_msg msg;
	unsigned dest;
	int err;

	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (err)
		return err;

	/* Resolve the vector's cpu domain to a destination APIC id. */
	err = apic->cpu_mask_to_apicid_and(cfg->domain,
					   apic->target_cpus(), &dest);
	if (err)
		return err;

	msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

	/*
	 * Destination/delivery modes track the apic driver; the message
	 * is created masked (HT_IRQ_LOW_IRQ_MASKED) until first unmask.
	 */
	msg.address_lo =
		HT_IRQ_LOW_BASE |
		HT_IRQ_LOW_DEST_ID(dest) |
		HT_IRQ_LOW_VECTOR(cfg->vector) |
		((apic->irq_dest_mode == 0) ?
			HT_IRQ_LOW_DM_PHYSICAL :
			HT_IRQ_LOW_DM_LOGICAL) |
		HT_IRQ_LOW_RQEOI_EDGE |
		((apic->irq_delivery_mode != dest_LowestPrio) ?
			HT_IRQ_LOW_MT_FIXED :
			HT_IRQ_LOW_MT_ARBITRATED) |
		HT_IRQ_LOW_IRQ_MASKED;

	write_ht_irq_msg(irq, &msg);

	irq_set_chip_and_handler_name(irq, &ht_irq_chip,
				      handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);

	return 0;
}
3433#endif
3434
3435static int
3436io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
3437{
3438 struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
3439 int ret;
3440
3441 if (!cfg)
3442 return -EINVAL;
3443 ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin);
3444 if (!ret)
3445 setup_ioapic_irq(irq, cfg, attr);
3446 return ret;
3447}
3448
/* Number of redirection entries (pins) on @ioapic, read from register 1. */
static int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	/*
	 * The register holds the maximum entry index, which is one less
	 * than the number of entries.
	 */
	return reg_01.bits.entries + 1;
}
3464
/* Lowest irq number available for dynamic allocation (@from is unused). */
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
	/*
	 * Before setup_IO_APIC() finishes, ioapic_dynirq_base is not
	 * final yet; fall back to gsi_top so dynamically allocated irqs
	 * never collide with GSI-mapped ones.
	 */
	return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
}
3473
/* Size nr_irqs from the GSI space, legacy irqs and the cpu count. */
int __init arch_probe_nr_irqs(void)
{
	int nr;

	/* Hard cap: at most one vector per cpu per irq. */
	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/* Extra headroom for MSI and HT irqs on top of the GSI space. */
	nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/* NOTE(review): return value is the count of pre-allocated legacy
	 * irqs — 0 here; confirm against the irq core's expectations. */
	return 0;
}
3493
3494#ifdef CONFIG_X86_32
/*
 * Pick a unique APIC id for a new IO-APIC on 32-bit systems where IDs
 * are shared with the CPUs.  Starts from @apic_id, resolves conflicts
 * against the ids already in use, writes the chosen id into the chip
 * and verifies the write.  Returns the id, or -1 if the hardware
 * refused the change.
 */
static int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * Lazily build the map of physical APIC ids already taken by
	 * CPUs; it is static so later calls also see ids claimed by
	 * previously registered IO-APICs (added at the bottom).
	 */
	if (physids_empty(apic_id_map))
		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	/* Out-of-range request: fall back to the id the chip reports. */
	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
			"%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * If the requested id is already in use, scan for the first
	 * free one below the broadcast id.
	 */
	if (apic->check_apicid_used(&apic_id_map, apic_id)) {

		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!apic->check_apicid_used(&apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
			"trying %d\n", ioapic, apic_id, i);

		apic_id = i;
	}

	/* Reserve the chosen id in the map for subsequent callers. */
	apic->apicid_to_cpu_present(apic_id, &tmp);
	physids_or(apic_id_map, apic_id_map, tmp);

	/* Program the chip and read back to confirm the id stuck. */
	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		/* Sanity check: some chips silently refuse the write. */
		if (reg_00.bits.ID != apic_id) {
			pr_err("IOAPIC[%d]: Unable to change apic_id!\n",
			       ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}
3569
/*
 * 32-bit: on Intel CPUs without xAPIC, IO-APIC ids must be made unique
 * via io_apic_get_unique_id(); otherwise the requested id is kept.
 */
static u8 __init io_apic_unique_id(u8 id)
{
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return io_apic_get_unique_id(nr_ioapics, id);
	else
		return id;
}
3578#else
3579static u8 __init io_apic_unique_id(u8 id)
3580{
3581 int i;
3582 DECLARE_BITMAP(used, 256);
3583
3584 bitmap_zero(used, 256);
3585 for_each_ioapic(i)
3586 __set_bit(mpc_ioapic_id(i), used);
3587 if (!test_bit(id, used))
3588 return id;
3589 return find_first_zero_bit(used, 256);
3590}
3591#endif
3592
/* Read the version field of @ioapic from register 1. */
static int __init io_apic_get_version(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.version;
}
3604
3605int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
3606{
3607 int ioapic, pin, idx;
3608
3609 if (skip_ioapic_setup)
3610 return -1;
3611
3612 ioapic = mp_find_ioapic(gsi);
3613 if (ioapic < 0)
3614 return -1;
3615
3616 pin = mp_find_ioapic_pin(ioapic, gsi);
3617 if (pin < 0)
3618 return -1;
3619
3620 idx = find_irq_entry(ioapic, pin, mp_INT);
3621 if (idx < 0)
3622 return -1;
3623
3624 *trigger = irq_trigger(idx);
3625 *polarity = irq_polarity(idx);
3626 return 0;
3627}
3628
3629
3630
3631
3632
3633
3634#ifdef CONFIG_SMP
/* Re-apply irq affinity for every routed IO-APIC pin after SMP bringup. */
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	const struct cpumask *mask;
	struct irq_data *idata;

	if (skip_ioapic_setup == 1)
		return;

	for_each_ioapic_pin(ioapic, pin) {
		irq_entry = find_irq_entry(ioapic, pin, mp_INT);
		if (irq_entry == -1)
			continue;

		irq = pin_2_irq(irq_entry, ioapic, pin, 0);
		if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
			continue;

		idata = irq_get_irq_data(irq);

		/*
		 * Honor an affinity that was set earlier (or is
		 * unbalanceable); otherwise use the apic driver's
		 * default target cpus.
		 */
		if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata))
			mask = idata->affinity;
		else
			mask = apic->target_cpus();

		x86_io_apic_ops.set_affinity(idata, mask, false);
	}

}
3667#endif
3668
/* Room for an "IOAPIC %u" name string attached to each resource below. */
#define IOAPIC_RESOURCE_NAME_SIZE 11

/* MMIO resources, one per IO-APIC, built by ioapic_setup_resources(). */
static struct resource *ioapic_resources;
3672
/*
 * Bootmem-allocate one struct resource (plus name storage) per
 * IO-APIC.  The start/end addresses are filled in later by
 * native_io_apic_init_mappings().  Returns the array, or NULL when no
 * IO-APIC is registered.
 */
static struct resource * __init ioapic_setup_resources(void)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i, num = 0;

	for_each_ioapic(i)
		num++;
	if (num == 0)
		return NULL;

	/* One resource plus one name buffer per IO-APIC, in one chunk. */
	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= num;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	/* Name strings live directly after the resource array. */
	mem += sizeof(struct resource) * num;

	num = 0;
	for_each_ioapic(i) {
		res[num].name = mem;
		res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
		mem += IOAPIC_RESOURCE_NAME_SIZE;
		num++;
	}

	ioapic_resources = res;

	return res;
}
3706
/*
 * Map every IO-APIC's MMIO window into the fixmap area and record the
 * physical ranges in the resource array.  On 32-bit, a bogus zero
 * address in the MP table disables IO-APIC support and falls back to a
 * dummy page so the fixmap slot still maps something.
 */
void __init native_io_apic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources();
	for_each_ioapic(i) {
		if (smp_found_config) {
			ioapic_phys = mpc_ioapic_addr(i);
#ifdef CONFIG_X86_32
			/* A zero address in the MP table is fatal for
			 * IO-APIC mode: disable it and map a dummy page. */
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
#endif
		} else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
			/* No MP config: back the fixmap with a fresh page. */
			ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
			__fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
			ioapic_phys);
		idx++;

		ioapic_res->start = ioapic_phys;
		ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
		ioapic_res++;
	}
}
3746
3747void __init ioapic_insert_resources(void)
3748{
3749 int i;
3750 struct resource *r = ioapic_resources;
3751
3752 if (!r) {
3753 if (nr_ioapics > 0)
3754 printk(KERN_ERR
3755 "IO APIC resources couldn't be allocated.\n");
3756 return;
3757 }
3758
3759 for_each_ioapic(i) {
3760 insert_resource(&iomem_resource, r);
3761 r++;
3762 }
3763}
3764
3765int mp_find_ioapic(u32 gsi)
3766{
3767 int i;
3768
3769 if (nr_ioapics == 0)
3770 return -1;
3771
3772
3773 for_each_ioapic(i) {
3774 struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(i);
3775 if (gsi >= gsi_cfg->gsi_base && gsi <= gsi_cfg->gsi_end)
3776 return i;
3777 }
3778
3779 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
3780 return -1;
3781}
3782
3783int mp_find_ioapic_pin(int ioapic, u32 gsi)
3784{
3785 struct mp_ioapic_gsi *gsi_cfg;
3786
3787 if (WARN_ON(ioapic < 0))
3788 return -1;
3789
3790 gsi_cfg = mp_ioapic_gsi_routing(ioapic);
3791 if (WARN_ON(gsi > gsi_cfg->gsi_end))
3792 return -1;
3793
3794 return gsi - gsi_cfg->gsi_base;
3795}
3796
/*
 * Sanity-check a to-be-registered IO-APIC: reject when the MAX_IO_APICS
 * slot limit is reached or the MMIO address is zero.  Returns 1 when
 * the IO-APIC should be skipped, 0 otherwise.
 */
static __init int bad_ioapic(unsigned long address)
{
	if (nr_ioapics >= MAX_IO_APICS) {
		pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
			MAX_IO_APICS, nr_ioapics);
		return 1;
	}
	if (!address) {
		pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n");
		return 1;
	}
	return 0;
}
3810
/*
 * Probe registers 0..2 of IO-APIC @idx; hardware that answers all-ones
 * on every one is absent or broken.  Returns 1 when the IO-APIC should
 * be skipped, 0 otherwise.
 */
static __init int bad_ioapic_register(int idx)
{
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;

	reg_00.raw = io_apic_read(idx, 0);
	reg_01.raw = io_apic_read(idx, 1);
	reg_02.raw = io_apic_read(idx, 2);

	if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) {
		pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n",
			mpc_ioapic_addr(idx));
		return 1;
	}

	return 0;
}
3829
/*
 * Register a new IO-APIC: record its MP config, map its MMIO window,
 * sanity-check the hardware, assign a unique APIC id and set up its
 * GSI routing.  Silently returns (without registering) on any
 * sanity-check failure.
 */
void __init mp_register_ioapic(int id, u32 address, u32 gsi_base,
			       struct ioapic_domain_cfg *cfg)
{
	int idx = 0;
	int entries;
	struct mp_ioapic_gsi *gsi_cfg;

	if (bad_ioapic(address))
		return;

	idx = nr_ioapics;

	ioapics[idx].mp_config.type = MP_IOAPIC;
	ioapics[idx].mp_config.flags = MPC_APIC_USABLE;
	ioapics[idx].mp_config.apicaddr = address;
	ioapics[idx].irqdomain = NULL;
	ioapics[idx].irqdomain_cfg = *cfg;

	/* Map the registers so the probes below can read them. */
	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);

	if (bad_ioapic_register(idx)) {
		clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
		return;
	}

	ioapics[idx].mp_config.apicid = io_apic_unique_id(id);
	ioapics[idx].mp_config.apicver = io_apic_get_version(idx);

	/*
	 * Build the GSI lookup range for this IO-APIC so gsi->ioapic
	 * lookups (mp_find_ioapic) work.
	 */
	entries = io_apic_get_redir_entries(idx);
	gsi_cfg = mp_ioapic_gsi_routing(idx);
	gsi_cfg->gsi_base = gsi_base;
	gsi_cfg->gsi_end = gsi_base + entries - 1;

	/*
	 * The number of IO-APIC IRQ registers (== #pins).
	 */
	ioapics[idx].nr_registers = entries;

	/* Keep gsi_top pointing one past the highest GSI seen so far. */
	if (gsi_cfg->gsi_end >= gsi_top)
		gsi_top = gsi_cfg->gsi_end + 1;

	pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
		idx, mpc_ioapic_id(idx),
		mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
		gsi_cfg->gsi_base, gsi_cfg->gsi_end);

	nr_ioapics++;
}
3882
/*
 * irqdomain map callback: program IO-APIC pin @hwirq for @virq, using
 * the pin's recorded trigger/polarity or, when unset, the ACPI
 * override (falling back to level/low-style defaults for PCI).
 */
int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq,
		     irq_hw_number_t hwirq)
{
	int ioapic = (int)(long)domain->host_data;
	struct mp_pin_info *info = mp_pin_info(ioapic, hwirq);
	struct io_apic_irq_attr attr;

	/* Get default attribute for this pin if it was never configured. */
	if (!info->set) {
		u32 gsi = mp_pin_to_gsi(ioapic, hwirq);

		if (acpi_get_override_irq(gsi, &info->trigger,
					  &info->polarity) < 0) {
			/*
			 * No override found: default to trigger=1,
			 * polarity=1 (PCI-style level interrupts).
			 */
			info->trigger = 1;
			info->polarity = 1;
		}
		info->node = NUMA_NO_NODE;

		/*
		 * Do not latch the attributes for unused legacy irqs
		 * (virq < nr_legacy_irqs() with count == 0), so the first
		 * real user can still reprogram the pin with its own
		 * trigger/polarity.
		 */
		if (virq >= nr_legacy_irqs() || info->count)
			info->set = 1;
	}
	set_io_apic_irq_attr(&attr, ioapic, hwirq, info->trigger,
			     info->polarity);

	return io_apic_setup_irq_pin(virq, info->node, &attr);
}
3919
/*
 * irqdomain unmap callback: mask the pin, detach it from the irq_cfg
 * (which must end up with no pins left) and tear down the hwirq.
 */
void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq)
{
	struct irq_data *data = irq_get_irq_data(virq);
	struct irq_cfg *cfg = irq_cfg(virq);
	int ioapic = (int)(long)domain->host_data;
	int pin = (int)data->hwirq;

	ioapic_mask_entry(ioapic, pin);
	__remove_pin_from_irq(cfg, ioapic, pin);
	/* In this domain model an irq maps to exactly one pin. */
	WARN_ON(cfg->irq_2_pin != NULL);
	arch_teardown_hwirq(virq);
}
3932
3933int mp_set_gsi_attr(u32 gsi, int trigger, int polarity, int node)
3934{
3935 int ret = 0;
3936 int ioapic, pin;
3937 struct mp_pin_info *info;
3938
3939 ioapic = mp_find_ioapic(gsi);
3940 if (ioapic < 0)
3941 return -ENODEV;
3942
3943 pin = mp_find_ioapic_pin(ioapic, gsi);
3944 info = mp_pin_info(ioapic, pin);
3945 trigger = trigger ? 1 : 0;
3946 polarity = polarity ? 1 : 0;
3947
3948 mutex_lock(&ioapic_mutex);
3949 if (!info->set) {
3950 info->trigger = trigger;
3951 info->polarity = polarity;
3952 info->node = node;
3953 info->set = 1;
3954 } else if (info->trigger != trigger || info->polarity != polarity) {
3955 ret = -EBUSY;
3956 }
3957 mutex_unlock(&ioapic_mutex);
3958
3959 return ret;
3960}
3961
3962bool mp_should_keep_irq(struct device *dev)
3963{
3964 if (dev->power.is_prepared)
3965 return true;
3966#ifdef CONFIG_PM_RUNTIME
3967 if (dev->power.runtime_status == RPM_SUSPENDING)
3968 return true;
3969#endif
3970
3971 return false;
3972}
3973
3974
/* Early setup of the IRQ0 timer pin, before the full IO-APIC bringup. */
void __init pre_init_apic_IRQ0(void)
{
	/* All-zero attr: IO-APIC 0, pin 0, default trigger/polarity —
	 * see struct io_apic_irq_attr for the field order. */
	struct io_apic_irq_attr attr = { 0, 0, 0, 0 };

	printk(KERN_INFO "Early APIC setup for system timer0\n");
#ifndef CONFIG_SMP
	physid_set_mask_of_physid(boot_cpu_physical_apicid,
					 &phys_cpu_present_map);
#endif
	setup_local_APIC();

	io_apic_setup_irq_pin(0, 0, &attr);
	irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
				      "edge");
}
3990