/*
 *	Local APIC handling, local APIC timers
 */
#include <linux/perf_event.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi_pmtmr.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
#include <linux/ftrace.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/i8253.h>
#include <linux/dmar.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/dmi.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/trace/irq_vectors.h>
#include <asm/irq_remapping.h>
#include <asm/perf_event.h>
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <linux/atomic.h>
#include <asm/mpspec.h>
#include <asm/i8259.h>
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/mtrr.h>
#include <asm/time.h>
#include <asm/smp.h>
#include <asm/mce.h>
#include <asm/tsc.h>
#include <asm/hypervisor.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/irq_regs.h>

unsigned int num_processors;

unsigned disabled_cpus;

/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);

u8 boot_cpu_apic_version;

/*
 * The highest APIC ID seen during enumeration.
 */
static unsigned int max_physical_apicid;

/*
 * Bitmask of physically existing CPUs:
 */
physid_mask_t phys_cpu_present_map;

/*
 * Processor to be disabled specified by kernel parameter
 * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to
 * avoid undefined behaviour caused by sending INIT from AP to BSP.
 */
static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID;

/*
 * This variable controls which CPUs receive external NMIs.  By default,
 * external NMIs are delivered only to the BSP.
 */
static int apic_extnmi = APIC_EXTNMI_BSP;

/*
 * Hypervisor supports 15 bits of APIC ID in MSI Extended Destination ID
 */
static bool virt_ext_dest_id __ro_after_init;

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, U32_MAX);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);

#ifdef CONFIG_X86_32

/*
 * On x86_32, the mapping between cpu and logical apicid may vary
 * depending on apic in use machine-specifically.
 */
DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID);

/* Local APIC was disabled by the BIOS and enabled by the kernel */
static int enabled_via_apicbase;

/*
 * Handle the interrupt mode configuration register (IMCR).
 * This register controls whether the interrupt signals that reach
 * the BSP come from the master PIC or from the local APIC. Before
 * entering Symmetric I/O Mode, either the BIOS or the operating
 * system must switch out of PIC Mode by changing the IMCR.
 */
static inline void imcr_pic_to_apic(void)
{
	/* select IMCR register */
	outb(0x70, 0x22);
	/* NMI and 8259 INTR go through APIC */
	outb(0x01, 0x23);
}

static inline void imcr_apic_to_pic(void)
{
	/* select IMCR register */
	outb(0x70, 0x22);
	/* NMI and 8259 INTR go directly to BSP */
	outb(0x00, 0x23);
}
#endif

/*
 * Knob to control our willingness to enable the local APIC.
 *
 * +1=force-enable
 */
static int force_enable_local_apic __initdata;

/*
 * APIC command line parameters
 */
static int __init parse_lapic(char *arg)
{
	if (IS_ENABLED(CONFIG_X86_32) && !arg)
		force_enable_local_apic = 1;
	else if (arg && !strncmp(arg, "notscdeadline", 13))
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
	return 0;
}
early_param("lapic", parse_lapic);

#ifdef CONFIG_X86_64
static int apic_calibrate_pmtmr __initdata;
static __init int setup_apicpmtimer(char *s)
{
	apic_calibrate_pmtmr = 1;
	notsc_setup(NULL);
	return 0;
}
__setup("apicpmtimer", setup_apicpmtimer);
#endif
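
/*
 * Illustrative usage of the parameters above (comment added for clarity,
 * not from the original source): on the kernel command line, "lapic"
 * force-enables the local APIC on 32-bit, while "lapic=notscdeadline"
 * keeps the APIC but masks off the TSC-deadline timer feature, e.g.:
 *
 *	linux ... lapic=notscdeadline apicpmtimer
 *
 * "apicpmtimer" (64-bit only) requests PM-timer based APIC calibration
 * and implies notsc.
 */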

unsigned long mp_lapic_addr;
int disable_apic;

/* Disable local APIC timer from the kernel commandline or via dmi quirk */
static int disable_apic_timer __initdata;

/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);

/*
 * Debug level, exported for io_apic.c
 */
unsigned int apic_verbosity;

int pic_mode;

/* Have we found an MP table */
int smp_found_config;

static struct resource lapic_resource = {
	.name = "Local APIC",
	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};

unsigned int lapic_timer_period = 0;

static void apic_pm_activate(void);

static unsigned long apic_phys;

/*
 * Get the LAPIC version
 */
static inline int lapic_get_version(void)
{
	return GET_APIC_VERSION(apic_read(APIC_LVR));
}

/*
 * Check, if the APIC is integrated or a separate chip (82489DX)
 */
static inline int lapic_is_integrated(void)
{
	return APIC_INTEGRATED(lapic_get_version());
}

/*
 * Check, whether this is a modern or a first generation APIC
 */
static int modern_apic(void)
{
	/* AMD systems use old APIC versions, so check the CPU */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86 >= 0xf)
		return 1;
	return lapic_get_version() >= 0x14;
}

/*
 * right after this call apic becomes NOOP driven
 * so apic->write/read doesn't do anything
 */
static void __init apic_disable(void)
{
	pr_info("APIC: switched to apic NOOP\n");
	apic = &apic_noop;
}

void native_apic_wait_icr_idle(void)
{
	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

u32 native_safe_apic_wait_icr_idle(void)
{
	u32 send_status;
	int timeout;

	timeout = 0;
	do {
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		if (!send_status)
			break;
		inc_irq_stat(icr_read_retry_count);
		udelay(100);
	} while (timeout++ < 1000);

	return send_status;
}

void native_apic_icr_write(u32 low, u32 id)
{
	unsigned long flags;

	local_irq_save(flags);
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
	apic_write(APIC_ICR, low);
	local_irq_restore(flags);
}

u64 native_apic_icr_read(void)
{
	u32 icr1, icr2;

	icr2 = apic_read(APIC_ICR2);
	icr1 = apic_read(APIC_ICR);

	return icr1 | ((u64)icr2 << 32);
}

#ifdef CONFIG_X86_32
/**
 * get_physical_broadcast - Get number of physical broadcast IDs
 */
int get_physical_broadcast(void)
{
	return modern_apic() ? 0xff : 0xf;
}
#endif

/**
 * lapic_get_maxlvt - get the maximum number of local vector table entries
 */
int lapic_get_maxlvt(void)
{
	/*
	 * - we always have APIC integrated on 64bit mode
	 * - 82489DXs do not report # of LVT entries
	 */
	return lapic_is_integrated() ? GET_APIC_MAXLVT(apic_read(APIC_LVR)) : 2;
}

/*
 * Local APIC timer
 */

/* Clock divisor */
#define APIC_DIVISOR 16
#define TSC_DIVISOR  8

/*
 * This function sets up the local APIC timer, with a timeout of
 * 'clocks' APIC bus clock. During calibration we actually call
 * this function twice on the boot CPU, once with a bogus timeout
 * value, second time for real. The other (noncalibrating) CPUs
 * call this function only once, with the real, calibrated value.
 *
 * We do reads before writes even if unnecessary, to get around the
 * P5 APIC double write bug.
 */
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
	unsigned int lvtt_value, tmp_value;

	lvtt_value = LOCAL_TIMER_VECTOR;
	if (!oneshot)
		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
	else if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		lvtt_value |= APIC_LVT_TIMER_TSCDEADLINE;

	if (!lapic_is_integrated())
		lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);

	if (!irqen)
		lvtt_value |= APIC_LVT_MASKED;

	apic_write(APIC_LVTT, lvtt_value);

	if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
		/*
		 * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
		 * writing to the APIC LVTT and TSC_DEADLINE MSR isn't
		 * serialized. According to Intel, MFENCE can do the
		 * serialization here.
		 */
		asm volatile("mfence" : : : "memory");

		printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
		return;
	}

	/*
	 * Divide PICLK by 16
	 */
	tmp_value = apic_read(APIC_TDCR);
	apic_write(APIC_TDCR,
		(tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
		APIC_TDR_DIV_16);

	if (!oneshot)
		apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
}
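
/*
 * Sketch of the LVTT values produced above, for illustration only
 * (added comment, assumes LOCAL_TIMER_VECTOR as defined by this kernel):
 *
 *	periodic:     LVTT = LOCAL_TIMER_VECTOR | APIC_LVT_TIMER_PERIODIC
 *	TSC deadline: LVTT = LOCAL_TIMER_VECTOR | APIC_LVT_TIMER_TSCDEADLINE
 *	one-shot:     LVTT = LOCAL_TIMER_VECTOR
 *
 * APIC_LVT_MASKED is OR'd in when interrupt delivery is not requested.
 * In the non-deadline cases the divide configuration register is set to
 * divide-by-16 (APIC_DIVISOR) and, for periodic mode, TMICT is armed
 * with clocks/16.
 */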

/*
 * Setup extended LVT, AMD specific (K8, family 10h)
 *
 * Software should use the LVT offsets the BIOS provides.  The offsets
 * are determined by the subsystems using it like those for MCE
 * threshold or IBS.  On K8 only offset 0 (APIC500) and MCE interrupts
 * are supported.  Beginning with family 10h at least 4 offsets are
 * available.
 *
 * Since the offsets must be consistent for all cores, we keep track
 * of the LVT offsets in software and reserve the offset for the same
 * vector also to be used on other cores.  An offset is freed by
 * setting the entry to APIC_EILVT_MASKED.
 *
 * If the BIOS is right, there should be no conflicts.  Otherwise a
 * "[Firmware Bug]: ..." error message is generated.
 */
static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];

static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
{
	return (old & APIC_EILVT_MASKED)
		|| (new == APIC_EILVT_MASKED)
		|| ((new & ~APIC_EILVT_MASKED) == old);
}

static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
{
	unsigned int rsvd, vector;

	if (offset >= APIC_EILVT_NR_MAX)
		return ~0;

	rsvd = atomic_read(&eilvt_offsets[offset]);
	do {
		vector = rsvd & ~APIC_EILVT_MASKED;	/* 0: unassigned */
		if (vector && !eilvt_entry_is_changeable(vector, new))
			/* may not change if vectors are different */
			return rsvd;
		rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
	} while (rsvd != new);

	rsvd &= ~APIC_EILVT_MASKED;
	if (rsvd && rsvd != vector)
		pr_info("LVT offset %d assigned for vector 0x%02x\n",
			offset, rsvd);

	return new;
}

/*
 * If mask=1, the LVT entry does not generate interrupts while mask=0
 * enables the vector.  See also the BKDGs.  Must be called with
 * preemption disabled.
 */
int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
{
	unsigned long reg = APIC_EILVTn(offset);
	unsigned int new, old, reserved;

	new = (mask << 16) | (msg_type << 8) | vector;
	old = apic_read(reg);
	reserved = reserve_eilvt_offset(offset, new);

	if (reserved != new) {
		pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
		       "vector 0x%x, but the register is already in use for "
		       "vector 0x%x on another cpu\n",
		       smp_processor_id(), reg, offset, new, reserved);
		return -EINVAL;
	}

	if (!eilvt_entry_is_changeable(old, new)) {
		pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
		       "vector 0x%x, but the register is already in use for "
		       "vector 0x%x on this cpu\n",
		       smp_processor_id(), reg, offset, new, old);
		return -EBUSY;
	}

	apic_write(reg, new);

	return 0;
}
EXPORT_SYMBOL_GPL(setup_APIC_eilvt);
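
/*
 * Illustrative caller sketch (comment added, not part of the original
 * file): a user such as the AMD IBS or MCE-threshold code would reserve
 * an extended LVT entry roughly like this, with offset and vector values
 * that are hypothetical here:
 *
 *	if (setup_APIC_eilvt(1, ibs_vector, APIC_EILVT_MSG_NMI, 0))
 *		pr_err("EILVT entry 1 already claimed\n");
 *
 * The reservation in eilvt_offsets[] guarantees that all CPUs end up
 * agreeing on the (offset, vector, msg_type) assignment.
 */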

/*
 * Program the next event, relative to now
 */
static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt)
{
	apic_write(APIC_TMICT, delta);
	return 0;
}

static int lapic_next_deadline(unsigned long delta,
			       struct clock_event_device *evt)
{
	u64 tsc;

	tsc = rdtsc();
	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
	return 0;
}

static int lapic_timer_shutdown(struct clock_event_device *evt)
{
	unsigned int v;

	/* Lapic used as dummy for broadcast ? */
	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	v = apic_read(APIC_LVTT);
	v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
	apic_write(APIC_LVTT, v);
	apic_write(APIC_TMICT, 0);
	return 0;
}

static inline int
lapic_timer_set_periodic_oneshot(struct clock_event_device *evt, bool oneshot)
{
	/* Lapic used as dummy for broadcast ? */
	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	__setup_APIC_LVTT(lapic_timer_period, oneshot, 1);
	return 0;
}

static int lapic_timer_set_periodic(struct clock_event_device *evt)
{
	return lapic_timer_set_periodic_oneshot(evt, false);
}

static int lapic_timer_set_oneshot(struct clock_event_device *evt)
{
	return lapic_timer_set_periodic_oneshot(evt, true);
}

/*
 * Local APIC timer broadcast function
 */
static void lapic_timer_broadcast(const struct cpumask *mask)
{
#ifdef CONFIG_SMP
	apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}

/*
 * The local apic timer can be used for any function which is CPU local.
 */
static struct clock_event_device lapic_clockevent = {
	.name				= "lapic",
	.features			= CLOCK_EVT_FEAT_PERIODIC |
					  CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
					  | CLOCK_EVT_FEAT_DUMMY,
	.shift				= 32,
	.set_state_shutdown		= lapic_timer_shutdown,
	.set_state_periodic		= lapic_timer_set_periodic,
	.set_state_oneshot		= lapic_timer_set_oneshot,
	.set_state_oneshot_stopped	= lapic_timer_shutdown,
	.set_next_event			= lapic_next_event,
	.broadcast			= lapic_timer_broadcast,
	.rating				= 100,
	.irq				= -1,
};
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
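
/*
 * Note added for clarity: lapic_clockevent above is only a template.
 * setup_APIC_timer() copies it into the per-CPU lapic_events instance,
 * fixes up the cpumask and, where the CPU supports it, switches the copy
 * over to TSC-deadline mode before registering it with the clockevents
 * core.
 */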

static u32 hsx_deadline_rev(void)
{
	switch (boot_cpu_data.x86_stepping) {
	case 0x02: return 0x3a;
	case 0x04: return 0x0f;
	}

	return ~0U;
}

static u32 bdx_deadline_rev(void)
{
	switch (boot_cpu_data.x86_stepping) {
	case 0x02: return 0x00000011;
	case 0x03: return 0x0700000e;
	case 0x04: return 0x0f00000c;
	case 0x05: return 0x0e000003;
	}

	return ~0U;
}

static u32 skx_deadline_rev(void)
{
	switch (boot_cpu_data.x86_stepping) {
	case 0x03: return 0x01000136;
	case 0x04: return 0x02000014;
	}

	if (boot_cpu_data.x86_stepping > 4)
		return 0;

	return ~0U;
}

static const struct x86_cpu_id deadline_match[] = {
	X86_MATCH_INTEL_FAM6_MODEL( HASWELL_X,		&hsx_deadline_rev),
	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X,	0x0b000020),
	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_D,	&bdx_deadline_rev),
	X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_X,		&skx_deadline_rev),

	X86_MATCH_INTEL_FAM6_MODEL( HASWELL,		0x22),
	X86_MATCH_INTEL_FAM6_MODEL( HASWELL_L,		0x20),
	X86_MATCH_INTEL_FAM6_MODEL( HASWELL_G,		0x17),

	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL,		0x25),
	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_G,	0x17),

	X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_L,		0xb2),
	X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE,		0xb2),

	X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE_L,		0x52),
	X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE,		0x52),

	{},
};

static void apic_check_deadline_errata(void)
{
	const struct x86_cpu_id *m;
	u32 rev;

	if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
	    boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	m = x86_match_cpu(deadline_match);
	if (!m)
		return;

	/*
	 * Function pointers will have the MSB set due to address layout,
	 * immediate revisions will not.
	 */
	if ((long)m->driver_data < 0)
		rev = ((u32 (*)(void))(m->driver_data))();
	else
		rev = (u32)m->driver_data;

	if (boot_cpu_data.microcode >= rev)
		return;

	setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
	pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; "
	       "please update microcode to version: 0x%x (or later)\n", rev);
}
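
/*
 * Adding an entry to deadline_match[] (illustrative only, model name and
 * revision are placeholders): if all steppings need the same microcode
 * revision an immediate value can be used, otherwise driver_data points
 * at a helper returning the per-stepping revision:
 *
 *	X86_MATCH_INTEL_FAM6_MODEL(SOME_MODEL,  0x12345678),
 *	X86_MATCH_INTEL_FAM6_MODEL(OTHER_MODEL, &some_deadline_rev),
 *
 * apic_check_deadline_errata() distinguishes the two cases by the sign
 * of the (kernel virtual) pointer value.
 */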

/*
 * Setup the local APIC timer for this CPU. Copy the initialized values
 * of the boot CPU and register the clock event in the framework.
 */
static void setup_APIC_timer(void)
{
	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);

	if (this_cpu_has(X86_FEATURE_ARAT)) {
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
		/* Make LAPIC timer preferable over percpu HPET */
		lapic_clockevent.rating = 150;
	}

	memcpy(levt, &lapic_clockevent, sizeof(*levt));
	levt->cpumask = cpumask_of(smp_processor_id());

	if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
		levt->name = "lapic-deadline";
		levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC |
				    CLOCK_EVT_FEAT_DUMMY);
		levt->set_next_event = lapic_next_deadline;
		clockevents_config_and_register(levt,
						tsc_khz * (1000 / TSC_DIVISOR),
						0xF, ~0UL);
	} else
		clockevents_register_device(levt);
}

/*
 * Install the updated TSC frequency from recalibration at the TSC
 * deadline clockevent devices.
 */
static void __lapic_update_tsc_freq(void *info)
{
	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);

	if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		return;

	clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR));
}

void lapic_update_tsc_freq(void)
{
	/*
	 * The clockevent device's ->mult and ->shift can both be
	 * changed. In order to avoid races, schedule the frequency
	 * update code on each CPU.
	 */
	on_each_cpu(__lapic_update_tsc_freq, NULL, 0);
}

/*
 * In this part of the code we calibrate the APIC bus clocks against the
 * external (global) clock event device.
 *
 * We want to do the calibration only once, since we want to have local
 * timer irqs synchronous.  CPUs connected by the same APIC bus have the
 * very same bus frequency.
 *
 * Instead of waiting for PIT/HPET wraparounds, the clockevents
 * infrastructure lets us temporarily substitute the global interrupt
 * handler and sample the APIC counter, the TSC, the ACPI PM timer and
 * jiffies at the start and the end of the measurement interval.
 */
#define LAPIC_CAL_LOOPS		(HZ/10)

static __initdata int lapic_cal_loops = -1;
static __initdata long lapic_cal_t1, lapic_cal_t2;
static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;

/*
 * Temporary interrupt handler used during the timer calibration.
 */
static void __init lapic_cal_handler(struct clock_event_device *dev)
{
	unsigned long long tsc = 0;
	long tapic = apic_read(APIC_TMCCT);
	unsigned long pm = acpi_pm_read_early();

	if (boot_cpu_has(X86_FEATURE_TSC))
		tsc = rdtsc();

	switch (lapic_cal_loops++) {
	case 0:
		lapic_cal_t1 = tapic;
		lapic_cal_tsc1 = tsc;
		lapic_cal_pm1 = pm;
		lapic_cal_j1 = jiffies;
		break;

	case LAPIC_CAL_LOOPS:
		lapic_cal_t2 = tapic;
		lapic_cal_tsc2 = tsc;
		if (pm < lapic_cal_pm1)
			pm += ACPI_PM_OVRRUN;
		lapic_cal_pm2 = pm;
		lapic_cal_j2 = jiffies;
		break;
	}
}
746
747static int __init
748calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
749{
750 const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
751 const long pm_thresh = pm_100ms / 100;
752 unsigned long mult;
753 u64 res;
754
755#ifndef CONFIG_X86_PM_TIMER
756 return -1;
757#endif
758
759 apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);
760
761
762 if (!deltapm)
763 return -1;
764
765 mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
766
767 if (deltapm > (pm_100ms - pm_thresh) &&
768 deltapm < (pm_100ms + pm_thresh)) {
769 apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
770 return 0;
771 }
772
773 res = (((u64)deltapm) * mult) >> 22;
774 do_div(res, 1000000);
775 pr_warning("APIC calibration not consistent "
776 "with PM-Timer: %ldms instead of 100ms\n",(long)res);
777
778
779 res = (((u64)(*delta)) * pm_100ms);
780 do_div(res, deltapm);
781 pr_info("APIC delta adjusted to PM-Timer: "
782 "%lu (%ld)\n", (unsigned long)res, *delta);
783 *delta = (long)res;
784
785
786 if (boot_cpu_has(X86_FEATURE_TSC)) {
787 res = (((u64)(*deltatsc)) * pm_100ms);
788 do_div(res, deltapm);
789 apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
790 "PM-Timer: %lu (%ld)\n",
791 (unsigned long)res, *deltatsc);
792 *deltatsc = (long)res;
793 }
794
795 return 0;
796}
797
798static int __init lapic_init_clockevent(void)
799{
800 if (!lapic_timer_period)
801 return -1;
802
803
804 lapic_clockevent.mult = div_sc(lapic_timer_period/APIC_DIVISOR,
805 TICK_NSEC, lapic_clockevent.shift);
806 lapic_clockevent.max_delta_ns =
807 clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
808 lapic_clockevent.max_delta_ticks = 0x7FFFFFFF;
809 lapic_clockevent.min_delta_ns =
810 clockevent_delta2ns(0xF, &lapic_clockevent);
811 lapic_clockevent.min_delta_ticks = 0xF;
812
813 return 0;
814}
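
/*
 * Worked example for the conversion set up above (added for
 * illustration, numbers are hypothetical): with HZ=250
 * (TICK_NSEC = 4,000,000 ns) and a calibrated lapic_timer_period of
 * 400,000 bus clocks per tick, the programmed timer advances by
 * 400,000/16 = 25,000 counts per jiffy.  div_sc() then computes
 *
 *	mult ~= 25,000 * 2^32 / 4,000,000 ~= 26,843,546
 *
 * so that clockevent_delta2ns() and the clockevents core can convert
 * between nanoseconds and APIC timer counts with a multiply and a
 * 32-bit shift.
 */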

bool __init apic_needs_pit(void)
{
	/*
	 * If the frequencies are not known, PIT is required for both TSC
	 * and apic timer calibration.
	 */
	if (!tsc_khz || !cpu_khz)
		return true;

	/* Is there an APIC at all or is it disabled? */
	if (!boot_cpu_has(X86_FEATURE_APIC) || disable_apic)
		return true;

	/*
	 * If interrupt delivery mode is legacy PIC or virtual wire without
	 * configuration, the local APIC timer won't be set up. Make sure
	 * the PIT is initialized.
	 */
	if (apic_intr_mode == APIC_PIC ||
	    apic_intr_mode == APIC_VIRTUAL_WIRE_NO_CONFIG)
		return true;

	/* Virt guests may lack ARAT, but still have DEADLINE */
	if (!boot_cpu_has(X86_FEATURE_ARAT))
		return true;

	/* Deadline timer is based on TSC so no further PIT action required */
	if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		return false;

	/* APIC timer disabled? */
	if (disable_apic_timer)
		return true;

	/*
	 * The APIC timer frequency is known already, no PIT calibration
	 * required. If unknown, let the PIT be initialized.
	 */
	return lapic_timer_period == 0;
}

static int __init calibrate_APIC_clock(void)
{
	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
	void (*real_handler)(struct clock_event_device *dev);
	unsigned long deltaj;
	long delta, deltatsc;
	int pm_referenced = 0;

	if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
		return 0;

	/*
	 * Check if lapic timer has already been calibrated by platform
	 * specific routine, such as tsc calibration code. If so just fill
	 * in the clockevent structure and return.
	 */
	if (!lapic_init_clockevent()) {
		apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
			    lapic_timer_period);
		/*
		 * Direct calibration methods must have an always running
		 * local APIC timer, no need for broadcast timer.
		 */
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
		return 0;
	}

	apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
		    "calibrating APIC timer ...\n");

	local_irq_disable();

	/* Replace the global interrupt handler */
	real_handler = global_clock_event->event_handler;
	global_clock_event->event_handler = lapic_cal_handler;

	/*
	 * Setup the APIC counter to maximum. There is no way the lapic
	 * can underflow in the 100ms detection time frame.
	 */
	__setup_APIC_LVTT(0xffffffff, 0, 0);

	/* Let the interrupts run */
	local_irq_enable();

	while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
		cpu_relax();

	local_irq_disable();

	/* Restore the real event handler */
	global_clock_event->event_handler = real_handler;

	/* Build delta t1-t2 as apic timer counts down */
	delta = lapic_cal_t1 - lapic_cal_t2;
	apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);

	deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);

	/* we trust the PM based calibration if possible */
	pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
					      &delta, &deltatsc);

	lapic_timer_period = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
	lapic_init_clockevent();

	apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
	apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
	apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
		    lapic_timer_period);

	if (boot_cpu_has(X86_FEATURE_TSC)) {
		apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
			    "%ld.%04ld MHz.\n",
			    (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
			    (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
	}

	apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
		    "%u.%04u MHz.\n",
		    lapic_timer_period / (1000000 / HZ),
		    lapic_timer_period % (1000000 / HZ));

	/*
	 * Do a sanity check on the APIC calibration result
	 */
	if (lapic_timer_period < (1000000 / HZ)) {
		local_irq_enable();
		pr_warning("APIC frequency too slow, disabling apic timer\n");
		return -1;
	}

	levt->features &= ~CLOCK_EVT_FEAT_DUMMY;

	/*
	 * PM timer calibration failed or not turned on so lets try APIC
	 * timer based calibration, if a global clockevent device is
	 * available.
	 */
	if (!pm_referenced) {
		apic_printk(APIC_VERBOSE, "... verify APIC timer\n");

		/*
		 * Setup the apic timer manually
		 */
		levt->event_handler = lapic_cal_handler;
		lapic_timer_set_periodic(levt);
		lapic_cal_loops = -1;

		/* Let the interrupts run */
		local_irq_enable();

		while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
			cpu_relax();

		/* Stop the lapic timer */
		local_irq_disable();
		lapic_timer_shutdown(levt);

		/* Jiffies delta */
		deltaj = lapic_cal_j2 - lapic_cal_j1;
		apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);

		/* Check, if the jiffies result is consistent */
		if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
			apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
		else
			levt->features |= CLOCK_EVT_FEAT_DUMMY;
	}
	local_irq_enable();

	if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
		pr_warning("APIC timer disabled due to verification failure\n");
		return -1;
	}

	return 0;
}
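
/*
 * Calibration arithmetic, summarized with illustrative numbers (added
 * comment; these are not measurements from the original source): the
 * PIT/HPET global clock event calls lapic_cal_handler() for
 * LAPIC_CAL_LOOPS = HZ/10 ticks, i.e. 100 ms.  If the free-running APIC
 * count (TMCCT, counting down behind the divide-by-16 prescaler) drops
 * by delta = 2,500,000 in that window, then
 *
 *	lapic_timer_period = delta * 16 / (HZ/10)
 *	                   = 2,500,000 * 16 / 25 = 1,600,000
 *
 * bus clocks per jiffy with HZ=250, i.e. a 400 MHz bus clock.  The
 * PM-timer and jiffies cross-checks only adjust or reject this result;
 * they do not change the method.
 */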

/*
 * Setup the boot APIC
 *
 * Calibrate and verify the result.
 */
999void __init setup_boot_APIC_clock(void)
1000{
1001
1002
1003
1004
1005
1006
1007 if (disable_apic_timer) {
1008 pr_info("Disabling APIC timer\n");
1009
1010 if (num_possible_cpus() > 1) {
1011 lapic_clockevent.mult = 1;
1012 setup_APIC_timer();
1013 }
1014 return;
1015 }
1016
1017 if (calibrate_APIC_clock()) {
1018
1019 if (num_possible_cpus() > 1)
1020 setup_APIC_timer();
1021 return;
1022 }
1023
1024
1025
1026
1027
1028
1029 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
1030
1031
1032 setup_APIC_timer();
1033 amd_e400_c1e_apic_setup();
1034}
1035
1036void setup_secondary_APIC_clock(void)
1037{
1038 setup_APIC_timer();
1039 amd_e400_c1e_apic_setup();
1040}

/*
 * The guts of the apic timer interrupt
 */
1045static void local_apic_timer_interrupt(void)
1046{
1047 struct clock_event_device *evt = this_cpu_ptr(&lapic_events);
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060 if (!evt->event_handler) {
1061 pr_warning("Spurious LAPIC timer interrupt on cpu %d\n",
1062 smp_processor_id());
1063
1064 lapic_timer_shutdown(evt);
1065 return;
1066 }
1067
1068
1069
1070
1071 inc_irq_stat(apic_timer_irqs);
1072
1073 evt->event_handler(evt);
1074}

/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 *   interrupt as well. Thus we cannot inline the local irq ... ]
 */
1084__visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
1085{
1086 struct pt_regs *old_regs = set_irq_regs(regs);
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096 entering_ack_irq();
1097 trace_local_timer_entry(LOCAL_TIMER_VECTOR);
1098 local_apic_timer_interrupt();
1099 trace_local_timer_exit(LOCAL_TIMER_VECTOR);
1100 exiting_irq();
1101
1102 set_irq_regs(old_regs);
1103}
1104
1105int setup_profiling_timer(unsigned int multiplier)
1106{
1107 return -EINVAL;
1108}

/*
 * Local APIC start and shutdown
 */

/**
 * clear_local_APIC - shutdown the local APIC
 *
 * This is called, when a CPU is disabled and before rebooting, so the state of
 * the local APIC has no dangling leftovers. Also used to cleanout any BIOS
 * leftovers during boot.
 */
1121void clear_local_APIC(void)
1122{
1123 int maxlvt;
1124 u32 v;
1125
1126
1127 if (!x2apic_mode && !apic_phys)
1128 return;
1129
1130 maxlvt = lapic_get_maxlvt();
1131
1132
1133
1134
1135 if (maxlvt >= 3) {
1136 v = ERROR_APIC_VECTOR;
1137 apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
1138 }
1139
1140
1141
1142
1143 v = apic_read(APIC_LVTT);
1144 apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
1145 v = apic_read(APIC_LVT0);
1146 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1147 v = apic_read(APIC_LVT1);
1148 apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
1149 if (maxlvt >= 4) {
1150 v = apic_read(APIC_LVTPC);
1151 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
1152 }
1153
1154
1155#ifdef CONFIG_X86_THERMAL_VECTOR
1156 if (maxlvt >= 5) {
1157 v = apic_read(APIC_LVTTHMR);
1158 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
1159 }
1160#endif
1161#ifdef CONFIG_X86_MCE_INTEL
1162 if (maxlvt >= 6) {
1163 v = apic_read(APIC_LVTCMCI);
1164 if (!(v & APIC_LVT_MASKED))
1165 apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
1166 }
1167#endif
1168
1169
1170
1171
1172 apic_write(APIC_LVTT, APIC_LVT_MASKED);
1173 apic_write(APIC_LVT0, APIC_LVT_MASKED);
1174 apic_write(APIC_LVT1, APIC_LVT_MASKED);
1175 if (maxlvt >= 3)
1176 apic_write(APIC_LVTERR, APIC_LVT_MASKED);
1177 if (maxlvt >= 4)
1178 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
1179
1180
1181 if (lapic_is_integrated()) {
1182 if (maxlvt > 3)
1183
1184 apic_write(APIC_ESR, 0);
1185 apic_read(APIC_ESR);
1186 }
1187}

/**
 * disable_local_APIC - clear and disable the local APIC
 */
1192void disable_local_APIC(void)
1193{
1194 unsigned int value;
1195
1196
1197 if (!x2apic_mode && !apic_phys)
1198 return;
1199
1200 clear_local_APIC();
1201
1202
1203
1204
1205
1206 value = apic_read(APIC_SPIV);
1207 value &= ~APIC_SPIV_APIC_ENABLED;
1208 apic_write(APIC_SPIV, value);
1209
1210#ifdef CONFIG_X86_32
1211
1212
1213
1214
1215 if (enabled_via_apicbase) {
1216 unsigned int l, h;
1217
1218 rdmsr(MSR_IA32_APICBASE, l, h);
1219 l &= ~MSR_IA32_APICBASE_ENABLE;
1220 wrmsr(MSR_IA32_APICBASE, l, h);
1221 }
1222#endif
1223}

/*
 * If Linux enabled the LAPIC against the BIOS default disable it down before
 * re-entering the BIOS on shutdown.  Otherwise the BIOS may get confused and
 * not power-off.  Additionally clear all LVT entries before disable_local_APIC
 * for the case where Linux didn't enable the LAPIC.
 */
1231void lapic_shutdown(void)
1232{
1233 unsigned long flags;
1234
1235 if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
1236 return;
1237
1238 local_irq_save(flags);
1239
1240#ifdef CONFIG_X86_32
1241 if (!enabled_via_apicbase)
1242 clear_local_APIC();
1243 else
1244#endif
1245 disable_local_APIC();
1246
1247
1248 local_irq_restore(flags);
1249}
1250
1251
1252
1253
1254void __init sync_Arb_IDs(void)
1255{
1256
1257
1258
1259
1260 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1261 return;
1262
1263
1264
1265
1266 apic_wait_icr_idle();
1267
1268 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
1269 apic_write(APIC_ICR, APIC_DEST_ALLINC |
1270 APIC_INT_LEVELTRIG | APIC_DM_INIT);
1271}
1272
1273enum apic_intr_mode_id apic_intr_mode;
1274
1275static int __init __apic_intr_mode_select(void)
1276{
1277
1278 if (disable_apic) {
1279 pr_info("APIC disabled via kernel command line\n");
1280 return APIC_PIC;
1281 }
1282
1283
1284#ifdef CONFIG_X86_64
1285
1286 if (!boot_cpu_has(X86_FEATURE_APIC)) {
1287 disable_apic = 1;
1288 pr_info("APIC disabled by BIOS\n");
1289 return APIC_PIC;
1290 }
1291#else
1292
1293
1294
1295 if (!boot_cpu_has(X86_FEATURE_APIC) && !smp_found_config) {
1296 disable_apic = 1;
1297 return APIC_PIC;
1298 }
1299
1300
1301 if (!boot_cpu_has(X86_FEATURE_APIC) &&
1302 APIC_INTEGRATED(boot_cpu_apic_version)) {
1303 disable_apic = 1;
1304 pr_err(FW_BUG "Local APIC %d not detected, force emulation\n",
1305 boot_cpu_physical_apicid);
1306 return APIC_PIC;
1307 }
1308#endif
1309
1310
1311 if (!smp_found_config) {
1312 disable_ioapic_support();
1313 if (!acpi_lapic) {
1314 pr_info("APIC: ACPI MADT or MP tables are not detected\n");
1315 return APIC_VIRTUAL_WIRE_NO_CONFIG;
1316 }
1317 return APIC_VIRTUAL_WIRE;
1318 }
1319
1320#ifdef CONFIG_SMP
1321
1322 if (!setup_max_cpus) {
1323 pr_info("APIC: SMP mode deactivated\n");
1324 return APIC_SYMMETRIC_IO_NO_ROUTING;
1325 }
1326
1327 if (read_apic_id() != boot_cpu_physical_apicid) {
1328 panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
1329 read_apic_id(), boot_cpu_physical_apicid);
1330
1331 }
1332#endif
1333
1334 return APIC_SYMMETRIC_IO;
1335}
1336
1337
1338void __init apic_intr_mode_select(void)
1339{
1340 apic_intr_mode = __apic_intr_mode_select();
1341}
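
/*
 * Summary of the selection above (comment added for clarity): the boot
 * interrupt delivery mode is picked in this order - forced PIC when the
 * APIC is disabled or missing, virtual wire (with or without an MP/ACPI
 * configuration) when no I/O APIC configuration was found, symmetric I/O
 * without vector routing when SMP is disabled on the command line
 * (nosmp/maxcpus=0), and full symmetric I/O otherwise.
 */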

/*
 * An initial setup of the virtual wire mode.
 */
1346void __init init_bsp_APIC(void)
1347{
1348 unsigned int value;
1349
1350
1351
1352
1353
1354 if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
1355 return;
1356
1357
1358
1359
1360 clear_local_APIC();
1361
1362
1363
1364
1365 value = apic_read(APIC_SPIV);
1366 value &= ~APIC_VECTOR_MASK;
1367 value |= APIC_SPIV_APIC_ENABLED;
1368
1369#ifdef CONFIG_X86_32
1370
1371 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
1372 (boot_cpu_data.x86 == 15))
1373 value &= ~APIC_SPIV_FOCUS_DISABLED;
1374 else
1375#endif
1376 value |= APIC_SPIV_FOCUS_DISABLED;
1377 value |= SPURIOUS_APIC_VECTOR;
1378 apic_write(APIC_SPIV, value);
1379
1380
1381
1382
1383 apic_write(APIC_LVT0, APIC_DM_EXTINT);
1384 value = APIC_DM_NMI;
1385 if (!lapic_is_integrated())
1386 value |= APIC_LVT_LEVEL_TRIGGER;
1387 if (apic_extnmi == APIC_EXTNMI_NONE)
1388 value |= APIC_LVT_MASKED;
1389 apic_write(APIC_LVT1, value);
1390}
1391
1392
1393void __init apic_intr_mode_init(void)
1394{
1395 bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT);
1396
1397 switch (apic_intr_mode) {
1398 case APIC_PIC:
1399 pr_info("APIC: Keep in PIC mode(8259)\n");
1400 return;
1401 case APIC_VIRTUAL_WIRE:
1402 pr_info("APIC: Switch to virtual wire mode setup\n");
1403 default_setup_apic_routing();
1404 break;
1405 case APIC_VIRTUAL_WIRE_NO_CONFIG:
1406 pr_info("APIC: Switch to virtual wire mode setup with no configuration\n");
1407 upmode = true;
1408 default_setup_apic_routing();
1409 break;
1410 case APIC_SYMMETRIC_IO:
1411 pr_info("APIC: Switch to symmetric I/O mode setup\n");
1412 default_setup_apic_routing();
1413 break;
1414 case APIC_SYMMETRIC_IO_NO_ROUTING:
1415 pr_info("APIC: Switch to symmetric I/O mode setup in no SMP routine\n");
1416 break;
1417 }
1418
1419 apic_bsp_setup(upmode);
1420}
1421
1422static void lapic_setup_esr(void)
1423{
1424 unsigned int oldvalue, value, maxlvt;
1425
1426 if (!lapic_is_integrated()) {
1427 pr_info("No ESR for 82489DX.\n");
1428 return;
1429 }
1430
1431 if (apic->disable_esr) {
1432
1433
1434
1435
1436
1437
1438 pr_info("Leaving ESR disabled.\n");
1439 return;
1440 }
1441
1442 maxlvt = lapic_get_maxlvt();
1443 if (maxlvt > 3)
1444 apic_write(APIC_ESR, 0);
1445 oldvalue = apic_read(APIC_ESR);
1446
1447
1448 value = ERROR_APIC_VECTOR;
1449 apic_write(APIC_LVTERR, value);
1450
1451
1452
1453
1454 if (maxlvt > 3)
1455 apic_write(APIC_ESR, 0);
1456 value = apic_read(APIC_ESR);
1457 if (value != oldvalue)
1458 apic_printk(APIC_VERBOSE, "ESR value before enabling "
1459 "vector: 0x%08x after: 0x%08x\n",
1460 oldvalue, value);
1461}
1462
1463static void apic_pending_intr_clear(void)
1464{
1465 long long max_loops = cpu_khz ? cpu_khz : 1000000;
1466 unsigned long long tsc = 0, ntsc;
1467 unsigned int queued;
1468 unsigned long value;
1469 int i, j, acked = 0;
1470
1471 if (boot_cpu_has(X86_FEATURE_TSC))
1472 tsc = rdtsc();
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484 do {
1485 queued = 0;
1486 for (i = APIC_ISR_NR - 1; i >= 0; i--)
1487 queued |= apic_read(APIC_IRR + i*0x10);
1488
1489 for (i = APIC_ISR_NR - 1; i >= 0; i--) {
1490 value = apic_read(APIC_ISR + i*0x10);
1491 for_each_set_bit(j, &value, 32) {
1492 ack_APIC_irq();
1493 acked++;
1494 }
1495 }
1496 if (acked > 256) {
1497 pr_err("LAPIC pending interrupts after %d EOI\n", acked);
1498 break;
1499 }
1500 if (queued) {
1501 if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
1502 ntsc = rdtsc();
1503 max_loops = (cpu_khz << 10) - (ntsc - tsc);
1504 } else {
1505 max_loops--;
1506 }
1507 }
1508 } while (queued && max_loops > 0);
1509 WARN_ON(max_loops <= 0);
1510}
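
/*
 * Note added for clarity: apic_pending_intr_clear() acks every bit that
 * is set in the in-service registers and re-checks the interrupt request
 * registers until both are empty, bounded by roughly one second of TSC
 * time (or a plain countdown when the TSC frequency is unknown), so a
 * firmware-left-over pending interrupt cannot wedge the APIC setup path.
 */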

/*
 * setup_local_APIC - setup the local APIC
 *
 * Used to setup local APIC while initializing BSP or bringing up APs.
 * Always called with preemption disabled.
 */
1518static void setup_local_APIC(void)
1519{
1520 int cpu = smp_processor_id();
1521 unsigned int value;
1522#ifdef CONFIG_X86_32
1523 int logical_apicid, ldr_apicid;
1524#endif
1525
1526
1527 if (disable_apic) {
1528 disable_ioapic_support();
1529 return;
1530 }
1531
1532#ifdef CONFIG_X86_32
1533
1534 if (lapic_is_integrated() && apic->disable_esr) {
1535 apic_write(APIC_ESR, 0);
1536 apic_write(APIC_ESR, 0);
1537 apic_write(APIC_ESR, 0);
1538 apic_write(APIC_ESR, 0);
1539 }
1540#endif
1541 perf_events_lapic_init();
1542
1543
1544
1545
1546
1547 BUG_ON(!apic->apic_id_registered());
1548
1549
1550
1551
1552
1553
1554 apic->init_apic_ldr();
1555
1556#ifdef CONFIG_X86_32
1557
1558
1559
1560
1561
1562 logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
1563 ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
1564 WARN_ON(logical_apicid != BAD_APICID && logical_apicid != ldr_apicid);
1565
1566 early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
1567#endif
1568
1569
1570
1571
1572
1573 value = apic_read(APIC_TASKPRI);
1574 value &= ~APIC_TPRI_MASK;
1575 apic_write(APIC_TASKPRI, value);
1576
1577 apic_pending_intr_clear();
1578
1579
1580
1581
1582 value = apic_read(APIC_SPIV);
1583 value &= ~APIC_VECTOR_MASK;
1584
1585
1586
1587 value |= APIC_SPIV_APIC_ENABLED;
1588
1589#ifdef CONFIG_X86_32
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614 value &= ~APIC_SPIV_FOCUS_DISABLED;
1615#endif
1616
1617
1618
1619
1620 value |= SPURIOUS_APIC_VECTOR;
1621 apic_write(APIC_SPIV, value);
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1634 if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
1635 value = APIC_DM_EXTINT;
1636 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
1637 } else {
1638 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
1639 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu);
1640 }
1641 apic_write(APIC_LVT0, value);
1642
1643
1644
1645
1646
1647 if ((!cpu && apic_extnmi != APIC_EXTNMI_NONE) ||
1648 apic_extnmi == APIC_EXTNMI_ALL)
1649 value = APIC_DM_NMI;
1650 else
1651 value = APIC_DM_NMI | APIC_LVT_MASKED;
1652
1653
1654 if (!lapic_is_integrated())
1655 value |= APIC_LVT_LEVEL_TRIGGER;
1656 apic_write(APIC_LVT1, value);
1657
1658#ifdef CONFIG_X86_MCE_INTEL
1659
1660 if (!cpu)
1661 cmci_recheck();
1662#endif
1663}
1664
1665static void end_local_APIC_setup(void)
1666{
1667 lapic_setup_esr();
1668
1669#ifdef CONFIG_X86_32
1670 {
1671 unsigned int value;
1672
1673 value = apic_read(APIC_LVTT);
1674 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1675 apic_write(APIC_LVTT, value);
1676 }
1677#endif
1678
1679 apic_pm_activate();
1680}

/*
 * APIC setup function for application processors. Called from smpboot.c
 */
1685void apic_ap_setup(void)
1686{
1687 setup_local_APIC();
1688 end_local_APIC_setup();
1689}
1690
1691#ifdef CONFIG_X86_X2APIC
1692int x2apic_mode;
1693
1694enum {
1695 X2APIC_OFF,
1696 X2APIC_ON,
1697 X2APIC_DISABLED,
1698};
1699static int x2apic_state;
1700
1701static void __x2apic_disable(void)
1702{
1703 u64 msr;
1704
1705 if (!boot_cpu_has(X86_FEATURE_APIC))
1706 return;
1707
1708 rdmsrl(MSR_IA32_APICBASE, msr);
1709 if (!(msr & X2APIC_ENABLE))
1710 return;
1711
1712 wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
1713 wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
1714 printk_once(KERN_INFO "x2apic disabled\n");
1715}
1716
1717static void __x2apic_enable(void)
1718{
1719 u64 msr;
1720
1721 rdmsrl(MSR_IA32_APICBASE, msr);
1722 if (msr & X2APIC_ENABLE)
1723 return;
1724 wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
1725 printk_once(KERN_INFO "x2apic enabled\n");
1726}
1727
1728static int __init setup_nox2apic(char *str)
1729{
1730 if (x2apic_enabled()) {
1731 int apicid = native_apic_msr_read(APIC_ID);
1732
1733 if (apicid >= 255) {
1734 pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
1735 apicid);
1736 return 0;
1737 }
1738 pr_warning("x2apic already enabled.\n");
1739 __x2apic_disable();
1740 }
1741 setup_clear_cpu_cap(X86_FEATURE_X2APIC);
1742 x2apic_state = X2APIC_DISABLED;
1743 x2apic_mode = 0;
1744 return 0;
1745}
1746early_param("nox2apic", setup_nox2apic);
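
/*
 * Illustrative usage (comment added, not from the original source):
 * booting with "nox2apic" keeps or puts the APIC in xAPIC mode.  As the
 * check above shows, this is refused when the firmware already enabled
 * x2APIC and the boot CPU's APIC ID is 255 or larger, since such IDs
 * cannot be expressed in xAPIC mode.
 */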
1747
1748
1749void x2apic_setup(void)
1750{
1751
1752
1753
1754
1755 if (x2apic_state != X2APIC_ON) {
1756 __x2apic_disable();
1757 return;
1758 }
1759 __x2apic_enable();
1760}
1761
1762static __init void x2apic_disable(void)
1763{
1764 u32 x2apic_id, state = x2apic_state;
1765
1766 x2apic_mode = 0;
1767 x2apic_state = X2APIC_DISABLED;
1768
1769 if (state != X2APIC_ON)
1770 return;
1771
1772 x2apic_id = read_apic_id();
1773 if (x2apic_id >= 255)
1774 panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
1775
1776 __x2apic_disable();
1777 register_lapic_address(mp_lapic_addr);
1778}
1779
1780static __init void x2apic_enable(void)
1781{
1782 if (x2apic_state != X2APIC_OFF)
1783 return;
1784
1785 x2apic_mode = 1;
1786 x2apic_state = X2APIC_ON;
1787 __x2apic_enable();
1788}
1789
1790static __init void try_to_enable_x2apic(int remap_mode)
1791{
1792 if (x2apic_state == X2APIC_DISABLED)
1793 return;
1794
1795 if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
1796 u32 apic_limit = 255;
1797
1798
1799
1800
1801
1802 if (!x86_init.hyper.x2apic_available()) {
1803 pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
1804 x2apic_disable();
1805 return;
1806 }
1807
1808
1809
1810
1811
1812
1813 if (x86_init.hyper.msi_ext_dest_id()) {
1814 virt_ext_dest_id = 1;
1815 apic_limit = 32767;
1816 }
1817
1818
1819
1820
1821
1822
1823 x2apic_set_max_apicid(apic_limit);
1824 x2apic_phys = 1;
1825 }
1826 x2apic_enable();
1827}
1828
1829void __init check_x2apic(void)
1830{
1831 if (x2apic_enabled()) {
1832 pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
1833 x2apic_mode = 1;
1834 x2apic_state = X2APIC_ON;
1835 } else if (!boot_cpu_has(X86_FEATURE_X2APIC)) {
1836 x2apic_state = X2APIC_DISABLED;
1837 }
1838}
1839#else
1840static int __init validate_x2apic(void)
1841{
1842 if (!apic_is_x2apic_enabled())
1843 return 0;
1844
1845
1846
1847 panic("BIOS has enabled x2apic but kernel doesn't support x2apic, please disable x2apic in BIOS.\n");
1848}
1849early_initcall(validate_x2apic);
1850
1851static inline void try_to_enable_x2apic(int remap_mode) { }
1852static inline void __x2apic_enable(void) { }
1853#endif
1854
1855void __init enable_IR_x2apic(void)
1856{
1857 unsigned long flags;
1858 int ret, ir_stat;
1859
1860 if (skip_ioapic_setup) {
1861 pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
1862 return;
1863 }
1864
1865 ir_stat = irq_remapping_prepare();
1866 if (ir_stat < 0 && !x2apic_supported())
1867 return;
1868
1869 ret = save_ioapic_entries();
1870 if (ret) {
1871 pr_info("Saving IO-APIC state failed: %d\n", ret);
1872 return;
1873 }
1874
1875 local_irq_save(flags);
1876 legacy_pic->mask_all();
1877 mask_ioapic_entries();
1878
1879
1880 if (ir_stat >= 0)
1881 ir_stat = irq_remapping_enable();
1882
1883 try_to_enable_x2apic(ir_stat);
1884
1885 if (ir_stat < 0)
1886 restore_ioapic_entries();
1887 legacy_pic->restore_mask();
1888 local_irq_restore(flags);
1889}
1890
1891#ifdef CONFIG_X86_64
1892
1893
1894
1895
1896
1897
1898static int __init detect_init_APIC(void)
1899{
1900 if (!boot_cpu_has(X86_FEATURE_APIC)) {
1901 pr_info("No local APIC present\n");
1902 return -1;
1903 }
1904
1905 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1906 return 0;
1907}
1908#else
1909
1910static int __init apic_verify(void)
1911{
1912 u32 features, h, l;
1913
1914
1915
1916
1917
1918 features = cpuid_edx(1);
1919 if (!(features & (1 << X86_FEATURE_APIC))) {
1920 pr_warning("Could not enable APIC!\n");
1921 return -1;
1922 }
1923 set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1924 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1925
1926
1927 if (boot_cpu_data.x86 >= 6) {
1928 rdmsr(MSR_IA32_APICBASE, l, h);
1929 if (l & MSR_IA32_APICBASE_ENABLE)
1930 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
1931 }
1932
1933 pr_info("Found and enabled local APIC!\n");
1934 return 0;
1935}
1936
1937int __init apic_force_enable(unsigned long addr)
1938{
1939 u32 h, l;
1940
1941 if (disable_apic)
1942 return -1;
1943
1944
1945
1946
1947
1948
1949 if (boot_cpu_data.x86 >= 6) {
1950 rdmsr(MSR_IA32_APICBASE, l, h);
1951 if (!(l & MSR_IA32_APICBASE_ENABLE)) {
1952 pr_info("Local APIC disabled by BIOS -- reenabling.\n");
1953 l &= ~MSR_IA32_APICBASE_BASE;
1954 l |= MSR_IA32_APICBASE_ENABLE | addr;
1955 wrmsr(MSR_IA32_APICBASE, l, h);
1956 enabled_via_apicbase = 1;
1957 }
1958 }
1959 return apic_verify();
1960}
1961
1962
1963
1964
1965static int __init detect_init_APIC(void)
1966{
1967
1968 if (disable_apic)
1969 return -1;
1970
1971 switch (boot_cpu_data.x86_vendor) {
1972 case X86_VENDOR_AMD:
1973 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
1974 (boot_cpu_data.x86 >= 15))
1975 break;
1976 goto no_apic;
1977 case X86_VENDOR_INTEL:
1978 if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
1979 (boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)))
1980 break;
1981 goto no_apic;
1982 default:
1983 goto no_apic;
1984 }
1985
1986 if (!boot_cpu_has(X86_FEATURE_APIC)) {
1987
1988
1989
1990
1991 if (!force_enable_local_apic) {
1992 pr_info("Local APIC disabled by BIOS -- "
1993 "you can enable it with \"lapic\"\n");
1994 return -1;
1995 }
1996 if (apic_force_enable(APIC_DEFAULT_PHYS_BASE))
1997 return -1;
1998 } else {
1999 if (apic_verify())
2000 return -1;
2001 }
2002
2003 apic_pm_activate();
2004
2005 return 0;
2006
2007no_apic:
2008 pr_info("No local APIC present or hardware disabled\n");
2009 return -1;
2010}
2011#endif
2012
2013
2014
2015
2016void __init init_apic_mappings(void)
2017{
2018 unsigned int new_apicid;
2019
2020 apic_check_deadline_errata();
2021
2022 if (x2apic_mode) {
2023 boot_cpu_physical_apicid = read_apic_id();
2024 return;
2025 }
2026
2027
2028 if (!smp_found_config && detect_init_APIC()) {
2029
2030 pr_info("APIC: disable apic facility\n");
2031 apic_disable();
2032 } else {
2033 apic_phys = mp_lapic_addr;
2034
2035
2036
2037
2038
2039 if (!acpi_lapic && !smp_found_config)
2040 register_lapic_address(apic_phys);
2041 }
2042
2043
2044
2045
2046
2047 new_apicid = read_apic_id();
2048 if (boot_cpu_physical_apicid != new_apicid) {
2049 boot_cpu_physical_apicid = new_apicid;
2050
2051
2052
2053
2054
2055
2056
2057 boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
2058 }
2059}
2060
2061void __init register_lapic_address(unsigned long address)
2062{
2063 mp_lapic_addr = address;
2064
2065 if (!x2apic_mode) {
2066 set_fixmap_nocache(FIX_APIC_BASE, address);
2067 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
2068 APIC_BASE, address);
2069 }
2070 if (boot_cpu_physical_apicid == -1U) {
2071 boot_cpu_physical_apicid = read_apic_id();
2072 boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
2073 }
2074}
2075

/*
 * This interrupt should _never_ happen with our APIC/SMP architecture
 */
2083__visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
2084{
2085 u8 vector = ~regs->orig_ax;
2086 u32 v;
2087
2088 entering_irq();
2089 trace_spurious_apic_entry(vector);
2090
2091 inc_irq_stat(irq_spurious_count);
2092
2093
2094
2095
2096 if (vector == SPURIOUS_APIC_VECTOR) {
2097
2098 pr_info("Spurious APIC interrupt (vector 0xFF) on CPU#%d, should never happen.\n",
2099 smp_processor_id());
2100 goto out;
2101 }
2102
2103
2104
2105
2106
2107 v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
2108 if (v & (1 << (vector & 0x1f))) {
2109 pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
2110 vector, smp_processor_id());
2111 ack_APIC_irq();
2112 } else {
2113 pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
2114 vector, smp_processor_id());
2115 }
2116out:
2117 trace_spurious_apic_exit(vector);
2118 exiting_irq();
2119}

/*
 * This interrupt should never happen with our APIC/SMP architecture
 */
2124__visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
2125{
2126 static const char * const error_interrupt_reason[] = {
2127 "Send CS error",
2128 "Receive CS error",
2129 "Send accept error",
2130 "Receive accept error",
2131 "Redirectable IPI",
2132 "Send illegal vector",
2133 "Received illegal vector",
2134 "Illegal register address",
2135 };
2136 u32 v, i = 0;
2137
2138 entering_irq();
2139 trace_error_apic_entry(ERROR_APIC_VECTOR);
2140
2141
2142 if (lapic_get_maxlvt() > 3)
2143 apic_write(APIC_ESR, 0);
2144 v = apic_read(APIC_ESR);
2145 ack_APIC_irq();
2146 atomic_inc(&irq_err_count);
2147
2148 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
2149 smp_processor_id(), v);
2150
2151 v &= 0xff;
2152 while (v) {
2153 if (v & 0x1)
2154 apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]);
2155 i++;
2156 v >>= 1;
2157 }
2158
2159 apic_printk(APIC_DEBUG, KERN_CONT "\n");
2160
2161 trace_error_apic_exit(ERROR_APIC_VECTOR);
2162 exiting_irq();
2163}

/*
 * connect_bsp_APIC - attach the APIC to the interrupt system
 */
2168static void __init connect_bsp_APIC(void)
2169{
2170#ifdef CONFIG_X86_32
2171 if (pic_mode) {
2172
2173
2174
2175 clear_local_APIC();
2176
2177
2178
2179
2180 apic_printk(APIC_VERBOSE, "leaving PIC mode, "
2181 "enabling APIC mode.\n");
2182 imcr_pic_to_apic();
2183 }
2184#endif
2185}

/*
 * disconnect_bsp_APIC - detach the APIC from the interrupt system
 * @virt_wire_setup:	indicates, whether virtual wire mode is selected
 *
 * Virtual wire mode is necessary to deliver legacy interrupts even when the
 * APIC is disabled.
 */
2194void disconnect_bsp_APIC(int virt_wire_setup)
2195{
2196 unsigned int value;
2197
2198#ifdef CONFIG_X86_32
2199 if (pic_mode) {
2200
2201
2202
2203
2204
2205
2206 apic_printk(APIC_VERBOSE, "disabling APIC mode, "
2207 "entering PIC mode.\n");
2208 imcr_apic_to_pic();
2209 return;
2210 }
2211#endif
2212
2213
2214
2215
2216 value = apic_read(APIC_SPIV);
2217 value &= ~APIC_VECTOR_MASK;
2218 value |= APIC_SPIV_APIC_ENABLED;
2219 value |= 0xf;
2220 apic_write(APIC_SPIV, value);
2221
2222 if (!virt_wire_setup) {
2223
2224
2225
2226
2227 value = apic_read(APIC_LVT0);
2228 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2229 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2230 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2231 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2232 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
2233 apic_write(APIC_LVT0, value);
2234 } else {
2235
2236 apic_write(APIC_LVT0, APIC_LVT_MASKED);
2237 }
2238
2239
2240
2241
2242
2243 value = apic_read(APIC_LVT1);
2244 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2245 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2246 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2247 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2248 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
2249 apic_write(APIC_LVT1, value);
2250}

/*
 * The number of allocated logical CPU IDs. Since logical CPU IDs are allocated
 * contiguously, it equals the current allocated max logical CPU ID plus 1.
 * All allocated CPU IDs should be in the [0, nr_logical_cpuids) range.
 *
 * NOTE: Reserve 0 for BSP.
 */
2260static int nr_logical_cpuids = 1;
2261
2262
2263
2264
2265static int cpuid_to_apicid[] = {
2266 [0 ... NR_CPUS - 1] = -1,
2267};
2268
2269#ifdef CONFIG_SMP
2270
2271
2272
2273
2274bool apic_id_is_primary_thread(unsigned int apicid)
2275{
2276 u32 mask;
2277
2278 if (smp_num_siblings == 1)
2279 return true;
2280
2281 mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
2282 return !(apicid & mask);
2283}
2284#endif
2285
2286
2287
2288
2289
2290static int allocate_logical_cpuid(int apicid)
2291{
2292 int i;
2293
2294
2295
2296
2297
2298 for (i = 0; i < nr_logical_cpuids; i++) {
2299 if (cpuid_to_apicid[i] == apicid)
2300 return i;
2301 }
2302
2303
2304 if (nr_logical_cpuids >= nr_cpu_ids) {
2305 WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %u reached. "
2306 "Processor %d/0x%x and the rest are ignored.\n",
2307 nr_cpu_ids, nr_logical_cpuids, apicid);
2308 return -EINVAL;
2309 }
2310
2311 cpuid_to_apicid[nr_logical_cpuids] = apicid;
2312 return nr_logical_cpuids++;
2313}
2314
2315int generic_processor_info(int apicid, int version)
2316{
2317 int cpu, max = nr_cpu_ids;
2318 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
2319 phys_cpu_present_map);
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340 if (disabled_cpu_apicid != BAD_APICID &&
2341 disabled_cpu_apicid != read_apic_id() &&
2342 disabled_cpu_apicid == apicid) {
2343 int thiscpu = num_processors + disabled_cpus;
2344
2345 pr_warning("APIC: Disabling requested cpu."
2346 " Processor %d/0x%x ignored.\n",
2347 thiscpu, apicid);
2348
2349 disabled_cpus++;
2350 return -ENODEV;
2351 }
2352
2353
2354
2355
2356
2357 if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 &&
2358 apicid != boot_cpu_physical_apicid) {
2359 int thiscpu = max + disabled_cpus - 1;
2360
2361 pr_warning(
2362 "APIC: NR_CPUS/possible_cpus limit of %i almost"
2363 " reached. Keeping one slot for boot cpu."
2364 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
2365
2366 disabled_cpus++;
2367 return -ENODEV;
2368 }
2369
2370 if (num_processors >= nr_cpu_ids) {
2371 int thiscpu = max + disabled_cpus;
2372
2373 pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
2374 "reached. Processor %d/0x%x ignored.\n",
2375 max, thiscpu, apicid);
2376
2377 disabled_cpus++;
2378 return -EINVAL;
2379 }
2380
2381 if (apicid == boot_cpu_physical_apicid) {
2382
2383
2384
2385
2386
2387
2388
2389 cpu = 0;
2390
2391
2392 cpuid_to_apicid[0] = apicid;
2393 } else {
2394 cpu = allocate_logical_cpuid(apicid);
2395 if (cpu < 0) {
2396 disabled_cpus++;
2397 return -EINVAL;
2398 }
2399 }
2400
2401
2402
2403
2404 if (version == 0x0) {
2405 pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
2406 cpu, apicid);
2407 version = 0x10;
2408 }
2409
2410 if (version != boot_cpu_apic_version) {
2411 pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
2412 boot_cpu_apic_version, cpu, version);
2413 }
2414
2415 if (apicid > max_physical_apicid)
2416 max_physical_apicid = apicid;
2417
2418#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
2419 early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
2420 early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
2421#endif
2422#ifdef CONFIG_X86_32
2423 early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
2424 apic->x86_32_early_logical_apicid(cpu);
2425#endif
2426 set_cpu_possible(cpu, true);
2427 physid_set(apicid, phys_cpu_present_map);
2428 set_cpu_present(cpu, true);
2429 num_processors++;
2430
2431 return cpu;
2432}
2433
2434int hard_smp_processor_id(void)
2435{
2436 return read_apic_id();
2437}
2438
2439void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg,
2440 bool dmar)
2441{
2442 memset(msg, 0, sizeof(*msg));
2443
2444 msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
2445 msg->arch_addr_lo.dest_mode_logical = (apic->irq_dest_mode != 0);
2446 msg->arch_addr_lo.destid_0_7 = cfg->dest_apicid & 0xFF;
2447
2448 msg->arch_data.delivery_mode = APIC_DELIVERY_MODE_FIXED;
2449 msg->arch_data.vector = cfg->vector;
2450
2451 msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461 if (dmar)
2462 msg->arch_addr_hi.destid_8_31 = cfg->dest_apicid >> 8;
2463 else if (virt_ext_dest_id && cfg->dest_apicid < 0x8000)
2464 msg->arch_addr_lo.virt_destid_8_14 = cfg->dest_apicid >> 8;
2465 else
2466 WARN_ON_ONCE(cfg->dest_apicid > 0xFF);
2467}
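
/*
 * Example of the resulting destination ID split (added comment,
 * illustrative values): for cfg->dest_apicid = 0x1234,
 * __irq_msi_compose_msg() stores 0x34 in arch_addr_lo.destid_0_7 and,
 * depending on the case above, 0x12 either in arch_addr_hi.destid_8_31
 * (DMAR/interrupt-remapping format) or in arch_addr_lo.virt_destid_8_14
 * (hypervisor extended destination ID).  Without either extension,
 * APIC IDs above 0xFF only trigger the WARN_ON_ONCE().
 */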
2468
2469u32 x86_msi_msg_get_destid(struct msi_msg *msg, bool extid)
2470{
2471 u32 dest = msg->arch_addr_lo.destid_0_7;
2472
2473 if (extid)
2474 dest |= msg->arch_addr_hi.destid_8_31 << 8;
2475 return dest;
2476}
2477EXPORT_SYMBOL_GPL(x86_msi_msg_get_destid);

/*
 * Override the generic EOI implementation with an optimized version.
 * Only called during early boot when only one CPU is active and with
 * interrupts disabled, so we know this does not race with actual APIC ops.
 */
2485void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
2486{
2487 struct apic **drv;
2488
2489 for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
2490
2491 WARN_ON((*drv)->eoi_write == eoi_write);
2492 (*drv)->native_eoi_write = (*drv)->eoi_write;
2493 (*drv)->eoi_write = eoi_write;
2494 }
2495}
2496
2497static void __init apic_bsp_up_setup(void)
2498{
2499#ifdef CONFIG_X86_64
2500 apic_write(APIC_ID, apic->set_apic_id(boot_cpu_physical_apicid));
2501#else
2502
2503
2504
2505
2506
2507# ifdef CONFIG_CRASH_DUMP
2508 boot_cpu_physical_apicid = read_apic_id();
2509# endif
2510#endif
2511 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
2512}
2513
2514
2515
2516
2517
2518
2519
2520
2521void __init apic_bsp_setup(bool upmode)
2522{
2523 connect_bsp_APIC();
2524 if (upmode)
2525 apic_bsp_up_setup();
2526 setup_local_APIC();
2527
2528 enable_IO_APIC();
2529 end_local_APIC_setup();
2530 irq_remap_enable_fault_handling();
2531 setup_IO_APIC();
2532}
2533
2534#ifdef CONFIG_UP_LATE_INIT
2535void __init up_late_init(void)
2536{
2537 if (apic_intr_mode == APIC_PIC)
2538 return;
2539
2540
2541 x86_init.timers.setup_percpu_clockev();
2542}
2543#endif

/*
 * Power management
 */
2548#ifdef CONFIG_PM
2549
2550static struct {
2551
2552
2553
2554
2555
2556 int active;
2557
2558 unsigned int apic_id;
2559 unsigned int apic_taskpri;
2560 unsigned int apic_ldr;
2561 unsigned int apic_dfr;
2562 unsigned int apic_spiv;
2563 unsigned int apic_lvtt;
2564 unsigned int apic_lvtpc;
2565 unsigned int apic_lvt0;
2566 unsigned int apic_lvt1;
2567 unsigned int apic_lvterr;
2568 unsigned int apic_tmict;
2569 unsigned int apic_tdcr;
2570 unsigned int apic_thmr;
2571 unsigned int apic_cmci;
2572} apic_pm_state;
2573
2574static int lapic_suspend(void)
2575{
2576 unsigned long flags;
2577 int maxlvt;
2578
2579 if (!apic_pm_state.active)
2580 return 0;
2581
2582 maxlvt = lapic_get_maxlvt();
2583
2584 apic_pm_state.apic_id = apic_read(APIC_ID);
2585 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
2586 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
2587 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
2588 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
2589 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
2590 if (maxlvt >= 4)
2591 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
2592 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
2593 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
2594 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
2595 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
2596 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
2597#ifdef CONFIG_X86_THERMAL_VECTOR
2598 if (maxlvt >= 5)
2599 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
2600#endif
2601#ifdef CONFIG_X86_MCE_INTEL
2602 if (maxlvt >= 6)
2603 apic_pm_state.apic_cmci = apic_read(APIC_LVTCMCI);
2604#endif
2605
2606 local_irq_save(flags);
2607 disable_local_APIC();
2608
2609 irq_remapping_disable();
2610
2611 local_irq_restore(flags);
2612 return 0;
2613}
2614
2615static void lapic_resume(void)
2616{
2617 unsigned int l, h;
2618 unsigned long flags;
2619 int maxlvt;
2620
2621 if (!apic_pm_state.active)
2622 return;
2623
2624 local_irq_save(flags);
2625
2626
2627
2628
2629
2630
2631
2632 mask_ioapic_entries();
2633 legacy_pic->mask_all();
2634
2635 if (x2apic_mode) {
2636 __x2apic_enable();
2637 } else {
2638
2639
2640
2641
2642
2643
2644 if (boot_cpu_data.x86 >= 6) {
2645 rdmsr(MSR_IA32_APICBASE, l, h);
2646 l &= ~MSR_IA32_APICBASE_BASE;
2647 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
2648 wrmsr(MSR_IA32_APICBASE, l, h);
2649 }
2650 }
2651
2652 maxlvt = lapic_get_maxlvt();
2653 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
2654 apic_write(APIC_ID, apic_pm_state.apic_id);
2655 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
2656 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
2657 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
2658 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
2659 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
2660 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
2661#ifdef CONFIG_X86_THERMAL_VECTOR
2662 if (maxlvt >= 5)
2663 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
2664#endif
2665#ifdef CONFIG_X86_MCE_INTEL
2666 if (maxlvt >= 6)
2667 apic_write(APIC_LVTCMCI, apic_pm_state.apic_cmci);
2668#endif
2669 if (maxlvt >= 4)
2670 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
2671 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
2672 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
2673 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
2674 apic_write(APIC_ESR, 0);
2675 apic_read(APIC_ESR);
2676 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
2677 apic_write(APIC_ESR, 0);
2678 apic_read(APIC_ESR);
2679
2680 irq_remapping_reenable(x2apic_mode);
2681
2682 local_irq_restore(flags);
2683}
2684
2685
2686
2687
2688
2689
2690static struct syscore_ops lapic_syscore_ops = {
2691 .resume = lapic_resume,
2692 .suspend = lapic_suspend,
2693};
2694
2695static void apic_pm_activate(void)
2696{
2697 apic_pm_state.active = 1;
2698}
2699
2700static int __init init_lapic_sysfs(void)
2701{
2702
2703 if (boot_cpu_has(X86_FEATURE_APIC))
2704 register_syscore_ops(&lapic_syscore_ops);
2705
2706 return 0;
2707}
2708
2709
2710core_initcall(init_lapic_sysfs);
2711
2712#else
2713
2714static void apic_pm_activate(void) { }
2715
2716#endif
2717
2718#ifdef CONFIG_X86_64
2719
2720static int multi_checked;
2721static int multi;
2722
2723static int set_multi(const struct dmi_system_id *d)
2724{
2725 if (multi)
2726 return 0;
2727 pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
2728 multi = 1;
2729 return 0;
2730}
2731
2732static const struct dmi_system_id multi_dmi_table[] = {
2733 {
2734 .callback = set_multi,
2735 .ident = "IBM System Summit2",
2736 .matches = {
2737 DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
2738 DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
2739 },
2740 },
2741 {}
2742};
2743
2744static void dmi_check_multi(void)
2745{
2746 if (multi_checked)
2747 return;
2748
2749 dmi_check_system(multi_dmi_table);
2750 multi_checked = 1;
2751}
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761int apic_is_clustered_box(void)
2762{
2763 dmi_check_multi();
2764 return multi;
2765}
2766#endif

/*
 * APIC command line parameters
 */
2771static int __init setup_disableapic(char *arg)
2772{
2773 disable_apic = 1;
2774 setup_clear_cpu_cap(X86_FEATURE_APIC);
2775 return 0;
2776}
2777early_param("disableapic", setup_disableapic);
2778
2779
2780static int __init setup_nolapic(char *arg)
2781{
2782 return setup_disableapic(arg);
2783}
2784early_param("nolapic", setup_nolapic);
2785
2786static int __init parse_lapic_timer_c2_ok(char *arg)
2787{
2788 local_apic_timer_c2_ok = 1;
2789 return 0;
2790}
2791early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
2792
2793static int __init parse_disable_apic_timer(char *arg)
2794{
2795 disable_apic_timer = 1;
2796 return 0;
2797}
2798early_param("noapictimer", parse_disable_apic_timer);
2799
2800static int __init parse_nolapic_timer(char *arg)
2801{
2802 disable_apic_timer = 1;
2803 return 0;
2804}
2805early_param("nolapic_timer", parse_nolapic_timer);
2806
2807static int __init apic_set_verbosity(char *arg)
2808{
2809 if (!arg) {
2810#ifdef CONFIG_X86_64
2811 skip_ioapic_setup = 0;
2812 return 0;
2813#endif
2814 return -EINVAL;
2815 }
2816
2817 if (strcmp("debug", arg) == 0)
2818 apic_verbosity = APIC_DEBUG;
2819 else if (strcmp("verbose", arg) == 0)
2820 apic_verbosity = APIC_VERBOSE;
2821#ifdef CONFIG_X86_64
2822 else {
2823 pr_warning("APIC Verbosity level %s not recognised"
2824 " use apic=verbose or apic=debug\n", arg);
2825 return -EINVAL;
2826 }
2827#endif
2828
2829 return 0;
2830}
2831early_param("apic", apic_set_verbosity);
2832
2833static int __init lapic_insert_resource(void)
2834{
2835 if (!apic_phys)
2836 return -1;
2837
2838
2839 lapic_resource.start = apic_phys;
2840 lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
2841 insert_resource(&iomem_resource, &lapic_resource);
2842
2843 return 0;
2844}

/*
 * need call insert after e820__reserve_resources()
 * that is using request_resource
 */
2850late_initcall(lapic_insert_resource);
2851
2852static int __init apic_set_disabled_cpu_apicid(char *arg)
2853{
2854 if (!arg || !get_option(&arg, &disabled_cpu_apicid))
2855 return -EINVAL;
2856
2857 return 0;
2858}
2859early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid);
2860
2861static int __init apic_set_extnmi(char *arg)
2862{
2863 if (!arg)
2864 return -EINVAL;
2865
2866 if (!strncmp("all", arg, 3))
2867 apic_extnmi = APIC_EXTNMI_ALL;
2868 else if (!strncmp("none", arg, 4))
2869 apic_extnmi = APIC_EXTNMI_NONE;
2870 else if (!strncmp("bsp", arg, 3))
2871 apic_extnmi = APIC_EXTNMI_BSP;
2872 else {
2873 pr_warn("Unknown external NMI delivery mode `%s' ignored\n", arg);
2874 return -EINVAL;
2875 }
2876
2877 return 0;
2878}
2879early_param("apic_extnmi", apic_set_extnmi);
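
/*
 * Illustrative usage (comment added, not from the original source):
 * "apic_extnmi=all" unmasks LINT1/NMI on every CPU (e.g. for platforms
 * with an NMI button), "apic_extnmi=none" masks it everywhere (e.g. for
 * kdump second kernels), and "apic_extnmi=bsp" restores the default of
 * delivering external NMIs only to the boot CPU.
 */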
2880