/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/edd.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvm.h>
#include <xen/hvc-console.h>
#include <xen/acpi.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/pat.h>

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <asm/acpi.h>
#include <acpi/pdc_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif

#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);

/*
 * Pointer to the vcpu_info structure for this CPU: either the slot in
 * the shared_info page or, if the hypervisor supports
 * VCPUOP_register_vcpu_info, the per-cpu xen_vcpu_info below.  See
 * xen_vcpu_setup() for details.
 */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

/*
 * Per-cpu backing storage used when the vcpu_info has been registered
 * with VCPUOP_register_vcpu_info.
 */
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long machine_to_phys_nr;
EXPORT_SYMBOL(machine_to_phys_nr);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

void *xen_initial_gdt;

RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

/*
 * Point at some empty memory to start with.  The real shared_info
 * page is mapped in as soon as the fixmap area is up.
 */
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;

/*
 * Flag indicating whether vcpu info placement is available on all CPUs.
 * Assumed true until the first registration fails; placement is
 * all-or-nothing, so a single failure disables it everywhere.
 */
static int have_vcpu_info_placement = 1;

struct tls_descs {
	struct desc_struct desc[3];
};

/*
 * Updating the three TLS descriptors in the GDT on every task switch
 * is surprisingly expensive, so avoid updating them if they have not
 * changed.  Since Xen writes different descriptors than the ones
 * passed in the update_descriptor hypercall, keep shadow copies here
 * to compare against.
 */
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);

static void clamp_max_cpus(void)
{
#ifdef CONFIG_SMP
	if (setup_max_cpus > MAX_VIRT_CPUS)
		setup_max_cpus = MAX_VIRT_CPUS;
#endif
}

static void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	/*
	 * On PVHVM this path is called both at boot (via
	 * xen_hvm_cpu_notify) and on CPU hotplug.  The
	 * VCPUOP_register_vcpu_info hypercall may only be issued once
	 * per CPU, so do not overwrite an earlier registration.
	 *
	 * For PV it is called during restore (xen_vcpu_restore) and at
	 * boot (xen_setup_vcpu_info_placement).
	 */
	if (xen_hvm_domain()) {
		if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
			return;
	}
	if (cpu < MAX_VIRT_CPUS)
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

	if (!have_vcpu_info_placement) {
		if (cpu >= MAX_VIRT_CPUS)
			clamp_max_cpus();
		return;
	}

	vcpup = &per_cpu(xen_vcpu_info, cpu);
	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	/*
	 * Ask the hypervisor to place the vcpu_info structure where we
	 * want it, which allows direct access via a per-cpu variable.
	 * This hypercall can only be made once per CPU; there is no
	 * unregister variant, and subsequent calls fail with -EINVAL.
	 */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
		clamp_max_cpus();
	} else {
		/* This CPU is using the registered vcpu_info, even if
		   later ones fail to. */
		per_cpu(xen_vcpu, cpu) = vcpup;
	}
}

/*
 * On restore, set the vcpu placement up again.  If it fails we are in
 * a bad state, since there is no way to back out from using it.
 */
void xen_vcpu_restore(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		bool other_cpu = (cpu != smp_processor_id());
		bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
			BUG();

		xen_setup_runstate_info(cpu);

		if (have_vcpu_info_placement)
			xen_vcpu_setup(cpu);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
			BUG();
	}
}

static void __init xen_banner(void)
{
	unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
	struct xen_extraversion extra;
	HYPERVISOR_xen_version(XENVER_extraversion, &extra);

	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
	printk(KERN_INFO "Xen version: %d.%d%s%s\n",
	       version >> 16, version & 0xffff, extra.extraversion,
	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}

bool
xen_running_on_version_or_later(unsigned int major, unsigned int minor)
{
	unsigned int version;

	if (!xen_domain())
		return false;

	version = HYPERVISOR_xen_version(XENVER_version, NULL);
	if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
	    ((version >> 16) > major))
		return true;
	return false;
}

#define CPUID_THERM_POWER_LEAF 6
#define APERFMPERF_PRESENT 0

static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;

static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask;
static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;
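
/*
 * Filter CPUID so the guest only sees features the Xen PV environment
 * actually supports, applying the masks computed in
 * xen_init_cpuid_mask() below.
 */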
static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned maskebx = ~0;
	unsigned maskecx = ~0;
	unsigned maskedx = ~0;
	unsigned setecx = 0;

	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	switch (*ax) {
	case 1:
		maskecx = cpuid_leaf1_ecx_mask;
		setecx = cpuid_leaf1_ecx_set_mask;
		maskedx = cpuid_leaf1_edx_mask;
		break;

	case CPUID_MWAIT_LEAF:
		/* Synthesize the values. */
		*ax = 0;
		*bx = 0;
		*cx = cpuid_leaf5_ecx_val;
		*dx = cpuid_leaf5_edx_val;
		return;

	case CPUID_THERM_POWER_LEAF:
		/* Disabling APERFMPERF for kernel usage */
		maskecx = ~(1 << APERFMPERF_PRESENT);
		break;

	case 0xb:
		/* Suppress extended topology stuff */
		maskebx = 0;
		break;
	}

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));

	*bx &= maskebx;
	*cx &= maskecx;
	*cx |= setecx;
	*dx &= maskedx;
}

static bool __init xen_check_mwait(void)
{
#ifdef CONFIG_ACPI
	struct xen_platform_op op = {
		.cmd = XENPF_set_processor_pminfo,
		.u.set_pminfo.id = -1,
		.u.set_pminfo.type = XEN_PM_PDC,
	};
	uint32_t buf[3];
	unsigned int ax, bx, cx, dx;
	unsigned int mwait_mask;

	/*
	 * Exposing MWAIT is only useful in dom0, which parses the ACPI
	 * C-state tables on behalf of the hypervisor.  The hypervisor
	 * does not expose the MWAIT flag itself, so find out directly
	 * from the hardware and report the result back via _PDC.
	 */
	if (!xen_initial_domain())
		return false;

	/*
	 * When running on a hypervisor older than Xen 4.2, do not
	 * expose MWAIT, to avoid the risk of loading the native
	 * acpi_pad driver.
	 */
	if (!xen_running_on_version_or_later(4, 2))
		return false;

	ax = 1;
	cx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
		     (1 << (X86_FEATURE_MWAIT % 32));

	if ((cx & mwait_mask) != mwait_mask)
		return false;

	/*
	 * We need to emulate the MWAIT_LEAF and for that we need both
	 * ecx and edx; the hypercall provides only partial information.
	 */
	ax = CPUID_MWAIT_LEAF;
	bx = 0;
	cx = 0;
	dx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	/*
	 * Ask the hypervisor whether to clear ACPI_PDC_C_C2C3_FFH.  If
	 * so, don't expose MWAIT_LEAF and let ACPI pick the IOPORT
	 * version of C3.
	 */
	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;
	buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);

	set_xen_guest_handle(op.u.set_pminfo.pdc, buf);

	if ((HYPERVISOR_dom0_op(&op) == 0) &&
	    (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
		cpuid_leaf5_ecx_val = cx;
		cpuid_leaf5_edx_val = dx;
	}
	return true;
#else
	return false;
#endif
}

static void __init xen_init_cpuid_mask(void)
{
	unsigned int ax, bx, cx, dx;
	unsigned int xsave_mask;

	cpuid_leaf1_edx_mask =
		~((1 << X86_FEATURE_MTRR) |	/* disable MTRR */
		  (1 << X86_FEATURE_ACC));	/* thermal monitoring */

	if (!xen_initial_domain())
		cpuid_leaf1_edx_mask &=
			~((1 << X86_FEATURE_ACPI));	/* disable ACPI */

	cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));

	ax = 1;
	cx = 0;
	xen_cpuid(&ax, &bx, &cx, &dx);

	xsave_mask =
		(1 << (X86_FEATURE_XSAVE % 32)) |
		(1 << (X86_FEATURE_OSXSAVE % 32));

	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
	if ((cx & xsave_mask) != xsave_mask)
		cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
	if (xen_check_mwait())
		cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
}

static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

static void xen_end_context_switch(struct task_struct *next)
{
	xen_mc_flush();
	paravirt_end_context_switch(next);
}

static unsigned long xen_store_tr(void)
{
	return 0;
}

/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();
}
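
/*
 * Xen only accepts descriptor tables in pages the guest cannot write,
 * so alias-protect each LDT page read-only before the table is handed
 * to the hypervisor, and make it writable again when it is freed.
 */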
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}

static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	trace_xen_cpu_set_ldt(addr, entries);

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
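
/*
 * Load a new GDT.  Under Xen the GDT frames must be read-only and are
 * registered with the hypervisor via HYPERVISOR_set_gdt rather than
 * loaded with lgdt.
 */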
static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages.
	 */
	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		int level;
		pte_t *ptep;
		unsigned long pfn, mfn;
		void *virt;

		/*
		 * The GDT is per-cpu and is in the percpu data area.
		 * That can be virtually mapped, so we need to do a
		 * page-walk to get the underlying MFN for the
		 * hypercall.  The page can also be in the kernel's
		 * linear range, so we need to make that read-only too.
		 */
		ptep = lookup_address(va, &level);
		BUG_ON(ptep == NULL);

		pfn = pte_pfn(*ptep);
		mfn = pfn_to_mfn(pfn);
		virt = __va(PFN_PHYS(pfn));

		frames[f] = mfn;

		make_lowmem_page_readonly((void *)va);
		make_lowmem_page_readonly(virt);
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}

/*
 * load_gdt for early boot, when the gdt is only mapped once.
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages.
	 */
	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		pte_t pte;
		unsigned long pfn, mfn;

		pfn = virt_to_pfn(va);
		mfn = pfn_to_mfn(pfn);

		pte = pfn_pte(pfn, PAGE_KERNEL_RO);

		if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
			BUG();

		frames[f] = mfn;
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}

static inline bool desc_equal(const struct desc_struct *d1,
			      const struct desc_struct *d2)
{
	return d1->a == d2->a && d1->b == d2->b;
}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
	struct desc_struct *gdt;
	xmaddr_t maddr;
	struct multicall_space mc;

	if (desc_equal(shadow, &t->tls_array[i]))
		return;

	*shadow = t->tls_array[i];

	gdt = get_cpu_gdt_table(cpu);
	maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * XXX sleazy hack: if we're being called in a lazy-cpu zone
	 * and lazy gs handling is enabled, it means we're in a
	 * context switch, and %gs has just been saved.  This means we
	 * can zero it out to prevent faults on exit from the
	 * hypervisor if the next process has no %gs.  Either way, it
	 * has been saved, and the new value will get loaded properly.
	 * This will go away as soon as Xen has been modified to not
	 * save/restore %gs for normal hypercalls.
	 *
	 * On x86_64 this hack is not used for %gs, because gs points
	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
	 * must not zero %gs on x86_64.
	 *
	 * For x86_64 we need to zero %fs, otherwise we may get an
	 * exception between the new %fs descriptor being loaded and
	 * %fs being effectively cleared at __switch_to().
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

#ifdef CONFIG_X86_64
static void xen_load_gs_index(unsigned int idx)
{
	if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
		BUG();
}
#endif

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{
	xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
	u64 entry = *(u64 *)ptr;

	trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}
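
/*
 * Convert an IDT gate descriptor into the trap_info format expected by
 * HYPERVISOR_set_trap_table.  Returns 0 if the gate should not be
 * passed to Xen at all.
 */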
static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{
	unsigned long addr;

	if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
		return 0;

	info->vector = vector;

	addr = gate_offset(*val);
#ifdef CONFIG_X86_64
	/*
	 * Look for known traps using IST and substitute them
	 * appropriately.  The debugger ones are the only ones we care
	 * about.  Xen will handle faults like double_fault, so we
	 * should never see them.  Warn if there's an unexpected
	 * IST-using fault handler.
	 */
	if (addr == (unsigned long)debug)
		addr = (unsigned long)xen_debug;
	else if (addr == (unsigned long)int3)
		addr = (unsigned long)xen_int3;
	else if (addr == (unsigned long)stack_segment)
		addr = (unsigned long)xen_stack_segment;
	else if (addr == (unsigned long)double_fault) {
		/* Don't need to handle these */
		return 0;
#ifdef CONFIG_X86_MCE
	} else if (addr == (unsigned long)machine_check) {
		/*
		 * When the Xen hypervisor injects a vMCE into the
		 * guest, the native MCE handler handles it.
		 */
		;
#endif
	} else if (addr == (unsigned long)nmi)
		/*
		 * Use the native version as well.
		 */
		;
	else {
		/* Some other trap using IST? */
		if (WARN_ON(val->ist != 0))
			return 0;
	}
#endif	/* CONFIG_X86_64 */
	info->address = addr;

	info->cs = gate_segment(*val);
	info->flags = val->dpl;
	/* interrupt gates clear IF */
	if (val->type == GATE_INTERRUPT)
		info->flags |= 1 << 2;

	return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/* Set an IDT entry.  If the entry is part of the current IDT, then
   also update Xen. */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	trace_xen_cpu_write_idt_entry(dt, entrynum, g);

	preempt_disable();

	start = __this_cpu_read(idt_desc.address);
	end = start + __this_cpu_read(idt_desc.size) + 1;

	xen_mc_flush();

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, g, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}

static void xen_convert_trap_info(const struct desc_ptr *desc,
				  struct trap_info *traps)
{
	unsigned in, out, count;

	count = (desc->size+1) / sizeof(gate_desc);
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		gate_desc *entry = (gate_desc *)(desc->address) + in;

		if (cvt_gate_to_trap(in, entry, &traps[out]))
			out++;
	}
	traps[out].address = 0;
}

void xen_copy_trap_info(struct trap_info *traps)
{
	const struct desc_ptr *desc = &__get_cpu_var(idt_desc);

	xen_convert_trap_info(desc, traps);
}

/* Load a new IDT into Xen.  In principle this can be per-cpu, so we
   hold a spinlock to protect the static traps[] array (static because
   it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	trace_xen_cpu_load_idt(desc);

	spin_lock(&lock);

	__get_cpu_var(idt_desc) = *desc;

	xen_convert_trap_info(desc, traps);

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}

/*
 * Write a GDT descriptor entry.  Ignore LDT and TSS descriptors, since
 * they're handled differently.
 */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}

	}

	preempt_enable();
}

/*
 * Version of write_gdt_entry for use at early boot time, needed to
 * update an entry as simply as possible.
 */
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
					    const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			dt[entry] = *(struct desc_struct *)desc;
	}

	}
}
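
/*
 * Tell the hypervisor which kernel stack to use for ring-0 entries on
 * this vcpu, instead of writing the TSS directly.
 */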
static void xen_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

static void xen_io_delay(void)
{
}
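
/*
 * Minimal local APIC handling for Xen guests: there is no real APIC to
 * program, so reads are synthesised (or fetched from the hypervisor
 * for dom0) and writes simply warn.
 */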
#ifdef CONFIG_X86_LOCAL_APIC
static unsigned long xen_set_apic_id(unsigned int x)
{
	WARN_ON(1);
	return x;
}
static unsigned int xen_get_apic_id(unsigned long x)
{
	return ((x)>>24) & 0xFFu;
}
static u32 xen_apic_read(u32 reg)
{
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.pcpu_info.xen_cpuid = 0,
	};
	int ret = 0;

	/* Shouldn't need this as APIC is turned off for PV, and we only
	 * get called on the bootup processor.  But just in case. */
	if (!xen_initial_domain() || smp_processor_id())
		return 0;

	if (reg == APIC_LVR)
		return 0x10;

	if (reg != APIC_ID)
		return 0;

	ret = HYPERVISOR_dom0_op(&op);
	if (ret)
		return 0;

	return op.u.pcpu_info.apic_id << 24;
}

static void xen_apic_write(u32 reg, u32 val)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static u64 xen_apic_icr_read(void)
{
	return 0;
}

static void xen_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static void xen_apic_wait_icr_idle(void)
{
	return;
}

static u32 xen_safe_apic_wait_icr_idle(void)
{
	return 0;
}

static void set_xen_basic_apic_ops(void)
{
	apic->read = xen_apic_read;
	apic->write = xen_apic_write;
	apic->icr_read = xen_apic_icr_read;
	apic->icr_write = xen_apic_icr_write;
	apic->wait_icr_idle = xen_apic_wait_icr_idle;
	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
	apic->set_apic_id = xen_set_apic_id;
	apic->get_apic_id = xen_get_apic_id;

#ifdef CONFIG_SMP
	apic->send_IPI_allbutself = xen_send_IPI_allbutself;
	apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
	apic->send_IPI_mask = xen_send_IPI_mask;
	apic->send_IPI_all = xen_send_IPI_all;
	apic->send_IPI_self = xen_send_IPI_self;
#endif
}

#endif

static void xen_clts(void)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
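
/*
 * Cache the last value written to %cr0 per cpu; xen_read_cr0() serves
 * reads from this cache and only falls back to a real (trapping, hence
 * expensive) read while the cache is still empty.
 */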
static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

static unsigned long xen_read_cr0(void)
{
	unsigned long cr0 = this_cpu_read(xen_cr0_value);

	if (unlikely(cr0 == 0)) {
		cr0 = native_read_cr0();
		this_cpu_write(xen_cr0_value, cr0);
	}

	return cr0;
}

static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	this_cpu_write(xen_cr0_value, cr0);

	/* Only pay attention to cr0.TS; everything else is
	   ignored. */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_write_cr4(unsigned long cr4)
{
	cr4 &= ~X86_CR4_PGE;
	cr4 &= ~X86_CR4_PSE;

	native_write_cr4(cr4);
}
#ifdef CONFIG_X86_64
static inline unsigned long xen_read_cr8(void)
{
	return 0;
}
static inline void xen_write_cr8(unsigned long val)
{
	BUG_ON(val);
}
#endif
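
/*
 * MSR writes that Xen handles specially (segment bases, fast-syscall
 * setup, PAT) are intercepted here; everything else falls through to
 * the native safe MSR write.
 */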
static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
	int ret;

	ret = 0;

	switch (msr) {
#ifdef CONFIG_X86_64
	unsigned which;
	u64 base;

	case MSR_FS_BASE:		which = SEGBASE_FS; goto set;
	case MSR_KERNEL_GS_BASE:	which = SEGBASE_GS_USER; goto set;
	case MSR_GS_BASE:		which = SEGBASE_GS_KERNEL; goto set;

	set:
		base = ((u64)high << 32) | low;
		if (HYPERVISOR_set_segment_base(which, base) != 0)
			ret = -EIO;
		break;
#endif

	case MSR_STAR:
	case MSR_CSTAR:
	case MSR_LSTAR:
	case MSR_SYSCALL_MASK:
	case MSR_IA32_SYSENTER_CS:
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
		/* Fast syscall setup is all done in hypercalls, so
		   these are all ignored.  Stub them out here to stop
		   Xen console noise. */
		break;

	case MSR_IA32_CR_PAT:
		if (smp_processor_id() == 0)
			xen_set_pat(((u64)high << 32) | low);
		break;

	default:
		ret = native_write_msr_safe(msr, low, high);
	}

	return ret;
}
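
/*
 * Map the hypervisor's shared_info page.  Non-auto-translated guests
 * map it through the fixmap area; auto-translated guests can use the
 * pfn from the start_info directly.
 */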
void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}

/* This is called once we have the cpu_possible_mask */
void xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		xen_vcpu_setup(cpu);

	/* xen_vcpu_setup managed to place the vcpu_info within the
	   percpu area for all cpus, so make use of it */
	if (have_vcpu_info_placement) {
		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
	}
}
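
/*
 * Inline-patch the hottest irq operations with their "direct" variants
 * when vcpu info placement is available; anything else falls back to
 * the generic paravirt patching machinery.
 */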
static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	char *start, *end, *reloc;
	unsigned ret;

	start = end = reloc = NULL;

#define SITE(op, x)							\
	case PARAVIRT_PATCH(op.x):					\
	if (have_vcpu_info_placement) {					\
		start = (char *)xen_##x##_direct;			\
		end = xen_##x##_direct_end;				\
		reloc = xen_##x##_direct_reloc;				\
	}								\
	goto patch_site

	switch (type) {
		SITE(pv_irq_ops, irq_enable);
		SITE(pv_irq_ops, irq_disable);
		SITE(pv_irq_ops, save_fl);
		SITE(pv_irq_ops, restore_fl);
#undef SITE

	patch_site:
		if (start == NULL || (end-start) > len)
			goto default_patch;

		ret = paravirt_patch_insns(insnbuf, len, start, end);

		/*
		 * The template code may contain a relocation; adjust it
		 * so the copied code still refers to the same target as
		 * the original code at 'start'.
		 */
		if (reloc > start && reloc < end) {
			int reloc_off = reloc - start;
			long *relocp = (long *)(insnbuf + reloc_off);
			long delta = start - (char *)addr;

			*relocp += delta;
		}
		break;

	default_patch:
	default:
		ret = paravirt_patch_default(type, clobbers, insnbuf,
					     addr, len);
		break;
	}

	return ret;
}
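
/*
 * The paravirt op tables that xen_start_kernel() installs in place of
 * the native defaults.
 */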
static const struct pv_info xen_info __initconst = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,

#ifdef CONFIG_X86_64
	.extra_user_64bit_cs = FLAT_USER_CS64,
#endif

	.name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initconst = {
	.patch = xen_patch,
};

static const struct pv_cpu_ops xen_cpu_ops __initconst = {
	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = xen_clts,

	.read_cr0 = xen_read_cr0,
	.write_cr0 = xen_write_cr0,

	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = xen_write_cr4,

#ifdef CONFIG_X86_64
	.read_cr8 = xen_read_cr8,
	.write_cr8 = xen_write_cr8,
#endif

	.wbinvd = native_wbinvd,

	.read_msr = native_read_msr_safe,
	.write_msr = xen_write_msr_safe,

	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,

	.read_tscp = native_read_tscp,

	.iret = xen_iret,
	.irq_enable_sysexit = xen_sysexit,
#ifdef CONFIG_X86_64
	.usergs_sysret32 = xen_sysret32,
	.usergs_sysret64 = xen_sysret64,
#endif

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
	.load_gs_index = xen_load_gs_index,
#endif

	.alloc_ldt = xen_alloc_ldt,
	.free_ldt = xen_free_ldt,

	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_sp0 = xen_load_sp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

	/* Xen takes care of %gs when switching to usermode for us */
	.swapgs = paravirt_nop,

	.start_context_switch = paravirt_start_context_switch,
	.end_context_switch = xen_end_context_switch,
};

static const struct pv_apic_ops xen_apic_ops __initconst = {
#ifdef CONFIG_X86_LOCAL_APIC
	.startup_ipi_hook = paravirt_nop,
#endif
};
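
/* Shut down, reboot or crash the domain via the SCHEDOP_shutdown hypercall. */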
static void xen_reboot(int reason)
{
	struct sched_shutdown r = { .reason = reason };

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
		BUG();
}

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}

static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	xen_reboot(SHUTDOWN_crash);
	return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
	.notifier_call = xen_panic_event,
};

int xen_panic_handler_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
	return 0;
}

static const struct machine_ops xen_machine_ops __initconst = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_power_off,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};
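
/*
 * Pull BIOS EDD disk information out of the hypervisor (dom0 only) and
 * stash it in boot_params, where the EDD code expects to find it.
 */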
static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
	struct xen_platform_op op;
	struct edd_info *edd_info;
	u32 *mbr_signature;
	unsigned nr;
	int ret;

	edd_info = boot_params.eddbuf;
	mbr_signature = boot_params.edd_mbr_sig_buffer;

	op.cmd = XENPF_firmware_info;

	op.u.firmware_info.type = XEN_FW_DISK_INFO;
	for (nr = 0; nr < EDDMAXNR; nr++) {
		struct edd_info *info = edd_info + nr;

		op.u.firmware_info.index = nr;
		info->params.length = sizeof(info->params);
		set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
				     &info->params);
		ret = HYPERVISOR_dom0_op(&op);
		if (ret)
			break;

#define C(x) info->x = op.u.firmware_info.u.disk_info.x
		C(device);
		C(version);
		C(interface_support);
		C(legacy_max_cylinder);
		C(legacy_max_head);
		C(legacy_sectors_per_track);
#undef C
	}
	boot_params.eddbuf_entries = nr;

	op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
	for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
		op.u.firmware_info.index = nr;
		ret = HYPERVISOR_dom0_op(&op);
		if (ret)
			break;
		mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
	}
	boot_params.edd_mbr_sig_buf_entries = nr;
#endif
}

/*
 * Set up the GDT and segment registers for -fstack-protector.  Until
 * we do this, we have to be careful not to call any stack-protected
 * function, which is most of the kernel.
 */
static void __init xen_setup_stackprotector(void)
{
	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
	pv_cpu_ops.load_gdt = xen_load_gdt_boot;

	setup_stack_canary_segment(0);
	switch_to_new_gdt(0);

	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
	pv_cpu_ops.load_gdt = xen_load_gdt;
}

/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
	struct physdev_set_iopl set_iopl;
	int rc;

	if (!xen_start_info)
		return;

	xen_domain_type = XEN_PV_DOMAIN;

	xen_setup_machphys_mapping();

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_cpu_ops = xen_cpu_ops;
	pv_apic_ops = xen_apic_ops;

	x86_init.resources.memory_setup = xen_memory_setup;
	x86_init.oem.arch_setup = xen_arch_setup;
	x86_init.oem.banner = xen_banner;

	xen_init_time_ops();

	/*
	 * Set up some pagetable state before starting to set any ptes.
	 */
	xen_init_mmu_ops();

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
#if 0
	if (!xen_initial_domain())
#endif
		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

	__supported_pte_mask |= _PAGE_IOMAP;

	/*
	 * Prevent page tables from being allocated in highmem, even
	 * if CONFIG_HIGHPTE is enabled.
	 */
	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

	/* Work out if we support NX */
	x86_configure_nx();

	xen_setup_features();

	/* Get mfn list */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_build_dynamic_phys_to_machine();

	/*
	 * Set up kernel GDT and segment registers, mainly so that
	 * -fstack-protector code can be executed.
	 */
	xen_setup_stackprotector();

	xen_init_irq_ops();
	xen_init_cpuid_mask();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Set up the basic apic ops.
	 */
	set_xen_basic_apic_ops();
#endif

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * The pfns we get from Xen are not related to machine pfns, so
	 * any NUMA information the kernel tries to get from ACPI will
	 * be meaningless.  Prevent it from trying.
	 */
	acpi_numa = -1;
#endif
#ifdef CONFIG_X86_PAT
	/*
	 * Disable PAT for now: the PAT layout programmed by the
	 * hypervisor does not match the kernel's default, which
	 * confuses the memory-type handling.
	 */
	pat_enabled = 0;
#endif

	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map and a non-dummy shared_info. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	local_irq_disable();
	early_boot_irqs_disabled = true;

	xen_raw_console_write("mapping kernel into physical memory\n");
	xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages);

	/* Allocate and initialize top and mid mfn levels for p2m structure */
	xen_build_mfn_list_list();

	/* keep using Xen gdt for now; no urgent need to change it */

#ifdef CONFIG_X86_32
	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;
#else
	pv_info.kernel_rpl = 0;
#endif

	xen_reserve_top();

	/*
	 * Raise the I/O privilege level here rather than in
	 * xen_arch_setup(): by then it is too late, since early CPU
	 * setup code may already have poked I/O ports.
	 */
	set_iopl.iopl = 1;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
	if (rc != 0)
		xen_raw_printk("physdev_op failed %d\n", rc);

#ifdef CONFIG_X86_32
	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
	new_cpu_data.wp_works_ok = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = xen_start_info->mod_start
		? __pa(xen_start_info->mod_start) : 0;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);

	if (!xen_initial_domain()) {
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
		if (pci_xen)
			x86_init.pci.arch_init = pci_xen_init;
	} else {
		const struct dom0_vga_console_info *info =
			(void *)((char *)xen_start_info +
				 xen_start_info->console.dom0.info_off);
		struct xen_platform_op op = {
			.cmd = XENPF_firmware_info,
			.interface_version = XENPF_INTERFACE_VERSION,
			.u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
		};

		xen_init_vga(info, xen_start_info->console.dom0.info_size);
		xen_start_info->console.domU.mfn = 0;
		xen_start_info->console.domU.evtchn = 0;

		if (HYPERVISOR_dom0_op(&op) == 0)
			boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;

		xen_init_apic();

		/* Make sure ACS will be enabled */
		pci_request_acs();

		xen_acpi_sleep_register();

		/* Avoid searching for BIOS MP tables */
		x86_init.mpparse.find_smp_config = x86_init_noop;
		x86_init.mpparse.get_smp_config = x86_init_uint_noop;

		xen_boot_params_init_edd();
	}
#ifdef CONFIG_PCI
	/* PCI BIOS service won't work from a PV guest. */
	pci_probe &= ~PCI_PROBE_BIOS;
#endif
	xen_raw_console_write("about to get started...\n");

	xen_setup_runstate_info(0);

	/* Start the world */
#ifdef CONFIG_X86_32
	i386_start_kernel();
#else
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}

void __ref xen_hvm_init_shared_info(void)
{
	int cpu;
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page;

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			extend_brk(PAGE_SIZE, PAGE_SIZE);
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page; it is used in the event channel upcall and in some pvclock
	 * related functions.  We don't need the vcpu_info placement
	 * optimizations because we don't use any pv_mmu or pv_irq op on
	 * HVM.
	 * When this function runs at boot time only vcpu 0 is online, but
	 * it also runs on resume, at which point multiple vcpus may be
	 * online. */
	for_each_online_cpu(cpu) {
		/* Leave it to be NULL. */
		if (cpu >= MAX_VIRT_CPUS)
			continue;
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
	}
}

#ifdef CONFIG_XEN_PVHVM
/*
 * Discover the Xen hypervisor via its CPUID leaves and set up the
 * hypercall page for an HVM guest.
 */
static void __init init_hvm_pv_info(void)
{
	int major, minor;
	uint32_t eax, ebx, ecx, edx, pages, msr, base;
	u64 pfn;

	base = xen_cpuid_base();
	cpuid(base + 1, &eax, &ebx, &ecx, &edx);

	major = eax >> 16;
	minor = eax & 0xffff;
	printk(KERN_INFO "Xen version %d.%d.\n", major, minor);

	cpuid(base + 2, &pages, &msr, &ecx, &edx);

	pfn = __pa(hypercall_page);
	wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));

	xen_setup_features();

	pv_info.name = "Xen HVM";

	xen_domain_type = XEN_HVM_DOMAIN;
}

static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
			      void *hcpu)
{
	int cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
		xen_vcpu_setup(cpu);
		if (xen_have_vector_callback) {
			if (xen_feature(XENFEAT_hvm_safe_pvclock))
				xen_setup_timer(cpu);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block xen_hvm_cpu_notifier = {
	.notifier_call = xen_hvm_cpu_notify,
};
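
/*
 * Bring up the paravirtual extensions of an HVM guest: shared_info,
 * the event-channel callback vector, PV timers and MMU helpers, and
 * unplugging of the emulated devices they replace.
 */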
static void __init xen_hvm_guest_init(void)
{
	init_hvm_pv_info();

	xen_hvm_init_shared_info();

	xen_panic_handler_init();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;
	xen_hvm_smp_init();
	register_cpu_notifier(&xen_hvm_cpu_notifier);
	xen_unplug_emulated_devices();
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();
}

static uint32_t __init xen_hvm_platform(void)
{
	if (xen_pv_domain())
		return 0;

	return xen_cpuid_base();
}

bool xen_hvm_need_lapic(void)
{
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);

const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
	.name = "Xen HVM",
	.detect = xen_hvm_platform,
	.init_platform = xen_hvm_guest_init,
	.x2apic_available = xen_x2apic_para_available,
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);
#endif