/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 *   - privileged instructions
 *   - interrupt flags
 *   - segment operations
 *   - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/edd.h>
#include <linux/crash_dump.h>
#include <linux/frame.h>

#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
#endif

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvm.h>
#include <xen/hvc-console.h>
#include <xen/acpi.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/pat.h>

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <asm/acpi.h>
#include <acpi/pdc_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif

#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);

/*
 * Pointer to this CPU's vcpu_info.  By default it points into the
 * vcpu_info array in the shared_info page, but if the hypervisor
 * supports VCPUOP_register_vcpu_info it is switched to the per-cpu
 * xen_vcpu_info copy below.  It is used by the event-channel upcall
 * and by the patched irq enable/disable paths, both of which want
 * fast percpu access rather than a pointer chase through shared_info.
 */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

/*
 * Per-cpu backing storage for vcpu_info, used when the hypervisor
 * supports the VCPUOP_register_vcpu_info hypercall (both PV and
 * PVHVM).  It overrides the default per_cpu(xen_vcpu, cpu) pointer.
 */
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long machine_to_phys_nr;
EXPORT_SYMBOL(machine_to_phys_nr);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

void *xen_initial_gdt;

RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

/*
 * Point at some empty memory to start with.  We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;

/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs.  We assume it is to start with, and then set it to zero on
 * the first failure.  This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (ie buggy guest!).
 */
static int have_vcpu_info_placement = 1;

struct tls_descs {
	struct desc_struct desc[3];
};

/*
 * Updating the 3 TLS descriptors in the GDT on every task switch is
 * surprisingly expensive so we avoid updating them if they haven't
 * changed.  Since Xen writes different descriptors than the ones
 * passed in the update_descriptor hypercall we keep shadow copies to
 * compare against.
 */
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);

#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3)

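/* A PV guest exposes at most MAX_VIRT_CPUS vCPUs, so cap setup_max_cpus. */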
static void clamp_max_cpus(void)
{
#ifdef CONFIG_SMP
	if (setup_max_cpus > MAX_VIRT_CPUS)
		setup_max_cpus = MAX_VIRT_CPUS;
#endif
}

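/*
 * Point per_cpu(xen_vcpu) at this CPU's vcpu_info and, if the
 * hypervisor supports it, register the percpu copy for direct access.
 */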
void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	/*
	 * This path is called twice on PVHVM - first during bootup via
	 * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
	 * hotplugged: cpu_up -> xen_hvm_cpu_notify.
	 * As we can only do the VCPUOP_register_vcpu_info once lets
	 * not over-write its result.
	 *
	 * For PV it is called during restore (xen_vcpu_restore) and bootup
	 * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
	 * use this function.
	 */
	if (xen_hvm_domain()) {
		if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
			return;
	}
	if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
		per_cpu(xen_vcpu, cpu) =
			&HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];

	if (!have_vcpu_info_placement) {
		if (cpu >= MAX_VIRT_CPUS)
			clamp_max_cpus();
		return;
	}

	vcpup = &per_cpu(xen_vcpu_info, cpu);
	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	/*
	 * Check to see if the hypervisor will put the vcpu_info
	 * structure where we want it, which allows direct access via
	 * a percpu-variable.
	 * N.B. This hypercall can _only_ be called once per CPU.
	 * Subsequent calls will error out with -EINVAL: the hypervisor
	 * has no unregister variant and this hypercall does not allow
	 * overwriting info.mfn and info.offset.
	 */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
				 &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
		clamp_max_cpus();
	} else {
		/*
		 * This cpu is using the registered vcpu info, even if
		 * later ones fail to.
		 */
		per_cpu(xen_vcpu, cpu) = vcpup;
	}
}

/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		bool other_cpu = (cpu != smp_processor_id());
		bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu),
						NULL);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
			BUG();

		xen_setup_runstate_info(cpu);

		if (have_vcpu_info_placement)
			xen_vcpu_setup(cpu);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
			BUG();
	}
}

static void __init xen_banner(void)
{
	unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
	struct xen_extraversion extra;
	HYPERVISOR_xen_version(XENVER_extraversion, &extra);

	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
	printk(KERN_INFO "Xen version: %d.%d%s%s\n",
	       version >> 16, version & 0xffff, extra.extraversion,
	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}

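/* Check if running on Xen version (major, minor) or later */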
bool
xen_running_on_version_or_later(unsigned int major, unsigned int minor)
{
	unsigned int version;

	if (!xen_domain())
		return false;

	version = HYPERVISOR_xen_version(XENVER_version, NULL);
	if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
	    ((version >> 16) > major))
		return true;
	return false;
}

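/* CPUID leaf 6 (thermal/power management); ECX bit 0 advertises APERF/MPERF. */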
#define CPUID_THERM_POWER_LEAF 6
#define APERFMPERF_PRESENT 0

static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;

static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask;
static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;

static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned maskebx = ~0;
	unsigned maskecx = ~0;
	unsigned maskedx = ~0;
	unsigned setecx = 0;
	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	switch (*ax) {
	case 1:
		maskecx = cpuid_leaf1_ecx_mask;
		setecx = cpuid_leaf1_ecx_set_mask;
		maskedx = cpuid_leaf1_edx_mask;
		break;

	case CPUID_MWAIT_LEAF:
		/* Synthesize the values, as the hypervisor masks the leaf. */
		*ax = 0;
		*bx = 0;
		*cx = cpuid_leaf5_ecx_val;
		*dx = cpuid_leaf5_edx_val;
		return;

	case CPUID_THERM_POWER_LEAF:
		/* Disabling APERFMPERF for kernel usage */
		maskecx = ~(1 << APERFMPERF_PRESENT);
		break;

	case 0xb:
		/* Suppress extended topology stuff */
		maskebx = 0;
		break;
	}

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));

	*bx &= maskebx;
	*cx &= maskecx;
	*cx |= setecx;
	*dx &= maskedx;
}
STACK_FRAME_NON_STANDARD(xen_cpuid); /* XEN_EMULATE_PREFIX */

static bool __init xen_check_mwait(void)
{
#ifdef CONFIG_ACPI
	struct xen_platform_op op = {
		.cmd = XENPF_set_processor_pminfo,
		.u.set_pminfo.id = -1,
		.u.set_pminfo.type = XEN_PM_PDC,
	};
	uint32_t buf[3];
	unsigned int ax, bx, cx, dx;
	unsigned int mwait_mask;

	/*
	 * We need to determine whether it is OK to expose the MWAIT
	 * capability to the kernel to harvest deeper than C3 states
	 * from ACPI _CST.  The hypervisor won't expose the MWAIT flag
	 * because that would break backwards compatibility, so we find
	 * out directly from the hardware and a hypercall.  Only the
	 * initial domain gets to do this.
	 */
	if (!xen_initial_domain())
		return false;

	/*
	 * When running under platform earlier than Xen4.2, do not expose
	 * mwait, to avoid the risk of loading native acpi pad driver
	 */
	if (!xen_running_on_version_or_later(4, 2))
		return false;

	ax = 1;
	cx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
		     (1 << (X86_FEATURE_MWAIT % 32));

	if ((cx & mwait_mask) != mwait_mask)
		return false;

	/*
	 * Read the native MWAIT sub-leaf so its C-state information can
	 * be synthesized in xen_cpuid() if the hypervisor agrees below.
	 */
	ax = CPUID_MWAIT_LEAF;
	bx = 0;
	cx = 0;
	dx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	/*
	 * Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so,
	 * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
	 */
	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;
	buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);

	set_xen_guest_handle(op.u.set_pminfo.pdc, buf);

	if ((HYPERVISOR_dom0_op(&op) == 0) &&
	    (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
		cpuid_leaf5_ecx_val = cx;
		cpuid_leaf5_edx_val = dx;
	}
	return true;
#else
	return false;
#endif
}

static void __init xen_init_cpuid_mask(void)
{
	unsigned int ax, bx, cx, dx;
	unsigned int xsave_mask;

	cpuid_leaf1_edx_mask =
		~((1 << X86_FEATURE_MTRR) |  /* disable MTRR */
		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	if (!xen_initial_domain())
		cpuid_leaf1_edx_mask &=
			~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
			  (1 << X86_FEATURE_ACPI));  /* disable ACPI */

	cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));

	ax = 1;
	cx = 0;
	xen_cpuid(&ax, &bx, &cx, &dx);

	xsave_mask =
		(1 << (X86_FEATURE_XSAVE % 32)) |
		(1 << (X86_FEATURE_OSXSAVE % 32));

	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
	if ((cx & xsave_mask) != xsave_mask)
		cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
	if (xen_check_mwait())
		cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
}

static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

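/* Flush any pending multicalls before finishing a context switch. */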
static void xen_end_context_switch(struct task_struct *next)
{
	xen_mc_flush();
	paravirt_end_context_switch(next);
}

static unsigned long xen_store_tr(void)
{
	return 0;
}

/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();
}
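
/*
 * Xen requires LDT pages to be mapped read-only before they are loaded
 * with MMUEXT_SET_LDT, so flip the aliases on alloc/free.
 */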
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}
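
/* Point the vCPU at a new LDT via a (possibly batched) mmuext op. */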
static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	trace_xen_cpu_set_ldt(addr, entries);

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
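
/*
 * Load a GDT.  The GDT pages may be virtually mapped (e.g. in the
 * percpu area), so each page is translated to its MFN and made
 * read-only before being handed to the hypervisor.
 */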
static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages..
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		int level;
		pte_t *ptep;
		unsigned long pfn, mfn;
		void *virt;

		/*
		 * The GDT is per-cpu and is in the percpu data area.
		 * That can be virtually mapped, so we need to do a
		 * page-walk to get the underlying MFN for the
		 * hypercall.  The page can also be in the kernel's
		 * linear range, so we need to RO that mapping too.
		 */
		ptep = lookup_address(va, &level);
		BUG_ON(ptep == NULL);

		pfn = pte_pfn(*ptep);
		mfn = pfn_to_mfn(pfn);
		virt = __va(PFN_PHYS(pfn));

		frames[f] = mfn;

		make_lowmem_page_readonly((void *)va);
		make_lowmem_page_readonly(virt);
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}

/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages..
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		pte_t pte;
		unsigned long pfn, mfn;

		pfn = virt_to_pfn(va);
		mfn = pfn_to_mfn(pfn);

		pte = pfn_pte(pfn, PAGE_KERNEL_RO);

		if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
			BUG();

		frames[f] = mfn;
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}

static inline bool desc_equal(const struct desc_struct *d1,
			      const struct desc_struct *d2)
{
	return d1->a == d2->a && d1->b == d2->b;
}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
	struct desc_struct *gdt;
	xmaddr_t maddr;
	struct multicall_space mc;

	if (desc_equal(shadow, &t->tls_array[i]))
		return;

	*shadow = t->tls_array[i];

	gdt = get_cpu_gdt_table(cpu);
	maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone
	 * and lazy gs handling is enabled, it means we're in a
	 * context switch, and %gs has just been saved.  This means we
	 * can zero it out to prevent faults on exit from the
	 * hypervisor if the next process has no %gs.  Either way, it
	 * has been saved, and the new value will get loaded properly.
	 * This will go away as soon as Xen has been modified to not
	 * save/restore %gs for normal hypercalls.
	 *
	 * On x86_64, this hack is not used for %gs, because gs points
	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
	 * must not zero %gs on x86_64.
	 *
	 * For x86_64, we need to zero %fs, otherwise we may get an
	 * exception between the new %fs descriptor being loaded and
	 * %fs being effectively cleared at __switch_to().
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

#ifdef CONFIG_X86_64
static void xen_load_gs_index(unsigned int idx)
{
	if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
		BUG();
}
#endif

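/* Update a single LDT entry via hypercall, flushing batched multicalls first. */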
static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{
	xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
	u64 entry = *(u64 *)ptr;

	trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}
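
/*
 * Convert an IDT gate into Xen's trap_info format.  Returns 0 if the
 * gate should not be passed to the hypervisor.
 */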
static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{
	unsigned long addr;

	if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
		return 0;

	info->vector = vector;

	addr = gate_offset(*val);
#ifdef CONFIG_X86_64
	/*
	 * Look for known traps using IST, and substitute them
	 * appropriately.  The debugger ones are the only ones we care
	 * about.  Xen will handle faults like double_fault,
	 * so we should never see them.  Warn if
	 * there's an unexpected IST-using fault handler.
	 */
	if (addr == (unsigned long)debug)
		addr = (unsigned long)xen_debug;
	else if (addr == (unsigned long)int3)
		addr = (unsigned long)xen_int3;
	else if (addr == (unsigned long)stack_segment)
		addr = (unsigned long)xen_stack_segment;
	else if (addr == (unsigned long)double_fault ||
		 addr == (unsigned long)nmi) {
		/* Don't need to handle these */
		return 0;
#ifdef CONFIG_X86_MCE
	} else if (addr == (unsigned long)machine_check) {
		/*
		 * when xen hypervisor inject vMCE to guest,
		 * use native mce handler to handle it
		 */
		;
#endif
	} else {
		/* Some other trap using IST? */
		if (WARN_ON(val->ist != 0))
			return 0;
	}
#endif	/* CONFIG_X86_64 */
	info->address = addr;

	info->cs = gate_segment(*val);
	info->flags = val->dpl;
	/* interrupt gates clear IF */
	if (val->type == GATE_INTERRUPT)
		info->flags |= 1 << 2;

	return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/*
 * Set an IDT entry.  If the entry is part of the current IDT, then
 * also update Xen.
 */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	trace_xen_cpu_write_idt_entry(dt, entrynum, g);

	preempt_disable();

	start = __this_cpu_read(idt_desc.address);
	end = start + __this_cpu_read(idt_desc.size) + 1;

	xen_mc_flush();

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, g, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}

static void xen_convert_trap_info(const struct desc_ptr *desc,
				  struct trap_info *traps)
{
	unsigned in, out, count;

	count = (desc->size+1) / sizeof(gate_desc);
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		gate_desc *entry = (gate_desc *)(desc->address) + in;

		if (cvt_gate_to_trap(in, entry, &traps[out]))
			out++;
	}
	traps[out].address = 0;
}

void xen_copy_trap_info(struct trap_info *traps)
{
	const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);

	xen_convert_trap_info(desc, traps);
}

/*
 * Load a new IDT into Xen.  In principle this can be per-cpu, but we
 * hold a spinlock to protect the static traps[] array (static because
 * it avoids allocation failures).
 */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	trace_xen_cpu_load_idt(desc);

	spin_lock(&lock);

	this_cpu_write(idt_desc, *desc);

	xen_convert_trap_info(desc, traps);

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}

/* Write a GDT descriptor entry; LDT and TSS entries are managed by Xen. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}

	}

	preempt_enable();
}

/*
 * Version of write_gdt_entry for use at early boot-time needed to
 * update an entry as simply as possible.
 */
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
					    const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			dt[entry] = *(struct desc_struct *)desc;
	}

	}
}
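
/* Switch the kernel stack pointer (sp0) with a batched stack_switch op. */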
static void xen_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

static void xen_io_delay(void)
{
}
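
/*
 * Minimal local APIC implementation: under Xen PV the APIC is owned by
 * the hypervisor, so reads are emulated via platform hypercalls and
 * writes are (warned-about) no-ops.
 */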
#ifdef CONFIG_X86_LOCAL_APIC
static unsigned long xen_set_apic_id(unsigned int x)
{
	WARN_ON(1);
	return x;
}
static unsigned int xen_get_apic_id(unsigned long x)
{
	return ((x)>>24) & 0xFFu;
}
static u32 xen_apic_read(u32 reg)
{
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.pcpu_info.xen_cpuid = 0,
	};
	int ret = 0;

	/*
	 * Shouldn't need this as APIC is turned off for PV, and we
	 * only get called on the bootup processor.  But just in case.
	 */
	if (!xen_initial_domain() || smp_processor_id())
		return 0;

	if (reg == APIC_LVR)
		return 0x10;

	if (reg != APIC_ID)
		return 0;

	ret = HYPERVISOR_dom0_op(&op);
	if (ret)
		return 0;

	return op.u.pcpu_info.apic_id << 24;
}

static void xen_apic_write(u32 reg, u32 val)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static u64 xen_apic_icr_read(void)
{
	return 0;
}

static void xen_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static void xen_apic_wait_icr_idle(void)
{
	return;
}

static u32 xen_safe_apic_wait_icr_idle(void)
{
	return 0;
}

static void set_xen_basic_apic_ops(void)
{
	apic->read = xen_apic_read;
	apic->write = xen_apic_write;
	apic->icr_read = xen_apic_icr_read;
	apic->icr_write = xen_apic_icr_write;
	apic->wait_icr_idle = xen_apic_wait_icr_idle;
	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
	apic->set_apic_id = xen_set_apic_id;
	apic->get_apic_id = xen_get_apic_id;

#ifdef CONFIG_SMP
	apic->send_IPI_allbutself = xen_send_IPI_allbutself;
	apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
	apic->send_IPI_mask = xen_send_IPI_mask;
	apic->send_IPI_all = xen_send_IPI_all;
	apic->send_IPI_self = xen_send_IPI_self;
#endif
}

#endif	/* CONFIG_X86_LOCAL_APIC */
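
/* Clear CR0.TS with a batched fpu_taskswitch op instead of clts. */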
static void xen_clts(void)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

static unsigned long xen_read_cr0(void)
{
	unsigned long cr0 = this_cpu_read(xen_cr0_value);

	if (unlikely(cr0 == 0)) {
		cr0 = native_read_cr0();
		this_cpu_write(xen_cr0_value, cr0);
	}

	return cr0;
}

static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	this_cpu_write(xen_cr0_value, cr0);

	/* Only pay attention to cr0.TS; everything else is
	   ignored. */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_write_cr4(unsigned long cr4)
{
	cr4 &= ~X86_CR4_PGE;
	cr4 &= ~X86_CR4_PSE;

	native_write_cr4(cr4);
}

#ifdef CONFIG_X86_64
static inline unsigned long xen_read_cr8(void)
{
	return 0;
}
static inline void xen_write_cr8(unsigned long val)
{
	BUG_ON(val);
}
#endif

static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
	int ret;

	ret = 0;

	switch (msr) {
#ifdef CONFIG_X86_64
	unsigned which;
	u64 base;

	case MSR_FS_BASE:		which = SEGBASE_FS; goto set;
	case MSR_KERNEL_GS_BASE:	which = SEGBASE_GS_USER; goto set;
	case MSR_GS_BASE:		which = SEGBASE_GS_KERNEL; goto set;

	set:
		base = ((u64)high << 32) | low;
		if (HYPERVISOR_set_segment_base(which, base) != 0)
			ret = -EIO;
		break;
#endif

	case MSR_STAR:
	case MSR_CSTAR:
	case MSR_LSTAR:
	case MSR_SYSCALL_MASK:
	case MSR_IA32_SYSENTER_CS:
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
		/* Fast syscall setup is all done in hypercalls, so
		   these are all ignored.  Don't end up in a warning. */
		break;

	case MSR_IA32_CR_PAT:
		if (smp_processor_id() == 0)
			xen_set_pat(((u64)high << 32) | low);
		break;

	default:
		ret = native_write_msr_safe(msr, low, high);
	}

	return ret;
}

void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}

/* This is called once we have the cpu_possible_mask */
void xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* Set up direct vCPU id mapping for PV guests. */
		per_cpu(xen_vcpu_id, cpu) = cpu;
		xen_vcpu_setup(cpu);
	}

	/*
	 * xen_vcpu_setup managed to place the vcpu_info within the
	 * percpu area for all cpus, so make use of it.
	 */
	if (have_vcpu_info_placement) {
		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
	}
}

static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	char *start, *end, *reloc;
	unsigned ret;

	start = end = reloc = NULL;

#define SITE(op, x)							\
	case PARAVIRT_PATCH(op.x):					\
	if (have_vcpu_info_placement) {					\
		start = (char *)xen_##x##_direct;			\
		end = xen_##x##_direct_end;				\
		reloc = xen_##x##_direct_reloc;				\
	}								\
	goto patch_site

	switch (type) {
		SITE(pv_irq_ops, irq_enable);
		SITE(pv_irq_ops, irq_disable);
		SITE(pv_irq_ops, save_fl);
		SITE(pv_irq_ops, restore_fl);
#undef SITE

	patch_site:
		if (start == NULL || (end-start) > len)
			goto default_patch;

		ret = paravirt_patch_insns(insnbuf, len, start, end);

		/* Note: because reloc is assigned from something that
		   appears to be an array, gcc assumes it's non-null,
		   but doesn't know its relationship with start and
		   end. */
		if (reloc > start && reloc < end) {
			int reloc_off = reloc - start;
			long *relocp = (long *)(insnbuf + reloc_off);
			long delta = start - (char *)addr;

			*relocp += delta;
		}
		break;

	default_patch:
	default:
		ret = paravirt_patch_default(type, clobbers, insnbuf,
					     addr, len);
		break;
	}

	return ret;
}

static const struct pv_info xen_info __initconst = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,

#ifdef CONFIG_X86_64
	.extra_user_64bit_cs = FLAT_USER_CS64,
#endif

	.name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initconst = {
	.patch = xen_patch,
};

static const struct pv_cpu_ops xen_cpu_ops __initconst = {
	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = xen_clts,

	.read_cr0 = xen_read_cr0,
	.write_cr0 = xen_write_cr0,

	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = xen_write_cr4,

#ifdef CONFIG_X86_64
	.read_cr8 = xen_read_cr8,
	.write_cr8 = xen_write_cr8,
#endif

	.wbinvd = native_wbinvd,

	.read_msr = native_read_msr_safe,
	.write_msr = xen_write_msr_safe,

	.read_pmc = native_read_pmc,

	.iret = xen_iret,
	.irq_enable_sysexit = xen_sysexit,
#ifdef CONFIG_X86_64
	.usergs_sysret32 = xen_sysret32,
	.usergs_sysret64 = xen_sysret64,
#endif

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
	.load_gs_index = xen_load_gs_index,
#endif

	.alloc_ldt = xen_alloc_ldt,
	.free_ldt = xen_free_ldt,

	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_sp0 = xen_load_sp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

	/* Xen takes care of %gs when switching to usermode for us */
	.swapgs = paravirt_nop,

	.start_context_switch = paravirt_start_context_switch,
	.end_context_switch = xen_end_context_switch,
};

static const struct pv_apic_ops xen_apic_ops __initconst = {
#ifdef CONFIG_X86_LOCAL_APIC
	.startup_ipi_hook = paravirt_nop,
#endif
};
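
/* Ask the hypervisor to shut the domain down, reboot it, or mark it crashed. */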
static void xen_reboot(int reason)
{
	struct sched_shutdown r = { .reason = reason };

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
		BUG();
}

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}

static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	xen_reboot(SHUTDOWN_crash);
	return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
	.notifier_call = xen_panic_event,
};

int xen_panic_handler_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
	return 0;
}

static const struct machine_ops xen_machine_ops __initconst = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_power_off,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};

static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
	struct xen_platform_op op;
	struct edd_info *edd_info;
	u32 *mbr_signature;
	unsigned nr;
	int ret;

	edd_info = boot_params.eddbuf;
	mbr_signature = boot_params.edd_mbr_sig_buffer;

	op.cmd = XENPF_firmware_info;

	op.u.firmware_info.type = XEN_FW_DISK_INFO;
	for (nr = 0; nr < EDDMAXNR; nr++) {
		struct edd_info *info = edd_info + nr;

		op.u.firmware_info.index = nr;
		info->params.length = sizeof(info->params);
		set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
				     &info->params);
		ret = HYPERVISOR_dom0_op(&op);
		if (ret)
			break;

#define C(x) info->x = op.u.firmware_info.u.disk_info.x
		C(device);
		C(version);
		C(interface_support);
		C(legacy_max_cylinder);
		C(legacy_max_head);
		C(legacy_sectors_per_track);
#undef C
	}
	boot_params.eddbuf_entries = nr;

	op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
	for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
		op.u.firmware_info.index = nr;
		ret = HYPERVISOR_dom0_op(&op);
		if (ret)
			break;
		mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
	}
	boot_params.edd_mbr_sig_buf_entries = nr;
#endif
}

/*
 * Set up the GDT and segment registers for -fstack-protector.  Until
 * we do this, we have to be careful not to call any stack-protected
 * function, which is most of the kernel.
 */
static void __init xen_setup_stackprotector(void)
{
	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
	pv_cpu_ops.load_gdt = xen_load_gdt_boot;

	setup_stack_canary_segment(0);
	switch_to_new_gdt(0);

	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
	pv_cpu_ops.load_gdt = xen_load_gdt;
}

/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
	struct physdev_set_iopl set_iopl;
	int rc;

	if (!xen_start_info)
		return;

	xen_domain_type = XEN_PV_DOMAIN;

	xen_setup_machphys_mapping();

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_cpu_ops = xen_cpu_ops;
	pv_apic_ops = xen_apic_ops;

	x86_init.resources.memory_setup = xen_memory_setup;
	x86_init.oem.arch_setup = xen_arch_setup;
	x86_init.oem.banner = xen_banner;

	xen_init_time_ops();

	/*
	 * Set up some pagetable state before starting to set any ptes.
	 */
	xen_init_mmu_ops();

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
#if 0
	if (!xen_initial_domain())
#endif
		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

	__supported_pte_mask |= _PAGE_IOMAP;

	/*
	 * Prevent page tables from being allocated in highmem, even
	 * if CONFIG_HIGHPTE is enabled.
	 */
	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

	/* Work out if we support NX */
	x86_configure_nx();

	xen_setup_features();

	/* Get mfn list */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_build_dynamic_phys_to_machine();

	/*
	 * Set up kernel GDT and segment registers, mainly so that
	 * -fstack-protector code can be executed.
	 */
	xen_setup_stackprotector();

	xen_init_irq_ops();
	xen_init_cpuid_mask();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * set up the basic apic ops.
	 */
	set_xen_basic_apic_ops();
#endif

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * The pages we get from Xen are not related to machine pages,
	 * so any NUMA information the kernel tries to get from ACPI
	 * will be meaningless.  Prevent it from trying.
	 */
	acpi_numa = -1;
#endif
#ifdef CONFIG_X86_PAT
	/*
	 * Disable PAT for now: Xen does not currently support it, so
	 * the kernel must not create PAT-dependent mappings here.
	 */
	pat_enabled = 0;
#endif

	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map and a non-dummy shared_info. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	local_irq_disable();
	early_boot_irqs_disabled = true;

	xen_raw_console_write("mapping kernel into physical memory\n");
	xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages);

	/* Allocate and initialize top and mid mfn levels for p2m structure */
	xen_build_mfn_list_list();

	/* keep using Xen gdt for now; no urgent need to change it */

#ifdef CONFIG_X86_32
	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;
#else
	pv_info.kernel_rpl = 0;
#endif
	/* set the limit of our address space */
	xen_reserve_top();

	/*
	 * Raise the iopl here rather than in xen_arch_setup, which is
	 * too late: on AMD, early_cpu_init (run before ->arch_setup())
	 * already pokes the 0xcf8 port.
	 */
	set_iopl.iopl = 1;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
	if (rc != 0)
		xen_raw_printk("physdev_op failed %d\n", rc);

#ifdef CONFIG_X86_32
	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	new_cpu_data.hard_math = 1;
	new_cpu_data.wp_works_ok = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = xen_start_info->mod_start
		? __pa(xen_start_info->mod_start) : 0;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);

	if (!xen_initial_domain()) {
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
		if (pci_xen)
			x86_init.pci.arch_init = pci_xen_init;
	} else {
		const struct dom0_vga_console_info *info =
			(void *)((char *)xen_start_info +
				 xen_start_info->console.dom0.info_off);
		struct xen_platform_op op = {
			.cmd = XENPF_firmware_info,
			.interface_version = XENPF_INTERFACE_VERSION,
			.u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
		};

		xen_init_vga(info, xen_start_info->console.dom0.info_size);
		xen_start_info->console.domU.mfn = 0;
		xen_start_info->console.domU.evtchn = 0;

		if (HYPERVISOR_dom0_op(&op) == 0)
			boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;

		xen_init_apic();

		/* Make sure ACS will be enabled */
		pci_request_acs();

		xen_acpi_sleep_register();

		/* Avoid searching for BIOS MP tables */
		x86_init.mpparse.find_smp_config = x86_init_noop;
		x86_init.mpparse.get_smp_config = x86_init_uint_noop;

		xen_boot_params_init_edd();
	}
#ifdef CONFIG_PCI
	/* PCI BIOS service won't work from a PV guest. */
	pci_probe &= ~PCI_PROBE_BIOS;
#endif
	xen_raw_console_write("about to get started...\n");

	/* Let's presume PV guests always boot on vCPU with id 0. */
	per_cpu(xen_vcpu_id, 0) = 0;

	xen_setup_runstate_info(0);

	/* Start the world */
#ifdef CONFIG_X86_32
	i386_start_kernel();
#else
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}
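
/*
 * Map the shared_info page for an HVM guest via XENMEM_add_to_physmap.
 * Also called on resume, when more than one vCPU may be online.
 */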
void __ref xen_hvm_init_shared_info(void)
{
	int cpu;
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page;

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			extend_brk(PAGE_SIZE, PAGE_SIZE);
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/*
	 * xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page, we use it in the event channel upcall and in some pvclock
	 * related functions. We don't need the vcpu_info placement
	 * optimizations because we don't use any pv_mmu or pv_apic callbacks.
	 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
	 * online but xen_hvm_init_shared_info is run at resume time too and
	 * in that case multiple vcpus might be online.
	 */
	for_each_online_cpu(cpu) {
		/* Leave it to be NULL. */
		if (xen_vcpu_nr(cpu) >= MAX_VIRT_CPUS)
			continue;
		per_cpu(xen_vcpu, cpu) =
			&HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
	}
}

#ifdef CONFIG_XEN_PVHVM
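/* Discover Xen via CPUID and install the hypercall page (HVM guests). */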
static void __init init_hvm_pv_info(void)
{
	int major, minor;
	uint32_t eax, ebx, ecx, edx, pages, msr, base;
	u64 pfn;

	base = xen_cpuid_base();
	cpuid(base + 1, &eax, &ebx, &ecx, &edx);

	major = eax >> 16;
	minor = eax & 0xffff;
	printk(KERN_INFO "Xen version %d.%d.\n", major, minor);

	cpuid(base + 2, &pages, &msr, &ecx, &edx);

	pfn = __pa(hypercall_page);
	wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));

	xen_setup_features();

	cpuid(base + 4, &eax, &ebx, &ecx, &edx);
	if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
		this_cpu_write(xen_vcpu_id, ebx);
	else
		this_cpu_write(xen_vcpu_id, smp_processor_id());

	pv_info.name = "Xen HVM";

	xen_domain_type = XEN_HVM_DOMAIN;
}

static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
			      void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		if (cpu_acpi_id(cpu) != U32_MAX)
			per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
		else
			per_cpu(xen_vcpu_id, cpu) = cpu;
		xen_vcpu_setup(cpu);
		if (xen_have_vector_callback) {
			if (xen_feature(XENFEAT_hvm_safe_pvclock))
				xen_setup_timer(cpu);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block xen_hvm_cpu_notifier = {
	.notifier_call = xen_hvm_cpu_notify,
};

#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
{
	native_machine_shutdown();
	if (kexec_in_progress)
		xen_reboot(SHUTDOWN_soft_reset);
}

static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
	native_machine_crash_shutdown(regs);
	xen_reboot(SHUTDOWN_soft_reset);
}
#endif

static void __init xen_hvm_guest_init(void)
{
	init_hvm_pv_info();

	xen_hvm_init_shared_info();

	xen_panic_handler_init();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;
	xen_hvm_smp_init();
	register_cpu_notifier(&xen_hvm_cpu_notifier);
	xen_unplug_emulated_devices();
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();
#ifdef CONFIG_KEXEC_CORE
	if (xen_running_on_version_or_later(3, 2)) {
		machine_ops.shutdown = xen_hvm_shutdown;
		machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
	}
#endif
}
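
/* Hypervisor detection hook: returns the Xen CPUID leaf base, or 0. */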
static uint32_t __init xen_hvm_platform(void)
{
	uint32_t eax, ebx, ecx, edx, base;
	int major, minor;

	if (xen_pv_domain())
		return 0;

	base = xen_cpuid_base();

	/*
	 * A kdump kernel cannot rely on a Xen older than 3.2: those
	 * hypervisors lack the soft-reset support that kexec needs,
	 * so report the platform as undetected in that case.
	 */
	if (base && is_kdump_kernel()) {
		cpuid(base + 1, &eax, &ebx, &ecx, &edx);
		major = eax >> 16;
		minor = eax & 0xffff;
		if (major == 3 && minor < 2)
			return 0;
	}

	return base;
}
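
/*
 * Decide whether the kernel needs to set up the local APIC: unnecessary
 * once event-channel callbacks fully replace it.
 */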
bool xen_hvm_need_lapic(void)
{
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);

const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
	.name = "Xen HVM",
	.detect = xen_hvm_platform,
	.init_platform = xen_hvm_guest_init,
	.x2apic_available = xen_x2apic_para_available,
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);
#endif	/* CONFIG_XEN_PVHVM */