/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 */
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/edd.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvm.h>
#include <xen/hvc-console.h>
#include <xen/acpi.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
#include <asm/mach_traps.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/pat.h>

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <asm/acpi.h>
#include <acpi/pdc_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif

#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);

/*
 * Pointer to the per-vcpu vcpu_info area.  By default this points at
 * the corresponding slot in the shared_info page; if the hypervisor
 * supports VCPUOP_register_vcpu_info, xen_vcpu_setup() switches it to
 * the per-cpu xen_vcpu_info copy below.  It is read by the event
 * channel upcall and by the patched irq enable/disable fast paths.
 */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

/*
 * Per-cpu backing storage used when the hypervisor supports the
 * VCPUOP_register_vcpu_info hypercall; it overrides the default
 * shared_info slot and works for both PV and PVHVM guests.
 */
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long machine_to_phys_nr;
EXPORT_SYMBOL(machine_to_phys_nr);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

void *xen_initial_gdt;

RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

/*
 * Point at some empty memory to start with; the real shared_info page
 * is mapped in once the fixmap is up (see xen_setup_shared_info()).
 */
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;

/*
 * Whether per-cpu vcpu_info placement is available on all CPUs
 * (0 = not available, 1 = available).  It is cleared if the
 * VCPUOP_register_vcpu_info hypercall fails for any CPU, in which
 * case the number of usable CPUs is clamped to MAX_VIRT_CPUS.
 */
static int have_vcpu_info_placement = 1;

struct tls_descs {
	struct desc_struct desc[3];
};

/*
 * Updating the 3 TLS descriptors in the GDT on every task switch is
 * surprisingly expensive, so we keep a per-cpu shadow copy and only
 * issue the update hypercall for descriptors that actually changed.
 */
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
static void clamp_max_cpus(void)
{
#ifdef CONFIG_SMP
	if (setup_max_cpus > MAX_VIRT_CPUS)
		setup_max_cpus = MAX_VIRT_CPUS;
#endif
}

static void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	/*
	 * On PVHVM this path is reached twice per CPU: once at boot
	 * via smp_init -> xen_hvm_cpu_notify, and again if the VCPU
	 * is hotplugged.  VCPUOP_register_vcpu_info may only be
	 * issued once per CPU, so don't overwrite an earlier result.
	 * On PV it is called at boot (xen_setup_vcpu_info_placement)
	 * and on restore (xen_vcpu_restore).
	 */
	if (xen_hvm_domain()) {
		if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
			return;
	}
	if (cpu < MAX_VIRT_CPUS)
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

	if (!have_vcpu_info_placement) {
		if (cpu >= MAX_VIRT_CPUS)
			clamp_max_cpus();
		return;
	}

	vcpup = &per_cpu(xen_vcpu_info, cpu);
	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	/*
	 * Ask the hypervisor to place the vcpu_info structure where
	 * we want it, which allows direct access via a percpu
	 * variable.
	 * N.B. this hypercall can be issued only once per CPU;
	 * subsequent calls fail with -EINVAL, since there is no
	 * unregister variant and info.mfn/info.offset cannot be
	 * overwritten.
	 */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
		clamp_max_cpus();
	} else {
		/*
		 * This cpu is using the registered vcpu info, even if
		 * later ones fail to.
		 */
		per_cpu(xen_vcpu, cpu) = vcpup;
	}
}

/*
 * On restore, set the vcpu placement up again.  If it fails, then
 * we're in a bad state, since we can't back out from using it.
 */
void xen_vcpu_restore(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		bool other_cpu = (cpu != smp_processor_id());
		bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
			BUG();

		xen_setup_runstate_info(cpu);

		if (have_vcpu_info_placement)
			xen_vcpu_setup(cpu);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
			BUG();
	}
}

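/* Announce the hypervisor version and kernel flavour at boot. */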
static void __init xen_banner(void)
{
	unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
	struct xen_extraversion extra;
	HYPERVISOR_xen_version(XENVER_extraversion, &extra);

	pr_info("Booting paravirtualized kernel %son %s\n",
		xen_feature(XENFEAT_auto_translated_physmap) ?
			"with PVH extensions " : "", pv_info.name);
	printk(KERN_INFO "Xen version: %d.%d%s%s\n",
	       version >> 16, version & 0xffff, extra.extraversion,
	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}

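/*
 * Return true when the running hypervisor version is at least
 * major.minor; false on native hardware or on an older Xen.
 */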
bool
xen_running_on_version_or_later(unsigned int major, unsigned int minor)
{
	unsigned int version;

	if (!xen_domain())
		return false;

	version = HYPERVISOR_xen_version(XENVER_version, NULL);
	if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
	    ((version >> 16) > major))
		return true;
	return false;
}

#define CPUID_THERM_POWER_LEAF 6
#define APERFMPERF_PRESENT 0

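/*
 * CPUID feature masks applied by xen_cpuid().  The leaf-1 masks hide
 * features the guest must not use; the leaf-5 values are the MWAIT
 * sub-leaf data gathered in xen_check_mwait().
 */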
static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;

static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask;
static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;

static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned maskebx = ~0;
	unsigned maskecx = ~0;
	unsigned maskedx = ~0;
	unsigned setecx = 0;

	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	switch (*ax) {
	case 1:
		maskecx = cpuid_leaf1_ecx_mask;
		setecx = cpuid_leaf1_ecx_set_mask;
		maskedx = cpuid_leaf1_edx_mask;
		break;

	case CPUID_MWAIT_LEAF:
		/* Synthesize the values, since the hypervisor masks them. */
		*ax = 0;
		*bx = 0;
		*cx = cpuid_leaf5_ecx_val;
		*dx = cpuid_leaf5_edx_val;
		return;

	case CPUID_THERM_POWER_LEAF:
		/* Disabling APERFMPERF for kernel usage */
		maskecx = ~(1 << APERFMPERF_PRESENT);
		break;

	case 0xb:
		/* Suppress extended topology stuff */
		maskebx = 0;
		break;
	}

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));

	*bx &= maskebx;
	*cx &= maskecx;
	*cx |= setecx;
	*dx &= maskedx;
}

static bool __init xen_check_mwait(void)
{
#ifdef CONFIG_ACPI
	struct xen_platform_op op = {
		.cmd = XENPF_set_processor_pminfo,
		.u.set_pminfo.id = -1,
		.u.set_pminfo.type = XEN_PM_PDC,
	};
	uint32_t buf[3];
	unsigned int ax, bx, cx, dx;
	unsigned int mwait_mask;

	/*
	 * We need to determine whether it is OK to expose the MWAIT
	 * capability to the kernel so that ACPI can harvest deeper
	 * C-states from _CST.  The hypervisor does not expose the
	 * MWAIT flag itself, so we check the hardware directly and
	 * then ask the hypervisor via XENPF_set_processor_pminfo.
	 * Only the initial domain manages ACPI power states.
	 */
	if (!xen_initial_domain())
		return false;

	/*
	 * Hypervisors older than Xen 4.2 do not support this
	 * interface, so don't expose MWAIT there.
	 */
	if (!xen_running_on_version_or_later(4, 2))
		return false;

	ax = 1;
	cx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
		     (1 << (X86_FEATURE_MWAIT % 32));

	if ((cx & mwait_mask) != mwait_mask)
		return false;

	/*
	 * We need to emulate the MWAIT_LEAF and for that we need both
	 * ecx and edx; the hypercall provides only partial
	 * information.
	 */
	ax = CPUID_MWAIT_LEAF;
	bx = 0;
	cx = 0;
	dx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	/*
	 * Ask the hypervisor whether the FFH (MWAIT-based) C-state
	 * capabilities may be used; if so, remember the MWAIT leaf
	 * values so xen_cpuid() can report them.
	 */
	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;
	buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);

	set_xen_guest_handle(op.u.set_pminfo.pdc, buf);

	if ((HYPERVISOR_dom0_op(&op) == 0) &&
	    (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
		cpuid_leaf5_ecx_val = cx;
		cpuid_leaf5_edx_val = dx;
	}
	return true;
#else
	return false;
#endif
}

static void __init xen_init_cpuid_mask(void)
{
	unsigned int ax, bx, cx, dx;
	unsigned int xsave_mask;

	cpuid_leaf1_edx_mask =
		~((1 << X86_FEATURE_MTRR) |  /* disable MTRR */
		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	if (!xen_initial_domain())
		cpuid_leaf1_edx_mask &=
			~((1 << X86_FEATURE_ACPI));  /* disable ACPI */

	cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));

	ax = 1;
	cx = 0;
	cpuid(1, &ax, &bx, &cx, &dx);

	xsave_mask =
		(1 << (X86_FEATURE_XSAVE % 32)) |
		(1 << (X86_FEATURE_OSXSAVE % 32));

	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
	if ((cx & xsave_mask) != xsave_mask)
		cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
	if (xen_check_mwait())
		cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
}

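/* Debug registers are not directly accessible under Xen PV; go via hypercall. */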
static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

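/* Flush any batched multicalls before letting the context switch complete. */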
static void xen_end_context_switch(struct task_struct *next)
{
	xen_mc_flush();
	paravirt_end_context_switch(next);
}

static unsigned long xen_store_tr(void)
{
	return 0;
}

/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protection to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();
}

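/*
 * Xen requires pages holding an LDT to be mapped read-only in the
 * guest, so flip their protection around allocation and free.
 */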
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}

static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	trace_xen_cpu_set_ldt(addr, entries);

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages.
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		int level;
		pte_t *ptep;
		unsigned long pfn, mfn;
		void *virt;

		/*
		 * The GDT is per-cpu and is in the percpu data area.
		 * That can be virtually mapped, so we need to do a
		 * page walk to get the underlying MFN for the
		 * hypercall.  The page can also be in the kernel's
		 * linear range, so we must make that mapping RO too.
		 */
		ptep = lookup_address(va, &level);
		BUG_ON(ptep == NULL);

		pfn = pte_pfn(*ptep);
		mfn = pfn_to_mfn(pfn);
		virt = __va(PFN_PHYS(pfn));

		frames[f] = mfn;

		make_lowmem_page_readonly((void *)va);
		make_lowmem_page_readonly(virt);
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}

/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages.
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		pte_t pte;
		unsigned long pfn, mfn;

		pfn = virt_to_pfn(va);
		mfn = pfn_to_mfn(pfn);

		pte = pfn_pte(pfn, PAGE_KERNEL_RO);

		if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
			BUG();

		frames[f] = mfn;
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}

static inline bool desc_equal(const struct desc_struct *d1,
			      const struct desc_struct *d2)
{
	return d1->a == d2->a && d1->b == d2->b;
}

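/*
 * Queue an update for one TLS descriptor through the multicall batch,
 * skipping descriptors that the per-cpu shadow copy shows unchanged.
 */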
static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
	struct desc_struct *gdt;
	xmaddr_t maddr;
	struct multicall_space mc;

	if (desc_equal(shadow, &t->tls_array[i]))
		return;

	*shadow = t->tls_array[i];

	gdt = get_cpu_gdt_table(cpu);
	maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * If we're being called in lazy-cpu mode we're in the middle
	 * of a context switch, and %gs (32-bit) or %fs (64-bit) has
	 * just been saved, so it can safely be zeroed here.  That
	 * prevents faults on hypervisor exit if the next process has
	 * no TLS segment loaded.  On x86_64 %gs must not be touched,
	 * since it points at KERNEL_GS_BASE; %fs is zeroed instead,
	 * otherwise an exception could occur between the new %fs
	 * descriptor being loaded and %fs being effectively cleared
	 * at __switch_to().
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

#ifdef CONFIG_X86_64
static void xen_load_gs_index(unsigned int idx)
{
	if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
		BUG();
}
#endif

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{
	xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
	u64 entry = *(u64 *)ptr;

	trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}

static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{
	unsigned long addr;

	if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
		return 0;

	info->vector = vector;

	addr = gate_offset(*val);
#ifdef CONFIG_X86_64
	/*
	 * Look for known traps using IST and substitute them
	 * appropriately.  The debugger ones are the only ones we care
	 * about.  Xen will handle faults like double_fault, so we
	 * should never see them.  Warn if there's an unexpected
	 * IST-using fault handler.
	 */
	if (addr == (unsigned long)debug)
		addr = (unsigned long)xen_debug;
	else if (addr == (unsigned long)int3)
		addr = (unsigned long)xen_int3;
	else if (addr == (unsigned long)stack_segment)
		addr = (unsigned long)xen_stack_segment;
	else if (addr == (unsigned long)double_fault) {
		/* Don't need to handle these */
		return 0;
#ifdef CONFIG_X86_MCE
	} else if (addr == (unsigned long)machine_check) {
		/*
		 * When the hypervisor injects a vMCE into the guest,
		 * the native mce handler handles it.
		 */
		;
#endif
	} else if (addr == (unsigned long)nmi)
		/*
		 * Use the native version as well.
		 */
		;
	else {
		/* Some other trap using IST? */
		if (WARN_ON(val->ist != 0))
			return 0;
	}
#endif	/* CONFIG_X86_64 */
	info->address = addr;

	info->cs = gate_segment(*val);
	info->flags = val->dpl;
	/* interrupt gates clear IF */
	if (val->type == GATE_INTERRUPT)
		info->flags |= 1 << 2;

	return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/*
 * Set an IDT entry.  If the entry is part of the current IDT, then
 * also update Xen.
 */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	trace_xen_cpu_write_idt_entry(dt, entrynum, g);

	preempt_disable();

	start = __this_cpu_read(idt_desc.address);
	end = start + __this_cpu_read(idt_desc.size) + 1;

	xen_mc_flush();

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, g, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}

static void xen_convert_trap_info(const struct desc_ptr *desc,
				  struct trap_info *traps)
{
	unsigned in, out, count;

	count = (desc->size+1) / sizeof(gate_desc);
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		gate_desc *entry = (gate_desc *)(desc->address) + in;

		if (cvt_gate_to_trap(in, entry, &traps[out]))
			out++;
	}
	traps[out].address = 0;
}

void xen_copy_trap_info(struct trap_info *traps)
{
	const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);

	xen_convert_trap_info(desc, traps);
}

/*
 * Load a new IDT into Xen.  In principle this can be per-cpu, so we
 * hold a spinlock to protect the static traps[] array (static because
 * it avoids allocation, and saves stack space).
 */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	trace_xen_cpu_load_idt(desc);

	spin_lock(&lock);

	memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));

	xen_convert_trap_info(desc, traps);

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}

/*
 * Write a GDT descriptor entry.  Ignore LDT descriptors, since
 * they're handled differently.
 */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}

	}

	preempt_enable();
}

/*
 * Version of write_gdt_entry for use at early boot-time needed to
 * update an entry as simply as possible.
 */
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
					    const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			dt[entry] = *(struct desc_struct *)desc;
	}

	}
}

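/*
 * Tell the hypervisor which kernel stack to use on entry from user
 * mode (the PV analogue of updating tss->sp0), batched via a
 * multicall.
 */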
static void xen_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

static void xen_io_delay(void)
{
}

#ifdef CONFIG_X86_LOCAL_APIC
static unsigned long xen_set_apic_id(unsigned int x)
{
	WARN_ON(1);
	return x;
}
static unsigned int xen_get_apic_id(unsigned long x)
{
	return ((x)>>24) & 0xFFu;
}
static u32 xen_apic_read(u32 reg)
{
	struct xen_platform_op op = {
		.cmd = XENPF_get_cpuinfo,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.pcpu_info.xen_cpuid = 0,
	};
	int ret = 0;

	/*
	 * Shouldn't need this as the APIC is turned off for PV, and
	 * we only get called on the bootup processor.  But just in
	 * case.
	 */
	if (!xen_initial_domain() || smp_processor_id())
		return 0;

	if (reg == APIC_LVR)
		return 0x10;

	if (reg != APIC_ID)
		return 0;

	ret = HYPERVISOR_dom0_op(&op);
	if (ret)
		return 0;

	return op.u.pcpu_info.apic_id << 24;
}

static void xen_apic_write(u32 reg, u32 val)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static u64 xen_apic_icr_read(void)
{
	return 0;
}

static void xen_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static void xen_apic_wait_icr_idle(void)
{
	return;
}

static u32 xen_safe_apic_wait_icr_idle(void)
{
	return 0;
}

static void set_xen_basic_apic_ops(void)
{
	apic->read = xen_apic_read;
	apic->write = xen_apic_write;
	apic->icr_read = xen_apic_icr_read;
	apic->icr_write = xen_apic_icr_write;
	apic->wait_icr_idle = xen_apic_wait_icr_idle;
	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
	apic->set_apic_id = xen_set_apic_id;
	apic->get_apic_id = xen_get_apic_id;

#ifdef CONFIG_SMP
	apic->send_IPI_allbutself = xen_send_IPI_allbutself;
	apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
	apic->send_IPI_mask = xen_send_IPI_mask;
	apic->send_IPI_all = xen_send_IPI_all;
	apic->send_IPI_self = xen_send_IPI_self;
#endif
}

#endif

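/* Clear CR0.TS through a fpu_taskswitch multicall rather than a direct clts. */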
static void xen_clts(void)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

static unsigned long xen_read_cr0(void)
{
	unsigned long cr0 = this_cpu_read(xen_cr0_value);

	if (unlikely(cr0 == 0)) {
		cr0 = native_read_cr0();
		this_cpu_write(xen_cr0_value, cr0);
	}

	return cr0;
}

static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	this_cpu_write(xen_cr0_value, cr0);

	/*
	 * Only pay attention to cr0.TS; everything else is
	 * ignored.
	 */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_write_cr4(unsigned long cr4)
{
	cr4 &= ~X86_CR4_PGE;
	cr4 &= ~X86_CR4_PSE;

	native_write_cr4(cr4);
}

#ifdef CONFIG_X86_64
static inline unsigned long xen_read_cr8(void)
{
	return 0;
}
static inline void xen_write_cr8(unsigned long val)
{
	BUG_ON(val);
}
#endif

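/*
 * MSR reads mostly pass through to the hardware, but hide the x2APIC
 * enable bit when the (filtered) CPUID does not advertise x2APIC.
 */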
static u64 xen_read_msr_safe(unsigned int msr, int *err)
{
	u64 val;

	val = native_read_msr_safe(msr, err);
	switch (msr) {
	case MSR_IA32_APICBASE:
#ifdef CONFIG_X86_X2APIC
		if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
#endif
			val &= ~X2APIC_ENABLE;
		break;
	}
	return val;
}

static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
	int ret;

	ret = 0;

	switch (msr) {
#ifdef CONFIG_X86_64
	unsigned which;
	u64 base;

	case MSR_FS_BASE:		which = SEGBASE_FS; goto set;
	case MSR_KERNEL_GS_BASE:	which = SEGBASE_GS_USER; goto set;
	case MSR_GS_BASE:		which = SEGBASE_GS_KERNEL; goto set;

	set:
		base = ((u64)high << 32) | low;
		if (HYPERVISOR_set_segment_base(which, base) != 0)
			ret = -EIO;
		break;
#endif

	case MSR_STAR:
	case MSR_CSTAR:
	case MSR_LSTAR:
	case MSR_SYSCALL_MASK:
	case MSR_IA32_SYSENTER_CS:
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
		/*
		 * Fast syscall setup is all done in hypercalls, so
		 * these are all ignored.  Don't touch them.
		 */
		break;

	default:
		ret = native_write_msr_safe(msr, low, high);
	}

	return ret;
}

void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}

/* This is called once we have the cpu_possible_mask */
void xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		xen_vcpu_setup(cpu);

	/*
	 * xen_vcpu_setup managed to place the vcpu_info within the
	 * percpu area for all cpus, so make use of it.  Note that for
	 * PVH we want to use the native IRQ mechanism.
	 */
	if (have_vcpu_info_placement && !xen_pvh_domain()) {
		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
	}
}

static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	char *start, *end, *reloc;
	unsigned ret;

	start = end = reloc = NULL;

#define SITE(op, x)							\
	case PARAVIRT_PATCH(op.x):					\
	if (have_vcpu_info_placement) {					\
		start = (char *)xen_##x##_direct;			\
		end = xen_##x##_direct_end;				\
		reloc = xen_##x##_direct_reloc;				\
	}								\
	goto patch_site

	switch (type) {
		SITE(pv_irq_ops, irq_enable);
		SITE(pv_irq_ops, irq_disable);
		SITE(pv_irq_ops, save_fl);
		SITE(pv_irq_ops, restore_fl);
#undef SITE

	patch_site:
		if (start == NULL || (end-start) > len)
			goto default_patch;

		ret = paravirt_patch_insns(insnbuf, len, start, end);

		/*
		 * Note: because reloc is assigned from something that
		 * appears to be an array, gcc assumes it's non-null,
		 * but doesn't know its relationship with start and
		 * end.
		 */
		if (reloc > start && reloc < end) {
			int reloc_off = reloc - start;
			long *relocp = (long *)(insnbuf + reloc_off);
			long delta = start - (char *)addr;

			*relocp += delta;
		}
		break;

	default_patch:
	default:
		ret = paravirt_patch_default(type, clobbers, insnbuf,
					     addr, len);
		break;
	}

	return ret;
}

static const struct pv_info xen_info __initconst = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,

#ifdef CONFIG_X86_64
	.extra_user_64bit_cs = FLAT_USER_CS64,
#endif

	.name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initconst = {
	.patch = xen_patch,
};

static const struct pv_cpu_ops xen_cpu_ops __initconst = {
	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = xen_clts,

	.read_cr0 = xen_read_cr0,
	.write_cr0 = xen_write_cr0,

	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = xen_write_cr4,

#ifdef CONFIG_X86_64
	.read_cr8 = xen_read_cr8,
	.write_cr8 = xen_write_cr8,
#endif

	.wbinvd = native_wbinvd,

	.read_msr = xen_read_msr_safe,
	.write_msr = xen_write_msr_safe,

	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,

	.read_tscp = native_read_tscp,

	.iret = xen_iret,
	.irq_enable_sysexit = xen_sysexit,
#ifdef CONFIG_X86_64
	.usergs_sysret32 = xen_sysret32,
	.usergs_sysret64 = xen_sysret64,
#endif

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
	.load_gs_index = xen_load_gs_index,
#endif

	.alloc_ldt = xen_alloc_ldt,
	.free_ldt = xen_free_ldt,

	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_sp0 = xen_load_sp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

	/* Xen takes care of %gs when switching to usermode for us */
	.swapgs = paravirt_nop,

	.start_context_switch = paravirt_start_context_switch,
	.end_context_switch = xen_end_context_switch,
};

static const struct pv_apic_ops xen_apic_ops __initconst = {
#ifdef CONFIG_X86_LOCAL_APIC
	.startup_ipi_hook = paravirt_nop,
#endif
};

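/* Ask the hypervisor to shut the domain down for the given reason. */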
static void xen_reboot(int reason)
{
	struct sched_shutdown r = { .reason = reason };

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
		BUG();
}

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}

static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	xen_reboot(SHUTDOWN_crash);
	return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
	.notifier_call = xen_panic_event,
	.priority = INT_MIN
};

int xen_panic_handler_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
	return 0;
}

static const struct machine_ops xen_machine_ops __initconst = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_power_off,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};

static unsigned char xen_get_nmi_reason(void)
{
	unsigned char reason = 0;

	/* Construct a value which looks like it came from port 0x61 */
	if (test_bit(_XEN_NMIREASON_io_error,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_IOCHK;
	if (test_bit(_XEN_NMIREASON_pci_serr,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_SERR;

	return reason;
}

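/*
 * Fetch BIOS EDD disk and MBR-signature information from the
 * hypervisor and stash it in boot_params, as the real-mode boot code
 * would have done natively.
 */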
static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
	struct xen_platform_op op;
	struct edd_info *edd_info;
	u32 *mbr_signature;
	unsigned nr;
	int ret;

	edd_info = boot_params.eddbuf;
	mbr_signature = boot_params.edd_mbr_sig_buffer;

	op.cmd = XENPF_firmware_info;

	op.u.firmware_info.type = XEN_FW_DISK_INFO;
	for (nr = 0; nr < EDDMAXNR; nr++) {
		struct edd_info *info = edd_info + nr;

		op.u.firmware_info.index = nr;
		info->params.length = sizeof(info->params);
		set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
				     &info->params);
		ret = HYPERVISOR_dom0_op(&op);
		if (ret)
			break;

#define C(x) info->x = op.u.firmware_info.u.disk_info.x
		C(device);
		C(version);
		C(interface_support);
		C(legacy_max_cylinder);
		C(legacy_max_head);
		C(legacy_sectors_per_track);
#undef C
	}
	boot_params.eddbuf_entries = nr;

	op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
	for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
		op.u.firmware_info.index = nr;
		ret = HYPERVISOR_dom0_op(&op);
		if (ret)
			break;
		mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
	}
	boot_params.edd_mbr_sig_buf_entries = nr;
#endif
}

/*
 * Set up the GDT and segment registers for -fstack-protector.  Until
 * we do this, we have to be careful not to call any stack-protected
 * function.
 *
 * Note that it is __ref because the only caller of this after init
 * is PVH, which is not going to use xen_load_gdt_boot or other
 * __init functions.
 */
static void __ref xen_setup_gdt(int cpu)
{
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef CONFIG_X86_64
		unsigned long dummy;

		load_percpu_segment(cpu); /* We need to access per-cpu area */
		switch_to_new_gdt(cpu); /* GDT and GS setup */

		/*
		 * We are switching from the Xen-provided GDT to our
		 * HVM-mode GDT.  The new GDT has __KERNEL_CS with
		 * CS.L = 1, so reload %cs with a far return.
		 */
		asm volatile ("pushq %0\n"
			      "leaq 1f(%%rip),%0\n"
			      "pushq %0\n"
			      "lretq\n"
			      "1:\n"
			      : "=&r" (dummy) : "0" (__KERNEL_CS));

		/*
		 * While not strictly needed, we also clear %es, %ds
		 * and %fs: Xen zeroes them (along with the FS/GS base
		 * MSRs), and Linux zeroes them again in cpu_init()
		 * and in secondary_startup_64.
		 */
		loadsegment(es, 0);
		loadsegment(ds, 0);
		loadsegment(fs, 0);
#else
		/* PVH is only supported on x86_64. */
		BUG();
#endif
		return;
	}
	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
	pv_cpu_ops.load_gdt = xen_load_gdt_boot;

	setup_stack_canary_segment(0);
	switch_to_new_gdt(0);

	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
	pv_cpu_ops.load_gdt = xen_load_gdt;
}

#ifdef CONFIG_XEN_PVH
/*
 * A PV guest starts with default flags that are not set for PVH; set
 * them here asap.
 */
static void xen_pvh_set_cr_flags(int cpu)
{
	/*
	 * Some of these are set up in secondary_startup_64.  The
	 * others (X86_CR0_TS, X86_CR0_PG, X86_CR0_PE) are set by Xen
	 * for HVM guests, with which PVH shares code paths.
	 */
	write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);

	if (!cpu)
		return;
	/*
	 * For the BSP, PSE and PGE are set in probe_page_size_mask();
	 * for APs set them here.
	 */
	if (cpu_has_pse)
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	if (cpu_has_pge)
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
}

/*
 * Note: this is __ref because after init it is only called from the
 * PVH CPU bringup path, which does not use other __init functions.
 */
void __ref xen_pvh_secondary_vcpu_init(int cpu)
{
	xen_setup_gdt(cpu);
	xen_pvh_set_cr_flags(cpu);
}

static void __init xen_pvh_early_guest_init(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		return;

	if (!xen_feature(XENFEAT_hvm_callback_vector))
		return;

	xen_have_vector_callback = 1;

	xen_pvh_early_cpu_init(0, false);
	xen_pvh_set_cr_flags(0);

#ifdef CONFIG_X86_32
	BUG(); /* PVH: implement proper support. */
#endif
}
#endif    /* CONFIG_XEN_PVH */

/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(void)
{
	struct physdev_set_iopl set_iopl;
	unsigned long initrd_start = 0;
	int rc;

	if (!xen_start_info)
		return;

	xen_domain_type = XEN_PV_DOMAIN;

	xen_setup_features();
#ifdef CONFIG_XEN_PVH
	xen_pvh_early_guest_init();
#endif
	xen_setup_machphys_mapping();

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_apic_ops = xen_apic_ops;
	if (!xen_pvh_domain()) {
		pv_cpu_ops = xen_cpu_ops;

		x86_platform.get_nmi_reason = xen_get_nmi_reason;
	}

	if (xen_feature(XENFEAT_auto_translated_physmap))
		x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
	else
		x86_init.resources.memory_setup = xen_memory_setup;
	x86_init.oem.arch_setup = xen_arch_setup;
	x86_init.oem.banner = xen_banner;

	xen_init_time_ops();

	/*
	 * Set up some pagetable state before starting to set any ptes.
	 */
	xen_init_mmu_ops();

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;

	/*
	 * Prevent page tables from being allocated in highmem, even
	 * if CONFIG_HIGHPTE is enabled.
	 */
	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

	/* Work out if we support NX */
	x86_configure_nx();

	/* Get mfn list */
	xen_build_dynamic_phys_to_machine();

	/*
	 * Set up kernel GDT and segment registers, mainly so that
	 * -fstack-protector code can be executed.
	 */
	xen_setup_gdt(0);

	xen_init_irq_ops();
	xen_init_cpuid_mask();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Set up the basic apic ops.
	 */
	set_xen_basic_apic_ops();
#endif

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * The pages we get from Xen are not related to machine pages,
	 * so any NUMA information the kernel tries to get from ACPI
	 * will be meaningless.  Prevent it from trying.
	 */
	acpi_numa = -1;
#endif

	/*
	 * Don't do the full vcpu_info placement stuff until we have a
	 * possible map and a non-dummy shared_info.
	 */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	local_irq_disable();
	early_boot_irqs_disabled = true;

	xen_raw_console_write("mapping kernel into physical memory\n");
	xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages);

	/*
	 * Modify the cache mode translation tables to match Xen's PAT
	 * configuration.
	 */
	pat_init_cache_modes();

	/* keep using Xen gdt for now; no urgent need to change it */

#ifdef CONFIG_X86_32
	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;
#else
	pv_info.kernel_rpl = 0;
#endif

	xen_reserve_top();

	if (!xen_pvh_domain()) {
		/*
		 * We used to do this in xen_arch_setup, but that is
		 * too late on AMD, where early_cpu_init (run before
		 * ->arch_setup()) calls early_amd_init, which pokes
		 * the 0xcf8 port.
		 */
		set_iopl.iopl = 1;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
		if (rc != 0)
			xen_raw_printk("physdev_op failed %d\n", rc);
	}

#ifdef CONFIG_X86_32
	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
	new_cpu_data.wp_works_ok = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif

	if (xen_start_info->mod_start) {
		if (xen_start_info->flags & SIF_MOD_START_PFN)
			initrd_start = PFN_PHYS(xen_start_info->mod_start);
		else
			initrd_start = __pa(xen_start_info->mod_start);
	}

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = initrd_start;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);

	if (!xen_initial_domain()) {
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
		if (pci_xen)
			x86_init.pci.arch_init = pci_xen_init;
	} else {
		const struct dom0_vga_console_info *info =
			(void *)((char *)xen_start_info +
				 xen_start_info->console.dom0.info_off);
		struct xen_platform_op op = {
			.cmd = XENPF_firmware_info,
			.interface_version = XENPF_INTERFACE_VERSION,
			.u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
		};

		xen_init_vga(info, xen_start_info->console.dom0.info_size);
		xen_start_info->console.domU.mfn = 0;
		xen_start_info->console.domU.evtchn = 0;

		if (HYPERVISOR_dom0_op(&op) == 0)
			boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;

		xen_init_apic();

		/* Make sure ACS will be enabled */
		pci_request_acs();

		xen_acpi_sleep_register();

		/* Avoid searching for BIOS MP tables */
		x86_init.mpparse.find_smp_config = x86_init_noop;
		x86_init.mpparse.get_smp_config = x86_init_uint_noop;

		xen_boot_params_init_edd();
	}
#ifdef CONFIG_PCI
	/* PCI BIOS service won't work from a PV guest. */
	pci_probe &= ~PCI_PROBE_BIOS;
#endif
	xen_raw_console_write("about to get started...\n");

	xen_setup_runstate_info(0);

	xen_efi_init();

	/* Start the world */
#ifdef CONFIG_X86_32
	i386_start_kernel();
#else
	cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}

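/*
 * Map the shared_info page into the guest address space via
 * XENMEM_add_to_physmap and point each online vcpu's xen_vcpu at its
 * slot.  Used by PVHVM guests at boot and again on resume.
 */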
void __ref xen_hvm_init_shared_info(void)
{
	int cpu;
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page;

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			extend_brk(PAGE_SIZE, PAGE_SIZE);
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/*
	 * xen_vcpu is a pointer to the vcpu_info struct in the
	 * shared_info page; we use it in the event channel upcall and
	 * in some pvclock-related functions.  We don't need the
	 * vcpu_info placement optimizations because we don't use any
	 * pv_mmu or pv_irq op on HVM.  When this function runs at
	 * boot, only vcpu 0 is online, but it also runs on resume,
	 * when multiple vcpus may be online.
	 */
	for_each_online_cpu(cpu) {
		/* Leave it to be NULL. */
		if (cpu >= MAX_VIRT_CPUS)
			continue;
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
	}
}

#ifdef CONFIG_XEN_PVHVM
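/* Detect Xen via the CPUID hypervisor leaves and install the hypercall page. */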
static void __init init_hvm_pv_info(void)
{
	int major, minor;
	uint32_t eax, ebx, ecx, edx, pages, msr, base;
	u64 pfn;

	base = xen_cpuid_base();
	cpuid(base + 1, &eax, &ebx, &ecx, &edx);

	major = eax >> 16;
	minor = eax & 0xffff;
	printk(KERN_INFO "Xen version %d.%d.\n", major, minor);

	cpuid(base + 2, &pages, &msr, &ecx, &edx);

	pfn = __pa(hypercall_page);
	wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));

	xen_setup_features();

	pv_info.name = "Xen HVM";

	xen_domain_type = XEN_HVM_DOMAIN;
}

static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
			      void *hcpu)
{
	int cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
		xen_vcpu_setup(cpu);
		if (xen_have_vector_callback) {
			if (xen_feature(XENFEAT_hvm_safe_pvclock))
				xen_setup_timer(cpu);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block xen_hvm_cpu_notifier = {
	.notifier_call = xen_hvm_cpu_notify,
};

static void __init xen_hvm_guest_init(void)
{
	init_hvm_pv_info();

	xen_hvm_init_shared_info();

	xen_panic_handler_init();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;
	xen_hvm_smp_init();
	register_cpu_notifier(&xen_hvm_cpu_notifier);
	xen_unplug_emulated_devices();
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();
}

static bool xen_nopv = false;
static __init int xen_parse_nopv(char *arg)
{
	xen_nopv = true;
	return 0;
}
early_param("xen_nopv", xen_parse_nopv);

static uint32_t __init xen_hvm_platform(void)
{
	if (xen_nopv)
		return 0;

	if (xen_pv_domain())
		return 0;

	return xen_cpuid_base();
}

bool xen_hvm_need_lapic(void)
{
	if (xen_nopv)
		return false;
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);

const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
	.name = "Xen HVM",
	.detect = xen_hvm_platform,
	.init_platform = xen_hvm_guest_init,
	.x2apic_available = xen_x2apic_para_available,
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);
#endif