/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 *  - privileged instructions
 *  - interrupt flags
 *  - segment operations
 *  - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvm.h>
#include <xen/hvc-console.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>

#include "xen-ops.h"
#include "mmu.h"
#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned int machine_to_phys_order;
EXPORT_SYMBOL(machine_to_phys_order);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

void *xen_initial_gdt;

RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

/*
 * Point at some empty memory to start with.  We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs.  We assume it is to start with, and then set it to zero on
 * the first failure.  This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (ie buggy guest!).
 *
 * Note that any particular CPU may be using a placed vcpu structure,
 * but we can only optimise if they all are.
 *
 * 0: not available, 1: available
 */
static int have_vcpu_info_placement = 1;

static void clamp_max_cpus(void)
{
#ifdef CONFIG_SMP
	if (setup_max_cpus > MAX_VIRT_CPUS)
		setup_max_cpus = MAX_VIRT_CPUS;
#endif
}

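/*
 * Ask the hypervisor to place this CPU's vcpu_info inside the per-cpu
 * xen_vcpu_info area so it can be reached directly.  If that is not
 * possible, fall back to the fixed vcpu_info array in the shared_info
 * page, which only covers the first MAX_VIRT_CPUS CPUs.
 */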
static void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	if (cpu < MAX_VIRT_CPUS)
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

	if (!have_vcpu_info_placement) {
		if (cpu >= MAX_VIRT_CPUS)
			clamp_max_cpus();
		return;
	}

	vcpup = &per_cpu(xen_vcpu_info, cpu);
	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	/* Check to see if the hypervisor will put the vcpu_info
	   structure where we want it, which allows direct access via
	   a percpu-variable. */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
		clamp_max_cpus();
	} else {
		/* This cpu is using the registered vcpu info, even if
		   later ones fail to. */
		per_cpu(xen_vcpu, cpu) = vcpup;
	}
}

/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a crap place and can't
 * continue.
 */
void xen_vcpu_restore(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		bool other_cpu = (cpu != smp_processor_id());

		if (other_cpu &&
		    HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
			BUG();

		xen_setup_runstate_info(cpu);

		if (have_vcpu_info_placement)
			xen_vcpu_setup(cpu);

		if (other_cpu &&
		    HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
			BUG();
	}
}

static void __init xen_banner(void)
{
	unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
	struct xen_extraversion extra;
	HYPERVISOR_xen_version(XENVER_extraversion, &extra);

	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
	printk(KERN_INFO "Xen version: %d.%d%s%s\n",
	       version >> 16, version & 0xffff, extra.extraversion,
	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}

static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;

static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned maskebx = ~0;
	unsigned maskecx = ~0;
	unsigned maskedx = ~0;

	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	switch (*ax) {
	case 1:
		maskecx = cpuid_leaf1_ecx_mask;
		maskedx = cpuid_leaf1_edx_mask;
		break;

	case 0xb:
		/* Suppress extended topology stuff */
		maskebx = 0;
		break;
	}

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));

	*bx &= maskebx;
	*cx &= maskecx;
	*dx &= maskedx;
}

static __init void xen_init_cpuid_mask(void)
{
	unsigned int ax, bx, cx, dx;

	cpuid_leaf1_edx_mask =
		~((1 << X86_FEATURE_MCE)  |  /* disable MCE */
		  (1 << X86_FEATURE_MCA)  |  /* disable MCA */
		  (1 << X86_FEATURE_MTRR) |  /* disable MTRR */
		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	if (!xen_initial_domain())
		cpuid_leaf1_edx_mask &=
			~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
			  (1 << X86_FEATURE_ACPI));  /* disable ACPI */

	ax = 1;
	cx = 0;
	xen_cpuid(&ax, &bx, &cx, &dx);

	/* cpuid claims we support xsave; try enabling it to see what it
	   really supports */
	if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
		unsigned long cr4;

		set_in_cr4(X86_CR4_OSXSAVE);

		cr4 = read_cr4();

		if ((cr4 & X86_CR4_OSXSAVE) == 0)
			cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));

		clear_in_cr4(X86_CR4_OSXSAVE);
	}
}

static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

static void xen_end_context_switch(struct task_struct *next)
{
	xen_mc_flush();
	paravirt_end_context_switch(next);
}

static unsigned long xen_store_tr(void)
{
	return 0;
}

/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protection to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();
}

static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}

static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages..
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		int level;
		pte_t *ptep;
		unsigned long pfn, mfn;
		void *virt;

		/*
		 * The GDT is per-cpu and is in the percpu data area.
		 * That can be virtually mapped, so we need to do a
		 * page-walk to get the underlying MFN for the
		 * hypercall.  The page can also be in the kernel's
		 * linear range, so we need to make that mapping
		 * read-only too.
		 */
		ptep = lookup_address(va, &level);
		BUG_ON(ptep == NULL);

		pfn = pte_pfn(*ptep);
		mfn = pfn_to_mfn(pfn);
		virt = __va(PFN_PHYS(pfn));

		frames[f] = mfn;

		make_lowmem_page_readonly((void *)va);
		make_lowmem_page_readonly(virt);
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}

/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static __init void xen_load_gdt_boot(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages..
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		pte_t pte;
		unsigned long pfn, mfn;

		pfn = virt_to_pfn(va);
		mfn = pfn_to_mfn(pfn);

		pte = pfn_pte(pfn, PAGE_KERNEL_RO);

		if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
			BUG();

		frames[f] = mfn;
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	xmaddr_t maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	struct multicall_space mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone,
	 * it means we're in a context switch, and %gs has just been
	 * saved.  This means we can zero it out to prevent faults on
	 * exit from the hypervisor if the next process has no %gs.
	 * Either way, it has been saved, and the new value will get
	 * loaded properly.  This will go away as soon as Xen has been
	 * modified to not save/restore %gs for normal hypercalls.
	 *
	 * On x86_64, this hack is not used for %gs, because gs points
	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
	 * must not zero %gs on x86_64.
	 *
	 * For x86_64, we need to zero %fs, irrespective of whether we
	 * are in 32- or 64-bit mode.
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

#ifdef CONFIG_X86_64
static void xen_load_gs_index(unsigned int idx)
{
	if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
		BUG();
}
#endif

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{
	xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
	u64 entry = *(u64 *)ptr;

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}

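/*
 * Convert an IDT gate descriptor into the trap_info format expected by
 * HYPERVISOR_set_trap_table.  Returns 0 for gates Xen either handles
 * itself or cannot use (anything other than trap and interrupt gates).
 */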
static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{
	unsigned long addr;

	if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
		return 0;

	info->vector = vector;

	addr = gate_offset(*val);
#ifdef CONFIG_X86_64
	/*
	 * Look for known traps using IST, and substitute them
	 * appropriately.  The debugger ones are the only ones we care
	 * about.  Xen will handle faults like double_fault,
	 * so we should never see them.  Warn if
	 * there's an unexpected IST-using fault handler.
	 */
	if (addr == (unsigned long)debug)
		addr = (unsigned long)xen_debug;
	else if (addr == (unsigned long)int3)
		addr = (unsigned long)xen_int3;
	else if (addr == (unsigned long)stack_segment)
		addr = (unsigned long)xen_stack_segment;
	else if (addr == (unsigned long)double_fault ||
		 addr == (unsigned long)nmi) {
		/* Don't need to handle these */
		return 0;
#ifdef CONFIG_X86_MCE
	} else if (addr == (unsigned long)machine_check) {
		return 0;
#endif
	} else {
		/* Some other trap using IST? */
		if (WARN_ON(val->ist != 0))
			return 0;
	}
#endif
	info->address = addr;

	info->cs = gate_segment(*val);
	info->flags = val->dpl;
	/* interrupt gates clear IF */
	if (val->type == GATE_INTERRUPT)
		info->flags |= 1 << 2;

	return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/* Set an IDT entry.  If the entry is part of the current IDT, then
   also update Xen. */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	preempt_disable();

	start = __this_cpu_read(idt_desc.address);
	end = start + __this_cpu_read(idt_desc.size) + 1;

	xen_mc_flush();

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, g, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}

static void xen_convert_trap_info(const struct desc_ptr *desc,
				  struct trap_info *traps)
{
	unsigned in, out, count;

	count = (desc->size + 1) / sizeof(gate_desc);
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		gate_desc *entry = (gate_desc *)(desc->address) + in;

		if (cvt_gate_to_trap(in, entry, &traps[out]))
			out++;
	}
	traps[out].address = 0;
}

void xen_copy_trap_info(struct trap_info *traps)
{
	const struct desc_ptr *desc = &__get_cpu_var(idt_desc);

	xen_convert_trap_info(desc, traps);
}

/* Load a new IDT into Xen.  In principle this could be per-cpu, but
   the static traps[] array is shared, so a lock serialises callers. */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	spin_lock(&lock);

	__get_cpu_var(idt_desc) = *desc;

	xen_convert_trap_info(desc, traps);

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}

/* Write a GDT descriptor entry.  Ignore LDT and TSS descriptors, since
   they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}

	}

	preempt_enable();
}

/*
 * Version of write_gdt_entry for use at early boot-time needed to
 * update an entry as simply as possible.
 */
static __init void xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
					    const void *desc, int type)
{
	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			dt[entry] = *(struct desc_struct *)desc;
	}

	}
}

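/*
 * Tell Xen which kernel stack to use on a transition from user mode.
 * The switch is done via the stack_switch hypercall rather than by
 * writing sp0 into a hardware TSS.
 */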
static void xen_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	struct multicall_space mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

static void xen_io_delay(void)
{
}

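/*
 * A PV guest has no usable local APIC, so the apic ops below are stubs:
 * reads return 0 and writes only warn about stray references.
 */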
#ifdef CONFIG_X86_LOCAL_APIC
static u32 xen_apic_read(u32 reg)
{
	return 0;
}

static void xen_apic_write(u32 reg, u32 val)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static u64 xen_apic_icr_read(void)
{
	return 0;
}

static void xen_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static void xen_apic_wait_icr_idle(void)
{
	return;
}

static u32 xen_safe_apic_wait_icr_idle(void)
{
	return 0;
}

static void set_xen_basic_apic_ops(void)
{
	apic->read = xen_apic_read;
	apic->write = xen_apic_write;
	apic->icr_read = xen_apic_icr_read;
	apic->icr_write = xen_apic_icr_write;
	apic->wait_icr_idle = xen_apic_wait_icr_idle;
	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
}

#endif

static void xen_clts(void)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

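/*
 * Reading %cr0 would trap to the hypervisor, so cache the value most
 * recently written on each CPU and only do a real read while the cache
 * is still empty.
 */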
static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

static unsigned long xen_read_cr0(void)
{
	unsigned long cr0 = percpu_read(xen_cr0_value);

	if (unlikely(cr0 == 0)) {
		cr0 = native_read_cr0();
		percpu_write(xen_cr0_value, cr0);
	}

	return cr0;
}

static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	percpu_write(xen_cr0_value, cr0);

	/* Only pay attention to cr0.TS; everything else is
	   ignored. */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

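/*
 * Xen PV guests may not enable global pages or PSE, so those bits are
 * masked out before %cr4 is written.
 */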
static void xen_write_cr4(unsigned long cr4)
{
	cr4 &= ~X86_CR4_PGE;
	cr4 &= ~X86_CR4_PSE;

	native_write_cr4(cr4);
}

static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
	int ret;

	ret = 0;

	switch (msr) {
#ifdef CONFIG_X86_64
	unsigned which;
	u64 base;

	case MSR_FS_BASE:		which = SEGBASE_FS; goto set;
	case MSR_KERNEL_GS_BASE:	which = SEGBASE_GS_USER; goto set;
	case MSR_GS_BASE:		which = SEGBASE_GS_KERNEL; goto set;

	set:
		base = ((u64)high << 32) | low;
		if (HYPERVISOR_set_segment_base(which, base) != 0)
			ret = -EIO;
		break;
#endif

	case MSR_STAR:
	case MSR_CSTAR:
	case MSR_LSTAR:
	case MSR_SYSCALL_MASK:
	case MSR_IA32_SYSENTER_CS:
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
		/* Fast syscall setup is all done in hypercalls, so
		   these are all ignored.  Stub them out here to stop
		   Xen console noise. */
		break;

	case MSR_IA32_CR_PAT:
		if (smp_processor_id() == 0)
			xen_set_pat(((u64)high << 32) | low);
		break;

	default:
		ret = native_write_msr_safe(msr, low, high);
	}

	return ret;
}

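/*
 * Switch from the dummy shared_info page used during early boot to the
 * real one provided by the hypervisor, and redo any setup that depends
 * on its contents.
 */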
void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}

/* This is called once we have the cpu_possible_map */
void xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		xen_vcpu_setup(cpu);

	/* xen_vcpu_setup managed to place the vcpu_info within the
	   percpu area for all cpus, so make use of it */
	if (have_vcpu_info_placement) {
		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
	}
}

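/*
 * Inline-patch the hottest irq paravirt operations with their direct
 * (vcpu_info-placement) variants when possible; anything else falls
 * back to the generic paravirt patching.
 */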
static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	char *start, *end, *reloc;
	unsigned ret;

	start = end = reloc = NULL;

#define SITE(op, x)							\
	case PARAVIRT_PATCH(op.x):					\
	if (have_vcpu_info_placement) {					\
		start = (char *)xen_##x##_direct;			\
		end = xen_##x##_direct_end;				\
		reloc = xen_##x##_direct_reloc;				\
	}								\
	goto patch_site

	switch (type) {
		SITE(pv_irq_ops, irq_enable);
		SITE(pv_irq_ops, irq_disable);
		SITE(pv_irq_ops, save_fl);
		SITE(pv_irq_ops, restore_fl);
#undef SITE

	patch_site:
		if (start == NULL || (end-start) > len)
			goto default_patch;

		ret = paravirt_patch_insns(insnbuf, len, start, end);

		/* Note: because reloc is assigned from something that
		   appears to be an array, gcc assumes it's non-null,
		   but doesn't know its relationship with start and
		   end. */
		if (reloc > start && reloc < end) {
			int reloc_off = reloc - start;
			long *relocp = (long *)(insnbuf + reloc_off);
			long delta = start - (char *)addr;

			*relocp += delta;
		}
		break;

	default_patch:
	default:
		ret = paravirt_patch_default(type, clobbers, insnbuf,
					     addr, len);
		break;
	}

	return ret;
}

static const struct pv_info xen_info __initdata = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,

	.name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initdata = {
	.patch = xen_patch,
};

static const struct pv_cpu_ops xen_cpu_ops __initdata = {
	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = xen_clts,

	.read_cr0 = xen_read_cr0,
	.write_cr0 = xen_write_cr0,

	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = xen_write_cr4,

	.wbinvd = native_wbinvd,

	.read_msr = native_read_msr_safe,
	.write_msr = xen_write_msr_safe,
	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,

	.iret = xen_iret,
	.irq_enable_sysexit = xen_sysexit,
#ifdef CONFIG_X86_64
	.usergs_sysret32 = xen_sysret32,
	.usergs_sysret64 = xen_sysret64,
#endif

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
	.load_gs_index = xen_load_gs_index,
#endif

	.alloc_ldt = xen_alloc_ldt,
	.free_ldt = xen_free_ldt,

	.store_gdt = native_store_gdt,
	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_sp0 = xen_load_sp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

	/* Xen takes care of %gs when switching to usermode for us */
	.swapgs = paravirt_nop,

	.start_context_switch = paravirt_start_context_switch,
	.end_context_switch = xen_end_context_switch,
};

static const struct pv_apic_ops xen_apic_ops __initdata = {
#ifdef CONFIG_X86_LOCAL_APIC
	.startup_ipi_hook = paravirt_nop,
#endif
};

static void xen_reboot(int reason)
{
	struct sched_shutdown r = { .reason = reason };

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
		BUG();
}

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}

static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	xen_reboot(SHUTDOWN_crash);
	return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
	.notifier_call = xen_panic_event,
};

int xen_panic_handler_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
	return 0;
}

static const struct machine_ops __initdata xen_machine_ops = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_halt,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};

/*
 * Set up the GDT and segment registers for -fstack-protector.  Until
 * we do this, we have to be careful not to call any stack-protected
 * function, which is most of the kernel.
 */
static void __init xen_setup_stackprotector(void)
{
	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
	pv_cpu_ops.load_gdt = xen_load_gdt_boot;

	setup_stack_canary_segment(0);
	switch_to_new_gdt(0);

	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
	pv_cpu_ops.load_gdt = xen_load_gdt;
}

/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
	struct physdev_set_iopl set_iopl;
	int rc;
	pgd_t *pgd;

	if (!xen_start_info)
		return;

	xen_domain_type = XEN_PV_DOMAIN;

	xen_setup_machphys_mapping();

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_cpu_ops = xen_cpu_ops;
	pv_apic_ops = xen_apic_ops;

	x86_init.resources.memory_setup = xen_memory_setup;
	x86_init.oem.arch_setup = xen_arch_setup;
	x86_init.oem.banner = xen_banner;

	xen_init_time_ops();

	/*
	 * Set up some pagetable state before starting to set any ptes.
	 */

	xen_init_mmu_ops();

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (!xen_initial_domain())
		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

	__supported_pte_mask |= _PAGE_IOMAP;

	/*
	 * Prevent page tables from being allocated in highmem, even
	 * if CONFIG_HIGHPTE is enabled.
	 */
	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

	/* Work out if we support NX */
	x86_configure_nx();

	xen_setup_features();

	/* Get mfn list */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_build_dynamic_phys_to_machine();

	/*
	 * Set up kernel GDT and segment registers, mainly so that
	 * -fstack-protector code can be executed.
	 */
	xen_setup_stackprotector();

	xen_init_irq_ops();
	xen_init_cpuid_mask();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * set up the basic apic ops.
	 */
	set_xen_basic_apic_ops();
#endif

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * The pages we get from Xen are not related to machine pages, so
	 * any NUMA information the kernel tries to get from ACPI will
	 * be meaningless.  Prevent it from trying.
	 */
	acpi_numa = -1;
#endif

	pgd = (pgd_t *)xen_start_info->pt_base;

	if (!xen_initial_domain())
		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

	__supported_pte_mask |= _PAGE_IOMAP;
	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map and a non-dummy shared_info. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	local_irq_disable();
	early_boot_irqs_disabled = true;

	memblock_init();

	xen_raw_console_write("mapping kernel into physical memory\n");
	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
	xen_ident_map_ISA();

	/* Allocate and initialize top and mid mfn levels for p2m structure */
	xen_build_mfn_list_list();

	/* keep using Xen gdt for now; no urgent need to change it */

#ifdef CONFIG_X86_32
	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;
#else
	pv_info.kernel_rpl = 0;
#endif
	/* set the limit of our address space */
	xen_reserve_top();

	/* We used to do this in xen_arch_setup, but that is too late on AMD
	 * where early_cpu_init (run before ->arch_setup()) calls early_amd_init
	 * which pokes the 0xcf8 port.
	 */
	set_iopl.iopl = 1;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
	if (rc != 0)
		xen_raw_printk("physdev_op failed %d\n", rc);

#ifdef CONFIG_X86_32
	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	new_cpu_data.hard_math = 1;
	new_cpu_data.wp_works_ok = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = xen_start_info->mod_start
		? __pa(xen_start_info->mod_start) : 0;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);

	if (!xen_initial_domain()) {
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
		if (pci_xen)
			x86_init.pci.arch_init = pci_xen_init;
	} else {
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

	xen_raw_console_write("about to get started...\n");

	xen_setup_runstate_info(0);

	/* Start the world */
#ifdef CONFIG_X86_32
	i386_start_kernel();
#else
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}

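/*
 * PV-on-HVM setup: read the Xen version from the hypervisor CPUID leaves
 * and install the hypercall page by writing its PFN to the MSR that the
 * hypervisor advertises.
 */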
static int init_hvm_pv_info(int *major, int *minor)
{
	uint32_t eax, ebx, ecx, edx, pages, msr, base;
	u64 pfn;

	base = xen_cpuid_base();
	cpuid(base + 1, &eax, &ebx, &ecx, &edx);

	*major = eax >> 16;
	*minor = eax & 0xffff;
	printk(KERN_INFO "Xen version %d.%d.\n", *major, *minor);

	cpuid(base + 2, &pages, &msr, &ecx, &edx);

	pfn = __pa(hypercall_page);
	wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));

	xen_setup_features();

	pv_info = xen_info;
	pv_info.kernel_rpl = 0;

	xen_domain_type = XEN_HVM_DOMAIN;

	return 0;
}

void xen_hvm_init_shared_info(void)
{
	int cpu;
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page = 0;

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			extend_brk(PAGE_SIZE, PAGE_SIZE);
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the
	 * shared_info page; we use it in the event channel upcall
	 * and in some pvclock related functions.  We don't need the
	 * vcpu_info placement optimizations because we don't use any
	 * pv_mmu or pv_cpu ops on HVM.
	 * When this is run at boot time only vcpu 0 is online, but it
	 * is also run on resume, when multiple vcpus may already be
	 * online. */
	for_each_online_cpu(cpu) {
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
	}
}

#ifdef CONFIG_XEN_PVHVM
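/*
 * When a CPU is being brought up, point its xen_vcpu at the corresponding
 * vcpu_info slot in the shared_info page.
 */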
static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata xen_hvm_cpu_notifier = {
	.notifier_call = xen_hvm_cpu_notify,
};

static void __init xen_hvm_guest_init(void)
{
	int r;
	int major, minor;

	r = init_hvm_pv_info(&major, &minor);
	if (r < 0)
		return;

	xen_hvm_init_shared_info();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;
	register_cpu_notifier(&xen_hvm_cpu_notifier);
	xen_unplug_emulated_devices();
	have_vcpu_info_placement = 0;
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();
}

static bool __init xen_hvm_platform(void)
{
	if (xen_pv_domain())
		return false;

	if (!xen_cpuid_base())
		return false;

	return true;
}

bool xen_hvm_need_lapic(void)
{
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);

const __refconst struct hypervisor_x86 x86_hyper_xen_hvm = {
	.name = "Xen HVM",
	.detect = xen_hvm_platform,
	.init_platform = xen_hvm_guest_init,
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);
#endif