1#include <linux/acpi.h>
2#include <linux/cpu.h>
3#include <linux/kexec.h>
4#include <linux/memblock.h>
5
6#include <xen/features.h>
7#include <xen/events.h>
8#include <xen/interface/memory.h>
9
10#include <asm/cpu.h>
11#include <asm/smp.h>
12#include <asm/reboot.h>
13#include <asm/setup.h>
14#include <asm/hypervisor.h>
15#include <asm/e820/api.h>
16#include <asm/early_ioremap.h>
17
18#include <asm/xen/cpuid.h>
19#include <asm/xen/hypervisor.h>
20#include <asm/xen/page.h>
21
22#include "xen-ops.h"
23#include "mmu.h"
24#include "smp.h"
25
/* PFN of the page backing HYPERVISOR_shared_info; chosen in reserve_shared_info(). */
static unsigned long shared_info_pfn;
27
28void xen_hvm_init_shared_info(void)
29{
30 struct xen_add_to_physmap xatp;
31
32 xatp.domid = DOMID_SELF;
33 xatp.idx = 0;
34 xatp.space = XENMAPSPACE_shared_info;
35 xatp.gpfn = shared_info_pfn;
36 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
37 BUG();
38}
39
40static void __init reserve_shared_info(void)
41{
42 u64 pa;
43
44
45
46
47
48
49
50
51
52 for (pa = PAGE_SIZE;
53 !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
54 memblock_is_reserved(pa);
55 pa += PAGE_SIZE)
56 ;
57
58 shared_info_pfn = PHYS_PFN(pa);
59
60 memblock_reserve(pa, PAGE_SIZE);
61 HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
62}
63
64static void __init xen_hvm_init_mem_mapping(void)
65{
66 early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
67 HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
68}
69
70static void __init init_hvm_pv_info(void)
71{
72 int major, minor;
73 uint32_t eax, ebx, ecx, edx, base;
74
75 base = xen_cpuid_base();
76 eax = cpuid_eax(base + 1);
77
78 major = eax >> 16;
79 minor = eax & 0xffff;
80 printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
81
82 xen_domain_type = XEN_HVM_DOMAIN;
83
84
85 if (xen_pvh_domain())
86 pv_info.name = "Xen PVH";
87 else {
88 u64 pfn;
89 uint32_t msr;
90
91 pv_info.name = "Xen HVM";
92 msr = cpuid_ebx(base + 2);
93 pfn = __pa(hypercall_page);
94 wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
95 }
96
97 xen_setup_features();
98
99 cpuid(base + 4, &eax, &ebx, &ecx, &edx);
100 if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
101 this_cpu_write(xen_vcpu_id, ebx);
102 else
103 this_cpu_write(xen_vcpu_id, smp_processor_id());
104}
105
#ifdef CONFIG_KEXEC_CORE
/*
 * Shutdown hook installed when kexec support is built in: after the
 * native shutdown, ask Xen for a soft reset so the kexec'ed kernel
 * starts from a clean hypervisor state.
 */
static void xen_hvm_shutdown(void)
{
	native_machine_shutdown();

	if (!kexec_in_progress)
		return;

	xen_reboot(SHUTDOWN_soft_reset);
}

/* Crash variant: always soft-reset after the native crash shutdown. */
static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
	native_machine_crash_shutdown(regs);
	xen_reboot(SHUTDOWN_soft_reset);
}
#endif
120
121static int xen_cpu_up_prepare_hvm(unsigned int cpu)
122{
123 int rc = 0;
124
125
126
127
128
129 if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
130 xen_smp_intr_free(cpu);
131 xen_uninit_lock_cpu(cpu);
132 }
133
134 if (cpu_acpi_id(cpu) != U32_MAX)
135 per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
136 else
137 per_cpu(xen_vcpu_id, cpu) = cpu;
138 rc = xen_vcpu_setup(cpu);
139 if (rc)
140 return rc;
141
142 if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
143 xen_setup_timer(cpu);
144
145 rc = xen_smp_intr_init(cpu);
146 if (rc) {
147 WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
148 cpu, rc);
149 }
150 return rc;
151}
152
153static int xen_cpu_dead_hvm(unsigned int cpu)
154{
155 xen_smp_intr_free(cpu);
156
157 if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
158 xen_teardown_timer(cpu);
159
160 return 0;
161}
162
/*
 * Platform init hook for a Xen HVM/PVH guest (wired up via
 * x86_hyper_xen_hvm.init.init_platform).  Ordering below matters:
 * the shared info page must be mapped before vcpu_info is reset, and
 * the cpuhp callbacks are registered before SMP bringup.
 */
static void __init xen_hvm_guest_init(void)
{
	/* PV domains are initialized elsewhere; nothing to do here. */
	if (xen_pv_domain())
		return;

	init_hvm_pv_info();

	/* Pick and map a page for shared_info, then register it with Xen. */
	reserve_shared_info();
	xen_hvm_init_shared_info();

	/*
	 * xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page; it is used in the event channel upcall and in some pvclock
	 * related functions.
	 */
	xen_vcpu_info_reset(0);

	xen_panic_handler_init();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;

	xen_hvm_smp_init();
	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
	xen_unplug_emulated_devices();
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();

#ifdef CONFIG_KEXEC_CORE
	/* Route (crash) shutdown through Xen so kexec gets a clean slate. */
	machine_ops.shutdown = xen_hvm_shutdown;
	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}
197
/*
 * "xen_nopv" on the kernel command line: opt out of Xen HVM detection
 * (see xen_platform_hvm()) and of the PV lapic decision below.
 */
static bool xen_nopv;
static __init int xen_parse_nopv(char *arg)
{
	/* Presence of the option is all that matters; arg is ignored. */
	xen_nopv = true;
	return 0;
}
early_param("xen_nopv", xen_parse_nopv);
205
206bool xen_hvm_need_lapic(void)
207{
208 if (xen_nopv)
209 return false;
210 if (xen_pv_domain())
211 return false;
212 if (!xen_hvm_domain())
213 return false;
214 if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
215 return false;
216 return true;
217}
218EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
219
220static uint32_t __init xen_platform_hvm(void)
221{
222 if (xen_pv_domain() || xen_nopv)
223 return 0;
224
225 return xen_cpuid_base();
226}
227
/*
 * Late PVH detection: a guest started via the PVH boot path already has
 * xen_pvh set; otherwise infer PVH from the absence of legacy platform
 * devices (no RTC and no VGA).
 */
static __init void xen_hvm_guest_late_init(void)
{
#ifdef CONFIG_XEN_PVH
	/* Test for PVH domain (PVH boot path taken overrides ACPI flags). */
	if (!xen_pvh &&
	    (x86_platform.legacy.rtc || !x86_platform.legacy.no_vga))
		return;

	/* PVH detected. */
	xen_pvh = true;

	/* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */
	if (!nr_ioapics && acpi_irq_model == ACPI_IRQ_MODEL_PIC)
		acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;

	machine_ops.emergency_restart = xen_emergency_restart;
	pv_info.name = "Xen PVH";
#endif
}
247
/*
 * Registration record for the x86 hypervisor-detection framework:
 * .detect reports the Xen CPUID base when running as a Xen HVM/PVH
 * guest, and the init/runtime hooks plug the Xen setup functions
 * defined above into the boot sequence.
 */
const __initconst struct hypervisor_x86 x86_hyper_xen_hvm = {
	.name = "Xen HVM",
	.detect = xen_platform_hvm,
	.type = X86_HYPER_XEN_HVM,
	.init.init_platform = xen_hvm_guest_init,
	.init.x2apic_available = xen_x2apic_para_available,
	.init.init_mem_mapping = xen_hvm_init_mem_mapping,
	.init.guest_late_init = xen_hvm_guest_late_init,
	.runtime.pin_vcpu = xen_pin_vcpu,
};
258