#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
#include <linux/bootmem.h>
#endif
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/slab.h>

#include <xen/features.h>
#include <xen/page.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/cpu.h>
#include <asm/e820/api.h>

#include "xen-ops.h"
#include "smp.h"
#include "pmu.h"

EXPORT_SYMBOL_GPL(hypercall_page);

/*
 * Pointer to each CPU's vcpu_info structure.  It points either at the
 * vCPU's slot in the shared info page, or at the per-CPU xen_vcpu_info
 * copy once vcpu info placement has been registered with the hypervisor.
 */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

/* Per-CPU copy used when the hypervisor supports vcpu info placement. */
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

/* Xen vcpu id of each CPU (Linux CPU number to Xen vCPU number). */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long machine_to_phys_nr;
EXPORT_SYMBOL(machine_to_phys_nr);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

/* Dummy shared_info structure, used until the real shared info page is mapped. */
struct shared_info xen_dummy_shared_info;

__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

/*
 * Kept in .data rather than .bss because it is consumed before the BSS
 * has been cleared during early boot.
 */
uint32_t xen_start_flags __attribute__((section(".data"))) = 0;
EXPORT_SYMBOL(xen_start_flags);

/*
 * Points at dummy memory to start with; the real shared info page is
 * mapped later during boot.
 */
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;

/*
 * Non-zero while per-CPU vcpu info placement (VCPUOP_register_vcpu_info)
 * is usable; cleared if registering the vcpu info fails for any CPU.
 */
int xen_have_vcpu_info_placement = 1;

static int xen_cpu_up_online(unsigned int cpu)
{
        xen_init_lock_cpu(cpu);
        return 0;
}

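/*
 * Register the Xen CPU hotplug states: a prepare/dead pair supplied by
 * the caller, plus an online callback (xen_cpu_up_online) that sets up
 * Xen's per-CPU lock handling via xen_init_lock_cpu().
 */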
int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
                    int (*cpu_dead_cb)(unsigned int))
{
        int rc;

        rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
                                       "x86/xen/guest:prepare",
                                       cpu_up_prepare_cb, cpu_dead_cb);
        if (rc >= 0) {
                rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                               "x86/xen/guest:online",
                                               xen_cpu_up_online, NULL);
                if (rc < 0)
                        cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
        }

        return rc >= 0 ? 0 : rc;
}

static int xen_vcpu_setup_restore(int cpu)
{
        int rc = 0;

        /* Any per_cpu(xen_vcpu) is stale, so reset it. */
        xen_vcpu_info_reset(cpu);

        /*
         * For HVM guests, only online vCPUs are set up here; the rest
         * are handled when they are hotplugged.
         */
        if (xen_pv_domain() ||
            (xen_hvm_domain() && cpu_online(cpu))) {
                rc = xen_vcpu_setup(cpu);
        }

        return rc;
}

/*
 * On restore, re-establish the vcpu info placement for every possible
 * CPU.  A failure here cannot be rolled back, so it is reported as
 * fatal below.
 */
void xen_vcpu_restore(void)
{
        int cpu, rc;

        for_each_possible_cpu(cpu) {
                bool other_cpu = (cpu != smp_processor_id());
                bool is_up;

                if (xen_vcpu_nr(cpu) == XEN_VCPU_ID_INVALID)
                        continue;

                is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up,
                                           xen_vcpu_nr(cpu), NULL) > 0;

                if (other_cpu && is_up &&
                    HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
                        BUG();

                if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
                        xen_setup_runstate_info(cpu);

                rc = xen_vcpu_setup_restore(cpu);
                if (rc)
                        pr_emerg_once("vcpu restore failed for cpu=%d err=%d. "
                                      "System will hang.\n", cpu, rc);

                /*
                 * Only bring the vCPU back up if its vcpu info was restored
                 * successfully (rc == 0); bringing it up with a NULL vcpu
                 * info pointer would make it crash on first access.
                 */
                if (other_cpu && is_up && (rc == 0) &&
                    HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
                        BUG();
        }
}

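/*
 * Point the per-CPU xen_vcpu pointer at the vCPU's slot in the shared
 * info page, or clear it if this vCPU has no slot there.
 */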
void xen_vcpu_info_reset(int cpu)
{
        if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS) {
                per_cpu(xen_vcpu, cpu) =
                        &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
        } else {
                /* Only MAX_VIRT_CPUS slots exist in the shared info page. */
                per_cpu(xen_vcpu, cpu) = NULL;
        }
}

int xen_vcpu_setup(int cpu)
{
        struct vcpu_register_vcpu_info info;
        int err;
        struct vcpu_info *vcpup;

        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

        /*
         * On HVM this function may be called more than once for the same
         * CPU (at boot, on restore and on CPU hotplug), but the vcpu info
         * cannot be re-registered with the hypervisor, so skip the setup
         * if placement has already been done for this CPU.
         */
        if (xen_hvm_domain()) {
                if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
                        return 0;
        }

        if (xen_have_vcpu_info_placement) {
                vcpup = &per_cpu(xen_vcpu_info, cpu);
                info.mfn = arbitrary_virt_to_mfn(vcpup);
                info.offset = offset_in_page(vcpup);

                /*
                 * Ask the hypervisor to place this vCPU's vcpu_info at the
                 * per-CPU copy, so it can be accessed directly through a
                 * per-CPU variable.
                 */
                err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info,
                                         xen_vcpu_nr(cpu), &info);

                if (err) {
                        pr_warn_once("register_vcpu_info failed: cpu=%d err=%d\n",
                                     cpu, err);
                        xen_have_vcpu_info_placement = 0;
                } else {
                        /* This CPU now uses the registered per-CPU copy. */
                        per_cpu(xen_vcpu, cpu) = vcpup;
                }
        }

        if (!xen_have_vcpu_info_placement)
                xen_vcpu_info_reset(cpu);

        return ((per_cpu(xen_vcpu, cpu) == NULL) ? -ENODEV : 0);
}

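/*
 * Shut down (or reboot/crash, depending on the reason code) through the
 * hypervisor, after tearing down PMU state on all online CPUs.
 */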
void xen_reboot(int reason)
{
        struct sched_shutdown r = { .reason = reason };
        int cpu;

        for_each_online_cpu(cpu)
                xen_pmu_finish(cpu);

        if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
                BUG();
}

void xen_emergency_restart(void)
{
        xen_reboot(SHUTDOWN_reboot);
}

static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        if (!kexec_crash_loaded())
                xen_reboot(SHUTDOWN_crash);
        return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
        .notifier_call = xen_panic_event,
        .priority = INT_MIN
};

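/*
 * Register the panic notifier so that a panic crashes the domain via the
 * hypervisor unless a kexec crash kernel has been loaded.
 */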
int xen_panic_handler_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
        return 0;
}

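/*
 * Try to pin the calling vCPU to physical CPU "cpu" via
 * SCHEDOP_pin_override.  Pinning is disabled for good if the hypervisor
 * does not support the operation or refuses it for lack of privilege.
 */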
void xen_pin_vcpu(int cpu)
{
        static bool disable_pinning;
        struct sched_pin_override pin_override;
        int ret;

        if (disable_pinning)
                return;

        pin_override.pcpu = cpu;
        ret = HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin_override);

        /* Ignore errors when the override is being removed (cpu < 0). */
        if (cpu < 0)
                return;

        switch (ret) {
        case -ENOSYS:
                pr_warn("Unable to pin on physical cpu %d. In case of problems consider vcpu pinning.\n",
                        cpu);
                disable_pinning = true;
                break;
        case -EPERM:
                WARN(1, "Trying to pin vcpu without having privilege to do so\n");
                disable_pinning = true;
                break;
        case -EINVAL:
        case -EBUSY:
                pr_warn("Physical cpu %d not available for pinning. Check Xen cpu configuration.\n",
                        cpu);
                break;
        case 0:
                break;
        default:
                WARN(1, "rc %d while trying to pin vcpu\n", ret);
                disable_pinning = true;
        }
}

#ifdef CONFIG_HOTPLUG_CPU
void xen_arch_register_cpu(int num)
{
        arch_register_cpu(num);
}
EXPORT_SYMBOL(xen_arch_register_cpu);

void xen_arch_unregister_cpu(int num)
{
        arch_unregister_cpu(num);
}
EXPORT_SYMBOL(xen_arch_unregister_cpu);
#endif

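/*
 * For the initial domain, advertise the host memory above the guest's
 * initial allocation as the region available for memory hotplug, with
 * the host's non-RAM areas marked as unavailable.
 */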
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
void __init arch_xen_balloon_init(struct resource *hostmem_resource)
{
        struct xen_memory_map memmap;
        int rc;
        unsigned int i, last_guest_ram;
        phys_addr_t max_addr = PFN_PHYS(max_pfn);
        struct e820_table *xen_e820_table;
        const struct e820_entry *entry;
        struct resource *res;

        if (!xen_initial_domain())
                return;

        xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
        if (!xen_e820_table)
                return;

        memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
        set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
        rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
        if (rc) {
                pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
                goto out;
        }

        last_guest_ram = 0;
        for (i = 0; i < memmap.nr_entries; i++) {
                if (xen_e820_table->entries[i].addr >= max_addr)
                        break;
                if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
                        last_guest_ram = i;
        }

        entry = &xen_e820_table->entries[last_guest_ram];
        if (max_addr >= entry->addr + entry->size)
                goto out; /* Guest RAM already covers the last host RAM entry. */

        hostmem_resource->start = max_addr;
        hostmem_resource->end = entry->addr + entry->size;

        /*
         * Mark the remaining non-RAM regions of the host map as
         * unavailable, so that hotplugged memory is never placed on top
         * of them.
         */
        for (; i < memmap.nr_entries; i++) {
                entry = &xen_e820_table->entries[i];

                if (entry->type == E820_TYPE_RAM)
                        continue;

                if (entry->addr >= hostmem_resource->end)
                        break;

                res = kzalloc(sizeof(*res), GFP_KERNEL);
                if (!res)
                        goto out;

                res->name = "Unavailable host RAM";
                res->start = entry->addr;
                res->end = (entry->addr + entry->size < hostmem_resource->end) ?
                            entry->addr + entry->size : hostmem_resource->end;
                rc = insert_resource(hostmem_resource, res);
                if (rc) {
                        pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
                                __func__, res->start, res->end, rc);
                        kfree(res);
                        goto out;
                }
        }

out:
        kfree(xen_e820_table);
}
#endif