/*
 * Copyright (C) 2010       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_host.h"
#include "hw/i386/pc.h"
#include "hw/southbridge/piix.h"
#include "hw/irq.h"
#include "hw/hw.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen-bus.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN_HVM

#ifdef DEBUG_XEN_HVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older version */

/* This allows QEMU to build on a system that has Xen 4.5 or earlier
 * installed.  This here (not in hw/xen/xen_common.h) because xen/hvm/ioreq.h
 * needs to be included before this block and hw/xen/xen_common.h needs to
 * be included before xen/hvm/ioreq.h
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT  3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif

static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}

#define BUFFER_IO_MAX_DELAY  100

typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    const char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

static QLIST_HEAD(, XenPhysmap) xen_physmap;

typedef struct XenPciDevice {
    PCIDevice *pci_dev;
    uint32_t sbdf;
    QLIST_ENTRY(XenPciDevice) entry;
} XenPciDevice;

typedef struct XenIOState {
    ioservid_t ioservid;
    shared_iopage_t *shared_page;
    shared_vmport_iopage_t *shared_vmport_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    CPUState **cpu_by_vcpu_id;
    /* the evtchn port for polling the notification, */
    evtchn_port_t *ioreq_local_port;
    /* evtchn remote and local ports for buffered io */
    evtchn_port_t bufioreq_remote_port;
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    xenevtchn_handle *xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    MemoryListener io_listener;
    QLIST_HEAD(, XenPciDevice) dev_list;
    DeviceListener device_listener;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;
    /* Buffer used by xen_sync_dirty_bitmap */
    unsigned long *dirty_bitmap;

    Notifier exit;
    Notifier suspend;
    Notifier wakeup;
} XenIOState;

/* Xen specific function for piix pci */

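/*
 * Map a device/INTx pair onto one of the four PIIX3 PIRQ links: the low
 * two bits carry the INTx pin and the upper bits the slot. For example
 * (illustrative), slot 2 asserting INTB (irq_num 1) yields 1 + (2 << 2) = 9,
 * which xen_piix3_set_irq() below decodes back as device 2, pin INTB.
 */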
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2,
                           irq_num & 3, level);
}

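/*
 * Watch writes to the PIIX3 PIRQ route control registers (PIRQCA..PIRQCD,
 * config offsets 0x60-0x63) and mirror them into Xen's PCI link routing.
 * Per the PIIX datasheet, bit 7 set means routing is disabled (forwarded
 * here as IRQ 0, i.e. no route) and bits 3:0 select the target ISA IRQ.
 */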
void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= PIIX_PIRQCA) && ((address + i) <= PIIX_PIRQCD)) {
            xen_set_pci_link_route(xen_domid, address + i - PIIX_PIRQCA, v);
        }
    }
}

int xen_is_pirq_msi(uint32_t msi_data)
{
    /* If vector is 0, the msi is remapped into a pirq, passed as
     * dest_id.
     */
    return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_inject_msi(xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xen_set_isa_irq_level(xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */

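/*
 * Lay out guest RAM the way a Xen HVM guest expects it: RAM below 1MB
 * (minus the VGA hole), low RAM up to the below-4G limit, and the rest
 * relocated above 4GB. Roughly (derived from the aliases set up below):
 *
 *   0x00000000 - 0x0009ffff   xen.ram.640k (alias into xen.ram)
 *   0x000c0000 - below_4g     xen.ram.lo   (alias, skips the VGA hole)
 *   0x100000000 and up        xen.ram.hi   (alias, only if RAM > below_4g)
 */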
static void xen_ram_init(PCMachineState *pcms,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    X86MachineState *x86ms = X86_MACHINE(pcms);
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem =
        object_property_get_uint(qdev_get_machine(),
                                 X86_MACHINE_MAX_RAM_BELOW_4G,
                                 &error_abort);

    /* Handle the machine opt max-ram-below-4g.  It is basically doing
     * min(xen limit, user limit).
     */
    if (!user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END; /* default */
    }
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        x86ms->above_4g_mem_size = ram_size - user_lowmem;
        x86ms->below_4g_mem_size = user_lowmem;
    } else {
        x86ms->above_4g_mem_size = 0;
        x86ms->below_4g_mem_size = ram_size;
    }
    if (!x86ms->above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory continuously, it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (1ULL << 32) + x86ms->above_4g_mem_size;
    }
    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
                           &error_fatal);
    *ram_memory_p = &ram_memory;

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip of the VGA IO memory space, it will be registered later by the
     * VGA emulated device.
     *
     * The area between 0xa0000 and 0xc0000 will be used by SeaBIOS to load
     * the Options ROM, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &ram_memory, 0xc0000,
                             x86ms->below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (x86ms->above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 x86ms->above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}

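/*
 * Under Xen, guest RAM is populated by the hypervisor rather than
 * allocated by QEMU: for any RAM block other than the main "xen.ram"
 * region (which the toolstack has already populated), ask Xen to back
 * the corresponding range of guest frame numbers.
 */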
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
                   Error **errp)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
                   ram_addr);
    }

    g_free(pfn_list);
}

static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size)
{
    hwaddr addr = phys_offset & TARGET_PAGE_MASK;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr + (phys_offset - physmap->phys_offset);
        }
    }

    return phys_offset;
}

#ifdef XEN_COMPAT_PHYSMAP
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    char path[80], value[17];

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (physmap->name) {
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                 xen_domid, (uint64_t)physmap->phys_offset);
        if (!xs_write(state->xenstore, 0, path,
                      physmap->name, strlen(physmap->name))) {
            return -1;
        }
    }
    return 0;
}
#else
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    return 0;
}
#endif

static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long nr_pages;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    const char *mr_name;

    if (get_physmapping(start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any regions that is not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    mr_name = memory_region_name(mr);

    physmap = g_malloc(sizeof(XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&xen_physmap, physmap, list);

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Now when we have a physmap entry we can replace a dummy mapping with
         * a real one of guest foreign memory. */
        uint8_t *p = xen_replace_cache_entry(phys_offset, start_addr, size);
        assert(p && p == memory_region_get_ram_ptr(mr));

        return 0;
    }

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    nr_pages = size >> TARGET_PAGE_BITS;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, nr_pages, pfn,
                                        start_gpfn);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory %lu pages from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     nr_pages, pfn, start_gpfn, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    rc = xendevicemodel_pin_memory_cacheattr(xen_dmod, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);
    if (rc) {
        error_report("pin_memory_cacheattr failed: %s", strerror(errno));
    }
    return xen_save_physmap(state, physmap);
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, size, start_addr,
                                        phys_offset);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory "RAM_ADDR_FMT" pages"
                     " from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     size, start_addr, phys_offset, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
        g_free(state->dirty_bitmap);
        state->dirty_bitmap = NULL;
    }
    g_free(physmap);

    return 0;
}

static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
    hvmmem_type_t mem_type;

    if (section->mr == &ram_memory) {
        return;
    } else {
        if (add) {
            xen_map_memory_section(xen_domid, state->ioservid,
                                   section);
        } else {
            xen_unmap_memory_section(xen_domid, state->ioservid,
                                     section);
        }
    }

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (log_dirty != add) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xen_set_mem_type(xen_domid, mem_type,
                                 start_addr >> TARGET_PAGE_BITS,
                                 size >> TARGET_PAGE_BITS)) {
                DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}

static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}

static void xen_io_add(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    memory_region_ref(mr);

    xen_map_io_section(xen_domid, state->ioservid, section);
}

static void xen_io_del(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    xen_unmap_io_section(xen_domid, state->ioservid, section);

    memory_region_unref(mr);
}

static void xen_device_realize(DeviceListener *listener,
                               DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev = g_new(XenPciDevice, 1);

        xendev->pci_dev = pci_dev;
        xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
                                     pci_dev->devfn);
        QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);

        xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
    }
}

static void xen_device_unrealize(DeviceListener *listener,
                                 DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev, *next;

        xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);

        QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
            if (xendev->pci_dev == pci_dev) {
                QLIST_REMOVE(xendev, entry);
                g_free(xendev);
                break;
            }
        }
    }
}

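/*
 * Query Xen's dirty-VRAM log and propagate it into QEMU's dirty bitmap so
 * the emulated VGA redraws only the pages the guest actually touched.
 * Xen tracks a single dirty-VRAM range per domain, hence the
 * log_for_dirtybit guard below.
 */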
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    size_t bitmap_size = DIV_ROUND_UP(npages, width);
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
        state->dirty_bitmap = g_new(unsigned long, bitmap_size);
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS,
                              npages, state->dirty_bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA  ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < bitmap_size; i++) {
        unsigned long map = state->dirty_bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
        xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                              int128_get64(section->size));
    }
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
                         int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
        state->log_for_dirtybit = NULL;
        g_free(state->dirty_bitmap);
        state->dirty_bitmap = NULL;
        /* Disable dirty bit tracking */
        xen_track_dirty_vram(xen_domid, 0, 0, NULL);
    }
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

static MemoryListener xen_io_listener = {
    .region_add = xen_io_add,
    .region_del = xen_io_del,
    .priority = 10,
};

static DeviceListener xen_device_listener = {
    .realize = xen_device_realize,
    .unrealize = xen_device_unrealize,
};

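/*
 * An ioreq slot moves through STATE_IOREQ_READY -> STATE_IOREQ_INPROCESS
 * (claimed below) -> STATE_IORESP_READY (set in cpu_handle_ioreq() once
 * emulation is done), with barriers ordering reads of the request
 * contents against reads of the state field.
 */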
/* get the ioreq packets from share mem */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %u, size: %u\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/* use poll to get the port notification */
/* ioreq_vec--out,the */
/* retval--the number of ioreq packet */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
    int i;
    evtchn_port_t port;

    port = xenevtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xenevtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(uint32_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04x %lx", addr, size);
    }
}

static void do_outp(uint32_t addr,
        unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04x %lx", addr, size);
    }
}

/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, rw);
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}
static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}


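/*
 * Handle a port I/O request. data_is_ptr distinguishes a register-style
 * access (value carried directly in req->data) from a rep ins/outs style
 * access, where req->data is a guest-physical pointer and req->count
 * items are copied via read/write_phys_req_item().
 */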
static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
                        req->data, req->count, req->size);

    if (req->size > sizeof(uint32_t)) {
        hw_error("PIO: bad size (%u)", req->size);
    }

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
            trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
                                         req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
                                          req->size);
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

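/*
 * Handle an MMIO request. Without data_is_ptr this is a simple load or
 * store at req->addr; with it, it is a memory-to-memory move of
 * req->count items between req->addr and the guest buffer at req->data
 * (e.g. a rep movs targeting MMIO).
 */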
static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
                         req->data, req->count, req->size);

    if (req->size > sizeof(req->data)) {
        hw_error("MMIO: bad size (%u)", req->size);
    }

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}

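/*
 * Handle a PCI config-space request. The ioreq encodes the target in
 * req->addr: the upper 32 bits carry the SBDF and the lower bits the
 * config-space offset, which is matched against the devices recorded by
 * xen_device_realize().
 */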
static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
{
    uint32_t sbdf = req->addr >> 32;
    uint32_t reg = req->addr;
    XenPciDevice *xendev;

    if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
        req->size != sizeof(uint32_t)) {
        hw_error("PCI config access: bad size (%u)", req->size);
    }

    if (req->count != 1) {
        hw_error("PCI config access: bad count (%u)", req->count);
    }

    QLIST_FOREACH(xendev, &state->dev_list, entry) {
        if (xendev->sbdf != sbdf) {
            continue;
        }

        if (!req->data_is_ptr) {
            if (req->dir == IOREQ_READ) {
                req->data = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, req->data);
            } else if (req->dir == IOREQ_WRITE) {
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, req->data);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->data, req->size);
            }
        } else {
            uint32_t tmp;

            if (req->dir == IOREQ_READ) {
                tmp = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, tmp);
                write_phys_req_item(req->data, req, 0, &tmp);
            } else if (req->dir == IOREQ_WRITE) {
                read_phys_req_item(req->data, req, 0, &tmp);
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, tmp);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    tmp, req->size);
            }
        }
    }
}

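/*
 * VMware backdoor-port emulation needs more CPU state than a plain PIO
 * ioreq carries, so Xen passes the extra GPRs in a separate shared page;
 * the helpers below shuttle them in and out of the vCPU register file
 * around the actual port access.
 */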
static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}

static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(state->shared_vmport_page);
    vmport_regs =
        &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}

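/*
 * Central dispatch for a single ioreq. Sub-word writes are masked down to
 * req->size bytes first, then the request is routed by type; TIMEOFFSET
 * requests are intentionally ignored and INVALIDATE flushes the mapcache.
 */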
static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
                       req->addr, req->data, req->count, req->size);

    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    if (req->dir == IOREQ_WRITE) {
        trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
                                 req->addr, req->data, req->count, req->size);
    }

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_VMWARE_PORT:
            handle_vmport_ioreq(state, req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
        case IOREQ_TYPE_PCI_CONFIG:
            cpu_ioreq_config(state, req);
            break;
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
    if (req->dir == IOREQ_READ) {
        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
                                req->addr, req->data, req->count, req->size);
    }
}

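/*
 * Drain the buffered-ioreq ring. Each slot is a posted (write-only)
 * request; a 64-bit access spans two consecutive slots, with the high
 * half in the second slot. Re-reading read_pointer after the barrier
 * guards against the pointer having moved underneath us.
 */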
static int handle_buffered_iopage(XenIOState *state)
{
    buffered_iopage_t *buf_page = state->buffered_io_page;
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!buf_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));
    req.state = STATE_IOREQ_READY;
    req.count = 1;
    req.dir = IOREQ_WRITE;

    for (;;) {
        uint32_t rdptr = buf_page->read_pointer, wrptr;

        xen_rmb();
        wrptr = buf_page->write_pointer;
        xen_rmb();
        if (rdptr != buf_page->read_pointer) {
            continue;
        }
        if (rdptr == wrptr) {
            break;
        }
        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1U << buf_req->size;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.type = buf_req->type;
        xen_rmb();
        qw = (req.size == 8);
        if (qw) {
            if (rdptr + 1 == wrptr) {
                hw_error("Incomplete quad word buffered ioreq");
            }
            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                           IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
            xen_rmb();
        }

        handle_ioreq(state, &req);

        /* Only req.data may get updated by handle_ioreq(), albeit even that
         * should not happen as such data would never make it to the guest (we
         * can only usefully see writes here after all).
         */
        assert(req.state == STATE_IOREQ_READY);
        assert(req.count == 1);
        assert(req.dir == IOREQ_WRITE);
        assert(!req.data_is_ptr);

        atomic_add(&buf_page->read_pointer, qw + 1);
    }

    return req.count;
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

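/*
 * Event-channel fd handler: drain any buffered requests, then service the
 * synchronous ioreq (if any) for the vCPU whose port fired, working on a
 * local copy so a misbehaving guest cannot mutate the request while it is
 * being emulated.
 */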
static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        ioreq_t copy = *req;

        xen_rmb();
        handle_ioreq(state, &copy);
        req->data = copy.data;

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %u, size: %u, type: %u\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            ShutdownCause request;

            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            request = qemu_reset_requested_get();
            if (request) {
                qemu_system_reset(request);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xenevtchn_notify(state->xce_handle,
                         state->ioreq_local_port[state->send_vcpu]);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != NULL) {
        evtchn_fd = xenevtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
                                            state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
        CPU_FOREACH(cpu_state) {
            DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
                    __func__, cpu_state->cpu_index, cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}


static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *state = opaque;

    if (running) {
        xen_main_loop_prepare(state);
    }

    xen_set_ioreq_server_state(xen_domid,
                               state->ioservid,
                               (rstate == RUN_STATE_RUNNING));
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xen_destroy_ioreq_server(xen_domid, state->ioservid);

    xenevtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

#ifdef XEN_COMPAT_PHYSMAP
static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
    }
    free(entries);
}
#else
static void xen_read_physmap(XenIOState *state)
{
}
#endif

static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}

static int xen_map_ioreq_server(XenIOState *state)
{
    void *addr = NULL;
    xenforeignmemory_resource_handle *fres;
    xen_pfn_t ioreq_pfn;
    xen_pfn_t bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    int rc;

    /*
     * Attempt to map using the resource API and fall back to normal
     * foreign mapping if this is not supported.
     */
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
    fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
                                         XENMEM_resource_ioreq_server,
                                         state->ioservid, 0, 2,
                                         &addr,
                                         PROT_READ | PROT_WRITE, 0);
    if (fres != NULL) {
        trace_xen_map_resource_ioreq(state->ioservid, addr);
        state->buffered_io_page = addr;
        state->shared_page = addr + TARGET_PAGE_SIZE;
    } else if (errno != EOPNOTSUPP) {
        error_report("failed to map ioreq server resources: error %d handle=%p",
                     errno, xen_xc);
        return -1;
    }

    rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
                                   (state->shared_page == NULL) ?
                                   &ioreq_pfn : NULL,
                                   (state->buffered_io_page == NULL) ?
                                   &bufioreq_pfn : NULL,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        error_report("failed to get ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        return rc;
    }

    if (state->shared_page == NULL) {
        DPRINTF("shared page at pfn %lx\n", ioreq_pfn);

        state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                  PROT_READ | PROT_WRITE,
                                                  1, &ioreq_pfn, NULL);
        if (state->shared_page == NULL) {
            error_report("map shared IO page returned error %d handle=%p",
                         errno, xen_xc);
        }
    }

    if (state->buffered_io_page == NULL) {
        DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);

        state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                       PROT_READ | PROT_WRITE,
                                                       1, &bufioreq_pfn,
                                                       NULL);
        if (state->buffered_io_page == NULL) {
            error_report("map buffered IO page returned error %d", errno);
            return -1;
        }
    }

    if (state->shared_page == NULL || state->buffered_io_page == NULL) {
        return -1;
    }

    DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);

    state->bufioreq_remote_port = bufioreq_evtchn;

    return 0;
}

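/*
 * Entry point for Xen HVM support: create and map the ioreq server, bind
 * the per-vCPU and buffered event channels, initialise the mapcache and
 * guest RAM layout, and register the memory/io/device listeners that keep
 * Xen's view of the machine in sync with QEMU's.
 */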
void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
{
    MachineState *ms = MACHINE(pcms);
    unsigned int max_cpus = ms->smp.max_cpus;
    int i, rc;
    xen_pfn_t ioreq_pfn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xenevtchn_open(NULL, 0);
    if (state->xce_handle == NULL) {
        perror("xen: event channel open");
        goto err;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        goto err;
    }

    xen_create_ioreq_server(xen_domid, &state->ioservid);

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    state->wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&state->wakeup);

    /*
     * Register wake-up support in QMP query-current-machine API
     */
    qemu_register_wakeup_support();

    rc = xen_map_ioreq_server(state);
    if (rc < 0) {
        goto err;
    }

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        state->shared_vmport_page =
            xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                 1, &ioreq_pfn, NULL);
        if (state->shared_vmport_page == NULL) {
            error_report("map shared vmport IO page returned error %d handle=%p",
                         errno, xen_xc);
            goto err;
        }
    } else if (rc != -ENOSYS) {
        error_report("get vmport regs pfn returned error %d, rc=%d",
                     errno, rc);
        goto err;
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *));

    rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
    if (rc < 0) {
        error_report("failed to enable ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            error_report("shared evtchn %d bind error %d", i, errno);
            goto err;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    state->bufioreq_remote_port);
    if (rc == -1) {
        error_report("buffered evtchn bind error %d", errno);
        goto err;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
#ifdef XEN_COMPAT_PHYSMAP
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
#else
    xen_map_cache_init(NULL, state);
#endif
    xen_ram_init(pcms, ram_size, ram_memory);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    state->io_listener = xen_io_listener;
    memory_listener_register(&state->io_listener, &address_space_io);

    state->device_listener = xen_device_listener;
    QLIST_INIT(&state->dev_list);
    device_listener_register(&state->device_listener);

    xen_bus_init();

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        error_report("xen backend core setup failed");
        goto err;
    }
    xen_be_register_common();

    QLIST_INIT(&xen_physmap);
    xen_read_physmap(state);

    /* Disable ACPI build because Xen handles it */
    pcms->acpi_build_enabled = false;

    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}

void destroy_hvm_domain(bool reboot)
{
    xc_interface *xc_handle;
    int sts;
    int rc;

    unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff;

    if (xen_dmod) {
        rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason);
        if (!rc) {
            return;
        }
        if (errno != ENOTTY /* old Xen */) {
            perror("xendevicemodel_shutdown failed");
        }
        /* well, try the old thing then */
    }

    xc_handle = xc_interface_open(0, 0, 0);
    if (xc_handle == NULL) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid, reason);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
}

void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        start = xen_phys_offset_to_gaddr(start, length);

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xen_modified_memory(xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, errno, strerror(errno));
        }
    }
}

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}