#ifndef MEMORY_INTERNAL_H
#define MEMORY_INTERNAL_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into the leaf section table (is_leaf) or into the next
     * level of map nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
};
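
/*
 * Rough sketch of how the dispatch map is walked (illustrative only;
 * the node and section tables named here live in exec.c and are an
 * assumption of this comment, not declared in this header): starting
 * from d->phys_map, inner nodes select the next level by a slice of
 * the physical page number until a leaf names a MemoryRegionSection:
 *
 *     PhysPageEntry pe = d->phys_map;
 *     while (!pe.is_leaf) {
 *         pe = node_table[pe.ptr][next_index_bits(addr)];
 *     }
 *     section = &section_table[pe.ptr];
 */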

void address_space_init_dispatch(AddressSpace *as);
void address_space_destroy_dispatch(AddressSpace *as);

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);
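
/*
 * Usage sketch (illustrative only; the calling code shown here is not
 * part of this header): memory.c backs a RAM MemoryRegion by taking a
 * ram_addr_t from the allocator and releasing it on destruction:
 *
 *     mr->ram_addr = qemu_ram_alloc(size, mr);
 *     ...
 *     qemu_ram_free(mr->ram_addr);
 *
 * qemu_ram_alloc_from_ptr() registers caller-provided host memory
 * instead of allocating it, and qemu_ram_free_from_ptr() undoes that
 * registration.
 */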

/* Per-page dirty bits, kept one byte per target page in
 * ram_list.phys_dirty[].  Each flag marks the page dirty for one
 * client; 0xff marks it dirty for all clients. */
#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08

static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}

/* A page counts as dirty only when every client's flag is set. */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
                                                ram_addr_t length,
                                                int dirty_flags)
{
    int ret = 0;
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
    }
    return ret;
}
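
/*
 * Example (illustrative; the surrounding migration logic is assumed,
 * not defined here): RAM migration tests a page's MIGRATION_DIRTY_FLAG
 * to decide whether it still has to be sent:
 *
 *     if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
 *                                       MIGRATION_DIRTY_FLAG)) {
 *         ... send the page, then clear MIGRATION_DIRTY_FLAG ...
 *     }
 */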

static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                       int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    cpu_physical_memory_set_dirty_flags(addr, 0xff);
}

static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
                                                         int dirty_flags)
{
    int mask = ~dirty_flags;

    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        int dirty_flags)
{
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
    }
    /* Notify Xen with the (page-aligned) start of the range, not the
     * loop variable, which equals 'end' at this point. */
    xen_modified_memory(start, length);
}
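
/*
 * Usage sketch (illustrative caller, not part of this header): code
 * that writes guest RAM through a host pointer has to dirty the range
 * afterwards so that display refresh, TCG's self-modifying-code
 * handling and live migration all see the update:
 *
 *     memcpy(host_ptr, buf, len);
 *     cpu_physical_memory_set_dirty_range(ram_addr, len, 0xff);
 */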

static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                         ram_addr_t length,
                                                         int dirty_flags)
{
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
    }
}
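
/*
 * Consumer-side sketch (illustrative; the display loop is assumed, not
 * part of this header): after a client has handled a range it clears
 * only its own flag, leaving the other clients' bits intact:
 *
 *     if (cpu_physical_memory_get_dirty(page_addr, TARGET_PAGE_SIZE,
 *                                       VGA_DIRTY_FLAG)) {
 *         ... redraw the scanlines backed by this page ...
 *         cpu_physical_memory_mask_dirty_range(page_addr,
 *                                              TARGET_PAGE_SIZE,
 *                                              VGA_DIRTY_FLAG);
 *     }
 */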

/* Clear the given dirty bits for all pages in [start, end); defined in
 * exec.c, where it also updates the CPU TLBs so that further guest
 * writes re-dirty the pages. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);

/* Defined in memory.c. */
extern const IORangeOps memory_region_iorange_ops;

#endif /* !CONFIG_USER_ONLY */

#endif /* MEMORY_INTERNAL_H */