/*
 * Declarations for cpu physical memory functions (RAM blocks and
 * dirty memory tracking).
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

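/*
 * A single block of guest RAM.  "host" is the host-side mapping,
 * "offset" is the block's base within the ram_addr_t address space,
 * and "used_length"/"max_length" track the current and maximum size
 * of a (possibly resizeable) block.  Blocks are reclaimed via RCU.
 */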
struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;

    char idstr[256];

    QLIST_ENTRY(RAMBlock) next;
    int fd;
};

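/* Return a host pointer to guest RAM at "offset" bytes into "block";
 * the offset must lie within the block's used length. */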
static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset < block->used_length);
    assert(block->host);
    return (char *)block->host + offset;
}

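/* The global list of RAM blocks, plus one dirty bitmap per client
 * (VGA, code, migration).  "version" lets readers detect that the
 * list has changed; insertions and removals take the mutex. */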
typedef struct RAMList {
    QemuMutex mutex;

    unsigned long *dirty_memory[DIRTY_MEMORY_NUM];
    RAMBlock *mru_block;

    QLIST_HEAD(, RAMBlock) blocks;
    uint32_t version;
} RAMList;
extern RAMList ram_list;

ram_addr_t last_ram_offset(void);
void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);

ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp);
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp);
int qemu_get_ram_fd(ram_addr_t addr);
void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
void *qemu_get_ram_ptr(ram_addr_t addr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);

int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);

#define DIRTY_CLIENTS_ALL    ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

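/* Return true if any page in [start, start + length) is dirty for
 * the given client. */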
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_bit(ram_list.dirty_memory[client], end, page);

    return next < end;
}

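/* Return true only if every page in [start, start + length) is dirty
 * for the given client. */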
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);

    return next >= end;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

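/* A page counts as clean if at least one client still has it marked
 * clean, i.e. some client would still need to be notified of a write. */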
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

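/* Of the clients selected by "mask", return the subset for which at
 * least one page in [start, start + length) is still clean. */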
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

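/* Mark the page containing "addr" dirty for a single client. */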
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    assert(client < DIRTY_MEMORY_NUM);
    set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
}

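/* Mark [start, start + length) dirty for every client selected by
 * "mask", and notify Xen of the modified range. */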
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    unsigned long end, page;
    unsigned long **d = ram_list.dirty_memory;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
    }
    if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
    }
    if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
    }
    xen_modified_memory(start, length);
}

#if !defined(_WIN32)
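/* Fold a little-endian dirty bitmap (such as one produced by a
 * hypervisor's dirty-log interface) into the global dirty bitmaps,
 * starting at page "start".  Handles host pages larger than the
 * target page size via "hpratio". */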
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* Fast path: the start address is aligned at the start of a bitmap
     * word and host pages match target pages, so whole words can be
     * ORed into the destination bitmaps at once. */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        long k;
        long nr = BITS_TO_LONGS(pages);

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);
                unsigned long **d = ram_list.dirty_memory;

                atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
                atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
                if (tcg_enabled()) {
                    atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
                }
            }
        }
        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;

        /* Slow path: walk the bitmap word by word and mark each set page
         * individually.  Traveling the bitmap is still faster than
         * traveling memory, especially when most of it is clean. */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif

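/* Atomically clear the dirty bits of one client for
 * [start, start + length), returning true if any page in the range
 * was dirty. */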
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

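/* Clear the dirty bits for [start, start + length) for all clients. */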
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

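/* Move the migration dirty bits for [start, start + length) into
 * "dest", clearing them in the global bitmap, and return the number
 * of pages that became newly dirty in "dest". */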
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* Fast path: the start address is aligned at the start of a bitmap
     * word, so whole words can be exchanged at once. */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long bits = atomic_xchg(&src[k], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }
        }
    } else {
        /* Slow path: test and clear one page at a time. */
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}

void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */
293