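/*
 * Declarations for cpu physical memory functions.
 *
 * This header is for use by exec.c and memory.c ONLY.  Do not include
 * it elsewhere; the functions declared here are internal to the memory
 * subsystem.
 */
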
#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

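/* A contiguous block of guest RAM, registered on the RCU-protected
 * ram_list.blocks list. */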
struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;

    char idstr[256];

    QLIST_ENTRY(RAMBlock) next;
    int fd;
};

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}

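/*
 * Dirty memory tracking.
 *
 * Each bit in a dirty bitmap covers one TARGET_PAGE_SIZE page.  The
 * bitmap for each client (DIRTY_MEMORY_VGA, DIRTY_MEMORY_CODE,
 * DIRTY_MEMORY_MIGRATION) is split into fixed-size blocks of
 * DIRTY_MEMORY_BLOCK_SIZE bits each, so that the array of block
 * pointers can be grown under RCU when RAM is resized, without copying
 * bitmap contents.  Readers access the blocks inside an RCU read-side
 * critical section.
 */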
#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)
typedef struct {
    struct rcu_head rcu;
    unsigned long *blocks[];
} DirtyMemoryBlocks;

typedef struct RAMList {
    QemuMutex mutex;
    RAMBlock *mru_block;

    QLIST_HEAD(, RAMBlock) blocks;
    DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
    uint32_t version;
} RAMList;
extern RAMList ram_list;

ram_addr_t last_ram_offset(void);
void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);

RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

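/* Returns true if any page in [start, start + length) is dirty for
 * the given client. */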
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

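/* Returns true only if every page in [start, start + length) is dirty
 * for the given client. */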
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num,
                                                 offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

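/* Returns true unless the page is already dirty for every client;
 * "clean" pages still need dirty-tracking write notifications. */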
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

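/* Returns the subset of clients in @mask for which at least one page
 * in [start, start + length) is not yet dirty. */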
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}

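/* Marks [start, start + length) dirty for every client set in @mask. */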
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_modified_memory(start, length);
}

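/* Propagate a little-endian dirty bitmap (e.g. a dirty log returned by
 * KVM) covering @pages pages starting at @start into QEMU's own dirty
 * bitmaps. */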
#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

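    /* start address is aligned at the start of a word? */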
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;

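        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */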
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length,
                                             DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

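/* Transfers the dirty-for-migration bits for [start, start + length)
 * into @dest, clearing them in the global bitmap, and returns the
 * number of pages newly marked dirty in @dest. */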
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

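    /* start address is aligned at the start of a word? */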
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}

void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif
#endif