qemu/include/exec/ram_addr.h
/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "exec/ramlist.h"

struct RAMBlock {
    struct rcu_head rcu;
    /* MemoryRegion that owns this block */
    struct MemoryRegion *mr;
    /* Host virtual address of the mapping */
    uint8_t *host;
    /* Base of the block within the ram_addr_t address space */
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    /* Resize callback, set for resizeable blocks only */
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    /* File descriptor of the backing file, or -1 for anonymous memory */
    int fd;
    /* Page size of the backing storage */
    size_t page_size;
    /* dirty bitmap used during migration */
    unsigned long *bmap;
    /* Bitmap of pages that haven't been sent even once.  Currently only
     * maintained and used in postcopy, where it is used to send the
     * dirty bitmap at the start of the postcopy phase.
     */
    unsigned long *unsentmap;
    /* bitmap of already received pages in postcopy */
    unsigned long *receivedmap;
};

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}

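/*
 * Usage sketch (illustrative, hypothetical variable names): validate an
 * untrusted block-relative offset before mapping it to a host pointer,
 * since ramblock_ptr() asserts on out-of-range offsets:
 *
 *     if (offset_in_ramblock(block, off)) {
 *         uint8_t *p = ramblock_ptr(block, off);
 *         memcpy(p, data, len);
 *     }
 */
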
static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
                                                            RAMBlock *rb)
{
    uint64_t host_addr_offset =
            (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}

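/*
 * Sketch of the intended use (as in the postcopy receive path): once a
 * page has been placed at host address "host" within block "rb", record
 * it in the received bitmap:
 *
 *     set_bit_atomic(ramblock_recv_bitmap_offset(host, rb),
 *                    rb->receivedmap);
 */
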
long qemu_getrampagesize(void);
unsigned long last_ram_page(void);
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 bool share, int fd,
                                 Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, MemoryRegion *mr,
                         Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

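/*
 * A dirty-client mask has one bit per client (DIRTY_MEMORY_VGA,
 * DIRTY_MEMORY_CODE, DIRTY_MEMORY_MIGRATION).  For example, to dirty a
 * range for every client except the self-modifying-code tracker:
 *
 *     cpu_physical_memory_set_dirty_range(addr, len, DIRTY_CLIENTS_NOCODE);
 */
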
/* Returns true if any page in [start, start + length) is dirty for
 * the given client. */
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

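/*
 * Example (hypothetical fb_base/fb_size): a display device can poll for
 * guest writes to its framebuffer since the last refresh:
 *
 *     if (cpu_physical_memory_get_dirty(fb_base, fb_size,
 *                                       DIRTY_MEMORY_VGA)) {
 *         ... redraw the framebuffer ...
 *     }
 */
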
/* Returns true if every page in [start, start + length) is dirty for
 * the given client. */
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

/* Returns true unless the page is already dirty for all clients. */
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

/* Returns the subset of @mask whose clients see at least one clean page
 * in [start, start + length). */
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

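/*
 * Sketch of the caller pattern (as used on the memory write path):
 * narrow a dirty-log mask to the clients that actually have clean pages
 * in the range, and only re-dirty for those:
 *
 *     dirty_log_mask =
 *         cpu_physical_memory_range_includes_clean(addr, len,
 *                                                  dirty_log_mask);
 *     if (dirty_log_mask) {
 *         cpu_physical_memory_set_dirty_range(addr, len, dirty_log_mask);
 *     }
 */
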
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_hvm_modified_memory(start, length);
}

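/*
 * Sketch: after a device DMA write into guest RAM, flag the range for
 * every interested client (mirroring the mask choice used elsewhere in
 * this header):
 *
 *     cpu_physical_memory_set_dirty_range(addr, len,
 *                                         tcg_enabled()
 *                                         ? DIRTY_CLIENTS_ALL
 *                                         : DIRTY_CLIENTS_NOCODE);
 */
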
#if !defined(_WIN32)
/* Set dirty bits from @bitmap, a little-endian bitmap covering @pages
 * target pages starting at @start (the layout produced by e.g. the KVM
 * dirty-log ioctls). */
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* Is the start address aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

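/*
 * Sketch (simplified from the KVM dirty-log sync path; variable names
 * are hypothetical): a little-endian bitmap fetched from the kernel can
 * be fed in directly:
 *
 *     cpu_physical_memory_set_dirty_lebitmap(d.dirty_bitmap,
 *                                            section_start_addr, npages);
 */
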
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (ram_addr_t start, ram_addr_t length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);

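/*
 * Sketch (hypothetical fb_base/line_base names): a display device can
 * atomically snapshot-and-reset the VGA dirty state for its framebuffer
 * and then test sub-ranges at leisure; the snapshot is freed with
 * g_free():
 *
 *     DirtyBitmapSnapshot *snap =
 *         cpu_physical_memory_snapshot_and_clear_dirty(fb_base, fb_size,
 *                                                      DIRTY_MEMORY_VGA);
 *     if (cpu_physical_memory_snapshot_get_dirty(snap, line_base,
 *                                                line_size)) {
 *         ... redraw this scanline ...
 *     }
 *     g_free(snap);
 */
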
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}


/* Harvest the migration dirty bits for @rb over [start, start + length)
 * into rb->bmap: returns the number of pages newly dirtied in rb->bmap
 * and adds the raw count of dirty pages seen to *real_dirty_pages. */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               uint64_t *real_dirty_pages)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* Are the start address and length aligned at the start of a word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset) &&
        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                *real_dirty_pages += ctpopl(bits);
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                *real_dirty_pages += 1;
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
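
/*
 * Sketch of the migration caller (hypothetical RAMState field names,
 * mirroring the migration code): per-block sync accumulates roughly as
 *
 *     rs->migration_dirty_pages +=
 *         cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length,
 *                                               &rs->num_dirty_pages_period);
 */
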
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */