qemu/migration/ram.c
   1/*
   2 * QEMU System Emulator
   3 *
   4 * Copyright (c) 2003-2008 Fabrice Bellard
   5 * Copyright (c) 2011-2015 Red Hat Inc
   6 *
   7 * Authors:
   8 *  Juan Quintela <quintela@redhat.com>
   9 *
  10 * Permission is hereby granted, free of charge, to any person obtaining a copy
  11 * of this software and associated documentation files (the "Software"), to deal
  12 * in the Software without restriction, including without limitation the rights
  13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  14 * copies of the Software, and to permit persons to whom the Software is
  15 * furnished to do so, subject to the following conditions:
  16 *
  17 * The above copyright notice and this permission notice shall be included in
  18 * all copies or substantial portions of the Software.
  19 *
  20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  26 * THE SOFTWARE.
  27 */
  28
  29#include "qemu/osdep.h"
  30#include "qemu/cutils.h"
  31#include "qemu/bitops.h"
  32#include "qemu/bitmap.h"
  33#include "qemu/madvise.h"
  34#include "qemu/main-loop.h"
  35#include "xbzrle.h"
  36#include "ram.h"
  37#include "migration.h"
  38#include "migration-stats.h"
  39#include "migration/register.h"
  40#include "migration/misc.h"
  41#include "qemu-file.h"
  42#include "postcopy-ram.h"
  43#include "page_cache.h"
  44#include "qemu/error-report.h"
  45#include "qapi/error.h"
  46#include "qapi/qapi-types-migration.h"
  47#include "qapi/qapi-events-migration.h"
  48#include "qapi/qapi-commands-migration.h"
  49#include "qapi/qmp/qerror.h"
  50#include "trace.h"
  51#include "system/ram_addr.h"
  52#include "exec/target_page.h"
  53#include "qemu/rcu_queue.h"
  54#include "migration/colo.h"
  55#include "system/cpu-throttle.h"
  56#include "savevm.h"
  57#include "qemu/iov.h"
  58#include "multifd.h"
  59#include "system/runstate.h"
  60#include "rdma.h"
  61#include "options.h"
  62#include "system/dirtylimit.h"
  63#include "system/kvm.h"
  64
  65#include "hw/boards.h" /* for machine_dump_guest_core() */
  66
  67#if defined(__linux__)
  68#include "qemu/userfaultfd.h"
  69#endif /* defined(__linux__) */
  70
  71/***********************************************************/
  72/* ram save/restore */
  73
  74/*
  75 * mapped-ram migration supports O_DIRECT, so we need to make sure the
  76 * userspace buffer, the IO operation size and the file offset are
  77 * aligned according to the underlying device's block size. The first
  78 * two are already aligned to page size, but we need to add padding to
  79 * the file to align the offset.  We cannot read the block size
  80 * dynamically because the migration file can be moved between
  81 * different systems, so use 1M to cover most block sizes and to keep
  82 * the file offset aligned at page size as well.
  83 */
  84#define MAPPED_RAM_FILE_OFFSET_ALIGNMENT 0x100000
  85
  86/*
  87 * When doing mapped-ram migration, this is the amount we read from
  88 * the pages region in the migration file at a time.
  89 */
  90#define MAPPED_RAM_LOAD_BUF_SIZE 0x100000
  91
  92XBZRLECacheStats xbzrle_counters;
  93
  94/*
   95 * This structure describes a specific location of a guest page.  In QEMU,
  96 * it's described in a tuple of (ramblock, offset).
  97 */
  98struct PageLocation {
  99    RAMBlock *block;
 100    unsigned long offset;
 101};
 102typedef struct PageLocation PageLocation;
 103
 104/**
 105 * PageLocationHint: describes a hint to a page location
 106 *
  107 * @valid:    set if the hint is valid and to be consumed
 108 * @location: the hint content
 109 *
 110 * In postcopy preempt mode, the urgent channel may provide hints to the
 111 * background channel, so that QEMU source can try to migrate whatever is
 112 * right after the requested urgent pages.
 113 *
 114 * This is based on the assumption that the VM (already running on the
 115 * destination side) tends to access the memory with spatial locality.
 116 * This is also the default behavior of vanilla postcopy (preempt off).
 117 */
 118struct PageLocationHint {
 119    bool valid;
 120    PageLocation location;
 121};
 122typedef struct PageLocationHint PageLocationHint;
 123
 124/* used by the search for pages to send */
 125struct PageSearchStatus {
 126    /* The migration channel used for a specific host page */
 127    QEMUFile    *pss_channel;
 128    /* Last block from where we have sent data */
 129    RAMBlock *last_sent_block;
 130    /* Current block being searched */
 131    RAMBlock    *block;
 132    /* Current page to search from */
 133    unsigned long page;
 134    /* Set once we wrap around */
 135    bool         complete_round;
 136    /* Whether we're sending a host page */
 137    bool          host_page_sending;
 138    /* The start/end of current host page.  Invalid if host_page_sending==false */
 139    unsigned long host_page_start;
 140    unsigned long host_page_end;
 141};
 142typedef struct PageSearchStatus PageSearchStatus;
 143
 144/* struct contains XBZRLE cache and a static page
 145   used by the compression */
 146static struct {
 147    /* buffer used for XBZRLE encoding */
 148    uint8_t *encoded_buf;
 149    /* buffer for storing page content */
 150    uint8_t *current_buf;
 151    /* Cache for XBZRLE, Protected by lock. */
 152    PageCache *cache;
 153    QemuMutex lock;
 154    /* it will store a page full of zeros */
 155    uint8_t *zero_target_page;
 156    /* buffer used for XBZRLE decoding */
 157    uint8_t *decoded_buf;
 158} XBZRLE;
 159
 160static void XBZRLE_cache_lock(void)
 161{
 162    if (migrate_xbzrle()) {
 163        qemu_mutex_lock(&XBZRLE.lock);
 164    }
 165}
 166
 167static void XBZRLE_cache_unlock(void)
 168{
 169    if (migrate_xbzrle()) {
 170        qemu_mutex_unlock(&XBZRLE.lock);
 171    }
 172}
 173
 174/**
 175 * xbzrle_cache_resize: resize the xbzrle cache
 176 *
 177 * This function is called from migrate_params_apply in main
 178 * thread, possibly while a migration is in progress.  A running
 179 * migration may be using the cache and might finish during this call,
  180 * hence changes to the cache are protected by the XBZRLE.lock mutex.
 181 *
 182 * Returns 0 for success or -1 for error
 183 *
 184 * @new_size: new cache size
 185 * @errp: set *errp if the check failed, with reason
 186 */
 187int xbzrle_cache_resize(uint64_t new_size, Error **errp)
 188{
 189    PageCache *new_cache;
 190    int64_t ret = 0;
 191
 192    /* Check for truncation */
 193    if (new_size != (size_t)new_size) {
 194        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
 195                   "exceeding address space");
 196        return -1;
 197    }
 198
 199    if (new_size == migrate_xbzrle_cache_size()) {
 200        /* nothing to do */
 201        return 0;
 202    }
 203
 204    XBZRLE_cache_lock();
 205
 206    if (XBZRLE.cache != NULL) {
 207        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
 208        if (!new_cache) {
 209            ret = -1;
 210            goto out;
 211        }
 212
 213        cache_fini(XBZRLE.cache);
 214        XBZRLE.cache = new_cache;
 215    }
 216out:
 217    XBZRLE_cache_unlock();
 218    return ret;
 219}
 220
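/*
 * Whether postcopy preemption is in effect: the preempt capability is
 * enabled and the migration is currently in its postcopy phase.
 */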
 221static bool postcopy_preempt_active(void)
 222{
 223    return migrate_postcopy_preempt() && migration_in_postcopy();
 224}
 225
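/*
 * Returns true if this RAMBlock should not have its contents migrated:
 * the block is not migratable at all, the migration mode is cpr-transfer,
 * or the block is a shared named file and ignore-shared is enabled.
 */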
 226bool migrate_ram_is_ignored(RAMBlock *block)
 227{
 228    MigMode mode = migrate_mode();
 229    return !qemu_ram_is_migratable(block) ||
 230           mode == MIG_MODE_CPR_TRANSFER ||
 231           (migrate_ignore_shared() && qemu_ram_is_shared(block)
 232                                    && qemu_ram_is_named_file(block));
 233}
 234
 235#undef RAMBLOCK_FOREACH
 236
 237int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
 238{
 239    RAMBlock *block;
 240    int ret = 0;
 241
 242    RCU_READ_LOCK_GUARD();
 243
 244    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
 245        ret = func(block, opaque);
 246        if (ret) {
 247            break;
 248        }
 249    }
 250    return ret;
 251}
 252
 253static void ramblock_recv_map_init(void)
 254{
 255    RAMBlock *rb;
 256
 257    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
 258        assert(!rb->receivedmap);
 259        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
 260    }
 261}
 262
 263int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
 264{
 265    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
 266                    rb->receivedmap);
 267}
 268
 269bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
 270{
 271    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
 272}
 273
 274void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
 275{
 276    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
 277}
 278
 279void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
 280                                    size_t nr)
 281{
 282    bitmap_set_atomic(rb->receivedmap,
 283                      ramblock_recv_bitmap_offset(host_addr, rb),
 284                      nr);
 285}
 286
 287void ramblock_recv_bitmap_set_offset(RAMBlock *rb, uint64_t byte_offset)
 288{
 289    set_bit_atomic(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
 290}
 291#define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)
 292
 293/*
 294 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 295 *
  296 * Returns the number of bytes sent (>0) on success, or <0 on error.
 297 */
 298int64_t ramblock_recv_bitmap_send(QEMUFile *file,
 299                                  const char *block_name)
 300{
 301    RAMBlock *block = qemu_ram_block_by_name(block_name);
 302    unsigned long *le_bitmap, nbits;
 303    uint64_t size;
 304
 305    if (!block) {
 306        error_report("%s: invalid block name: %s", __func__, block_name);
 307        return -1;
 308    }
 309
 310    nbits = block->postcopy_length >> TARGET_PAGE_BITS;
 311
 312    /*
 313     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
 314     * machines we may need 4 more bytes for padding (see below
  315     * comment). So extend it a bit beforehand.
 316     */
 317    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
 318
 319    /*
  320     * Always use little endian when sending the bitmap. This is
  321     * required so that the bitmap is interpreted correctly even when the
  322     * source and destination VMs do not use the same endianness.
 323     */
 324    bitmap_to_le(le_bitmap, block->receivedmap, nbits);
 325
 326    /* Size of the bitmap, in bytes */
 327    size = DIV_ROUND_UP(nbits, 8);
 328
 329    /*
 330     * size is always aligned to 8 bytes for 64bit machines, but it
 331     * may not be true for 32bit machines. We need this padding to
 332     * make sure the migration can survive even between 32bit and
 333     * 64bit machines.
 334     */
 335    size = ROUND_UP(size, 8);
 336
 337    qemu_put_be64(file, size);
 338    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
 339    g_free(le_bitmap);
 340    /*
 341     * Mark as an end, in case the middle part is screwed up due to
 342     * some "mysterious" reason.
 343     */
 344    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
 345    int ret = qemu_fflush(file);
 346    if (ret) {
 347        return ret;
 348    }
 349
 350    return size + sizeof(size);
 351}
 352
 353/*
 354 * An outstanding page request, on the source, having been received
 355 * and queued
 356 */
 357struct RAMSrcPageRequest {
 358    RAMBlock *rb;
 359    hwaddr    offset;
 360    hwaddr    len;
 361
 362    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
 363};
 364
 365/* State of RAM for migration */
 366struct RAMState {
 367    /*
  368     * PageSearchStatus structures for the channels when sending pages.
 369     * Protected by the bitmap_mutex.
 370     */
 371    PageSearchStatus pss[RAM_CHANNEL_MAX];
 372    /* UFFD file descriptor, used in 'write-tracking' migration */
 373    int uffdio_fd;
 374    /* total ram size in bytes */
 375    uint64_t ram_bytes_total;
 376    /* Last block that we have visited searching for dirty pages */
 377    RAMBlock *last_seen_block;
 378    /* Last dirty target page we have sent */
 379    ram_addr_t last_page;
 380    /* last ram version we have seen */
 381    uint32_t last_version;
  382    /* How many times we have seen too many dirty pages in a period */
 383    int dirty_rate_high_cnt;
 384    /* these variables are used for bitmap sync */
 385    /* last time we did a full bitmap_sync */
 386    int64_t time_last_bitmap_sync;
 387    /* bytes transferred at start_time */
 388    uint64_t bytes_xfer_prev;
 389    /* number of dirty pages since start_time */
 390    uint64_t num_dirty_pages_period;
 391    /* xbzrle misses since the beginning of the period */
 392    uint64_t xbzrle_cache_miss_prev;
 393    /* Amount of xbzrle pages since the beginning of the period */
 394    uint64_t xbzrle_pages_prev;
 395    /* Amount of xbzrle encoded bytes since the beginning of the period */
 396    uint64_t xbzrle_bytes_prev;
 397    /* Are we really using XBZRLE (e.g., after the first round). */
 398    bool xbzrle_started;
 399    /* Are we on the last stage of migration */
 400    bool last_stage;
 401
 402    /* total handled target pages at the beginning of period */
 403    uint64_t target_page_count_prev;
 404    /* total handled target pages since start */
 405    uint64_t target_page_count;
 406    /* number of dirty bits in the bitmap */
 407    uint64_t migration_dirty_pages;
 408    /*
 409     * Protects:
 410     * - dirty/clear bitmap
 411     * - migration_dirty_pages
 412     * - pss structures
 413     */
 414    QemuMutex bitmap_mutex;
 415    /* The RAMBlock used in the last src_page_requests */
 416    RAMBlock *last_req_rb;
 417    /* Queue of outstanding page requests from the destination */
 418    QemuMutex src_page_req_mutex;
 419    QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
 420
 421    /*
 422     * This is only used when postcopy is in recovery phase, to communicate
 423     * between the migration thread and the return path thread on dirty
 424     * bitmap synchronizations.  This field is unused in other stages of
 425     * RAM migration.
 426     */
 427    unsigned int postcopy_bmap_sync_requested;
 428    /*
 429     * Page hint during postcopy when preempt mode is on.  Return path
 430     * thread sets it, while background migration thread consumes it.
 431     *
 432     * Protected by @bitmap_mutex.
 433     */
 434    PageLocationHint page_hint;
 435};
 436typedef struct RAMState RAMState;
 437
 438static RAMState *ram_state;
 439
 440static NotifierWithReturnList precopy_notifier_list;
 441
 442/* Whether postcopy has queued requests? */
 443static bool postcopy_has_request(RAMState *rs)
 444{
 445    return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests);
 446}
 447
 448void precopy_infrastructure_init(void)
 449{
 450    notifier_with_return_list_init(&precopy_notifier_list);
 451}
 452
 453void precopy_add_notifier(NotifierWithReturn *n)
 454{
 455    notifier_with_return_list_add(&precopy_notifier_list, n);
 456}
 457
 458void precopy_remove_notifier(NotifierWithReturn *n)
 459{
 460    notifier_with_return_remove(n);
 461}
 462
 463int precopy_notify(PrecopyNotifyReason reason, Error **errp)
 464{
 465    PrecopyNotifyData pnd;
 466    pnd.reason = reason;
 467
 468    return notifier_with_return_list_notify(&precopy_notifier_list, &pnd, errp);
 469}
 470
 471uint64_t ram_bytes_remaining(void)
 472{
 473    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
 474                       0;
 475}
 476
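/*
 * Account transferred bytes to the stat matching the current phase:
 * precopy while the VM is still running, postcopy after the switchover,
 * otherwise the downtime (blackout) phase.
 */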
 477void ram_transferred_add(uint64_t bytes)
 478{
 479    if (runstate_is_running()) {
 480        stat64_add(&mig_stats.precopy_bytes, bytes);
 481    } else if (migration_in_postcopy()) {
 482        stat64_add(&mig_stats.postcopy_bytes, bytes);
 483    } else {
 484        stat64_add(&mig_stats.downtime_bytes, bytes);
 485    }
 486}
 487
 488static int ram_save_host_page_urgent(PageSearchStatus *pss);
 489
 490/* NOTE: page is the PFN not real ram_addr_t. */
 491static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page)
 492{
 493    pss->block = rb;
 494    pss->page = page;
 495    pss->complete_round = false;
 496}
 497
 498/*
 499 * Check whether two PSSs are actively sending the same page.  Return true
 500 * if it is, false otherwise.
 501 */
 502static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2)
 503{
 504    return pss1->host_page_sending && pss2->host_page_sending &&
 505        (pss1->host_page_start == pss2->host_page_start);
 506}
 507
 508/**
 509 * save_page_header: write page header to wire
 510 *
 511 * If this is the 1st block, it also writes the block identification
 512 *
 513 * Returns the number of bytes written
 514 *
 515 * @pss: current PSS channel status
 516 * @block: block that contains the page we want to send
 517 * @offset: offset inside the block for the page
 518 *          in the lower bits, it contains flags
 519 */
 520static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f,
 521                               RAMBlock *block, ram_addr_t offset)
 522{
 523    size_t size, len;
 524    bool same_block = (block == pss->last_sent_block);
 525
 526    if (same_block) {
 527        offset |= RAM_SAVE_FLAG_CONTINUE;
 528    }
 529    qemu_put_be64(f, offset);
 530    size = 8;
 531
 532    if (!same_block) {
 533        len = strlen(block->idstr);
 534        qemu_put_byte(f, len);
 535        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
 536        size += 1 + len;
 537        pss->last_sent_block = block;
 538    }
 539    return size;
 540}
 541
 542/**
 543 * mig_throttle_guest_down: throttle down the guest
 544 *
 545 * Reduce amount of guest cpu execution to hopefully slow down memory
 546 * writes. If guest dirty memory rate is reduced below the rate at
 547 * which we can transfer pages to the destination then we should be
 548 * able to complete migration. Some workloads dirty memory way too
 549 * fast and will not effectively converge, even with auto-converge.
 550 */
 551static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
 552                                    uint64_t bytes_dirty_threshold)
 553{
 554    uint64_t pct_initial = migrate_cpu_throttle_initial();
 555    uint64_t pct_increment = migrate_cpu_throttle_increment();
 556    bool pct_tailslow = migrate_cpu_throttle_tailslow();
 557    int pct_max = migrate_max_cpu_throttle();
 558
 559    uint64_t throttle_now = cpu_throttle_get_percentage();
 560    uint64_t cpu_now, cpu_ideal, throttle_inc;
 561
 562    /* We have not started throttling yet. Let's start it. */
 563    if (!cpu_throttle_active()) {
 564        cpu_throttle_set(pct_initial);
 565    } else {
 566        /* Throttling already on, just increase the rate */
 567        if (!pct_tailslow) {
 568            throttle_inc = pct_increment;
 569        } else {
 570            /* Compute the ideal CPU percentage used by Guest, which may
 571             * make the dirty rate match the dirty rate threshold. */
 572            cpu_now = 100 - throttle_now;
 573            cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
 574                        bytes_dirty_period);
 575            throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
 576        }
 577        cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
 578    }
 579}
 580
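/*
 * Restart the dirty-rate measurement period: reset the period counters
 * and snapshot the bytes transferred so far as the new baseline.
 */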
 581void mig_throttle_counter_reset(void)
 582{
 583    RAMState *rs = ram_state;
 584
 585    rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
 586    rs->num_dirty_pages_period = 0;
 587    rs->bytes_xfer_prev = migration_transferred_bytes();
 588}
 589
 590/**
 591 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 592 *
 593 * @current_addr: address for the zero page
 594 *
 595 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 596 * The important thing is that a stale (not-yet-0'd) page be replaced
 597 * by the new data.
 598 * As a bonus, if the page wasn't in the cache it gets added so that
 599 * when a small write is made into the 0'd page it gets XBZRLE sent.
 600 */
 601static void xbzrle_cache_zero_page(ram_addr_t current_addr)
 602{
 603    /* We don't care if this fails to allocate a new cache page
 604     * as long as it updated an old one */
 605    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
 606                 stat64_get(&mig_stats.dirty_sync_count));
 607}
 608
 609#define ENCODING_FLAG_XBZRLE 0x1
 610
 611/**
 612 * save_xbzrle_page: compress and send current page
 613 *
 614 * Returns: 1 means that we wrote the page
 615 *          0 means that page is identical to the one already sent
 616 *          -1 means that xbzrle would be longer than normal
 617 *
 618 * @rs: current RAM state
 619 * @pss: current PSS channel
 620 * @current_data: pointer to the address of the page contents
 621 * @current_addr: addr of the page
 622 * @block: block that contains the page we want to send
 623 * @offset: offset inside the block for the page
 624 */
 625static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
 626                            uint8_t **current_data, ram_addr_t current_addr,
 627                            RAMBlock *block, ram_addr_t offset)
 628{
 629    int encoded_len = 0, bytes_xbzrle;
 630    uint8_t *prev_cached_page;
 631    QEMUFile *file = pss->pss_channel;
 632    uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
 633
 634    if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) {
 635        xbzrle_counters.cache_miss++;
 636        if (!rs->last_stage) {
 637            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
 638                             generation) == -1) {
 639                return -1;
 640            } else {
 641                /* update *current_data when the page has been
 642                   inserted into cache */
 643                *current_data = get_cached_data(XBZRLE.cache, current_addr);
 644            }
 645        }
 646        return -1;
 647    }
 648
 649    /*
 650     * Reaching here means the page has hit the xbzrle cache, no matter what
 651     * encoding result it is (normal encoding, overflow or skipping the page),
 652     * count the page as encoded. This is used to calculate the encoding rate.
 653     *
 654     * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
 655     * 2nd page turns out to be skipped (i.e. no new bytes written to the
 656     * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
 657     * skipped page included. In this way, the encoding rate can tell if the
 658     * guest page is good for xbzrle encoding.
 659     */
 660    xbzrle_counters.pages++;
 661    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
 662
 663    /* save current buffer into memory */
 664    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
 665
 666    /* XBZRLE encoding (if there is no overflow) */
 667    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
 668                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
 669                                       TARGET_PAGE_SIZE);
 670
 671    /*
 672     * Update the cache contents, so that it corresponds to the data
 673     * sent, in all cases except where we skip the page.
 674     */
 675    if (!rs->last_stage && encoded_len != 0) {
 676        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
 677        /*
 678         * In the case where we couldn't compress, ensure that the caller
 679         * sends the data from the cache, since the guest might have
 680         * changed the RAM since we copied it.
 681         */
 682        *current_data = prev_cached_page;
 683    }
 684
 685    if (encoded_len == 0) {
 686        trace_save_xbzrle_page_skipping();
 687        return 0;
 688    } else if (encoded_len == -1) {
 689        trace_save_xbzrle_page_overflow();
 690        xbzrle_counters.overflow++;
 691        xbzrle_counters.bytes += TARGET_PAGE_SIZE;
 692        return -1;
 693    }
 694
 695    /* Send XBZRLE based compressed page */
 696    bytes_xbzrle = save_page_header(pss, pss->pss_channel, block,
 697                                    offset | RAM_SAVE_FLAG_XBZRLE);
 698    qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
 699    qemu_put_be16(file, encoded_len);
 700    qemu_put_buffer(file, XBZRLE.encoded_buf, encoded_len);
 701    bytes_xbzrle += encoded_len + 1 + 2;
 702    /*
 703     * The xbzrle encoded bytes don't count the 8 byte header with
 704     * RAM_SAVE_FLAG_CONTINUE.
 705     */
 706    xbzrle_counters.bytes += bytes_xbzrle - 8;
 707    ram_transferred_add(bytes_xbzrle);
 708
 709    return 1;
 710}
 711
 712/**
 713 * pss_find_next_dirty: find the next dirty page of current ramblock
 714 *
 715 * This function updates pss->page to point to the next dirty page index
 716 * within the ramblock to migrate, or the end of ramblock when nothing
  717 * found.  Note that when pss->host_page_sending==true it means we're
  718 * in the middle of sending a host page, so we won't look for dirty
  719 * pages outside the host page boundary.
 720 *
 721 * @pss: the current page search status
 722 */
 723static void pss_find_next_dirty(PageSearchStatus *pss)
 724{
 725    RAMBlock *rb = pss->block;
 726    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
 727    unsigned long *bitmap = rb->bmap;
 728
 729    if (migrate_ram_is_ignored(rb)) {
 730        /* Points directly to the end, so we know no dirty page */
 731        pss->page = size;
 732        return;
 733    }
 734
 735    /*
  736     * While sending a host page, only look for dirty pages within the
  737     * current host page being sent.
 738     */
 739    if (pss->host_page_sending) {
 740        assert(pss->host_page_end);
 741        size = MIN(size, pss->host_page_end);
 742    }
 743
 744    pss->page = find_next_bit(bitmap, size, pss->page);
 745}
 746
 747static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb,
 748                                                       unsigned long page)
 749{
 750    uint8_t shift;
 751    hwaddr size, start;
 752
 753    if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) {
 754        return;
 755    }
 756
 757    shift = rb->clear_bmap_shift;
 758    /*
 759     * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
 760     * can make things easier sometimes since then start address
 761     * of the small chunk will always be 64 pages aligned so the
 762     * bitmap will always be aligned to unsigned long. We should
 763     * even be able to remove this restriction but I'm simply
 764     * keeping it.
 765     */
 766    assert(shift >= 6);
 767
 768    size = 1ULL << (TARGET_PAGE_BITS + shift);
 769    start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size);
 770    trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
 771    memory_region_clear_dirty_bitmap(rb->mr, start, size);
 772}
 773
 774static void
 775migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb,
 776                                                 unsigned long start,
 777                                                 unsigned long npages)
 778{
 779    unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift;
 780    unsigned long chunk_start = QEMU_ALIGN_DOWN(start, chunk_pages);
 781    unsigned long chunk_end = QEMU_ALIGN_UP(start + npages, chunk_pages);
 782
 783    /*
 784     * Clear pages from start to start + npages - 1, so the end boundary is
 785     * exclusive.
 786     */
 787    for (i = chunk_start; i < chunk_end; i += chunk_pages) {
 788        migration_clear_memory_region_dirty_bitmap(rb, i);
 789    }
 790}
 791
 792/*
  793 * colo_bitmap_find_dirty: find contiguous dirty pages from start
  794 *
  795 * Returns the page offset within the memory region of the start of the
  796 * contiguous dirty pages
 797 *
 798 * @rs: current RAM state
 799 * @rb: RAMBlock where to search for dirty pages
 800 * @start: page where we start the search
 801 * @num: the number of contiguous dirty pages
 802 */
 803static inline
 804unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
 805                                     unsigned long start, unsigned long *num)
 806{
 807    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
 808    unsigned long *bitmap = rb->bmap;
 809    unsigned long first, next;
 810
 811    *num = 0;
 812
 813    if (migrate_ram_is_ignored(rb)) {
 814        return size;
 815    }
 816
 817    first = find_next_bit(bitmap, size, start);
 818    if (first >= size) {
 819        return first;
 820    }
 821    next = find_next_zero_bit(bitmap, size, first + 1);
 822    assert(next >= first);
 823    *num = next - first;
 824    return first;
 825}
 826
 827static inline bool migration_bitmap_clear_dirty(RAMState *rs,
 828                                                RAMBlock *rb,
 829                                                unsigned long page)
 830{
 831    bool ret;
 832
 833    /*
 834     * During the last stage (after source VM stopped), resetting the write
  835     * protections isn't needed as we know there will either be (1) no
  836     * further writes if migration completes, or (2) migration fails in
  837     * the end, in which case tracking isn't needed either.
 838     *
 839     * Do the same for postcopy due to the same reason.
 840     */
 841    if (!rs->last_stage && !migration_in_postcopy()) {
 842        /*
 843         * Clear dirty bitmap if needed.  This _must_ be called before we
  844         * send any page in the chunk because we need to make sure we can
  845         * capture further page content changes when we sync the dirty log
  846         * the next time.  So as long as we are going to send any page in
  847         * the chunk, we clear the remote dirty bitmap for all of them.
  848         * Clearing it earlier won't be a problem, but clearing it too late will.
 849         */
 850        migration_clear_memory_region_dirty_bitmap(rb, page);
 851    }
 852
 853    ret = test_and_clear_bit(page, rb->bmap);
 854    if (ret) {
 855        rs->migration_dirty_pages--;
 856    }
 857
 858    return ret;
 859}
 860
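/*
 * RamDiscardManager replay callback: clear the dirty bitmap bits covering
 * one discarded section and accumulate the number of cleared bits into the
 * counter pointed to by @opaque.
 */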
 861static int dirty_bitmap_clear_section(MemoryRegionSection *section,
 862                                      void *opaque)
 863{
 864    const hwaddr offset = section->offset_within_region;
 865    const hwaddr size = int128_get64(section->size);
 866    const unsigned long start = offset >> TARGET_PAGE_BITS;
 867    const unsigned long npages = size >> TARGET_PAGE_BITS;
 868    RAMBlock *rb = section->mr->ram_block;
 869    uint64_t *cleared_bits = opaque;
 870
 871    /*
 872     * We don't grab ram_state->bitmap_mutex because we expect to run
 873     * only when starting migration or during postcopy recovery where
 874     * we don't have concurrent access.
 875     */
 876    if (!migration_in_postcopy() && !migrate_background_snapshot()) {
 877        migration_clear_memory_region_dirty_bitmap_range(rb, start, npages);
 878    }
 879    *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages);
 880    bitmap_clear(rb->bmap, start, npages);
 881    return 0;
 882}
 883
 884/*
 885 * Exclude all dirty pages from migration that fall into a discarded range as
 886 * managed by a RamDiscardManager responsible for the mapped memory region of
 887 * the RAMBlock. Clear the corresponding bits in the dirty bitmaps.
 888 *
 889 * Discarded pages ("logically unplugged") have undefined content and must
 890 * not get migrated, because even reading these pages for migration might
 891 * result in undesired behavior.
 892 *
 893 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
 894 *
 895 * Note: The result is only stable while migrating (precopy/postcopy).
 896 */
 897static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb)
 898{
 899    uint64_t cleared_bits = 0;
 900
 901    if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) {
 902        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
 903        MemoryRegionSection section = {
 904            .mr = rb->mr,
 905            .offset_within_region = 0,
 906            .size = int128_make64(qemu_ram_get_used_length(rb)),
 907        };
 908
 909        ram_discard_manager_replay_discarded(rdm, &section,
 910                                             dirty_bitmap_clear_section,
 911                                             &cleared_bits);
 912    }
 913    return cleared_bits;
 914}
 915
 916/*
 917 * Check if a host-page aligned page falls into a discarded range as managed by
 918 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
 919 *
 920 * Note: The result is only stable while migrating (precopy/postcopy).
 921 */
 922bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
 923{
 924    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
 925        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
 926        MemoryRegionSection section = {
 927            .mr = rb->mr,
 928            .offset_within_region = start,
 929            .size = int128_make64(qemu_ram_pagesize(rb)),
 930        };
 931
 932        return !ram_discard_manager_is_populated(rdm, &section);
 933    }
 934    return false;
 935}
 936
 937/* Called with RCU critical section */
 938static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
 939{
 940    uint64_t new_dirty_pages =
 941        cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);
 942
 943    rs->migration_dirty_pages += new_dirty_pages;
 944    rs->num_dirty_pages_period += new_dirty_pages;
 945}
 946
 947/**
 948 * ram_pagesize_summary: calculate all the pagesizes of a VM
 949 *
 950 * Returns a summary bitmap of the page sizes of all RAMBlocks
 951 *
 952 * For VMs with just normal pages this is equivalent to the host page
 953 * size. If it's got some huge pages then it's the OR of all the
 954 * different page sizes.
 955 */
 956uint64_t ram_pagesize_summary(void)
 957{
 958    RAMBlock *block;
 959    uint64_t summary = 0;
 960
 961    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
 962        summary |= block->page_size;
 963    }
 964
 965    return summary;
 966}
 967
 968uint64_t ram_get_total_transferred_pages(void)
 969{
 970    return stat64_get(&mig_stats.normal_pages) +
 971        stat64_get(&mig_stats.zero_pages) +
 972        xbzrle_counters.pages;
 973}
 974
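/*
 * Recompute per-period statistics at the end of a bitmap sync period: the
 * dirty pages rate and, when XBZRLE is in use, its cache miss rate and
 * encoding rate.
 */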
 975static void migration_update_rates(RAMState *rs, int64_t end_time)
 976{
 977    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
 978
 979    /* calculate period counters */
 980    stat64_set(&mig_stats.dirty_pages_rate,
 981               rs->num_dirty_pages_period * 1000 /
 982               (end_time - rs->time_last_bitmap_sync));
 983
 984    if (!page_count) {
 985        return;
 986    }
 987
 988    if (migrate_xbzrle()) {
 989        double encoded_size, unencoded_size;
 990
 991        xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
 992            rs->xbzrle_cache_miss_prev) / page_count;
 993        rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
 994        unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) *
 995                         TARGET_PAGE_SIZE;
 996        encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev;
 997        if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) {
 998            xbzrle_counters.encoding_rate = 0;
 999        } else {
1000            xbzrle_counters.encoding_rate = unencoded_size / encoded_size;
1001        }
1002        rs->xbzrle_pages_prev = xbzrle_counters.pages;
1003        rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
1004    }
1005}
1006
1007/*
1008 * Enable dirty-limit to throttle down the guest
1009 */
1010static void migration_dirty_limit_guest(void)
1011{
1012    /*
1013     * dirty page rate quota for all vCPUs fetched from
1014     * migration parameter 'vcpu_dirty_limit'
1015     */
1016    static int64_t quota_dirtyrate;
1017    MigrationState *s = migrate_get_current();
1018
1019    /*
 1020     * If the dirty limit is already enabled and the migration parameter
 1021     * vcpu-dirty-limit is untouched, there is nothing to do.
1022     */
1023    if (dirtylimit_in_service() &&
1024        quota_dirtyrate == s->parameters.vcpu_dirty_limit) {
1025        return;
1026    }
1027
1028    quota_dirtyrate = s->parameters.vcpu_dirty_limit;
1029
1030    /*
 1031     * Set a quota dirty rate for all vCPUs; note that the second
 1032     * parameter is ignored when the limit is applied to all vCPUs of the VM.
1033     */
1034    qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL);
1035    trace_migration_dirty_limit_guest(quota_dirtyrate);
1036}
1037
1038static void migration_trigger_throttle(RAMState *rs)
1039{
1040    uint64_t threshold = migrate_throttle_trigger_threshold();
1041    uint64_t bytes_xfer_period =
1042        migration_transferred_bytes() - rs->bytes_xfer_prev;
1043    uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
1044    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
1045
1046    /*
1047     * The following detection logic can be refined later. For now:
1048     * Check to see if the ratio between dirtied bytes and the approx.
1049     * amount of bytes that just got transferred since the last time
1050     * we were in this routine reaches the threshold. If that happens
1051     * twice, start or increase throttling.
1052     */
1053    if ((bytes_dirty_period > bytes_dirty_threshold) &&
1054        (++rs->dirty_rate_high_cnt >= 2)) {
1055        rs->dirty_rate_high_cnt = 0;
1056        if (migrate_auto_converge()) {
1057            trace_migration_throttle();
1058            mig_throttle_guest_down(bytes_dirty_period,
1059                                    bytes_dirty_threshold);
1060        } else if (migrate_dirty_limit()) {
1061            migration_dirty_limit_guest();
1062        }
1063    }
1064}
1065
1066static void migration_bitmap_sync(RAMState *rs, bool last_stage)
1067{
1068    RAMBlock *block;
1069    int64_t end_time;
1070
1071    stat64_add(&mig_stats.dirty_sync_count, 1);
1072
1073    if (!rs->time_last_bitmap_sync) {
1074        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1075    }
1076
1077    trace_migration_bitmap_sync_start();
1078    memory_global_dirty_log_sync(last_stage);
1079
1080    WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) {
1081        WITH_RCU_READ_LOCK_GUARD() {
1082            RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1083                ramblock_sync_dirty_bitmap(rs, block);
1084            }
1085            stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining());
1086        }
1087    }
1088
1089    memory_global_after_dirty_log_sync();
1090    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1091
1092    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1093
 1094    /* more than 1 second = 1000 milliseconds */
1095    if (end_time > rs->time_last_bitmap_sync + 1000) {
1096        migration_trigger_throttle(rs);
1097
1098        migration_update_rates(rs, end_time);
1099
1100        rs->target_page_count_prev = rs->target_page_count;
1101
1102        /* reset period counters */
1103        rs->time_last_bitmap_sync = end_time;
1104        rs->num_dirty_pages_period = 0;
1105        rs->bytes_xfer_prev = migration_transferred_bytes();
1106    }
1107    if (migrate_events()) {
1108        uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
1109        qapi_event_send_migration_pass(generation);
1110    }
1111}
1112
1113void migration_bitmap_sync_precopy(bool last_stage)
1114{
1115    Error *local_err = NULL;
1116    assert(ram_state);
1117
1118    /*
1119     * The current notifier usage is just an optimization to migration, so we
1120     * don't stop the normal migration process in the error case.
1121     */
1122    if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
1123        error_report_err(local_err);
1124        local_err = NULL;
1125    }
1126
1127    migration_bitmap_sync(ram_state, last_stage);
1128
1129    if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1130        error_report_err(local_err);
1131    }
1132}
1133
1134void ram_release_page(const char *rbname, uint64_t offset)
1135{
1136    if (!migrate_release_ram() || !migration_in_postcopy()) {
1137        return;
1138    }
1139
1140    ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
1141}
1142
1143/**
1144 * save_zero_page: send the zero page to the stream
1145 *
 1146 * Returns >0 if the page was all zeros and has been handled, 0 otherwise.
1147 *
1148 * @rs: current RAM state
1149 * @pss: current PSS channel
1150 * @offset: offset inside the block for the page
1151 */
1152static int save_zero_page(RAMState *rs, PageSearchStatus *pss,
1153                          ram_addr_t offset)
1154{
1155    uint8_t *p = pss->block->host + offset;
1156    QEMUFile *file = pss->pss_channel;
1157    int len = 0;
1158
1159    if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_NONE) {
1160        return 0;
1161    }
1162
1163    if (!buffer_is_zero(p, TARGET_PAGE_SIZE)) {
1164        return 0;
1165    }
1166
1167    stat64_add(&mig_stats.zero_pages, 1);
1168
1169    if (migrate_mapped_ram()) {
1170        /* zero pages are not transferred with mapped-ram */
1171        clear_bit_atomic(offset >> TARGET_PAGE_BITS, pss->block->file_bmap);
1172        return 1;
1173    }
1174
1175    len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO);
1176    qemu_put_byte(file, 0);
1177    len += 1;
1178    ram_release_page(pss->block->idstr, offset);
1179    ram_transferred_add(len);
1180
1181    /*
1182     * Must let xbzrle know, otherwise a previous (now 0'd) cached
1183     * page would be stale.
1184     */
1185    if (rs->xbzrle_started) {
1186        XBZRLE_cache_lock();
1187        xbzrle_cache_zero_page(pss->block->offset + offset);
1188        XBZRLE_cache_unlock();
1189    }
1190
1191    return len;
1192}
1193
1194/*
1195 * directly send the page to the stream
1196 *
1197 * Returns the number of pages written.
1198 *
1199 * @pss: current PSS channel
1200 * @block: block that contains the page we want to send
1201 * @offset: offset inside the block for the page
1202 * @buf: the page to be sent
 1203 * @async: send the page asynchronously
1204 */
1205static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
1206                            ram_addr_t offset, uint8_t *buf, bool async)
1207{
1208    QEMUFile *file = pss->pss_channel;
1209
1210    if (migrate_mapped_ram()) {
1211        qemu_put_buffer_at(file, buf, TARGET_PAGE_SIZE,
1212                           block->pages_offset + offset);
1213        set_bit(offset >> TARGET_PAGE_BITS, block->file_bmap);
1214    } else {
1215        ram_transferred_add(save_page_header(pss, pss->pss_channel, block,
1216                                             offset | RAM_SAVE_FLAG_PAGE));
1217        if (async) {
1218            qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
1219                                  migrate_release_ram() &&
1220                                  migration_in_postcopy());
1221        } else {
1222            qemu_put_buffer(file, buf, TARGET_PAGE_SIZE);
1223        }
1224    }
1225    ram_transferred_add(TARGET_PAGE_SIZE);
1226    stat64_add(&mig_stats.normal_pages, 1);
1227    return 1;
1228}
1229
1230/**
1231 * ram_save_page: send the given page to the stream
1232 *
1233 * Returns the number of pages written.
1234 *          < 0 - error
1235 *          >=0 - Number of pages written - this might legally be 0
1236 *                if xbzrle noticed the page was the same.
1237 *
1238 * @rs: current RAM state
 1239 * @pss: data about the page we want to send
1241 */
1242static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
1243{
1244    int pages = -1;
1245    uint8_t *p;
1246    bool send_async = true;
1247    RAMBlock *block = pss->block;
1248    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
1249    ram_addr_t current_addr = block->offset + offset;
1250
1251    p = block->host + offset;
1252    trace_ram_save_page(block->idstr, (uint64_t)offset, p);
1253
1254    XBZRLE_cache_lock();
1255    if (rs->xbzrle_started && !migration_in_postcopy()) {
1256        pages = save_xbzrle_page(rs, pss, &p, current_addr,
1257                                 block, offset);
1258        if (!rs->last_stage) {
1259            /* Can't send this cached data async, since the cache page
1260             * might get updated before it gets to the wire
1261             */
1262            send_async = false;
1263        }
1264    }
1265
1266    /* XBZRLE overflow or normal page */
1267    if (pages == -1) {
1268        pages = save_normal_page(pss, block, offset, p, send_async);
1269    }
1270
1271    XBZRLE_cache_unlock();
1272
1273    return pages;
1274}
1275
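/*
 * Hand the page over to the multifd send threads.  Returns 1 on success,
 * or -1 if the page could not be queued.
 */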
1276static int ram_save_multifd_page(RAMBlock *block, ram_addr_t offset)
1277{
1278    if (!multifd_queue_page(block, offset)) {
1279        return -1;
1280    }
1281
1282    return 1;
1283}
1284
1285
1286#define PAGE_ALL_CLEAN 0
1287#define PAGE_TRY_AGAIN 1
1288#define PAGE_DIRTY_FOUND 2
1289/**
1290 * find_dirty_block: find the next dirty page and update any state
1291 * associated with the search process.
1292 *
1293 * Returns:
1294 *         <0: An error happened
1295 *         PAGE_ALL_CLEAN: no dirty page found, give up
1296 *         PAGE_TRY_AGAIN: no dirty page found, retry for next block
1297 *         PAGE_DIRTY_FOUND: dirty page found
1298 *
1299 * @rs: current RAM state
1300 * @pss: data about the state of the current dirty page scan
1302 */
1303static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
1304{
1305    /* Update pss->page for the next dirty bit in ramblock */
1306    pss_find_next_dirty(pss);
1307
1308    if (pss->complete_round && pss->block == rs->last_seen_block &&
1309        pss->page >= rs->last_page) {
1310        /*
1311         * We've been once around the RAM and haven't found anything.
1312         * Give up.
1313         */
1314        return PAGE_ALL_CLEAN;
1315    }
1316    if (!offset_in_ramblock(pss->block,
1317                            ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) {
1318        /* Didn't find anything in this RAM Block */
1319        pss->page = 0;
1320        pss->block = QLIST_NEXT_RCU(pss->block, next);
1321        if (!pss->block) {
1322            if (multifd_ram_sync_per_round()) {
1323                QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
1324                int ret = multifd_ram_flush_and_sync(f);
1325                if (ret < 0) {
1326                    return ret;
1327                }
1328            }
1329
1330            /* Hit the end of the list */
1331            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1332            /* Flag that we've looped */
1333            pss->complete_round = true;
1334            /* After the first round, enable XBZRLE. */
1335            if (migrate_xbzrle()) {
1336                rs->xbzrle_started = true;
1337            }
1338        }
1339        /* Didn't find anything this time, but try again on the new block */
1340        return PAGE_TRY_AGAIN;
1341    } else {
1342        /* We've found something */
1343        return PAGE_DIRTY_FOUND;
1344    }
1345}
1346
1347/**
 1348 * unqueue_page: gets a page off the queue
1349 *
1350 * Helper for 'get_queued_page' - gets a page off the queue
1351 *
1352 * Returns the block of the page (or NULL if none available)
1353 *
1354 * @rs: current RAM state
1355 * @offset: used to return the offset within the RAMBlock
1356 */
1357static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
1358{
1359    struct RAMSrcPageRequest *entry;
1360    RAMBlock *block = NULL;
1361
1362    if (!postcopy_has_request(rs)) {
1363        return NULL;
1364    }
1365
1366    QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
1367
1368    /*
1369     * This should _never_ change even after we take the lock, because no one
1370     * should be taking anything off the request list other than us.
1371     */
1372    assert(postcopy_has_request(rs));
1373
1374    entry = QSIMPLEQ_FIRST(&rs->src_page_requests);
1375    block = entry->rb;
1376    *offset = entry->offset;
1377
1378    if (entry->len > TARGET_PAGE_SIZE) {
1379        entry->len -= TARGET_PAGE_SIZE;
1380        entry->offset += TARGET_PAGE_SIZE;
1381    } else {
1382        memory_region_unref(block->mr);
1383        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1384        g_free(entry);
1385        migration_consume_urgent_request();
1386    }
1387
1388    return block;
1389}
1390
1391#if defined(__linux__)
1392/**
1393 * poll_fault_page: try to get next UFFD write fault page and, if pending fault
1394 *   is found, return RAM block pointer and page offset
1395 *
1396 * Returns pointer to the RAMBlock containing faulting page,
1397 *   NULL if no write faults are pending
1398 *
1399 * @rs: current RAM state
1400 * @offset: page offset from the beginning of the block
1401 */
1402static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
1403{
1404    struct uffd_msg uffd_msg;
1405    void *page_address;
1406    RAMBlock *block;
1407    int res;
1408
1409    if (!migrate_background_snapshot()) {
1410        return NULL;
1411    }
1412
1413    res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1);
1414    if (res <= 0) {
1415        return NULL;
1416    }
1417
1418    page_address = (void *)(uintptr_t) uffd_msg.arg.pagefault.address;
1419    block = qemu_ram_block_from_host(page_address, false, offset);
1420    assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0);
1421    return block;
1422}
1423
1424/**
1425 * ram_save_release_protection: release UFFD write protection after
1426 *   a range of pages has been saved
1427 *
1428 * @rs: current RAM state
1429 * @pss: page-search-status structure
1430 * @start_page: index of the first page in the range relative to pss->block
1431 *
1432 * Returns 0 on success, negative value in case of an error
 1433 */
1434static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
1435        unsigned long start_page)
1436{
1437    int res = 0;
1438
1439    /* Check if page is from UFFD-managed region. */
1440    if (pss->block->flags & RAM_UF_WRITEPROTECT) {
1441        void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS);
1442        uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS;
1443
1444        /* Flush async buffers before un-protect. */
1445        qemu_fflush(pss->pss_channel);
1446        /* Un-protect memory range. */
1447        res = uffd_change_protection(rs->uffdio_fd, page_address, run_length,
1448                false, false);
1449    }
1450
1451    return res;
1452}
1453
1454/* ram_write_tracking_available: check if kernel supports required UFFD features
1455 *
 1456 * Returns true if supported, false otherwise
1457 */
1458bool ram_write_tracking_available(void)
1459{
1460    uint64_t uffd_features;
1461    int res;
1462
1463    res = uffd_query_features(&uffd_features);
1464    return (res == 0 &&
1465            (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP) != 0);
1466}
1467
1468/* ram_write_tracking_compatible: check if guest configuration is
1469 *   compatible with 'write-tracking'
1470 *
1471 * Returns true if compatible, false otherwise
1472 */
1473bool ram_write_tracking_compatible(void)
1474{
1475    const uint64_t uffd_ioctls_mask = BIT(_UFFDIO_WRITEPROTECT);
1476    int uffd_fd;
1477    RAMBlock *block;
1478    bool ret = false;
1479
1480    /* Open UFFD file descriptor */
1481    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, false);
1482    if (uffd_fd < 0) {
1483        return false;
1484    }
1485
1486    RCU_READ_LOCK_GUARD();
1487
1488    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1489        uint64_t uffd_ioctls;
1490
1491        /* Nothing to do with read-only and MMIO-writable regions */
1492        if (block->mr->readonly || block->mr->rom_device) {
1493            continue;
1494        }
1495        /* Try to register block memory via UFFD-IO to track writes */
1496        if (uffd_register_memory(uffd_fd, block->host, block->max_length,
1497                UFFDIO_REGISTER_MODE_WP, &uffd_ioctls)) {
1498            goto out;
1499        }
1500        if ((uffd_ioctls & uffd_ioctls_mask) != uffd_ioctls_mask) {
1501            goto out;
1502        }
1503    }
1504    ret = true;
1505
1506out:
1507    uffd_close_fd(uffd_fd);
1508    return ret;
1509}
1510
1511static inline void populate_read_range(RAMBlock *block, ram_addr_t offset,
1512                                       ram_addr_t size)
1513{
1514    const ram_addr_t end = offset + size;
1515
1516    /*
1517     * We read one byte of each page; this will preallocate page tables if
1518     * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
 1519     * where no page was populated yet. This might require adaptation when
1520     * supporting other mappings, like shmem.
1521     */
1522    for (; offset < end; offset += block->page_size) {
1523        char tmp = *((char *)block->host + offset);
1524
1525        /* Don't optimize the read out */
1526        asm volatile("" : "+r" (tmp));
1527    }
1528}
1529
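/*
 * RamDiscardManager replay callback: populate one section of the RAMBlock
 * by touching every page in it.
 */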
1530static inline int populate_read_section(MemoryRegionSection *section,
1531                                        void *opaque)
1532{
1533    const hwaddr size = int128_get64(section->size);
1534    hwaddr offset = section->offset_within_region;
1535    RAMBlock *block = section->mr->ram_block;
1536
1537    populate_read_range(block, offset, size);
1538    return 0;
1539}
1540
1541/*
1542 * ram_block_populate_read: preallocate page tables and populate pages in the
1543 *   RAM block by reading a byte of each page.
1544 *
1545 * Since it's solely used for userfault_fd WP feature, here we just
1546 *   hardcode page size to qemu_real_host_page_size.
1547 *
 1548 * @rb: RAM block to populate
1549 */
1550static void ram_block_populate_read(RAMBlock *rb)
1551{
1552    /*
1553     * Skip populating all pages that fall into a discarded range as managed by
1554     * a RamDiscardManager responsible for the mapped memory region of the
1555     * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
1556     * must not get populated automatically. We don't have to track
1557     * modifications via userfaultfd WP reliably, because these pages will
1558     * not be part of the migration stream either way -- see
 1559     * ramblock_dirty_bitmap_clear_discarded_pages().
1560     *
1561     * Note: The result is only stable while migrating (precopy/postcopy).
1562     */
1563    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
1564        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
1565        MemoryRegionSection section = {
1566            .mr = rb->mr,
1567            .offset_within_region = 0,
1568            .size = rb->mr->size,
1569        };
1570
1571        ram_discard_manager_replay_populated(rdm, &section,
1572                                             populate_read_section, NULL);
1573    } else {
1574        populate_read_range(rb, 0, rb->used_length);
1575    }
1576}
1577
1578/*
1579 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
1580 */
1581void ram_write_tracking_prepare(void)
1582{
1583    RAMBlock *block;
1584
1585    RCU_READ_LOCK_GUARD();
1586
1587    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1588        /* Nothing to do with read-only and MMIO-writable regions */
1589        if (block->mr->readonly || block->mr->rom_device) {
1590            continue;
1591        }
1592
1593        /*
1594         * Populate pages of the RAM block before enabling userfault_fd
1595         * write protection.
1596         *
1597         * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
1598         * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
1599         * pages with pte_none() entries in page table.
1600         */
1601        ram_block_populate_read(block);
1602    }
1603}
1604
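    /*
     * uffd_protect_section: callback for ram_discard_manager_replay_populated();
     * enables userfaultfd write protection on the given section.
     */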
1605static inline int uffd_protect_section(MemoryRegionSection *section,
1606                                       void *opaque)
1607{
1608    const hwaddr size = int128_get64(section->size);
1609    const hwaddr offset = section->offset_within_region;
1610    RAMBlock *rb = section->mr->ram_block;
1611    int uffd_fd = (uintptr_t)opaque;
1612
1613    return uffd_change_protection(uffd_fd, rb->host + offset, size, true,
1614                                  false);
1615}
1616
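    /*
     * ram_block_uffd_protect: enable UFFD write protection on a RAMBlock,
     * skipping ranges discarded by a RamDiscardManager (see
     * ram_block_populate_read() for the rationale).
     */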
1617static int ram_block_uffd_protect(RAMBlock *rb, int uffd_fd)
1618{
1619    assert(rb->flags & RAM_UF_WRITEPROTECT);
1620
1621    /* See ram_block_populate_read() */
1622    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
1623        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
1624        MemoryRegionSection section = {
1625            .mr = rb->mr,
1626            .offset_within_region = 0,
1627            .size = rb->mr->size,
1628        };
1629
1630        return ram_discard_manager_replay_populated(rdm, &section,
1631                                                    uffd_protect_section,
1632                                                    (void *)(uintptr_t)uffd_fd);
1633    }
1634    return uffd_change_protection(uffd_fd, rb->host,
1635                                  rb->used_length, true, false);
1636}
1637
1638/*
1639 * ram_write_tracking_start: start UFFD-WP memory tracking
1640 *
1641 * Returns 0 for success or negative value in case of error
1642 */
1643int ram_write_tracking_start(void)
1644{
1645    int uffd_fd;
1646    RAMState *rs = ram_state;
1647    RAMBlock *block;
1648
1649    /* Open UFFD file descriptor */
1650    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true);
1651    if (uffd_fd < 0) {
1652        return uffd_fd;
1653    }
1654    rs->uffdio_fd = uffd_fd;
1655
1656    RCU_READ_LOCK_GUARD();
1657
1658    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1659        /* Nothing to do for read-only and ROM device (MMIO-writable) regions */
1660        if (block->mr->readonly || block->mr->rom_device) {
1661            continue;
1662        }
1663
1664        /* Register block memory with UFFD to track writes */
1665        if (uffd_register_memory(rs->uffdio_fd, block->host,
1666                block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) {
1667            goto fail;
1668        }
1669        block->flags |= RAM_UF_WRITEPROTECT;
1670        memory_region_ref(block->mr);
1671
1672        /* Apply UFFD write protection to the block memory range */
1673        if (ram_block_uffd_protect(block, uffd_fd)) {
1674            goto fail;
1675        }
1676
1677        trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size,
1678                block->host, block->max_length);
1679    }
1680
1681    return 0;
1682
1683fail:
1684    error_report("ram_write_tracking_start() failed: restoring initial memory state");
1685
1686    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1687        if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
1688            continue;
1689        }
1690        uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
1691        /* Cleanup flags and remove reference */
1692        block->flags &= ~RAM_UF_WRITEPROTECT;
1693        memory_region_unref(block->mr);
1694    }
1695
1696    uffd_close_fd(uffd_fd);
1697    rs->uffdio_fd = -1;
1698    return -1;
1699}
1700
1701/**
1702 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
1703 */
1704void ram_write_tracking_stop(void)
1705{
1706    RAMState *rs = ram_state;
1707    RAMBlock *block;
1708
1709    RCU_READ_LOCK_GUARD();
1710
1711    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1712        if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
1713            continue;
1714        }
1715        uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
1716
1717        trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size,
1718                block->host, block->max_length);
1719
1720        /* Cleanup flags and remove reference */
1721        block->flags &= ~RAM_UF_WRITEPROTECT;
1722        memory_region_unref(block->mr);
1723    }
1724
1725    /* Finally close UFFD file descriptor */
1726    uffd_close_fd(rs->uffdio_fd);
1727    rs->uffdio_fd = -1;
1728}
1729
1730#else
1731/* No target OS support, stubs just fail or ignore */
1732
1733static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
1734{
1735    (void) rs;
1736    (void) offset;
1737
1738    return NULL;
1739}
1740
1741static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
1742        unsigned long start_page)
1743{
1744    (void) rs;
1745    (void) pss;
1746    (void) start_page;
1747
1748    return 0;
1749}
1750
1751bool ram_write_tracking_available(void)
1752{
1753    return false;
1754}
1755
1756bool ram_write_tracking_compatible(void)
1757{
1758    g_assert_not_reached();
1759}
1760
1761int ram_write_tracking_start(void)
1762{
1763    g_assert_not_reached();
1764}
1765
1766void ram_write_tracking_stop(void)
1767{
1768    g_assert_not_reached();
1769}
1770#endif /* defined(__linux__) */
1771
1772/**
1773 * get_queued_page: unqueue a page from the postcopy requests
1774 *
1775 * Skips pages that are already sent (!dirty)
1776 *
1777 * Returns true if a queued page is found
1778 *
1779 * @rs: current RAM state
1780 * @pss: data about the state of the current dirty page scan
1781 */
1782static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
1783{
1784    RAMBlock  *block;
1785    ram_addr_t offset;
1786    bool dirty = false;
1787
1788    do {
1789        block = unqueue_page(rs, &offset);
1790        /*
1791         * We're sending this page, and since it's postcopy nothing else
1792         * will dirty it, and we must make sure it doesn't get sent again
1793         * even if this queue request was received after the background
1794         * search already sent it.
1795         */
1796        if (block) {
1797            unsigned long page;
1798
1799            page = offset >> TARGET_PAGE_BITS;
1800            dirty = test_bit(page, block->bmap);
1801            if (!dirty) {
1802                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
1803                                                page);
1804            } else {
1805                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
1806            }
1807        }
1808
1809    } while (block && !dirty);
1810
1811    if (!block) {
1812        /*
1813         * Poll write faults too if background snapshot is enabled; that's
1814         * when vCPUs may get blocked on write-protected pages.
1815         */
1816        block = poll_fault_page(rs, &offset);
1817    }
1818
1819    if (block) {
1820        /*
1821         * We want the background search to continue from the queued page
1822         * since the guest is likely to want other pages near to the page
1823         * it just requested.
1824         */
1825        pss->block = block;
1826        pss->page = offset >> TARGET_PAGE_BITS;
1827
1828        /*
1829         * This unqueued page would break the "one round" check, even if
1830         * it's really rare.
1831         */
1832        pss->complete_round = false;
1833    }
1834
1835    return !!block;
1836}
1837
1838/**
1839 * migration_page_queue_free: drop any remaining pages in the ram
1840 * request queue
1841 *
1843 * It should be empty at the end anyway, but in error cases there may
1844 * be some left; if any pages remain, we drop them.
1844 *
1845 */
1846static void migration_page_queue_free(RAMState *rs)
1847{
1848    struct RAMSrcPageRequest *mspr, *next_mspr;
1849    /* This queue should generally be empty - but a failed migration
1850     * might have left some entries behind.
1851     */
1852    RCU_READ_LOCK_GUARD();
1853    QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
1854        memory_region_unref(mspr->rb->mr);
1855        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1856        g_free(mspr);
1857    }
1858}
1859
1860/**
1861 * ram_save_queue_pages: queue the page for transmission
1862 *
1863 * A request from postcopy destination for example.
1864 *
1865 * Returns zero on success or negative on error
1866 *
1867 * @rbname: Name of the RAMBlock of the request. NULL means the
1868 *          same as the last one.
1869 * @start: starting address from the start of the RAMBlock
1870 * @len: length (in bytes) to send
1871 */
1872int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len,
1873                         Error **errp)
1874{
1875    RAMBlock *ramblock;
1876    RAMState *rs = ram_state;
1877
1878    stat64_add(&mig_stats.postcopy_requests, 1);
1879    RCU_READ_LOCK_GUARD();
1880
1881    if (!rbname) {
1882        /* Reuse last RAMBlock */
1883        ramblock = rs->last_req_rb;
1884
1885        if (!ramblock) {
1886            /*
1887             * Shouldn't happen, we can't reuse the last RAMBlock if
1888             * it's the 1st request.
1889             */
1890            error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no previous block");
1891            return -1;
1892        }
1893    } else {
1894        ramblock = qemu_ram_block_by_name(rbname);
1895
1896        if (!ramblock) {
1897            /* We shouldn't be asked for a non-existent RAMBlock */
1898            error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no block '%s'", rbname);
1899            return -1;
1900        }
1901        rs->last_req_rb = ramblock;
1902    }
1903    trace_ram_save_queue_pages(ramblock->idstr, start, len);
1904    if (!offset_in_ramblock(ramblock, start + len - 1)) {
1905        error_setg(errp, "MIG_RP_MSG_REQ_PAGES request overrun, "
1906                   "start=" RAM_ADDR_FMT " len="
1907                   RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
1908                   start, len, ramblock->used_length);
1909        return -1;
1910    }
1911
1912    /*
1913     * With postcopy preempt, we send back the page directly in the
1914     * rp-return thread.
1915     */
1916    if (postcopy_preempt_active()) {
1917        ram_addr_t page_start = start >> TARGET_PAGE_BITS;
1918        size_t page_size = qemu_ram_pagesize(ramblock);
1919        PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY];
1920        int ret = 0;
1921
1922        qemu_mutex_lock(&rs->bitmap_mutex);
1923
1924        pss_init(pss, ramblock, page_start);
1925        /*
1926         * Always use the preempt channel, and make sure it's there.  It's
1927         * safe to access without a lock, because when the rp-thread is running
1928         * we should be the only one operating on the qemufile
1929         */
1930        pss->pss_channel = migrate_get_current()->postcopy_qemufile_src;
1931        assert(pss->pss_channel);
1932
1933        /*
1934         * The length must be a multiple of the host page size.  Just
1935         * assert; if something is wrong we're mostly split-brain anyway.
1936         */
1937        assert(len % page_size == 0);
1938        while (len) {
1939            if (ram_save_host_page_urgent(pss)) {
1940                error_setg(errp, "ram_save_host_page_urgent() failed: "
1941                           "ramblock=%s, start_addr=0x"RAM_ADDR_FMT,
1942                           ramblock->idstr, start);
1943                ret = -1;
1944                break;
1945            }
1946            /*
1947             * NOTE: after ram_save_host_page_urgent() succeeded, pss->page
1948             * will automatically be moved and point to the next host page
1949             * we're going to send, so no need to update here.
1950             *
1951             * Normally QEMU never sends more than one host page per request,
1952             * so logically the loop should only run once and we wouldn't need
1953             * this, but keep it for consistency.
1954             */
1955            len -= page_size;
1956        }
1957        qemu_mutex_unlock(&rs->bitmap_mutex);
1958
1959        return ret;
1960    }
1961
1962    struct RAMSrcPageRequest *new_entry =
1963        g_new0(struct RAMSrcPageRequest, 1);
1964    new_entry->rb = ramblock;
1965    new_entry->offset = start;
1966    new_entry->len = len;
1967
1968    memory_region_ref(ramblock->mr);
1969    qemu_mutex_lock(&rs->src_page_req_mutex);
1970    QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
1971    migration_make_urgent_request();
1972    qemu_mutex_unlock(&rs->src_page_req_mutex);
1973
1974    return 0;
1975}
1976
1977/**
1978 * ram_save_target_page: save one target page to the precopy thread
1979 * OR to multifd workers.
1980 *
1981 * @rs: current RAM state
1982 * @pss: data about the page we want to send
1983 */
1984static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
1985{
1986    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
1987    int res;
1988
1989    /* Hand over to RDMA first */
1990    if (migrate_rdma()) {
1991        res = rdma_control_save_page(pss->pss_channel, pss->block->offset,
1992                                     offset, TARGET_PAGE_SIZE);
1993
1994        if (res == RAM_SAVE_CONTROL_DELAYED) {
1995            res = 1;
1996        }
1997        return res;
1998    }
1999
2000    if (!migrate_multifd()
2001        || migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
2002        if (save_zero_page(rs, pss, offset)) {
2003            return 1;
2004        }
2005    }
2006
2007    if (migrate_multifd() && !migration_in_postcopy()) {
2008        return ram_save_multifd_page(pss->block, offset);
2009    }
2010
2011    return ram_save_page(rs, pss);
2012}
2013
2014/* Should be called before sending a host page */
2015static void pss_host_page_prepare(PageSearchStatus *pss)
2016{
2017    /* How many guest pages are there in one host page? */
2018    size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
2019
2020    pss->host_page_sending = true;
2021    if (guest_pfns <= 1) {
2022        /*
2023         * This covers both the case when guest psize == host psize and when
2024         * the guest has a larger psize than the host (guest_pfns==0).
2025         *
2026         * For the latter, we always send one whole guest page per
2027         * iteration of the host page (example: an Alpha VM on x86 host
2028         * will have guest psize 8K while host psize 4K).
2029         */
2030        pss->host_page_start = pss->page;
2031        pss->host_page_end = pss->page + 1;
2032    } else {
2033        /*
2034         * The host page spans over multiple guest pages, we send them
2035         * within the same host page iteration.
2036         */
2037        pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns);
2038        pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns);
2039    }
2040}
2041
2042/*
2043 * Whether the page pointed by PSS is within the host page being sent.
2044 * Must be called after a previous pss_host_page_prepare().
2045 */
2046static bool pss_within_range(PageSearchStatus *pss)
2047{
2048    ram_addr_t ram_addr;
2049
2050    assert(pss->host_page_sending);
2051
2052    /* Over host-page boundary? */
2053    if (pss->page >= pss->host_page_end) {
2054        return false;
2055    }
2056
2057    ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
2058
2059    return offset_in_ramblock(pss->block, ram_addr);
2060}
2061
2062static void pss_host_page_finish(PageSearchStatus *pss)
2063{
2064    pss->host_page_sending = false;
2065    /* Not strictly needed; just reset the fields */
2066    pss->host_page_start = pss->host_page_end = 0;
2067}
2068
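    /*
     * ram_page_hint_update: record where the urgently sent page was, so the
     * background (precopy) scan can continue from that neighbourhood.  An
     * existing, not yet consumed hint is left untouched.
     */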
2069static void ram_page_hint_update(RAMState *rs, PageSearchStatus *pss)
2070{
2071    PageLocationHint *hint = &rs->page_hint;
2072
2073    /* If there's a pending hint not consumed, don't bother */
2074    if (hint->valid) {
2075        return;
2076    }
2077
2078    /* Provide a hint to the background stream otherwise */
2079    hint->location.block = pss->block;
2080    hint->location.offset = pss->page;
2081    hint->valid = true;
2082}
2083
2084/*
2085 * Send an urgent host page specified by `pss'.  Need to be called with
2086 * bitmap_mutex held.
2087 *
2088 * Returns 0 if saving the host page succeeded, negative otherwise.
2089 */
2090static int ram_save_host_page_urgent(PageSearchStatus *pss)
2091{
2092    bool page_dirty, sent = false;
2093    RAMState *rs = ram_state;
2094    int ret = 0;
2095
2096    trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page);
2097    pss_host_page_prepare(pss);
2098
2099    /*
2100     * If precopy is sending the same page, let precopy handle it; otherwise
2101     * we could send the same page on two channels and neither of them would
2102     * deliver the whole page.
2103     */
2104    if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) {
2105        trace_postcopy_preempt_hit(pss->block->idstr,
2106                                   pss->page << TARGET_PAGE_BITS);
2107        return 0;
2108    }
2109
2110    do {
2111        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);
2112
2113        if (page_dirty) {
2114            /* Be strict about the return code; it must be 1 (one page sent) here */
2115            if (ram_save_target_page(rs, pss) != 1) {
2116                error_report_once("%s: ram_save_target_page failed", __func__);
2117                ret = -1;
2118                goto out;
2119            }
2120            sent = true;
2121        }
2122        pss_find_next_dirty(pss);
2123    } while (pss_within_range(pss));
2124out:
2125    pss_host_page_finish(pss);
2126    /* For urgent requests, flush immediately if sent */
2127    if (sent) {
2128        qemu_fflush(pss->pss_channel);
2129        ram_page_hint_update(rs, pss);
2130    }
2131    return ret;
2132}
2133
2134/**
2135 * ram_save_host_page: save a whole host page
2136 *
2137 * Starting at *offset send pages up to the end of the current host
2138 * page. It's valid for the initial offset to point into the middle of
2139 * a host page in which case the remainder of the hostpage is sent.
2140 * Only dirty target pages are sent. Note that the host page size may
2141 * be a huge page for this block.
2142 *
2143 * The saving stops at the boundary of the used_length of the block
2144 * if the RAMBlock isn't a multiple of the host page size.
2145 *
2146 * The caller must hold ram_state.bitmap_mutex when calling this
2147 * function.  Note that this function can temporarily release the lock, but
2148 * it makes sure the lock is held again before it returns.
2149 *
2150 * Returns the number of pages written or negative on error
2151 *
2152 * @rs: current RAM state
2153 * @pss: data about the page we want to send
2154 */
2155static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
2156{
2157    bool page_dirty, preempt_active = postcopy_preempt_active();
2158    int tmppages, pages = 0;
2159    size_t pagesize_bits =
2160        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
2161    unsigned long start_page = pss->page;
2162    int res;
2163
2164    if (migrate_ram_is_ignored(pss->block)) {
2165        error_report("block %s should not be migrated !", pss->block->idstr);
2166        return 0;
2167    }
2168
2169    /* Update host page boundary information */
2170    pss_host_page_prepare(pss);
2171
2172    do {
2173        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);
2174
2175        /* Check whether the page is dirty; if it is, send it */
2176        if (page_dirty) {
2177            /*
2178             * Properly yield the lock only in postcopy preempt mode
2179             * because both migration thread and rp-return thread can
2180             * operate on the bitmaps.
2181             */
2182            if (preempt_active) {
2183                qemu_mutex_unlock(&rs->bitmap_mutex);
2184            }
2185            tmppages = ram_save_target_page(rs, pss);
2186            if (tmppages >= 0) {
2187                pages += tmppages;
2188                /*
2189                 * Allow rate limiting to happen in the middle of huge pages if
2190                 * something is sent in the current iteration.
2191                 */
2192                if (pagesize_bits > 1 && tmppages > 0) {
2193                    migration_rate_limit();
2194                }
2195            }
2196            if (preempt_active) {
2197                qemu_mutex_lock(&rs->bitmap_mutex);
2198            }
2199        } else {
2200            tmppages = 0;
2201        }
2202
2203        if (tmppages < 0) {
2204            pss_host_page_finish(pss);
2205            return tmppages;
2206        }
2207
2208        pss_find_next_dirty(pss);
2209    } while (pss_within_range(pss));
2210
2211    pss_host_page_finish(pss);
2212
2213    res = ram_save_release_protection(rs, pss, start_page);
2214    return (res < 0 ? res : pages);
2215}
2216
2217static bool ram_page_hint_valid(RAMState *rs)
2218{
2219    /* Page hints are only used during postcopy preempt mode */
2220    if (!postcopy_preempt_active()) {
2221        return false;
2222    }
2223
2224    return rs->page_hint.valid;
2225}
2226
2227static void ram_page_hint_collect(RAMState *rs, RAMBlock **block,
2228                                  unsigned long *page)
2229{
2230    PageLocationHint *hint = &rs->page_hint;
2231
2232    assert(hint->valid);
2233
2234    *block = hint->location.block;
2235    *page = hint->location.offset;
2236
2237    /* Mark the hint consumed */
2238    hint->valid = false;
2239}
2240
2241/**
2242 * ram_find_and_save_block: finds a dirty page and sends it to f
2243 *
2244 * Called within an RCU critical section.
2245 *
2246 * Returns the number of pages written where zero means no dirty pages,
2247 * or negative on error
2248 *
2249 * @rs: current RAM state
2250 *
2251 * On systems where host-page-size > target-page-size it will send all the
2252 * pages in a host page that are dirty.
2253 */
2254static int ram_find_and_save_block(RAMState *rs)
2255{
2256    PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
2257    unsigned long next_page;
2258    RAMBlock *next_block;
2259    int pages = 0;
2260
2261    /* No dirty page as there is zero RAM */
2262    if (!rs->ram_bytes_total) {
2263        return pages;
2264    }
2265
2266    /*
2267     * Always keep last_seen_block/last_page valid during this procedure,
2268     * because find_dirty_block() relies on these values (e.g., we compare
2269     * last_seen_block with pss.block to see whether we searched all the
2270     * ramblocks) to detect the completion of migration.  A NULL
2271     * last_seen_block can cause the loop below to run forever.
2272     */
2273    if (!rs->last_seen_block) {
2274        rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks);
2275        rs->last_page = 0;
2276    }
2277
2278    if (ram_page_hint_valid(rs)) {
2279        ram_page_hint_collect(rs, &next_block, &next_page);
2280    } else {
2281        next_block = rs->last_seen_block;
2282        next_page = rs->last_page;
2283    }
2284
2285    pss_init(pss, next_block, next_page);
2286
2287    while (true) {
2288        if (!get_queued_page(rs, pss)) {
2289            /* priority queue empty, so just search for something dirty */
2290            int res = find_dirty_block(rs, pss);
2291
2292            if (res == PAGE_ALL_CLEAN) {
2293                break;
2294            } else if (res == PAGE_TRY_AGAIN) {
2295                continue;
2296            } else if (res < 0) {
2297                pages = res;
2298                break;
2299            }
2300
2301            /* Otherwise we must have a dirty page to move */
2302            assert(res == PAGE_DIRTY_FOUND);
2303        }
2304        pages = ram_save_host_page(rs, pss);
2305        if (pages) {
2306            break;
2307        }
2308    }
2309
2310    rs->last_seen_block = pss->block;
2311    rs->last_page = pss->page;
2312
2313    return pages;
2314}
2315
2316static uint64_t ram_bytes_total_with_ignored(void)
2317{
2318    RAMBlock *block;
2319    uint64_t total = 0;
2320
2321    RCU_READ_LOCK_GUARD();
2322
2323    RAMBLOCK_FOREACH_MIGRATABLE(block) {
2324        total += block->used_length;
2325    }
2326    return total;
2327}
2328
2329uint64_t ram_bytes_total(void)
2330{
2331    RAMBlock *block;
2332    uint64_t total = 0;
2333
2334    RCU_READ_LOCK_GUARD();
2335
2336    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2337        total += block->used_length;
2338    }
2339    return total;
2340}
2341
2342static void xbzrle_load_setup(void)
2343{
2344    XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2345}
2346
2347static void xbzrle_load_cleanup(void)
2348{
2349    g_free(XBZRLE.decoded_buf);
2350    XBZRLE.decoded_buf = NULL;
2351}
2352
2353static void ram_state_cleanup(RAMState **rsp)
2354{
2355    if (*rsp) {
2356        migration_page_queue_free(*rsp);
2357        qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2358        qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2359        g_free(*rsp);
2360        *rsp = NULL;
2361    }
2362}
2363
2364static void xbzrle_cleanup(void)
2365{
2366    XBZRLE_cache_lock();
2367    if (XBZRLE.cache) {
2368        cache_fini(XBZRLE.cache);
2369        g_free(XBZRLE.encoded_buf);
2370        g_free(XBZRLE.current_buf);
2371        g_free(XBZRLE.zero_target_page);
2372        XBZRLE.cache = NULL;
2373        XBZRLE.encoded_buf = NULL;
2374        XBZRLE.current_buf = NULL;
2375        XBZRLE.zero_target_page = NULL;
2376    }
2377    XBZRLE_cache_unlock();
2378}
2379
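    /*
     * ram_bitmaps_destroy: free the per-RAMBlock migration bitmaps
     * (clear_bmap, bmap and file_bmap) allocated in ram_list_init_bitmaps().
     */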
2380static void ram_bitmaps_destroy(void)
2381{
2382    RAMBlock *block;
2383
2384    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2385        g_free(block->clear_bmap);
2386        block->clear_bmap = NULL;
2387        g_free(block->bmap);
2388        block->bmap = NULL;
2389        g_free(block->file_bmap);
2390        block->file_bmap = NULL;
2391    }
2392}
2393
2394static void ram_save_cleanup(void *opaque)
2395{
2396    RAMState **rsp = opaque;
2397
2398    /* We don't use dirty log with background snapshots */
2399    if (!migrate_background_snapshot()) {
2400        /* The caller must hold the BQL or be in a BH, so there is
2401         * no write race against the migration bitmap
2402         */
2403        if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
2404            /*
2405             * do not stop dirty logging without having started it, since
2406             * memory_global_dirty_log_stop() asserts that
2407             * memory_global_dirty_log_start/stop are used in pairs
2408             */
2409            memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
2410        }
2411    }
2412
2413    ram_bitmaps_destroy();
2414
2415    xbzrle_cleanup();
2416    multifd_ram_save_cleanup();
2417    ram_state_cleanup(rsp);
2418}
2419
2420static void ram_page_hint_reset(PageLocationHint *hint)
2421{
2422    hint->location.block = NULL;
2423    hint->location.offset = 0;
2424    hint->valid = false;
2425}
2426
2427static void ram_state_reset(RAMState *rs)
2428{
2429    int i;
2430
2431    for (i = 0; i < RAM_CHANNEL_MAX; i++) {
2432        rs->pss[i].last_sent_block = NULL;
2433    }
2434
2435    rs->last_seen_block = NULL;
2436    rs->last_page = 0;
2437    rs->last_version = ram_list.version;
2438    rs->xbzrle_started = false;
2439
2440    ram_page_hint_reset(&rs->page_hint);
2441}
2442
2443#define MAX_WAIT 50 /* ms, half buffered_file limit */
2444
2445/* **** functions for postcopy ***** */
2446
2447void ram_postcopy_migrated_memory_release(MigrationState *ms)
2448{
2449    struct RAMBlock *block;
2450
2451    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2452        unsigned long *bitmap = block->bmap;
2453        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2454        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
2455
2456        while (run_start < range) {
2457            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
2458            ram_discard_range(block->idstr,
2459                              ((ram_addr_t)run_start) << TARGET_PAGE_BITS,
2460                              ((ram_addr_t)(run_end - run_start))
2461                                << TARGET_PAGE_BITS);
2462            run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2463        }
2464    }
2465}
2466
2467/**
2468 * postcopy_send_discard_bm_ram: discard a RAMBlock
2469 *
2470 * Callback from postcopy_each_ram_send_discard for each RAMBlock
2471 *
2472 * @ms: current migration state
2473 * @block: RAMBlock to discard
2474 */
2475static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
2476{
2477    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
2478    unsigned long current;
2479    unsigned long *bitmap = block->bmap;
2480
2481    for (current = 0; current < end; ) {
2482        unsigned long one = find_next_bit(bitmap, end, current);
2483        unsigned long zero, discard_length;
2484
2485        if (one >= end) {
2486            break;
2487        }
2488
2489        zero = find_next_zero_bit(bitmap, end, one + 1);
2490
2491        if (zero >= end) {
2492            discard_length = end - one;
2493        } else {
2494            discard_length = zero - one;
2495        }
2496        postcopy_discard_send_range(ms, one, discard_length);
2497        current = one + discard_length;
2498    }
2499}
2500
2501static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block);
2502
2503/**
2504 * postcopy_each_ram_send_discard: discard all RAMBlocks
2505 *
2506 * Utility for the outgoing postcopy code.
2507 *   Calls postcopy_send_discard_bm_ram for each RAMBlock
2508 *   passing it bitmap indexes and name.
2509 * (qemu_ram_foreach_block ends up passing unscaled lengths
2510 *  which would mean postcopy code would have to deal with target page)
2511 *
2512 * @ms: current migration state
2513 */
2514static void postcopy_each_ram_send_discard(MigrationState *ms)
2515{
2516    struct RAMBlock *block;
2517
2518    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2519        postcopy_discard_send_init(ms, block->idstr);
2520
2521        /*
2522         * Deal with TPS != HPS and huge pages.  It discards any partially sent
2523         * host-page size chunks and marks any partially dirty host-page size
2524         * chunks as all dirty.  In this case the host-page is the host-page
2525         * for the particular RAMBlock, i.e. it might be a huge page.
2526         */
2527        postcopy_chunk_hostpages_pass(ms, block);
2528
2529        /*
2530         * Postcopy sends chunks of bitmap over the wire, but it
2531         * just needs indexes at this point, which avoids having
2532         * target-page-specific code here.
2533         */
2534        postcopy_send_discard_bm_ram(ms, block);
2535        postcopy_discard_send_finish(ms);
2536    }
2537}
2538
2539/**
2540 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
2541 *
2542 * Helper for postcopy_each_ram_send_discard(); it is called once per
2543 * RAMBlock to canonicalize the block's dirty bitmap in host-page-sized
2544 * chunks.
2545 *
2546 * Postcopy requires that all target pages in a host page are dirty or
2547 * clean, not a mix.  This function canonicalizes the bitmap.
2548 *
2549 * @ms: current migration state
2550 * @block: block that contains the page we want to canonicalize
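     *
     * Example: with host_ratio == 4 (a 4-target-page host page), a dirty
     * run covering target pages 5..9 is widened so that target pages 4..11
     * (two whole host pages) end up marked dirty.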
2551 */
2552static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
2553{
2554    RAMState *rs = ram_state;
2555    unsigned long *bitmap = block->bmap;
2556    unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
2557    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2558    unsigned long run_start;
2559
2560    if (block->page_size == TARGET_PAGE_SIZE) {
2561        /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2562        return;
2563    }
2564
2565    /* Find a dirty page */
2566    run_start = find_next_bit(bitmap, pages, 0);
2567
2568    while (run_start < pages) {
2569
2570        /*
2571         * If the start of this run of pages is in the middle of a host
2572         * page, then we need to fix up this host page.
2573         */
2574        if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
2575            /* Find the end of this run */
2576            run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
2577            /*
2578             * If the end isn't at the start of a host page, then the
2579             * run doesn't finish at the end of a host page
2580             * and we need to discard.
2581             */
2582        }
2583
2584        if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
2585            unsigned long page;
2586            unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
2587                                                             host_ratio);
2588            run_start = QEMU_ALIGN_UP(run_start, host_ratio);
2589
2590            /* Clean up the bitmap */
2591            for (page = fixup_start_addr;
2592                 page < fixup_start_addr + host_ratio; page++) {
2593                /*
2594                 * Remark them as dirty, updating the count for any pages
2595                 * that weren't previously dirty.
2596                 */
2597                rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
2598            }
2599        }
2600
2601        /* Find the next dirty page for the next iteration */
2602        run_start = find_next_bit(bitmap, pages, run_start);
2603    }
2604}
2605
2606/**
2607 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2608 *
2609 * Transmit the set of pages to be discarded after precopy to the target
2610 * these are pages that:
2611 *     a) Have been previously transmitted but are now dirty again
2612 *     b) Pages that have never been transmitted, this ensures that
2613 *        any pages on the destination that have been mapped by background
2614 *        tasks get discarded (transparent huge pages is the specific concern)
2615 * Hopefully this is pretty sparse
2616 *
2617 * @ms: current migration state
2618 */
2619void ram_postcopy_send_discard_bitmap(MigrationState *ms)
2620{
2621    RAMState *rs = ram_state;
2622
2623    RCU_READ_LOCK_GUARD();
2624
2625    /* This should be our last sync, the src is now paused */
2626    migration_bitmap_sync(rs, false);
2627
2628    /* Easiest way to make sure we don't resume in the middle of a host-page */
2629    rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
2630    rs->last_seen_block = NULL;
2631    rs->last_page = 0;
2632
2633    postcopy_each_ram_send_discard(ms);
2634
2635    trace_ram_postcopy_send_discard_bitmap();
2636}
2637
2638/**
2639 * ram_discard_range: discard dirtied pages at the beginning of postcopy
2640 *
2641 * Returns zero on success
2642 *
2643 * @rbname: name of the RAMBlock of the request; it must name an
2644 *          existing RAMBlock
2645 * @start: starting address (byte offset) within the RAMBlock
2646 * @length: length (in bytes) to discard
2647 */
2648int ram_discard_range(const char *rbname, uint64_t start, size_t length)
2649{
2650    trace_ram_discard_range(rbname, start, length);
2651
2652    RCU_READ_LOCK_GUARD();
2653    RAMBlock *rb = qemu_ram_block_by_name(rbname);
2654
2655    if (!rb) {
2656        error_report("ram_discard_range: Failed to find block '%s'", rbname);
2657        return -1;
2658    }
2659
2660    /*
2661     * On source VM, we don't need to update the received bitmap since
2662     * we don't even have one.
2663     */
2664    if (rb->receivedmap) {
2665        bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
2666                     length >> qemu_target_page_bits());
2667    }
2668
2669    return ram_block_discard_range(rb, start, length);
2670}
2671
2672/*
2673 * For every allocation here, we try not to crash the VM if the
2674 * allocation fails.
2675 */
2676static bool xbzrle_init(Error **errp)
2677{
2678    if (!migrate_xbzrle()) {
2679        return true;
2680    }
2681
2682    XBZRLE_cache_lock();
2683
2684    XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
2685    if (!XBZRLE.zero_target_page) {
2686        error_setg(errp, "%s: Error allocating zero page", __func__);
2687        goto err_out;
2688    }
2689
2690    XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
2691                              TARGET_PAGE_SIZE, errp);
2692    if (!XBZRLE.cache) {
2693        goto free_zero_page;
2694    }
2695
2696    XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
2697    if (!XBZRLE.encoded_buf) {
2698        error_setg(errp, "%s: Error allocating encoded_buf", __func__);
2699        goto free_cache;
2700    }
2701
2702    XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
2703    if (!XBZRLE.current_buf) {
2704        error_setg(errp, "%s: Error allocating current_buf", __func__);
2705        goto free_encoded_buf;
2706    }
2707
2708    /* We are all good */
2709    XBZRLE_cache_unlock();
2710    return true;
2711
2712free_encoded_buf:
2713    g_free(XBZRLE.encoded_buf);
2714    XBZRLE.encoded_buf = NULL;
2715free_cache:
2716    cache_fini(XBZRLE.cache);
2717    XBZRLE.cache = NULL;
2718free_zero_page:
2719    g_free(XBZRLE.zero_target_page);
2720    XBZRLE.zero_target_page = NULL;
2721err_out:
2722    XBZRLE_cache_unlock();
2723    return false;
2724}
2725
2726static bool ram_state_init(RAMState **rsp, Error **errp)
2727{
2728    *rsp = g_try_new0(RAMState, 1);
2729
2730    if (!*rsp) {
2731        error_setg(errp, "%s: Init ramstate fail", __func__);
2732        return false;
2733    }
2734
2735    qemu_mutex_init(&(*rsp)->bitmap_mutex);
2736    qemu_mutex_init(&(*rsp)->src_page_req_mutex);
2737    QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
2738    (*rsp)->ram_bytes_total = ram_bytes_total();
2739
2740    /*
2741     * Count the total number of pages used by ram blocks not including any
2742     * gaps due to alignment or unplugs.
2743     * This must match the initial values of the dirty bitmap.
2744     */
2745    (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS;
2746    ram_state_reset(*rsp);
2747
2748    return true;
2749}
2750
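    /*
     * ram_list_init_bitmaps: allocate the per-RAMBlock migration bitmaps and
     * start with every page marked dirty, so the first pass covers all of
     * guest RAM.
     */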
2751static void ram_list_init_bitmaps(void)
2752{
2753    MigrationState *ms = migrate_get_current();
2754    RAMBlock *block;
2755    unsigned long pages;
2756    uint8_t shift;
2757
2758    /* Skip setting bitmap if there is no RAM */
2759    if (ram_bytes_total()) {
2760        shift = ms->clear_bitmap_shift;
2761        if (shift > CLEAR_BITMAP_SHIFT_MAX) {
2762            error_report("clear_bitmap_shift (%u) too big, using "
2763                         "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
2764            shift = CLEAR_BITMAP_SHIFT_MAX;
2765        } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
2766            error_report("clear_bitmap_shift (%u) too small, using "
2767                         "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
2768            shift = CLEAR_BITMAP_SHIFT_MIN;
2769        }
2770
2771        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2772            pages = block->max_length >> TARGET_PAGE_BITS;
2773            /*
2774             * The initial dirty bitmap for migration must be set with all
2775             * ones to make sure we'll migrate every guest RAM page to
2776             * destination.
2777             * Here we set RAMBlock.bmap all to 1 because when we restart a
2778             * new migration after a failed one, ram_list.
2779             * dirty_memory[DIRTY_MEMORY_MIGRATION] doesn't include the whole
2780             * guest memory.
2781             */
2782            block->bmap = bitmap_new(pages);
2783            bitmap_set(block->bmap, 0, pages);
2784            if (migrate_mapped_ram()) {
2785                block->file_bmap = bitmap_new(pages);
2786            }
2787            block->clear_bmap_shift = shift;
2788            block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
2789        }
2790    }
2791}
2792
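    /*
     * migration_bitmap_clear_discarded_pages: drop pages discarded by a
     * RamDiscardManager (such as virtio-mem) from the dirty bitmaps and
     * adjust the dirty page count accordingly.
     */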
2793static void migration_bitmap_clear_discarded_pages(RAMState *rs)
2794{
2795    unsigned long pages;
2796    RAMBlock *rb;
2797
2798    RCU_READ_LOCK_GUARD();
2799
2800    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
2801        pages = ramblock_dirty_bitmap_clear_discarded_pages(rb);
2802        rs->migration_dirty_pages -= pages;
2803    }
2804}
2805
2806static bool ram_init_bitmaps(RAMState *rs, Error **errp)
2807{
2808    bool ret = true;
2809
2810    qemu_mutex_lock_ramlist();
2811
2812    WITH_RCU_READ_LOCK_GUARD() {
2813        ram_list_init_bitmaps();
2814        /* We don't use dirty log with background snapshots */
2815        if (!migrate_background_snapshot()) {
2816            ret = memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION, errp);
2817            if (!ret) {
2818                goto out_unlock;
2819            }
2820            migration_bitmap_sync_precopy(false);
2821        }
2822    }
2823out_unlock:
2824    qemu_mutex_unlock_ramlist();
2825
2826    if (!ret) {
2827        ram_bitmaps_destroy();
2828        return false;
2829    }
2830
2831    /*
2832     * After a possible first bitmap sync, fix up the initial bitmap
2833     * containing all 1s to exclude any discarded pages from migration.
2834     */
2835    migration_bitmap_clear_discarded_pages(rs);
2836    return true;
2837}
2838
2839static int ram_init_all(RAMState **rsp, Error **errp)
2840{
2841    if (!ram_state_init(rsp, errp)) {
2842        return -1;
2843    }
2844
2845    if (!xbzrle_init(errp)) {
2846        ram_state_cleanup(rsp);
2847        return -1;
2848    }
2849
2850    if (!ram_init_bitmaps(*rsp, errp)) {
2851        return -1;
2852    }
2853
2854    return 0;
2855}
2856
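    /*
     * ram_state_resume_prepare: prepare RAMState for a migration resume:
     * recompute the dirty page count from the block bitmaps, reset the
     * search state and switch the precopy channel to @out.
     */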
2857static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
2858{
2859    RAMBlock *block;
2860    uint64_t pages = 0;
2861
2862    /*
2863     * Postcopy is not using xbzrle/compression, so no need for that.
2864     * Also, since the source is already halted, we don't need to care
2865     * about dirty page logging either.
2866     */
2867
2868    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2869        pages += bitmap_count_one(block->bmap,
2870                                  block->used_length >> TARGET_PAGE_BITS);
2871    }
2872
2873    /* This may not be aligned with current bitmaps. Recalculate. */
2874    rs->migration_dirty_pages = pages;
2875
2876    ram_state_reset(rs);
2877
2878    /* Update RAMState cache of output QEMUFile */
2879    rs->pss[RAM_CHANNEL_PRECOPY].pss_channel = out;
2880
2881    trace_ram_state_resume_prepare(pages);
2882}
2883
2884/*
2885 * This function clears bits of the free pages reported by the caller from the
2886 * migration dirty bitmap. @addr is the host address corresponding to the
2887 * start of the continuous guest free pages, and @len is the total bytes of
2888 * those pages.
2889 */
2890void qemu_guest_free_page_hint(void *addr, size_t len)
2891{
2892    RAMBlock *block;
2893    ram_addr_t offset;
2894    size_t used_len, start, npages;
2895
2896    /* This function is currently expected to be used during live migration */
2897    if (!migration_is_running()) {
2898        return;
2899    }
2900
2901    for (; len > 0; len -= used_len, addr += used_len) {
2902        block = qemu_ram_block_from_host(addr, false, &offset);
2903        if (unlikely(!block || offset >= block->used_length)) {
2904            /*
2905             * The implementation might not support RAMBlock resize during
2906             * live migration, but it could happen in theory with future
2907             * updates. So we add a check here to capture that case.
2908             */
2909            error_report_once("%s unexpected error", __func__);
2910            return;
2911        }
2912
2913        if (len <= block->used_length - offset) {
2914            used_len = len;
2915        } else {
2916            used_len = block->used_length - offset;
2917        }
2918
2919        start = offset >> TARGET_PAGE_BITS;
2920        npages = used_len >> TARGET_PAGE_BITS;
2921
2922        qemu_mutex_lock(&ram_state->bitmap_mutex);
2923        /*
2924         * The skipped free pages are equivalent to having been sent from clear_bmap's
2925         * perspective, so clear the bits from the memory region bitmap which
2926         * are initially set. Otherwise those skipped pages will be sent in
2927         * the next round after syncing from the memory region bitmap.
2928         */
2929        migration_clear_memory_region_dirty_bitmap_range(block, start, npages);
2930        ram_state->migration_dirty_pages -=
2931                      bitmap_count_one_with_offset(block->bmap, start, npages);
2932        bitmap_clear(block->bmap, start, npages);
2933        qemu_mutex_unlock(&ram_state->bitmap_mutex);
2934    }
2935}
2936
2937#define MAPPED_RAM_HDR_VERSION 1
2938struct MappedRamHeader {
2939    uint32_t version;
2940    /*
2941     * The target's page size, so we know how many pages are in the
2942     * bitmap.
2943     */
2944    uint64_t page_size;
2945    /*
2946     * The offset in the migration file where the pages bitmap is
2947     * stored.
2948     */
2949    uint64_t bitmap_offset;
2950    /*
2951     * The offset in the migration file where the actual pages (data)
2952     * are stored.
2953     */
2954    uint64_t pages_offset;
2955} QEMU_PACKED;
2956typedef struct MappedRamHeader MappedRamHeader;
2957
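    /*
     * Layout of one RAMBlock in a mapped-ram migration file, as set up by
     * mapped_ram_setup_ramblock() below (not to scale):
     *
     *   | MappedRamHeader | dirty page bitmap | padding to 1M | page data |
     *
     * bitmap_offset points right after the header; pages_offset is rounded
     * up to MAPPED_RAM_FILE_OFFSET_ALIGNMENT so that O_DIRECT accesses to
     * the data region stay block-aligned.
     */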
2958static void mapped_ram_setup_ramblock(QEMUFile *file, RAMBlock *block)
2959{
2960    g_autofree MappedRamHeader *header = NULL;
2961    size_t header_size, bitmap_size;
2962    long num_pages;
2963
2964    header = g_new0(MappedRamHeader, 1);
2965    header_size = sizeof(MappedRamHeader);
2966
2967    num_pages = block->used_length >> TARGET_PAGE_BITS;
2968    bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);
2969
2970    /*
2971     * Save the file offsets of where the bitmap and the pages should
2972     * go as they are written at the end of migration and during the
2973     * iterative phase, respectively.
2974     */
2975    block->bitmap_offset = qemu_get_offset(file) + header_size;
2976    block->pages_offset = ROUND_UP(block->bitmap_offset +
2977                                   bitmap_size,
2978                                   MAPPED_RAM_FILE_OFFSET_ALIGNMENT);
2979
2980    header->version = cpu_to_be32(MAPPED_RAM_HDR_VERSION);
2981    header->page_size = cpu_to_be64(TARGET_PAGE_SIZE);
2982    header->bitmap_offset = cpu_to_be64(block->bitmap_offset);
2983    header->pages_offset = cpu_to_be64(block->pages_offset);
2984
2985    qemu_put_buffer(file, (uint8_t *) header, header_size);
2986
2987    /* prepare offset for next ramblock */
2988    qemu_set_offset(file, block->pages_offset + block->used_length, SEEK_SET);
2989}
2990
2991static bool mapped_ram_read_header(QEMUFile *file, MappedRamHeader *header,
2992                                   Error **errp)
2993{
2994    size_t ret, header_size = sizeof(MappedRamHeader);
2995
2996    ret = qemu_get_buffer(file, (uint8_t *)header, header_size);
2997    if (ret != header_size) {
2998        error_setg(errp, "Could not read whole mapped-ram migration header "
2999                   "(expected %zd, got %zd bytes)", header_size, ret);
3000        return false;
3001    }
3002
3003    /* migration stream is big-endian */
3004    header->version = be32_to_cpu(header->version);
3005
3006    if (header->version > MAPPED_RAM_HDR_VERSION) {
3007        error_setg(errp, "Migration mapped-ram capability version not "
3008                   "supported (expected <= %d, got %d)", MAPPED_RAM_HDR_VERSION,
3009                   header->version);
3010        return false;
3011    }
3012
3013    header->page_size = be64_to_cpu(header->page_size);
3014    header->bitmap_offset = be64_to_cpu(header->bitmap_offset);
3015    header->pages_offset = be64_to_cpu(header->pages_offset);
3016
3017    return true;
3018}
3019
3020/*
3021 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
3022 * long-running RCU critical section.  When rcu-reclaims in the code
3023 * start to become numerous it will be necessary to reduce the
3024 * granularity of these critical sections.
3025 */
3026
3027/**
3028 * ram_save_setup: Setup RAM for migration
3029 *
3030 * Returns zero to indicate success and negative for error
3031 *
3032 * @f: QEMUFile where to send the data
3033 * @opaque: RAMState pointer
3034 * @errp: pointer to Error*, to store an error if it happens.
3035 */
3036static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp)
3037{
3038    RAMState **rsp = opaque;
3039    RAMBlock *block;
3040    int ret, max_hg_page_size;
3041
3042    /* migration has already set up the bitmap, reuse it. */
3043    if (!migration_in_colo_state()) {
3044        if (ram_init_all(rsp, errp) != 0) {
3045            return -1;
3046        }
3047    }
3048    (*rsp)->pss[RAM_CHANNEL_PRECOPY].pss_channel = f;
3049
3050    /*
3051     * ??? Mirrors the previous value of qemu_host_page_size,
3052     * but is this really what was intended for the migration?
3053     */
3054    max_hg_page_size = MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE);
3055
3056    WITH_RCU_READ_LOCK_GUARD() {
3057        qemu_put_be64(f, ram_bytes_total_with_ignored()
3058                         | RAM_SAVE_FLAG_MEM_SIZE);
3059
3060        RAMBLOCK_FOREACH_MIGRATABLE(block) {
3061            qemu_put_byte(f, strlen(block->idstr));
3062            qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3063            qemu_put_be64(f, block->used_length);
3064            if (migrate_postcopy_ram() &&
3065                block->page_size != max_hg_page_size) {
3066                qemu_put_be64(f, block->page_size);
3067            }
3068            if (migrate_ignore_shared()) {
3069                qemu_put_be64(f, block->mr->addr);
3070            }
3071
3072            if (migrate_mapped_ram()) {
3073                mapped_ram_setup_ramblock(f, block);
3074            }
3075        }
3076    }
3077
3078    ret = rdma_registration_start(f, RAM_CONTROL_SETUP);
3079    if (ret < 0) {
3080        error_setg(errp, "%s: failed to start RDMA registration", __func__);
3081        qemu_file_set_error(f, ret);
3082        return ret;
3083    }
3084
3085    ret = rdma_registration_stop(f, RAM_CONTROL_SETUP);
3086    if (ret < 0) {
3087        error_setg(errp, "%s: failed to stop RDMA registration", __func__);
3088        qemu_file_set_error(f, ret);
3089        return ret;
3090    }
3091
3092    if (migrate_multifd()) {
3093        multifd_ram_save_setup();
3094    }
3095
3096    /*
3097     * This operation is unfortunate...
3098     *
3099     * For legacy QEMUs using per-section sync
3100     * =======================================
3101     *
3102     * This must exist because the EOS below requires the SYNC messages
3103     * per-channel to work.
3104     *
3105     * For modern QEMUs using per-round sync
3106     * =====================================
3107     *
3108     * Logically such a sync is not needed, and recv threads should not run
3109     * until setup is ready (using things like channels_ready on src).  Then
3110     * we should be all fine.
3111     *
3112     * However, even if we add channels_ready to the recv side in new QEMUs,
3113     * old QEMUs won't have them, so this sync is still needed to make sure
3114     * multifd recv threads won't start processing guest pages early before
3115     * ram_load_setup() is properly done.
3116     *
3117     * Let's stick with this.  Fortunately the overhead of syncing during
3118     * setup is low because the VM is running, so at least it's not
3119     * accounted as part of downtime.
3120     */
3121    bql_unlock();
3122    ret = multifd_ram_flush_and_sync(f);
3123    bql_lock();
3124    if (ret < 0) {
3125        error_setg(errp, "%s: multifd synchronization failed", __func__);
3126        return ret;
3127    }
3128
3129    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3130    ret = qemu_fflush(f);
3131    if (ret < 0) {
3132        error_setg_errno(errp, -ret, "%s failed", __func__);
3133    }
3134    return ret;
3135}
3136
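    /*
     * ram_save_file_bmap: for mapped-ram, write each RAMBlock's file bitmap
     * to its reserved slot in the migration file, then free it.
     */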
3137static void ram_save_file_bmap(QEMUFile *f)
3138{
3139    RAMBlock *block;
3140
3141    RAMBLOCK_FOREACH_MIGRATABLE(block) {
3142        long num_pages = block->used_length >> TARGET_PAGE_BITS;
3143        long bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);
3144
3145        qemu_put_buffer_at(f, (uint8_t *)block->file_bmap, bitmap_size,
3146                           block->bitmap_offset);
3147        ram_transferred_add(bitmap_size);
3148
3149        /*
3150         * Free the bitmap here to catch any synchronization issues
3151         * with multifd channels. No channels should be sending pages
3152         * after we've written the bitmap to file.
3153         */
3154        g_free(block->file_bmap);
3155        block->file_bmap = NULL;
3156    }
3157}
3158
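    /*
     * ramblock_set_file_bmap_atomic: set or clear the bit for the page at
     * @offset in @block's mapped-ram file bitmap.  Atomic, since multiple
     * threads (e.g. multifd channels) may update the bitmap concurrently.
     */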
3159void ramblock_set_file_bmap_atomic(RAMBlock *block, ram_addr_t offset, bool set)
3160{
3161    if (set) {
3162        set_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap);
3163    } else {
3164        clear_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap);
3165    }
3166}
3167
3168/**
3169 * ram_save_iterate: iterative stage for migration
3170 *
3171 * Returns zero to indicate success and negative for error
3172 *
3173 * @f: QEMUFile where to send the data
3174 * @opaque: RAMState pointer
3175 */
3176static int ram_save_iterate(QEMUFile *f, void *opaque)
3177{
3178    RAMState **temp = opaque;
3179    RAMState *rs = *temp;
3180    int ret = 0;
3181    int i;
3182    int64_t t0;
3183    int done = 0;
3184
3185    /*
3186     * We'll take this lock a little bit long, but it's okay for two reasons.
3187     * Firstly, the only other thread that may take it is the one calling
3188     * qemu_guest_free_page_hint(), which should be rare; secondly, see
3189     * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which
3190     * guarantees that we release it on a regular basis.
3191     */
3192    WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) {
3193        WITH_RCU_READ_LOCK_GUARD() {
3194            if (ram_list.version != rs->last_version) {
3195                ram_state_reset(rs);
3196            }
3197
3198            /* Read version before ram_list.blocks */
3199            smp_rmb();
3200
3201            ret = rdma_registration_start(f, RAM_CONTROL_ROUND);
3202            if (ret < 0) {
3203                qemu_file_set_error(f, ret);
3204                goto out;
3205            }
3206
3207            t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3208            i = 0;
3209            while ((ret = migration_rate_exceeded(f)) == 0 ||
3210                   postcopy_has_request(rs)) {
3211                int pages;
3212
3213                if (qemu_file_get_error(f)) {
3214                    break;
3215                }
3216
3217                pages = ram_find_and_save_block(rs);
3218                /* no more pages to send */
3219                if (pages == 0) {
3220                    done = 1;
3221                    break;
3222                }
3223
3224                if (pages < 0) {
3225                    qemu_file_set_error(f, pages);
3226                    break;
3227                }
3228
3229                rs->target_page_count += pages;
3230
3231                /*
3232                 * We want to check in the 1st loop, just in case it was the 1st
3233                 * time and we had to sync the dirty bitmap.
3234                 * qemu_clock_get_ns() is a bit expensive, so we only check once
3235                 * every few iterations.
3236                 */
3237                if ((i & 63) == 0) {
3238                    uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
3239                        1000000;
3240                    if (t1 > MAX_WAIT) {
3241                        trace_ram_save_iterate_big_wait(t1, i);
3242                        break;
3243                    }
3244                }
3245                i++;
3246            }
3247        }
3248    }
3249
3250    /*
3251     * Must occur before EOS (or any QEMUFile operation)
3252     * because of RDMA protocol.
3253     */
3254    ret = rdma_registration_stop(f, RAM_CONTROL_ROUND);
3255    if (ret < 0) {
3256        qemu_file_set_error(f, ret);
3257    }
3258
3259out:
3260    if (ret >= 0 && migration_is_running()) {
3261        if (multifd_ram_sync_per_section()) {
3262            ret = multifd_ram_flush_and_sync(f);
3263            if (ret < 0) {
3264                return ret;
3265            }
3266        }
3267
3268        qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3269        ram_transferred_add(8);
3270        ret = qemu_fflush(f);
3271    }
3272    if (ret < 0) {
3273        return ret;
3274    }
3275
3276    return done;
3277}
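
/*
 * A rough sketch (not authoritative) of the stream framing produced by one
 * ram_save_iterate() call above: a run of page records emitted by
 * ram_find_and_save_block(), an optional per-section multifd sync, and a
 * terminating EOS marker:
 *
 *   [page record] ...              (until done, rate-limited, or MAX_WAIT hit)
 *   [multifd flush/sync]           (only if multifd_ram_sync_per_section())
 *   be64 RAM_SAVE_FLAG_EOS         (8 bytes, accounted via ram_transferred_add)
 *
 * The return value is 0 while dirty pages remain, 1 once nothing is left to
 * send, or a negative error value.
 */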
3278
3279/**
3280 * ram_save_complete: function called to send the remaining RAM
3281 *
3282 * Returns zero to indicate success or negative on error
3283 *
3284 * Called with the BQL
3285 *
3286 * @f: QEMUFile where to send the data
3287 * @opaque: RAMState pointer
3288 */
3289static int ram_save_complete(QEMUFile *f, void *opaque)
3290{
3291    RAMState **temp = opaque;
3292    RAMState *rs = *temp;
3293    int ret = 0;
3294
3295    trace_ram_save_complete(rs->migration_dirty_pages, 0);
3296
3297    rs->last_stage = !migration_in_colo_state();
3298
3299    WITH_RCU_READ_LOCK_GUARD() {
3300        if (!migration_in_postcopy()) {
3301            migration_bitmap_sync_precopy(true);
3302        }
3303
3304        ret = rdma_registration_start(f, RAM_CONTROL_FINISH);
3305        if (ret < 0) {
3306            qemu_file_set_error(f, ret);
3307            return ret;
3308        }
3309
3310        /* try transferring iterative blocks of memory */
3311
3312        /* flush all remaining blocks regardless of rate limiting */
3313        qemu_mutex_lock(&rs->bitmap_mutex);
3314        while (true) {
3315            int pages;
3316
3317            pages = ram_find_and_save_block(rs);
3318            /* no more blocks to send */
3319            if (pages == 0) {
3320                break;
3321            }
3322            if (pages < 0) {
3323                qemu_mutex_unlock(&rs->bitmap_mutex);
3324                return pages;
3325            }
3326        }
3327        qemu_mutex_unlock(&rs->bitmap_mutex);
3328
3329        ret = rdma_registration_stop(f, RAM_CONTROL_FINISH);
3330        if (ret < 0) {
3331            qemu_file_set_error(f, ret);
3332            return ret;
3333        }
3334    }
3335
3336    if (multifd_ram_sync_per_section()) {
3337        /*
3338         * Only the old dest QEMU will need this sync, because each EOS
3339         * will require one SYNC message on each channel.
3340         */
3341        ret = multifd_ram_flush_and_sync(f);
3342        if (ret < 0) {
3343            return ret;
3344        }
3345    }
3346
3347    if (migrate_mapped_ram()) {
3348        ram_save_file_bmap(f);
3349
3350        if (qemu_file_get_error(f)) {
3351            Error *local_err = NULL;
3352            int err = qemu_file_get_error_obj(f, &local_err);
3353
3354            error_reportf_err(local_err, "Failed to write bitmap to file: ");
3355            return -err;
3356        }
3357    }
3358
3359    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
3360
3361    trace_ram_save_complete(rs->migration_dirty_pages, 1);
3362
3363    return qemu_fflush(f);
3364}
3365
3366static void ram_state_pending_estimate(void *opaque, uint64_t *must_precopy,
3367                                       uint64_t *can_postcopy)
3368{
3369    RAMState **temp = opaque;
3370    RAMState *rs = *temp;
3371
3372    uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3373
3374    if (migrate_postcopy_ram()) {
3375        /* We can do postcopy, and all the data is postcopiable */
3376        *can_postcopy += remaining_size;
3377    } else {
3378        *must_precopy += remaining_size;
3379    }
3380}
3381
3382static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
3383                                    uint64_t *can_postcopy)
3384{
3385    RAMState **temp = opaque;
3386    RAMState *rs = *temp;
3387    uint64_t remaining_size;
3388
3389    if (!migration_in_postcopy()) {
3390        bql_lock();
3391        WITH_RCU_READ_LOCK_GUARD() {
3392            migration_bitmap_sync_precopy(false);
3393        }
3394        bql_unlock();
3395    }
3396
3397    remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
3398
3399    if (migrate_postcopy_ram()) {
3400        /* We can do postcopy, and all the data is postcopiable */
3401        *can_postcopy += remaining_size;
3402    } else {
3403        *must_precopy += remaining_size;
3404    }
3405}
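
/*
 * The two ram_state_pending_*() hooks above differ only in precision: the
 * "estimate" variant just reads the current migration_dirty_pages counter,
 * while the "exact" variant first re-syncs the dirty bitmap (under the BQL
 * and an RCU read lock) when not in postcopy, so its count also reflects
 * pages dirtied since the last sync. Both report
 * migration_dirty_pages * TARGET_PAGE_SIZE as postcopiable when postcopy-ram
 * is enabled, and as must-precopy otherwise.
 */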
3406
3407static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3408{
3409    unsigned int xh_len;
3410    int xh_flags;
3411    uint8_t *loaded_data;
3412
3413    /* extract RLE header */
3414    xh_flags = qemu_get_byte(f);
3415    xh_len = qemu_get_be16(f);
3416
3417    if (xh_flags != ENCODING_FLAG_XBZRLE) {
3418        error_report("Failed to load XBZRLE page - wrong compression!");
3419        return -1;
3420    }
3421
3422    if (xh_len > TARGET_PAGE_SIZE) {
3423        error_report("Failed to load XBZRLE page - len overflow!");
3424        return -1;
3425    }
3426    loaded_data = XBZRLE.decoded_buf;
3427    /* load data and decode */
3428    /* it can change loaded_data to point to an internal buffer */
3429    qemu_get_buffer_in_place(f, &loaded_data, xh_len);
3430
3431    /* decode RLE */
3432    if (xbzrle_decode_buffer(loaded_data, xh_len, host,
3433                             TARGET_PAGE_SIZE) == -1) {
3434        error_report("Failed to load XBZRLE page - decode error!");
3435        return -1;
3436    }
3437
3438    return 0;
3439}
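
/*
 * For reference, the body of an XBZRLE page record as read by load_xbzrle()
 * above (the preceding addr/flags word is consumed by the caller):
 *
 *   u8         xh_flags   -- must be ENCODING_FLAG_XBZRLE
 *   be16       xh_len     -- encoded length, at most TARGET_PAGE_SIZE
 *   u8[xh_len]            -- encoded delta, applied to "host" by
 *                            xbzrle_decode_buffer()
 */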
3440
3441/**
3442 * ram_block_from_stream: read a RAMBlock id from the migration stream
3443 *
3444 * Must be called from within a rcu critical section.
3445 *
3446 * Returns a pointer from within the RCU-protected ram_list.
3447 *
3448 * @mis: the migration incoming state pointer
3449 * @f: QEMUFile where to read the data from
3450 * @flags: Page flags (mostly to see if it's a continuation of previous block)
3451 * @channel: the channel we're using
3452 */
3453static inline RAMBlock *ram_block_from_stream(MigrationIncomingState *mis,
3454                                              QEMUFile *f, int flags,
3455                                              int channel)
3456{
3457    RAMBlock *block = mis->last_recv_block[channel];
3458    char id[256];
3459    uint8_t len;
3460
3461    if (flags & RAM_SAVE_FLAG_CONTINUE) {
3462        if (!block) {
3463            error_report("Ack, bad migration stream!");
3464            return NULL;
3465        }
3466        return block;
3467    }
3468
3469    len = qemu_get_byte(f);
3470    qemu_get_buffer(f, (uint8_t *)id, len);
3471    id[len] = 0;
3472
3473    block = qemu_ram_block_by_name(id);
3474    if (!block) {
3475        error_report("Can't find block %s", id);
3476        return NULL;
3477    }
3478
3479    if (migrate_ram_is_ignored(block)) {
3480        error_report("block %s should not be migrated !", id);
3481        return NULL;
3482    }
3483
3484    mis->last_recv_block[channel] = block;
3485
3486    return block;
3487}
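
/*
 * For reference, the block reference consumed by ram_block_from_stream()
 * above: with RAM_SAVE_FLAG_CONTINUE set, no bytes follow and the last block
 * seen on this channel is reused; otherwise the stream carries
 *
 *   u8      len           -- length of the block id
 *   u8[len]               -- block id (not NUL-terminated on the wire)
 *
 * and the block is looked up with qemu_ram_block_by_name() and cached in
 * mis->last_recv_block[channel].
 */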
3488
3489static inline void *host_from_ram_block_offset(RAMBlock *block,
3490                                               ram_addr_t offset)
3491{
3492    if (!offset_in_ramblock(block, offset)) {
3493        return NULL;
3494    }
3495
3496    return block->host + offset;
3497}
3498
3499static void *host_page_from_ram_block_offset(RAMBlock *block,
3500                                             ram_addr_t offset)
3501{
3502    /* Note: Explicitly no check against offset_in_ramblock(). */
3503    return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset),
3504                                   block->page_size);
3505}
3506
3507static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block,
3508                                                         ram_addr_t offset)
3509{
3510    return ((uintptr_t)block->host + offset) & (block->page_size - 1);
3511}
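
/*
 * Illustrative example for the helpers above, assuming a hugetlbfs-backed
 * RAMBlock with a 2 MiB page_size whose host mapping is 2 MiB aligned
 * (values chosen purely for illustration): for offset 0x201000,
 * host_from_ram_block_offset() returns host + 0x201000 (if within
 * used_length), host_page_from_ram_block_offset() returns host + 0x200000,
 * and host_page_offset_from_ram_block_offset() returns 0x1000.
 */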
3512
3513void colo_record_bitmap(RAMBlock *block, ram_addr_t *normal, uint32_t pages)
3514{
3515    qemu_mutex_lock(&ram_state->bitmap_mutex);
3516    for (int i = 0; i < pages; i++) {
3517        ram_addr_t offset = normal[i];
3518        ram_state->migration_dirty_pages += !test_and_set_bit(
3519                                                offset >> TARGET_PAGE_BITS,
3520                                                block->bmap);
3521    }
3522    qemu_mutex_unlock(&ram_state->bitmap_mutex);
3523}
3524
3525static inline void *colo_cache_from_block_offset(RAMBlock *block,
3526                             ram_addr_t offset, bool record_bitmap)
3527{
3528    if (!offset_in_ramblock(block, offset)) {
3529        return NULL;
3530    }
3531    if (!block->colo_cache) {
3532        error_report("%s: colo_cache is NULL in block :%s",
3533                     __func__, block->idstr);
3534        return NULL;
3535    }
3536
3537    /*
3538     * During a COLO checkpoint, we need a bitmap of these migrated pages.
3539     * It helps us decide which pages in the RAM cache should be flushed
3540     * into the VM's RAM later.
3541     */
3542    if (record_bitmap) {
3543        colo_record_bitmap(block, &offset, 1);
3544    }
3545    return block->colo_cache + offset;
3546}
3547
3548/**
3549 * ram_handle_zero: handle the zero page case
3550 *
3551 * If a page (or a whole RDMA chunk) has been
3552 * determined to be zero, then zap it. Only
3553 * zero-filled pages are supported.
3554 *
3555 * @host: host address for the zero page
3556 * @size: size of the zero page
3557 */
3558void ram_handle_zero(void *host, uint64_t size)
3559{
3560    if (!buffer_is_zero(host, size)) {
3561        memset(host, 0, size);
3562    }
3563}
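
/*
 * Checking buffer_is_zero() before the memset() is presumably there to avoid
 * writing to pages that are already zero, so that untouched destination pages
 * are not needlessly dirtied (and allocated) just to be filled with zeroes
 * again.
 */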
3564
3565static void colo_init_ram_state(void)
3566{
3567    Error *local_err = NULL;
3568
3569    if (!ram_state_init(&ram_state, &local_err)) {
3570        error_report_err(local_err);
3571    }
3572}
3573
3574/*
3575 * colo cache: this is for the secondary VM; we cache the whole
3576 * memory of the secondary VM. The global lock must be held when
3577 * calling this helper.
3578 */
3579int colo_init_ram_cache(void)
3580{
3581    RAMBlock *block;
3582
3583    WITH_RCU_READ_LOCK_GUARD() {
3584        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3585            block->colo_cache = qemu_anon_ram_alloc(block->used_length,
3586                                                    NULL, false, false);
3587            if (!block->colo_cache) {
3588                error_report("%s: Can't alloc memory for COLO cache of block %s, "
3589                             "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3590                             block->used_length);
3591                RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3592                    if (block->colo_cache) {
3593                        qemu_anon_ram_free(block->colo_cache, block->used_length);
3594                        block->colo_cache = NULL;
3595                    }
3596                }
3597                return -errno;
3598            }
3599            if (!machine_dump_guest_core(current_machine)) {
3600                qemu_madvise(block->colo_cache, block->used_length,
3601                             QEMU_MADV_DONTDUMP);
3602            }
3603        }
3604    }
3605
3606    /*
3607     * Record the dirty pages sent by the PVM; we use this dirty bitmap to
3608     * decide which pages in the cache should be flushed into the SVM's RAM
3609     * later. Here we use the same name 'ram_bitmap' as for migration.
3610     */
3611    if (ram_bytes_total()) {
3612        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3613            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
3614            block->bmap = bitmap_new(pages);
3615        }
3616    }
3617
3618    colo_init_ram_state();
3619    return 0;
3620}
3621
3622/* TODO: duplicated with ram_init_bitmaps */
3623void colo_incoming_start_dirty_log(void)
3624{
3625    RAMBlock *block = NULL;
3626    Error *local_err = NULL;
3627
3628    /* For memory_global_dirty_log_start below. */
3629    bql_lock();
3630    qemu_mutex_lock_ramlist();
3631
3632    memory_global_dirty_log_sync(false);
3633    WITH_RCU_READ_LOCK_GUARD() {
3634        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3635            ramblock_sync_dirty_bitmap(ram_state, block);
3636            /* Discard this dirty bitmap record */
3637            bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS);
3638        }
3639        if (!memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION,
3640                                           &local_err)) {
3641            error_report_err(local_err);
3642        }
3643    }
3644    ram_state->migration_dirty_pages = 0;
3645    qemu_mutex_unlock_ramlist();
3646    bql_unlock();
3647}
3648
3649/* The global lock must be held when calling this helper */
3650void colo_release_ram_cache(void)
3651{
3652    RAMBlock *block;
3653
3654    memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
3655    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3656        g_free(block->bmap);
3657        block->bmap = NULL;
3658    }
3659
3660    WITH_RCU_READ_LOCK_GUARD() {
3661        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3662            if (block->colo_cache) {
3663                qemu_anon_ram_free(block->colo_cache, block->used_length);
3664                block->colo_cache = NULL;
3665            }
3666        }
3667    }
3668    ram_state_cleanup(&ram_state);
3669}
3670
3671/**
3672 * ram_load_setup: Setup RAM for migration incoming side
3673 *
3674 * Returns zero to indicate success and negative for error
3675 *
3676 * @f: QEMUFile where to receive the data
3677 * @opaque: RAMState pointer
3678 * @errp: pointer to Error*, to store an error if it happens.
3679 */
3680static int ram_load_setup(QEMUFile *f, void *opaque, Error **errp)
3681{
3682    xbzrle_load_setup();
3683    ramblock_recv_map_init();
3684
3685    return 0;
3686}
3687
3688static int ram_load_cleanup(void *opaque)
3689{
3690    RAMBlock *rb;
3691
3692    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
3693        if (memory_region_is_nonvolatile(rb->mr)) {
3694            qemu_ram_block_writeback(rb);
3695        }
3696    }
3697
3698    xbzrle_load_cleanup();
3699
3700    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
3701        g_free(rb->receivedmap);
3702        rb->receivedmap = NULL;
3703    }
3704
3705    return 0;
3706}
3707
3708/**
3709 * ram_postcopy_incoming_init: allocate postcopy data structures
3710 *
3711 * Returns 0 for success and negative if there was an error
3712 *
3713 * @mis: current migration incoming state
3714 *
3715 * Allocate the data structures etc. needed by incoming migration with
3716 * postcopy-ram. postcopy-ram's similarly named
3717 * postcopy_ram_incoming_init() does the work.
3718 */
3719int ram_postcopy_incoming_init(MigrationIncomingState *mis)
3720{
3721    return postcopy_ram_incoming_init(mis);
3722}
3723
3724/**
3725 * ram_load_postcopy: load a page in postcopy case
3726 *
3727 * Returns 0 for success or -errno in case of error
3728 *
3729 * Called in postcopy mode by ram_load().
3730 * rcu_read_lock is taken prior to this being called.
3731 *
3732 * @f: QEMUFile where to receive the data
3733 * @channel: the channel to use for loading
3734 */
3735int ram_load_postcopy(QEMUFile *f, int channel)
3736{
3737    int flags = 0, ret = 0;
3738    bool place_needed = false;
3739    bool matches_target_page_size = false;
3740    MigrationIncomingState *mis = migration_incoming_get_current();
3741    PostcopyTmpPage *tmp_page = &mis->postcopy_tmp_pages[channel];
3742
3743    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
3744        ram_addr_t addr;
3745        void *page_buffer = NULL;
3746        void *place_source = NULL;
3747        RAMBlock *block = NULL;
3748        uint8_t ch;
3749
3750        addr = qemu_get_be64(f);
3751
3752        /*
3753         * If there is a QEMUFile error, we should stop here; "addr"
3754         * may then be invalid.
3755         */
3756        ret = qemu_file_get_error(f);
3757        if (ret) {
3758            break;
3759        }
3760
3761        flags = addr & ~TARGET_PAGE_MASK;
3762        addr &= TARGET_PAGE_MASK;
3763
3764        trace_ram_load_postcopy_loop(channel, (uint64_t)addr, flags);
3765        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
3766            block = ram_block_from_stream(mis, f, flags, channel);
3767            if (!block) {
3768                ret = -EINVAL;
3769                break;
3770            }
3771
3772            /*
3773             * Relying on used_length is racy and can result in false positives.
3774             * We might place pages beyond used_length in case RAM was shrunk
3775             * while in postcopy, which is fine - trying to place via
3776             * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault.
3777             */
3778            if (!block->host || addr >= block->postcopy_length) {
3779                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3780                ret = -EINVAL;
3781                break;
3782            }
3783            tmp_page->target_pages++;
3784            matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
3785            /*
3786             * Postcopy requires that we place whole host pages atomically;
3787             * these may be huge pages for RAMBlocks that are backed by
3788             * hugetlbfs.
3789             * To make it atomic, the data is read into a temporary page
3790             * that's moved into place later.
3791             * The migration protocol uses (possibly smaller) target pages;
3792             * however, the source ensures it always sends all the components
3793             * of a host page in one chunk.
3794             */
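            /*
             * For example (purely illustrative numbers): with 4 KiB target
             * pages and a 2 MiB hugetlbfs host page, 512 target pages are
             * accumulated in tmp_page->tmp_huge_page and the host page is
             * only placed once target_pages reaches
             * block->page_size / TARGET_PAGE_SIZE (512), see below.
             */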
3795            page_buffer = tmp_page->tmp_huge_page +
3796                          host_page_offset_from_ram_block_offset(block, addr);
3797            /* If all target pages are zero we can optimise the placement */
3798            if (tmp_page->target_pages == 1) {
3799                tmp_page->host_addr =
3800                    host_page_from_ram_block_offset(block, addr);
3801            } else if (tmp_page->host_addr !=
3802                       host_page_from_ram_block_offset(block, addr)) {
3803                /* not the 1st TP within the HP */
3804                error_report("Non-same host page detected on channel %d: "
3805                             "Target host page %p, received host page %p "
3806                             "(rb %s offset 0x"RAM_ADDR_FMT" target_pages %d)",
3807                             channel, tmp_page->host_addr,
3808                             host_page_from_ram_block_offset(block, addr),
3809                             block->idstr, addr, tmp_page->target_pages);
3810                ret = -EINVAL;
3811                break;
3812            }
3813
3814            /*
3815             * If it's the last part of a host page then we place the host
3816             * page
3817             */
3818            if (tmp_page->target_pages ==
3819                (block->page_size / TARGET_PAGE_SIZE)) {
3820                place_needed = true;
3821            }
3822            place_source = tmp_page->tmp_huge_page;
3823        }
3824
3825        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
3826        case RAM_SAVE_FLAG_ZERO:
3827            ch = qemu_get_byte(f);
3828            if (ch != 0) {
3829                error_report("Found a zero page with value %d", ch);
3830                ret = -EINVAL;
3831                break;
3832            }
3833            /*
3834             * We can skip setting page_buffer when this is a zero page
3835             * and (block->page_size == TARGET_PAGE_SIZE).
3836             */
3837            if (!matches_target_page_size) {
3838                memset(page_buffer, ch, TARGET_PAGE_SIZE);
3839            }
3840            break;
3841
3842        case RAM_SAVE_FLAG_PAGE:
3843            tmp_page->all_zero = false;
3844            if (!matches_target_page_size) {
3845                /* For huge pages, we always use a temporary buffer */
3846                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
3847            } else {
3848                /*
3849                 * For small pages that match the target page size, we
3850                 * avoid the qemu_file copy.  Instead we directly use
3851                 * the buffer of QEMUFile to place the page.  Note: we
3852                 * cannot do any QEMUFile operation before using that
3853                 * buffer to make sure the buffer is valid when
3854                 * placing the page.
3855                 */
3856                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
3857                                         TARGET_PAGE_SIZE);
3858            }
3859            break;
3860        case RAM_SAVE_FLAG_EOS:
3861            break;
3862        default:
3863            error_report("Unknown combination of migration flags: 0x%x"
3864                         " (postcopy mode)", flags);
3865            ret = -EINVAL;
3866            break;
3867        }
3868
3869        /* Detect for any possible file errors */
3870        if (!ret && qemu_file_get_error(f)) {
3871            ret = qemu_file_get_error(f);
3872        }
3873
3874        if (!ret && place_needed) {
3875            if (tmp_page->all_zero) {
3876                ret = postcopy_place_page_zero(mis, tmp_page->host_addr, block);
3877            } else {
3878                ret = postcopy_place_page(mis, tmp_page->host_addr,
3879                                          place_source, block);
3880            }
3881            place_needed = false;
3882            postcopy_temp_page_reset(tmp_page);
3883        }
3884    }
3885
3886    return ret;
3887}
3888
3889static bool postcopy_is_running(void)
3890{
3891    PostcopyState ps = postcopy_state_get();
3892    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
3893}
3894
3895/*
3896 * Flush the contents of the RAM cache into the SVM's memory.
3897 * Only flush the pages that have been dirtied by the PVM, the SVM, or both.
3898 */
3899void colo_flush_ram_cache(void)
3900{
3901    RAMBlock *block = NULL;
3902    void *dst_host;
3903    void *src_host;
3904    unsigned long offset = 0;
3905
3906    memory_global_dirty_log_sync(false);
3907    qemu_mutex_lock(&ram_state->bitmap_mutex);
3908    WITH_RCU_READ_LOCK_GUARD() {
3909        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3910            ramblock_sync_dirty_bitmap(ram_state, block);
3911        }
3912    }
3913
3914    trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
3915    WITH_RCU_READ_LOCK_GUARD() {
3916        block = QLIST_FIRST_RCU(&ram_list.blocks);
3917
3918        while (block) {
3919            unsigned long num = 0;
3920
3921            offset = colo_bitmap_find_dirty(ram_state, block, offset, &num);
3922            if (!offset_in_ramblock(block,
3923                                    ((ram_addr_t)offset) << TARGET_PAGE_BITS)) {
3924                offset = 0;
3925                num = 0;
3926                block = QLIST_NEXT_RCU(block, next);
3927            } else {
3928                unsigned long i = 0;
3929
3930                for (i = 0; i < num; i++) {
3931                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
3932                }
3933                dst_host = block->host
3934                         + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
3935                src_host = block->colo_cache
3936                         + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
3937                memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num);
3938                offset += num;
3939            }
3940        }
3941    }
3942    qemu_mutex_unlock(&ram_state->bitmap_mutex);
3943    trace_colo_flush_ram_cache_end();
3944}
3945
3946static size_t ram_load_multifd_pages(void *host_addr, size_t size,
3947                                     uint64_t offset)
3948{
3949    MultiFDRecvData *data = multifd_get_recv_data();
3950
3951    data->opaque = host_addr;
3952    data->file_offset = offset;
3953    data->size = size;
3954
3955    if (!multifd_recv()) {
3956        return 0;
3957    }
3958
3959    return size;
3960}
3961
3962static bool read_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
3963                                     long num_pages, unsigned long *bitmap,
3964                                     Error **errp)
3965{
3966    ERRP_GUARD();
3967    unsigned long set_bit_idx, clear_bit_idx;
3968    ram_addr_t offset;
3969    void *host;
3970    size_t read, unread, size;
3971
3972    for (set_bit_idx = find_first_bit(bitmap, num_pages);
3973         set_bit_idx < num_pages;
3974         set_bit_idx = find_next_bit(bitmap, num_pages, clear_bit_idx + 1)) {
3975
3976        clear_bit_idx = find_next_zero_bit(bitmap, num_pages, set_bit_idx + 1);
3977
3978        unread = TARGET_PAGE_SIZE * (clear_bit_idx - set_bit_idx);
3979        offset = set_bit_idx << TARGET_PAGE_BITS;
3980
3981        while (unread > 0) {
3982            host = host_from_ram_block_offset(block, offset);
3983            if (!host) {
3984                error_setg(errp, "page outside of ramblock %s range",
3985                           block->idstr);
3986                return false;
3987            }
3988
3989            size = MIN(unread, MAPPED_RAM_LOAD_BUF_SIZE);
3990
3991            if (migrate_multifd()) {
3992                read = ram_load_multifd_pages(host, size,
3993                                              block->pages_offset + offset);
3994            } else {
3995                read = qemu_get_buffer_at(f, host, size,
3996                                          block->pages_offset + offset);
3997            }
3998
3999            if (!read) {
4000                goto err;
4001            }
4002            offset += read;
4003            unread -= read;
4004        }
4005    }
4006
4007    return true;
4008
4009err:
4010    qemu_file_get_error_obj(f, errp);
4011    error_prepend(errp, "(%s) failed to read page " RAM_ADDR_FMT
4012                  "from file offset %" PRIx64 ": ", block->idstr, offset,
4013                  block->pages_offset + offset);
4014    return false;
4015}
4016
4017static void parse_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
4018                                      ram_addr_t length, Error **errp)
4019{
4020    g_autofree unsigned long *bitmap = NULL;
4021    MappedRamHeader header;
4022    size_t bitmap_size;
4023    long num_pages;
4024
4025    if (!mapped_ram_read_header(f, &header, errp)) {
4026        return;
4027    }
4028
4029    block->pages_offset = header.pages_offset;
4030
4031    /*
4032     * Check the alignment of the file region that contains pages. We
4033     * don't enforce MAPPED_RAM_FILE_OFFSET_ALIGNMENT to allow that
4034     * value to change in the future. Only do a sanity check on page
4035     * size alignment.
4036     */
4037    if (!QEMU_IS_ALIGNED(block->pages_offset, TARGET_PAGE_SIZE)) {
4038        error_setg(errp,
4039                   "Error reading ramblock %s pages, region has bad alignment",
4040                   block->idstr);
4041        return;
4042    }
4043
4044    num_pages = length / header.page_size;
4045    bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);
4046
4047    bitmap = g_malloc0(bitmap_size);
4048    if (qemu_get_buffer_at(f, (uint8_t *)bitmap, bitmap_size,
4049                           header.bitmap_offset) != bitmap_size) {
4050        error_setg(errp, "Error reading dirty bitmap");
4051        return;
4052    }
4053
4054    if (!read_ramblock_mapped_ram(f, block, num_pages, bitmap, errp)) {
4055        return;
4056    }
4057
4058    /* Skip pages array */
4059    qemu_set_offset(f, block->pages_offset + length, SEEK_SET);
4060}
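
/*
 * A rough sketch of the per-RAMBlock layout in a mapped-ram migration file,
 * as consumed by parse_ramblock_mapped_ram()/read_ramblock_mapped_ram()
 * above (only the header fields used here are shown):
 *
 *   MappedRamHeader                -- provides bitmap_offset, pages_offset
 *                                     and page_size
 *   @bitmap_offset: dirty bitmap   -- length / page_size bits, one per page
 *   @pages_offset:  page data      -- page i (if its bit is set) lives at
 *                                     pages_offset + (i << TARGET_PAGE_BITS)
 *
 * Pages whose bit is clear are never read; after loading, the file position
 * is advanced to pages_offset + length.
 */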
4061
4062static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length)
4063{
4064    int ret = 0;
4065    /* ADVISE came earlier; it shows the source has the postcopy capability */
4066    bool postcopy_advised = migration_incoming_postcopy_advised();
4067    int max_hg_page_size;
4068    Error *local_err = NULL;
4069
4070    assert(block);
4071
4072    if (migrate_mapped_ram()) {
4073        parse_ramblock_mapped_ram(f, block, length, &local_err);
4074        if (local_err) {
4075            error_report_err(local_err);
4076            return -EINVAL;
4077        }
4078        return 0;
4079    }
4080
4081    if (!qemu_ram_is_migratable(block)) {
4082        error_report("block %s should not be migrated !", block->idstr);
4083        return -EINVAL;
4084    }
4085
4086    if (length != block->used_length) {
4087        ret = qemu_ram_resize(block, length, &local_err);
4088        if (local_err) {
4089            error_report_err(local_err);
4090            return ret;
4091        }
4092    }
4093
4094    /*
4095     * ??? Mirrors the previous value of qemu_host_page_size,
4096     * but is this really what was intended for the migration?
4097     */
4098    max_hg_page_size = MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE);
4099
4100    /* For postcopy we need to check hugepage sizes match */
4101    if (postcopy_advised && migrate_postcopy_ram() &&
4102        block->page_size != max_hg_page_size) {
4103        uint64_t remote_page_size = qemu_get_be64(f);
4104        if (remote_page_size != block->page_size) {
4105            error_report("Mismatched RAM page size %s "
4106                         "(local) %zd != %" PRId64, block->idstr,
4107                         block->page_size, remote_page_size);
4108            return -EINVAL;
4109        }
4110    }
4111    if (migrate_ignore_shared()) {
4112        hwaddr addr = qemu_get_be64(f);
4113        if (migrate_ram_is_ignored(block) &&
4114            block->mr->addr != addr) {
4115            error_report("Mismatched GPAs for block %s "
4116                         "%" PRId64 "!= %" PRId64, block->idstr,
4117                         (uint64_t)addr, (uint64_t)block->mr->addr);
4118            return -EINVAL;
4119        }
4120    }
4121    ret = rdma_block_notification_handle(f, block->idstr);
4122    if (ret < 0) {
4123        qemu_file_set_error(f, ret);
4124    }
4125
4126    return ret;
4127}
4128
4129static int parse_ramblocks(QEMUFile *f, ram_addr_t total_ram_bytes)
4130{
4131    int ret = 0;
4132
4133    /* Synchronize RAM block list */
4134    while (!ret && total_ram_bytes) {
4135        RAMBlock *block;
4136        char id[256];
4137        ram_addr_t length;
4138        int len = qemu_get_byte(f);
4139
4140        qemu_get_buffer(f, (uint8_t *)id, len);
4141        id[len] = 0;
4142        length = qemu_get_be64(f);
4143
4144        block = qemu_ram_block_by_name(id);
4145        if (block) {
4146            ret = parse_ramblock(f, block, length);
4147        } else {
4148            error_report("Unknown ramblock \"%s\", cannot accept "
4149                         "migration", id);
4150            ret = -EINVAL;
4151        }
4152        total_ram_bytes -= length;
4153    }
4154
4155    return ret;
4156}
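
/*
 * A sketch of what parse_ramblocks()/parse_ramblock() above expect after a
 * RAM_SAVE_FLAG_MEM_SIZE record, whose addr field carries total_ram_bytes;
 * for each RAMBlock, until total_ram_bytes is consumed:
 *
 *   u8      len                 -- length of the block id
 *   u8[len]                     -- block id
 *   be64    used_length
 *   [be64   remote page size]   -- only when postcopy was advised and the
 *                                  block's page size differs from the host
 *                                  page size
 *   [be64   block GPA]          -- only with the ignore-shared capability
 *
 * For mapped-ram migration, the per-block mapped-ram header is parsed
 * instead (see parse_ramblock_mapped_ram() above).
 */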
4157
4158/**
4159 * ram_load_precopy: load pages in precopy case
4160 *
4161 * Returns 0 for success or -errno in case of error
4162 *
4163 * Called in precopy mode by ram_load().
4164 * rcu_read_lock is taken prior to this being called.
4165 *
4166 * @f: QEMUFile where to receive the data
4167 */
4168static int ram_load_precopy(QEMUFile *f)
4169{
4170    MigrationIncomingState *mis = migration_incoming_get_current();
4171    int flags = 0, ret = 0, invalid_flags = 0, i = 0;
4172
4173    if (migrate_mapped_ram()) {
4174        invalid_flags |= (RAM_SAVE_FLAG_HOOK | RAM_SAVE_FLAG_MULTIFD_FLUSH |
4175                          RAM_SAVE_FLAG_PAGE | RAM_SAVE_FLAG_XBZRLE |
4176                          RAM_SAVE_FLAG_ZERO);
4177    }
4178
4179    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4180        ram_addr_t addr;
4181        void *host = NULL, *host_bak = NULL;
4182        uint8_t ch;
4183
4184        /*
4185         * Yield periodically to let the main loop run, but an iteration of
4186         * the main loop is expensive, so only do it every so many iterations.
4187         */
4188        if ((i & 32767) == 0 && qemu_in_coroutine()) {
4189            aio_co_schedule(qemu_get_current_aio_context(),
4190                            qemu_coroutine_self());
4191            qemu_coroutine_yield();
4192        }
4193        i++;
4194
4195        addr = qemu_get_be64(f);
4196        ret = qemu_file_get_error(f);
4197        if (ret) {
4198            error_report("Getting RAM address failed");
4199            break;
4200        }
4201
4202        flags = addr & ~TARGET_PAGE_MASK;
4203        addr &= TARGET_PAGE_MASK;
4204
4205        if (flags & invalid_flags) {
4206            error_report("Unexpected RAM flags: %d", flags & invalid_flags);
4207
4208            ret = -EINVAL;
4209            break;
4210        }
4211
4212        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
4213                     RAM_SAVE_FLAG_XBZRLE)) {
4214            RAMBlock *block = ram_block_from_stream(mis, f, flags,
4215                                                    RAM_CHANNEL_PRECOPY);
4216
4217            host = host_from_ram_block_offset(block, addr);
4218            /*
4219             * After entering the COLO stage, we should not load pages into the
4220             * SVM's memory directly; we put them into colo_cache first.
4221             * NOTE: We need to keep a copy of the SVM's RAM in colo_cache.
4222             * Previously, we copied all this memory in the COLO preparation
4223             * stage, during which the VM has to be stopped, which is
4224             * time-consuming. Here we optimize it with a trick: back up every
4225             * page during migration while COLO is enabled. Although this
4226             * affects migration speed, it clearly reduces the downtime of
4227             * backing up all of the SVM's memory in the COLO preparation stage.
4228             */
4229            if (migration_incoming_colo_enabled()) {
4230                if (migration_incoming_in_colo_state()) {
4231                    /* In COLO stage, put all pages into cache temporarily */
4232                    host = colo_cache_from_block_offset(block, addr, true);
4233                } else {
4234                    /*
4235                     * In the migration stage but before the COLO stage,
4236                     * put all pages into both the cache and the SVM's memory.
4237                     */
4238                    host_bak = colo_cache_from_block_offset(block, addr, false);
4239                }
4240            }
4241            if (!host) {
4242                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4243                ret = -EINVAL;
4244                break;
4245            }
4246            if (!migration_incoming_in_colo_state()) {
4247                ramblock_recv_bitmap_set(block, host);
4248            }
4249
4250            trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
4251        }
4252
4253        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4254        case RAM_SAVE_FLAG_MEM_SIZE:
4255            ret = parse_ramblocks(f, addr);
4256            /*
4257             * For mapped-ram migration (to a file) using multifd, we sync
4258             * once and for all here to make sure all tasks we queued to
4259             * multifd threads are completed, so that all the ramblocks
4260             * (including all the guest memory pages within) are fully
4261             * loaded after this sync returns.
4262             */
4263            if (migrate_mapped_ram()) {
4264                multifd_recv_sync_main();
4265            }
4266            break;
4267
4268        case RAM_SAVE_FLAG_ZERO:
4269            ch = qemu_get_byte(f);
4270            if (ch != 0) {
4271                error_report("Found a zero page with value %d", ch);
4272                ret = -EINVAL;
4273                break;
4274            }
4275            ram_handle_zero(host, TARGET_PAGE_SIZE);
4276            break;
4277
4278        case RAM_SAVE_FLAG_PAGE:
4279            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
4280            break;
4281
4282        case RAM_SAVE_FLAG_XBZRLE:
4283            if (load_xbzrle(f, addr, host) < 0) {
4284                error_report("Failed to decompress XBZRLE page at "
4285                             RAM_ADDR_FMT, addr);
4286                ret = -EINVAL;
4287                break;
4288            }
4289            break;
4290        case RAM_SAVE_FLAG_MULTIFD_FLUSH:
4291            multifd_recv_sync_main();
4292            break;
4293        case RAM_SAVE_FLAG_EOS:
4294            /* normal exit */
4295            if (migrate_multifd() &&
4296                migrate_multifd_flush_after_each_section() &&
4297                /*
4298                 * Mapped-ram migration flushes once and for all after
4299                 * parsing ramblocks. Always ignore EOS for it.
4300                 */
4301                !migrate_mapped_ram()) {
4302                multifd_recv_sync_main();
4303            }
4304            break;
4305        case RAM_SAVE_FLAG_HOOK:
4306            ret = rdma_registration_handle(f);
4307            if (ret < 0) {
4308                qemu_file_set_error(f, ret);
4309            }
4310            break;
4311        default:
4312            error_report("Unknown combination of migration flags: 0x%x", flags);
4313            ret = -EINVAL;
4314        }
4315        if (!ret) {
4316            ret = qemu_file_get_error(f);
4317        }
4318        if (!ret && host_bak) {
4319            memcpy(host_bak, host, TARGET_PAGE_SIZE);
4320        }
4321    }
4322
4323    return ret;
4324}
4325
4326static int ram_load(QEMUFile *f, void *opaque, int version_id)
4327{
4328    int ret = 0;
4329    static uint64_t seq_iter;
4330    /*
4331     * If the system is running in postcopy mode, page inserts into host
4332     * memory must be atomic.
4333     */
4334    bool postcopy_running = postcopy_is_running();
4335
4336    seq_iter++;
4337
4338    if (version_id != 4) {
4339        return -EINVAL;
4340    }
4341
4342    /*
4343     * This RCU critical section can be very long running.
4344     * When RCU reclaims in the code start to become numerous,
4345     * it will be necessary to reduce the granularity of this
4346     * critical section.
4347     */
4348    trace_ram_load_start();
4349    WITH_RCU_READ_LOCK_GUARD() {
4350        if (postcopy_running) {
4351            /*
4352             * Note!  Here RAM_CHANNEL_PRECOPY is the precopy channel of
4353             * postcopy migration, we have another RAM_CHANNEL_POSTCOPY to
4354             * service fast page faults.
4355             */
4356            ret = ram_load_postcopy(f, RAM_CHANNEL_PRECOPY);
4357        } else {
4358            ret = ram_load_precopy(f);
4359        }
4360    }
4361    trace_ram_load_complete(ret, seq_iter);
4362
4363    return ret;
4364}
4365
4366static bool ram_has_postcopy(void *opaque)
4367{
4368    RAMBlock *rb;
4369    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
4370        if (ramblock_is_pmem(rb)) {
4371            info_report("Block: %s, host: %p is an nvdimm memory, postcopy "
4372                         "is not supported now!", rb->idstr, rb->host);
4373            return false;
4374        }
4375    }
4376
4377    return migrate_postcopy_ram();
4378}
4379
4380/* Sync all the dirty bitmap with destination VM.  */
4381static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
4382{
4383    RAMBlock *block;
4384    QEMUFile *file = s->to_dst_file;
4385
4386    trace_ram_dirty_bitmap_sync_start();
4387
4388    qatomic_set(&rs->postcopy_bmap_sync_requested, 0);
4389    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4390        qemu_savevm_send_recv_bitmap(file, block->idstr);
4391        trace_ram_dirty_bitmap_request(block->idstr);
4392        qatomic_inc(&rs->postcopy_bmap_sync_requested);
4393    }
4394
4395    trace_ram_dirty_bitmap_sync_wait();
4396
4397    /* Wait until all the ramblocks' dirty bitmap synced */
4398    while (qatomic_read(&rs->postcopy_bmap_sync_requested)) {
4399        if (migration_rp_wait(s)) {
4400            return -1;
4401        }
4402    }
4403
4404    trace_ram_dirty_bitmap_sync_complete();
4405
4406    return 0;
4407}
4408
4409/*
4410 * Read the received bitmap and invert it to form the initial dirty bitmap.
4411 * This is only used when the postcopy migration is paused but wants
4412 * to resume from a middle point.
4413 *
4414 * Returns true if succeeded, false for errors.
4415 */
4416bool ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block, Error **errp)
4417{
4418    /* from_dst_file is always valid because we're within rp_thread */
4419    QEMUFile *file = s->rp_state.from_dst_file;
4420    g_autofree unsigned long *le_bitmap = NULL;
4421    unsigned long nbits = block->used_length >> TARGET_PAGE_BITS;
4422    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
4423    uint64_t size, end_mark;
4424    RAMState *rs = ram_state;
4425
4426    trace_ram_dirty_bitmap_reload_begin(block->idstr);
4427
4428    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
4429        error_setg(errp, "Reload bitmap in incorrect state %s",
4430                   MigrationStatus_str(s->state));
4431        return false;
4432    }
4433
4434    /*
4435     * Note: see comments in ramblock_recv_bitmap_send() on why we
4436     * need the endianness conversion, and the paddings.
4437     */
4438    local_size = ROUND_UP(local_size, 8);
4439
4440    /* Add paddings */
4441    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
4442
4443    size = qemu_get_be64(file);
4444
4445    /* The size of the bitmap should match with our ramblock */
4446    if (size != local_size) {
4447        error_setg(errp, "ramblock '%s' bitmap size mismatch (0x%"PRIx64
4448                   " != 0x%"PRIx64")", block->idstr, size, local_size);
4449        return false;
4450    }
4451
4452    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4453    end_mark = qemu_get_be64(file);
4454
4455    if (qemu_file_get_error(file) || size != local_size) {
4456        error_setg(errp, "read bitmap failed for ramblock '%s': "
4457                   "(size 0x%"PRIx64", got: 0x%"PRIx64")",
4458                   block->idstr, local_size, size);
4459        return false;
4460    }
4461
4462    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
4463        error_setg(errp, "ramblock '%s' end mark incorrect: 0x%"PRIx64,
4464                   block->idstr, end_mark);
4465        return false;
4466    }
4467
4468    /*
4469     * Endianness conversion. We are during postcopy (though paused).
4470     * The dirty bitmap won't change. We can directly modify it.
4471     */
4472    bitmap_from_le(block->bmap, le_bitmap, nbits);
4473
4474    /*
4475     * What we received is the "received bitmap". Invert it to form the
4476     * initial dirty bitmap for this ramblock.
4477     */
4478    bitmap_complement(block->bmap, block->bmap, nbits);
4479
4480    /* Clear dirty bits of discarded ranges that we don't want to migrate. */
4481    ramblock_dirty_bitmap_clear_discarded_pages(block);
4482
4483    /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
4484    trace_ram_dirty_bitmap_reload_complete(block->idstr);
4485
4486    qatomic_dec(&rs->postcopy_bmap_sync_requested);
4487
4488    /*
4489     * We succeeded in syncing the bitmap for the current ramblock. Always
4490     * kick the migration thread to check whether all requested bitmaps are
4491     * reloaded.  NOTE: it's racy to only kick when requested==0, because
4492     * we don't know whether the migration thread may still be increasing
4493     * it.
4494     */
4495    migration_rp_kick(s);
4496
4497    return true;
4498}
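
/*
 * A rough sketch of the recv-bitmap message consumed by
 * ram_dirty_bitmap_reload() above (its producer is
 * ramblock_recv_bitmap_send() on the destination side):
 *
 *   be64       size       -- bitmap size in bytes, rounded up to 8
 *   u8[size]              -- little-endian bitmap, one bit per target page
 *   be64       end mark   -- must be RAMBLOCK_RECV_BITMAP_ENDING
 *
 * The received bitmap marks pages the destination already has, so it is
 * complemented to become the initial dirty bitmap for the resumed postcopy.
 */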
4499
4500static int ram_resume_prepare(MigrationState *s, void *opaque)
4501{
4502    RAMState *rs = *(RAMState **)opaque;
4503    int ret;
4504
4505    ret = ram_dirty_bitmap_sync_all(s, rs);
4506    if (ret) {
4507        return ret;
4508    }
4509
4510    ram_state_resume_prepare(rs, s->to_dst_file);
4511
4512    return 0;
4513}
4514
4515static bool ram_save_postcopy_prepare(QEMUFile *f, void *opaque, Error **errp)
4516{
4517    int ret;
4518
4519    if (migrate_multifd()) {
4520        /*
4521         * When multifd is enabled, source QEMU needs to make sure all the
4522         * pages queued before postcopy starts have been flushed.
4523         *
4524         * The load of these pages must happen before switching to postcopy.
4525         * It's because loading of guest pages (so far) in multifd recv
4526         * threads is still non-atomic, so the load cannot happen with vCPUs
4527         * running on the destination side.
4528         *
4529         * This flush and sync will guarantee that those pages are loaded
4530         * _before_ postcopy starts on the destination. The rationale is,
4531         * this happens before VM stops (and before source QEMU sends all
4532         * the rest of the postcopy messages).  So when the destination QEMU
4533         * receives the postcopy messages, it must have received the sync
4534         * message on the main channel (either RAM_SAVE_FLAG_MULTIFD_FLUSH,
4535         * or RAM_SAVE_FLAG_EOS), and such message would guarantee that
4536         * all previous guest pages queued in the multifd channels are
4537         * completely loaded.
4538         */
4539        ret = multifd_ram_flush_and_sync(f);
4540        if (ret < 0) {
4541            error_setg(errp, "%s: multifd flush and sync failed", __func__);
4542            return false;
4543        }
4544    }
4545
4546    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
4547
4548    return true;
4549}
4550
4551void postcopy_preempt_shutdown_file(MigrationState *s)
4552{
4553    qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
4554    qemu_fflush(s->postcopy_qemufile_src);
4555}
4556
4557static SaveVMHandlers savevm_ram_handlers = {
4558    .save_setup = ram_save_setup,
4559    .save_live_iterate = ram_save_iterate,
4560    .save_complete = ram_save_complete,
4561    .has_postcopy = ram_has_postcopy,
4562    .state_pending_exact = ram_state_pending_exact,
4563    .state_pending_estimate = ram_state_pending_estimate,
4564    .load_state = ram_load,
4565    .save_cleanup = ram_save_cleanup,
4566    .load_setup = ram_load_setup,
4567    .load_cleanup = ram_load_cleanup,
4568    .resume_prepare = ram_resume_prepare,
4569    .save_postcopy_prepare = ram_save_postcopy_prepare,
4570};
4571
4572static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
4573                                      size_t old_size, size_t new_size)
4574{
4575    PostcopyState ps = postcopy_state_get();
4576    ram_addr_t offset;
4577    RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
4578    Error *err = NULL;
4579
4580    if (!rb) {
4581        error_report("RAM block not found");
4582        return;
4583    }
4584
4585    if (migrate_ram_is_ignored(rb)) {
4586        return;
4587    }
4588
4589    if (migration_is_running()) {
4590        /*
4591         * Precopy code on the source cannot deal with the size of RAM blocks
4592         * changing at random points in time - especially after sending the
4593         * RAM block sizes in the migration stream, they must no longer change.
4594         * Abort and indicate a proper reason.
4595         */
4596        error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
4597        migrate_set_error(migrate_get_current(), err);
4598        error_free(err);
4599
4600        migration_cancel();
4601    }
4602
4603    switch (ps) {
4604    case POSTCOPY_INCOMING_ADVISE:
4605        /*
4606         * Update what ram_postcopy_incoming_init()->init_range() does at the
4607         * time postcopy was advised. Syncing RAM blocks with the source will
4608         * result in RAM resizes.
4609         */
4610        if (old_size < new_size) {
4611            if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
4612                error_report("RAM block '%s' discard of resized RAM failed",
4613                             rb->idstr);
4614            }
4615        }
4616        rb->postcopy_length = new_size;
4617        break;
4618    case POSTCOPY_INCOMING_NONE:
4619    case POSTCOPY_INCOMING_RUNNING:
4620    case POSTCOPY_INCOMING_END:
4621        /*
4622         * Once our guest is running, postcopy no longer cares about
4623         * resizes. When growing, the new memory was not available on the
4624         * source, so no handler is needed.
4625         */
4626        break;
4627    default:
4628        error_report("RAM block '%s' resized during postcopy state: %d",
4629                     rb->idstr, ps);
4630        exit(-1);
4631    }
4632}
4633
4634static RAMBlockNotifier ram_mig_ram_notifier = {
4635    .ram_block_resized = ram_mig_ram_block_resized,
4636};
4637
4638void ram_mig_init(void)
4639{
4640    qemu_mutex_init(&XBZRLE.lock);
4641    register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
4642    ram_block_notifier_add(&ram_mig_ram_notifier);
4643}
4644