qemu/migration/postcopy-ram.c
/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert  <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and to efficiently map new pages in; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>


/**
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd - must already have been checked by the caller
 * @features: out parameter that will contain uffdio_api.features provided by
 *            the kernel on success
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    /* if we are here, __NR_userfaultfd should exist */
    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: syscall __NR_userfaultfd failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}

/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd; subsequent calls will lead to an error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from the userfaultfd syscall
 * @features: bit mask, see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

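    /*
     * The UFFDIO_API handshake also reports, in api_struct.ioctls, which
     * ioctls this kernel supports; postcopy needs at least REGISTER and
     * UNREGISTER.
     */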
    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}

static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * It's not possible to request UFFD_API twice on one fd, and the set
     * of features the kernel supports doesn't change, so query it only
     * once and cache the result.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

    /*
     * Request the features, even if asked_features is 0, because the
     * kernel expects UFFD_API before UFFDIO_REGISTER on each userfault
     * file descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (getpagesize() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}

/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(const char *block_name, void *host_addr,
                             ram_addr_t offset, ram_addr_t length, void *opaque)
{
    RAMBlock *rb = qemu_ram_block_by_name(block_name);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (qemu_ram_is_shared(rb)) {
        error_report("Postcopy on shared RAM (%s) is not yet supported",
                     block_name);
        return 1;
    }

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }
    return 0;
}

/*
 * Note: This has the side effect of munlock'ing all of RAM; that's
 * normally fine, since if the postcopy succeeds it gets turned back on at
 * the end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (qemu_ram_foreach_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     *  We need to check that the ops we need are supported on anon memory.
     *  To do that we need to register a chunk and see the flags that
     *  are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize-1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

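    /*
     * UFFDIO_REGISTER reported, in reg_struct.ioctls, which page-fault
     * resolution ioctls work on this (anonymous) range; postcopy needs
     * WAKE, COPY and ZEROPAGE.
     */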
    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Set up an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                        ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepages for the precopy stage with postcopy enabled;
     * we can turn them back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.  It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    if (qemu_ram_foreach_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        uint64_t tmp64;

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }
        /*
         * Tell the fault_thread to exit; it's an eventfd that should
         * currently be at 0, and we're going to increment it to 1.
         */
        tmp64 = 1;
        if (write(mis->userfault_quit_fd, &tmp64, 8) == 8) {
            trace_postcopy_ram_incoming_cleanup_join();
            qemu_thread_join(&mis->fault_thread);
        } else {
            /* Not much we can do here, but may as well report it */
            error_report("%s: incrementing userfault_quit_fd: %s", __func__,
                         strerror(errno));
        }
        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_quit_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                    ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THP'd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification when a page in it
 * is accessed before it has been written (copied in).
 * Used as a callback on qemu_ram_foreach_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
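    /* UFFDIO_REGISTER reports which ioctls work on the range; we need COPY */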
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }

    return 0;
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    RAMBlock *rb = NULL;
    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

    trace_postcopy_ram_fault_thread_entry();
    qemu_sem_post(&mis->fault_thread_sem);

    while (true) {
        ram_addr_t rb_offset;
        struct pollfd pfd[2];

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA;
         * however, we can be told to quit via userfault_quit_fd, which is
         * an eventfd.
         */
        pfd[0].fd = mis->userfault_fd;
        pfd[0].events = POLLIN;
        pfd[0].revents = 0;
        pfd[1].fd = mis->userfault_quit_fd;
        pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
        pfd[1].revents = 0;

        if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (pfd[1].revents) {
            trace_postcopy_ram_fault_thread_quit();
            break;
        }

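        /* Read one struct uffd_msg describing the fault */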
        ret = read(mis->userfault_fd, &msg, sizeof(msg));
        if (ret != sizeof(msg)) {
            if (errno == EAGAIN) {
                /*
                 * if a wake up happens on the other thread just after
                 * the poll, there is nothing to read.
                 */
                continue;
            }
            if (ret < 0) {
                error_report("%s: Failed to read full userfault message: %s",
                             __func__, strerror(errno));
                break;
            } else {
                error_report("%s: Read %d bytes from userfaultfd expected %zd",
                             __func__, ret, sizeof(msg));
                break; /* Lost alignment, don't know what we'd read next */
            }
        }
        if (msg.event != UFFD_EVENT_PAGEFAULT) {
            error_report("%s: Read unexpected event %u from userfaultfd",
                         __func__, msg.event);
            continue; /* It's not a page fault, shouldn't happen */
        }

        rb = qemu_ram_block_from_host(
                 (void *)(uintptr_t)msg.arg.pagefault.address,
                 true, &rb_offset);
        if (!rb) {
            error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                         PRIx64, (uint64_t)msg.arg.pagefault.address);
            break;
        }

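        /* Round the faulting offset down to the start of this block's page */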
        rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
        trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset);

        /*
         * Send the request to the source - we want to request one
         * of our host page sizes (which is >= TPS)
         */
        if (rb != last_rb) {
            last_rb = rb;
            migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                     rb_offset, qemu_ram_pagesize(rb));
        } else {
            /* Save some space */
            migrate_send_rp_req_pages(mis, NULL,
                                     rb_offset, qemu_ram_pagesize(rb));
        }
    }
    trace_postcopy_ram_fault_thread_exit();
    return NULL;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_quit_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_quit_fd == -1) {
        error_report("%s: Opening userfault_quit_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

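    /*
     * Start the fault thread; it posts fault_thread_sem once it is up and
     * running, so wait for that before registering the RAM blocks.
     */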
    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying;
     * that would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}

static int qemu_ufd_copy_ioctl(int userfault_fd, void *host_addr,
                               void *from_addr, uint64_t pagesize, RAMBlock *rb)
{
    int ret;
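    /*
     * UFFDIO_COPY atomically copies the source page into the faulting
     * range and wakes any thread waiting on it; UFFDIO_ZEROPAGE does the
     * same with a zero page, without needing a source copy.
     */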
    if (from_addr) {
        struct uffdio_copy copy_struct;
        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
        copy_struct.len = pagesize;
        copy_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
    } else {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
        zero_struct.range.len = pagesize;
        zero_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
    }
    if (!ret) {
        ramblock_recv_bitmap_set_range(rb, host_addr,
                                       pagesize / qemu_target_page_size());
    }
    return ret;
}

/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, from, pagesize, rb)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zd)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return 0;
}

/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    trace_postcopy_place_page_zero(host);

    if (qemu_ram_pagesize(rb) == getpagesize()) {
        if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, NULL, getpagesize(),
                                rb)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
    } else {
        /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
        if (!mis->postcopy_tmp_zero_page) {
            mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                               PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS,
                                               -1, 0);
            if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
                int e = errno;
                mis->postcopy_tmp_zero_page = NULL;
                error_report("%s: %s mapping large zero page",
                             __func__, strerror(e));
                return -e;
            }
            memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
        }
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
                                   rb);
    }

    return 0;
}

/*
 * Returns a target page of memory that can be mapped at a later point in time
 * using postcopy_place_page.
 * The same address is used repeatedly; postcopy_place_page just takes the
 * backing page away.
 * Returns: Pointer to allocated page
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
                             PROT_READ | PROT_WRITE, MAP_PRIVATE |
                             MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}

#else
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    assert(0);
    return NULL;
}

#endif

/* ------------------------------------------------------------------------- */

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    if (res) {
        res->ramblock_name = name;
    }

    return res;
}

/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                unsigned long start, unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = start  * tp_size;
    pds->length_list[pds->cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}

/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 * bitmap code. Sends any outstanding discard messages, frees the PDS
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}

/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

PostcopyState  postcopy_state_get(void)
{
    return atomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return atomic_xchg(&incoming_postcopy_state, new_state);
}