qemu/migration/migration.h
/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_MIGRATION_H
#define QEMU_MIGRATION_H

#include "exec/cpu-common.h"
#include "hw/qdev-core.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qmp/json-writer.h"
#include "qemu/thread.h"
#include "qemu/coroutine_int.h"
#include "io/channel.h"
#include "io/channel-buffer.h"
#include "net/announce.h"
#include "qom/object.h"
#include "postcopy-ram.h"
#include "sysemu/runstate.h"

struct PostcopyBlocktimeContext;

#define  MIGRATION_RESUME_ACK_VALUE  (1)

/*
 * 1<<6=64 pages -> 256K chunk when page size is 4K.  This has the
 * benefit that all chunks are 64-page aligned, so the bitmaps are
 * always aligned to an unsigned long.
 */
#define CLEAR_BITMAP_SHIFT_MIN             6
/*
 * 1<<18=256K pages -> 1G chunk when page size is 4K.  This is the
 * default value to use if none is specified.
 */
#define CLEAR_BITMAP_SHIFT_DEFAULT        18
/*
 * 1<<31=2G pages -> 8T chunk when page size is 4K.  This should be
 * big enough while making sure we won't overflow easily.
 */
#define CLEAR_BITMAP_SHIFT_MAX            31
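
/*
 * Illustrative sketch (an addition of this edit, not part of the upstream
 * header): how a clear-bitmap shift value maps to the chunk size described
 * in the comments above, assuming a 4K guest page size.  The function name
 * is made up purely for demonstration.
 */
static inline uint64_t clear_bitmap_chunk_size_example(unsigned int shift)
{
    /*
     * A chunk covers (1 << shift) guest pages of 4K each, e.g.:
     *   shift = 6  ->   64 pages -> 256K chunk
     *   shift = 18 -> 256K pages ->   1G chunk
     *   shift = 31 ->   2G pages ->   8T chunk
     */
    return (uint64_t)4096 << shift;
}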

/* This is an abstraction of a "temp huge page" for postcopy's purposes */
typedef struct {
    /*
     * This points to a temporary huge page used as a buffer for UFFDIO_COPY.
     * It's mmap()ed and needs to be freed during cleanup.
     */
    void *tmp_huge_page;
    /*
     * This points to the host page we're going to install for this temp page.
     * It tells us where to put the page once we've received all of it.
     */
    void *host_addr;
    /* Number of small pages copied (each of TARGET_PAGE_SIZE bytes) */
    unsigned int target_pages;
    /* Whether this page contains all zeros */
    bool all_zero;
} PostcopyTmpPage;
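
/*
 * Illustrative sketch (an addition of this edit, not an upstream helper):
 * how the fields above fit together.  A temp huge page is complete once the
 * number of copied small pages covers the whole host huge page; it can then
 * be installed at host_addr (or zero-filled if all_zero is set).  The
 * function and parameter names are made up for demonstration.
 */
static inline bool postcopy_tmp_page_is_full_example(const PostcopyTmpPage *p,
                                                     size_t host_page_size,
                                                     size_t target_page_size)
{
    /* One small page is accumulated per received TARGET_PAGE_SIZE chunk */
    return p->target_pages == host_page_size / target_page_size;
}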

typedef enum {
    PREEMPT_THREAD_NONE = 0,
    PREEMPT_THREAD_CREATED,
    PREEMPT_THREAD_QUIT,
} PreemptThreadStatus;

/* State for the incoming migration */
struct MigrationIncomingState {
    QEMUFile *from_src_file;
    /* Previously received RAM's RAMBlock pointer */
    RAMBlock *last_recv_block[RAM_CHANNEL_MAX];
    /* A hook to allow cleanup at the end of incoming migration */
    void *transport_data;
    void (*transport_cleanup)(void *data);
    /*
     * Used to synchronize thread creation.  Note that we can't create
     * threads in parallel with this sem.
     */
    QemuSemaphore  thread_sync_sem;
    /*
     * Reset at the start of the main state load, set once the main thread
     * finishes loading state.
     */
    QemuEvent main_thread_load_event;

    /* For network announces */
    AnnounceTimer  announce_timer;

    size_t         largest_page_size;
    bool           have_fault_thread;
    QemuThread     fault_thread;
    /* Set this when we want the fault thread to quit */
    bool           fault_thread_quit;

    bool           have_listen_thread;
    QemuThread     listen_thread;

    /* For the kernel to send us notifications */
    int       userfault_fd;
    /* To notify the fault_thread to wake, e.g., when it needs to quit */
    int       userfault_event_fd;
    QEMUFile *to_src_file;
    QemuMutex rp_mutex;    /* We send replies from multiple threads */
    /* RAMBlock of last request sent to source */
    RAMBlock *last_rb;
    /*
     * Number of postcopy channels including the default precopy channel, so
     * vanilla postcopy will only contain one channel which contains both
     * precopy and postcopy streams.
     *
     * This is calculated when the src requests to enable postcopy but before
     * it starts.  Its value can depend on e.g. whether postcopy preemption is
     * enabled.
     */
    unsigned int postcopy_channels;
    /* QEMUFile for postcopy only; it'll be handled by a separate thread */
    QEMUFile *postcopy_qemufile_dst;
    /*
     * When postcopy_qemufile_dst is properly set up, this sem is posted.
     * One can wait on this semaphore until the preempt channel is
     * properly set up.
     */
    QemuSemaphore postcopy_qemufile_dst_done;
    /* The postcopy priority thread is used to receive postcopy requested pages */
    QemuThread postcopy_prio_thread;
    /*
     * Always set by the main vm load thread only, but can be read by the
     * postcopy preempt thread.  "volatile" makes sure all reads will be
     * up-to-date across cores.
     */
    volatile PreemptThreadStatus preempt_thread_status;
    /*
     * Used to sync between the ram load main thread and the fast ram load
     * thread.  It protects postcopy_qemufile_dst, which is the postcopy
     * fast channel.
     *
     * The ram fast load thread will hold it for most of its lifecycle
     * because it needs to continuously read data from the channel, and
     * it'll only release this mutex if postcopy is interrupted, so that
     * the ram load main thread can take this mutex over and properly
     * release the broken channel.
     */
    QemuMutex postcopy_prio_thread_mutex;
    /*
     * An array of temp host huge pages to be used, one for each postcopy
     * channel.
     */
    PostcopyTmpPage *postcopy_tmp_pages;
    /* This is shared for all postcopy channels */
    void     *postcopy_tmp_zero_page;
    /* PostCopyFD's for external userfaultfds & handlers of shared memory */
    GArray   *postcopy_remote_fds;

    QEMUBH *bh;

    int state;

    /*
     * The incoming migration coroutine, non-NULL during qemu_loadvm_state().
     * Used to wake the incoming migration coroutine from RDMA code.  How
     * safe this is remains an open question.
     */
    Coroutine *loadvm_co;

    /* The coroutine we should enter (back) after failover */
    Coroutine *colo_incoming_co;
    QemuSemaphore colo_incoming_sem;

    /*
     * PostcopyBlocktimeContext keeps information for postcopy live
     * migration, used to calculate vCPU block time.
     */
    struct PostcopyBlocktimeContext *blocktime_ctx;

    /* Notify PAUSED postcopy incoming migrations to try to continue */
    QemuSemaphore postcopy_pause_sem_dst;
    QemuSemaphore postcopy_pause_sem_fault;
    /*
     * This semaphore is used to allow the ram fast load thread (only when
     * postcopy preempt is enabled) to go to sleep when a network
     * interruption is detected.  When the recovery is done, the main load
     * thread will kick the fast ram load thread using this semaphore.
     */
    QemuSemaphore postcopy_pause_sem_fast_load;

    /* List of listening socket addresses */
    SocketAddressList *socket_address_list;

    /* A tree of pages that we requested from the source VM */
    GTree *page_requested;
    /* For debugging purposes only, but would be nice to keep */
    int page_requested_count;
    /*
     * The mutex helps to maintain the requested pages that we sent to the
     * source, IOW, to guarantee coherence between the page_requested tree and
     * the per-ramblock receivedmap.  Note! This does not guarantee consistency
     * of the real page copy procedures (using UFFDIO_[ZERO]COPY).  E.g., even
     * if one bit in receivedmap is cleared, UFFDIO_COPY could have happened
     * for that page already.  This is intended so that the mutex won't
     * serialize on, or be blocked by, slow operations like UFFDIO_* ioctls.
     * However, this should be enough to make sure the page_requested tree
     * always contains valid information.
     */
    QemuMutex page_request_mutex;

    /*
     * Number of devices that have yet to approve switchover.  When this
     * reaches zero, an ACK that it's OK to do switchover is sent to the
     * source.  No lock is needed as this field is updated serially.
     */
    unsigned int switchover_ack_pending_num;
};

MigrationIncomingState *migration_incoming_get_current(void);
void migration_incoming_state_destroy(void);
void migration_incoming_transport_cleanup(MigrationIncomingState *mis);
/*
 * Functions to work with blocktime context
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info);

#define TYPE_MIGRATION "migration"

typedef struct MigrationClass MigrationClass;
DECLARE_OBJ_CHECKERS(MigrationState, MigrationClass,
                     MIGRATION_OBJ, TYPE_MIGRATION)

struct MigrationClass {
    /*< private >*/
    DeviceClass parent_class;
};

struct MigrationState {
    /*< private >*/
    DeviceState parent_obj;

    /*< public >*/
    QemuThread thread;
    QEMUBH *vm_start_bh;
    QEMUBH *cleanup_bh;
    /* Protected by qemu_file_lock */
    QEMUFile *to_dst_file;
    /* Postcopy-specific transfer channel */
    QEMUFile *postcopy_qemufile_src;
    /*
     * It is posted when the preempt channel is established.  Note: this is
     * used for both the start and the recovery of a postcopy migration.
     * We'll post to this sem every time a new preempt channel is created in
     * the main thread, and we keep post() and wait() in pairs.
     */
    QemuSemaphore postcopy_qemufile_src_sem;
    QIOChannelBuffer *bioc;
    /*
     * Protects the to_dst_file/from_dst_file pointers.  We need to make sure
     * we won't yield or hang during the critical section, since this lock
     * will be used in the OOB command handler.
     */
    QemuMutex qemu_file_lock;

    /*
     * Used to allow urgent requests to override rate limiting.
     */
    QemuSemaphore rate_limit_sem;

    /* pages already sent at the beginning of the current iteration */
    uint64_t iteration_initial_pages;

    /* pages transferred per second */
    double pages_per_second;

    /* bytes already sent at the beginning of the current iteration */
    uint64_t iteration_initial_bytes;
    /* time at the start of the current iteration */
    int64_t iteration_start_time;
    /*
     * The final stage happens when the remaining data is smaller than
     * this threshold; it's calculated from the requested downtime and
     * measured bandwidth.
     */
    int64_t threshold_size;

    /* params from 'migrate-set-parameters' */
    MigrationParameters parameters;

    int state;

    /* State related to return path */
    struct {
        /* Protected by qemu_file_lock */
        QEMUFile     *from_dst_file;
        QemuThread    rp_thread;
        bool          error;
        /*
         * We could also check whether rp_thread is non-zero, but there's no
         * "official" way to do this, so this bool makes it slightly more
         * elegant.  Checking from_dst_file for this is racy because
         * from_dst_file will be cleared in the rp_thread!
         */
        bool          rp_thread_created;
        QemuSemaphore rp_sem;
        /*
         * We post to this when we get a PONG from the destination.  So far
         * it's an easy way to know that the main channel has been
         * successfully established on the destination QEMU.
         */
        QemuSemaphore rp_pong_acks;
    } rp_state;

    double mbps;
    /* Timestamp when the most recent migration started (ms) */
    int64_t start_time;
    /* Total time used by the latest migration (ms) */
    int64_t total_time;
    /* Timestamp (ms) when the VM was stopped to migrate the last state */
    int64_t downtime_start;
    int64_t downtime;
    int64_t expected_downtime;
    bool capabilities[MIGRATION_CAPABILITY__MAX];
    int64_t setup_time;

    /*
     * State before stopping the vm by vm_stop_force_state().
     * If migration is interrupted for any reason, we need to continue
     * running the guest on the source if it was running, or restore its
     * stopped state.
     */
    RunState vm_old_state;

    /* Flag set once the migration has been asked to enter postcopy */
    bool start_postcopy;
    /* Flag set after postcopy has sent the device state */
    bool postcopy_after_devices;

    /* Flag set once the migration thread is running (and needs joining) */
    bool migration_thread_running;

    /* Flag set once the migration thread called bdrv_inactivate_all */
    bool block_inactive;

    /* Migration is waiting for the guest to unplug devices */
    QemuSemaphore wait_unplug_sem;

    /* Migration is paused due to pause-before-switchover */
    QemuSemaphore pause_sem;

    /* The semaphore is used to notify the COLO thread that failover is finished */
    QemuSemaphore colo_exit_sem;

    /* The event is used to notify the COLO thread to do a checkpoint */
    QemuEvent colo_checkpoint_event;
    int64_t colo_checkpoint_time;
    QEMUTimer *colo_delay_timer;

    /*
     * The first error that has occurred.  We use the mutex to be able to
     * return the first error message.
     */
    Error *error;
    /* mutex to protect errp */
    QemuMutex error_mutex;

    /* Do we have to clean up -b/-i from old migrate parameters */
    /* This feature is deprecated and will be removed */
    bool must_remove_block_options;

    /*
     * Global switch on whether we need to store the global state
     * during migration.
     */
    bool store_global_state;

    /* Whether we send QEMU_VM_CONFIGURATION during migration */
    bool send_configuration;
    /* Whether we send a section footer during migration */
    bool send_section_footer;

    /* Needed by postcopy-pause state */
    QemuSemaphore postcopy_pause_sem;
    QemuSemaphore postcopy_pause_rp_sem;
    /*
     * Whether we abort the migration if decompression errors are
     * detected at the destination.  It is left at false for QEMU
     * older than 3.0, since only newer QEMU sends streams that
     * do not trigger spurious decompression errors.
     */
    bool decompress_error_check;
    /*
     * This variable only affects behavior when postcopy preempt mode is
     * enabled.
     *
     * When set:
     *
     * - the postcopy preempt src QEMU instance will generate an EOS message
     *   at the end of migration to shut the preempt channel on the dest side.
     *
     * - the postcopy preempt channel will be created at the setup phase on
     *   the src QEMU.
     *
     * When clear:
     *
     * - the postcopy preempt src QEMU instance will _not_ generate an EOS
     *   message at the end of migration; the dest QEMU will shut down the
     *   channel itself.
     *
     * - the postcopy preempt channel will be created at the switching phase
     *   from precopy -> postcopy (to avoid a race condition caused by
     *   misordered creation of channels).
     *
     * NOTE: See message-id <ZBoShWArKDPpX/D7@work-vm> on the qemu-devel
     * mailing list for more information on the possible race.  Everyone
     * should probably just keep this value untouched after it is set by the
     * machine type (or the default).
     */
    bool preempt_pre_7_2;

    /*
     * Flush every channel after each section sent.
     *
     * This assures that we can't mix pages from one iteration through
     * ram pages with pages for the following iteration.  We really
     * only need to do this flush after we have gone through all the
     * dirty pages.  For historical reasons, we do that after each
     * section.  This is suboptimal (we flush too many times).
     * Default value is false. (since 8.1)
     */
    bool multifd_flush_after_each_section;
    /*
     * This decides the size of the guest memory chunk that will be used
     * to track dirty bitmap clearing.  The size of the memory chunk will
     * be GUEST_PAGE_SIZE << N.  Say, N=0 means we will clear the dirty
     * bitmap for each page to send (1<<0=1); N=10 means we will clear the
     * dirty bitmap only once for 1<<10=1K contiguous guest pages
     * (which is a 4M chunk).
     */
    uint8_t clear_bitmap_shift;

    /*
     * This saves the hostname when the outgoing migration starts
     */
    char *hostname;

    /* QEMU_VM_VMDESCRIPTION content filled for all non-iterable devices. */
    JSONWriter *vmdesc;

    /*
     * Indicates whether an ACK from the destination that it's OK to do
     * switchover has been received.
     */
    bool switchover_acked;
};
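
/*
 * Illustrative sketch (an addition of this edit, not upstream code): how the
 * threshold_size field above is typically derived from the measured bandwidth
 * and the requested downtime limit, per its comment.  The function and
 * parameter names are made up for demonstration.
 */
static inline int64_t migration_threshold_size_example(double bandwidth_bytes_per_ms,
                                                       int64_t downtime_limit_ms)
{
    /*
     * Enter the final stage once the remaining dirty data can be sent
     * within the allowed downtime at the currently measured bandwidth.
     */
    return (int64_t)(bandwidth_bytes_per_ms * downtime_limit_ms);
}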

void migrate_set_state(int *state, int old_state, int new_state);

void migration_fd_process_incoming(QEMUFile *f, Error **errp);
void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
void migration_incoming_process(void);

bool  migration_has_all_channels(void);

uint64_t migrate_max_downtime(void);

void migrate_set_error(MigrationState *s, const Error *error);

void migrate_fd_connect(MigrationState *s, Error *error_in);

bool migration_is_setup_or_active(int state);
bool migration_is_running(int state);

void migrate_init(MigrationState *s);
bool migration_is_blocked(Error **errp);
/* True if outgoing migration has entered postcopy phase */
bool migration_in_postcopy(void);
MigrationState *migrate_get_current(void);

uint64_t ram_get_total_transferred_pages(void);

/* Sending on the return path - generic and then for each message type */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value);
int migrate_send_rp_req_pages(MigrationIncomingState *mis, RAMBlock *rb,
                              ram_addr_t start, uint64_t haddr);
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start);
void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name);
void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value);
int migrate_send_rp_switchover_ack(MigrationIncomingState *mis);
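
/*
 * Illustrative sketch (an addition of this edit, not an upstream helper): how
 * switchover_ack_pending_num in MigrationIncomingState ties into
 * migrate_send_rp_switchover_ack().  Each device that needs to approve
 * switchover decrements the counter; once it reaches zero, the destination
 * ACKs the switchover on the return path.  The function name is made up.
 */
static inline int migration_switchover_approve_example(MigrationIncomingState *mis)
{
    /* Updated serially on the destination, so no locking is needed here */
    if (mis->switchover_ack_pending_num == 0 ||
        --mis->switchover_ack_pending_num > 0) {
        return 0;
    }
    /* The last pending device has approved: ACK switchover to the source */
    return migrate_send_rp_switchover_ack(mis);
}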

void dirty_bitmap_mig_before_vm_start(void);
void dirty_bitmap_mig_cancel_outgoing(void);
void dirty_bitmap_mig_cancel_incoming(void);
bool check_dirty_bitmap_mig_alias_map(const BitmapMigrationNodeAliasList *bbm,
                                      Error **errp);

void migrate_add_address(SocketAddress *address);

int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque);

#define qemu_ram_foreach_block \
  #warning "Use foreach_not_ignored_block in migration code"
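
/*
 * Illustrative usage sketch (an addition of this edit, not upstream code):
 * walking migratable RAM blocks with foreach_not_ignored_block() rather than
 * qemu_ram_foreach_block().  This assumes RAMBlockIterFunc takes a RAMBlock
 * pointer plus an opaque pointer and stops on a non-zero return; the callback
 * and helper names are made up for demonstration.
 */
static inline int count_ramblock_example(RAMBlock *rb, void *opaque)
{
    (void)rb;
    /* Count every RAM block that is not ignored for migration */
    (*(unsigned int *)opaque)++;
    return 0; /* keep iterating */
}

static inline unsigned int count_migratable_blocks_example(void)
{
    unsigned int count = 0;

    foreach_not_ignored_block(count_ramblock_example, &count);
    return count;
}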

void migration_make_urgent_request(void);
void migration_consume_urgent_request(void);
bool migration_rate_limit(void);
void migration_cancel(const Error *error);

void populate_vfio_info(MigrationInfo *info);
void reset_vfio_bytes_transferred(void);
void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page);

#endif