qemu/migration/dirtyrate.c
/*
 * Dirty rate implementation code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <zlib.h>
#include "qapi/error.h"
#include "cpu.h"
#include "exec/ramblock.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "qapi/qapi-commands-migration.h"
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"
#include "monitor/hmp.h"
#include "monitor/monitor.h"
#include "qapi/qmp/qdict.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "exec/memory.h"

/*
 * total_dirty_pages is protected by the BQL and is used
 * to count dirty pages between two calls of
 * memory_global_dirty_log_sync().
 */
uint64_t total_dirty_pages;

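/*
 * Per-vCPU (or global) dirty page counters captured at the start and
 * end of a measurement period; their difference is the number of
 * pages dirtied during the period.
 */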
typedef struct DirtyPageRecord {
    uint64_t start_pages;
    uint64_t end_pages;
} DirtyPageRecord;

static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;
static DirtyRateMeasureMode dirtyrate_mode =
                DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;

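/*
 * Block until at least @msec milliseconds have elapsed since
 * @initial_time, sleeping for the remainder if needed; return the
 * actual elapsed time in milliseconds.
 */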
static int64_t set_sample_page_period(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
    }

    return msec;
}

static bool is_sample_period_valid(int64_t sec)
{
    if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC ||
        sec > MAX_FETCH_DIRTYRATE_TIME_SEC) {
        return false;
    }

    return true;
}

static bool is_sample_pages_valid(int64_t pages)
{
    return pages >= MIN_SAMPLE_PAGE_COUNT &&
           pages <= MAX_SAMPLE_PAGE_COUNT;
}

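/*
 * Atomically transition CalculatingState from @old_state to
 * @new_state; return 0 on success, -1 if another thread changed the
 * state first.
 */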
static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}

static struct DirtyRateInfo *query_dirty_rate_info(void)
{
    int i;
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_malloc0(sizeof(DirtyRateInfo));
    DirtyRateVcpuList *head = NULL, **tail = &head;

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = DirtyStat.calc_time;
    info->sample_pages = DirtyStat.sample_pages;
    info->mode = dirtyrate_mode;

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->has_dirty_rate = true;
        info->dirty_rate = dirty_rate;

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
            /*
             * Set sample_pages to 0 to indicate that page sampling
             * is not enabled.
             */
            info->sample_pages = 0;
            info->has_vcpu_dirty_rate = true;
            for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
                DirtyRateVcpu *rate = g_malloc0(sizeof(DirtyRateVcpu));
                rate->id = DirtyStat.dirty_ring.rates[i].id;
                rate->dirty_rate = DirtyStat.dirty_ring.rates[i].dirty_rate;
                QAPI_LIST_APPEND(tail, rate);
            }
            info->vcpu_dirty_rate = head;
        }

        if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
            info->sample_pages = 0;
        }
    }

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}

static void init_dirtyrate_stat(int64_t start_time,
                                struct DirtyRateConfig config)
{
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = start_time;
    DirtyStat.calc_time = config.sample_period_seconds;
    DirtyStat.sample_pages = config.sample_pages_per_gigabytes;

    switch (config.mode) {
    case DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING:
        DirtyStat.page_sampling.total_dirty_samples = 0;
        DirtyStat.page_sampling.total_sample_count = 0;
        DirtyStat.page_sampling.total_block_mem_MB = 0;
        break;
    case DIRTY_RATE_MEASURE_MODE_DIRTY_RING:
        DirtyStat.dirty_ring.nvcpu = -1;
        DirtyStat.dirty_ring.rates = NULL;
        break;
    default:
        break;
    }
}

static void cleanup_dirtyrate_stat(struct DirtyRateConfig config)
{
    /* The last calc-dirty-rate QMP command used dirty ring mode. */
    if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        free(DirtyStat.dirty_ring.rates);
        DirtyStat.dirty_ring.rates = NULL;
    }
}

static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
    /* size of total pages in MB */
    DirtyStat.page_sampling.total_block_mem_MB += (info->ramblock_pages *
                                                   TARGET_PAGE_SIZE) >> 20;
}

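/*
 * Derive the dirty rate in MB/s from the sampling statistics:
 *
 *   dirty_rate = (total_dirty_samples / total_sample_count)
 *                * total_block_mem_MB * 1000 / msec
 *
 * Illustrative example (numbers not from the source): 100 dirty
 * samples out of 400, 4096 MB of sampled memory and a 1000 ms period
 * give 100 * 4096 * 1000 / (400 * 1000) = 1024 MB/s.
 */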
static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.page_sampling.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.page_sampling.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.page_sampling.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}

/*
 * Compute the CRC32 hash of one sampled page (TARGET_PAGE_SIZE bytes)
 * in the ramblock; @vfn is the page's frame number relative to the
 * ramblock base address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t crc;

    crc = crc32(0, (info->ramblock_addr +
                vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);

    trace_get_ramblock_vfn_hash(info->idstr, vfn, crc);
    return crc;
}

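/*
 * Pick sample_pages_count random pages from the ramblock and record a
 * CRC32 hash for each; the hashes are compared after the measurement
 * period to detect pages that were dirtied.
 */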
static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /*
     * If the ramblock is smaller than one page or yields no samples,
     * return success so the block is simply skipped.
     */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }

    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}

static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to calc ramblock size in GB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           TARGET_PAGE_BITS;
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}

static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}

static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks of at least MIN_RAMBLOCK_SIZE KiB.
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}

static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}

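/*
 * Re-hash each sampled page and count those whose hash changed since
 * the initial snapshot; changed hashes indicate dirtied pages.
 */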
static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t crc;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (crc != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, crc, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}

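/*
 * Find the recorded RamblockDirtyInfo whose idstr matches @block;
 * return NULL if the block is unknown or was resized or remapped
 * since the hashes were taken.
 */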
static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;
    struct RamblockDirtyInfo *matched;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    matched = &infos[i];

    return matched;
}

static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.page_sampling.total_sample_count == 0) {
        return false;
    }

    return true;
}

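/*
 * Snapshot the per-vCPU dirty page counter at the start or the end of
 * the measurement period.
 */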
static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
                                     CPUState *cpu, bool start)
{
    if (start) {
        dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages;
    } else {
        dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages;
    }
}

static void dirtyrate_global_dirty_log_start(void)
{
    qemu_mutex_lock_iothread();
    memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);
    qemu_mutex_unlock_iothread();
}

static void dirtyrate_global_dirty_log_stop(void)
{
    qemu_mutex_lock_iothread();
    memory_global_dirty_log_sync();
    memory_global_dirty_log_stop(GLOBAL_DIRTY_DIRTY_RATE);
    qemu_mutex_unlock_iothread();
}

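/*
 * Convert the increase in dirty pages over the period into MB/s.
 * Illustrative example (numbers not from the source): 262144 newly
 * dirtied 4 KiB pages over 2 s is 1024 MB / 2 s = 512 MB/s.
 */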
static int64_t do_calculate_dirtyrate_vcpu(DirtyPageRecord dirty_pages)
{
    uint64_t memory_size_MB;
    int64_t time_s;
    uint64_t increased_dirty_pages =
        dirty_pages.end_pages - dirty_pages.start_pages;

    memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20;
    time_s = DirtyStat.calc_time;

    return memory_size_MB / time_s;
}

static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages,
                                            bool start)
{
    if (start) {
        dirty_pages->start_pages = total_dirty_pages;
    } else {
        dirty_pages->end_pages = total_dirty_pages;
    }
}

static void do_calculate_dirtyrate_bitmap(DirtyPageRecord dirty_pages)
{
    DirtyStat.dirty_rate = do_calculate_dirtyrate_vcpu(dirty_pages);
}

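/*
 * Clear the dirty bitmap of every migratable ramblock under the RCU
 * read lock.
 */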
static inline void dirtyrate_manual_reset_protect(void)
{
    RAMBlock *block = NULL;

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_MIGRATABLE(block) {
            memory_region_clear_dirty_bitmap(block->mr, 0,
                                             block->used_length);
        }
    }
}

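/*
 * Dirty bitmap mode: count pages dirtied in the global dirty bitmap
 * (total_dirty_pages) across the measurement window.
 */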
static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
{
    int64_t msec = 0;
    int64_t start_time;
    DirtyPageRecord dirty_pages;

    qemu_mutex_lock_iothread();
    memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);

    /*
     * The first round of log sync may return all-ones bitmaps when
     * KVM_DIRTY_LOG_INITIALLY_SET is enabled. Skip it unconditionally
     * and start dirty tracking from the second round of log sync.
     */
    memory_global_dirty_log_sync();

    /*
     * Reset page protection manually and unconditionally. This makes
     * sure the KVM dirty log is cleared if the
     * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE capability is enabled.
     */
    dirtyrate_manual_reset_protect();
    qemu_mutex_unlock_iothread();

    record_dirtypages_bitmap(&dirty_pages, true);

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = start_time / 1000;

    msec = config.sample_period_seconds * 1000;
    msec = set_sample_page_period(msec, start_time);
    DirtyStat.calc_time = msec / 1000;

    /*
     * dirtyrate_global_dirty_log_stop() does two things:
     * 1. fetch the dirty bitmap from KVM
     * 2. stop dirty tracking
     */
    dirtyrate_global_dirty_log_stop();

    record_dirtypages_bitmap(&dirty_pages, false);

    do_calculate_dirtyrate_bitmap(dirty_pages);
}

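/*
 * Dirty ring mode: snapshot each vCPU's dirty page counter before and
 * after the period, derive a per-vCPU rate, and report the sum as the
 * overall rate.
 */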
static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config)
{
    CPUState *cpu;
    int64_t msec = 0;
    int64_t start_time;
    uint64_t dirtyrate = 0;
    uint64_t dirtyrate_sum = 0;
    DirtyPageRecord *dirty_pages;
    int nvcpu = 0;
    int i = 0;

    CPU_FOREACH(cpu) {
        nvcpu++;
    }

    dirty_pages = malloc(sizeof(*dirty_pages) * nvcpu);

    DirtyStat.dirty_ring.nvcpu = nvcpu;
    DirtyStat.dirty_ring.rates = malloc(sizeof(DirtyRateVcpu) * nvcpu);

    dirtyrate_global_dirty_log_start();

    CPU_FOREACH(cpu) {
        record_dirtypages(dirty_pages, cpu, true);
    }

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    DirtyStat.start_time = start_time / 1000;

    msec = config.sample_period_seconds * 1000;
    msec = set_sample_page_period(msec, start_time);
    DirtyStat.calc_time = msec / 1000;

    dirtyrate_global_dirty_log_stop();

    CPU_FOREACH(cpu) {
        record_dirtypages(dirty_pages, cpu, false);
    }

    for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) {
        dirtyrate = do_calculate_dirtyrate_vcpu(dirty_pages[i]);
        trace_dirtyrate_do_calculate_vcpu(i, dirtyrate);

        DirtyStat.dirty_ring.rates[i].id = i;
        DirtyStat.dirty_ring.rates[i].dirty_rate = dirtyrate;
        dirtyrate_sum += dirtyrate;
    }

    DirtyStat.dirty_rate = dirtyrate_sum;
    free(dirty_pages);
}

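/*
 * Page sampling mode: hash randomly sampled pages, wait out the
 * measurement period, re-hash them, and extrapolate the dirty rate
 * from the fraction of samples that changed.
 */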
static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t msec = 0;
    int64_t initial_time;

    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    msec = config.sample_period_seconds * 1000;
    msec = set_sample_page_period(msec, initial_time);
    DirtyStat.start_time = initial_time / 1000;
    DirtyStat.calc_time = msec / 1000;

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(msec);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
}

static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) {
        calculate_dirtyrate_dirty_bitmap(config);
    } else if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        calculate_dirtyrate_dirty_ring(config);
    } else {
        calculate_dirtyrate_sample_vm(config);
    }

    trace_dirtyrate_calculate(DirtyStat.dirty_rate);
}

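/*
 * Measurement worker thread: moves CalculatingState from UNSTARTED to
 * MEASURING, runs the calculation, then marks the result MEASURED.
 */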
void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;

    rcu_register_thread();

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("failed to change dirtyrate state.");
        return NULL;
    }

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("failed to change dirtyrate state.");
    }

    rcu_unregister_thread();
    return NULL;
}

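/*
 * QMP usage (illustrative):
 *   {"execute": "calc-dirty-rate", "arguments": {"calc-time": 1}}
 *   ... wait at least calc-time seconds ...
 *   {"execute": "query-dirty-rate"}
 */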
void qmp_calc_dirty_rate(int64_t calc_time,
                         bool has_sample_pages,
                         int64_t sample_pages,
                         bool has_mode,
                         DirtyRateMeasureMode mode,
                         Error **errp)
{
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;
    int64_t start_time;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured.");
        return;
    }

    if (!is_sample_period_valid(calc_time)) {
        error_setg(errp, "calc-time is out of range[%d, %d].",
                         MIN_FETCH_DIRTYRATE_TIME_SEC,
                         MAX_FETCH_DIRTYRATE_TIME_SEC);
        return;
    }

    if (!has_mode) {
        mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    }

    if (has_sample_pages && mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) {
        error_setg(errp, "either sample-pages or dirty-ring can be specified.");
        return;
    }

    if (has_sample_pages) {
        if (!is_sample_pages_valid(sample_pages)) {
            error_setg(errp, "sample-pages is out of range[%d, %d].",
                            MIN_SAMPLE_PAGE_COUNT,
                            MAX_SAMPLE_PAGE_COUNT);
            return;
        }
    } else {
        sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    }

    /*
     * Dirty ring mode only works when the KVM dirty ring is enabled;
     * conversely, dirty bitmap mode only works when it is disabled.
     */
    if (((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) &&
        !kvm_dirty_ring_enabled()) ||
        ((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) &&
         kvm_dirty_ring_enabled())) {
        error_setg(errp, "mode %s is not enabled, use another method instead.",
                         DirtyRateMeasureMode_str(mode));
        return;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "failed to init dirty rate calculation state.");
        return;
    }

    config.sample_period_seconds = calc_time;
    config.sample_pages_per_gigabytes = sample_pages;
    config.mode = mode;

    cleanup_dirtyrate_stat(config);

    /*
     * Update the dirty rate mode so that we can figure out what mode
     * was used in the last calculation.
     */
    dirtyrate_mode = mode;

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;
    init_dirtyrate_stat(start_time, config);

    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}

struct DirtyRateInfo *qmp_query_dirty_rate(Error **errp)
{
    return query_dirty_rate_info();
}

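/* HMP "info dirty_rate" handler: pretty-print the latest measurement. */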
void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict)
{
    DirtyRateInfo *info = query_dirty_rate_info();

    monitor_printf(mon, "Status: %s\n",
                   DirtyRateStatus_str(info->status));
    monitor_printf(mon, "Start Time: %"PRIi64" (ms)\n",
                   info->start_time);
    monitor_printf(mon, "Sample Pages: %"PRIu64" (per GB)\n",
                   info->sample_pages);
    monitor_printf(mon, "Period: %"PRIi64" (sec)\n",
                   info->calc_time);
    monitor_printf(mon, "Mode: %s\n",
                   DirtyRateMeasureMode_str(info->mode));
    monitor_printf(mon, "Dirty rate: ");
    if (info->has_dirty_rate) {
        monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate);
        if (info->has_vcpu_dirty_rate) {
            DirtyRateVcpuList *rate, *head = info->vcpu_dirty_rate;
            for (rate = head; rate != NULL; rate = rate->next) {
                monitor_printf(mon, "vcpu[%"PRIi64"], Dirty rate: %"PRIi64
                               " (MB/s)\n", rate->value->id,
                               rate->value->dirty_rate);
            }
        }
    } else {
        monitor_printf(mon, "(not ready)\n");
    }

    qapi_free_DirtyRateVcpuList(info->vcpu_dirty_rate);
    g_free(info);
}

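/*
 * HMP "calc_dirty_rate" handler: parse the period, optional sampling
 * density and mode flags from the monitor, then invoke the QMP
 * command.
 */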
void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict)
{
    int64_t sec = qdict_get_try_int(qdict, "second", 0);
    int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1);
    bool has_sample_pages = (sample_pages != -1);
    bool dirty_ring = qdict_get_try_bool(qdict, "dirty_ring", false);
    bool dirty_bitmap = qdict_get_try_bool(qdict, "dirty_bitmap", false);
    DirtyRateMeasureMode mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING;
    Error *err = NULL;

    if (!sec) {
        monitor_printf(mon, "Incorrect period length specified!\n");
        return;
    }

    if (dirty_ring && dirty_bitmap) {
        monitor_printf(mon, "Either dirty ring or dirty bitmap "
                       "can be specified!\n");
        return;
    }

    if (dirty_bitmap) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP;
    } else if (dirty_ring) {
        mode = DIRTY_RATE_MEASURE_MODE_DIRTY_RING;
    }

    qmp_calc_dirty_rate(sec, has_sample_pages, sample_pages, true,
                        mode, &err);
    if (err) {
        hmp_handle_error(mon, err);
        return;
    }

    monitor_printf(mon, "Starting dirty rate measurement with period %"PRIi64
                   " seconds\n", sec);
    monitor_printf(mon, "[Please use 'info dirty_rate' to check results]\n");
}