qemu/softmmu/cpus.c
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor/monitor.h"
#include "qemu/coroutine-tls.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/qmp/qerror.h"
#include "exec/gdbstub.h"
#include "sysemu/hw_accel.h"
#include "exec/cpu-common.h"
#include "qemu/thread.h"
#include "qemu/plugin.h"
#include "sysemu/cpus.h"
#include "qemu/guest-random.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/whpx.h"
#include "hw/boards.h"
#include "hw/hw.h"
#include "trace.h"

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

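/*
 * The Big QEMU Lock (BQL): serializes access to most global and device
 * state.  Taken and released through the qemu_mutex_lock_iothread() /
 * qemu_mutex_unlock_iothread() wrappers defined below.
 */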
static QemuMutex qemu_global_mutex;

/*
 * The ops of the chosen accelerator; the accelerator registers this via
 * cpus_register_accel() during its initialization.
 */
static const AccelOpsClass *cpus_accel;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

bool cpu_work_list_empty(CPUState *cpu)
{
    return QSIMPLEQ_EMPTY_ATOMIC(&cpu->work_list);
}

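/*
 * A vCPU thread is idle when it has no stop request or queued work
 * pending, and is either fully stopped or halted with nothing to do;
 * the accelerator gets a final veto through its cpu_thread_is_idle hook.
 */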
bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || !cpu_work_list_empty(cpu)) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu)) {
        return false;
    }
    if (cpus_accel->cpu_thread_is_idle) {
        return cpus_accel->cpu_thread_is_idle(cpu);
    }
    return true;
}

bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

void cpu_synchronize_all_pre_loadvm(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_pre_loadvm(cpu);
    }
}

void cpu_synchronize_state(CPUState *cpu)
{
    if (cpus_accel->synchronize_state) {
        cpus_accel->synchronize_state(cpu);
    }
}

void cpu_synchronize_post_reset(CPUState *cpu)
{
    if (cpus_accel->synchronize_post_reset) {
        cpus_accel->synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_post_init(CPUState *cpu)
{
    if (cpus_accel->synchronize_post_init) {
        cpus_accel->synchronize_post_init(cpu);
    }
}

void cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    if (cpus_accel->synchronize_pre_loadvm) {
        cpus_accel->synchronize_pre_loadvm(cpu);
    }
}

bool cpus_are_resettable(void)
{
    if (cpus_accel->cpus_are_resettable) {
        return cpus_accel->cpus_are_resettable();
    }
    return true;
}

int64_t cpus_get_virtual_clock(void)
{
    /*
     * XXX
     *
     * need to check that cpus_accel is not NULL, because qcow2 calls
     * qemu_get_clock_ns(CLOCK_VIRTUAL) without any accel initialized and
     * with ticks disabled in some io-tests:
     * 030 040 041 060 099 120 127 140 156 161 172 181 191 192 195 203 229 249 256 267
     *
     * is this expected?
     *
     * XXX
     */
    if (cpus_accel && cpus_accel->get_virtual_clock) {
        return cpus_accel->get_virtual_clock();
    }
    return cpu_get_clock();
}

/*
 * Return the time elapsed in the VM between vm_start and vm_stop.  Unless
 * icount is active, cpus_get_elapsed_ticks() uses units of the host CPU cycle
 * counter.
 */
int64_t cpus_get_elapsed_ticks(void)
{
    if (cpus_accel->get_elapsed_ticks) {
        return cpus_accel->get_elapsed_ticks();
    }
    return cpu_get_ticks();
}

static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

void cpu_interrupt(CPUState *cpu, int mask)
{
    if (cpus_accel->handle_interrupt) {
        cpus_accel->handle_interrupt(cpu, mask);
    } else {
        generic_handle_interrupt(cpu, mask);
    }
}

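/*
 * Move to the given stopped RunState: freeze the virtual clock, pause all
 * vCPUs, notify vm-state listeners, then drain and flush the block layer
 * so no guest I/O is left in flight.
 */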
static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;

    if (runstate_is_running()) {
        runstate_set(state);
        cpu_disable_ticks();
        pause_all_vcpus();
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop();
        }
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();
    trace_vm_stop_flush_all(ret);

    return ret;
}

/* Special vm_stop() variant for terminating the process.  Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}

bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

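/*
 * Handle a debug exception raised by a vCPU.  Under record/replay the
 * breakpoint is logged and skipped via single-stepping; otherwise the
 * event is handed to the gdbstub and the vCPU is stopped.
 */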
void cpu_handle_guest_debug(CPUState *cpu)
{
    if (replay_running_debug()) {
        if (!cpu->singlestep_enabled) {
            /*
             * Report the breakpoint and single-step
             * to skip over it
             */
            replay_breakpoint();
            cpu_single_step(cpu, SSTEP_ENABLE);
        } else {
            cpu_single_step(cpu, 0);
        }
    } else {
        gdb_set_stop_cpu(cpu);
        qemu_system_debug_request();
        cpu->stopped = true;
    }
}

#ifdef CONFIG_LINUX
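/*
 * Restore the default SIGBUS disposition and re-deliver the signal to
 * ourselves, so that a bus error we cannot handle kills the process with
 * the default action instead of re-entering our handler.
 */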
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!");
    abort();
}

static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread.  */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread.  */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    /*
     * ALERT: when modifying this, take care that SIGBUS forwarding in
     * os_mem_prealloc() will continue working as expected.
     */
    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#else /* !CONFIG_LINUX */
static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

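/*
 * Run func on the given vCPU's thread and wait for completion; the BQL
 * (passed below as the wait mutex) is dropped while waiting, see
 * do_run_on_cpu().  A minimal, hypothetical caller:
 *
 *     static void do_nothing(CPUState *cpu, run_on_cpu_data arg) { }
 *     ...
 *     run_on_cpu(cpu, do_nothing, RUN_ON_CPU_NULL);
 */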
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}

static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}

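/*
 * Housekeeping run by vCPU threads with the BQL held: acknowledge a
 * pending kick, honor a stop request and run any queued cpu work items.
 */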
void qemu_wait_io_event_common(CPUState *cpu)
{
    qatomic_mb_set(&cpu->thread_kicked, false);
    if (cpu->stop) {
        qemu_cpu_stop(cpu, false);
    }
    process_queued_cpu_work(cpu);
}

void qemu_wait_io_event(CPUState *cpu)
{
    bool slept = false;

    while (cpu_thread_is_idle(cpu)) {
        if (!slept) {
            slept = true;
            qemu_plugin_vcpu_idle_cb(cpu);
        }
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }
    if (slept) {
        qemu_plugin_vcpu_resume_cb(cpu);
    }

#ifdef _WIN32
    /* Eat dummy APC queued by cpus_kick_thread. */
    if (hax_enabled()) {
        SleepEx(0, TRUE);
    }
#endif
    qemu_wait_io_event_common(cpu);
}

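/*
 * Default kick implementation for POSIX hosts: interrupt the vCPU thread
 * with SIG_IPI.  thread_kicked coalesces repeated kicks until the target
 * clears it in qemu_wait_io_event_common().
 */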
void cpus_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err && err != ESRCH) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (cpus_accel->kick_vcpu_thread) {
        cpus_accel->kick_vcpu_thread(cpu);
    } else { /* default */
        cpus_kick_thread(cpu);
    }
}

void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    cpus_kick_thread(current_cpu);
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

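/*
 * Whether the current context holds the BQL.  Kept in coroutine-local
 * storage so the flag follows a coroutine that is re-entered from a
 * different thread.
 */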
QEMU_DEFINE_STATIC_CO_TLS(bool, iothread_locked)

bool qemu_mutex_iothread_locked(void)
{
    return get_iothread_locked();
}

bool qemu_in_main_thread(void)
{
    return qemu_mutex_iothread_locked();
}

/*
 * The BQL is taken from so many places that it is worth profiling the
 * callers directly, instead of funneling them all through a single function.
 */
void qemu_mutex_lock_iothread_impl(const char *file, int line)
{
    QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func);

    g_assert(!qemu_mutex_iothread_locked());
    bql_lock(&qemu_global_mutex, file, line);
    set_iothread_locked(true);
}

void qemu_mutex_unlock_iothread(void)
{
    g_assert(qemu_mutex_iothread_locked());
    set_iothread_locked(false);
    qemu_mutex_unlock(&qemu_global_mutex);
}

void qemu_cond_wait_iothread(QemuCond *cond)
{
    qemu_cond_wait(cond, &qemu_global_mutex);
}

void qemu_cond_timedwait_iothread(QemuCond *cond, int ms)
{
    qemu_cond_timedwait(cond, &qemu_global_mutex, ms);
}

/* signal CPU creation */
void cpu_thread_signal_created(CPUState *cpu)
{
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
}

/* signal CPU destruction */
void cpu_thread_signal_destroyed(CPUState *cpu)
{
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
}


static bool all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return false;
        }
    }

    return true;
}

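/*
 * Stop all vCPUs and wait until every one has acknowledged.  Called with
 * the BQL and the replay lock held; the replay lock is dropped while
 * waiting so woken vCPU threads can finish their replay work first.
 */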
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        if (qemu_cpu_is_self(cpu)) {
            qemu_cpu_stop(cpu, true);
        } else {
            cpu->stop = true;
            qemu_cpu_kick(cpu);
        }
    }

    /* We need to drop the replay_lock so any vCPU threads woken up
     * can finish their replay tasks
     */
    replay_mutex_unlock();

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }

    qemu_mutex_unlock_iothread();
    replay_mutex_lock();
    qemu_mutex_lock_iothread();
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    if (!runstate_is_running()) {
        return;
    }

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

void cpu_remove_sync(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
    qemu_mutex_unlock_iothread();
    qemu_thread_join(cpu->thread);
    qemu_mutex_lock_iothread();
}

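/*
 * Called once by the chosen accelerator during its initialization.
 * create_vcpu_thread is the only mandatory hook; all other AccelOpsClass
 * callbacks are optional and NULL-checked before use in this file.
 */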
void cpus_register_accel(const AccelOpsClass *ops)
{
    assert(ops != NULL);
    assert(ops->create_vcpu_thread != NULL); /* mandatory */
    cpus_accel = ops;
}

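/*
 * Final step of vCPU creation: fill in the topology fields, hand the vCPU
 * to the accelerator's create_vcpu_thread hook and wait until the new
 * thread signals cpu->created through qemu_cpu_cond.
 */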
void qemu_init_vcpu(CPUState *cpu)
{
    MachineState *ms = MACHINE(qdev_get_machine());

    cpu->nr_cores = ms->smp.cores;
    cpu->nr_threads = ms->smp.threads;
    cpu->stopped = true;
    cpu->random_seed = qemu_guest_random_seed_thread_part1();

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
    }

    /* accelerators all implement the AccelOpsClass */
    g_assert(cpus_accel != NULL && cpus_accel->create_vcpu_thread != NULL);
    cpus_accel->create_vcpu_thread(cpu);

    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = true;
        cpu_exit(current_cpu);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state, true);
}

/**
 * Prepare for (re)starting the VM.
 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
 * running or in case of an error condition), 0 otherwise.
 */
int vm_prepare_start(bool step_pending)
{
    RunState requested;

    qemu_vmstop_requested(&requested);
    if (runstate_is_running() && requested == RUN_STATE__MAX) {
        return -1;
    }

    /* Ensure that a STOP/RESUME pair of events is emitted if a
     * vmstop request was pending.  The BLOCK_IO_ERROR event, for
     * example, is documented to always be followed by the STOP
     * event.
     */
    if (runstate_is_running()) {
        qapi_event_send_stop();
        qapi_event_send_resume();
        return -1;
    }

    /*
     * The WHPX accelerator needs to know whether we are going to step
     * any CPUs, before starting the first one.
     */
    if (cpus_accel->synchronize_pre_resume) {
        cpus_accel->synchronize_pre_resume(step_pending);
    }

    /* We are sending this now, but the CPUs will be resumed shortly afterwards */
    qapi_event_send_resume();

    cpu_enable_ticks();
    runstate_set(RUN_STATE_RUNNING);
    vm_state_notify(1, RUN_STATE_RUNNING);
    return 0;
}

void vm_start(void)
{
    if (!vm_prepare_start(false)) {
        resume_all_vcpus();
    }
}

/*
 * Does a state transition even if the VM is already stopped; the current
 * state is forgotten forever.
 */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        int ret;
        runstate_set(state);

        bdrv_drain_all();
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        ret = bdrv_flush_all();
        trace_vm_stop_flush_all(ret);
        return ret;
    }
}

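/*
 * QMP 'memsave': write 'size' bytes of guest virtual memory, starting at
 * 'addr' as translated by the selected vCPU, to 'filename' on the host.
 */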
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];
    int64_t orig_addr = addr, orig_size = size;

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                   "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
                             " specified", orig_addr, orig_size);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

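/*
 * QMP 'pmemsave': like memsave, but reads guest physical memory, so no
 * vCPU needs to be selected.
 */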
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(monitor_cur()), errp);
}