qemu/dump.c
/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "elf.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/sysemu.h"
#include "sysemu/memory_mapping.h"
#include "qapi/error.h"
#include "qmp-commands.h"

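/*
 * Helpers that convert a value from host byte order to the byte order
 * requested for the vmcore (ELFDATA2LSB selects little endian, anything
 * else big endian).
 */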
static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

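/* State shared by all stages of writing a single guest memory dump. */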
typedef struct DumpState {
    ArchDumpInfo dump_info;
    MemoryMappingList list;
    uint16_t phdr_num;
    uint32_t sh_info;
    bool have_section;
    bool resume;
    size_t note_size;
    hwaddr memory_offset;
    int fd;

    RAMBlock *block;
    ram_addr_t start;
    bool has_filter;
    int64_t begin;
    int64_t length;
    Error **errp;
} DumpState;

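/*
 * Release dump resources: free the mapping list, close the output fd and
 * resume the guest if it was stopped for the dump.
 */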
static int dump_cleanup(DumpState *s)
{
    int ret = 0;

    memory_mapping_list_free(&s->list);
    if (s->fd != -1) {
        close(s->fd);
    }
    if (s->resume) {
        vm_start();
    }

    return ret;
}

static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}

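/* Write a buffer to the dump file; returns 0 on success, -1 on short write. */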
static int fd_write_vmcore(void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

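/*
 * Write the ELF header of an ELFCLASS64 vmcore.  When a section header is
 * needed (the real phdr count lives in sh_info), also fill in e_shoff,
 * e_shentsize and e_shnum.
 */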
static int write_elf64_header(DumpState *s)
{
    Elf64_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_header(DumpState *s)
{
    Elf32_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

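/* Write one PT_LOAD program header describing a guest memory mapping. */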
static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset)
{
    Elf64_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target64(offset, endian);
    phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
    if (offset == -1) {
        /* When the memory is not stored into vmcore, offset will be -1 */
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target64(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset)
{
    Elf32_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target32(offset, endian);
    phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
    if (offset == -1) {
        /* When the memory is not stored into vmcore, offset will be -1 */
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

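/* Write the PT_NOTE program header that covers the ELF notes. */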
static int write_elf64_note(DumpState *s)
{
    Elf64_Phdr phdr;
    int endian = s->dump_info.d_endian;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target64(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

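/* Write the per-CPU ELF notes and the QEMU-specific notes for all CPUs. */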
static int write_elf64_notes(DumpState *s)
{
    CPUArchState *env;
    CPUState *cpu;
    int ret;
    int id;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(fd_write_vmcore, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        ret = cpu_write_elf64_qemunote(fd_write_vmcore, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

static int write_elf32_note(DumpState *s)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int endian = s->dump_info.d_endian;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target32(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_notes(DumpState *s)
{
    CPUArchState *env;
    CPUState *cpu;
    int ret;
    int id;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(fd_write_vmcore, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        ret = cpu_write_elf32_qemunote(fd_write_vmcore, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

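/*
 * Write the lone section header.  It is only emitted when the number of
 * program headers does not fit in e_phnum; sh_info then carries the real
 * count.  type selects the layout: 0 for ELFCLASS32, otherwise ELFCLASS64.
 */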
static int write_elf_section(DumpState *s, int type)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int endian = s->dump_info.d_endian;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write section header table.\n");
        return -1;
    }

    return 0;
}

static int write_data(DumpState *s, void *buf, int length)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to save memory.\n");
        return -1;
    }

    return 0;
}

/* write the memory to vmcore. 1 page per I/O. */
static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
                        int64_t size)
{
    int64_t i;
    int ret;

    for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    if ((size % TARGET_PAGE_SIZE) != 0) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         size % TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/* get the memory's offset in the vmcore */
static hwaddr get_offset(hwaddr phys_addr, DumpState *s)
{
    RAMBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return -1;
        }
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->offset) {
                start = block->offset;
            } else {
                start = s->begin;
            }

            size_in_block = block->length - (start - block->offset);
            if (s->begin + s->length < block->offset + block->length) {
                size_in_block -= block->offset + block->length -
                                 (s->begin + s->length);
            }
        } else {
            start = block->offset;
            size_in_block = block->length;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            return phys_addr - start + offset;
        }

        offset += size_in_block;
    }

    return -1;
}

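/* Write one PT_LOAD header per guest memory mapping, up to the phdr limit. */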
static int write_elf_loads(DumpState *s)
{
    hwaddr offset;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    int ret;
    uint32_t max_index;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        offset = get_offset(memory_mapping->phys_addr, s);
        if (s->dump_info.d_class == ELFCLASS64) {
            ret = write_elf64_load(s, memory_mapping, phdr_index++, offset);
        } else {
            ret = write_elf32_load(s, memory_mapping, phdr_index++, offset);
        }

        if (ret < 0) {
            return -1;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }

    return 0;
}

/* write elf header, PT_NOTE, PT_LOAD, section header and elf notes to vmcore. */
static int dump_begin(DumpState *s)
{
    int ret;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        ret = write_elf64_header(s);
    } else {
        ret = write_elf32_header(s);
    }
    if (ret < 0) {
        return -1;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        if (write_elf64_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 1) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf64_notes(s) < 0) {
            return -1;
        }

    } else {
        /* write PT_NOTE to vmcore */
        if (write_elf32_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 0) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf32_notes(s) < 0) {
            return -1;
        }
    }

    return 0;
}

/* the dump has been written; release resources */
static int dump_completed(DumpState *s)
{
    dump_cleanup(s);
    return 0;
}

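/*
 * Advance to the next RAM block that intersects the filter range (if any)
 * and update s->block/s->start.  Returns 1 when there are no more blocks.
 */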
static int get_next_block(DumpState *s, RAMBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->block = block;
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->offset) {
                s->start = s->begin - block->offset;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
    RAMBlock *block;
    int64_t size;
    int ret;

    while (1) {
        block = s->block;

        size = block->length;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->offset + block->length) {
                size -= block->offset + block->length - (s->begin + s->length);
            }
        }
        ret = write_memory(s, block, s->start, size);
        if (ret == -1) {
            return ret;
        }

        ret = get_next_block(s, block);
        if (ret == 1) {
            dump_completed(s);
            return 0;
        }
    }
}

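/* Top-level writer: emit the ELF headers and notes, then the guest memory. */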
static int create_vmcore(DumpState *s)
{
    int ret;

    ret = dump_begin(s);
    if (ret < 0) {
        return -1;
    }

    ret = dump_iterate(s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

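/*
 * Pick the first RAM block to dump, honouring the filter range, and set
 * s->block.  Returns the start offset within that block, or -1 when no
 * block intersects the requested range.
 */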
static ram_addr_t get_start_block(DumpState *s)
{
    RAMBlock *block;

    if (!s->has_filter) {
        s->block = QTAILQ_FIRST(&ram_list.blocks);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset >= s->begin + s->length ||
            block->offset + block->length <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->block = block;
        if (s->begin > block->offset) {
            s->start = s->begin - block->offset;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

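/*
 * Prepare the DumpState: stop the guest if it is running, validate the
 * filter range, collect the guest memory mappings and pre-compute the
 * vmcore layout (phdr count, section usage, memory offset).
 */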
static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
                     int64_t begin, int64_t length, Error **errp)
{
    CPUArchState *env;
    int nr_cpus;
    int ret;

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    s->errp = errp;
    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;
    s->start = get_start_block(s);
    if (s->start == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /*
     * get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     *
     * If we use KVM, we should synchronize the registers before we get dump
     * info.
     */
    nr_cpus = 0;
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu_synchronize_state(env);
        nr_cpus++;
    }

    ret = cpu_get_dump_info(&s->dump_info);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /*
     * cpu_get_note_size() returns a negative value when the class/machine
     * combination is not supported; check the result before storing it
     * into the unsigned note_size field.
     */
    ret = cpu_get_note_size(s->dump_info.d_class,
                            s->dump_info.d_machine, nr_cpus);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }
    s->note_size = ret;

    /* get memory mapping */
    memory_mapping_list_init(&s->list);
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list);
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list);
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return 0;

cleanup:
    if (s->resume) {
        vm_start();
    }

    return -1;
}

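/*
 * QMP handler for dump-guest-memory: parse the "fd:" or "file:" protocol
 * in @file, initialise the DumpState and write the vmcore.
 */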
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length, int64_t length,
                           Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    int ret;

    if (has_begin && !has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_set(errp, QERR_OPEN_FILE_FAILED, p);
            return;
        }
    }

    if (fd == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc(sizeof(DumpState));

    ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
    if (ret < 0) {
        g_free(s);
        return;
    }

    if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
        error_set(errp, QERR_IO_ERROR);
    }

    g_free(s);
}