qemu/dump.c
/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "elf.h"
#include "cpu.h"
#include "cpu-all.h"
#include "targphys.h"
#include "monitor.h"
#include "kvm.h"
#include "dump.h"
#include "sysemu.h"
#include "memory_mapping.h"
#include "error.h"
#include "qmp-commands.h"
#include "gdbstub.h"

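/*
 * The vmcore is always written in the target's byte order (recorded in
 * dump_info.d_endian), so every multi-byte ELF header field is converted
 * from host endianness with the helpers below.
 */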
static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

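/*
 * State of one dump request: the target description used to build the ELF
 * headers, the guest memory mappings to write, layout bookkeeping (program
 * header count, section usage, offset of the memory data) and the current
 * position while iterating over RAM blocks.
 */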
typedef struct DumpState {
    ArchDumpInfo dump_info;
    MemoryMappingList list;
    uint16_t phdr_num;
    uint32_t sh_info;
    bool have_section;
    bool resume;
    size_t note_size;
    target_phys_addr_t memory_offset;
    int fd;

    RAMBlock *block;
    ram_addr_t start;
    bool has_filter;
    int64_t begin;
    int64_t length;
    Error **errp;
} DumpState;

static int dump_cleanup(DumpState *s)
{
    int ret = 0;

    memory_mapping_list_free(&s->list);
    if (s->fd != -1) {
        close(s->fd);
    }
    if (s->resume) {
        vm_start();
    }

    return ret;
}

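/* Abort the dump: release everything and resume the guest if we stopped it. */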
static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}

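/*
 * Write a buffer to the dump file, retrying short writes: the descriptor may
 * have been handed to us by the monitor and can be non-blocking.
 */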
static int fd_write_vmcore(void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    int fd = s->fd;
    size_t written_size;

    /* The fd may be passed from the user, and it can be non-blocking */
    while (size) {
        written_size = qemu_write_full(fd, buf, size);
        if (written_size != size && errno != EAGAIN) {
            return -1;
        }

        buf += written_size;
        size -= written_size;
    }

    return 0;
}

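/*
 * Write the ELF64 file header at the start of the vmcore.  If there are too
 * many program headers to fit in e_phnum, s->phdr_num has been set to PN_XNUM
 * and the real count is carried by the sh_info field of a single section
 * header (see write_elf_section()).
 */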
static int write_elf64_header(DumpState *s)
{
    Elf64_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

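/* 32-bit counterpart of write_elf64_header(). */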
static int write_elf32_header(DumpState *s)
{
    Elf32_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

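/*
 * Emit one PT_LOAD program header describing a guest memory mapping.  An
 * offset of -1 means the mapping's contents are filtered out of the vmcore,
 * so p_filesz is written as 0 while p_memsz still covers the full mapping.
 */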
static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, target_phys_addr_t offset)
{
    Elf64_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target64(offset, endian);
    phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
    if (offset == -1) {
        /* When the memory is not stored into vmcore, offset will be -1 */
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target64(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, target_phys_addr_t offset)
{
    Elf32_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target32(offset, endian);
    phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
    if (offset == -1) {
        /* When the memory is not stored into vmcore, offset will be -1 */
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

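/*
 * Emit the PT_NOTE program header.  The note data itself is written later by
 * write_elf64_notes(); it sits right before the memory contents, at
 * memory_offset - note_size.
 */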
static int write_elf64_note(DumpState *s)
{
    Elf64_Phdr phdr;
    int endian = s->dump_info.d_endian;
    target_phys_addr_t begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target64(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

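/*
 * Write the architecture's per-CPU ELF notes, then the QEMU-specific notes,
 * for every vCPU.
 */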
static int write_elf64_notes(DumpState *s)
{
    CPUArchState *env;
    int ret;
    int id;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        id = cpu_index(env);
        ret = cpu_write_elf64_note(fd_write_vmcore, env, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf64_qemunote(fd_write_vmcore, env, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

static int write_elf32_note(DumpState *s)
{
    target_phys_addr_t begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int endian = s->dump_info.d_endian;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target32(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_notes(DumpState *s)
{
    CPUArchState *env;
    int ret;
    int id;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        id = cpu_index(env);
        ret = cpu_write_elf32_note(fd_write_vmcore, env, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf32_qemunote(fd_write_vmcore, env, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

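/*
 * Write the single section header whose sh_info carries the real program
 * header count when it does not fit in the 16-bit e_phnum field.  'type' is
 * 0 for ELFCLASS32 and non-zero for ELFCLASS64.
 */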
static int write_elf_section(DumpState *s, int type)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int endian = s->dump_info.d_endian;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write section header table.\n");
        return -1;
    }

    return 0;
}

static int write_data(DumpState *s, void *buf, int length)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to save memory.\n");
        return -1;
    }

    return 0;
}

/* write the memory to vmcore. 1 page per I/O. */
static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
                        int64_t size)
{
    int64_t i;
    int ret;

    for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    if ((size % TARGET_PAGE_SIZE) != 0) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         size % TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/* get the memory's offset in the vmcore */
static target_phys_addr_t get_offset(target_phys_addr_t phys_addr,
                                     DumpState *s)
{
    RAMBlock *block;
    target_phys_addr_t offset = s->memory_offset;
    int64_t size_in_block, start;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return -1;
        }
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->offset) {
                start = block->offset;
            } else {
                start = s->begin;
            }

            size_in_block = block->length - (start - block->offset);
            if (s->begin + s->length < block->offset + block->length) {
                size_in_block -= block->offset + block->length -
                                 (s->begin + s->length);
            }
        } else {
            start = block->offset;
            size_in_block = block->length;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            return phys_addr - start + offset;
        }

        offset += size_in_block;
    }

    return -1;
}

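/*
 * Write one PT_LOAD program header for every guest memory mapping, stopping
 * once the number of headers that the ELF layout can describe is reached.
 */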
static int write_elf_loads(DumpState *s)
{
    target_phys_addr_t offset;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    int ret;
    uint32_t max_index;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        offset = get_offset(memory_mapping->phys_addr, s);
        if (s->dump_info.d_class == ELFCLASS64) {
            ret = write_elf64_load(s, memory_mapping, phdr_index++, offset);
        } else {
            ret = write_elf32_load(s, memory_mapping, phdr_index++, offset);
        }

        if (ret < 0) {
            return -1;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }

    return 0;
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static int dump_begin(DumpState *s)
{
    int ret;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        ret = write_elf64_header(s);
    } else {
        ret = write_elf32_header(s);
    }
    if (ret < 0) {
        return -1;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        if (write_elf64_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 1) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf64_notes(s) < 0) {
            return -1;
        }

    } else {
        /* write PT_NOTE to vmcore */
        if (write_elf32_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 0) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf32_notes(s) < 0) {
            return -1;
        }
    }

    return 0;
}

/* the vmcore is complete: clean up and resume the guest if we stopped it */
static int dump_completed(DumpState *s)
{
    dump_cleanup(s);
    return 0;
}

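/*
 * Advance to the next RAM block that (when a filter is set) overlaps the
 * requested [begin, begin + length) range.  Returns 1 when there are no more
 * blocks to dump, 0 otherwise.
 */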
static int get_next_block(DumpState *s, RAMBlock *block)
{
    while (1) {
        block = QLIST_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->block = block;
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->offset) {
                s->start = s->begin - block->offset;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
    RAMBlock *block;
    int64_t size;
    int ret;

    while (1) {
        block = s->block;

        size = block->length;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->offset + block->length) {
                size -= block->offset + block->length - (s->begin + s->length);
            }
        }
        ret = write_memory(s, block, s->start, size);
        if (ret == -1) {
            return ret;
        }

        ret = get_next_block(s, block);
        if (ret == 1) {
            dump_completed(s);
            return 0;
        }
    }
}

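/* top-level driver: write the headers and notes, then all guest memory */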
static int create_vmcore(DumpState *s)
{
    int ret;

    ret = dump_begin(s);
    if (ret < 0) {
        return -1;
    }

    ret = dump_iterate(s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

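/*
 * Find the first RAM block to dump and the offset within it at which to
 * start.  Returns the start offset, or -1 if the filter range does not
 * intersect any RAM block.
 */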
static ram_addr_t get_start_block(DumpState *s)
{
    RAMBlock *block;

    if (!s->has_filter) {
        s->block = QLIST_FIRST(&ram_list.blocks);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset >= s->begin + s->length ||
            block->offset + block->length <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->block = block;
        if (s->begin > block->offset) {
            s->start = s->begin - block->offset;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

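/*
 * Prepare a dump: stop the guest if it is running, gather the target's dump
 * info and the guest memory mappings, and precompute the ELF layout
 * (program header count, whether a section header is needed, and the file
 * offset at which the memory contents will start).
 */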
static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
                     int64_t begin, int64_t length, Error **errp)
{
    CPUArchState *env;
    int nr_cpus;
    int ret;

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    s->errp = errp;
    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;
    s->start = get_start_block(s);
    if (s->start == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /*
     * get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     *
     * If we use KVM, we should synchronize the registers before we get dump
     * info.
     */
    nr_cpus = 0;
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu_synchronize_state(env);
        nr_cpus++;
    }

    ret = cpu_get_dump_info(&s->dump_info);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    ret = cpu_get_note_size(s->dump_info.d_class,
                            s->dump_info.d_machine, nr_cpus);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }
    s->note_size = ret;

    /* get memory mapping */
    memory_mapping_list_init(&s->list);
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list);
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list);
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return 0;

cleanup:
    if (s->resume) {
        vm_start();
    }

    return -1;
}

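/*
 * QMP handler for the dump-guest-memory command.  The destination is chosen
 * by prefix: "fd:<name>" uses a file descriptor previously added to the
 * monitor, "file:<path>" creates a regular file.  When "begin" is given,
 * "length" must be given as well, and only guest physical addresses in
 * [begin, begin + length) are dumped.
 *
 * Illustrative QMP invocation (argument names follow the qapi schema for
 * this command; shown here only as an example):
 *
 *   -> { "execute": "dump-guest-memory",
 *        "arguments": { "paging": false, "protocol": "file:/tmp/vmcore" } }
 *   <- { "return": {} }
 */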
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length, int64_t length,
                           Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    int ret;

    if (has_begin && !has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p);
        if (fd == -1) {
            error_set(errp, QERR_FD_NOT_FOUND, p);
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_set(errp, QERR_OPEN_FILE_FAILED, p);
            return;
        }
    }

    if (fd == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc(sizeof(DumpState));

    ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
    if (ret < 0) {
        g_free(s);
        return;
    }

    if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
        error_set(errp, QERR_IO_ERROR);
    }

    g_free(s);
}
