qemu/dump.c
/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "elf.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/sysemu.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/cpus.h"
#include "qapi/error.h"
#include "qmp-commands.h"
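/*
 * Byte-order conversion helpers: the vmcore is written in the target's
 * endianness (ELFDATA2LSB or ELFDATA2MSB), which may differ from the
 * host's, so every multi-byte ELF field goes through one of these.
 */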
static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}
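/*
 * Per-dump context: the guest's physical memory blocks, the memory
 * mappings, the ELF layout decisions (phdr_num, have_section, note_size,
 * memory_offset), the progress of the iteration (next_block, start) and
 * the optional begin/length filter.
 */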
typedef struct DumpState {
    GuestPhysBlockList guest_phys_blocks;
    ArchDumpInfo dump_info;
    MemoryMappingList list;
    uint16_t phdr_num;
    uint32_t sh_info;
    bool have_section;
    bool resume;
    ssize_t note_size;
    hwaddr memory_offset;
    int fd;

    GuestPhysBlock *next_block;
    ram_addr_t start;
    bool has_filter;
    int64_t begin;
    int64_t length;
    Error **errp;
} DumpState;

static int dump_cleanup(DumpState *s)
{
    int ret = 0;

    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    if (s->fd != -1) {
        close(s->fd);
    }
    if (s->resume) {
        vm_start();
    }

    return ret;
}

static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}
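/*
 * Low-level write helper, also passed as the write callback to the
 * per-arch note writers; returns 0 on success, -1 on a short write.
 */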
static int fd_write_vmcore(void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}
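/*
 * Write the ELF file header. When the program header count does not fit
 * into e_phnum, e_phnum holds PN_XNUM and a single section header is
 * emitted whose sh_info carries the real count (see write_elf_section()).
 */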
static int write_elf64_header(DumpState *s)
{
    Elf64_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_header(DumpState *s)
{
    Elf32_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset,
                            hwaddr filesz)
{
    Elf64_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target64(offset, endian);
    phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
    phdr.p_filesz = cpu_convert_to_target64(filesz, endian);
    phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset,
                            hwaddr filesz)
{
    Elf32_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target32(offset, endian);
    phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
    phdr.p_filesz = cpu_convert_to_target32(filesz, endian);
    phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}
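/*
 * Write the PT_NOTE program header. The note data itself is written later
 * by write_elf64_notes()/write_elf32_notes(); it sits immediately before
 * the memory, so its file offset is memory_offset - note_size.
 */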
static int write_elf64_note(DumpState *s)
{
    Elf64_Phdr phdr;
    int endian = s->dump_info.d_endian;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target64(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}
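/* IDs passed to the per-CPU note writers are 1-based (cpu_index is 0-based). */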
static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

static int write_elf64_notes(DumpState *s)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(fd_write_vmcore, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(fd_write_vmcore, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

static int write_elf32_note(DumpState *s)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int endian = s->dump_info.d_endian;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target32(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_notes(DumpState *s)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(fd_write_vmcore, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(fd_write_vmcore, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}
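/*
 * Write the single placeholder section header used when the program header
 * count overflows e_phnum; its sh_info field carries the real count.
 * 'type' selects the ELFCLASS32 (0) or ELFCLASS64 (non-zero) layout.
 */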
static int write_elf_section(DumpState *s, int type)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int endian = s->dump_info.d_endian;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write section header table.\n");
        return -1;
    }

    return 0;
}

static int write_data(DumpState *s, void *buf, int length)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to save memory.\n");
        return -1;
    }

    return 0;
}

/* write the memory to vmcore. 1 page per I/O. */
static int write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                        int64_t size)
{
    int64_t i;
    int ret;

    for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
        ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
                         TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    if ((size % TARGET_PAGE_SIZE) != 0) {
        ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
                         size % TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}
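/*
 * Write one PT_LOAD program header per memory mapping, clamping each
 * mapping's file offset and size to the vmcore via get_offset_range() and
 * stopping once the announced header count has been reached.
 */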
static int write_elf_loads(DumpState *s)
{
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    int ret;
    uint32_t max_index;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (s->dump_info.d_class == ELFCLASS64) {
            ret = write_elf64_load(s, memory_mapping, phdr_index++, offset,
                                   filesz);
        } else {
            ret = write_elf32_load(s, memory_mapping, phdr_index++, offset,
                                   filesz);
        }

        if (ret < 0) {
            return -1;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }

    return 0;
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static int dump_begin(DumpState *s)
{
    int ret;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        ret = write_elf64_header(s);
    } else {
        ret = write_elf32_header(s);
    }
    if (ret < 0) {
        return -1;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        if (write_elf64_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 1) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf64_notes(s) < 0) {
            return -1;
        }

    } else {
        /* write PT_NOTE to vmcore */
        if (write_elf32_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 0) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf32_notes(s) < 0) {
            return -1;
        }
    }

    return 0;
}
/* clean up the dump state once all memory has been written */
static int dump_completed(DumpState *s)
{
    dump_cleanup(s);
    return 0;
}
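/*
 * Advance to the block after 'block', honouring the begin/length filter.
 * Returns 1 when there is no further block to dump, 0 otherwise.
 */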
static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t size;
    int ret;

    while (1) {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        ret = write_memory(s, block, s->start, size);
        if (ret == -1) {
            return ret;
        }

        ret = get_next_block(s, block);
        if (ret == 1) {
            dump_completed(s);
            return 0;
        }
    }
}
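/* Write the whole vmcore: headers and notes first, then all guest memory. */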
static int create_vmcore(DumpState *s)
{
    int ret;

    ret = dump_begin(s);
    if (ret < 0) {
        return -1;
    }

    ret = dump_iterate(s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}
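/*
 * Pick the first GuestPhysBlock to dump and the offset within it, taking
 * the begin/length filter into account. Returns -1 (as a ram_addr_t) when
 * the filter does not intersect any block.
 */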
static ram_addr_t get_start_block(DumpState *s)
{
    GuestPhysBlock *block;

    if (!s->has_filter) {
        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        return 0;
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (block->target_start >= s->begin + s->length ||
            block->target_end <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->next_block = block;
        if (s->begin > block->target_start) {
            s->start = s->begin - block->target_start;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}
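/*
 * Prepare the DumpState: stop the VM (resumed again on cleanup), sync CPU
 * registers, collect the guest's physical blocks and memory mappings, and
 * precompute the ELF layout (program header count and memory offset).
 */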
static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
                     int64_t begin, int64_t length, Error **errp)
{
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    int ret;

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    /* If we use KVM, we should synchronize the registers before we get dump
     * info or physmap info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    CPU_FOREACH(cpu) {
        nr_cpus++;
    }

    s->errp = errp;
    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;

    guest_phys_blocks_init(&s->guest_phys_blocks);
    guest_phys_blocks_append(&s->guest_phys_blocks);

    s->start = get_start_block(s);
    if (s->start == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /* get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     */
    ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /* get memory mapping */
    memory_mapping_list_init(&s->list);
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            goto cleanup;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return 0;

cleanup:
    guest_phys_blocks_free(&s->guest_phys_blocks);

    if (s->resume) {
        vm_start();
    }

    return -1;
}
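/*
 * QMP handler for dump-guest-memory. The target is given via the protocol
 * string: "fd:<name>" for a previously added file descriptor (not on
 * Windows) or "file:<path>". A sketch of a typical QMP invocation (the
 * exact wire format depends on the QAPI schema in use):
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false, "protocol": "file:/tmp/vmcore" } }
 */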
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length, int64_t length,
                           Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    int ret;

    if (has_begin && !has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    if (fd == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc0(sizeof(DumpState));

    ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
    if (ret < 0) {
        g_free(s);
        return;
    }

    if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
        error_set(errp, QERR_IO_ERROR);
    }

    g_free(s);
}