qemu/linux-user/elfload.c
/* This is the Linux kernel elf-loading code, ported into user space */
#include "qemu/osdep.h"
#include <sys/param.h>

#include <sys/resource.h>

#include "qemu.h"
#include "disas/disas.h"
#include "qemu/path.h"

#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
#undef ELF_PLATFORM
#undef ELF_HWCAP
#undef ELF_HWCAP2
#undef ELF_CLASS
#undef ELF_DATA
#undef ELF_ARCH
#endif

#define ELF_OSABI   ELFOSABI_SYSV

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE = 0x0040000,      /* disable randomization of VA space */
    FDPIC_FUNCPTRS =    0x0080000,      /* userspace function ptrs point to
                                           descriptors (signal handling) */
    MMAP_PAGE_ZERO =    0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC = 0x0400000,
    ADDR_LIMIT_32BIT =  0x0800000,
    SHORT_INODE =       0x1000000,
    WHOLE_SECONDS =     0x2000000,
    STICKY_TIMEOUTS =   0x4000000,
    ADDR_LIMIT_3GB =    0x8000000,
};

/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX =         0x0000,
    PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
    PER_BSD =           0x0006,
    PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
    PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32 =       0x0008,
    PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
    PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
    PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
    PER_RISCOS =        0x000c,
    PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
    PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4 =          0x000f,                  /* OSF/1 v4 */
    PER_HPUX =          0x0010,
    PER_MASK =          0x00ff,
};

/*
 * Return the base personality without flags.
 */
#define personality(pers)       (pers & PER_MASK)

int info_is_fdpic(struct image_info *info)
{
    return info->personality == PER_LINUX_FDPIC;
}

/* This flag is ineffective under Linux too; it should be deleted. */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif

#ifdef TARGET_ABI_MIPSN32
typedef abi_ullong      target_elf_greg_t;
#define tswapreg(ptr)   tswap64(ptr)
#else
typedef abi_ulong       target_elf_greg_t;
#define tswapreg(ptr)   tswapal(ptr)
#endif
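/*
 * Register values written into core dumps must be in target byte order,
 * which is what tswapreg() arranges.  MIPS N32 is special: the ABI uses
 * 32-bit longs, but the ELF core format still records full 64-bit
 * registers, hence the abi_ullong/tswap64 pair above.
 */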

#ifdef USE_UID16
typedef abi_ushort      target_uid_t;
typedef abi_ushort      target_gid_t;
#else
typedef abi_uint        target_uid_t;
typedef abi_uint        target_gid_t;
#endif
typedef abi_int         target_pid_t;

#ifdef TARGET_I386

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
    if (family > 6)
        family = 6;
    if (family >= 3)
        elf_platform[1] = '0' + family;
    return elf_platform;
}
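/* The string above is reported via AT_PLATFORM, e.g. "i386", "i586" or "i686". */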

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    X86CPU *cpu = X86_CPU(thread_cpu);

    return cpu->env.features[FEAT_1_EDX];
}

#ifdef TARGET_X86_64
#define ELF_START_MMAP 0x2aaaaab000ULL

#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_X86_64

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rax = 0;
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}

#define ELF_NREG    27
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 29, as there should also be room for
 * the TRAPNO and ERR "registers", but Linux doesn't dump those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = env->regs[15];
    (*regs)[1] = env->regs[14];
    (*regs)[2] = env->regs[13];
    (*regs)[3] = env->regs[12];
    (*regs)[4] = env->regs[R_EBP];
    (*regs)[5] = env->regs[R_EBX];
    (*regs)[6] = env->regs[11];
    (*regs)[7] = env->regs[10];
    (*regs)[8] = env->regs[9];
    (*regs)[9] = env->regs[8];
    (*regs)[10] = env->regs[R_EAX];
    (*regs)[11] = env->regs[R_ECX];
    (*regs)[12] = env->regs[R_EDX];
    (*regs)[13] = env->regs[R_ESI];
    (*regs)[14] = env->regs[R_EDI];
    (*regs)[15] = env->regs[R_EAX]; /* XXX */
    (*regs)[16] = env->eip;
    (*regs)[17] = env->segs[R_CS].selector & 0xffff;
    (*regs)[18] = env->eflags;
    (*regs)[19] = env->regs[R_ESP];
    (*regs)[20] = env->segs[R_SS].selector & 0xffff;
    (*regs)[21] = env->segs[R_FS].selector & 0xffff;
    (*regs)[22] = env->segs[R_GS].selector & 0xffff;
    (*regs)[23] = env->segs[R_DS].selector & 0xffff;
    (*regs)[24] = env->segs[R_ES].selector & 0xffff;
    (*regs)[25] = env->segs[R_FS].selector & 0xffff;
    (*regs)[26] = env->segs[R_GS].selector & 0xffff;
}
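/*
 * Note: slots 21 and 22 correspond to fs_base and gs_base in the kernel's
 * x86-64 user_regs_struct; reusing the FS/GS selectors there is an
 * approximation.
 */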

#else

#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_386

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* The SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts, %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us we have no such handler.  */
    regs->edx = 0;
}

#define ELF_NREG    17
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 19, as there should also be room for
 * the TRAPNO and ERR "registers", but Linux doesn't dump those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = env->regs[R_EBX];
    (*regs)[1] = env->regs[R_ECX];
    (*regs)[2] = env->regs[R_EDX];
    (*regs)[3] = env->regs[R_ESI];
    (*regs)[4] = env->regs[R_EDI];
    (*regs)[5] = env->regs[R_EBP];
    (*regs)[6] = env->regs[R_EAX];
    (*regs)[7] = env->segs[R_DS].selector & 0xffff;
    (*regs)[8] = env->segs[R_ES].selector & 0xffff;
    (*regs)[9] = env->segs[R_FS].selector & 0xffff;
    (*regs)[10] = env->segs[R_GS].selector & 0xffff;
    (*regs)[11] = env->regs[R_EAX]; /* XXX */
    (*regs)[12] = env->eip;
    (*regs)[13] = env->segs[R_CS].selector & 0xffff;
    (*regs)[14] = env->eflags;
    (*regs)[15] = env->regs[R_ESP];
    (*regs)[16] = env->segs[R_SS].selector & 0xffff;
}
#endif

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_ARM

#ifndef TARGET_AARCH64
/* 32 bit ARM definitions */

#define ELF_START_MMAP 0x80000000

#define ELF_ARCH        EM_ARM
#define ELF_CLASS       ELFCLASS32

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->uregs[16] = ARM_CPU_MODE_USR;
    if (infop->entry & 1) {
        regs->uregs[16] |= CPSR_T;
    }
    regs->uregs[15] = infop->entry & 0xfffffffe;
    regs->uregs[13] = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->uregs[2], stack + 8); /* envp */
    get_user_ual(regs->uregs[1], stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed afterwards anyway! */
    regs->uregs[0] = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->uregs[10] = infop->start_data;

    /* Support ARM FDPIC.  */
    if (info_is_fdpic(infop)) {
        /* As described in the ABI document, r7 points to the loadmap info
         * prepared by the kernel. If an interpreter is needed, r8 points
         * to the interpreter loadmap and r9 points to the interpreter
         * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and
         * r9 points to the main program PT_DYNAMIC info.
         */
        regs->uregs[7] = infop->loadmap_addr;
        if (infop->interpreter_loadmap_addr) {
            /* Executable is dynamically loaded.  */
            regs->uregs[8] = infop->interpreter_loadmap_addr;
            regs->uregs[9] = infop->interpreter_pt_dynamic_addr;
        } else {
            regs->uregs[8] = 0;
            regs->uregs[9] = infop->pt_dynamic_addr;
        }
    }
}

#define ELF_NREG    18
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
{
    (*regs)[0] = tswapreg(env->regs[0]);
    (*regs)[1] = tswapreg(env->regs[1]);
    (*regs)[2] = tswapreg(env->regs[2]);
    (*regs)[3] = tswapreg(env->regs[3]);
    (*regs)[4] = tswapreg(env->regs[4]);
    (*regs)[5] = tswapreg(env->regs[5]);
    (*regs)[6] = tswapreg(env->regs[6]);
    (*regs)[7] = tswapreg(env->regs[7]);
    (*regs)[8] = tswapreg(env->regs[8]);
    (*regs)[9] = tswapreg(env->regs[9]);
    (*regs)[10] = tswapreg(env->regs[10]);
    (*regs)[11] = tswapreg(env->regs[11]);
    (*regs)[12] = tswapreg(env->regs[12]);
    (*regs)[13] = tswapreg(env->regs[13]);
    (*regs)[14] = tswapreg(env->regs[14]);
    (*regs)[15] = tswapreg(env->regs[15]);

    (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env));
    (*regs)[17] = tswapreg(env->regs[0]); /* XXX */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum {
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
    ARM_HWCAP_ARM_CRUNCH    = 1 << 10,
    ARM_HWCAP_ARM_THUMBEE   = 1 << 11,
    ARM_HWCAP_ARM_NEON      = 1 << 12,
    ARM_HWCAP_ARM_VFPv3     = 1 << 13,
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 14,
    ARM_HWCAP_ARM_TLS       = 1 << 15,
    ARM_HWCAP_ARM_VFPv4     = 1 << 16,
    ARM_HWCAP_ARM_IDIVA     = 1 << 17,
    ARM_HWCAP_ARM_IDIVT     = 1 << 18,
    ARM_HWCAP_ARM_VFPD32    = 1 << 19,
    ARM_HWCAP_ARM_LPAE      = 1 << 20,
    ARM_HWCAP_ARM_EVTSTRM   = 1 << 21,
};

enum {
    ARM_HWCAP2_ARM_AES      = 1 << 0,
    ARM_HWCAP2_ARM_PMULL    = 1 << 1,
    ARM_HWCAP2_ARM_SHA1     = 1 << 2,
    ARM_HWCAP2_ARM_SHA2     = 1 << 3,
    ARM_HWCAP2_ARM_CRC32    = 1 << 4,
};

/* The commpage only exists for 32 bit kernels */

/* Return 1 if the proposed guest space is suitable for the guest.
 * Return 0 if the proposed guest space isn't suitable, but another
 * address space should be tried.
 * Return -1 if there is no way the proposed guest space can be
 * valid regardless of the base.
 * The guest code may leave a page mapped and populate it if the
 * address is suitable.
 */
static int init_guest_commpage(unsigned long guest_base,
                               unsigned long guest_size)
{
    unsigned long real_start, test_page_addr;

    /* We need to check that we can force a fault on access to the
     * commpage at 0xffff0fxx
     */
    test_page_addr = guest_base + (0xffff0f00 & qemu_host_page_mask);

    /* If the commpage lies within the already allocated guest space,
     * then there is no way we can allocate it.
     *
     * You may be thinking that this check is redundant because
     * we already validated the guest size against MAX_RESERVED_VA;
     * but if qemu_host_page_mask is unusually large, then
     * test_page_addr may be lower.
     */
    if (test_page_addr >= guest_base
        && test_page_addr < (guest_base + guest_size)) {
        return -1;
    }

    /* Note it needs to be writeable to let us initialise it */
    real_start = (unsigned long)
                 mmap((void *)test_page_addr, qemu_host_page_size,
                     PROT_READ | PROT_WRITE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

    /* If we can't map it then try another address */
    if (real_start == -1ul) {
        return 0;
    }

    if (real_start != test_page_addr) {
        /* OS didn't put the page where we asked - unmap and reject */
        munmap((void *)real_start, qemu_host_page_size);
        return 0;
    }

    /* Leave the page mapped
     * Populate it (mmap should have left it all 0'd)
     */

    /* Kernel helper versions */
    __put_user(5, (uint32_t *)g2h(0xffff0ffcul));
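    /*
     * 0xffff0ffc holds the kuser helper version word; the value 5 is
     * presumably meant to match the kernel's __kuser_helper_version for
     * the helpers emulated at 0xffff0fxx.
     */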

    /* Now that it's populated, make it RO */
    if (mprotect((void *)test_page_addr, qemu_host_page_size, PROT_READ)) {
        perror("Protecting guest commpage");
        exit(-1);
    }

    return 1; /* All good */
}

#define ELF_HWCAP get_elf_hwcap()
#define ELF_HWCAP2 get_elf_hwcap2()

static uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_ARM_SWP;
    hwcaps |= ARM_HWCAP_ARM_HALF;
    hwcaps |= ARM_HWCAP_ARM_THUMB;
    hwcaps |= ARM_HWCAP_ARM_FAST_MULT;

    /* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
    do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

    /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
    GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
    GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
    GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
    GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
    GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
    GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
    GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
    GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4);
    GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA);
    GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT);
    /* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c.
     * Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of
     * ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated
     * to our VFP_FP16 feature bit.
     */
    GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPD32);
    GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);

    return hwcaps;
}

static uint32_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
    GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
    GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
    GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
    GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
    return hwcaps;
}

#undef GET_FEATURE
#undef GET_FEATURE_ID

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    CPUARMState *env = thread_cpu->env_ptr;

#ifdef TARGET_WORDS_BIGENDIAN
# define END  "b"
#else
# define END  "l"
#endif
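    /* The string returned here is reported via AT_PLATFORM, e.g. "v7l" or "v7b". */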

    if (arm_feature(env, ARM_FEATURE_V8)) {
        return "v8" END;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            return "v7m" END;
        } else {
            return "v7" END;
        }
    } else if (arm_feature(env, ARM_FEATURE_V6)) {
        return "v6" END;
    } else if (arm_feature(env, ARM_FEATURE_V5)) {
        return "v5" END;
    } else {
        return "v4" END;
    }

#undef END
}

#else
/* 64 bit ARM definitions */
#define ELF_START_MMAP 0x80000000

#define ELF_ARCH        EM_AARCH64
#define ELF_CLASS       ELFCLASS64
#ifdef TARGET_WORDS_BIGENDIAN
# define ELF_PLATFORM    "aarch64_be"
#else
# define ELF_PLATFORM    "aarch64"
#endif

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->pc = infop->entry & ~0x3ULL;
    regs->sp = stack;
}

#define ELF_NREG    34
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUARMState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(env->xregs[i]);
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env));
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum {
    ARM_HWCAP_A64_FP            = 1 << 0,
    ARM_HWCAP_A64_ASIMD         = 1 << 1,
    ARM_HWCAP_A64_EVTSTRM       = 1 << 2,
    ARM_HWCAP_A64_AES           = 1 << 3,
    ARM_HWCAP_A64_PMULL         = 1 << 4,
    ARM_HWCAP_A64_SHA1          = 1 << 5,
    ARM_HWCAP_A64_SHA2          = 1 << 6,
    ARM_HWCAP_A64_CRC32         = 1 << 7,
    ARM_HWCAP_A64_ATOMICS       = 1 << 8,
    ARM_HWCAP_A64_FPHP          = 1 << 9,
    ARM_HWCAP_A64_ASIMDHP       = 1 << 10,
    ARM_HWCAP_A64_CPUID         = 1 << 11,
    ARM_HWCAP_A64_ASIMDRDM      = 1 << 12,
    ARM_HWCAP_A64_JSCVT         = 1 << 13,
    ARM_HWCAP_A64_FCMA          = 1 << 14,
    ARM_HWCAP_A64_LRCPC         = 1 << 15,
    ARM_HWCAP_A64_DCPOP         = 1 << 16,
    ARM_HWCAP_A64_SHA3          = 1 << 17,
    ARM_HWCAP_A64_SM3           = 1 << 18,
    ARM_HWCAP_A64_SM4           = 1 << 19,
    ARM_HWCAP_A64_ASIMDDP       = 1 << 20,
    ARM_HWCAP_A64_SHA512        = 1 << 21,
    ARM_HWCAP_A64_SVE           = 1 << 22,
    ARM_HWCAP_A64_ASIMDFHM      = 1 << 23,
    ARM_HWCAP_A64_DIT           = 1 << 24,
    ARM_HWCAP_A64_USCAT         = 1 << 25,
    ARM_HWCAP_A64_ILRCPC        = 1 << 26,
    ARM_HWCAP_A64_FLAGM         = 1 << 27,
    ARM_HWCAP_A64_SSBS          = 1 << 28,
    ARM_HWCAP_A64_SB            = 1 << 29,
    ARM_HWCAP_A64_PACA          = 1 << 30,
    ARM_HWCAP_A64_PACG          = 1UL << 31,
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_A64_FP;
    hwcaps |= ARM_HWCAP_A64_ASIMD;
    hwcaps |= ARM_HWCAP_A64_CPUID;

    /* probe for the extra features */
#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

    GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
    GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
    GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
    GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
    GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
    GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
    GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
    GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
    GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
    GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
    GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
    GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
    GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
    GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
    GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
    GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
    GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
    GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
    GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
    GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);

#undef GET_FEATURE_ID

    return hwcaps;
}

#endif /* not TARGET_AARCH64 */
#endif /* TARGET_ARM */

#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_SPARCV9

#define STACK_BIAS              2047
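/*
 * The SPARC V9 ABI biases the stack pointer: %sp points 2047 bytes below
 * the actual top of the frame, which lets 64-bit code tell biased 64-bit
 * frames apart from unbiased 32-bit ones.
 */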

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
#ifndef TARGET_ABI32
    regs->tstate = 0;
#endif
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
#ifdef TARGET_ABI32
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#else
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    else
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
#endif
}

#else
#define ELF_START_MMAP 0x80000000
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_SPARC

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->psr = 0;
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#endif
#endif

#ifdef TARGET_PPC

#define ELF_MACHINE    PPC_ELF_MACHINE
#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define ELF_CLASS       ELFCLASS32

#endif

#define ELF_ARCH        EM_PPC

/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
   See arch/powerpc/include/asm/cputable.h.  */
enum {
    QEMU_PPC_FEATURE_32 = 0x80000000,
    QEMU_PPC_FEATURE_64 = 0x40000000,
    QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
    QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
    QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
    QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
    QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
    QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
    QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
    QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
    QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
    QEMU_PPC_FEATURE_NO_TB = 0x00100000,
    QEMU_PPC_FEATURE_POWER4 = 0x00080000,
    QEMU_PPC_FEATURE_POWER5 = 0x00040000,
    QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
    QEMU_PPC_FEATURE_CELL = 0x00010000,
    QEMU_PPC_FEATURE_BOOKE = 0x00008000,
    QEMU_PPC_FEATURE_SMT = 0x00004000,
    QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
    QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
    QEMU_PPC_FEATURE_PA6T = 0x00000800,
    QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
    QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
    QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
    QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
    QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,

    QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
    QEMU_PPC_FEATURE_PPC_LE = 0x00000001,

    /* Feature definitions in AT_HWCAP2.  */
    QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */
    QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */
    QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */
    QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
    QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
    QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
    QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

    /* We don't have to be terribly complete here; the high points are
       Altivec/FP/SPE support.  Anything else is just a bonus.  */
#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flags, feature) \
    do { \
        if ((cpu->env.insns_flags2 & flags) == flags) { \
            features |= feature; \
        } \
    } while (0)
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
    GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP);
    GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX);
    GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 |
                  PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206),
                  QEMU_PPC_FEATURE_ARCH_2_06);
#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

#define ELF_HWCAP2 get_elf_hwcap2()

static uint32_t get_elf_hwcap2(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flag, feature)                                      \
    do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0)

    GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL);
    GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
    GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
                  PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07);
    GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00);

#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                     \
    do {                                                \
        PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);              \
        /*                                              \
         * Handle glibc compatibility: these magic entries must \
         * be at the lowest addresses in the final auxv.        \
         */                                             \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size); \
        NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size); \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
    } while (0)

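/*
 * For the 64-bit ELFv1 ABI (get_ppc64_abi() < 2), e_entry points at a
 * function descriptor rather than at code: the first doubleword is the
 * real entry address and the second is the TOC pointer that belongs in
 * r2.  ELFv2 has no descriptors; it passes the global entry point in r12
 * instead, which is what init_thread() below implements.
 */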
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    if (get_ppc64_abi(infop) < 2) {
        uint64_t val;
        get_user_u64(val, infop->entry + 8);
        _regs->gpr[2] = val + infop->load_bias;
        get_user_u64(val, infop->entry);
        infop->entry = val + infop->load_bias;
    } else {
        _regs->gpr[12] = infop->entry;  /* r12 set to global entry address */
    }
#endif
    _regs->nip = infop->entry;
}

/* See linux kernel: arch/powerpc/include/asm/elf.h.  */
#define ELF_NREG 48
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
{
    int i;
    target_ulong ccr = 0;

    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[i] = tswapreg(env->gpr[i]);
    }

    (*regs)[32] = tswapreg(env->nip);
    (*regs)[33] = tswapreg(env->msr);
    (*regs)[35] = tswapreg(env->ctr);
    (*regs)[36] = tswapreg(env->lr);
    (*regs)[37] = tswapreg(env->xer);

    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    (*regs)[38] = tswapreg(ccr);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_MIPS

#define ELF_START_MMAP 0x80000000

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#define ELF_ARCH    EM_MIPS

#define elf_check_arch(x) ((x) == EM_MIPS || (x) == EM_NANOMIPS)

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

/* See linux kernel: arch/mips/include/asm/elf.h.  */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/include/asm/reg.h.  */
enum {
#ifdef TARGET_MIPS64
    TARGET_EF_R0 = 0,
#else
    TARGET_EF_R0 = 6,
#endif
    TARGET_EF_R26 = TARGET_EF_R0 + 26,
    TARGET_EF_R27 = TARGET_EF_R0 + 27,
    TARGET_EF_LO = TARGET_EF_R0 + 32,
    TARGET_EF_HI = TARGET_EF_R0 + 33,
    TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
    TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
    TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
    TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
};
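/*
 * On 32-bit targets TARGET_EF_R0 is 6 because the O32 core-dump layout
 * starts with six pad words before the register block; see the kernel's
 * arch/mips/include/asm/reg.h.
 */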

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
{
    int i;

    for (i = 0; i < TARGET_EF_R0; i++) {
        (*regs)[i] = 0;
    }
    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]);
    }

    (*regs)[TARGET_EF_R26] = 0;
    (*regs)[TARGET_EF_R27] = 0;
    (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]);
    (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]);
    (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC);
    (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr);
    (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status);
    (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

/* See arch/mips/include/uapi/asm/hwcap.h.  */
enum {
    HWCAP_MIPS_R6           = (1 << 0),
    HWCAP_MIPS_MSA          = (1 << 1),
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);
    uint32_t hwcaps = 0;

#define GET_FEATURE(flag, hwcap) \
    do { if (cpu->env.insn_flags & (flag)) { hwcaps |= hwcap; } } while (0)

    GET_FEATURE(ISA_MIPS32R6 | ISA_MIPS64R6, HWCAP_MIPS_R6);
    GET_FEATURE(ASE_MSA, HWCAP_MIPS_MSA);

#undef GET_FEATURE

    return hwcaps;
}

#endif /* TARGET_MIPS */

#ifdef TARGET_MICROBLAZE

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_MICROBLAZE

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->r1 = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        4096

#define USE_ELF_CORE_DUMP
#define ELF_NREG 38
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
{
    int i, pos = 0;

    for (i = 0; i < 32; i++) {
        (*regs)[pos++] = tswapreg(env->regs[i]);
    }

    for (i = 0; i < 6; i++) {
        (*regs)[pos++] = tswapreg(env->sregs[i]);
    }
}

#endif /* TARGET_MICROBLAZE */

#ifdef TARGET_NIOS2

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ((x) == EM_ALTERA_NIOS2)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_ALTERA_NIOS2

static void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->ea = infop->entry;
    regs->sp = infop->start_stack;
    regs->estatus = 0x3;
}

#define ELF_EXEC_PAGESIZE        4096

#define USE_ELF_CORE_DUMP
#define ELF_NREG 49
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUNios2State *env)
{
    int i;

    (*regs)[0] = -1;
    for (i = 1; i < 8; i++)    /* r0-r7 */
        (*regs)[i] = tswapreg(env->regs[i + 7]);

    for (i = 8; i < 16; i++)   /* r8-r15 */
        (*regs)[i] = tswapreg(env->regs[i - 8]);

    for (i = 16; i < 24; i++)  /* r16-r23 */
        (*regs)[i] = tswapreg(env->regs[i + 7]);
    (*regs)[24] = -1;    /* R_ET */
    (*regs)[25] = -1;    /* R_BT */
    (*regs)[26] = tswapreg(env->regs[R_GP]);
    (*regs)[27] = tswapreg(env->regs[R_SP]);
    (*regs)[28] = tswapreg(env->regs[R_FP]);
    (*regs)[29] = tswapreg(env->regs[R_EA]);
    (*regs)[30] = -1;    /* R_SSTATUS */
    (*regs)[31] = tswapreg(env->regs[R_RA]);

    (*regs)[32] = tswapreg(env->regs[R_PC]);

    (*regs)[33] = -1; /* R_STATUS */
    (*regs)[34] = tswapreg(env->regs[CR_ESTATUS]);

    for (i = 35; i < 49; i++)    /* ... */
        (*regs)[i] = -1;
}

#endif /* TARGET_NIOS2 */

#ifdef TARGET_OPENRISC

#define ELF_START_MMAP 0x08000000

#define ELF_ARCH EM_OPENRISC
#define ELF_CLASS ELFCLASS32
#define ELF_DATA  ELFDATA2MSB

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->gpr[1] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

/* See linux kernel arch/openrisc/include/asm/elf.h.  */
#define ELF_NREG 34 /* gprs and pc, sr */
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUOpenRISCState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(cpu_get_gpr(env, i));
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(cpu_get_sr(env));
}
#define ELF_HWCAP 0
#define ELF_PLATFORM NULL

#endif /* TARGET_OPENRISC */

#ifdef TARGET_SH4

#define ELF_START_MMAP 0x80000000

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_SH

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

/* See linux kernel: arch/sh/include/asm/elf.h.  */
#define ELF_NREG 23
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/sh/include/asm/ptrace.h.  */
enum {
    TARGET_REG_PC = 16,
    TARGET_REG_PR = 17,
    TARGET_REG_SR = 18,
    TARGET_REG_GBR = 19,
    TARGET_REG_MACH = 20,
    TARGET_REG_MACL = 21,
    TARGET_REG_SYSCALL = 22
};

static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
                                      const CPUSH4State *env)
{
    int i;

    for (i = 0; i < 16; i++) {
        (*regs)[i] = tswapreg(env->gregs[i]);
    }

    (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
    (*regs)[TARGET_REG_PR] = tswapreg(env->pr);
    (*regs)[TARGET_REG_SR] = tswapreg(env->sr);
    (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr);
    (*regs)[TARGET_REG_MACH] = tswapreg(env->mach);
    (*regs)[TARGET_REG_MACL] = tswapreg(env->macl);
    (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

enum {
    SH_CPU_HAS_FPU            = 0x0001, /* Hardware FPU support */
    SH_CPU_HAS_P2_FLUSH_BUG   = 0x0002, /* Need to flush the cache in P2 area */
    SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */
    SH_CPU_HAS_DSP            = 0x0008, /* SH-DSP: DSP support */
    SH_CPU_HAS_PERF_COUNTER   = 0x0010, /* Hardware performance counters */
    SH_CPU_HAS_PTEA           = 0x0020, /* PTEA register */
    SH_CPU_HAS_LLSC           = 0x0040, /* movli.l/movco.l */
    SH_CPU_HAS_L2_CACHE       = 0x0080, /* Secondary cache / URAM */
    SH_CPU_HAS_OP32           = 0x0100, /* 32-bit instruction support */
    SH_CPU_HAS_PTEAEX         = 0x0200, /* PTE ASID Extension support */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    SuperHCPU *cpu = SUPERH_CPU(thread_cpu);
    uint32_t hwcap = 0;

    hwcap |= SH_CPU_HAS_FPU;

    if (cpu->env.features & SH_FEATURE_SH4A) {
        hwcap |= SH_CPU_HAS_LLSC;
    }

    return hwcap;
}

#endif

#ifdef TARGET_CRIS

#define ELF_START_MMAP 0x80000000

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_CRIS

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->erp = infop->entry;
}

#define ELF_EXEC_PAGESIZE        8192

#endif

#ifdef TARGET_M68K

#define ELF_START_MMAP 0x80000000

#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->sr = 0;
    regs->pc = infop->entry;
}

/* See linux kernel: arch/m68k/include/asm/elf.h.  */
#define ELF_NREG 20
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
{
    (*regs)[0] = tswapreg(env->dregs[1]);
    (*regs)[1] = tswapreg(env->dregs[2]);
    (*regs)[2] = tswapreg(env->dregs[3]);
    (*regs)[3] = tswapreg(env->dregs[4]);
    (*regs)[4] = tswapreg(env->dregs[5]);
    (*regs)[5] = tswapreg(env->dregs[6]);
    (*regs)[6] = tswapreg(env->dregs[7]);
    (*regs)[7] = tswapreg(env->aregs[0]);
    (*regs)[8] = tswapreg(env->aregs[1]);
    (*regs)[9] = tswapreg(env->aregs[2]);
    (*regs)[10] = tswapreg(env->aregs[3]);
    (*regs)[11] = tswapreg(env->aregs[4]);
    (*regs)[12] = tswapreg(env->aregs[5]);
    (*regs)[13] = tswapreg(env->aregs[6]);
    (*regs)[14] = tswapreg(env->dregs[0]);
    (*regs)[15] = tswapreg(env->aregs[7]);
    (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */
    (*regs)[17] = tswapreg(env->sr);
    (*regs)[18] = tswapreg(env->pc);
    (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       8192

#endif

#ifdef TARGET_ALPHA

#define ELF_START_MMAP (0x30000000000ULL)

#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->ps = 8;
    regs->usp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        8192

#endif /* TARGET_ALPHA */

#ifdef TARGET_S390X

#define ELF_START_MMAP (0x20000000000ULL)

#define ELF_CLASS       ELFCLASS64
#define ELF_DATA        ELFDATA2MSB
#define ELF_ARCH        EM_S390

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->psw.addr = infop->entry;
    regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
    regs->gprs[15] = infop->start_stack;
}

#endif /* TARGET_S390X */

#ifdef TARGET_TILEGX

/* 42 bits of virtual address space are actually used; half of it is for user mode */
#define ELF_START_MMAP (0x00000020000000000ULL)

#define elf_check_arch(x) ((x) == EM_TILEGX)

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_TILEGX

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->sp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        65536 /* TILE-Gx page size is 64KB */

#endif /* TARGET_TILEGX */

#ifdef TARGET_RISCV

#define ELF_START_MMAP 0x80000000
#define ELF_ARCH  EM_RISCV

#ifdef TARGET_RISCV32
#define ELF_CLASS ELFCLASS32
#else
#define ELF_CLASS ELFCLASS64
#endif

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->sepc = infop->entry;
    regs->sp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_RISCV */

#ifdef TARGET_HPPA

#define ELF_START_MMAP  0x80000000
#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_PARISC
#define ELF_PLATFORM    "PARISC"
#define STACK_GROWS_DOWN 0
#define STACK_ALIGNMENT  64

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->iaoq[0] = infop->entry;
    regs->iaoq[1] = infop->entry + 4;
    regs->gr[23] = 0;
    regs->gr[24] = infop->arg_start;
    regs->gr[25] = (infop->arg_end - infop->arg_start) / sizeof(abi_ulong);
    /* The top-of-stack contains a linkage buffer.  */
    regs->gr[30] = infop->start_stack + 64;
    regs->gr[31] = infop->entry;
}

#endif /* TARGET_HPPA */

#ifdef TARGET_XTENSA

#define ELF_START_MMAP 0x20000000

#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_XTENSA

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->windowbase = 0;
    regs->windowstart = 1;
    regs->areg[1] = infop->start_stack;
    regs->pc = infop->entry;
}

/* See linux kernel: arch/xtensa/include/asm/elf.h.  */
#define ELF_NREG 128
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

enum {
    TARGET_REG_PC,
    TARGET_REG_PS,
    TARGET_REG_LBEG,
    TARGET_REG_LEND,
    TARGET_REG_LCOUNT,
    TARGET_REG_SAR,
    TARGET_REG_WINDOWSTART,
    TARGET_REG_WINDOWBASE,
    TARGET_REG_THREADPTR,
    TARGET_REG_AR0 = 64,
};

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUXtensaState *env)
{
    unsigned i;

    (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
    (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM);
    (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]);
    (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]);
    (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]);
    (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]);
    (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]);
    (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]);
    (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]);
    xtensa_sync_phys_from_window((CPUXtensaState *)env);
    for (i = 0; i < env->config->nareg; ++i) {
        (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]);
    }
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif /* TARGET_XTENSA */

#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifndef ELF_MACHINE
#define ELF_MACHINE ELF_ARCH
#endif

#ifndef elf_check_arch
#define elf_check_arch(x) ((x) == ELF_ARCH)
#endif

#ifndef ELF_HWCAP
#define ELF_HWCAP 0
#endif

#ifndef STACK_GROWS_DOWN
#define STACK_GROWS_DOWN 1
#endif

#ifndef STACK_ALIGNMENT
#define STACK_ALIGNMENT 16
#endif

#ifdef TARGET_ABI32
#undef ELF_CLASS
#define ELF_CLASS ELFCLASS32
#undef bswaptls
#define bswaptls(ptr) bswap32s(ptr)
#endif
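/*
 * bswaptls() swaps a target-"long"-sized field: always 32 bits under
 * TARGET_ABI32, otherwise whatever the target's natural word size is.
 */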

#include "elf.h"

struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};


#define N_MAGIC(exec) ((exec).a_info & 0xffff)
#define OMAGIC 0407
#define NMAGIC 0410
#define ZMAGIC 0413
#define QMAGIC 0314

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE \
        (((eppnt->p_align & ~qemu_host_page_mask) != 0) ? \
         TARGET_PAGE_SIZE : MAX(qemu_host_page_size, TARGET_PAGE_SIZE))
#define TARGET_ELF_PAGELENGTH(_v) ROUND_UP((_v), TARGET_ELF_EXEC_PAGESIZE)
#define TARGET_ELF_PAGESTART(_v) ((_v) & \
                                 ~(abi_ulong)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
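/*
 * Worked example with a 4 KiB ELF page size:
 * TARGET_ELF_PAGESTART(0x12345) == 0x12000 and
 * TARGET_ELF_PAGEOFFSET(0x12345) == 0x345.
 */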
#define DLINFO_ITEMS 15
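/*
 * DLINFO_ITEMS counts the NEW_AUX_ENT() entries emitted unconditionally
 * when the auxiliary vector is built; it sizes the auxv block and must be
 * kept in sync with that code.
 */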

static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}

#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr, int phnum)
{
    int i;
    for (i = 0; i < phnum; ++i, ++phdr) {
        bswap32s(&phdr->p_type);        /* Segment type */
        bswap32s(&phdr->p_flags);       /* Segment flags */
        bswaptls(&phdr->p_offset);      /* Segment file offset */
        bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
        bswaptls(&phdr->p_paddr);       /* Segment physical address */
        bswaptls(&phdr->p_filesz);      /* Segment size in file */
        bswaptls(&phdr->p_memsz);       /* Segment size in memory */
        bswaptls(&phdr->p_align);       /* Segment alignment */
    }
}

static void bswap_shdr(struct elf_shdr *shdr, int shnum)
{
    int i;
    for (i = 0; i < shnum; ++i, ++shdr) {
        bswap32s(&shdr->sh_name);
        bswap32s(&shdr->sh_type);
        bswaptls(&shdr->sh_flags);
        bswaptls(&shdr->sh_addr);
        bswaptls(&shdr->sh_offset);
        bswaptls(&shdr->sh_size);
        bswap32s(&shdr->sh_link);
        bswap32s(&shdr->sh_info);
        bswaptls(&shdr->sh_addralign);
        bswaptls(&shdr->sh_entsize);
    }
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}

#ifdef TARGET_MIPS
static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags)
{
    bswap16s(&abiflags->version);
    bswap32s(&abiflags->ases);
    bswap32s(&abiflags->isa_ext);
    bswap32s(&abiflags->flags1);
    bswap32s(&abiflags->flags2);
}
#endif
#else
static inline void bswap_ehdr(struct elfhdr *ehdr) { }
static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static inline void bswap_sym(struct elf_sym *sym) { }
#ifdef TARGET_MIPS
static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { }
#endif
#endif

#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(int, const CPUArchState *);
#endif /* USE_ELF_CORE_DUMP */
static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);

/* Verify the portions of EHDR within E_IDENT for the target.
   This can be performed before bswapping the entire header.  */
static bool elf_check_ident(struct elfhdr *ehdr)
{
    return (ehdr->e_ident[EI_MAG0] == ELFMAG0
            && ehdr->e_ident[EI_MAG1] == ELFMAG1
            && ehdr->e_ident[EI_MAG2] == ELFMAG2
            && ehdr->e_ident[EI_MAG3] == ELFMAG3
            && ehdr->e_ident[EI_CLASS] == ELF_CLASS
            && ehdr->e_ident[EI_DATA] == ELF_DATA
            && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
}

/* Verify the portions of EHDR outside of E_IDENT for the target.
   This has to wait until after bswapping the header.  */
static bool elf_check_ehdr(struct elfhdr *ehdr)
{
    return (elf_check_arch(ehdr->e_machine)
            && ehdr->e_ehsize == sizeof(struct elfhdr)
            && ehdr->e_phentsize == sizeof(struct elf_phdr)
            && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
}

/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch,
                                  abi_ulong p, abi_ulong stack_limit)
{
    char *tmp;
    int len, i;
    abi_ulong top = p;

    if (!p) {
        return 0;       /* bullet-proofing */
    }

    if (STACK_GROWS_DOWN) {
        int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1;
        for (i = argc - 1; i >= 0; --i) {
            tmp = argv[i];
            if (!tmp) {
                fprintf(stderr, "VFS: argc is wrong\n");
                exit(-1);
            }
            len = strlen(tmp) + 1;
            tmp += len;

            if (len > (p - stack_limit)) {
                return 0;
            }
            while (len) {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;

                memcpy_fromfs(scratch + offset, tmp, bytes_to_copy);

                if (offset == 0) {
                    memcpy_to_target(p, scratch, top - p);
                    top = p;
                    offset = TARGET_PAGE_SIZE;
                }
            }
        }
        if (p != top) {
            memcpy_to_target(p, scratch + offset, top - p);
        }
    } else {
        int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE);
        for (i = 0; i < argc; ++i) {
            tmp = argv[i];
            if (!tmp) {
1674                fprintf(stderr, "VFS: argc is wrong\n");
1675                exit(-1);
1676            }
1677            len = strlen(tmp) + 1;
1678            if (len > (stack_limit - p)) {
1679                return 0;
1680            }
1681            while (len) {
1682                int bytes_to_copy = (len > remaining) ? remaining : len;
1683
1684                memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy);
1685
1686                tmp += bytes_to_copy;
1687                remaining -= bytes_to_copy;
1688                p += bytes_to_copy;
1689                len -= bytes_to_copy;
1690
1691                if (remaining == 0) {
1692                    memcpy_to_target(top, scratch, p - top);
1693                    top = p;
1694                    remaining = TARGET_PAGE_SIZE;
1695                }
1696            }
1697        }
1698        if (p != top) {
1699            memcpy_to_target(top, scratch, p - top);
1700        }
1701    }
1702
1703    return p;
1704}
1705
1706/* Older Linux kernels provide up to MAX_ARG_PAGES (default: 32) pages
1707 * of argument/environment space. Newer kernels (>2.6.33) allow more,
1708 * dependent on stack size, but guarantee at least 32 pages for
1709 * backwards compatibility.
1710 */
1711#define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE)
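    /* For example, with 4 KiB target pages this guarantees 128 KiB. */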
1712
1713static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
1714                                 struct image_info *info)
1715{
1716    abi_ulong size, error, guard;
1717
1718    size = guest_stack_size;
1719    if (size < STACK_LOWER_LIMIT) {
1720        size = STACK_LOWER_LIMIT;
1721    }
1722    guard = TARGET_PAGE_SIZE;
1723    if (guard < qemu_real_host_page_size) {
1724        guard = qemu_real_host_page_size;
1725    }
1726
1727    error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
1728                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1729    if (error == -1) {
1730        perror("mmap stack");
1731        exit(-1);
1732    }
1733
1734    /* We reserve one extra page at the top of the stack as guard.  */
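        /*
         * Illustration (assuming STACK_GROWS_DOWN): the mapping made
         * above is laid out as [ guard | stack ] from low to high
         * addresses, so stack_limit points just past the PROT_NONE
         * guard page and the returned initial stack pointer sits at
         * the high end of the usable region.
         */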
1735    if (STACK_GROWS_DOWN) {
1736        target_mprotect(error, guard, PROT_NONE);
1737        info->stack_limit = error + guard;
1738        return info->stack_limit + size - sizeof(void *);
1739    } else {
1740        target_mprotect(error + size, guard, PROT_NONE);
1741        info->stack_limit = error + size;
1742        return error;
1743    }
1744}
1745
1746/* Map and zero the bss.  We need to explicitly zero any fractional pages
1747   after the data section (i.e. bss).  */
1748static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
1749{
1750    uintptr_t host_start, host_map_start, host_end;
1751
1752    last_bss = TARGET_PAGE_ALIGN(last_bss);
1753
1754    /* ??? There is confusion between qemu_real_host_page_size and
1755       qemu_host_page_size here and elsewhere in target_mmap, which
1756       may lead to the end of the data section mapping from the file
1757       not being mapped.  At least there was an explicit test and
1758       comment for that here, suggesting that "the file size must
1759       be known".  The comment probably pre-dates the introduction
1760       of the fstat system call in target_mmap which does in fact
1761       find out the size.  What isn't clear is if the workaround
1762       here is still actually needed.  For now, continue with it,
1763       but merge it with the "normal" mmap that would allocate the bss.  */
1764
1765    host_start = (uintptr_t) g2h(elf_bss);
1766    host_end = (uintptr_t) g2h(last_bss);
1767    host_map_start = REAL_HOST_PAGE_ALIGN(host_start);
1768
1769    if (host_map_start < host_end) {
1770        void *p = mmap((void *)host_map_start, host_end - host_map_start,
1771                       prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1772        if (p == MAP_FAILED) {
1773            perror("cannot mmap brk");
1774            exit(-1);
1775        }
1776    }
1777
1778    /* Ensure that the bss page(s) are valid */
1779    if ((page_get_flags(last_bss-1) & prot) != prot) {
1780        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID);
1781    }
1782
1783    if (host_start < host_map_start) {
1784        memset((void *)host_start, 0, host_map_start - host_start);
1785    }
1786}
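    /*
     * Worked example for zero_bss() (assumed addresses, 4 KiB pages on
     * both host and guest): with g2h(elf_bss) == 0x5010 and
     * g2h(TARGET_PAGE_ALIGN(last_bss)) == 0x7000, the anonymous
     * mapping covers 0x6000..0x7000 and the final memset() clears the
     * fractional page 0x5010..0x6000 left over from the file mapping.
     */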
1787
1788#ifdef TARGET_ARM
1789static int elf_is_fdpic(struct elfhdr *exec)
1790{
1791    return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
1792}
1793#else
1794/* Default implementation, always false.  */
1795static int elf_is_fdpic(struct elfhdr *exec)
1796{
1797    return 0;
1798}
1799#endif
1800
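    /*
     * Build the FDPIC load map on the target stack.  The resulting
     * layout, reading upward from the returned sp, matches the
     * kernel's struct elf32_fdpic_loadmap:
     *
     *     u16 version (0)
     *     u16 nsegs
     *     nsegs records of { u32 addr, u32 p_vaddr, u32 p_memsz }
     */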
1801static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
1802{
1803    uint16_t n;
1804    struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;
1805
1806    /* elf32_fdpic_loadseg */
1807    n = info->nsegs;
1808    while (n--) {
1809        sp -= 12;
1810        put_user_u32(loadsegs[n].addr, sp+0);
1811        put_user_u32(loadsegs[n].p_vaddr, sp+4);
1812        put_user_u32(loadsegs[n].p_memsz, sp+8);
1813    }
1814
1815    /* elf32_fdpic_loadmap */
1816    sp -= 4;
1817    put_user_u16(0, sp+0); /* version */
1818    put_user_u16(info->nsegs, sp+2); /* nsegs */
1819
1820    info->personality = PER_LINUX_FDPIC;
1821    info->loadmap_addr = sp;
1822
1823    return sp;
1824}
1825
1826static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
1827                                   struct elfhdr *exec,
1828                                   struct image_info *info,
1829                                   struct image_info *interp_info)
1830{
1831    abi_ulong sp;
1832    abi_ulong u_argc, u_argv, u_envp, u_auxv;
1833    int size;
1834    int i;
1835    abi_ulong u_rand_bytes;
1836    uint8_t k_rand_bytes[16];
1837    abi_ulong u_platform;
1838    const char *k_platform;
1839    const int n = sizeof(elf_addr_t);
1840
1841    sp = p;
1842
1843    /* Needs to be before we load the env/argc/... */
1844    if (elf_is_fdpic(exec)) {
1845        /* Need 4 byte alignment for these structs */
1846        sp &= ~3;
1847        sp = loader_build_fdpic_loadmap(info, sp);
1848        info->other_info = interp_info;
1849        if (interp_info) {
1850            interp_info->other_info = info;
1851            sp = loader_build_fdpic_loadmap(interp_info, sp);
1852            info->interpreter_loadmap_addr = interp_info->loadmap_addr;
1853            info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr;
1854        } else {
1855            info->interpreter_loadmap_addr = 0;
1856            info->interpreter_pt_dynamic_addr = 0;
1857        }
1858    }
1859
1860    u_platform = 0;
1861    k_platform = ELF_PLATFORM;
1862    if (k_platform) {
1863        size_t len = strlen(k_platform) + 1;
1864        if (STACK_GROWS_DOWN) {
1865            sp -= (len + n - 1) & ~(n - 1);
1866            u_platform = sp;
1867            /* FIXME - check return value of memcpy_to_target() for failure */
1868            memcpy_to_target(sp, k_platform, len);
1869        } else {
1870            memcpy_to_target(sp, k_platform, len);
1871            u_platform = sp;
1872            sp += len + 1;
1873        }
1874    }
1875
1876    /* Provide 16 byte alignment for the PRNG, and basic alignment for
1877     * the argv and envp pointers.
1878     */
1879    if (STACK_GROWS_DOWN) {
1880        sp = QEMU_ALIGN_DOWN(sp, 16);
1881    } else {
1882        sp = QEMU_ALIGN_UP(sp, 16);
1883    }
1884
1885    /*
1886     * Generate 16 random bytes for userspace PRNG seeding (not
1887     * cryptographically secure, but that is not the aim of QEMU).
1888     */
1889    for (i = 0; i < 16; i++) {
1890        k_rand_bytes[i] = rand();
1891    }
1892    if (STACK_GROWS_DOWN) {
1893        sp -= 16;
1894        u_rand_bytes = sp;
1895        /* FIXME - check return value of memcpy_to_target() for failure */
1896        memcpy_to_target(sp, k_rand_bytes, 16);
1897    } else {
1898        memcpy_to_target(sp, k_rand_bytes, 16);
1899        u_rand_bytes = sp;
1900        sp += 16;
1901    }
1902
1903    size = (DLINFO_ITEMS + 1) * 2;
1904    if (k_platform)
1905        size += 2;
1906#ifdef DLINFO_ARCH_ITEMS
1907    size += DLINFO_ARCH_ITEMS * 2;
1908#endif
1909#ifdef ELF_HWCAP2
1910    size += 2;
1911#endif
1912    info->auxv_len = size * n;
1913
1914    size += envc + argc + 2;
1915    size += 1;  /* argc itself */
1916    size *= n;
1917
1918    /* Allocate space and finalize stack alignment for entry now.  */
1919    if (STACK_GROWS_DOWN) {
1920        u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT);
1921        sp = u_argc;
1922    } else {
1923        u_argc = sp;
1924        sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT);
1925    }
1926
1927    u_argv = u_argc + n;
1928    u_envp = u_argv + (argc + 1) * n;
1929    u_auxv = u_envp + (envc + 1) * n;
1930    info->saved_auxv = u_auxv;
1931    info->arg_start = u_argv;
1932    info->arg_end = u_argv + argc * n;
1933
1934    /* This is correct because Linux defines
1935     * elf_addr_t as Elf32_Off / Elf64_Off
1936     */
1937#define NEW_AUX_ENT(id, val) do {               \
1938        put_user_ual(id, u_auxv);  u_auxv += n; \
1939        put_user_ual(val, u_auxv); u_auxv += n; \
1940    } while(0)
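        /*
         * Illustrative layout: once the AUX entries below and the
         * argv/envp pointers are written, the target stack at u_argc
         * reads, in target words of size n:
         *
         *     argc | argv[0..argc-1] | NULL | envp[0..envc-1] | NULL |
         *     { AT_*, value } pairs | { AT_NULL, 0 }
         *
         * which is the System V ABI initial process stack layout.
         */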
1941
1942#ifdef ARCH_DLINFO
1943    /*
1944     * ARCH_DLINFO must come first so platform specific code can enforce
1945     * special alignment requirements on the AUXV if necessary (eg. PPC).
1946     */
1947    ARCH_DLINFO;
1948#endif
1949    /* There must be exactly DLINFO_ITEMS entries here, or the assert
1950     * on info->auxv_len will trigger.
1951     */
1952    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
1953    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1954    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1955    if ((info->alignment & ~qemu_host_page_mask) != 0) {
1956        /* Target doesn't support host page size alignment */
1957        NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1958    } else {
1959        NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE,
1960                                               qemu_host_page_size)));
1961    }
1962    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
1963    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1964    NEW_AUX_ENT(AT_ENTRY, info->entry);
1965    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1966    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1967    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1968    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1969    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1970    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1971    NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
1972    NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE));
1973
1974#ifdef ELF_HWCAP2
1975    NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2);
1976#endif
1977
1978    if (u_platform) {
1979        NEW_AUX_ENT(AT_PLATFORM, u_platform);
1980    }
1981    NEW_AUX_ENT (AT_NULL, 0);
1982#undef NEW_AUX_ENT
1983
1984    /* Check that our initial calculation of the auxv length matches how much
1985     * we actually put into it.
1986     */
1987    assert(info->auxv_len == u_auxv - info->saved_auxv);
1988
1989    put_user_ual(argc, u_argc);
1990
1991    p = info->arg_strings;
1992    for (i = 0; i < argc; ++i) {
1993        put_user_ual(p, u_argv);
1994        u_argv += n;
1995        p += target_strlen(p) + 1;
1996    }
1997    put_user_ual(0, u_argv);
1998
1999    p = info->env_strings;
2000    for (i = 0; i < envc; ++i) {
2001        put_user_ual(p, u_envp);
2002        u_envp += n;
2003        p += target_strlen(p) + 1;
2004    }
2005    put_user_ual(0, u_envp);
2006
2007    return sp;
2008}
2009
2010unsigned long init_guest_space(unsigned long host_start,
2011                               unsigned long host_size,
2012                               unsigned long guest_start,
2013                               bool fixed)
2014{
2015    unsigned long current_start, aligned_start;
2016    int flags;
2017
2018    assert(host_start || host_size);
2019
2020    /* If only a starting address is given, then just verify that
2021     * address.  */
2022    if (host_start && !host_size) {
2023#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
2024        if (init_guest_commpage(host_start, host_size) != 1) {
2025            return (unsigned long)-1;
2026        }
2027#endif
2028        return host_start;
2029    }
2030
2031    /* Setup the initial flags and start address.  */
2032    current_start = host_start & qemu_host_page_mask;
2033    flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
2034    if (fixed) {
2035        flags |= MAP_FIXED;
2036    }
2037
2038    /* Otherwise, a non-zero size region of memory needs to be mapped
2039     * and validated.  */
2040
2041#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
2042    /* On 32-bit ARM, we need to map not just the usable memory, but
2043     * also the commpage.  Try to find a suitable place by allocating
2044     * a big chunk for all of it.  If host_start is set, then the
2045     * naive strategy is probably good enough.
2046     */
2047    if (!host_start) {
2048        unsigned long guest_full_size, host_full_size, real_start;
2049
2050        guest_full_size =
2051            (0xffff0f00 & qemu_host_page_mask) + qemu_host_page_size;
2052        host_full_size = guest_full_size - guest_start;
2053        real_start = (unsigned long)
2054            mmap(NULL, host_full_size, PROT_NONE, flags, -1, 0);
2055        if (real_start == (unsigned long)-1) {
2056            if (host_size < host_full_size - qemu_host_page_size) {
2057                /* We failed to map a contiguous segment, but we're
2058                 * allowed to have a gap between the usable memory and
2059                 * the commpage where other things can be mapped.
2060                 * This sparseness gives us more flexibility to find
2061                 * an address range.
2062                 */
2063                goto naive;
2064            }
2065            return (unsigned long)-1;
2066        }
2067        munmap((void *)real_start, host_full_size);
2068        if (real_start & ~qemu_host_page_mask) {
2069            /* The same thing again, but with an extra qemu_host_page_size
2070             * so that we can shift around alignment.
2071             */
2072            unsigned long real_size = host_full_size + qemu_host_page_size;
2073            real_start = (unsigned long)
2074                mmap(NULL, real_size, PROT_NONE, flags, -1, 0);
2075            if (real_start == (unsigned long)-1) {
2076                if (host_size < host_full_size - qemu_host_page_size) {
2077                    goto naive;
2078                }
2079                return (unsigned long)-1;
2080            }
2081            munmap((void *)real_start, real_size);
2082            real_start = HOST_PAGE_ALIGN(real_start);
2083        }
2084        current_start = real_start;
2085    }
2086 naive:
2087#endif
2088
2089    while (1) {
2090        unsigned long real_start, real_size, aligned_size;
2091        aligned_size = real_size = host_size;
2092
2093        /* Do not use mmap_find_vma here because that is limited to the
2094         * guest address space.  We are going to make the
2095         * guest address space fit whatever we're given.
2096         */
2097        real_start = (unsigned long)
2098            mmap((void *)current_start, host_size, PROT_NONE, flags, -1, 0);
2099        if (real_start == (unsigned long)-1) {
2100            return (unsigned long)-1;
2101        }
2102
2103        /* Check to see if the address is valid.  */
2104        if (host_start && real_start != current_start) {
2105            goto try_again;
2106        }
2107
2108        /* Ensure the address is properly aligned.  */
2109        if (real_start & ~qemu_host_page_mask) {
2110            /* Ideally, we adjust like
2111             *
2112             *    pages: [  ][  ][  ][  ][  ]
2113             *      old:   [   real   ]
2114             *             [ aligned  ]
2115             *      new:   [     real     ]
2116             *               [ aligned  ]
2117             *
2118             * But if there is something else mapped right after it,
2119             * then obviously it won't have room to grow, and the
2120             * kernel will put the new larger real someplace else with
2121             * unknown alignment (if we made it to here, then
2122             * fixed=false).  Which is why we grow real by a full page
2123             * size, instead of by part of one; so that even if we get
2124             * moved, we can still guarantee alignment.  But this does
2125             * mean that there is a padding of < 1 page both before
2126             * and after the aligned range; the "after" could
2127             * cause problems for ARM emulation where it could butt in
2128             * to where we need to put the commpage.
2129             */
2130            munmap((void *)real_start, host_size);
2131            real_size = aligned_size + qemu_host_page_size;
2132            real_start = (unsigned long)
2133                mmap((void *)real_start, real_size, PROT_NONE, flags, -1, 0);
2134            if (real_start == (unsigned long)-1) {
2135                return (unsigned long)-1;
2136            }
2137            aligned_start = HOST_PAGE_ALIGN(real_start);
2138        } else {
2139            aligned_start = real_start;
2140        }
2141
2142#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
2143        /* On 32-bit ARM, we need to also be able to map the commpage.  */
2144        int valid = init_guest_commpage(aligned_start - guest_start,
2145                                        aligned_size + guest_start);
2146        if (valid == -1) {
2147            munmap((void *)real_start, real_size);
2148            return (unsigned long)-1;
2149        } else if (valid == 0) {
2150            goto try_again;
2151        }
2152#endif
2153
2154        /* If nothing has said `return -1` or `goto try_again` yet,
2155         * then the address we have is good.
2156         */
2157        break;
2158
2159    try_again:
2160        /* That address didn't work.  Unmap and try a different one.
2161         * The address the host picked for us is typically right at
2162         * the top of the host address space and leaves the guest with
2163         * no usable address space.  Resort to a linear search.  We
2164         * already compensated for mmap_min_addr, so this should not
2165         * happen often.  Probably means we got unlucky and host
2166         * address space randomization put a shared library somewhere
2167         * inconvenient.
2168         *
2169         * This is probably a good strategy if host_start is set, but
2170         * probably a bad one if not, since in that case we got here
2171         * because of trouble with the ARM commpage setup.
2172         */
2173        munmap((void *)real_start, real_size);
2174        current_start += qemu_host_page_size;
2175        if (host_start == current_start) {
2176            /* Theoretically possible if host doesn't have any suitably
2177             * aligned areas.  Normally the first mmap will fail.
2178             */
2179            return (unsigned long)-1;
2180        }
2181    }
2182
2183    qemu_log_mask(CPU_LOG_PAGE, "Reserved 0x%lx bytes of guest address space\n", host_size);
2184
2185    return aligned_start;
2186}
2187
2188static void probe_guest_base(const char *image_name,
2189                             abi_ulong loaddr, abi_ulong hiaddr)
2190{
2191    /* Probe for a suitable guest base address, if the user has not set
2192     * it explicitly, and set guest_base appropriately.
2193     * In case of error we will print a suitable message and exit.
2194     */
2195    const char *errmsg;
2196    if (!have_guest_base && !reserved_va) {
2197        unsigned long host_start, real_start, host_size;
2198
2199        /* Round addresses to page boundaries.  */
2200        loaddr &= qemu_host_page_mask;
2201        hiaddr = HOST_PAGE_ALIGN(hiaddr);
2202
2203        if (loaddr < mmap_min_addr) {
2204            host_start = HOST_PAGE_ALIGN(mmap_min_addr);
2205        } else {
2206            host_start = loaddr;
2207            if (host_start != loaddr) {
2208                errmsg = "Address overflow loading ELF binary";
2209                goto exit_errmsg;
2210            }
2211        }
2212        host_size = hiaddr - loaddr;
2213
2214        /* Setup the initial guest memory space with ranges gleaned from
2215         * the ELF image that is being loaded.
2216         */
2217        real_start = init_guest_space(host_start, host_size, loaddr, false);
2218        if (real_start == (unsigned long)-1) {
2219            errmsg = "Unable to find space for application";
2220            goto exit_errmsg;
2221        }
2222        guest_base = real_start - loaddr;
2223
2224        qemu_log_mask(CPU_LOG_PAGE, "Relocating guest address space from 0x"
2225                      TARGET_ABI_FMT_lx " to 0x%lx\n",
2226                      loaddr, real_start);
2227    }
2228    return;
2229
2230exit_errmsg:
2231    fprintf(stderr, "%s: %s\n", image_name, errmsg);
2232    exit(-1);
2233}
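    /*
     * Worked example for probe_guest_base() (assumed numbers): if the
     * image spans guest addresses 0x10000..0x60000 and
     * init_guest_space() places the block at host address
     * 0x7f0000010000, then guest_base becomes
     * 0x7f0000010000 - 0x10000, and from then on every guest address
     * g is translated to host address g + guest_base.
     */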
2234
2235
2236/* Load an ELF image into the address space.
2237
2238   IMAGE_NAME is the filename of the image, to use in error messages.
2239   IMAGE_FD is the open file descriptor for the image.
2240
2241   BPRM_BUF is a copy of the beginning of the file; this of course
2242   contains the elf file header at offset 0.  It is assumed that this
2243   buffer is sufficiently aligned to present no problems to the host
2244   in accessing data at aligned offsets within the buffer.
2245
2246   On return: INFO values will be filled in, as necessary or available.  */
2247
2248static void load_elf_image(const char *image_name, int image_fd,
2249                           struct image_info *info, char **pinterp_name,
2250                           char bprm_buf[BPRM_BUF_SIZE])
2251{
2252    struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
2253    struct elf_phdr *phdr;
2254    abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
2255    int i, retval;
2256    const char *errmsg;
2257
2258    /* First of all, some simple consistency checks */
2259    errmsg = "Invalid ELF image for this architecture";
2260    if (!elf_check_ident(ehdr)) {
2261        goto exit_errmsg;
2262    }
2263    bswap_ehdr(ehdr);
2264    if (!elf_check_ehdr(ehdr)) {
2265        goto exit_errmsg;
2266    }
2267
2268    i = ehdr->e_phnum * sizeof(struct elf_phdr);
2269    if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
2270        phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
2271    } else {
2272        phdr = (struct elf_phdr *) alloca(i);
2273        retval = pread(image_fd, phdr, i, ehdr->e_phoff);
2274        if (retval != i) {
2275            goto exit_read;
2276        }
2277    }
2278    bswap_phdr(phdr, ehdr->e_phnum);
2279
2280    info->nsegs = 0;
2281    info->pt_dynamic_addr = 0;
2282
2283    mmap_lock();
2284
2285    /* Find the maximum size of the image and allocate an appropriate
2286       amount of memory to handle that.  */
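        /*
         * Worked example (assumed program headers): with two PT_LOAD
         * entries, one with p_vaddr 0x400000 and p_offset 0, the other
         * ending at p_vaddr + p_memsz == 0x605000, the loop below
         * yields loaddr == 0x400000 and hiaddr == 0x605000, so
         * 0x205000 bytes of address space must be found for the image.
         */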
2287    loaddr = -1, hiaddr = 0;
2288    info->alignment = 0;
2289    for (i = 0; i < ehdr->e_phnum; ++i) {
2290        if (phdr[i].p_type == PT_LOAD) {
2291            abi_ulong a = phdr[i].p_vaddr - phdr[i].p_offset;
2292            if (a < loaddr) {
2293                loaddr = a;
2294            }
2295            a = phdr[i].p_vaddr + phdr[i].p_memsz;
2296            if (a > hiaddr) {
2297                hiaddr = a;
2298            }
2299            ++info->nsegs;
2300            info->alignment |= phdr[i].p_align;
2301        }
2302    }
2303
2304    load_addr = loaddr;
2305    if (ehdr->e_type == ET_DYN) {
2306        /* The image indicates that it can be loaded anywhere.  Find a
2307           location that can hold the memory space required.  If the
2308           image is pre-linked, LOADDR will be non-zero.  Since we do
2309           not supply MAP_FIXED here we'll use that address if and
2310           only if it remains available.  */
2311        load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
2312                                MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
2313                                -1, 0);
2314        if (load_addr == -1) {
2315            goto exit_perror;
2316        }
2317    } else if (pinterp_name != NULL) {
2318        /* This is the main executable.  Make sure that the low
2319           address does not conflict with MMAP_MIN_ADDR or the
2320           QEMU application itself.  */
2321        probe_guest_base(image_name, loaddr, hiaddr);
2322    }
2323    load_bias = load_addr - loaddr;
2324
2325    if (elf_is_fdpic(ehdr)) {
2326        struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
2327            g_malloc(sizeof(*loadsegs) * info->nsegs);
2328
2329        for (i = 0; i < ehdr->e_phnum; ++i) {
2330            switch (phdr[i].p_type) {
2331            case PT_DYNAMIC:
2332                info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias;
2333                break;
2334            case PT_LOAD:
2335                loadsegs->addr = phdr[i].p_vaddr + load_bias;
2336                loadsegs->p_vaddr = phdr[i].p_vaddr;
2337                loadsegs->p_memsz = phdr[i].p_memsz;
2338                ++loadsegs;
2339                break;
2340            }
2341        }
2342    }
2343
2344    info->load_bias = load_bias;
2345    info->load_addr = load_addr;
2346    info->entry = ehdr->e_entry + load_bias;
2347    info->start_code = -1;
2348    info->end_code = 0;
2349    info->start_data = -1;
2350    info->end_data = 0;
2351    info->brk = 0;
2352    info->elf_flags = ehdr->e_flags;
2353
2354    for (i = 0; i < ehdr->e_phnum; i++) {
2355        struct elf_phdr *eppnt = phdr + i;
2356        if (eppnt->p_type == PT_LOAD) {
2357            abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em, vaddr_len;
2358            int elf_prot = 0;
2359
2360            if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
2361            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
2362            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
2363
2364            vaddr = load_bias + eppnt->p_vaddr;
2365            vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
2366            vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
2367            vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_filesz + vaddr_po);
2368
2369            error = target_mmap(vaddr_ps, vaddr_len,
2370                                elf_prot, MAP_PRIVATE | MAP_FIXED,
2371                                image_fd, eppnt->p_offset - vaddr_po);
2372            if (error == -1) {
2373                goto exit_perror;
2374            }
2375
2376            vaddr_ef = vaddr + eppnt->p_filesz;
2377            vaddr_em = vaddr + eppnt->p_memsz;
2378
2379            /* If the load segment requests extra zeros (e.g. bss), map it.  */
2380            if (vaddr_ef < vaddr_em) {
2381                zero_bss(vaddr_ef, vaddr_em, elf_prot);
2382            }
2383
2384            /* Find the full program boundaries.  */
2385            if (elf_prot & PROT_EXEC) {
2386                if (vaddr < info->start_code) {
2387                    info->start_code = vaddr;
2388                }
2389                if (vaddr_ef > info->end_code) {
2390                    info->end_code = vaddr_ef;
2391                }
2392            }
2393            if (elf_prot & PROT_WRITE) {
2394                if (vaddr < info->start_data) {
2395                    info->start_data = vaddr;
2396                }
2397                if (vaddr_ef > info->end_data) {
2398                    info->end_data = vaddr_ef;
2399                }
2400                if (vaddr_em > info->brk) {
2401                    info->brk = vaddr_em;
2402                }
2403            }
2404        } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
2405            char *interp_name;
2406
2407            if (*pinterp_name) {
2408                errmsg = "Multiple PT_INTERP entries";
2409                goto exit_errmsg;
2410            }
2411            interp_name = malloc(eppnt->p_filesz);
2412            if (!interp_name) {
2413                goto exit_perror;
2414            }
2415
2416            if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
2417                memcpy(interp_name, bprm_buf + eppnt->p_offset,
2418                       eppnt->p_filesz);
2419            } else {
2420                retval = pread(image_fd, interp_name, eppnt->p_filesz,
2421                               eppnt->p_offset);
2422                if (retval != eppnt->p_filesz) {
2423                    goto exit_perror;
2424                }
2425            }
2426            if (interp_name[eppnt->p_filesz - 1] != 0) {
2427                errmsg = "Invalid PT_INTERP entry";
2428                goto exit_errmsg;
2429            }
2430            *pinterp_name = interp_name;
2431#ifdef TARGET_MIPS
2432        } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) {
2433            Mips_elf_abiflags_v0 abiflags;
2434            if (eppnt->p_filesz < sizeof(Mips_elf_abiflags_v0)) {
2435                errmsg = "Invalid PT_MIPS_ABIFLAGS entry";
2436                goto exit_errmsg;
2437            }
2438            if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
2439                memcpy(&abiflags, bprm_buf + eppnt->p_offset,
2440                       sizeof(Mips_elf_abiflags_v0));
2441            } else {
2442                retval = pread(image_fd, &abiflags, sizeof(Mips_elf_abiflags_v0),
2443                               eppnt->p_offset);
2444                if (retval != sizeof(Mips_elf_abiflags_v0)) {
2445                    goto exit_perror;
2446                }
2447            }
2448            bswap_mips_abiflags(&abiflags);
2449            info->fp_abi = abiflags.fp_abi;
2450#endif
2451        }
2452    }
2453
2454    if (info->end_data == 0) {
2455        info->start_data = info->end_code;
2456        info->end_data = info->end_code;
2457        info->brk = info->end_code;
2458    }
2459
2460    if (qemu_log_enabled()) {
2461        load_symbols(ehdr, image_fd, load_bias);
2462    }
2463
2464    mmap_unlock();
2465
2466    close(image_fd);
2467    return;
2468
2469 exit_read:
2470    if (retval >= 0) {
2471        errmsg = "Incomplete read of file header";
2472        goto exit_errmsg;
2473    }
2474 exit_perror:
2475    errmsg = strerror(errno);
2476 exit_errmsg:
2477    fprintf(stderr, "%s: %s\n", image_name, errmsg);
2478    exit(-1);
2479}
2480
2481static void load_elf_interp(const char *filename, struct image_info *info,
2482                            char bprm_buf[BPRM_BUF_SIZE])
2483{
2484    int fd, retval;
2485
2486    fd = open(path(filename), O_RDONLY);
2487    if (fd < 0) {
2488        goto exit_perror;
2489    }
2490
2491    retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
2492    if (retval < 0) {
2493        goto exit_perror;
2494    }
2495    if (retval < BPRM_BUF_SIZE) {
2496        memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
2497    }
2498
2499    load_elf_image(filename, fd, info, NULL, bprm_buf);
2500    return;
2501
2502 exit_perror:
2503    fprintf(stderr, "%s: %s\n", filename, strerror(errno));
2504    exit(-1);
2505}
2506
2507static int symfind(const void *s0, const void *s1)
2508{
2509    target_ulong addr = *(target_ulong *)s0;
2510    struct elf_sym *sym = (struct elf_sym *)s1;
2511    int result = 0;
2512    if (addr < sym->st_value) {
2513        result = -1;
2514    } else if (addr >= sym->st_value + sym->st_size) {
2515        result = 1;
2516    }
2517    return result;
2518}
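    /*
     * Example: for a symbol with st_value == 0x1000 and st_size ==
     * 0x20, symfind() returns 0 for any address in [0x1000, 0x101f],
     * so the bsearch() in lookup_symbolxx() resolves every address a
     * symbol covers, not just its start address.
     */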
2519
2520static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
2521{
2522#if ELF_CLASS == ELFCLASS32
2523    struct elf_sym *syms = s->disas_symtab.elf32;
2524#else
2525    struct elf_sym *syms = s->disas_symtab.elf64;
2526#endif
2527
2528    /* Binary search over the symbols sorted by st_value.  */
2529    struct elf_sym *sym;
2530
2531    sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
2532    if (sym != NULL) {
2533        return s->disas_strtab + sym->st_name;
2534    }
2535
2536    return "";
2537}
2538
2539/* FIXME: This should use elf_ops.h  */
2540static int symcmp(const void *s0, const void *s1)
2541{
2542    struct elf_sym *sym0 = (struct elf_sym *)s0;
2543    struct elf_sym *sym1 = (struct elf_sym *)s1;
2544    return (sym0->st_value < sym1->st_value)
2545        ? -1
2546        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
2547}
2548
2549/* Best attempt to load symbols from this ELF object. */
2550static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
2551{
2552    int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
2553    uint64_t segsz;
2554    struct elf_shdr *shdr;
2555    char *strings = NULL;
2556    struct syminfo *s = NULL;
2557    struct elf_sym *new_syms, *syms = NULL;
2558
2559    shnum = hdr->e_shnum;
2560    i = shnum * sizeof(struct elf_shdr);
2561    shdr = (struct elf_shdr *)alloca(i);
2562    if (pread(fd, shdr, i, hdr->e_shoff) != i) {
2563        return;
2564    }
2565
2566    bswap_shdr(shdr, shnum);
2567    for (i = 0; i < shnum; ++i) {
2568        if (shdr[i].sh_type == SHT_SYMTAB) {
2569            sym_idx = i;
2570            str_idx = shdr[i].sh_link;
2571            goto found;
2572        }
2573    }
2574
2575    /* There will be no symbol table if the file was stripped.  */
2576    return;
2577
2578 found:
2579    /* Now we know where the strtab and symtab are.  Snarf them.  */
2580    s = g_try_new(struct syminfo, 1);
2581    if (!s) {
2582        goto give_up;
2583    }
2584
2585    segsz = shdr[str_idx].sh_size;
2586    s->disas_strtab = strings = g_try_malloc(segsz);
2587    if (!strings ||
2588        pread(fd, strings, segsz, shdr[str_idx].sh_offset) != segsz) {
2589        goto give_up;
2590    }
2591
2592    segsz = shdr[sym_idx].sh_size;
2593    syms = g_try_malloc(segsz);
2594    if (!syms || pread(fd, syms, segsz, shdr[sym_idx].sh_offset) != segsz) {
2595        goto give_up;
2596    }
2597
2598    if (segsz / sizeof(struct elf_sym) > INT_MAX) {
2599        /* Implausibly large symbol table: give up rather than plough
2600         * on with the symbol count calculation overflowing.
2601         */
2602        goto give_up;
2603    }
2604    nsyms = segsz / sizeof(struct elf_sym);
2605    for (i = 0; i < nsyms; ) {
2606        bswap_sym(syms + i);
2607        /* Throw away entries which we do not need.  */
2608        if (syms[i].st_shndx == SHN_UNDEF
2609            || syms[i].st_shndx >= SHN_LORESERVE
2610            || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
2611            if (i < --nsyms) {
2612                syms[i] = syms[nsyms];
2613            }
2614        } else {
2615#if defined(TARGET_ARM) || defined (TARGET_MIPS)
2616            /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
2617            syms[i].st_value &= ~(target_ulong)1;
2618#endif
2619            syms[i].st_value += load_bias;
2620            i++;
2621        }
2622    }
2623
2624    /* No "useful" symbols remain.  */
2625    if (nsyms == 0) {
2626        goto give_up;
2627    }
2628
2629    /* Attempt to free the storage associated with the local symbols
2630       that we threw away.  Whether or not this has any effect on the
2631       memory allocation depends on the malloc implementation and how
2632       many symbols we managed to discard.  */
2633    new_syms = g_try_renew(struct elf_sym, syms, nsyms);
2634    if (new_syms == NULL) {
2635        goto give_up;
2636    }
2637    syms = new_syms;
2638
2639    qsort(syms, nsyms, sizeof(*syms), symcmp);
2640
2641    s->disas_num_syms = nsyms;
2642#if ELF_CLASS == ELFCLASS32
2643    s->disas_symtab.elf32 = syms;
2644#else
2645    s->disas_symtab.elf64 = syms;
2646#endif
2647    s->lookup_symbol = lookup_symbolxx;
2648    s->next = syminfos;
2649    syminfos = s;
2650
2651    return;
2652
2653give_up:
2654    g_free(s);
2655    g_free(strings);
2656    g_free(syms);
2657}
2658
2659uint32_t get_elf_eflags(int fd)
2660{
2661    struct elfhdr ehdr;
2662    off_t offset;
2663    int ret;
2664
2665    /* Read ELF header */
2666    offset = lseek(fd, 0, SEEK_SET);
2667    if (offset == (off_t) -1) {
2668        return 0;
2669    }
2670    ret = read(fd, &ehdr, sizeof(ehdr));
2671    if (ret < sizeof(ehdr)) {
2672        return 0;
2673    }
2674    offset = lseek(fd, offset, SEEK_SET);
2675    if (offset == (off_t) -1) {
2676        return 0;
2677    }
2678
2679    /* Check ELF signature */
2680    if (!elf_check_ident(&ehdr)) {
2681        return 0;
2682    }
2683
2684    /* check header */
2685    bswap_ehdr(&ehdr);
2686    if (!elf_check_ehdr(&ehdr)) {
2687        return 0;
2688    }
2689
2690    /* return the processor-specific ELF flags */
2691    return ehdr.e_flags;
2692}
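    /*
     * Illustrative use (hypothetical caller, not code from this file):
     *
     *     uint32_t eflags = get_elf_eflags(fd);
     *     if (eflags == 0) {
     *         ... not a usable ELF image for this target ...
     *     }
     *
     * Note that the descriptor is left positioned at the start of the
     * file, since both lseek() calls above seek to offset 0.
     */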
2693
2694int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
2695{
2696    struct image_info interp_info;
2697    struct elfhdr elf_ex;
2698    char *elf_interpreter = NULL;
2699    char *scratch;
2700
2701    info->start_mmap = (abi_ulong)ELF_START_MMAP;
2702
2703    load_elf_image(bprm->filename, bprm->fd, info,
2704                   &elf_interpreter, bprm->buf);
2705
2706    /* ??? We need a copy of the elf header for passing to create_elf_tables.
2707       If we do nothing, we'll have overwritten this when we re-use bprm->buf
2708       when we load the interpreter.  */
2709    elf_ex = *(struct elfhdr *)bprm->buf;
2710
2711    /* Do this so that we can load the interpreter, if need be.  We will
2712       change some of these later */
2713    bprm->p = setup_arg_pages(bprm, info);
2714
2715    scratch = g_new0(char, TARGET_PAGE_SIZE);
2716    if (STACK_GROWS_DOWN) {
2717        bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
2718                                   bprm->p, info->stack_limit);
2719        info->file_string = bprm->p;
2720        bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
2721                                   bprm->p, info->stack_limit);
2722        info->env_strings = bprm->p;
2723        bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
2724                                   bprm->p, info->stack_limit);
2725        info->arg_strings = bprm->p;
2726    } else {
2727        info->arg_strings = bprm->p;
2728        bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
2729                                   bprm->p, info->stack_limit);
2730        info->env_strings = bprm->p;
2731        bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
2732                                   bprm->p, info->stack_limit);
2733        info->file_string = bprm->p;
2734        bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
2735                                   bprm->p, info->stack_limit);
2736    }
2737
2738    g_free(scratch);
2739
2740    if (!bprm->p) {
2741        fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
2742        exit(-1);
2743    }
2744
2745    if (elf_interpreter) {
2746        load_elf_interp(elf_interpreter, &interp_info, bprm->buf);
2747
2748        /* If the program interpreter is one of these two, then assume
2749           an iBCS2 image.  Otherwise assume a native linux image.  */
2750
2751        if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
2752            || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
2753            info->personality = PER_SVR4;
2754
2755            /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
2756               and some applications "depend" upon this behavior.  Since
2757               we do not have the power to recompile these, we emulate
2758               the SVr4 behavior.  Sigh.  */
2759            target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
2760                        MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
2761        }
2762#ifdef TARGET_MIPS
2763        info->interp_fp_abi = interp_info.fp_abi;
2764#endif
2765    }
2766
2767    bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
2768                                info, (elf_interpreter ? &interp_info : NULL));
2769    info->start_stack = bprm->p;
2770
2771    /* If we have an interpreter, set that as the program's entry point.
2772       Copy the load_bias as well, to help PPC64 interpret the entry
2773       point as a function descriptor.  Do this after creating elf tables
2774       so that we copy the original program entry point into the AUXV.  */
2775    if (elf_interpreter) {
2776        info->load_bias = interp_info.load_bias;
2777        info->entry = interp_info.entry;
2778        free(elf_interpreter);
2779    }
2780
2781#ifdef USE_ELF_CORE_DUMP
2782    bprm->core_dump = &elf_core_dump;
2783#endif
2784
2785    return 0;
2786}
2787
2788#ifdef USE_ELF_CORE_DUMP
2789/*
2790 * Definitions to generate Intel SVR4-like core files.
2791 * These mostly have the same names as the SVR4 types with "target_elf_"
2792 * tacked on the front to prevent clashes with linux definitions,
2793 * and the typedef forms have been avoided.  This is mostly like
2794 * the SVR4 structure, but more Linuxy, with things that Linux does
2795 * not support and which gdb doesn't really use excluded.
2796 *
2797 * Fields we don't dump (their contents are zero) in linux-user qemu
2798 * are marked with XXX.
2799 *
2800 * Core dump code is copied from the Linux kernel (fs/binfmt_elf.c).
2801 *
2802 * Porting ELF coredump to a target is a (quite) simple process.  First
2803 * you define USE_ELF_CORE_DUMP in the target ELF code (where init_thread()
2804 * for the target resides):
2805 *
2806 * #define USE_ELF_CORE_DUMP
2807 *
2808 * Next you define the type of register set used for dumping.  The ELF
2809 * specification says it must be an array of elf_greg_t of size ELF_NREG.
2810 *
2811 * typedef <target_regtype> target_elf_greg_t;
2812 * #define ELF_NREG <number of registers>
2813 * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
2814 *
2815 * The last step is to implement a target-specific function that copies
2816 * registers from the given CPU into the register set defined above.  The prototype is:
2817 *
2818 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
2819 *                                const CPUArchState *env);
2820 *
2821 * Parameters:
2822 *     regs - copy register values into here (allocated and zeroed by caller)
2823 *     env - copy registers from here
2824 *
2825 * Example for ARM target is provided in this file.
2826 */
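    /*
     * A minimal sketch of that last step for a hypothetical target
     * with sixteen general-purpose registers and a program counter
     * (env->regs[] and env->pc are assumed names for illustration,
     * not a real QEMU target):
     *
     * typedef abi_ulong target_elf_greg_t;
     * #define ELF_NREG 17
     * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
     *
     * static void elf_core_copy_regs(target_elf_gregset_t *regs,
     *                                const CPUArchState *env)
     * {
     *     int i;
     *
     *     for (i = 0; i < 16; i++) {
     *         (*regs)[i] = tswapreg(env->regs[i]);
     *     }
     *     (*regs)[16] = tswapreg(env->pc);
     * }
     */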
2827
2828/* An ELF note in memory */
2829struct memelfnote {
2830    const char *name;
2831    size_t     namesz;
2832    size_t     namesz_rounded;
2833    int        type;
2834    size_t     datasz;
2835    size_t     datasz_rounded;
2836    void       *data;
2837    size_t     notesz;
2838};
2839
2840struct target_elf_siginfo {
2841    abi_int    si_signo; /* signal number */
2842    abi_int    si_code;  /* extra code */
2843    abi_int    si_errno; /* errno */
2844};
2845
2846struct target_elf_prstatus {
2847    struct target_elf_siginfo pr_info;      /* Info associated with signal */
2848    abi_short          pr_cursig;    /* Current signal */
2849    abi_ulong          pr_sigpend;   /* XXX */
2850    abi_ulong          pr_sighold;   /* XXX */
2851    target_pid_t       pr_pid;
2852    target_pid_t       pr_ppid;
2853    target_pid_t       pr_pgrp;
2854    target_pid_t       pr_sid;
2855    struct target_timeval pr_utime;  /* XXX User time */
2856    struct target_timeval pr_stime;  /* XXX System time */
2857    struct target_timeval pr_cutime; /* XXX Cumulative user time */
2858    struct target_timeval pr_cstime; /* XXX Cumulative system time */
2859    target_elf_gregset_t      pr_reg;       /* GP registers */
2860    abi_int            pr_fpvalid;   /* XXX */
2861};
2862
2863#define ELF_PRARGSZ     (80) /* Number of chars for args */
2864
2865struct target_elf_prpsinfo {
2866    char         pr_state;       /* numeric process state */
2867    char         pr_sname;       /* char for pr_state */
2868    char         pr_zomb;        /* zombie */
2869    char         pr_nice;        /* nice val */
2870    abi_ulong    pr_flag;        /* flags */
2871    target_uid_t pr_uid;
2872    target_gid_t pr_gid;
2873    target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
2874    /* Lots missing */
2875    char    pr_fname[16];           /* filename of executable */
2876    char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
2877};
2878
2879/* Here is the structure in which status of each thread is captured. */
2880struct elf_thread_status {
2881    QTAILQ_ENTRY(elf_thread_status)  ets_link;
2882    struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
2883#if 0
2884    elf_fpregset_t fpu;             /* NT_PRFPREG */
2885    struct task_struct *thread;
2886    elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
2887#endif
2888    struct memelfnote notes[1];
2889    int num_notes;
2890};
2891
2892struct elf_note_info {
2893    struct memelfnote   *notes;
2894    struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
2895    struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */
2896
2897    QTAILQ_HEAD(, elf_thread_status) thread_list;
2898#if 0
2899    /*
2900     * Current version of ELF coredump doesn't support
2901     * dumping fp regs etc.
2902     */
2903    elf_fpregset_t *fpu;
2904    elf_fpxregset_t *xfpu;
2905    int thread_status_size;
2906#endif
2907    int notes_size;
2908    int numnote;
2909};
2910
2911struct vm_area_struct {
2912    target_ulong   vma_start;  /* start vaddr of memory region */
2913    target_ulong   vma_end;    /* end vaddr of memory region */
2914    abi_ulong      vma_flags;  /* protection etc. flags for the region */
2915    QTAILQ_ENTRY(vm_area_struct) vma_link;
2916};
2917
2918struct mm_struct {
2919    QTAILQ_HEAD(, vm_area_struct) mm_mmap;
2920    int mm_count;           /* number of mappings */
2921};
2922
2923static struct mm_struct *vma_init(void);
2924static void vma_delete(struct mm_struct *);
2925static int vma_add_mapping(struct mm_struct *, target_ulong,
2926                           target_ulong, abi_ulong);
2927static int vma_get_mapping_count(const struct mm_struct *);
2928static struct vm_area_struct *vma_first(const struct mm_struct *);
2929static struct vm_area_struct *vma_next(struct vm_area_struct *);
2930static abi_ulong vma_dump_size(const struct vm_area_struct *);
2931static int vma_walker(void *priv, target_ulong start, target_ulong end,
2932                      unsigned long flags);
2933
2934static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
2935static void fill_note(struct memelfnote *, const char *, int,
2936                      unsigned int, void *);
2937static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
2938static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
2939static void fill_auxv_note(struct memelfnote *, const TaskState *);
2940static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
2941static size_t note_size(const struct memelfnote *);
2942static void free_note_info(struct elf_note_info *);
2943static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
2944static void fill_thread_info(struct elf_note_info *, const CPUArchState *);
2945static int core_dump_filename(const TaskState *, char *, size_t);
2946
2947static int dump_write(int, const void *, size_t);
2948static int write_note(struct memelfnote *, int);
2949static int write_note_info(struct elf_note_info *, int);
2950
2951#ifdef BSWAP_NEEDED
2952static void bswap_prstatus(struct target_elf_prstatus *prstatus)
2953{
2954    prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo);
2955    prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code);
2956    prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno);
2957    prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
2958    prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend);
2959    prstatus->pr_sighold = tswapal(prstatus->pr_sighold);
2960    prstatus->pr_pid = tswap32(prstatus->pr_pid);
2961    prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
2962    prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
2963    prstatus->pr_sid = tswap32(prstatus->pr_sid);
2964    /* cpu times are not filled, so we skip them */
2965    /* regs should be in correct format already */
2966    prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
2967}
2968
2969static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
2970{
2971    psinfo->pr_flag = tswapal(psinfo->pr_flag);
2972    psinfo->pr_uid = tswap16(psinfo->pr_uid);
2973    psinfo->pr_gid = tswap16(psinfo->pr_gid);
2974    psinfo->pr_pid = tswap32(psinfo->pr_pid);
2975    psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
2976    psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
2977    psinfo->pr_sid = tswap32(psinfo->pr_sid);
2978}
2979
2980static void bswap_note(struct elf_note *en)
2981{
2982    bswap32s(&en->n_namesz);
2983    bswap32s(&en->n_descsz);
2984    bswap32s(&en->n_type);
2985}
2986#else
2987static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
2988static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
2989static inline void bswap_note(struct elf_note *en) { }
2990#endif /* BSWAP_NEEDED */
2991
2992/*
2993 * Minimal support for Linux memory regions.  These are needed
2994 * when we are finding out what memory exactly belongs to the
2995 * emulated process.  No locks are needed here, as long as the
2996 * thread that received the signal is stopped.
2997 */
2998
2999static struct mm_struct *vma_init(void)
3000{
3001    struct mm_struct *mm;
3002
3003    if ((mm = g_malloc(sizeof (*mm))) == NULL)
3004        return (NULL);
3005
3006    mm->mm_count = 0;
3007    QTAILQ_INIT(&mm->mm_mmap);
3008
3009    return (mm);
3010}
3011
3012static void vma_delete(struct mm_struct *mm)
3013{
3014    struct vm_area_struct *vma;
3015
3016    while ((vma = vma_first(mm)) != NULL) {
3017        QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
3018        g_free(vma);
3019    }
3020    g_free(mm);
3021}
3022
3023static int vma_add_mapping(struct mm_struct *mm, target_ulong start,
3024                           target_ulong end, abi_ulong flags)
3025{
3026    struct vm_area_struct *vma;
3027
3028    if ((vma = g_malloc0(sizeof (*vma))) == NULL)
3029        return (-1);
3030
3031    vma->vma_start = start;
3032    vma->vma_end = end;
3033    vma->vma_flags = flags;
3034
3035    QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
3036    mm->mm_count++;
3037
3038    return (0);
3039}
3040
3041static struct vm_area_struct *vma_first(const struct mm_struct *mm)
3042{
3043    return (QTAILQ_FIRST(&mm->mm_mmap));
3044}
3045
3046static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
3047{
3048    return (QTAILQ_NEXT(vma, vma_link));
3049}
3050
3051static int vma_get_mapping_count(const struct mm_struct *mm)
3052{
3053    return (mm->mm_count);
3054}
3055
3056/*
3057 * Calculate file (dump) size of given memory region.
3058 */
3059static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
3060{
3061    /* if we cannot even read the first page, skip it */
3062    if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
3063        return (0);
3064
3065    /*
3066     * Usually we don't dump executable pages as they contain
3067     * non-writable code that the debugger can read directly from
3068     * the target library etc.  However, thread stacks are also
3069     * marked executable, so we read in the first page of the given
3070     * region and check whether it contains an ELF header.  If there
3071     * is no ELF header, we dump the region.
3072     */
3073    if (vma->vma_flags & PROT_EXEC) {
3074        char page[TARGET_PAGE_SIZE];
3075
3076        copy_from_user(page, vma->vma_start, sizeof (page));
3077        if ((page[EI_MAG0] == ELFMAG0) &&
3078            (page[EI_MAG1] == ELFMAG1) &&
3079            (page[EI_MAG2] == ELFMAG2) &&
3080            (page[EI_MAG3] == ELFMAG3)) {
3081            /*
3082             * The mapping is possibly from an ELF binary.  Don't
3083             * dump it.
3084             */
3085            return (0);
3086        }
3087    }
3088
3089    return (vma->vma_end - vma->vma_start);
3090}
3091
3092static int vma_walker(void *priv, target_ulong start, target_ulong end,
3093                      unsigned long flags)
3094{
3095    struct mm_struct *mm = (struct mm_struct *)priv;
3096
3097    vma_add_mapping(mm, start, end, flags);
3098    return (0);
3099}
3100
3101static void fill_note(struct memelfnote *note, const char *name, int type,
3102                      unsigned int sz, void *data)
3103{
3104    unsigned int namesz;
3105
3106    namesz = strlen(name) + 1;
3107    note->name = name;
3108    note->namesz = namesz;
3109    note->namesz_rounded = roundup(namesz, sizeof (int32_t));
3110    note->type = type;
3111    note->datasz = sz;
3112    note->datasz_rounded = roundup(sz, sizeof (int32_t));
3113
3114    note->data = data;
3115
3116    /*
3117     * We calculate the rounded-up note size here, as specified by
3118     * the ELF document.
3119     */
3120    note->notesz = sizeof (struct elf_note) +
3121        note->namesz_rounded + note->datasz_rounded;
3122}
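    /*
     * Worked example: for the name "CORE" (namesz 5, rounded up to 8)
     * and a payload of sz bytes, fill_note() computes
     * notesz == sizeof(struct elf_note) + 8 + roundup(sz, 4), which is
     * the size the note will occupy in the PT_NOTE segment.
     */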
3123
3124static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
3125                            uint32_t flags)
3126{
3127    (void) memset(elf, 0, sizeof(*elf));
3128
3129    (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
3130    elf->e_ident[EI_CLASS] = ELF_CLASS;
3131    elf->e_ident[EI_DATA] = ELF_DATA;
3132    elf->e_ident[EI_VERSION] = EV_CURRENT;
3133    elf->e_ident[EI_OSABI] = ELF_OSABI;
3134
3135    elf->e_type = ET_CORE;
3136    elf->e_machine = machine;
3137    elf->e_version = EV_CURRENT;
3138    elf->e_phoff = sizeof(struct elfhdr);
3139    elf->e_flags = flags;
3140    elf->e_ehsize = sizeof(struct elfhdr);
3141    elf->e_phentsize = sizeof(struct elf_phdr);
3142    elf->e_phnum = segs;
3143
3144    bswap_ehdr(elf);
3145}
3146
3147static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
3148{
3149    phdr->p_type = PT_NOTE;
3150    phdr->p_offset = offset;
3151    phdr->p_vaddr = 0;
3152    phdr->p_paddr = 0;
3153    phdr->p_filesz = sz;
3154    phdr->p_memsz = 0;
3155    phdr->p_flags = 0;
3156    phdr->p_align = 0;
3157
3158    bswap_phdr(phdr, 1);
3159}
3160
3161static size_t note_size(const struct memelfnote *note)
3162{
3163    return (note->notesz);
3164}
3165
3166static void fill_prstatus(struct target_elf_prstatus *prstatus,
3167                          const TaskState *ts, int signr)
3168{
3169    (void) memset(prstatus, 0, sizeof (*prstatus));
3170    prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
3171    prstatus->pr_pid = ts->ts_tid;
3172    prstatus->pr_ppid = getppid();
3173    prstatus->pr_pgrp = getpgrp();
3174    prstatus->pr_sid = getsid(0);
3175
3176    bswap_prstatus(prstatus);
3177}
3178
3179static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
3180{
3181    char *base_filename;
3182    unsigned int i, len;
3183
3184    (void) memset(psinfo, 0, sizeof (*psinfo));
3185
3186    len = ts->info->arg_end - ts->info->arg_start;
3187    if (len >= ELF_PRARGSZ)
3188        len = ELF_PRARGSZ - 1;
3189    if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
3190        return -EFAULT;
3191    for (i = 0; i < len; i++)
3192        if (psinfo->pr_psargs[i] == 0)
3193            psinfo->pr_psargs[i] = ' ';
3194    psinfo->pr_psargs[len] = 0;
3195
3196    psinfo->pr_pid = getpid();
3197    psinfo->pr_ppid = getppid();
3198    psinfo->pr_pgrp = getpgrp();
3199    psinfo->pr_sid = getsid(0);
3200    psinfo->pr_uid = getuid();
3201    psinfo->pr_gid = getgid();
3202
3203    base_filename = g_path_get_basename(ts->bprm->filename);
3204    /*
3205     * Using strncpy here is fine: at max-length,
3206     * this field is not NUL-terminated.
3207     */
3208    (void) strncpy(psinfo->pr_fname, base_filename,
3209                   sizeof(psinfo->pr_fname));
3210
3211    g_free(base_filename);
3212    bswap_psinfo(psinfo);
3213    return (0);
3214}
3215
3216static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
3217{
3218    elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
3219    elf_addr_t orig_auxv = auxv;
3220    void *ptr;
3221    int len = ts->info->auxv_len;
3222
3223    /*
3224     * The auxiliary vector is stored on the target process's stack.  It
3225     * contains {type, value} pairs that we need to dump into the note.
3226     * This is not strictly necessary, but we do it for completeness.
3227     */
3228
3229    /* read in the whole auxv vector and copy it into the memelfnote */
3230    ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
3231    if (ptr != NULL) {
3232        fill_note(note, "CORE", NT_AUXV, len, ptr);
3233        unlock_user(ptr, auxv, len);
3234    }
3235}
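
/*
 * For illustration: the NT_AUXV data is the raw auxv block, i.e. a
 * sequence of { a_type, a_val } pairs such as { AT_PHDR, ... },
 * { AT_PAGESZ, <target page size> } and { AT_ENTRY, ... }, terminated
 * by { AT_NULL, 0 }, exactly as it was placed on the target stack at
 * load time.
 */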
3236
3237/*
3238 * Construct the name of the coredump file.  We use the following
3239 * naming convention:
3240 *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
3241 *
3242 * Returns 0 on success, -1 otherwise (errno is set).
3243 */
3244static int core_dump_filename(const TaskState *ts, char *buf,
3245                              size_t bufsize)
3246{
3247    char timestamp[64];
3248    char *base_filename = NULL;
3249    struct timeval tv;
3250    struct tm tm;
3251
3252    assert(bufsize >= PATH_MAX);
3253
3254    if (gettimeofday(&tv, NULL) < 0) {
3255        (void) fprintf(stderr, "unable to get current timestamp: %s",
3256                       strerror(errno));
3257        return (-1);
3258    }
3259
3260    base_filename = g_path_get_basename(ts->bprm->filename);
3261    (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
3262                    localtime_r(&tv.tv_sec, &tm));
3263    (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
3264                    base_filename, timestamp, (int)getpid());
3265    g_free(base_filename);
3266
3267    return (0);
3268}
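
/*
 * For illustration (hypothetical values): a target "/bin/ls" running
 * as pid 1234, dumped on 2016-05-27 at 14:30:05, would produce
 *     qemu_ls_20160527-143005_1234.core
 */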
3269
3270static int dump_write(int fd, const void *ptr, size_t size)
3271{
3272    const char *bufp = (const char *)ptr;
3273    ssize_t bytes_written, bytes_left;
3274    struct rlimit dumpsize;
3275    off_t pos;
3276
3277    bytes_written = 0;
3278    getrlimit(RLIMIT_CORE, &dumpsize);
3279    if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
3280        if (errno == ESPIPE) { /* not a seekable stream */
3281            bytes_left = size;
3282        } else {
3283            return pos;
3284        }
3285    } else {
3286        if (dumpsize.rlim_cur <= pos) {
3287            return -1;
3288        } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
3289            bytes_left = size;
3290        } else {
3291            size_t limit_left = dumpsize.rlim_cur - pos;
3292            bytes_left = limit_left >= size ? size : limit_left;
3293        }
3294    }
3295
3296    /*
3297     * Under normal conditions a single write(2) should suffice, but
3298     * for sockets etc. this loop is more portable.
3299     */
3300    do {
3301        bytes_written = write(fd, bufp, bytes_left);
3302        if (bytes_written < 0) {
3303            if (errno == EINTR)
3304                continue;
3305            return (-1);
3306        } else if (bytes_written == 0) { /* eof */
3307            return (-1);
3308        }
3309        bufp += bytes_written;
3310        bytes_left -= bytes_written;
3311    } while (bytes_left > 0);
3312
3313    return (0);
3314}
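
/*
 * Worked example of the RLIMIT_CORE handling above (hypothetical
 * numbers): with rlim_cur == 1048576 (1 MiB) and the file position
 * at 1044480, limit_left is 4096, so a 16384-byte request is
 * truncated to the remaining 4096 bytes; once the position reaches
 * the limit, dump_write() fails with -1 instead of letting the core
 * file grow past the configured limit.
 */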
3315
3316static int write_note(struct memelfnote *men, int fd)
3317{
3318    struct elf_note en;
3319
3320    en.n_namesz = men->namesz;
3321    en.n_type = men->type;
3322    en.n_descsz = men->datasz;
3323
3324    bswap_note(&en);
3325
3326    if (dump_write(fd, &en, sizeof(en)) != 0)
3327        return (-1);
3328    if (dump_write(fd, men->name, men->namesz_rounded) != 0)
3329        return (-1);
3330    if (dump_write(fd, men->data, men->datasz_rounded) != 0)
3331        return (-1);
3332
3333    return (0);
3334}
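
/*
 * For illustration: each note is laid out in the file as the
 * (byte-swapped) elf_note header followed by the padded name and
 * padded descriptor, e.g. for a "CORE"/NT_PRSTATUS note:
 *     { n_namesz = 5, n_descsz = sizeof(prstatus), n_type = NT_PRSTATUS }
 *     "CORE\0" plus 3 bytes of padding
 *     prstatus data, padded to a 4-byte multiple
 */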
3335
3336static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
3337{
3338    CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
3339    TaskState *ts = (TaskState *)cpu->opaque;
3340    struct elf_thread_status *ets;
3341
3342    ets = g_malloc0(sizeof (*ets));
3343    ets->num_notes = 1; /* only prstatus is dumped */
3344    fill_prstatus(&ets->prstatus, ts, 0);
3345    elf_core_copy_regs(&ets->prstatus.pr_reg, env);
3346    fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
3347              &ets->prstatus);
3348
3349    QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
3350
3351    info->notes_size += note_size(&ets->notes[0]);
3352}
3353
3354static void init_note_info(struct elf_note_info *info)
3355{
3356    /* Initialize the elf_note_info structure so that it is at
3357     * least safe to call free_note_info() on it. Must be
3358     * called before calling fill_note_info().
3359     */
3360    memset(info, 0, sizeof (*info));
3361    QTAILQ_INIT(&info->thread_list);
3362}
3363
3364static int fill_note_info(struct elf_note_info *info,
3365                          long signr, const CPUArchState *env)
3366{
3367#define NUMNOTES 3
3368    CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
3369    TaskState *ts = (TaskState *)cpu->opaque;
3370    int i;
3371
3372    info->notes = g_new0(struct memelfnote, NUMNOTES);
3373    if (info->notes == NULL)
3374        return (-ENOMEM);
3375    info->prstatus = g_malloc0(sizeof (*info->prstatus));
3376    if (info->prstatus == NULL)
3377        return (-ENOMEM);
3378    info->psinfo = g_malloc0(sizeof (*info->psinfo));
3379    if (info->psinfo == NULL)
3380        return (-ENOMEM);
3381
3382    /*
3383     * First fill in status (and registers) of current thread
3384     * including process info & aux vector.
3385     */
3386    fill_prstatus(info->prstatus, ts, signr);
3387    elf_core_copy_regs(&info->prstatus->pr_reg, env);
3388    fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
3389              sizeof (*info->prstatus), info->prstatus);
3390    fill_psinfo(info->psinfo, ts);
3391    fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
3392              sizeof (*info->psinfo), info->psinfo);
3393    fill_auxv_note(&info->notes[2], ts);
3394    info->numnote = NUMNOTES;
3395
3396    info->notes_size = 0;
3397    for (i = 0; i < info->numnote; i++)
3398        info->notes_size += note_size(&info->notes[i]);
3399
3400    /* read and fill status of all threads */
3401    cpu_list_lock();
3402    CPU_FOREACH(cpu) {
3403        if (cpu == thread_cpu) {
3404            continue;
3405        }
3406        fill_thread_info(info, (CPUArchState *)cpu->env_ptr);
3407    }
3408    cpu_list_unlock();
3409
3410    return (0);
3411}
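
/*
 * For illustration: after fill_note_info() returns for a two-thread
 * process, notes[0..2] hold NT_PRSTATUS, NT_PRPSINFO and NT_AUXV for
 * the faulting thread, thread_list holds one elf_thread_status entry
 * with a single NT_PRSTATUS note for the other thread, and
 * notes_size is the sum of the four rounded note sizes.
 */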
3412
3413static void free_note_info(struct elf_note_info *info)
3414{
3415    struct elf_thread_status *ets;
3416
3417    while (!QTAILQ_EMPTY(&info->thread_list)) {
3418        ets = QTAILQ_FIRST(&info->thread_list);
3419        QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
3420        g_free(ets);
3421    }
3422
3423    g_free(info->prstatus);
3424    g_free(info->psinfo);
3425    g_free(info->notes);
3426}
3427
3428static int write_note_info(struct elf_note_info *info, int fd)
3429{
3430    struct elf_thread_status *ets;
3431    int i, error = 0;
3432
3433    /* write prstatus, psinfo and auxv for current thread */
3434    for (i = 0; i < info->numnote; i++)
3435        if ((error = write_note(&info->notes[i], fd)) != 0)
3436            return (error);
3437
3438    /* write prstatus for each thread */
3439    QTAILQ_FOREACH(ets, &info->thread_list, ets_link) {
3440        if ((error = write_note(&ets->notes[0], fd)) != 0)
3441            return (error);
3442    }
3443
3444    return (0);
3445}
3446
3447/*
3448 * Write out ELF coredump.
3449 *
3450 * See documentation of ELF object file format in:
3451 * http://www.caldera.com/developers/devspecs/gabi41.pdf
3452 *
3453 * The Linux coredump format is as follows:
3454 *
3455 * 0   +----------------------+         \
3456 *     | ELF header           | ET_CORE  |
3457 *     +----------------------+          |
3458 *     | ELF program headers  |          |--- headers
3459 *     | - NOTE section       |          |
3460 *     | - PT_LOAD sections   |          |
3461 *     +----------------------+         /
3462 *     | NOTEs:               |
3463 *     | - NT_PRSTATUS        |
3464 *     | - NT_PRPSINFO        |
3465 *     | - NT_AUXV            |
3466 *     +----------------------+ <-- aligned to target page
3467 *     | Process memory dump  |
3468 *     :                      :
3469 *     .                      .
3470 *     :                      :
3471 *     |                      |
3472 *     +----------------------+
3473 *
3474 * NT_PRSTATUS -> struct elf_prstatus (per thread)
3475 * NT_PRPSINFO -> struct elf_prpsinfo
3476 * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()).
3477 *
3478 * The format follows the System V format as closely as possible.
3479 * Current limitations of this version:
3480 *     - no floating point registers are dumped
3481 *
3482 * Function returns 0 in case of success, negative errno otherwise.
3483 *
3484 * TODO: make this work at runtime as well: it should be possible
3485 * to force a coredump of a running process and then continue
3486 * execution.  For example, qemu could install a SIGUSR2 handler
3487 * (provided the target process has not registered its own handler
3488 * for that signal) that performs the dump when the signal arrives.
3489 */
3490static int elf_core_dump(int signr, const CPUArchState *env)
3491{
3492    const CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
3493    const TaskState *ts = (const TaskState *)cpu->opaque;
3494    struct vm_area_struct *vma = NULL;
3495    char corefile[PATH_MAX];
3496    struct elf_note_info info;
3497    struct elfhdr elf;
3498    struct elf_phdr phdr;
3499    struct rlimit dumpsize;
3500    struct mm_struct *mm = NULL;
3501    off_t offset = 0, data_offset = 0;
3502    int segs = 0;
3503    int fd = -1;
3504
3505    init_note_info(&info);
3506
3507    errno = 0;
3508    getrlimit(RLIMIT_CORE, &dumpsize);
3509    if (dumpsize.rlim_cur == 0)
3510        return 0;
3511
3512    if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
3513        return (-errno);
3514
3515    if ((fd = open(corefile, O_WRONLY | O_CREAT,
3516                   S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
3517        return (-errno);
3518
3519    /*
3520     * Walk through the target process's memory mappings and
3521     * set up a structure describing them.  After this point
3522     * the vma_xxx functions can be used.
3523     */
3524    if ((mm = vma_init()) == NULL)
3525        goto out;
3526
3527    walk_memory_regions(mm, vma_walker);
3528    segs = vma_get_mapping_count(mm);
3529
3530    /*
3531     * Construct a valid coredump ELF header.  We also
3532     * add one more segment for the notes.
3533     */
3534    fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
3535    if (dump_write(fd, &elf, sizeof (elf)) != 0)
3536        goto out;
3537
3538    /* fill in the in-memory version of notes */
3539    if (fill_note_info(&info, signr, env) < 0)
3540        goto out;
3541
3542    offset += sizeof (elf);                             /* elf header */
3543    offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */
3544
3545    /* write out notes program header */
3546    fill_elf_note_phdr(&phdr, info.notes_size, offset);
3547
3548    offset += info.notes_size;
3549    if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
3550        goto out;
3551
3552    /*
3553     * The ELF specification requires data to start at a page
3554     * boundary, so we align it here.
3555     */
3556    data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);
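    /*
     * Worked example (illustrative, 64-bit target with segs == 10):
     * the ELF header takes 64 bytes and the 11 program headers
     * 11 * 56 == 616 bytes; after adding info.notes_size, roundup()
     * bumps the offset to the next multiple of ELF_EXEC_PAGESIZE
     * (e.g. 4096), and that is where the PT_LOAD data begins.
     */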
3557
3558    /*
3559     * Write program headers for memory regions mapped in
3560     * the target process.
3561     */
3562    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
3563        (void) memset(&phdr, 0, sizeof (phdr));
3564
3565        phdr.p_type = PT_LOAD;
3566        phdr.p_offset = offset;
3567        phdr.p_vaddr = vma->vma_start;
3568        phdr.p_paddr = 0;
3569        phdr.p_filesz = vma_dump_size(vma);
3570        offset += phdr.p_filesz;
3571        phdr.p_memsz = vma->vma_end - vma->vma_start;
3572        phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
3573        if (vma->vma_flags & PROT_WRITE)
3574            phdr.p_flags |= PF_W;
3575        if (vma->vma_flags & PROT_EXEC)
3576            phdr.p_flags |= PF_X;
3577        phdr.p_align = ELF_EXEC_PAGESIZE;
3578
3579        bswap_phdr(&phdr, 1);
3580        if (dump_write(fd, &phdr, sizeof(phdr)) != 0) {
3581            goto out;
3582        }
3583    }
3584
3585    /*
3586     * Next we write the notes, just after the program headers.
3587     * No alignment is needed here.
3588     */
3589    if (write_note_info(&info, fd) < 0)
3590        goto out;
3591
3592    /* align data to page boundary */
3593    if (lseek(fd, data_offset, SEEK_SET) != data_offset)
3594        goto out;
3595
3596    /*
3597     * Finally we can dump the process memory into the corefile as well.
3598     */
3599    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
3600        abi_ulong addr;
3601        abi_ulong end;
3602
3603        end = vma->vma_start + vma_dump_size(vma);
3604
3605        for (addr = vma->vma_start; addr < end;
3606             addr += TARGET_PAGE_SIZE) {
3607            char page[TARGET_PAGE_SIZE];
3608            int error;
3609
3610            /*
3611             * Read in a page from target process memory and
3612             * write it to the coredump file.
3613             */
3614            error = copy_from_user(page, addr, sizeof (page));
3615            if (error != 0) {
3616                (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
3617                               addr);
3618                errno = -error;
3619                goto out;
3620            }
3621            if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
3622                goto out;
3623        }
3624    }
3625
3626 out:
3627    free_note_info(&info);
3628    if (mm != NULL)
3629        vma_delete(mm);
3630    (void) close(fd);
3631
3632    if (errno != 0)
3633        return (-errno);
3634    return (0);
3635}
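
/*
 * A quick, illustrative way to sanity-check a dump produced above
 * (hypothetical file name):
 *
 *     readelf -h qemu_ls_20160527-143005_1234.core   # ET_CORE header
 *     readelf -l qemu_ls_20160527-143005_1234.core   # PT_NOTE/PT_LOAD
 *     readelf -n qemu_ls_20160527-143005_1234.core   # NT_PRSTATUS etc.
 */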
3636#endif /* USE_ELF_CORE_DUMP */
3637
3638void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
3639{
3640    init_thread(regs, infop);
3641}
3642