qemu/linux-user/arm/cpu_loop.c
/*
 *  qemu user cpu loop
 *
 *  Copyright (c) 2003-2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "elf.h"
#include "cpu_loop-common.h"
#include "signal-common.h"
#include "semihosting/common-semi.h"
#include "target/arm/syndrome.h"

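/*
 * Code fetches and data accesses can use different byte orders on Arm.
 * The *_code_* macros below swap according to SCTLR.B (legacy BE32-style
 * instruction ordering), while the *_data_* macros swap according to the
 * current data endianness (CPSR.E / BE8).  This is a summary note; see
 * arm_sctlr_b() and arm_cpu_bswap_data() for the authoritative conditions.
 */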
#define get_user_code_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_code_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && bswap_code(arm_sctlr_b(env))) {     \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u32(x, gaddr, env)                \
    ({ abi_long __r = get_user_u32((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap32(x);                           \
        }                                               \
        __r;                                            \
    })

#define get_user_data_u16(x, gaddr, env)                \
    ({ abi_long __r = get_user_u16((x), (gaddr));       \
        if (!__r && arm_cpu_bswap_data(env)) {          \
            (x) = bswap16(x);                           \
        }                                               \
        __r;                                            \
    })

#define put_user_data_u32(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap32(__x);                         \
        }                                               \
        put_user_u32(__x, (gaddr));                     \
    })

#define put_user_data_u16(x, gaddr, env)                \
    ({ typeof(x) __x = (x);                             \
        if (arm_cpu_bswap_data(env)) {                  \
            __x = bswap16(__x);                         \
        }                                               \
        put_user_u16(__x, (gaddr));                     \
    })

/*
 * Similar to code in accel/tcg/user-exec.c, but outside the execution loop.
 * Must be called with the mmap_lock held.
 * For the fault PC we report the helper's entry address, which is as good
 * as anything; on a real kernel what you get depends on which mode it uses.
 */
static void *atomic_mmu_lookup(CPUArchState *env, uint32_t addr, int size)
{
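    /*
     * PAGE_WRITE_ORG is checked rather than PAGE_WRITE so that pages which
     * QEMU has temporarily write-protected (to detect writes to memory
     * containing translated code) still count as writable for the guest.
     */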
    int need_flags = PAGE_READ | PAGE_WRITE_ORG | PAGE_VALID;
    int page_flags;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & (size - 1))) {
        force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
        return NULL;
    }

    page_flags = page_get_flags(addr);
    if (unlikely((page_flags & need_flags) != need_flags)) {
        force_sig_fault(TARGET_SIGSEGV,
                        page_flags & PAGE_VALID ?
                        TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR, addr);
        return NULL;
    }

    return g2h(env_cpu(env), addr);
}

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.rst
 * Input:
 * r0 = oldval
 * r1 = newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 */
static void arm_kernel_cmpxchg32_helper(CPUARMState *env)
{
    uint32_t oldval, newval, val, addr, cpsr, *host_addr;

    oldval = env->regs[0];
    newval = env->regs[1];
    addr = env->regs[2];

    mmap_lock();
    host_addr = atomic_mmu_lookup(env, addr, 4);
    if (!host_addr) {
        mmap_unlock();
        return;
    }

    val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval);
    mmap_unlock();

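    /*
     * Per the kuser helper ABI described above: success leaves the C flag
     * set and r0 == 0, failure leaves C clear and r0 nonzero (-1 here).
     */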
    cpsr = (val == oldval) * CPSR_C;
    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    env->regs[0] = cpsr ? 0 : -1;
}

/*
 * See the Linux kernel's Documentation/arm/kernel_user_helpers.rst
 * Input:
 * r0 = pointer to oldval
 * r1 = pointer to newval
 * r2 = pointer to target value
 *
 * Output:
 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
 * C set if *ptr was changed, clear if no exchange happened
 *
 * Note segv's in kernel helpers are a bit tricky, we can set the
 * data address sensibly but the PC address is just the entry point.
 */
static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
{
    uint64_t oldval, newval, val;
    uint32_t addr, cpsr;
    uint64_t *host_addr;

    addr = env->regs[0];
    if (get_user_u64(oldval, addr)) {
        goto segv;
    }

    addr = env->regs[1];
    if (get_user_u64(newval, addr)) {
        goto segv;
    }

    mmap_lock();
    addr = env->regs[2];
    host_addr = atomic_mmu_lookup(env, addr, 8);
    if (!host_addr) {
        mmap_unlock();
        return;
    }

#ifdef CONFIG_ATOMIC64
    val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval);
    cpsr = (val == oldval) * CPSR_C;
#else
    /*
     * This only works between threads, not between processes, but since
     * the host has no 64-bit cmpxchg, it is the best that we can do.
     */
    start_exclusive();
    val = *host_addr;
    if (val == oldval) {
        *host_addr = newval;
        cpsr = CPSR_C;
    } else {
        cpsr = 0;
    }
    end_exclusive();
#endif
    mmap_unlock();

    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
    env->regs[0] = cpsr ? 0 : -1;
    return;

 segv:
    force_sig_fault(TARGET_SIGSEGV,
                    page_get_flags(addr) & PAGE_VALID ?
                    TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR, addr);
}

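/*
 * Illustrative guest-side usage of the helpers dispatched below, paraphrased
 * from the kernel's kernel_user_helpers.rst (the prototypes shown here are
 * a sketch of that documentation, not part of QEMU itself):
 *
 *   typedef int (__kernel_cmpxchg_t)(int oldval, int newval,
 *                                    volatile int *ptr);
 *   #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 *   typedef void *(__kernel_get_tls_t)(void);
 *   #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Guest code simply calls these fixed addresses in the commpage; QEMU
 * intercepts execution there and emulates the helpers here.
 */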
/* Handle a jump to the kernel code page.  */
static int
do_kernel_trap(CPUARMState *env)
{
    uint32_t addr;

    switch (env->regs[15]) {
    case 0xffff0fa0: /* __kernel_memory_barrier */
        smp_mb();
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
        arm_kernel_cmpxchg32_helper(env);
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = cpu_get_tls(env);
        break;
    case 0xffff0f60: /* __kernel_cmpxchg64 */
        arm_kernel_cmpxchg64_helper(env);
        break;

    default:
        return 1;
    }
    /* Jump back to the caller.  */
    addr = env->regs[14];
    if (addr & 1) {
        env->thumb = true;
        addr &= ~1;
    }
    env->regs[15] = addr;

    return 0;
}

static bool insn_is_linux_bkpt(uint32_t opcode, bool is_thumb)
{
    /*
     * Return true if this insn is one of the three magic UDF insns
     * which the kernel treats as breakpoint insns.
     */
    if (!is_thumb) {
        return (opcode & 0x0fffffff) == 0x07f001f0;
    } else {
        /*
         * Note that we get the two halves of the 32-bit T32 insn
         * in the opposite order to the value the kernel uses in
         * its undef_hook struct.
         */
        return ((opcode & 0xffff) == 0xde01) || (opcode == 0xa000f7f0);
    }
}

static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode)
{
    TaskState *ts = env_cpu(env)->opaque;
    int rc = EmulateAll(opcode, &ts->fpa, env);
    int raise, enabled;

    if (rc == 0) {
        /* Illegal instruction */
        return false;
    }
    if (rc > 0) {
        /* Everything ok. */
        env->regs[15] += 4;
        return true;
    }

    /* FP exception */
    rc = -rc;
    raise = 0;

    /* Translate softfloat flags to FPSR flags */
    if (rc & float_flag_invalid) {
        raise |= BIT_IOC;
    }
    if (rc & float_flag_divbyzero) {
        raise |= BIT_DZC;
    }
    if (rc & float_flag_overflow) {
        raise |= BIT_OFC;
    }
    if (rc & float_flag_underflow) {
        raise |= BIT_UFC;
    }
    if (rc & float_flag_inexact) {
        raise |= BIT_IXC;
    }

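    /*
     * In the FPA FPSR the exception trap enable bits sit in bits 16..20,
     * mirroring the cumulative exception flag bits 0..4 (the BIT_* values
     * used above), hence the shift by 16 below.
     */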
    /* Accumulate unenabled exceptions */
    enabled = ts->fpa.fpsr >> 16;
    ts->fpa.fpsr |= raise & ~enabled;

    if (raise & enabled) {
        /*
         * The kernel's nwfpe emulator does not pass a real si_code.
         * It merely uses send_sig(SIGFPE, current, 1), which results in
         * __send_signal() filling out SI_KERNEL with pid and uid 0 (under
         * the "SEND_SIG_PRIV" case). That's what our force_sig() does.
         */
        force_sig(TARGET_SIGFPE);
    } else {
        env->regs[15] += 4;
    }
    return true;
}

void cpu_loop(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);
    int trapnr, si_signo, si_code;
    unsigned int n, insn;
    abi_ulong ret;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case EXCP_UDEF:
        case EXCP_NOCP:
        case EXCP_INVSTATE:
            {
                uint32_t opcode;

                /*
                 * We handle FPU emulation here, as the Linux kernel does:
                 * first fetch the opcode.
                 */
                /* FIXME - what to do if get_user() fails? */
                get_user_code_u32(opcode, env->regs[15], env);

                /*
                 * The Linux kernel treats some UDF patterns specially
                 * to use as breakpoints (instead of the architectural
                 * bkpt insn). These should trigger a SIGTRAP rather
                 * than SIGILL.
                 */
                if (insn_is_linux_bkpt(opcode, env->thumb)) {
                    goto excp_debug;
                }

                if (!env->thumb && emulate_arm_fpa11(env, opcode)) {
                    break;
                }

                force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN,
                                env->regs[15]);
            }
            break;
        case EXCP_SWI:
            {
                env->eabi = 1;
                /* system call */
                if (env->thumb) {
                    /* Thumb is always EABI style with syscall number in r7 */
                    n = env->regs[7];
                } else {
                    /*
                     * Equivalent of kernel CONFIG_OABI_COMPAT: read the
                     * Arm SVC insn to extract the immediate, which is the
                     * syscall number in OABI.
                     */
                    /* FIXME - what to do if get_user() fails? */
                    get_user_code_u32(insn, env->regs[15] - 4, env);
                    n = insn & 0xffffff;
                    if (n == 0) {
                        /* zero immediate: EABI, syscall number in r7 */
                        n = env->regs[7];
                    } else {
                        /*
                         * This XOR matches the kernel code: an immediate
                         * in the valid range (0x900000 .. 0x9fffff) is
                         * converted into the correct EABI-style syscall
                         * number; invalid immediates end up as values
                         * > 0xfffff and are handled below as out-of-range.
                         */
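                        /*
                         * For example, an OABI "swi 0x900004" (write)
                         * gives n = 0x900004 ^ 0x900000 = 4 here.
                         */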
                        n ^= ARM_SYSCALL_BASE;
                        env->eabi = 0;
                    }
                }

                if (n > ARM_NR_BASE) {
                    switch (n) {
                    case ARM_NR_cacheflush:
                        /* nop */
                        break;
                    case ARM_NR_set_tls:
                        cpu_set_tls(env, env->regs[0]);
                        env->regs[0] = 0;
                        break;
                    case ARM_NR_breakpoint:
                        env->regs[15] -= env->thumb ? 2 : 4;
                        goto excp_debug;
                    case ARM_NR_get_tls:
                        env->regs[0] = cpu_get_tls(env);
                        break;
                    default:
                        if (n < 0xf0800) {
                            /*
                             * Syscalls 0xf0000..0xf07ff (or 0x9f0000..
                             * 0x9f07ff in OABI numbering) are defined
                             * to return -ENOSYS rather than raising
                             * SIGILL. Note that we have already
                             * removed the 0x900000 prefix.
                             */
                            qemu_log_mask(LOG_UNIMP,
                                "qemu: Unsupported ARM syscall: 0x%x\n",
                                          n);
                            env->regs[0] = -TARGET_ENOSYS;
                        } else {
                            /*
                             * Otherwise SIGILL. This includes any SWI with
                             * immediate not originally 0x9fxxxx, because
                             * of the earlier XOR.
                             * Like the real kernel, we report the addr of the
                             * SWI in the siginfo si_addr but leave the PC
                             * pointing at the insn after the SWI.
                             */
                            abi_ulong faultaddr = env->regs[15];
                            faultaddr -= env->thumb ? 2 : 4;
                            force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLTRP,
                                            faultaddr);
                        }
                        break;
                    }
                } else {
                    ret = do_syscall(env,
                                     n,
                                     env->regs[0],
                                     env->regs[1],
                                     env->regs[2],
                                     env->regs[3],
                                     env->regs[4],
                                     env->regs[5],
                                     0, 0);
                    if (ret == -QEMU_ERESTARTSYS) {
                        env->regs[15] -= env->thumb ? 2 : 4;
                    } else if (ret != -QEMU_ESIGRETURN) {
                        env->regs[0] = ret;
                    }
                }
            }
            break;
        case EXCP_SEMIHOST:
            do_common_semihosting(cs);
            env->regs[15] += env->thumb ? 2 : 4;
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_PREFETCH_ABORT:
        case EXCP_DATA_ABORT:
            /* For user-only we don't set TTBCR_EAE, so look at the FSR. */
            switch (env->exception.fsr & 0x1f) {
            case 0x1: /* Alignment */
                si_signo = TARGET_SIGBUS;
                si_code = TARGET_BUS_ADRALN;
                break;
            case 0x3: /* Access flag fault, level 1 */
            case 0x6: /* Access flag fault, level 2 */
            case 0x9: /* Domain fault, level 1 */
            case 0xb: /* Domain fault, level 2 */
            case 0xd: /* Permission fault, level 1 */
            case 0xf: /* Permission fault, level 2 */
                si_signo = TARGET_SIGSEGV;
                si_code = TARGET_SEGV_ACCERR;
                break;
            case 0x5: /* Translation fault, level 1 */
            case 0x7: /* Translation fault, level 2 */
                si_signo = TARGET_SIGSEGV;
                si_code = TARGET_SEGV_MAPERR;
                break;
            default:
                g_assert_not_reached();
            }
            force_sig_fault(si_signo, si_code, env->exception.vaddress);
            break;
        case EXCP_DEBUG:
        case EXCP_BKPT:
        excp_debug:
            force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->regs[15]);
            break;
        case EXCP_KERNEL_TRAP:
            if (do_kernel_trap(env)) {
                goto error;
            }
            break;
        case EXCP_YIELD:
            /* nothing to do here for user-mode, just resume guest code */
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
        error:
            EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}

void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    struct image_info *info = ts->info;
    int i;

    cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
               CPSRWriteByInstr);
    for (i = 0; i < 16; i++) {
        env->regs[i] = regs->uregs[i];
    }
#if TARGET_BIG_ENDIAN
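    /*
     * For big-endian guests: BE8 binaries (EABI >= 4 with EF_ARM_BE8) use
     * big-endian data but little-endian code, selected via CPSR.E and
     * SCTLR.E0E; older binaries get legacy BE32 behaviour via SCTLR.B.
     */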
    /* Enable BE8.  */
    if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
        && (info->elf_flags & EF_ARM_BE8)) {
        env->uncached_cpsr |= CPSR_E;
        env->cp15.sctlr_el[1] |= SCTLR_E0E;
    } else {
        env->cp15.sctlr_el[1] |= SCTLR_B;
    }
    arm_rebuild_hflags(env);
#endif

    ts->stack_base = info->start_stack;
    ts->heap_base = info->brk;
    /* This will be filled in on the first SYS_HEAPINFO call.  */
    ts->heap_limit = 0;
}