qemu/target/arm/op_helper.c
/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

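/*
 * Raise an exception targeting the given EL, redirecting NS EL1
 * exceptions to EL2 when HCR_EL2.TGE applies, then exit to the main
 * loop via cpu_loop_exit(). This function does not return.
 */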
void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0487C.a D1.10.4).
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    /*
     * restore_state_to_opc() will set env->exception.syndrome, so
     * we must restore CPU state here before setting the syndrome
     * the caller passed us, and cannot use cpu_loop_exit_restore().
     */
    cpu_restore_state(cs, ra, true);
    raise_exception(env, excp, syndrome, target_el);
}

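/*
 * Neon VTBL/VTBX table lookup: desc encodes the number of table
 * registers minus one in bits [1:0] and the first table register in the
 * remaining bits. Each byte of ireg indexes a byte in the table; an
 * out-of-range index selects the corresponding byte of def instead
 * (the old destination value for VTBX; the translator passes zeroes
 * for VTBL).
 */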
uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
                          uint64_t ireg, uint64_t def)
{
    uint64_t tmp, val = 0;
    uint32_t maxindex = ((desc & 3) + 1) * 8;
    uint32_t base_reg = desc >> 2;
    uint32_t shift, index, reg;

    for (shift = 0; shift < 64; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            reg = base_reg + (index >> 3);
            tmp = *aa32_vfp_dreg(env, reg);
            tmp = ((tmp >> ((index & 7) << 3)) & 0xff) << shift;
        } else {
            tmp = def & (0xffull << shift);
        }
        val |= tmp;
    }
    return val;
}

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use raise_exception_ra() so
         * that cpu_restore_state() will sort them out.
         */
        raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
    }
}

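/*
 * Sticky-overflow (Q flag) arithmetic helpers. add_setq sets Q on
 * signed overflow but returns the wrapped result, as e.g. the SMLAxy
 * accumulate step requires; the *_saturate helpers clamp the result
 * as well as setting Q.
 */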
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/*
 * Signed saturation: clamp val to the (shift + 1)-bit signed range
 * [-2^shift, 2^shift - 1], setting QF if it saturates. For example,
 * do_ssat(env, 300, 7) sets QF and returns 127.
 */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/*
 * Unsigned saturation: clamp val to the unsigned range [0, 2^shift - 1],
 * setting QF if it saturates.
 */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

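/*
 * SETEND: invert the CPSR.E (data endianness) bit. The translator only
 * emits this call when the instruction actually changes the current
 * endianness, so an unconditional toggle is correct here.
 */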
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
    arm_rebuild_hflags(env);
}

#ifndef CONFIG_USER_ONLY
/* Check whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be
 * trapped; otherwise it returns 0, indicating that it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0, with Secure PL1 at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist the
     * bits will be zero, indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it. */
    if (cur_el < 3) {
        mask = is_wfe ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
#endif

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
#ifdef CONFIG_USER_ONLY
    /*
     * WFI in the user-mode emulator is technically permitted but not
     * something any real-world code would do. AArch64 Linux kernels
     * trap it via SCTLR_EL1.nTWI and make it an (expensive) NOP;
     * AArch32 kernels don't trap it, so it will delay a bit.
     * For QEMU, make it a NOP here, because trying to raise EXCP_HLT
     * would trigger an abort.
     */
    return;
#else
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        if (env->aarch64) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }

        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
#endif
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to the top
     * level loop. This is not going into a "low power state"
     * (i.e. halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value. */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}

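/* Read the CPSR, with the execution state bits (CPSR_EXEC) masked out. */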
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
    /* TODO: Not all cpsr bits are relevant to hflags.  */
    arm_rebuild_hflags(env);
}

/* Write the CPSR for a 32-bit exception return. */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    uint32_t mask;

    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);
    arm_rebuild_hflags(env);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

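/*
 * Set/get SP (r13) for a given CPU mode: use the live register when the
 * requested mode is the current one, and the banked copy otherwise.
 */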
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

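/*
 * Runtime access check for a coprocessor/system register. This is called
 * for registers whose accessibility cannot be fully resolved at translate
 * time; it raises the appropriate exception if the access is denied, and
 * simply returns if it is allowed.
 */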
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    /*
     * Check for an EL2 trap due to HSTR_EL2. We expect EL0 accesses
     * to sysregs that are not accessible at EL0 to have UNDEFed already.
     */
    if (!is_a64(env) && arm_current_el(env) < 2 && ri->cp == 15 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        uint32_t mask = 1 << ri->crn;

        if (ri->type & ARM_CP_64BIT) {
            mask = 1 << ri->crm;
        }

        /* T4 and T14 are RES0 */
        mask &= ~((1 << 4) | (1 << 14));

        if (env->cp15.hstr_el2 & mask) {
            target_el = 2;
            goto except;
        }
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 is
         * a bug in the access function.
         */
        assert(arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

except:
    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

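/*
 * Read/write a coprocessor register via its readfn/writefn. Registers
 * marked ARM_CP_IO may touch device state, so those accesses are
 * performed with the iothread (BQL) lock held.
 */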
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

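/*
 * Called before executing HVC to decide whether the instruction should
 * UNDEF instead, unless it is a PSCI call handled by QEMU's internal
 * "firmware", which takes priority.
 */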
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is UNDEFINED in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     *    - PSCI conduit is SMC but we don't have a valid PSCI call,
     *    - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

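/*
 * Variable shifts taking the shift amount from the low byte of a
 * register, as the architecture specifies; amounts of 32 or more have
 * architecturally defined results and carry-out, handled explicitly.
 */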
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

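/*
 * Probe [ptr, ptr + size) for the given access type so that any fault
 * is raised now rather than at the eventual access; a range that
 * crosses a page boundary is probed as two separate pages.
 */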
void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
                          uint32_t access_type, uint32_t mmu_idx,
                          uint32_t size)
{
    /* in_page is the number of bytes from ptr to the end of its page. */
    uint32_t in_page = -((uint32_t)ptr | TARGET_PAGE_MASK);
    uintptr_t ra = GETPC();

    if (likely(size <= in_page)) {
        probe_access(env, ptr, size, access_type, mmu_idx, ra);
    } else {
        probe_access(env, ptr, in_page, access_type, mmu_idx, ra);
        probe_access(env, ptr + in_page, size - in_page,
                     access_type, mmu_idx, ra);
    }
}
 959