qemu/target/arm/tcg/op_helper.c
/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "cpregs.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

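/*
 * Return the default target exception level for an exception taken
 * from the current exception level.
 */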
int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    /*
     * restore_state_to_opc() will set env->exception.syndrome, so
     * we must restore CPU state here before setting the syndrome
     * the caller passed us, and cannot use cpu_loop_exit_restore().
     */
    cpu_restore_state(cs, ra);
    raise_exception(env, excp, syndrome, target_el);
}

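/*
 * Table lookup for Neon VTBL/VTBX: 'desc' encodes the number of source
 * registers minus one in its low two bits and the first source D register
 * in the remaining bits. Byte indexes beyond the table take the
 * corresponding byte from 'def' instead.
 */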
uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
                          uint64_t ireg, uint64_t def)
{
    uint64_t tmp, val = 0;
    uint32_t maxindex = ((desc & 3) + 1) * 8;
    uint32_t base_reg = desc >> 2;
    uint32_t shift, index, reg;

    for (shift = 0; shift < 64; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            reg = base_reg + (index >> 3);
            tmp = *aa32_vfp_dreg(env, reg);
            tmp = ((tmp >> ((index & 7) << 3)) & 0xff) << shift;
        } else {
            tmp = def & (0xffull << shift);
        }
        val |= tmp;
    }
    return val;
}

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use raise_exception_ra() so
         * that cpu_restore_state() will sort them out.
         */
        raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
    }
}

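/*
 * Add two 32-bit values, setting the Q flag on signed overflow; unlike
 * the saturating helpers below, the result itself is not saturated.
 */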
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

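/*
 * Saturating add/subtract helpers: on overflow, set the Q flag and
 * return the saturated result.
 */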
uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

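/* Toggle the CPSR.E (data endianness) bit, as for the SETEND instruction. */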
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
    arm_rebuild_hflags(env);
}

void HELPER(check_bxj_trap)(CPUARMState *env, uint32_t rm)
{
    /*
     * Only called if in NS EL0 or EL1 for a BXJ for a v7A CPU;
     * check if HSTR.TJDBX means we need to trap to EL2.
     */
    if (env->cp15.hstr_el2 & HSTR_TJDBX) {
        /*
         * We know the condition code check passed, so take the IMPDEF
         * choice to always report CV=1 COND 0xe
         */
        uint32_t syn = syn_bxjtrap(1, 0xe, rm);
        raise_exception_ra(env, EXCP_HYP_TRAP, syn, 2, GETPC());
    }
}

#ifndef CONFIG_USER_ONLY
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
#endif

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
#ifdef CONFIG_USER_ONLY
    /*
     * WFI in the user-mode emulator is technically permitted but not
     * something any real-world code would do. AArch64 Linux kernels
     * trap it via SCTLR_EL1.nTWI and make it an (expensive) NOP;
     * AArch32 kernels don't trap it so it will delay a bit.
     * For QEMU, make it NOP here, because trying to raise EXCP_HLT
     * would trigger an abort.
     */
    return;
#else
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        if (env->aarch64) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }

        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
#endif
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome*).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome_el)(CPUARMState *env, uint32_t excp,
                                        uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/*
 * Raise an exception with the specified syndrome register value
 * to the default target el.
 */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    raise_exception(env, excp, syndrome, exception_target_el(env));
}

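/* Read the CPSR, with the execution-state bits masked out. */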
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
    /* TODO: Not all cpsr bits are relevant to hflags.  */
    arm_rebuild_hflags(env);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    uint32_t mask;

    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);
    arm_rebuild_hflags(env);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

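/* Set or get the banked r13 (SP) for the given CPU mode. */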
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

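/* Write 'value' to the register 'regno' banked for 'tgtmode' (MSR (banked));
 * regno 16 is the SPSR and regno 17 is ELR_Hyp.
 */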
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

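/*
 * Check whether a coprocessor/system register access is permitted from the
 * current state: return the ARMCPRegInfo pointer if the access is allowed,
 * otherwise raise the appropriate exception.
 */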
const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
                                        uint32_t syndrome, uint32_t isread)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    CPAccessResult res = CP_ACCESS_OK;
    int target_el;

    assert(ri != NULL);

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        res = CP_ACCESS_TRAP;
        goto fail;
    }

    if (ri->accessfn) {
        res = ri->accessfn(env, ri, isread);
    }

    /*
     * If the access function indicates a trap from EL0 to EL1 then
     * that always takes priority over the HSTR_EL2 trap. (If it indicates
     * a trap to EL3, then the HSTR_EL2 trap takes priority; if it indicates
     * a trap to EL2, then the syndrome is the same either way so we don't
     * care whether technically the architecture says that HSTR_EL2 trap or
     * the other trap takes priority. So we take the "check HSTR_EL2" path
     * for all of those cases.)
     */
    if (res != CP_ACCESS_OK && ((res & CP_ACCESS_EL_MASK) == 0) &&
        arm_current_el(env) == 0) {
        goto fail;
    }

    /*
     * HSTR_EL2 traps from EL1 are checked earlier, in generated code;
     * we only need to check here for traps from EL0.
     */
    if (!is_a64(env) && arm_current_el(env) == 0 && ri->cp == 15 &&
        arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        uint32_t mask = 1 << ri->crn;

        if (ri->type & ARM_CP_64BIT) {
            mask = 1 << ri->crm;
        }

        /* T4 and T14 are RES0 */
        mask &= ~((1 << 4) | (1 << 14));

        if (env->cp15.hstr_el2 & mask) {
            res = CP_ACCESS_TRAP_EL2;
            goto fail;
        }
    }

    /*
     * Fine-grained traps also are lower priority than undef-to-EL1,
     * higher priority than trap-to-EL3, and we don't care about priority
     * order with other EL2 traps because the syndrome value is the same.
     */
    if (arm_fgt_active(env, arm_current_el(env))) {
        uint64_t trapword = 0;
        unsigned int idx = FIELD_EX32(ri->fgt, FGT, IDX);
        unsigned int bitpos = FIELD_EX32(ri->fgt, FGT, BITPOS);
        bool rev = FIELD_EX32(ri->fgt, FGT, REV);
        bool trapbit;

        if (ri->fgt & FGT_EXEC) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_exec));
            trapword = env->cp15.fgt_exec[idx];
        } else if (isread && (ri->fgt & FGT_R)) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_read));
            trapword = env->cp15.fgt_read[idx];
        } else if (!isread && (ri->fgt & FGT_W)) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_write));
            trapword = env->cp15.fgt_write[idx];
        }

        trapbit = extract64(trapword, bitpos, 1);
        if (trapbit != rev) {
            res = CP_ACCESS_TRAP_EL2;
            goto fail;
        }
    }

    if (likely(res == CP_ACCESS_OK)) {
        return ri;
    }

 fail:
    switch (res & ~CP_ACCESS_EL_MASK) {
    case CP_ACCESS_TRAP:
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        /* Only CP_ACCESS_TRAP traps are direct to a specified EL */
        assert((res & CP_ACCESS_EL_MASK) == 0);
        if (cpu_isar_feature(aa64_ids, cpu) && isread &&
            arm_cpreg_in_idspace(ri)) {
            /*
             * FEAT_IDST says this should be reported as EC_SYSTEMREGISTERTRAP,
             * not EC_UNCATEGORIZED
             */
            break;
        }
        syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }

    target_el = res & CP_ACCESS_EL_MASK;
    switch (target_el) {
    case 0:
        target_el = exception_target_el(env);
        break;
    case 2:
        assert(arm_current_el(env) != 3);
        assert(arm_is_el2_enabled(env));
        break;
    case 3:
        assert(arm_feature(env, ARM_FEATURE_EL3));
        break;
    default:
        /* No "direct" traps to EL1 */
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

const void *HELPER(lookup_cp_reg)(CPUARMState *env, uint32_t key)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);

    assert(ri != NULL);
    return ri;
}

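/*
 * Read/write a coprocessor register via its reginfo read/write functions.
 * Registers marked ARM_CP_IO may touch device state, so those accesses are
 * made while holding the iothread lock.
 */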
void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     *  extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     *    - PSCI conduit is SMC but we don't have a valid PSCI call,
     *    - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

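/*
 * Probe an access of 'size' bytes at 'ptr' so that any fault is raised
 * before any part of the real access is performed; if the range crosses
 * a page boundary the two pages are probed separately.
 */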
void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
                          uint32_t access_type, uint32_t mmu_idx,
                          uint32_t size)
{
    uint32_t in_page = -((uint32_t)ptr | TARGET_PAGE_SIZE);
    uintptr_t ra = GETPC();

    if (likely(size <= in_page)) {
        probe_access(env, ptr, size, access_type, mmu_idx, ra);
    } else {
        probe_access(env, ptr, in_page, access_type, mmu_idx, ra);
        probe_access(env, ptr + in_page, size - in_page,
                     access_type, mmu_idx, ra);
    }
}

/*
 * This function corresponds to AArch64.vESBOperation().
 * Note that the AArch32 version is not functionally different.
 */
void HELPER(vesb)(CPUARMState *env)
{
    /*
     * The EL2Enabled() check is done inside arm_hcr_el2_eff,
     * and will return HCR_EL2.VSE == 0, so nothing happens.
     */
    uint64_t hcr = arm_hcr_el2_eff(env);
    bool enabled = !(hcr & HCR_TGE) && (hcr & HCR_AMO);
    bool pending = enabled && (hcr & HCR_VSE);
    bool masked  = (env->daif & PSTATE_A);

    /* If VSE pending and masked, defer the exception.  */
    if (pending && masked) {
        uint32_t syndrome;

        if (arm_el_is_aa64(env, 1)) {
            /* Copy across IDS and ISS from VSESR. */
            syndrome = env->cp15.vsesr_el2 & 0x1ffffff;
        } else {
            ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal };

            if (extended_addresses_enabled(env)) {
                syndrome = arm_fi_to_lfsc(&fi);
            } else {
                syndrome = arm_fi_to_sfsc(&fi);
            }
            /* Copy across AET and ExT from VSESR. */
            syndrome |= env->cp15.vsesr_el2 & 0xd000;
        }

        /* Set VDISR_EL2.A along with the syndrome. */
        env->cp15.vdisr_el2 = syndrome | (1u << 31);

        /* Clear pending virtual SError */
        env->cp15.hcr_el2 &= ~HCR_VSE;
        cpu_reset_interrupt(env_cpu(env), CPU_INTERRUPT_VSERR);
    }
}