qemu/target/arm/op_helper.c
   1/*
   2 *  ARM helper routines
   3 *
   4 *  Copyright (c) 2005-2007 CodeSourcery, LLC
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19#include "qemu/osdep.h"
  20#include "qemu/log.h"
  21#include "qemu/main-loop.h"
  22#include "cpu.h"
  23#include "exec/helper-proto.h"
  24#include "internals.h"
  25#include "exec/exec-all.h"
  26#include "exec/cpu_ldst.h"
  27
  28#define SIGNBIT (uint32_t)0x80000000
  29#define SIGNBIT64 ((uint64_t)1 << 63)
  30
  31void raise_exception(CPUARMState *env, uint32_t excp,
  32                     uint32_t syndrome, uint32_t target_el)
  33{
  34    CPUState *cs = CPU(arm_env_get_cpu(env));
  35
  36    if ((env->cp15.hcr_el2 & HCR_TGE) &&
  37        target_el == 1 && !arm_is_secure(env)) {
  38        /*
  39         * Redirect NS EL1 exceptions to NS EL2. These are reported with
  40         * their original syndrome register value, with the exception of
  41         * SIMD/FP access traps, which are reported as uncategorized
   42         * (see DDI0487C.a D1.10.4)
  43         */
  44        target_el = 2;
  45        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
  46            syndrome = syn_uncategorized();
  47        }
  48    }
  49
  50    assert(!excp_is_internal(excp));
  51    cs->exception_index = excp;
  52    env->exception.syndrome = syndrome;
  53    env->exception.target_el = target_el;
  54    cpu_loop_exit(cs);
  55}
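
/*
 * Concrete illustration of the HCR_EL2.TGE redirect above: an SVC taken
 * from NS EL0, which would normally be delivered to EL1, goes to EL2 with
 * its original syndrome, whereas an AdvSIMD/FP access trap that arrives
 * here is reported to EL2 with a syn_uncategorized() syndrome instead.
 */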
  56
  57static int exception_target_el(CPUARMState *env)
  58{
  59    int target_el = MAX(1, arm_current_el(env));
  60
  61    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
  62     * to EL3 in this case.
  63     */
  64    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
  65        target_el = 3;
  66    }
  67
  68    return target_el;
  69}
  70
  71uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
  72                          uint32_t maxindex)
  73{
  74    uint32_t val, shift;
  75    uint64_t *table = vn;
  76
  77    val = 0;
  78    for (shift = 0; shift < 32; shift += 8) {
  79        uint32_t index = (ireg >> shift) & 0xff;
  80        if (index < maxindex) {
  81            uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
  82            val |= tmp << shift;
  83        } else {
  84            val |= def & (0xff << shift);
  85        }
  86    }
  87    return val;
  88}
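
/*
 * Worked example of the table lookup above (this helper backs VTBL/VTBX):
 * with maxindex == 8 the table spans a single 64-bit element, so a
 * selector byte of 5 in ireg picks byte 5 of table[0], i.e.
 * (table[0] >> (5 << 3)) & 0xff, while a selector byte of 9 is out of
 * range and the corresponding byte of 'def' is used unchanged.
 */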
  89
  90#if !defined(CONFIG_USER_ONLY)
  91
  92static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
  93                                            unsigned int target_el,
  94                                            bool same_el, bool ea,
  95                                            bool s1ptw, bool is_write,
  96                                            int fsc)
  97{
  98    uint32_t syn;
  99
 100    /* ISV is only set for data aborts routed to EL2 and
 101     * never for stage-1 page table walks faulting on stage 2.
 102     *
 103     * Furthermore, ISV is only set for certain kinds of load/stores.
 104     * If the template syndrome does not have ISV set, we should leave
 105     * it cleared.
 106     *
 107     * See ARMv8 specs, D7-1974:
 108     * ISS encoding for an exception from a Data Abort, the
 109     * ISV field.
 110     */
 111    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
 112        syn = syn_data_abort_no_iss(same_el,
 113                                    ea, 0, s1ptw, is_write, fsc);
 114    } else {
 115        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
 116         * syndrome created at translation time.
 117         * Now we create the runtime syndrome with the remaining fields.
 118         */
 119        syn = syn_data_abort_with_iss(same_el,
 120                                      0, 0, 0, 0, 0,
 121                                      ea, 0, s1ptw, is_write, fsc,
 122                                      false);
 123        /* Merge the runtime syndrome with the template syndrome.  */
 124        syn |= template_syn;
 125    }
 126    return syn;
 127}
 128
 129static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
 130                          int mmu_idx, ARMMMUFaultInfo *fi)
 131{
 132    CPUARMState *env = &cpu->env;
 133    int target_el;
 134    bool same_el;
 135    uint32_t syn, exc, fsr, fsc;
 136    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
 137
 138    target_el = exception_target_el(env);
 139    if (fi->stage2) {
 140        target_el = 2;
 141        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
 142    }
 143    same_el = (arm_current_el(env) == target_el);
 144
 145    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
 146        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
 147        /* LPAE format fault status register : bottom 6 bits are
 148         * status code in the same form as needed for syndrome
 149         */
 150        fsr = arm_fi_to_lfsc(fi);
 151        fsc = extract32(fsr, 0, 6);
 152    } else {
 153        fsr = arm_fi_to_sfsc(fi);
 154        /* Short format FSR : this fault will never actually be reported
 155         * to an EL that uses a syndrome register. Use a (currently)
 156         * reserved FSR code in case the constructed syndrome does leak
 157         * into the guest somehow.
 158         */
 159        fsc = 0x3f;
 160    }
 161
 162    if (access_type == MMU_INST_FETCH) {
 163        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
 164        exc = EXCP_PREFETCH_ABORT;
 165    } else {
 166        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
 167                                   same_el, fi->ea, fi->s1ptw,
 168                                   access_type == MMU_DATA_STORE,
 169                                   fsc);
 170        if (access_type == MMU_DATA_STORE
 171            && arm_feature(env, ARM_FEATURE_V6)) {
 172            fsr |= (1 << 11);
 173        }
 174        exc = EXCP_DATA_ABORT;
 175    }
 176
 177    env->exception.vaddress = addr;
 178    env->exception.fsr = fsr;
 179    raise_exception(env, exc, syn, target_el);
 180}
 181
  182/* Try to fill the TLB; if this fails, deliver the fault to the guest
  183 * as an exception. If retaddr is NULL, it means that the function was
  184 * called from C code (i.e. not from generated code or from helper.c).
  185 */
 186void tlb_fill(CPUState *cs, target_ulong addr, int size,
 187              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 188{
 189    bool ret;
 190    ARMMMUFaultInfo fi = {};
 191
 192    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
 193    if (unlikely(ret)) {
 194        ARMCPU *cpu = ARM_CPU(cs);
 195
 196        /* now we have a real cpu fault */
 197        cpu_restore_state(cs, retaddr, true);
 198
 199        deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
 200    }
 201}
 202
 203/* Raise a data fault alignment exception for the specified virtual address */
 204void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
 205                                 MMUAccessType access_type,
 206                                 int mmu_idx, uintptr_t retaddr)
 207{
 208    ARMCPU *cpu = ARM_CPU(cs);
 209    ARMMMUFaultInfo fi = {};
 210
 211    /* now we have a real cpu fault */
 212    cpu_restore_state(cs, retaddr, true);
 213
 214    fi.type = ARMFault_Alignment;
 215    deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
 216}
 217
 218/* arm_cpu_do_transaction_failed: handle a memory system error response
 219 * (eg "no device/memory present at address") by raising an external abort
 220 * exception
 221 */
 222void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
 223                                   vaddr addr, unsigned size,
 224                                   MMUAccessType access_type,
 225                                   int mmu_idx, MemTxAttrs attrs,
 226                                   MemTxResult response, uintptr_t retaddr)
 227{
 228    ARMCPU *cpu = ARM_CPU(cs);
 229    ARMMMUFaultInfo fi = {};
 230
 231    /* now we have a real cpu fault */
 232    cpu_restore_state(cs, retaddr, true);
 233
 234    fi.ea = arm_extabort_type(response);
 235    fi.type = ARMFault_SyncExternal;
 236    deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
 237}
 238
 239#endif /* !defined(CONFIG_USER_ONLY) */
 240
 241void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
 242{
 243    /*
 244     * Perform the v8M stack limit check for SP updates from translated code,
 245     * raising an exception if the limit is breached.
 246     */
 247    if (newvalue < v7m_sp_limit(env)) {
 248        CPUState *cs = CPU(arm_env_get_cpu(env));
 249
 250        /*
 251         * Stack limit exceptions are a rare case, so rather than syncing
 252         * PC/condbits before the call, we use cpu_restore_state() to
 253         * get them right before raising the exception.
 254         */
 255        cpu_restore_state(cs, GETPC(), true);
 256        raise_exception(env, EXCP_STKOF, 0, 1);
 257    }
 258}
 259
 260uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
 261{
 262    uint32_t res = a + b;
 263    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
 264        env->QF = 1;
 265    return res;
 266}
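
/*
 * The overflow test above uses the usual two's-complement rule: a signed
 * addition overflows iff both operands have the same sign and the result's
 * sign differs. For example a = 0x7fffffff, b = 1 gives res = 0x80000000;
 * (res ^ a) has the sign bit set while (a ^ b) does not, so QF is set.
 * add_saturate() below additionally clamps such a result to 0x7fffffff or
 * 0x80000000 according to the sign of 'a'.
 */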
 267
 268uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
 269{
 270    uint32_t res = a + b;
 271    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
 272        env->QF = 1;
 273        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
 274    }
 275    return res;
 276}
 277
 278uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
 279{
 280    uint32_t res = a - b;
 281    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
 282        env->QF = 1;
 283        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
 284    }
 285    return res;
 286}
 287
 288uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
 289{
 290    uint32_t res;
 291    if (val >= 0x40000000) {
 292        res = ~SIGNBIT;
 293        env->QF = 1;
 294    } else if (val <= (int32_t)0xc0000000) {
 295        res = SIGNBIT;
 296        env->QF = 1;
 297    } else {
 298        res = val << 1;
 299    }
 300    return res;
 301}
 302
 303uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
 304{
 305    uint32_t res = a + b;
 306    if (res < a) {
 307        env->QF = 1;
 308        res = ~0;
 309    }
 310    return res;
 311}
 312
 313uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
 314{
 315    uint32_t res = a - b;
 316    if (res > a) {
 317        env->QF = 1;
 318        res = 0;
 319    }
 320    return res;
 321}
 322
 323/* Signed saturation.  */
 324static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
 325{
 326    int32_t top;
 327    uint32_t mask;
 328
 329    top = val >> shift;
 330    mask = (1u << shift) - 1;
 331    if (top > 0) {
 332        env->QF = 1;
 333        return mask;
 334    } else if (top < -1) {
 335        env->QF = 1;
 336        return ~mask;
 337    }
 338    return val;
 339}
 340
 341/* Unsigned saturation.  */
 342static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
 343{
 344    uint32_t max;
 345
 346    max = (1u << shift) - 1;
 347    if (val < 0) {
 348        env->QF = 1;
 349        return 0;
 350    } else if (val > max) {
 351        env->QF = 1;
 352        return max;
 353    }
 354    return val;
 355}
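
/*
 * Quick examples for the two saturation helpers, using an 8-bit saturation
 * position (shift == 8):
 *   do_ssat(env, 4660, 8)  -> 0x000000ff  (clamped to  2^8 - 1, QF set)
 *   do_ssat(env, -4660, 8) -> 0xffffff00  (clamped to -(2^8),   QF set)
 *   do_usat(env, 300, 8)   -> 255         (clamped to  2^8 - 1, QF set)
 *   do_usat(env, -5, 8)    -> 0           (negative clamps to 0, QF set)
 * Values already in range are returned unchanged and leave QF alone.
 */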
 356
 357/* Signed saturate.  */
 358uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
 359{
 360    return do_ssat(env, x, shift);
 361}
 362
 363/* Dual halfword signed saturate.  */
 364uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
 365{
 366    uint32_t res;
 367
 368    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
 369    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
 370    return res;
 371}
 372
 373/* Unsigned saturate.  */
 374uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
 375{
 376    return do_usat(env, x, shift);
 377}
 378
 379/* Dual halfword unsigned saturate.  */
 380uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
 381{
 382    uint32_t res;
 383
 384    res = (uint16_t)do_usat(env, (int16_t)x, shift);
 385    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
 386    return res;
 387}
 388
 389void HELPER(setend)(CPUARMState *env)
 390{
 391    env->uncached_cpsr ^= CPSR_E;
 392}
 393
 394/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 395 * The function returns the target EL (1-3) if the instruction is to be trapped;
 396 * otherwise it returns 0 indicating it is not trapped.
 397 */
 398static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
 399{
 400    int cur_el = arm_current_el(env);
 401    uint64_t mask;
 402
 403    if (arm_feature(env, ARM_FEATURE_M)) {
 404        /* M profile cores can never trap WFI/WFE. */
 405        return 0;
 406    }
 407
 408    /* If we are currently in EL0 then we need to check if SCTLR is set up for
 409     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
 410     */
 411    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
 412        int target_el;
 413
 414        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
 415        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
  416            /* We are at Secure EL0; Secure PL1 is at EL3, so trap there */
 417            target_el = 3;
 418        } else {
 419            target_el = 1;
 420        }
 421
 422        if (!(env->cp15.sctlr_el[target_el] & mask)) {
 423            return target_el;
 424        }
 425    }
 426
  427    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
  428     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist the
  429     * bits will be zero, indicating no trap.
  430     */
 431    if (cur_el < 2 && !arm_is_secure(env)) {
 432        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
 433        if (env->cp15.hcr_el2 & mask) {
 434            return 2;
 435        }
 436    }
 437
 438    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
 439    if (cur_el < 3) {
 440        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
 441        if (env->cp15.scr_el3 & mask) {
 442            return 3;
 443        }
 444    }
 445
 446    return 0;
 447}
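
/*
 * Example of the priority order implemented above: a WFI executed at NS
 * EL0 on a v8 core is trapped to EL1 if SCTLR_EL1.nTWI is clear; if that
 * bit is set, HCR_EL2.TWI is consulted next (set -> trap to EL2), and
 * finally SCR_EL3.TWI (set -> trap to EL3). If none of these apply the
 * helper returns 0 and the WFI proceeds normally.
 */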
 448
 449void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
 450{
 451    CPUState *cs = CPU(arm_env_get_cpu(env));
 452    int target_el = check_wfx_trap(env, false);
 453
 454    if (cpu_has_work(cs)) {
 455        /* Don't bother to go into our "low power state" if
 456         * we would just wake up immediately.
 457         */
 458        return;
 459    }
 460
 461    if (target_el) {
 462        env->pc -= insn_len;
 463        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
 464                        target_el);
 465    }
 466
 467    cs->exception_index = EXCP_HLT;
 468    cs->halted = 1;
 469    cpu_loop_exit(cs);
 470}
 471
 472void HELPER(wfe)(CPUARMState *env)
 473{
 474    /* This is a hint instruction that is semantically different
 475     * from YIELD even though we currently implement it identically.
 476     * Don't actually halt the CPU, just yield back to top
 477     * level loop. This is not going into a "low power state"
 478     * (ie halting until some event occurs), so we never take
 479     * a configurable trap to a different exception level.
 480     */
 481    HELPER(yield)(env);
 482}
 483
 484void HELPER(yield)(CPUARMState *env)
 485{
 486    ARMCPU *cpu = arm_env_get_cpu(env);
 487    CPUState *cs = CPU(cpu);
 488
 489    /* This is a non-trappable hint instruction that generally indicates
 490     * that the guest is currently busy-looping. Yield control back to the
 491     * top level loop so that a more deserving VCPU has a chance to run.
 492     */
 493    cs->exception_index = EXCP_YIELD;
 494    cpu_loop_exit(cs);
 495}
 496
 497/* Raise an internal-to-QEMU exception. This is limited to only
 498 * those EXCP values which are special cases for QEMU to interrupt
 499 * execution and not to be used for exceptions which are passed to
 500 * the guest (those must all have syndrome information and thus should
 501 * use exception_with_syndrome).
 502 */
 503void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
 504{
 505    CPUState *cs = CPU(arm_env_get_cpu(env));
 506
 507    assert(excp_is_internal(excp));
 508    cs->exception_index = excp;
 509    cpu_loop_exit(cs);
 510}
 511
 512/* Raise an exception with the specified syndrome register value */
 513void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
 514                                     uint32_t syndrome, uint32_t target_el)
 515{
 516    raise_exception(env, excp, syndrome, target_el);
 517}
 518
 519/* Raise an EXCP_BKPT with the specified syndrome register value,
 520 * targeting the correct exception level for debug exceptions.
 521 */
 522void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
 523{
 524    /* FSR will only be used if the debug target EL is AArch32. */
 525    env->exception.fsr = arm_debug_exception_fsr(env);
 526    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
 527     * values to the guest that it shouldn't be able to see at its
 528     * exception/security level.
 529     */
 530    env->exception.vaddress = 0;
 531    raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
 532}
 533
 534uint32_t HELPER(cpsr_read)(CPUARMState *env)
 535{
 536    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
 537}
 538
 539void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
 540{
 541    cpsr_write(env, val, mask, CPSRWriteByInstr);
 542}
 543
 544/* Write the CPSR for a 32-bit exception return */
 545void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
 546{
 547    qemu_mutex_lock_iothread();
 548    arm_call_pre_el_change_hook(arm_env_get_cpu(env));
 549    qemu_mutex_unlock_iothread();
 550
 551    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
 552
 553    /* Generated code has already stored the new PC value, but
 554     * without masking out its low bits, because which bits need
 555     * masking depends on whether we're returning to Thumb or ARM
 556     * state. Do the masking now.
 557     */
 558    env->regs[15] &= (env->thumb ? ~1 : ~3);
 559
 560    qemu_mutex_lock_iothread();
 561    arm_call_el_change_hook(arm_env_get_cpu(env));
 562    qemu_mutex_unlock_iothread();
 563}
 564
 565/* Access to user mode registers from privileged modes.  */
 566uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
 567{
 568    uint32_t val;
 569
 570    if (regno == 13) {
 571        val = env->banked_r13[BANK_USRSYS];
 572    } else if (regno == 14) {
 573        val = env->banked_r14[BANK_USRSYS];
 574    } else if (regno >= 8
 575               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
 576        val = env->usr_regs[regno - 8];
 577    } else {
 578        val = env->regs[regno];
 579    }
 580    return val;
 581}
 582
 583void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
 584{
 585    if (regno == 13) {
 586        env->banked_r13[BANK_USRSYS] = val;
 587    } else if (regno == 14) {
 588        env->banked_r14[BANK_USRSYS] = val;
 589    } else if (regno >= 8
 590               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
 591        env->usr_regs[regno - 8] = val;
 592    } else {
 593        env->regs[regno] = val;
 594    }
 595}
 596
 597void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
 598{
 599    if ((env->uncached_cpsr & CPSR_M) == mode) {
 600        env->regs[13] = val;
 601    } else {
 602        env->banked_r13[bank_number(mode)] = val;
 603    }
 604}
 605
 606uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
 607{
 608    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
 609        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
 610         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
 611         */
 612        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 613                        exception_target_el(env));
 614    }
 615
 616    if ((env->uncached_cpsr & CPSR_M) == mode) {
 617        return env->regs[13];
 618    } else {
 619        return env->banked_r13[bank_number(mode)];
 620    }
 621}
 622
 623static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
 624                                      uint32_t regno)
 625{
 626    /* Raise an exception if the requested access is one of the UNPREDICTABLE
 627     * cases; otherwise return. This broadly corresponds to the pseudocode
 628     * BankedRegisterAccessValid() and SPSRAccessValid(),
 629     * except that we have already handled some cases at translate time.
 630     */
 631    int curmode = env->uncached_cpsr & CPSR_M;
 632
 633    if (regno == 17) {
 634        /* ELR_Hyp: a special case because access from tgtmode is OK */
 635        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
 636            goto undef;
 637        }
 638        return;
 639    }
 640
 641    if (curmode == tgtmode) {
 642        goto undef;
 643    }
 644
 645    if (tgtmode == ARM_CPU_MODE_USR) {
 646        switch (regno) {
 647        case 8 ... 12:
 648            if (curmode != ARM_CPU_MODE_FIQ) {
 649                goto undef;
 650            }
 651            break;
 652        case 13:
 653            if (curmode == ARM_CPU_MODE_SYS) {
 654                goto undef;
 655            }
 656            break;
 657        case 14:
 658            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
 659                goto undef;
 660            }
 661            break;
 662        default:
 663            break;
 664        }
 665    }
 666
 667    if (tgtmode == ARM_CPU_MODE_HYP) {
 668        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
 669        if (curmode != ARM_CPU_MODE_MON) {
 670            goto undef;
 671        }
 672    }
 673
 674    return;
 675
 676undef:
 677    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 678                    exception_target_el(env));
 679}
 680
 681void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
 682                        uint32_t regno)
 683{
 684    msr_mrs_banked_exc_checks(env, tgtmode, regno);
 685
 686    switch (regno) {
 687    case 16: /* SPSRs */
 688        env->banked_spsr[bank_number(tgtmode)] = value;
 689        break;
 690    case 17: /* ELR_Hyp */
 691        env->elr_el[2] = value;
 692        break;
 693    case 13:
 694        env->banked_r13[bank_number(tgtmode)] = value;
 695        break;
 696    case 14:
 697        env->banked_r14[r14_bank_number(tgtmode)] = value;
 698        break;
 699    case 8 ... 12:
 700        switch (tgtmode) {
 701        case ARM_CPU_MODE_USR:
 702            env->usr_regs[regno - 8] = value;
 703            break;
 704        case ARM_CPU_MODE_FIQ:
 705            env->fiq_regs[regno - 8] = value;
 706            break;
 707        default:
 708            g_assert_not_reached();
 709        }
 710        break;
 711    default:
 712        g_assert_not_reached();
 713    }
 714}
 715
 716uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
 717{
 718    msr_mrs_banked_exc_checks(env, tgtmode, regno);
 719
 720    switch (regno) {
 721    case 16: /* SPSRs */
 722        return env->banked_spsr[bank_number(tgtmode)];
 723    case 17: /* ELR_Hyp */
 724        return env->elr_el[2];
 725    case 13:
 726        return env->banked_r13[bank_number(tgtmode)];
 727    case 14:
 728        return env->banked_r14[r14_bank_number(tgtmode)];
 729    case 8 ... 12:
 730        switch (tgtmode) {
 731        case ARM_CPU_MODE_USR:
 732            return env->usr_regs[regno - 8];
 733        case ARM_CPU_MODE_FIQ:
 734            return env->fiq_regs[regno - 8];
 735        default:
 736            g_assert_not_reached();
 737        }
 738    default:
 739        g_assert_not_reached();
 740    }
 741}
 742
 743void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
 744                                 uint32_t isread)
 745{
 746    const ARMCPRegInfo *ri = rip;
 747    int target_el;
 748
 749    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
 750        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
 751        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
 752    }
 753
 754    if (!ri->accessfn) {
 755        return;
 756    }
 757
 758    switch (ri->accessfn(env, ri, isread)) {
 759    case CP_ACCESS_OK:
 760        return;
 761    case CP_ACCESS_TRAP:
 762        target_el = exception_target_el(env);
 763        break;
 764    case CP_ACCESS_TRAP_EL2:
 765        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
 766         * a bug in the access function.
 767         */
 768        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
 769        target_el = 2;
 770        break;
 771    case CP_ACCESS_TRAP_EL3:
 772        target_el = 3;
 773        break;
 774    case CP_ACCESS_TRAP_UNCATEGORIZED:
 775        target_el = exception_target_el(env);
 776        syndrome = syn_uncategorized();
 777        break;
 778    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
 779        target_el = 2;
 780        syndrome = syn_uncategorized();
 781        break;
 782    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
 783        target_el = 3;
 784        syndrome = syn_uncategorized();
 785        break;
 786    case CP_ACCESS_TRAP_FP_EL2:
 787        target_el = 2;
 788        /* Since we are an implementation that takes exceptions on a trapped
 789         * conditional insn only if the insn has passed its condition code
 790         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
 791         * (which is also the required value for AArch64 traps).
 792         */
 793        syndrome = syn_fp_access_trap(1, 0xe, false);
 794        break;
 795    case CP_ACCESS_TRAP_FP_EL3:
 796        target_el = 3;
 797        syndrome = syn_fp_access_trap(1, 0xe, false);
 798        break;
 799    default:
 800        g_assert_not_reached();
 801    }
 802
 803    raise_exception(env, EXCP_UDEF, syndrome, target_el);
 804}
 805
 806void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
 807{
 808    const ARMCPRegInfo *ri = rip;
 809
 810    if (ri->type & ARM_CP_IO) {
 811        qemu_mutex_lock_iothread();
 812        ri->writefn(env, ri, value);
 813        qemu_mutex_unlock_iothread();
 814    } else {
 815        ri->writefn(env, ri, value);
 816    }
 817}
 818
 819uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
 820{
 821    const ARMCPRegInfo *ri = rip;
 822    uint32_t res;
 823
 824    if (ri->type & ARM_CP_IO) {
 825        qemu_mutex_lock_iothread();
 826        res = ri->readfn(env, ri);
 827        qemu_mutex_unlock_iothread();
 828    } else {
 829        res = ri->readfn(env, ri);
 830    }
 831
 832    return res;
 833}
 834
 835void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
 836{
 837    const ARMCPRegInfo *ri = rip;
 838
 839    if (ri->type & ARM_CP_IO) {
 840        qemu_mutex_lock_iothread();
 841        ri->writefn(env, ri, value);
 842        qemu_mutex_unlock_iothread();
 843    } else {
 844        ri->writefn(env, ri, value);
 845    }
 846}
 847
 848uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
 849{
 850    const ARMCPRegInfo *ri = rip;
 851    uint64_t res;
 852
 853    if (ri->type & ARM_CP_IO) {
 854        qemu_mutex_lock_iothread();
 855        res = ri->readfn(env, ri);
 856        qemu_mutex_unlock_iothread();
 857    } else {
 858        res = ri->readfn(env, ri);
 859    }
 860
 861    return res;
 862}
 863
 864void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
 865{
 866    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
 867     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
 868     * to catch that case at translate time.
 869     */
 870    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
 871        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
 872                                                extract32(op, 3, 3), 4,
 873                                                imm, 0x1f, 0);
 874        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
 875    }
 876
 877    switch (op) {
 878    case 0x05: /* SPSel */
 879        update_spsel(env, imm);
 880        break;
 881    case 0x1e: /* DAIFSet */
 882        env->daif |= (imm << 6) & PSTATE_DAIF;
 883        break;
 884    case 0x1f: /* DAIFClear */
 885        env->daif &= ~((imm << 6) & PSTATE_DAIF);
 886        break;
 887    default:
 888        g_assert_not_reached();
 889    }
 890}
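
/*
 * Example of the immediate handling above: for DAIFSet/DAIFClear the 4-bit
 * immediate lands on PSTATE.{D,A,I,F} once shifted left by 6, so
 * "MSR DAIFSet, #3" sets the I and F mask bits (PSTATE bits 7 and 6) and
 * "MSR DAIFClear, #8" clears only the D bit (PSTATE bit 9).
 */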
 891
 892void HELPER(clear_pstate_ss)(CPUARMState *env)
 893{
 894    env->pstate &= ~PSTATE_SS;
 895}
 896
 897void HELPER(pre_hvc)(CPUARMState *env)
 898{
 899    ARMCPU *cpu = arm_env_get_cpu(env);
 900    int cur_el = arm_current_el(env);
 901    /* FIXME: Use actual secure state.  */
 902    bool secure = false;
 903    bool undef;
 904
 905    if (arm_is_psci_call(cpu, EXCP_HVC)) {
 906        /* If PSCI is enabled and this looks like a valid PSCI call then
 907         * that overrides the architecturally mandated HVC behaviour.
 908         */
 909        return;
 910    }
 911
 912    if (!arm_feature(env, ARM_FEATURE_EL2)) {
 913        /* If EL2 doesn't exist, HVC always UNDEFs */
 914        undef = true;
 915    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
 916        /* EL3.HCE has priority over EL2.HCD. */
 917        undef = !(env->cp15.scr_el3 & SCR_HCE);
 918    } else {
 919        undef = env->cp15.hcr_el2 & HCR_HCD;
 920    }
 921
 922    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
 923     * For ARMv8/AArch64, HVC is allowed in EL3.
 924     * Note that we've already trapped HVC from EL0 at translation
 925     * time.
 926     */
 927    if (secure && (!is_a64(env) || cur_el == 1)) {
 928        undef = true;
 929    }
 930
 931    if (undef) {
 932        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 933                        exception_target_el(env));
 934    }
 935}
 936
 937void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
 938{
 939    ARMCPU *cpu = arm_env_get_cpu(env);
 940    int cur_el = arm_current_el(env);
 941    bool secure = arm_is_secure(env);
 942    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;
 943
 944    /*
 945     * SMC behaviour is summarized in the following table.
 946     * This helper handles the "Trap to EL2" and "Undef insn" cases.
 947     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
 948     * helper.
 949     *
 950     *  -> ARM_FEATURE_EL3 and !SMD
 951     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
 952     *
 953     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
 954     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
 955     *  Conduit not SMC          Trap to EL2         Trap to EL3
 956     *
 957     *
 958     *  -> ARM_FEATURE_EL3 and SMD
 959     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
 960     *
 961     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
 962     *  Conduit SMC, inval call  Trap to EL2         Undef insn
 963     *  Conduit not SMC          Trap to EL2         Undef insn
 964     *
 965     *
 966     *  -> !ARM_FEATURE_EL3
 967     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
 968     *
 969     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
 970     *  Conduit SMC, inval call  Trap to EL2         Undef insn
 971     *  Conduit not SMC          Undef insn          Undef insn
 972     */
 973
 974    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
 975     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
 976     *  extensions, SMD only applies to NS state.
 977     * On ARMv7 without the Virtualization extensions, the SMD bit
 978     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
 979     * so we need not special case this here.
 980     */
 981    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
 982                                                     : smd_flag && !secure;
 983
 984    if (!arm_feature(env, ARM_FEATURE_EL3) &&
 985        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
 986        /* If we have no EL3 then SMC always UNDEFs and can't be
 987         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
 988         * firmware within QEMU, and we want an EL2 guest to be able
 989         * to forbid its EL1 from making PSCI calls into QEMU's
 990         * "firmware" via HCR.TSC, so for these purposes treat
 991         * PSCI-via-SMC as implying an EL3.
 992         * This handles the very last line of the previous table.
 993         */
 994        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 995                        exception_target_el(env));
 996    }
 997
 998    if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
 999        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
1000         * We also want an EL2 guest to be able to forbid its EL1 from
1001         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
1002         * This handles all the "Trap to EL2" cases of the previous table.
1003         */
1004        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
1005    }
1006
1007    /* Catch the two remaining "Undef insn" cases of the previous table:
 1008     *    - PSCI conduit is SMC but we don't have a valid PSCI call,
1009     *    - We don't have EL3 or SMD is set.
1010     */
1011    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
1012        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
1013        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
1014                        exception_target_el(env));
1015    }
1016}
1017
1018static int el_from_spsr(uint32_t spsr)
1019{
1020    /* Return the exception level that this SPSR is requesting a return to,
1021     * or -1 if it is invalid (an illegal return)
1022     */
1023    if (spsr & PSTATE_nRW) {
1024        switch (spsr & CPSR_M) {
1025        case ARM_CPU_MODE_USR:
1026            return 0;
1027        case ARM_CPU_MODE_HYP:
1028            return 2;
1029        case ARM_CPU_MODE_FIQ:
1030        case ARM_CPU_MODE_IRQ:
1031        case ARM_CPU_MODE_SVC:
1032        case ARM_CPU_MODE_ABT:
1033        case ARM_CPU_MODE_UND:
1034        case ARM_CPU_MODE_SYS:
1035            return 1;
1036        case ARM_CPU_MODE_MON:
1037            /* Returning to Mon from AArch64 is never possible,
1038             * so this is an illegal return.
1039             */
1040        default:
1041            return -1;
1042        }
1043    } else {
1044        if (extract32(spsr, 1, 1)) {
1045            /* Return with reserved M[1] bit set */
1046            return -1;
1047        }
1048        if (extract32(spsr, 0, 4) == 1) {
1049            /* return to EL0 with M[0] bit set */
1050            return -1;
1051        }
1052        return extract32(spsr, 2, 2);
1053    }
1054}
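
/*
 * Example of the SPSR.M decoding above: for an AArch64 SPSR (PSTATE.nRW
 * clear), M[3:2] give the target EL and M[0] selects the stack pointer,
 * so M[3:0] == 0b0101 (EL1h) returns 1, while M[3:0] == 0b0001 (the
 * illegal "EL0h" encoding) or any value with the reserved M[1] bit set
 * returns -1.
 */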
1055
1056void HELPER(exception_return)(CPUARMState *env)
1057{
1058    int cur_el = arm_current_el(env);
1059    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
1060    uint32_t spsr = env->banked_spsr[spsr_idx];
1061    int new_el;
1062    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
1063
1064    aarch64_save_sp(env, cur_el);
1065
1066    arm_clear_exclusive(env);
1067
1068    /* We must squash the PSTATE.SS bit to zero unless both of the
1069     * following hold:
1070     *  1. debug exceptions are currently disabled
1071     *  2. singlestep will be active in the EL we return to
1072     * We check 1 here and 2 after we've done the pstate/cpsr write() to
1073     * transition to the EL we're going to.
1074     */
1075    if (arm_generate_debug_exceptions(env)) {
1076        spsr &= ~PSTATE_SS;
1077    }
1078
1079    new_el = el_from_spsr(spsr);
1080    if (new_el == -1) {
1081        goto illegal_return;
1082    }
1083    if (new_el > cur_el
1084        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
1085        /* Disallow return to an EL which is unimplemented or higher
1086         * than the current one.
1087         */
1088        goto illegal_return;
1089    }
1090
1091    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
1092        /* Return to an EL which is configured for a different register width */
1093        goto illegal_return;
1094    }
1095
1096    if (new_el == 2 && arm_is_secure_below_el3(env)) {
1097        /* Return to the non-existent secure-EL2 */
1098        goto illegal_return;
1099    }
1100
1101    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
1102        && !arm_is_secure_below_el3(env)) {
1103        goto illegal_return;
1104    }
1105
1106    qemu_mutex_lock_iothread();
1107    arm_call_pre_el_change_hook(arm_env_get_cpu(env));
1108    qemu_mutex_unlock_iothread();
1109
1110    if (!return_to_aa64) {
1111        env->aarch64 = 0;
1112        /* We do a raw CPSR write because aarch64_sync_64_to_32()
1113         * will sort the register banks out for us, and we've already
1114         * caught all the bad-mode cases in el_from_spsr().
1115         */
1116        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
1117        if (!arm_singlestep_active(env)) {
1118            env->uncached_cpsr &= ~PSTATE_SS;
1119        }
1120        aarch64_sync_64_to_32(env);
1121
1122        if (spsr & CPSR_T) {
1123            env->regs[15] = env->elr_el[cur_el] & ~0x1;
1124        } else {
1125            env->regs[15] = env->elr_el[cur_el] & ~0x3;
1126        }
1127        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
1128                      "AArch32 EL%d PC 0x%" PRIx32 "\n",
1129                      cur_el, new_el, env->regs[15]);
1130    } else {
1131        env->aarch64 = 1;
1132        pstate_write(env, spsr);
1133        if (!arm_singlestep_active(env)) {
1134            env->pstate &= ~PSTATE_SS;
1135        }
1136        aarch64_restore_sp(env, new_el);
1137        env->pc = env->elr_el[cur_el];
1138        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
1139                      "AArch64 EL%d PC 0x%" PRIx64 "\n",
1140                      cur_el, new_el, env->pc);
1141    }
1142    /*
1143     * Note that cur_el can never be 0.  If new_el is 0, then
1144     * el0_a64 is return_to_aa64, else el0_a64 is ignored.
1145     */
1146    aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);
1147
1148    qemu_mutex_lock_iothread();
1149    arm_call_el_change_hook(arm_env_get_cpu(env));
1150    qemu_mutex_unlock_iothread();
1151
1152    return;
1153
1154illegal_return:
1155    /* Illegal return events of various kinds have architecturally
1156     * mandated behaviour:
1157     * restore NZCV and DAIF from SPSR_ELx
1158     * set PSTATE.IL
1159     * restore PC from ELR_ELx
1160     * no change to exception level, execution state or stack pointer
1161     */
1162    env->pstate |= PSTATE_IL;
1163    env->pc = env->elr_el[cur_el];
1164    spsr &= PSTATE_NZCV | PSTATE_DAIF;
1165    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
1166    pstate_write(env, spsr);
1167    if (!arm_singlestep_active(env)) {
1168        env->pstate &= ~PSTATE_SS;
1169    }
1170    qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
1171                  "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
1172}
1173
1174/* Return true if the linked breakpoint entry lbn passes its checks */
1175static bool linked_bp_matches(ARMCPU *cpu, int lbn)
1176{
1177    CPUARMState *env = &cpu->env;
1178    uint64_t bcr = env->cp15.dbgbcr[lbn];
1179    int brps = extract32(cpu->dbgdidr, 24, 4);
1180    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
1181    int bt;
1182    uint32_t contextidr;
1183
1184    /* Links to unimplemented or non-context aware breakpoints are
1185     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
1186     * as if linked to an UNKNOWN context-aware breakpoint (in which
1187     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
1188     * We choose the former.
1189     */
1190    if (lbn > brps || lbn < (brps - ctx_cmps)) {
1191        return false;
1192    }
1193
1194    bcr = env->cp15.dbgbcr[lbn];
1195
1196    if (extract64(bcr, 0, 1) == 0) {
1197        /* Linked breakpoint disabled : generate no events */
1198        return false;
1199    }
1200
1201    bt = extract64(bcr, 20, 4);
1202
1203    /* We match the whole register even if this is AArch32 using the
1204     * short descriptor format (in which case it holds both PROCID and ASID),
1205     * since we don't implement the optional v7 context ID masking.
1206     */
1207    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
1208
1209    switch (bt) {
1210    case 3: /* linked context ID match */
1211        if (arm_current_el(env) > 1) {
1212            /* Context matches never fire in EL2 or (AArch64) EL3 */
1213            return false;
1214        }
1215        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
1216    case 5: /* linked address mismatch (reserved in AArch64) */
1217    case 9: /* linked VMID match (reserved if no EL2) */
1218    case 11: /* linked context ID and VMID match (reserved if no EL2) */
1219    default:
1220        /* Links to Unlinked context breakpoints must generate no
1221         * events; we choose to do the same for reserved values too.
1222         */
1223        return false;
1224    }
1225
1226    return false;
1227}
1228
1229static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
1230{
1231    CPUARMState *env = &cpu->env;
1232    uint64_t cr;
1233    int pac, hmc, ssc, wt, lbn;
1234    /* Note that for watchpoints the check is against the CPU security
1235     * state, not the S/NS attribute on the offending data access.
1236     */
1237    bool is_secure = arm_is_secure(env);
1238    int access_el = arm_current_el(env);
1239
1240    if (is_wp) {
1241        CPUWatchpoint *wp = env->cpu_watchpoint[n];
1242
1243        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
1244            return false;
1245        }
1246        cr = env->cp15.dbgwcr[n];
1247        if (wp->hitattrs.user) {
1248            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
1249             * match watchpoints as if they were accesses done at EL0, even if
1250             * the CPU is at EL1 or higher.
1251             */
1252            access_el = 0;
1253        }
1254    } else {
1255        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
1256
1257        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
1258            return false;
1259        }
1260        cr = env->cp15.dbgbcr[n];
1261    }
1262    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
1263     * enabled and that the address and access type match; for breakpoints
1264     * we know the address matched; check the remaining fields, including
1265     * linked breakpoints. We rely on WCR and BCR having the same layout
1266     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
1267     * Note that some combinations of {PAC, HMC, SSC} are reserved and
1268     * must act either like some valid combination or as if the watchpoint
1269     * were disabled. We choose the former, and use this together with
1270     * the fact that EL3 must always be Secure and EL2 must always be
1271     * Non-Secure to simplify the code slightly compared to the full
1272     * table in the ARM ARM.
1273     */
1274    pac = extract64(cr, 1, 2);
1275    hmc = extract64(cr, 13, 1);
1276    ssc = extract64(cr, 14, 2);
1277
1278    switch (ssc) {
1279    case 0:
1280        break;
1281    case 1:
1282    case 3:
1283        if (is_secure) {
1284            return false;
1285        }
1286        break;
1287    case 2:
1288        if (!is_secure) {
1289            return false;
1290        }
1291        break;
1292    }
1293
1294    switch (access_el) {
1295    case 3:
1296    case 2:
1297        if (!hmc) {
1298            return false;
1299        }
1300        break;
1301    case 1:
1302        if (extract32(pac, 0, 1) == 0) {
1303            return false;
1304        }
1305        break;
1306    case 0:
1307        if (extract32(pac, 1, 1) == 0) {
1308            return false;
1309        }
1310        break;
1311    default:
1312        g_assert_not_reached();
1313    }
1314
1315    wt = extract64(cr, 20, 1);
1316    lbn = extract64(cr, 16, 4);
1317
1318    if (wt && !linked_bp_matches(cpu, lbn)) {
1319        return false;
1320    }
1321
1322    return true;
1323}
1324
1325static bool check_watchpoints(ARMCPU *cpu)
1326{
1327    CPUARMState *env = &cpu->env;
1328    int n;
1329
1330    /* If watchpoints are disabled globally or we can't take debug
1331     * exceptions here then watchpoint firings are ignored.
1332     */
1333    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
1334        || !arm_generate_debug_exceptions(env)) {
1335        return false;
1336    }
1337
1338    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
1339        if (bp_wp_matches(cpu, n, true)) {
1340            return true;
1341        }
1342    }
1343    return false;
1344}
1345
1346static bool check_breakpoints(ARMCPU *cpu)
1347{
1348    CPUARMState *env = &cpu->env;
1349    int n;
1350
1351    /* If breakpoints are disabled globally or we can't take debug
1352     * exceptions here then breakpoint firings are ignored.
1353     */
1354    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
1355        || !arm_generate_debug_exceptions(env)) {
1356        return false;
1357    }
1358
1359    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
1360        if (bp_wp_matches(cpu, n, false)) {
1361            return true;
1362        }
1363    }
1364    return false;
1365}
1366
1367void HELPER(check_breakpoints)(CPUARMState *env)
1368{
1369    ARMCPU *cpu = arm_env_get_cpu(env);
1370
1371    if (check_breakpoints(cpu)) {
 1372        HELPER(exception_internal)(env, EXCP_DEBUG);
1373    }
1374}
1375
1376bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
1377{
1378    /* Called by core code when a CPU watchpoint fires; need to check if this
1379     * is also an architectural watchpoint match.
1380     */
1381    ARMCPU *cpu = ARM_CPU(cs);
1382
1383    return check_watchpoints(cpu);
1384}
1385
1386vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
1387{
1388    ARMCPU *cpu = ARM_CPU(cs);
1389    CPUARMState *env = &cpu->env;
1390
1391    /* In BE32 system mode, target memory is stored byteswapped (on a
1392     * little-endian host system), and by the time we reach here (via an
1393     * opcode helper) the addresses of subword accesses have been adjusted
1394     * to account for that, which means that watchpoints will not match.
1395     * Undo the adjustment here.
1396     */
1397    if (arm_sctlr_b(env)) {
1398        if (len == 1) {
1399            addr ^= 3;
1400        } else if (len == 2) {
1401            addr ^= 2;
1402        }
1403    }
1404
1405    return addr;
1406}
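
/*
 * Example of the BE32 adjustment above: a guest byte access to 0x1001 is
 * actually performed at 0x1002 once the subword byteswap adjustment has
 * been applied, and XOR-ing the length-1 address with 3 here maps it back
 * to 0x1001, so the watchpoint comparison sees the address the guest used.
 */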
1407
1408void arm_debug_excp_handler(CPUState *cs)
1409{
1410    /* Called by core code when a watchpoint or breakpoint fires;
1411     * need to check which one and raise the appropriate exception.
1412     */
1413    ARMCPU *cpu = ARM_CPU(cs);
1414    CPUARMState *env = &cpu->env;
1415    CPUWatchpoint *wp_hit = cs->watchpoint_hit;
1416
1417    if (wp_hit) {
1418        if (wp_hit->flags & BP_CPU) {
1419            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
1420            bool same_el = arm_debug_target_el(env) == arm_current_el(env);
1421
1422            cs->watchpoint_hit = NULL;
1423
1424            env->exception.fsr = arm_debug_exception_fsr(env);
1425            env->exception.vaddress = wp_hit->hitaddr;
1426            raise_exception(env, EXCP_DATA_ABORT,
1427                    syn_watchpoint(same_el, 0, wnr),
1428                    arm_debug_target_el(env));
1429        }
1430    } else {
1431        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
1432        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
1433
1434        /* (1) GDB breakpoints should be handled first.
1435         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
1436         * since singlestep is also done by generating a debug internal
1437         * exception.
1438         */
1439        if (cpu_breakpoint_test(cs, pc, BP_GDB)
1440            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
1441            return;
1442        }
1443
1444        env->exception.fsr = arm_debug_exception_fsr(env);
1445        /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
1446         * values to the guest that it shouldn't be able to see at its
1447         * exception/security level.
1448         */
1449        env->exception.vaddress = 0;
1450        raise_exception(env, EXCP_PREFETCH_ABORT,
1451                        syn_breakpoint(same_el),
1452                        arm_debug_target_el(env));
1453    }
1454}
1455
1456/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
1457   The only way to do that in TCG is a conditional branch, which clobbers
1458   all our temporaries.  For now implement these as helper functions.  */
1459
1460/* Similarly for variable shift instructions.  */
1461
1462uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1463{
1464    int shift = i & 0xff;
1465    if (shift >= 32) {
1466        if (shift == 32)
1467            env->CF = x & 1;
1468        else
1469            env->CF = 0;
1470        return 0;
1471    } else if (shift != 0) {
1472        env->CF = (x >> (32 - shift)) & 1;
1473        return x << shift;
1474    }
1475    return x;
1476}
1477
1478uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1479{
1480    int shift = i & 0xff;
1481    if (shift >= 32) {
1482        if (shift == 32)
1483            env->CF = (x >> 31) & 1;
1484        else
1485            env->CF = 0;
1486        return 0;
1487    } else if (shift != 0) {
1488        env->CF = (x >> (shift - 1)) & 1;
1489        return x >> shift;
1490    }
1491    return x;
1492}
1493
1494uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1495{
1496    int shift = i & 0xff;
1497    if (shift >= 32) {
1498        env->CF = (x >> 31) & 1;
1499        return (int32_t)x >> 31;
1500    } else if (shift != 0) {
1501        env->CF = (x >> (shift - 1)) & 1;
1502        return (int32_t)x >> shift;
1503    }
1504    return x;
1505}
1506
1507uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1508{
1509    int shift1, shift;
1510    shift1 = i & 0xff;
1511    shift = shift1 & 0x1f;
1512    if (shift == 0) {
1513        if (shift1 != 0)
1514            env->CF = (x >> 31) & 1;
1515        return x;
1516    } else {
1517        env->CF = (x >> (shift - 1)) & 1;
1518        return ((uint32_t)x >> shift) | (x << (32 - shift));
1519    }
1520}
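
/*
 * Examples of the carry-out convention used by the helpers above:
 *   shl_cc(env, 0x80000001, 1)  -> 0x00000002, CF = 1 (old bit 31)
 *   shl_cc(env, 0x80000001, 32) -> 0x00000000, CF = 1 (old bit 0)
 *   shr_cc(env, 0x00000003, 1)  -> 0x00000001, CF = 1 (old bit 0)
 *   ror_cc(env, 0x80000000, 32) -> 0x80000000, CF = 1 (bit 31)
 * i.e. CF receives the last bit shifted out, and a nonzero rotate by a
 * multiple of 32 leaves the value unchanged but still updates CF from
 * bit 31.
 */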
1521