qemu/target/arm/op_helper.c
/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
     * to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

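/* Table lookup for the Neon VTBL/VTBX instructions: each byte of ireg
 * indexes into the byte table at vn; indices of maxindex or above take
 * the corresponding byte from def instead.
 */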
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
                          uint32_t maxindex)
{
    uint32_t val, shift;
    uint64_t *table = vn;

    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        uint32_t index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /* ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome.  */
        syn |= template_syn;
    }
    return syn;
}

static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
                          int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /* LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /* Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Try to fill the TLB and raise an exception on error. If retaddr is
 * NULL, it means that the function was called from C code (i.e. not
 * from generated code or from helper.c).
 */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);

        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr, true);

        deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
    }
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

#endif /* !defined(CONFIG_USER_ONLY) */

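/* 32-bit add that sets the sticky Q flag on signed overflow but returns
 * the unsaturated result.
 */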
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

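/* Signed saturating add/subtract: on overflow set Q and clamp the result
 * to INT32_MAX or INT32_MIN according to the sign of the first operand.
 */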
uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

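/* Saturating doubling of a signed 32-bit value (the doubling step used by
 * QDADD/QDSUB), setting Q if the result saturates.
 */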
uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

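/* Toggle the CPSR.E (data endianness) bit, for the SETEND instruction. */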
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        /* In AArch32 the PC is regs[15], not env->pc */
        if (is_a64(env)) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        switch (regno) {
        case 17: /* ELR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

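/* Write a banked register for the MSR (banked) instruction; mrs_banked()
 * below is the read counterpart. regno selects the register: 8-12 are
 * R8-R12, 13 and 14 are the banked SP and LR, 16 is the SPSR and 17 is
 * ELR_hyp.
 */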
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

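/* Runtime check for accesses to coprocessor/system registers that cannot be
 * fully validated at translate time: apply the XScale CPAR check and the
 * register's accessfn, raising the appropriate exception if access is denied.
 */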
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     *  extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* If PSCI is enabled and this looks like a valid PSCI call then
     * suppress the UNDEF -- we'll catch the SMC exception and
     * implement the PSCI call behaviour there.
     */
    if (undef && !arm_is_psci_call(cpu, EXCP_SMC)) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}

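/* Handle an AArch64 exception return (ERET): validate the SPSR for the
 * current EL and either restore PSTATE/CPSR and the PC from SPSR_ELx and
 * ELR_ELx, or treat the event as an illegal exception return.
 */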
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    arm_clear_exclusive(env);

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch32 EL%d PC 0x%" PRIx32 "\n",
                      cur_el, new_el, env->regs[15]);
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch64 EL%d PC 0x%" PRIx64 "\n",
                      cur_el, new_el, env->pc);
    }

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * set PSTATE.IL
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
                  "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    return false;
}

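/* Return true if breakpoint/watchpoint n (watchpoint when is_wp) fires in
 * the current CPU state, checking the security-state, privilege-level and
 * linked-breakpoint fields of the corresponding DBGBCR/DBGWCR register.
 */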
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal)(env, EXCP_DEBUG);
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                    syn_watchpoint(same_el, 0, wnr),
                    arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

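/* Shift-by-register helpers: the shift count is the low byte of i, and
 * CF is updated with the last bit shifted out, matching the ARM
 * LSL/LSR/ASR/ROR (register) flag-setting forms.
 */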
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}