qemu/target/arm/op_helper.c
   1/*
   2 *  ARM helper routines
   3 *
   4 *  Copyright (c) 2005-2007 CodeSourcery, LLC
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19#include "qemu/osdep.h"
  20#include "qemu/log.h"
  21#include "qemu/main-loop.h"
  22#include "cpu.h"
  23#include "exec/helper-proto.h"
  24#include "internals.h"
  25#include "exec/exec-all.h"
  26#include "exec/cpu_ldst.h"
  27
  28#define SIGNBIT (uint32_t)0x80000000
  29#define SIGNBIT64 ((uint64_t)1 << 63)
  30
  31static void raise_exception(CPUARMState *env, uint32_t excp,
  32                            uint32_t syndrome, uint32_t target_el)
  33{
  34    CPUState *cs = CPU(arm_env_get_cpu(env));
  35
  36    assert(!excp_is_internal(excp));
  37    cs->exception_index = excp;
  38    env->exception.syndrome = syndrome;
  39    env->exception.target_el = target_el;
  40    cpu_loop_exit(cs);
  41}
  42
  43static int exception_target_el(CPUARMState *env)
  44{
  45    int target_el = MAX(1, arm_current_el(env));
  46
  47    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
  48     * to EL3 in this case.
  49     */
  50    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
  51        target_el = 3;
  52    }
  53
  54    return target_el;
  55}
  56
  57uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
  58                          uint32_t maxindex)
  59{
  60    uint32_t val, shift;
  61    uint64_t *table = vn;
  62
  63    val = 0;
  64    for (shift = 0; shift < 32; shift += 8) {
  65        uint32_t index = (ireg >> shift) & 0xff;
  66        if (index < maxindex) {
  67            uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
  68            val |= tmp << shift;
  69        } else {
  70            val |= def & (0xff << shift);
  71        }
  72    }
  73    return val;
  74}
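/* For illustration: with maxindex == 8 (one 64-bit table register) and
 * ireg == 0x03020100, each byte of ireg selects table bytes 3, 2, 1 and 0,
 * so the result is the low 32 bits of table[0]; an index byte >= maxindex
 * (say 0x10) takes the corresponding byte of 'def' instead.
 */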
  75
  76#if !defined(CONFIG_USER_ONLY)
  77
  78static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
  79                                            unsigned int target_el,
  80                                            bool same_el, bool ea,
  81                                            bool s1ptw, bool is_write,
  82                                            int fsc)
  83{
  84    uint32_t syn;
  85
  86    /* ISV is only set for data aborts routed to EL2 and
  87     * never for stage-1 page table walks faulting on stage 2.
  88     *
  89     * Furthermore, ISV is only set for certain kinds of load/stores.
  90     * If the template syndrome does not have ISV set, we should leave
  91     * it cleared.
  92     *
  93     * See ARMv8 specs, D7-1974:
  94     * ISS encoding for an exception from a Data Abort, the
  95     * ISV field.
  96     */
  97    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
  98        syn = syn_data_abort_no_iss(same_el,
  99                                    ea, 0, s1ptw, is_write, fsc);
 100    } else {
 101        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
 102         * syndrome created at translation time.
 103         * Now we create the runtime syndrome with the remaining fields.
 104         */
 105        syn = syn_data_abort_with_iss(same_el,
 106                                      0, 0, 0, 0, 0,
 107                                      ea, 0, s1ptw, is_write, fsc,
 108                                      false);
 109        /* Merge the runtime syndrome with the template syndrome.  */
 110        syn |= template_syn;
 111    }
 112    return syn;
 113}
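/* In short: a template syndrome with ISV set survives only for a data abort
 * routed to EL2 that is not a stage-1 walk fault; every other case falls
 * back to the no-ISS encoding above.
 */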
 114
 115static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
 116                          int mmu_idx, ARMMMUFaultInfo *fi)
 117{
 118    CPUARMState *env = &cpu->env;
 119    int target_el;
 120    bool same_el;
 121    uint32_t syn, exc, fsr, fsc;
 122    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
 123
 124    target_el = exception_target_el(env);
 125    if (fi->stage2) {
 126        target_el = 2;
 127        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
 128    }
 129    same_el = (arm_current_el(env) == target_el);
 130
 131    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
 132        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /* LPAE format fault status register: the bottom 6 bits are the
         * status code, in the same form as needed for the syndrome.
         */
 136        fsr = arm_fi_to_lfsc(fi);
 137        fsc = extract32(fsr, 0, 6);
 138    } else {
 139        fsr = arm_fi_to_sfsc(fi);
 140        /* Short format FSR : this fault will never actually be reported
 141         * to an EL that uses a syndrome register. Use a (currently)
 142         * reserved FSR code in case the constructed syndrome does leak
 143         * into the guest somehow.
 144         */
 145        fsc = 0x3f;
 146    }
 147
 148    if (access_type == MMU_INST_FETCH) {
 149        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
 150        exc = EXCP_PREFETCH_ABORT;
 151    } else {
 152        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
 153                                   same_el, fi->ea, fi->s1ptw,
 154                                   access_type == MMU_DATA_STORE,
 155                                   fsc);
 156        if (access_type == MMU_DATA_STORE
 157            && arm_feature(env, ARM_FEATURE_V6)) {
 158            fsr |= (1 << 11);
 159        }
 160        exc = EXCP_DATA_ABORT;
 161    }
 162
 163    env->exception.vaddress = addr;
 164    env->exception.fsr = fsr;
 165    raise_exception(env, exc, syn, target_el);
 166}
 167
/* Try to fill the TLB; if that fails, deliver the fault as a guest
 * exception. If retaddr is NULL, the function was called from C code
 * (i.e. not from generated code or from helper.c).
 */
 172void tlb_fill(CPUState *cs, target_ulong addr, int size,
 173              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 174{
 175    bool ret;
 176    ARMMMUFaultInfo fi = {};
 177
 178    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
 179    if (unlikely(ret)) {
 180        ARMCPU *cpu = ARM_CPU(cs);
 181
 182        /* now we have a real cpu fault */
 183        cpu_restore_state(cs, retaddr, true);
 184
 185        deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
 186    }
 187}
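/* Note that cpu_restore_state() uses the host return address to resynchronize
 * the guest CPU state (PC and flags) to the faulting instruction, so the
 * exception delivered by deliver_fault() reports the correct guest PC.
 */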
 188
 189/* Raise a data fault alignment exception for the specified virtual address */
 190void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
 191                                 MMUAccessType access_type,
 192                                 int mmu_idx, uintptr_t retaddr)
 193{
 194    ARMCPU *cpu = ARM_CPU(cs);
 195    ARMMMUFaultInfo fi = {};
 196
 197    /* now we have a real cpu fault */
 198    cpu_restore_state(cs, retaddr, true);
 199
 200    fi.type = ARMFault_Alignment;
 201    deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
 202}
 203
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (e.g. "no device/memory present at address") by raising an external abort
 * exception.
 */
 208void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
 209                                   vaddr addr, unsigned size,
 210                                   MMUAccessType access_type,
 211                                   int mmu_idx, MemTxAttrs attrs,
 212                                   MemTxResult response, uintptr_t retaddr)
 213{
 214    ARMCPU *cpu = ARM_CPU(cs);
 215    ARMMMUFaultInfo fi = {};
 216
 217    /* now we have a real cpu fault */
 218    cpu_restore_state(cs, retaddr, true);
 219
 220    fi.ea = arm_extabort_type(response);
 221    fi.type = ARMFault_SyncExternal;
 222    deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
 223}
 224
 225#endif /* !defined(CONFIG_USER_ONLY) */
 226
 227uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
 228{
 229    uint32_t res = a + b;
 230    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
 231        env->QF = 1;
 232    return res;
 233}
 234
 235uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
 236{
 237    uint32_t res = a + b;
 238    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
 239        env->QF = 1;
 240        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
 241    }
 242    return res;
 243}
 244
 245uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
 246{
 247    uint32_t res = a - b;
 248    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
 249        env->QF = 1;
 250        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
 251    }
 252    return res;
 253}
 254
 255uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
 256{
 257    uint32_t res;
 258    if (val >= 0x40000000) {
 259        res = ~SIGNBIT;
 260        env->QF = 1;
 261    } else if (val <= (int32_t)0xc0000000) {
 262        res = SIGNBIT;
 263        env->QF = 1;
 264    } else {
 265        res = val << 1;
 266    }
 267    return res;
 268}
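/* e.g. double_saturate(env, 0x50000000) would overflow when doubled, so it
 * returns 0x7fffffff and sets QF; 0xd0000000 (-0x30000000) doubles to
 * 0xa0000000 without saturating.
 */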
 269
 270uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
 271{
 272    uint32_t res = a + b;
 273    if (res < a) {
 274        env->QF = 1;
 275        res = ~0;
 276    }
 277    return res;
 278}
 279
 280uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
 281{
 282    uint32_t res = a - b;
 283    if (res > a) {
 284        env->QF = 1;
 285        res = 0;
 286    }
 287    return res;
 288}
 289
 290/* Signed saturation.  */
 291static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
 292{
 293    int32_t top;
 294    uint32_t mask;
 295
 296    top = val >> shift;
 297    mask = (1u << shift) - 1;
 298    if (top > 0) {
 299        env->QF = 1;
 300        return mask;
 301    } else if (top < -1) {
 302        env->QF = 1;
 303        return ~mask;
 304    }
 305    return val;
 306}
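/* e.g. do_ssat(env, 1000, 7) clamps to the signed 8-bit range, returning 127
 * with QF set; do_ssat(env, -1000, 7) returns -128 (0xffffff80). A value
 * already in range, such as -100, comes back unchanged.
 */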
 307
 308/* Unsigned saturation.  */
 309static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
 310{
 311    uint32_t max;
 312
 313    max = (1u << shift) - 1;
 314    if (val < 0) {
 315        env->QF = 1;
 316        return 0;
 317    } else if (val > max) {
 318        env->QF = 1;
 319        return max;
 320    }
 321    return val;
 322}
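/* e.g. do_usat(env, -5, 8) returns 0 and do_usat(env, 300, 8) returns 255,
 * both setting QF; do_usat(env, 200, 8) returns 200 unchanged.
 */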
 323
 324/* Signed saturate.  */
 325uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
 326{
 327    return do_ssat(env, x, shift);
 328}
 329
 330/* Dual halfword signed saturate.  */
 331uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
 332{
 333    uint32_t res;
 334
 335    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
 336    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
 337    return res;
 338}
 339
 340/* Unsigned saturate.  */
 341uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
 342{
 343    return do_usat(env, x, shift);
 344}
 345
 346/* Dual halfword unsigned saturate.  */
 347uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
 348{
 349    uint32_t res;
 350
 351    res = (uint16_t)do_usat(env, (int16_t)x, shift);
 352    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
 353    return res;
 354}
 355
 356void HELPER(setend)(CPUARMState *env)
 357{
 358    env->uncached_cpsr ^= CPSR_E;
 359}
 360
 361/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 362 * The function returns the target EL (1-3) if the instruction is to be trapped;
 363 * otherwise it returns 0 indicating it is not trapped.
 364 */
 365static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
 366{
 367    int cur_el = arm_current_el(env);
 368    uint64_t mask;
 369
 370    if (arm_feature(env, ARM_FEATURE_M)) {
 371        /* M profile cores can never trap WFI/WFE. */
 372        return 0;
 373    }
 374
    /* If we are currently in EL0, check whether SCTLR is set up to trap
     * WFx instructions to EL1. These trap bits don't exist in v7.
     */
 378    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
 379        int target_el;
 380
 381        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
 382        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 are at EL3 */
 384            target_el = 3;
 385        } else {
 386            target_el = 1;
 387        }
 388
 389        if (!(env->cp15.sctlr_el[target_el] & mask)) {
 390            return target_el;
 391        }
 392    }
 393
    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist, the
     * bits will be zero, indicating no trap.
     */
 398    if (cur_el < 2 && !arm_is_secure(env)) {
 399        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
 400        if (env->cp15.hcr_el2 & mask) {
 401            return 2;
 402        }
 403    }
 404
 405    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
 406    if (cur_el < 3) {
 407        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
 408        if (env->cp15.scr_el3 & mask) {
 409            return 3;
 410        }
 411    }
 412
 413    return 0;
 414}
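/* For example, a WFI at EL0 traps to EL1 when SCTLR_EL1.nTWI is 0 (to EL3
 * instead if we are Secure and EL3 is AArch32). Failing that, HCR_EL2.TWI/TWE
 * can route the trap to EL2 from non-secure EL0/EL1, and SCR_EL3.TWI/TWE to
 * EL3 from EL0-EL2; the checks above are applied in that priority order.
 */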
 415
 416void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
 417{
 418    CPUState *cs = CPU(arm_env_get_cpu(env));
 419    int target_el = check_wfx_trap(env, false);
 420
 421    if (cpu_has_work(cs)) {
 422        /* Don't bother to go into our "low power state" if
 423         * we would just wake up immediately.
 424         */
 425        return;
 426    }
 427
 428    if (target_el) {
 429        env->pc -= insn_len;
 430        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
 431                        target_el);
 432    }
 433
 434    cs->exception_index = EXCP_HLT;
 435    cs->halted = 1;
 436    cpu_loop_exit(cs);
 437}
 438
 439void HELPER(wfe)(CPUARMState *env)
 440{
 441    /* This is a hint instruction that is semantically different
 442     * from YIELD even though we currently implement it identically.
 443     * Don't actually halt the CPU, just yield back to top
 444     * level loop. This is not going into a "low power state"
 445     * (ie halting until some event occurs), so we never take
 446     * a configurable trap to a different exception level.
 447     */
 448    HELPER(yield)(env);
 449}
 450
 451void HELPER(yield)(CPUARMState *env)
 452{
 453    ARMCPU *cpu = arm_env_get_cpu(env);
 454    CPUState *cs = CPU(cpu);
 455
 456    /* This is a non-trappable hint instruction that generally indicates
 457     * that the guest is currently busy-looping. Yield control back to the
 458     * top level loop so that a more deserving VCPU has a chance to run.
 459     */
 460    cs->exception_index = EXCP_YIELD;
 461    cpu_loop_exit(cs);
 462}
 463
 464/* Raise an internal-to-QEMU exception. This is limited to only
 465 * those EXCP values which are special cases for QEMU to interrupt
 466 * execution and not to be used for exceptions which are passed to
 467 * the guest (those must all have syndrome information and thus should
 468 * use exception_with_syndrome).
 469 */
 470void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
 471{
 472    CPUState *cs = CPU(arm_env_get_cpu(env));
 473
 474    assert(excp_is_internal(excp));
 475    cs->exception_index = excp;
 476    cpu_loop_exit(cs);
 477}
 478
 479/* Raise an exception with the specified syndrome register value */
 480void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
 481                                     uint32_t syndrome, uint32_t target_el)
 482{
 483    raise_exception(env, excp, syndrome, target_el);
 484}
 485
 486/* Raise an EXCP_BKPT with the specified syndrome register value,
 487 * targeting the correct exception level for debug exceptions.
 488 */
 489void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
 490{
 491    /* FSR will only be used if the debug target EL is AArch32. */
 492    env->exception.fsr = arm_debug_exception_fsr(env);
 493    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
 494     * values to the guest that it shouldn't be able to see at its
 495     * exception/security level.
 496     */
 497    env->exception.vaddress = 0;
 498    raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
 499}
 500
 501uint32_t HELPER(cpsr_read)(CPUARMState *env)
 502{
 503    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
 504}
 505
 506void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
 507{
 508    cpsr_write(env, val, mask, CPSRWriteByInstr);
 509}
 510
 511/* Write the CPSR for a 32-bit exception return */
 512void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
 513{
 514    qemu_mutex_lock_iothread();
 515    arm_call_pre_el_change_hook(arm_env_get_cpu(env));
 516    qemu_mutex_unlock_iothread();
 517
 518    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
 519
 520    /* Generated code has already stored the new PC value, but
 521     * without masking out its low bits, because which bits need
 522     * masking depends on whether we're returning to Thumb or ARM
 523     * state. Do the masking now.
 524     */
 525    env->regs[15] &= (env->thumb ? ~1 : ~3);
 526
 527    qemu_mutex_lock_iothread();
 528    arm_call_el_change_hook(arm_env_get_cpu(env));
 529    qemu_mutex_unlock_iothread();
 530}
 531
 532/* Access to user mode registers from privileged modes.  */
 533uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
 534{
 535    uint32_t val;
 536
 537    if (regno == 13) {
 538        val = env->banked_r13[BANK_USRSYS];
 539    } else if (regno == 14) {
 540        val = env->banked_r14[BANK_USRSYS];
 541    } else if (regno >= 8
 542               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
 543        val = env->usr_regs[regno - 8];
 544    } else {
 545        val = env->regs[regno];
 546    }
 547    return val;
 548}
 549
 550void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
 551{
 552    if (regno == 13) {
 553        env->banked_r13[BANK_USRSYS] = val;
 554    } else if (regno == 14) {
 555        env->banked_r14[BANK_USRSYS] = val;
 556    } else if (regno >= 8
 557               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
 558        env->usr_regs[regno - 8] = val;
 559    } else {
 560        env->regs[regno] = val;
 561    }
 562}
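/* The two helpers above are used for accesses such as the LDM/STM "user
 * registers" forms: R13 and R14 always come from the USR/SYS bank, while
 * R8-R12 are remapped only when the current mode is FIQ, the only mode
 * that banks those registers.
 */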
 563
 564void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
 565{
 566    if ((env->uncached_cpsr & CPSR_M) == mode) {
 567        env->regs[13] = val;
 568    } else {
 569        env->banked_r13[bank_number(mode)] = val;
 570    }
 571}
 572
 573uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
 574{
 575    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
 576        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
 577         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
 578         */
 579        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 580                        exception_target_el(env));
 581    }
 582
 583    if ((env->uncached_cpsr & CPSR_M) == mode) {
 584        return env->regs[13];
 585    } else {
 586        return env->banked_r13[bank_number(mode)];
 587    }
 588}
 589
 590static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
 591                                      uint32_t regno)
 592{
 593    /* Raise an exception if the requested access is one of the UNPREDICTABLE
 594     * cases; otherwise return. This broadly corresponds to the pseudocode
 595     * BankedRegisterAccessValid() and SPSRAccessValid(),
 596     * except that we have already handled some cases at translate time.
 597     */
 598    int curmode = env->uncached_cpsr & CPSR_M;
 599
 600    if (curmode == tgtmode) {
 601        goto undef;
 602    }
 603
 604    if (tgtmode == ARM_CPU_MODE_USR) {
 605        switch (regno) {
 606        case 8 ... 12:
 607            if (curmode != ARM_CPU_MODE_FIQ) {
 608                goto undef;
 609            }
 610            break;
 611        case 13:
 612            if (curmode == ARM_CPU_MODE_SYS) {
 613                goto undef;
 614            }
 615            break;
 616        case 14:
 617            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
 618                goto undef;
 619            }
 620            break;
 621        default:
 622            break;
 623        }
 624    }
 625
 626    if (tgtmode == ARM_CPU_MODE_HYP) {
 627        switch (regno) {
 628        case 17: /* ELR_Hyp */
 629            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
 630                goto undef;
 631            }
 632            break;
 633        default:
 634            if (curmode != ARM_CPU_MODE_MON) {
 635                goto undef;
 636            }
 637            break;
 638        }
 639    }
 640
 641    return;
 642
 643undef:
 644    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 645                    exception_target_el(env));
 646}
 647
 648void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
 649                        uint32_t regno)
 650{
 651    msr_mrs_banked_exc_checks(env, tgtmode, regno);
 652
 653    switch (regno) {
 654    case 16: /* SPSRs */
 655        env->banked_spsr[bank_number(tgtmode)] = value;
 656        break;
 657    case 17: /* ELR_Hyp */
 658        env->elr_el[2] = value;
 659        break;
 660    case 13:
 661        env->banked_r13[bank_number(tgtmode)] = value;
 662        break;
 663    case 14:
 664        env->banked_r14[bank_number(tgtmode)] = value;
 665        break;
 666    case 8 ... 12:
 667        switch (tgtmode) {
 668        case ARM_CPU_MODE_USR:
 669            env->usr_regs[regno - 8] = value;
 670            break;
 671        case ARM_CPU_MODE_FIQ:
 672            env->fiq_regs[regno - 8] = value;
 673            break;
 674        default:
 675            g_assert_not_reached();
 676        }
 677        break;
 678    default:
 679        g_assert_not_reached();
 680    }
 681}
 682
 683uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
 684{
 685    msr_mrs_banked_exc_checks(env, tgtmode, regno);
 686
 687    switch (regno) {
 688    case 16: /* SPSRs */
 689        return env->banked_spsr[bank_number(tgtmode)];
 690    case 17: /* ELR_Hyp */
 691        return env->elr_el[2];
 692    case 13:
 693        return env->banked_r13[bank_number(tgtmode)];
 694    case 14:
 695        return env->banked_r14[bank_number(tgtmode)];
 696    case 8 ... 12:
 697        switch (tgtmode) {
 698        case ARM_CPU_MODE_USR:
 699            return env->usr_regs[regno - 8];
 700        case ARM_CPU_MODE_FIQ:
 701            return env->fiq_regs[regno - 8];
 702        default:
 703            g_assert_not_reached();
 704        }
 705    default:
 706        g_assert_not_reached();
 707    }
 708}
 709
 710void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
 711                                 uint32_t isread)
 712{
 713    const ARMCPRegInfo *ri = rip;
 714    int target_el;
 715
 716    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
 717        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
 718        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
 719    }
 720
 721    if (!ri->accessfn) {
 722        return;
 723    }
 724
 725    switch (ri->accessfn(env, ri, isread)) {
 726    case CP_ACCESS_OK:
 727        return;
 728    case CP_ACCESS_TRAP:
 729        target_el = exception_target_el(env);
 730        break;
 731    case CP_ACCESS_TRAP_EL2:
 732        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
 733         * a bug in the access function.
 734         */
 735        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
 736        target_el = 2;
 737        break;
 738    case CP_ACCESS_TRAP_EL3:
 739        target_el = 3;
 740        break;
 741    case CP_ACCESS_TRAP_UNCATEGORIZED:
 742        target_el = exception_target_el(env);
 743        syndrome = syn_uncategorized();
 744        break;
 745    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
 746        target_el = 2;
 747        syndrome = syn_uncategorized();
 748        break;
 749    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
 750        target_el = 3;
 751        syndrome = syn_uncategorized();
 752        break;
 753    case CP_ACCESS_TRAP_FP_EL2:
 754        target_el = 2;
 755        /* Since we are an implementation that takes exceptions on a trapped
 756         * conditional insn only if the insn has passed its condition code
 757         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
 758         * (which is also the required value for AArch64 traps).
 759         */
 760        syndrome = syn_fp_access_trap(1, 0xe, false);
 761        break;
 762    case CP_ACCESS_TRAP_FP_EL3:
 763        target_el = 3;
 764        syndrome = syn_fp_access_trap(1, 0xe, false);
 765        break;
 766    default:
 767        g_assert_not_reached();
 768    }
 769
 770    raise_exception(env, EXCP_UDEF, syndrome, target_el);
 771}
 772
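/* The four helpers below simply dispatch to the coprocessor register's
 * readfn/writefn. Registers marked ARM_CP_IO may touch device or timer
 * state, so their hooks are called with the iothread (big QEMU) lock held.
 */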
 773void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
 774{
 775    const ARMCPRegInfo *ri = rip;
 776
 777    if (ri->type & ARM_CP_IO) {
 778        qemu_mutex_lock_iothread();
 779        ri->writefn(env, ri, value);
 780        qemu_mutex_unlock_iothread();
 781    } else {
 782        ri->writefn(env, ri, value);
 783    }
 784}
 785
 786uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
 787{
 788    const ARMCPRegInfo *ri = rip;
 789    uint32_t res;
 790
 791    if (ri->type & ARM_CP_IO) {
 792        qemu_mutex_lock_iothread();
 793        res = ri->readfn(env, ri);
 794        qemu_mutex_unlock_iothread();
 795    } else {
 796        res = ri->readfn(env, ri);
 797    }
 798
 799    return res;
 800}
 801
 802void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
 803{
 804    const ARMCPRegInfo *ri = rip;
 805
 806    if (ri->type & ARM_CP_IO) {
 807        qemu_mutex_lock_iothread();
 808        ri->writefn(env, ri, value);
 809        qemu_mutex_unlock_iothread();
 810    } else {
 811        ri->writefn(env, ri, value);
 812    }
 813}
 814
 815uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
 816{
 817    const ARMCPRegInfo *ri = rip;
 818    uint64_t res;
 819
 820    if (ri->type & ARM_CP_IO) {
 821        qemu_mutex_lock_iothread();
 822        res = ri->readfn(env, ri);
 823        qemu_mutex_unlock_iothread();
 824    } else {
 825        res = ri->readfn(env, ri);
 826    }
 827
 828    return res;
 829}
 830
 831void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
 832{
 833    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
 834     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
 835     * to catch that case at translate time.
 836     */
 837    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
 838        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
 839                                                extract32(op, 3, 3), 4,
 840                                                imm, 0x1f, 0);
 841        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
 842    }
 843
 844    switch (op) {
 845    case 0x05: /* SPSel */
 846        update_spsel(env, imm);
 847        break;
 848    case 0x1e: /* DAIFSet */
 849        env->daif |= (imm << 6) & PSTATE_DAIF;
 850        break;
 851    case 0x1f: /* DAIFClear */
 852        env->daif &= ~((imm << 6) & PSTATE_DAIF);
 853        break;
 854    default:
 855        g_assert_not_reached();
 856    }
 857}
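/* e.g. an AArch64 "msr daifset, #3" reaches this helper with op == 0x1e and
 * imm == 3, setting PSTATE.I and PSTATE.F (imm is shifted up into the DAIF
 * bit positions, bits [9:6]).
 */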
 858
 859void HELPER(clear_pstate_ss)(CPUARMState *env)
 860{
 861    env->pstate &= ~PSTATE_SS;
 862}
 863
 864void HELPER(pre_hvc)(CPUARMState *env)
 865{
 866    ARMCPU *cpu = arm_env_get_cpu(env);
 867    int cur_el = arm_current_el(env);
 868    /* FIXME: Use actual secure state.  */
 869    bool secure = false;
 870    bool undef;
 871
 872    if (arm_is_psci_call(cpu, EXCP_HVC)) {
 873        /* If PSCI is enabled and this looks like a valid PSCI call then
 874         * that overrides the architecturally mandated HVC behaviour.
 875         */
 876        return;
 877    }
 878
 879    if (!arm_feature(env, ARM_FEATURE_EL2)) {
 880        /* If EL2 doesn't exist, HVC always UNDEFs */
 881        undef = true;
 882    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
 883        /* EL3.HCE has priority over EL2.HCD. */
 884        undef = !(env->cp15.scr_el3 & SCR_HCE);
 885    } else {
 886        undef = env->cp15.hcr_el2 & HCR_HCD;
 887    }
 888
 889    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
 890     * For ARMv8/AArch64, HVC is allowed in EL3.
 891     * Note that we've already trapped HVC from EL0 at translation
 892     * time.
 893     */
 894    if (secure && (!is_a64(env) || cur_el == 1)) {
 895        undef = true;
 896    }
 897
 898    if (undef) {
 899        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 900                        exception_target_el(env));
 901    }
 902}
 903
 904void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
 905{
 906    ARMCPU *cpu = arm_env_get_cpu(env);
 907    int cur_el = arm_current_el(env);
 908    bool secure = arm_is_secure(env);
 909    bool smd = env->cp15.scr_el3 & SCR_SMD;
 910    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
 911     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
 912     *  extensions, SMD only applies to NS state.
 913     * On ARMv7 without the Virtualization extensions, the SMD bit
 914     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
 915     * so we need not special case this here.
 916     */
 917    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;
 918
 919    if (!arm_feature(env, ARM_FEATURE_EL3) &&
 920        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
 921        /* If we have no EL3 then SMC always UNDEFs and can't be
 922         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
 923         * firmware within QEMU, and we want an EL2 guest to be able
 924         * to forbid its EL1 from making PSCI calls into QEMU's
 925         * "firmware" via HCR.TSC, so for these purposes treat
 926         * PSCI-via-SMC as implying an EL3.
 927         */
 928        undef = true;
 929    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
 930        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
 931         * We also want an EL2 guest to be able to forbid its EL1 from
 932         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
 933         */
 934        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
 935    }
 936
 937    /* If PSCI is enabled and this looks like a valid PSCI call then
 938     * suppress the UNDEF -- we'll catch the SMC exception and
 939     * implement the PSCI call behaviour there.
 940     */
 941    if (undef && !arm_is_psci_call(cpu, EXCP_SMC)) {
 942        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 943                        exception_target_el(env));
 944    }
 945}
 946
 947static int el_from_spsr(uint32_t spsr)
 948{
 949    /* Return the exception level that this SPSR is requesting a return to,
 950     * or -1 if it is invalid (an illegal return)
 951     */
 952    if (spsr & PSTATE_nRW) {
 953        switch (spsr & CPSR_M) {
 954        case ARM_CPU_MODE_USR:
 955            return 0;
 956        case ARM_CPU_MODE_HYP:
 957            return 2;
 958        case ARM_CPU_MODE_FIQ:
 959        case ARM_CPU_MODE_IRQ:
 960        case ARM_CPU_MODE_SVC:
 961        case ARM_CPU_MODE_ABT:
 962        case ARM_CPU_MODE_UND:
 963        case ARM_CPU_MODE_SYS:
 964            return 1;
 965        case ARM_CPU_MODE_MON:
 966            /* Returning to Mon from AArch64 is never possible,
 967             * so this is an illegal return.
 968             */
 969        default:
 970            return -1;
 971        }
 972    } else {
 973        if (extract32(spsr, 1, 1)) {
 974            /* Return with reserved M[1] bit set */
 975            return -1;
 976        }
 977        if (extract32(spsr, 0, 4) == 1) {
 978            /* return to EL0 with M[0] bit set */
 979            return -1;
 980        }
 981        return extract32(spsr, 2, 2);
 982    }
 983}
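/* e.g. an SPSR of 0x3c5 (AArch64 EL1h with DAIF set) yields 1, an AArch32
 * SPSR with M[4:0] == 0x10 (User) yields 0, and a value with the reserved
 * M[1] bit set yields -1, i.e. an illegal return.
 */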
 984
 985void HELPER(exception_return)(CPUARMState *env)
 986{
 987    int cur_el = arm_current_el(env);
 988    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
 989    uint32_t spsr = env->banked_spsr[spsr_idx];
 990    int new_el;
 991    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
 992
 993    aarch64_save_sp(env, cur_el);
 994
 995    arm_clear_exclusive(env);
 996
 997    /* We must squash the PSTATE.SS bit to zero unless both of the
 998     * following hold:
 999     *  1. debug exceptions are currently disabled
1000     *  2. singlestep will be active in the EL we return to
1001     * We check 1 here and 2 after we've done the pstate/cpsr write() to
1002     * transition to the EL we're going to.
1003     */
1004    if (arm_generate_debug_exceptions(env)) {
1005        spsr &= ~PSTATE_SS;
1006    }
1007
1008    new_el = el_from_spsr(spsr);
1009    if (new_el == -1) {
1010        goto illegal_return;
1011    }
1012    if (new_el > cur_el
1013        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
1014        /* Disallow return to an EL which is unimplemented or higher
1015         * than the current one.
1016         */
1017        goto illegal_return;
1018    }
1019
1020    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
1021        /* Return to an EL which is configured for a different register width */
1022        goto illegal_return;
1023    }
1024
1025    if (new_el == 2 && arm_is_secure_below_el3(env)) {
1026        /* Return to the non-existent secure-EL2 */
1027        goto illegal_return;
1028    }
1029
1030    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
1031        && !arm_is_secure_below_el3(env)) {
1032        goto illegal_return;
1033    }
1034
1035    qemu_mutex_lock_iothread();
1036    arm_call_pre_el_change_hook(arm_env_get_cpu(env));
1037    qemu_mutex_unlock_iothread();
1038
1039    if (!return_to_aa64) {
1040        env->aarch64 = 0;
1041        /* We do a raw CPSR write because aarch64_sync_64_to_32()
1042         * will sort the register banks out for us, and we've already
1043         * caught all the bad-mode cases in el_from_spsr().
1044         */
1045        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
1046        if (!arm_singlestep_active(env)) {
1047            env->uncached_cpsr &= ~PSTATE_SS;
1048        }
1049        aarch64_sync_64_to_32(env);
1050
1051        if (spsr & CPSR_T) {
1052            env->regs[15] = env->elr_el[cur_el] & ~0x1;
1053        } else {
1054            env->regs[15] = env->elr_el[cur_el] & ~0x3;
1055        }
1056        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
1057                      "AArch32 EL%d PC 0x%" PRIx32 "\n",
1058                      cur_el, new_el, env->regs[15]);
1059    } else {
1060        env->aarch64 = 1;
1061        pstate_write(env, spsr);
1062        if (!arm_singlestep_active(env)) {
1063            env->pstate &= ~PSTATE_SS;
1064        }
1065        aarch64_restore_sp(env, new_el);
1066        env->pc = env->elr_el[cur_el];
1067        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
1068                      "AArch64 EL%d PC 0x%" PRIx64 "\n",
1069                      cur_el, new_el, env->pc);
1070    }
1071
1072    qemu_mutex_lock_iothread();
1073    arm_call_el_change_hook(arm_env_get_cpu(env));
1074    qemu_mutex_unlock_iothread();
1075
1076    return;
1077
1078illegal_return:
1079    /* Illegal return events of various kinds have architecturally
1080     * mandated behaviour:
1081     * restore NZCV and DAIF from SPSR_ELx
1082     * set PSTATE.IL
1083     * restore PC from ELR_ELx
1084     * no change to exception level, execution state or stack pointer
1085     */
1086    env->pstate |= PSTATE_IL;
1087    env->pc = env->elr_el[cur_el];
1088    spsr &= PSTATE_NZCV | PSTATE_DAIF;
1089    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
1090    pstate_write(env, spsr);
1091    if (!arm_singlestep_active(env)) {
1092        env->pstate &= ~PSTATE_SS;
1093    }
1094    qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
1095                  "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
1096}
1097
1098/* Return true if the linked breakpoint entry lbn passes its checks */
1099static bool linked_bp_matches(ARMCPU *cpu, int lbn)
1100{
1101    CPUARMState *env = &cpu->env;
1102    uint64_t bcr = env->cp15.dbgbcr[lbn];
1103    int brps = extract32(cpu->dbgdidr, 24, 4);
1104    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
1105    int bt;
1106    uint32_t contextidr;
1107
1108    /* Links to unimplemented or non-context aware breakpoints are
1109     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
1110     * as if linked to an UNKNOWN context-aware breakpoint (in which
1111     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
1112     * We choose the former.
1113     */
1114    if (lbn > brps || lbn < (brps - ctx_cmps)) {
1115        return false;
1116    }
1117
1118    bcr = env->cp15.dbgbcr[lbn];
1119
1120    if (extract64(bcr, 0, 1) == 0) {
1121        /* Linked breakpoint disabled : generate no events */
1122        return false;
1123    }
1124
1125    bt = extract64(bcr, 20, 4);
1126
1127    /* We match the whole register even if this is AArch32 using the
1128     * short descriptor format (in which case it holds both PROCID and ASID),
1129     * since we don't implement the optional v7 context ID masking.
1130     */
1131    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
1132
1133    switch (bt) {
1134    case 3: /* linked context ID match */
1135        if (arm_current_el(env) > 1) {
1136            /* Context matches never fire in EL2 or (AArch64) EL3 */
1137            return false;
1138        }
1139        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
1140    case 5: /* linked address mismatch (reserved in AArch64) */
1141    case 9: /* linked VMID match (reserved if no EL2) */
1142    case 11: /* linked context ID and VMID match (reserved if no EL2) */
1143    default:
1144        /* Links to Unlinked context breakpoints must generate no
1145         * events; we choose to do the same for reserved values too.
1146         */
1147        return false;
1148    }
1149
1150    return false;
1151}
1152
1153static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
1154{
1155    CPUARMState *env = &cpu->env;
1156    uint64_t cr;
1157    int pac, hmc, ssc, wt, lbn;
1158    /* Note that for watchpoints the check is against the CPU security
1159     * state, not the S/NS attribute on the offending data access.
1160     */
1161    bool is_secure = arm_is_secure(env);
1162    int access_el = arm_current_el(env);
1163
1164    if (is_wp) {
1165        CPUWatchpoint *wp = env->cpu_watchpoint[n];
1166
1167        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
1168            return false;
1169        }
1170        cr = env->cp15.dbgwcr[n];
1171        if (wp->hitattrs.user) {
1172            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
1173             * match watchpoints as if they were accesses done at EL0, even if
1174             * the CPU is at EL1 or higher.
1175             */
1176            access_el = 0;
1177        }
1178    } else {
1179        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
1180
1181        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
1182            return false;
1183        }
1184        cr = env->cp15.dbgbcr[n];
1185    }
1186    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
1187     * enabled and that the address and access type match; for breakpoints
1188     * we know the address matched; check the remaining fields, including
1189     * linked breakpoints. We rely on WCR and BCR having the same layout
1190     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
1191     * Note that some combinations of {PAC, HMC, SSC} are reserved and
1192     * must act either like some valid combination or as if the watchpoint
1193     * were disabled. We choose the former, and use this together with
1194     * the fact that EL3 must always be Secure and EL2 must always be
1195     * Non-Secure to simplify the code slightly compared to the full
1196     * table in the ARM ARM.
1197     */
1198    pac = extract64(cr, 1, 2);
1199    hmc = extract64(cr, 13, 1);
1200    ssc = extract64(cr, 14, 2);
1201
1202    switch (ssc) {
1203    case 0:
1204        break;
1205    case 1:
1206    case 3:
1207        if (is_secure) {
1208            return false;
1209        }
1210        break;
1211    case 2:
1212        if (!is_secure) {
1213            return false;
1214        }
1215        break;
1216    }
1217
1218    switch (access_el) {
1219    case 3:
1220    case 2:
1221        if (!hmc) {
1222            return false;
1223        }
1224        break;
1225    case 1:
1226        if (extract32(pac, 0, 1) == 0) {
1227            return false;
1228        }
1229        break;
1230    case 0:
1231        if (extract32(pac, 1, 1) == 0) {
1232            return false;
1233        }
1234        break;
1235    default:
1236        g_assert_not_reached();
1237    }
1238
1239    wt = extract64(cr, 20, 1);
1240    lbn = extract64(cr, 16, 4);
1241
1242    if (wt && !linked_bp_matches(cpu, lbn)) {
1243        return false;
1244    }
1245
1246    return true;
1247}
1248
1249static bool check_watchpoints(ARMCPU *cpu)
1250{
1251    CPUARMState *env = &cpu->env;
1252    int n;
1253
1254    /* If watchpoints are disabled globally or we can't take debug
1255     * exceptions here then watchpoint firings are ignored.
1256     */
1257    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
1258        || !arm_generate_debug_exceptions(env)) {
1259        return false;
1260    }
1261
1262    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
1263        if (bp_wp_matches(cpu, n, true)) {
1264            return true;
1265        }
1266    }
1267    return false;
1268}
1269
1270static bool check_breakpoints(ARMCPU *cpu)
1271{
1272    CPUARMState *env = &cpu->env;
1273    int n;
1274
1275    /* If breakpoints are disabled globally or we can't take debug
1276     * exceptions here then breakpoint firings are ignored.
1277     */
1278    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
1279        || !arm_generate_debug_exceptions(env)) {
1280        return false;
1281    }
1282
1283    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
1284        if (bp_wp_matches(cpu, n, false)) {
1285            return true;
1286        }
1287    }
1288    return false;
1289}
1290
1291void HELPER(check_breakpoints)(CPUARMState *env)
1292{
1293    ARMCPU *cpu = arm_env_get_cpu(env);
1294
1295    if (check_breakpoints(cpu)) {
        HELPER(exception_internal)(env, EXCP_DEBUG);
1297    }
1298}
1299
1300bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
1301{
1302    /* Called by core code when a CPU watchpoint fires; need to check if this
1303     * is also an architectural watchpoint match.
1304     */
1305    ARMCPU *cpu = ARM_CPU(cs);
1306
1307    return check_watchpoints(cpu);
1308}
1309
1310vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
1311{
1312    ARMCPU *cpu = ARM_CPU(cs);
1313    CPUARMState *env = &cpu->env;
1314
1315    /* In BE32 system mode, target memory is stored byteswapped (on a
1316     * little-endian host system), and by the time we reach here (via an
1317     * opcode helper) the addresses of subword accesses have been adjusted
1318     * to account for that, which means that watchpoints will not match.
1319     * Undo the adjustment here.
1320     */
1321    if (arm_sctlr_b(env)) {
1322        if (len == 1) {
1323            addr ^= 3;
1324        } else if (len == 2) {
1325            addr ^= 2;
1326        }
1327    }
1328
1329    return addr;
1330}
1331
1332void arm_debug_excp_handler(CPUState *cs)
1333{
1334    /* Called by core code when a watchpoint or breakpoint fires;
1335     * need to check which one and raise the appropriate exception.
1336     */
1337    ARMCPU *cpu = ARM_CPU(cs);
1338    CPUARMState *env = &cpu->env;
1339    CPUWatchpoint *wp_hit = cs->watchpoint_hit;
1340
1341    if (wp_hit) {
1342        if (wp_hit->flags & BP_CPU) {
1343            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
1344            bool same_el = arm_debug_target_el(env) == arm_current_el(env);
1345
1346            cs->watchpoint_hit = NULL;
1347
1348            env->exception.fsr = arm_debug_exception_fsr(env);
1349            env->exception.vaddress = wp_hit->hitaddr;
1350            raise_exception(env, EXCP_DATA_ABORT,
1351                    syn_watchpoint(same_el, 0, wnr),
1352                    arm_debug_target_el(env));
1353        }
1354    } else {
1355        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
1356        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
1357
1358        /* (1) GDB breakpoints should be handled first.
1359         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
1360         * since singlestep is also done by generating a debug internal
1361         * exception.
1362         */
1363        if (cpu_breakpoint_test(cs, pc, BP_GDB)
1364            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
1365            return;
1366        }
1367
1368        env->exception.fsr = arm_debug_exception_fsr(env);
1369        /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
1370         * values to the guest that it shouldn't be able to see at its
1371         * exception/security level.
1372         */
1373        env->exception.vaddress = 0;
1374        raise_exception(env, EXCP_PREFETCH_ABORT,
1375                        syn_breakpoint(same_el),
1376                        arm_debug_target_el(env));
1377    }
1378}
1379
1380/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
1381   The only way to do that in TCG is a conditional branch, which clobbers
1382   all our temporaries.  For now implement these as helper functions.  */
1383
1384/* Similarly for variable shift instructions.  */
1385
1386uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1387{
1388    int shift = i & 0xff;
1389    if (shift >= 32) {
1390        if (shift == 32)
1391            env->CF = x & 1;
1392        else
1393            env->CF = 0;
1394        return 0;
1395    } else if (shift != 0) {
1396        env->CF = (x >> (32 - shift)) & 1;
1397        return x << shift;
1398    }
1399    return x;
1400}
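/* e.g. shl_cc(env, x, 33) returns 0 with CF cleared, shl_cc(env, x, 32)
 * returns 0 with CF = x & 1, and a shift of 0 leaves both the value and CF
 * untouched, matching the ARM carry-out rules for LSL by register.
 */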
1401
1402uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1403{
1404    int shift = i & 0xff;
1405    if (shift >= 32) {
1406        if (shift == 32)
1407            env->CF = (x >> 31) & 1;
1408        else
1409            env->CF = 0;
1410        return 0;
1411    } else if (shift != 0) {
1412        env->CF = (x >> (shift - 1)) & 1;
1413        return x >> shift;
1414    }
1415    return x;
1416}
1417
1418uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1419{
1420    int shift = i & 0xff;
1421    if (shift >= 32) {
1422        env->CF = (x >> 31) & 1;
1423        return (int32_t)x >> 31;
1424    } else if (shift != 0) {
1425        env->CF = (x >> (shift - 1)) & 1;
1426        return (int32_t)x >> shift;
1427    }
1428    return x;
1429}
1430
1431uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1432{
1433    int shift1, shift;
1434    shift1 = i & 0xff;
1435    shift = shift1 & 0x1f;
1436    if (shift == 0) {
1437        if (shift1 != 0)
1438            env->CF = (x >> 31) & 1;
1439        return x;
1440    } else {
1441        env->CF = (x >> (shift - 1)) & 1;
1442        return ((uint32_t)x >> shift) | (x << (32 - shift));
1443    }
1444}
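/* e.g. ror_cc(env, x, 32) rotates by a multiple of 32: the result is x
 * itself but CF is still set to bit 31 of x, whereas a rotate amount of 0
 * leaves CF alone, as required for ROR by register.
 */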
1445