qemu/target-arm/op_helper.c
/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

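/* Set the pending exception on the current CPU and longjmp back out to the
 * main execution loop via cpu_loop_exit(); this function does not return.
 * Any syndrome or fault information must already have been stored in
 * env->exception by the caller.
 */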
static void raise_exception(CPUARMState *env, int tt)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = tt;
    cpu_loop_exit(cs);
}

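/* Table lookup for the Neon VTBL/VTBX instructions: each byte of ireg is an
 * index into the table of byte elements held in the registers starting at
 * rn. Indexes beyond maxindex bytes select the corresponding byte of def
 * instead (zero for VTBL, the original destination value for VTBX).
 */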
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

/* Try to fill the TLB; if that fails, raise an exception. If retaddr is
 * zero, the function was called from C code (i.e. not from generated
 * code or from helper.c).
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = arm_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        raise_exception(env, cs->exception_index);
    }
}
#endif

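/* Perform an addition and set the sticky saturation flag (QF) on signed
 * overflow, but return the unsaturated (wrapped) result; used by
 * instructions such as the signed multiply-accumulates which set Q
 * without saturating.
 */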
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

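/* For example, do_ssat(env, v, 7) clamps v to the signed 8-bit range
 * [-0x80, 0x7f], and do_usat(env, v, 8) clamps v to the unsigned 8-bit
 * range [0, 0xff]; in both cases QF is set if clamping occurred.
 */
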
/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

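/* WFI: mark the CPU as halted and exit to the main loop; it stays halted
 * until an interrupt or other wakeup event arrives.
 */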
void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    /* Don't actually halt the CPU, just yield back to the top
     * level loop.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value.  */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    cpu_loop_exit(cs);
}

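/* Return the CPSR with the execution state bits (IT, J, T) and the
 * reserved bits masked out, as an MRS of the CPSR should see it.
 */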
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[0];
    } else if (regno == 14) {
        val = env->banked_r14[0];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[0] = val;
    } else if (regno == 14) {
        env->banked_r14[0] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

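/* Check that access to the coprocessor register described by rip is
 * permitted from the current CPU state; if it is not, raise an UNDEF
 * exception using the syndrome value supplied by the translator.
 */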
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
{
    const ARMCPRegInfo *ri = rip;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        env->exception.syndrome = syndrome;
        raise_exception(env, EXCP_UDEF);
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        env->exception.syndrome = syndrome;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        env->exception.syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }
    raise_exception(env, EXCP_UDEF);
}

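/* Read or write a coprocessor register via its readfn/writefn hooks;
 * these helpers are used for registers whose accesses cannot be
 * handled inline in generated code.
 */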
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        raise_exception(env, EXCP_UDEF);
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

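/* Run the architectural checks for an HVC instruction before it executes:
 * either raise UNDEF because HVC is not available from the current state,
 * or return normally and let the HVC exception be taken.
 */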
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is UNDEF in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        env->exception.syndrome = syn_uncategorized();
        raise_exception(env, EXCP_UDEF);
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 AArch32, SMD only applies to NS state.
     * On ARMv7 SMD only applies to NS state and only if EL2 is available.
     * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check
     * the EL2 condition here.
     */
    bool undef = is_a64(env) ? smd : (!secure && smd);

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* If we have no EL3 then SMC always UNDEFs */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
        env->exception.syndrome = syndrome;
        raise_exception(env, EXCP_HYP_TRAP);
    }

    if (undef) {
        env->exception.syndrome = syn_uncategorized();
        raise_exception(env, EXCP_UDEF);
    }
}

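/* Handle an AArch64 ERET: restore PSTATE or CPSR from the current EL's
 * SPSR, switch to the exception level and execution state it specifies,
 * and resume execution at the address held in ELR_ELx.
 */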
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;

    aarch64_save_sp(env, cur_el);

    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    if (spsr & PSTATE_nRW) {
        /* TODO: We currently assume EL1/2/3 are running in AArch64.  */
        env->aarch64 = 0;
        new_el = 0;
        env->uncached_cpsr = 0x10;
        cpsr_write(env, spsr, ~0);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        env->regs[15] = env->elr_el[1] & ~0x1;
    } else {
        new_el = extract32(spsr, 2, 2);
        if (new_el > cur_el
            || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
            /* Disallow return to an EL which is unimplemented or higher
             * than the current one.
             */
            goto illegal_return;
        }
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            goto illegal_return;
        }
        if (new_el == 0 && (spsr & PSTATE_SP)) {
            /* Return to EL0 with M[0] bit set */
            goto illegal_return;
        }
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * set PSTATE.IL
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr;
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }
}

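/* Return true if breakpoint/watchpoint n (a watchpoint if is_wp is true,
 * a breakpoint otherwise) should fire given the current CPU state and
 * the control bits in the corresponding DBGWCR/DBGBCR register.
 */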
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* TODO: check against CPU security state when we implement TrustZone */
    bool is_secure = false;

    if (is_wp) {
        if (!env->cpu_watchpoint[n]
            || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    /* TODO: this is not strictly correct because the LDRT/STRT/LDT/STT
     * "unprivileged access" instructions should match watchpoints as if
     * they were accesses done at EL0, even if the CPU is at EL1 or higher.
     * Implementing this would require reworking the core watchpoint code
     * to plumb the mmu_idx through to this point. Luckily Linux does not
     * rely on this behaviour currently.
     * For breakpoints we do want to use the current CPU state.
     */
    switch (arm_current_el(env)) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_watchpoints(cpu)) {
                bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
                bool same_el = arm_debug_target_el(env) == arm_current_el(env);

                env->exception.syndrome = syn_watchpoint(same_el, 0, wnr);
                if (extended_addresses_enabled(env)) {
                    env->exception.fsr = (1 << 9) | 0x22;
                } else {
                    env->exception.fsr = 0x2;
                }
                env->exception.vaddress = wp_hit->hitaddr;
                raise_exception(env, EXCP_DATA_ABORT);
            } else {
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        if (check_breakpoints(cpu)) {
            bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
            env->exception.syndrome = syn_breakpoint(same_el);
            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            /* FAR is UNKNOWN, so doesn't need setting */
            raise_exception(env, EXCP_PREFETCH_ABORT);
        }
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}