/* qemu/target-arm/op_helper.c */
   1/*
   2 *  ARM helper routines
   3 *
   4 *  Copyright (c) 2005-2007 CodeSourcery, LLC
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19#include "qemu/osdep.h"
  20#include "cpu.h"
  21#include "exec/helper-proto.h"
  22#include "internals.h"
  23#include "exec/exec-all.h"
  24#include "exec/cpu_ldst.h"
  25#include "sysemu/cpus.h"
  26
  27#define SIGNBIT (uint32_t)0x80000000
  28#define SIGNBIT64 ((uint64_t)1 << 63)
  29
  30static void raise_exception(CPUARMState *env, uint32_t excp,
  31                            uint32_t syndrome, uint32_t target_el)
  32{
  33    CPUState *cs = CPU(arm_env_get_cpu(env));
  34
  35    assert(!excp_is_internal(excp));
  36    cs->exception_index = excp;
  37    env->exception.syndrome = syndrome;
  38    env->exception.target_el = target_el;
  39    cpu_loop_exit(cs);
  40}
  41
  42static int exception_target_el(CPUARMState *env)
  43{
  44    int target_el = MAX(1, arm_current_el(env));
  45
  46    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
  47     * to EL3 in this case.
  48     */
  49    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
  50        target_el = 3;
  51    }
  52
  53    return target_el;
  54}
  55
  56uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
  57                          uint32_t rn, uint32_t maxindex)
  58{
  59    uint32_t val;
  60    uint32_t tmp;
  61    int index;
  62    int shift;
  63    uint64_t *table;
  64    table = (uint64_t *)&env->vfp.regs[rn];
  65    val = 0;
  66    for (shift = 0; shift < 32; shift += 8) {
  67        index = (ireg >> shift) & 0xff;
  68        if (index < maxindex) {
  69            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
  70            val |= tmp << shift;
  71        } else {
  72            val |= def & (0xff << shift);
  73        }
  74    }
  75    return val;
  76}
  77
  78#if !defined(CONFIG_USER_ONLY)
  79
  80#include "hw/remote-port.h"
  81
  82static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
  83                                            unsigned int target_el,
  84                                            bool same_el,
  85                                            bool s1ptw, bool is_write,
  86                                            int fsc)
  87{
  88    uint32_t syn;
  89
  90    /* ISV is only set for data aborts routed to EL2 and
  91     * never for stage-1 page table walks faulting on stage 2.
  92     *
  93     * Furthermore, ISV is only set for certain kinds of load/stores.
  94     * If the template syndrome does not have ISV set, we should leave
  95     * it cleared.
  96     *
  97     * See ARMv8 specs, D7-1974:
  98     * ISS encoding for an exception from a Data Abort, the
  99     * ISV field.
 100     */
 101    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
 102        syn = syn_data_abort_no_iss(same_el,
 103                                    0, 0, s1ptw, is_write, fsc);
 104    } else {
 105        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
 106         * syndrome created at translation time.
 107         * Now we create the runtime syndrome with the remaining fields.
 108         */
 109        syn = syn_data_abort_with_iss(same_el,
 110                                      0, 0, 0, 0, 0,
 111                                      0, 0, s1ptw, is_write, fsc,
 112                                      false);
 113        /* Merge the runtime syndrome with the template syndrome.  */
 114        syn |= template_syn;
 115    }
 116    return syn;
 117}
 118
/* try to fill the TLB and return an exception if error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    uint32_t fsr = 0;
    ARMMMUFaultInfo fi = {};

    /* arm_tlb_fill() returns true iff translation failed and a fault
     * must be delivered to the guest.
     */
    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        uint32_t syn, exc;
        unsigned int target_el;
        bool same_el;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }

        target_el = exception_target_el(env);
        if (fi.stage2) {
            /* Stage-2 faults always go to EL2; HPFAR_EL2 holds the
             * faulting IPA bits [47:12], stored shifted left by 4.
             */
            target_el = 2;
            env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
        }
        same_el = arm_current_el(env) == target_el;
        /* AArch64 syndrome does not have an LPAE bit */
        syn = fsr & ~(1 << 9);

        /* For insn and data aborts we assume there is no instruction syndrome
         * information; this is always true for exceptions reported to EL1.
         */
        if (access_type == MMU_INST_FETCH) {
            syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
            exc = EXCP_PREFETCH_ABORT;
        } else {
            syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                       same_el, fi.s1ptw,
                                       access_type == MMU_DATA_STORE, syn);
            if (access_type == MMU_DATA_STORE
                && arm_feature(env, ARM_FEATURE_V6)) {
                /* Set the WnR bit in the short-descriptor format DFSR. */
                fsr |= (1 << 11);
            }
            exc = EXCP_DATA_ABORT;
        }

        env->exception.vaddress = addr;
        env->exception.fsr = fsr;
        raise_exception(env, exc, syn, target_el);
    }
}
 174
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn;

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    target_el = exception_target_el(env);
    same_el = (arm_current_el(env) == target_el);

    env->exception.vaddress = vaddr;

    /* the DFSR for an alignment fault depends on whether we're using
     * the LPAE long descriptor format, or the short descriptor format
     */
    if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
        /* LPAE format: FS = 0x21 (alignment), bit 9 = LPAE. */
        env->exception.fsr = (1 << 9) | 0x21;
    } else {
        /* Short-descriptor format: FS = 0x1 (alignment). */
        env->exception.fsr = 0x1;
    }

    if (access_type == MMU_DATA_STORE && arm_feature(env, ARM_FEATURE_V6)) {
        /* Set the WnR bit in the short-descriptor format DFSR. */
        env->exception.fsr |= (1 << 11);
    }

    /* 0x21 is the alignment-fault FSC for the syndrome as well. */
    syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                               same_el, 0, access_type == MMU_DATA_STORE,
                               0x21);
    raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
}
 214
 215#endif /* !defined(CONFIG_USER_ONLY) */
 216
 217uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
 218{
 219    uint32_t res = a + b;
 220    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
 221        env->QF = 1;
 222    return res;
 223}
 224
 225uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
 226{
 227    uint32_t res = a + b;
 228    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
 229        env->QF = 1;
 230        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
 231    }
 232    return res;
 233}
 234
 235uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
 236{
 237    uint32_t res = a - b;
 238    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
 239        env->QF = 1;
 240        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
 241    }
 242    return res;
 243}
 244
 245uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
 246{
 247    uint32_t res;
 248    if (val >= 0x40000000) {
 249        res = ~SIGNBIT;
 250        env->QF = 1;
 251    } else if (val <= (int32_t)0xc0000000) {
 252        res = SIGNBIT;
 253        env->QF = 1;
 254    } else {
 255        res = val << 1;
 256    }
 257    return res;
 258}
 259
 260uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
 261{
 262    uint32_t res = a + b;
 263    if (res < a) {
 264        env->QF = 1;
 265        res = ~0;
 266    }
 267    return res;
 268}
 269
 270uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
 271{
 272    uint32_t res = a - b;
 273    if (res > a) {
 274        env->QF = 1;
 275        res = 0;
 276    }
 277    return res;
 278}
 279
 280/* Signed saturation.  */
 281static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
 282{
 283    int32_t top;
 284    uint32_t mask;
 285
 286    top = val >> shift;
 287    mask = (1u << shift) - 1;
 288    if (top > 0) {
 289        env->QF = 1;
 290        return mask;
 291    } else if (top < -1) {
 292        env->QF = 1;
 293        return ~mask;
 294    }
 295    return val;
 296}
 297
 298/* Unsigned saturation.  */
 299static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
 300{
 301    uint32_t max;
 302
 303    max = (1u << shift) - 1;
 304    if (val < 0) {
 305        env->QF = 1;
 306        return 0;
 307    } else if (val > max) {
 308        env->QF = 1;
 309        return max;
 310    }
 311    return val;
 312}
 313
/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    /* Thin wrapper over do_ssat for the SSAT instruction. */
    return do_ssat(env, x, shift);
}
 319
 320/* Dual halfword signed saturate.  */
 321uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
 322{
 323    uint32_t res;
 324
 325    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
 326    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
 327    return res;
 328}
 329
/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    /* Thin wrapper over do_usat for the USAT instruction. */
    return do_usat(env, x, shift);
}
 335
 336/* Dual halfword unsigned saturate.  */
 337uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
 338{
 339    uint32_t res;
 340
 341    res = (uint16_t)do_usat(env, (int16_t)x, shift);
 342    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
 343    return res;
 344}
 345
/* SETEND instruction: toggle CPSR.E, the data endianness bit. */
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}
 350
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        /* nTWE/nTWI clear means the instruction IS trapped. */
        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
 400
/* WFI: halt the CPU until an interrupt, unless the instruction is
 * configured to trap to a higher EL.  This fork also signals WFI entry
 * on the CPU's wfi GPIO line.
 */
void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    ARMCPU *cpu = arm_env_get_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Work is pending; don't halt, just return to the main loop. */
        cs->exception_index = -1;
        cpu_loop_exit(cs);
        return;
    }

    if (target_el) {
        /* Back the PC up to the WFI insn before taking the trap.
         * NOTE(review): this adjusts env->pc by 4 unconditionally, which
         * assumes AArch64 (or 4-byte AArch32) state — confirm for Thumb.
         */
        env->pc -= 4;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
    }

    if (use_icount) {
        /* Under icount we must not halt; yield instead. */
        cs->exception_index = EXCP_YIELD;
    } else {
        cs->exception_index = EXCP_HLT;
        cs->halted = 1;
    }

    /* Signal WFI entry on the wfi GPIO (fork-specific extension). */
    cpu->is_in_wfi = true;
    qemu_set_irq(cpu->wfi, 1);

    cpu_loop_exit(cs);
}
 430
 431void HELPER(wfe)(CPUARMState *env)
 432{
 433    ARMCPU *ac = ARM_CPU(arm_env_get_cpu(env));
 434    CPUState *cs = CPU(ac);
 435
 436    switch (ac->pe) {
 437    case  1:
 438        ac->pe = 0;
 439        return;
 440    case  0:
 441        cs->exception_index = EXCP_YIELD;
 442        cpu_loop_exit(cs);
 443        return;
 444    default:
 445        g_assert_not_reached();
 446    }
 447}
 448
/* SEV: set the event register of every CPU and wake any CPU sleeping
 * in WFE.  CPUs held by external halt/reset pins stay halted
 * (fork-specific pin handling).
 */
void HELPER(sev)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    CPUState *i;

    for (i = first_cpu; i; i = CPU_NEXT(i)) {
        ARMCPU *ac = ARM_CPU(i);
        ac->pe = 1;
        /* Don't wake ourselves, or CPUs held in halt/reset by pins. */
        if (i == cs || i->halt_pin || i->reset_pin || i->arch_halt_pin) {
            continue;
        }
        cpu_reset_interrupt(i, CPU_INTERRUPT_HALT);
        cpu_interrupt(i, CPU_INTERRUPT_EXITTB);
        i->halted = 0;
    }
}
 465
 466void HELPER(sevl)(CPUARMState *env)
 467{
 468    ARMCPU *ac = arm_env_get_cpu(env);
 469
 470    ac->pe = 1;
 471}
 472
 473void HELPER(yield)(CPUARMState *env)
 474{
 475    ARMCPU *cpu = arm_env_get_cpu(env);
 476    CPUState *cs = CPU(cpu);
 477
 478    /* This is a non-trappable hint instruction that generally indicates
 479     * that the guest is currently busy-looping. Yield control back to the
 480     * top level loop so that a more deserving VCPU has a chance to run.
 481     */
 482    cs->exception_index = EXCP_YIELD;
 483    cpu_loop_exit(cs);
 484}
 485
/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    /* Guest-visible exceptions must go via raise_exception() instead. */
    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
 500
/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    /* Thin wrapper so generated code can raise guest exceptions. */
    raise_exception(env, excp, syndrome, target_el);
}
 507
/* Read the CPSR with the execution-state and reserved bits masked out. */
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}
 512
/* Write the CPSR on behalf of an MSR instruction. */
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}
 517
/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    /* Notify anything registered for EL-change (e.g. GIC) of the return. */
    arm_call_el_change_hook(arm_env_get_cpu(env));
}
 532
 533/* Access to user mode registers from privileged modes.  */
 534uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
 535{
 536    uint32_t val;
 537
 538    if (regno == 13) {
 539        val = env->banked_r13[BANK_USRSYS];
 540    } else if (regno == 14) {
 541        val = env->banked_r14[BANK_USRSYS];
 542    } else if (regno >= 8
 543               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
 544        val = env->usr_regs[regno - 8];
 545    } else {
 546        val = env->regs[regno];
 547    }
 548    return val;
 549}
 550
 551void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
 552{
 553    if (regno == 13) {
 554        env->banked_r13[BANK_USRSYS] = val;
 555    } else if (regno == 14) {
 556        env->banked_r14[BANK_USRSYS] = val;
 557    } else if (regno >= 8
 558               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
 559        env->usr_regs[regno - 8] = val;
 560    } else {
 561        env->regs[regno] = val;
 562    }
 563}
 564
 565void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
 566{
 567    if ((env->uncached_cpsr & CPSR_M) == mode) {
 568        env->regs[13] = val;
 569    } else {
 570        env->banked_r13[bank_number(mode)] = val;
 571    }
 572}
 573
/* Read the banked r13 (SP) for the given mode (used by SRS). */
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    /* Current mode's SP is in the live register file; others are banked. */
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
 590
static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (curmode == tgtmode) {
        /* Accessing your own mode's banked registers is UNPREDICTABLE. */
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            /* r8-r12_usr are only banked w.r.t. FIQ mode. */
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            /* SP_usr is shared with System mode. */
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            /* LR_usr is shared with Hyp and System modes. */
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        switch (regno) {
        case 17: /* ELR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            /* All other Hyp banked registers require Monitor mode. */
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}
 648
/* MSR (banked): write 'value' to the register 'regno' banked for mode
 * 'tgtmode', after validating the access.
 */
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        /* r8-r12 are only banked between User and FIQ modes. */
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
 683
/* MRS (banked): read the register 'regno' banked for mode 'tgtmode',
 * after validating the access.
 */
uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[bank_number(tgtmode)];
    case 8 ... 12:
        /* r8-r12 are only banked between User and FIQ modes. */
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
 710
/* Check access permission for the coprocessor/system register described
 * by 'rip'; raise UNDEF (or a routed trap) if the access is denied.
 */
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    /* XScale CPAR gates access to cp0-cp13. */
    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    /* No access function means no runtime check is needed. */
    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}
 773
/* Write a 32-bit coprocessor register via its registered write function. */
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}
 780
/* Read a 32-bit coprocessor register via its registered read function. */
uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}
 787
/* Write a 64-bit coprocessor register via its registered write function. */
void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}
 794
/* Read a 64-bit coprocessor register via its registered read function. */
uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}
 801
void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        /* imm carries the DAIF bits in its low nibble; shift into place. */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}
 829
/* Clear the PSTATE software-step (SS) bit. */
void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}
 834
/* Pre-execution checks for HVC: UNDEF if HVC is disabled or not
 * permitted from the current state; PSCI calls bypass the checks.
 */
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
 874
/* Pre-execution checks for SMC: UNDEF or route to EL2 depending on
 * SCR_EL3.SMD / HCR_EL2.TSC; PSCI calls bypass the checks.
 */
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     *  extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* If we have no EL3 then SMC always UNDEFs */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
 910
static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        /* Return to AArch32: map the CPSR mode field to an EL. */
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        /* Return to AArch64: validate the M[] field encoding. */
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}
 948
/* Implement the AArch64 ERET instruction: consume SPSR_ELx/ELR_ELx for
 * the current EL, validate the exception level and register width they
 * describe, and either complete the return or take the architected
 * "illegal exception return" path (set PSTATE.IL, stay at the current EL).
 */
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    /* SPSR M[4] (PSTATE_nRW) clear means the target execution state is
     * AArch64; set means AArch32.
     */
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    /* Write the current SP back to its banked slot before any EL change */
    aarch64_save_sp(env, cur_el);

    /* Exception return clears the exclusive monitor */
    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    /* With HCR_EL2.TGE set, non-secure EL1 is under EL2's control and
     * may not be returned to.
     */
    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        /* The return address is force-aligned for the target ISA:
         * halfword for Thumb, word for ARM.
         */
        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    arm_call_el_change_hook(arm_env_get_cpu(env));

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * set PSTATE.IL
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}
1047
1048/* Return true if the linked breakpoint entry lbn passes its checks */
1049static bool linked_bp_matches(ARMCPU *cpu, int lbn)
1050{
1051    CPUARMState *env = &cpu->env;
1052    uint64_t bcr = env->cp15.dbgbcr[lbn];
1053    int brps = extract32(cpu->dbgdidr, 24, 4);
1054    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
1055    int bt;
1056    uint32_t contextidr;
1057
1058    /* Links to unimplemented or non-context aware breakpoints are
1059     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
1060     * as if linked to an UNKNOWN context-aware breakpoint (in which
1061     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
1062     * We choose the former.
1063     */
1064    if (lbn > brps || lbn < (brps - ctx_cmps)) {
1065        return false;
1066    }
1067
1068    bcr = env->cp15.dbgbcr[lbn];
1069
1070    if (extract64(bcr, 0, 1) == 0) {
1071        /* Linked breakpoint disabled : generate no events */
1072        return false;
1073    }
1074
1075    bt = extract64(bcr, 20, 4);
1076
1077    /* We match the whole register even if this is AArch32 using the
1078     * short descriptor format (in which case it holds both PROCID and ASID),
1079     * since we don't implement the optional v7 context ID masking.
1080     */
1081    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
1082
1083    switch (bt) {
1084    case 3: /* linked context ID match */
1085        if (arm_current_el(env) > 1) {
1086            /* Context matches never fire in EL2 or (AArch64) EL3 */
1087            return false;
1088        }
1089        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
1090    case 5: /* linked address mismatch (reserved in AArch64) */
1091    case 9: /* linked VMID match (reserved if no EL2) */
1092    case 11: /* linked context ID and VMID match (reserved if no EL2) */
1093    default:
1094        /* Links to Unlinked context breakpoints must generate no
1095         * events; we choose to do the same for reserved values too.
1096         */
1097        return false;
1098    }
1099
1100    return false;
1101}
1102
/* Return true if breakpoint/watchpoint n matches architecturally:
 * for a watchpoint (is_wp true) the QEMU watchpoint must have hit and
 * DBGWCR<n> must permit the access; for a breakpoint the PC must match
 * and DBGBCR<n> must permit it. Also checks any linked breakpoint.
 */
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        /* Only consider watchpoints the core code has flagged as hit */
        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    /* Security state check: SSC selects which of Secure/Non-secure
     * the breakpoint/watchpoint applies in.
     */
    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    /* Privilege check: HMC gates EL2/EL3, the two PAC bits gate EL1/EL0 */
    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    /* If this is a linked breakpoint/watchpoint, its linked breakpoint
     * (selected by LBN) must also pass its own checks.
     */
    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
1198
1199static bool check_watchpoints(ARMCPU *cpu)
1200{
1201    CPUARMState *env = &cpu->env;
1202    int n;
1203
1204    /* If watchpoints are disabled globally or we can't take debug
1205     * exceptions here then watchpoint firings are ignored.
1206     */
1207    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
1208        || !arm_generate_debug_exceptions(env)) {
1209        return false;
1210    }
1211
1212    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
1213        if (bp_wp_matches(cpu, n, true)) {
1214            return true;
1215        }
1216    }
1217    return false;
1218}
1219
1220static bool check_breakpoints(ARMCPU *cpu)
1221{
1222    CPUARMState *env = &cpu->env;
1223    int n;
1224
1225    /* If breakpoints are disabled globally or we can't take debug
1226     * exceptions here then breakpoint firings are ignored.
1227     */
1228    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
1229        || !arm_generate_debug_exceptions(env)) {
1230        return false;
1231    }
1232
1233    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
1234        if (bp_wp_matches(cpu, n, false)) {
1235            return true;
1236        }
1237    }
1238    return false;
1239}
1240
1241void HELPER(check_breakpoints)(CPUARMState *env)
1242{
1243    ARMCPU *cpu = arm_env_get_cpu(env);
1244
1245    if (check_breakpoints(cpu)) {
1246        HELPER(exception_internal(env, EXCP_DEBUG));
1247    }
1248}
1249
1250bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
1251{
1252    /* Called by core code when a CPU watchpoint fires; need to check if this
1253     * is also an architectural watchpoint match.
1254     */
1255    ARMCPU *cpu = ARM_CPU(cs);
1256
1257    return check_watchpoints(cpu);
1258}
1259
/* Entry point from core code when a watchpoint or breakpoint fires:
 * determine which one it was and raise the corresponding architectural
 * exception (data abort for watchpoints, prefetch abort for breakpoints).
 * raise_exception() does not return (it longjmps back to the cpu loop).
 */
void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            /* Consume the hit so core code doesn't re-report it */
            cs->watchpoint_hit = NULL;

            /* FSR encoding differs between long-descriptor/LPAE format
             * and the short-descriptor format.
             */
            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                    syn_watchpoint(same_el, 0, wnr),
                    arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        if (extended_addresses_enabled(env)) {
            env->exception.fsr = (1 << 9) | 0x22;
        } else {
            env->exception.fsr = 0x2;
        }
        /* FAR is UNKNOWN, so doesn't need setting */
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}
1311
1312/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
1313   The only way to do that in TCG is a conditional branch, which clobbers
1314   all our temporaries.  For now implement these as helper functions.  */
1315
1316/* Similarly for variable shift instructions.  */
1317
1318uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1319{
1320    int shift = i & 0xff;
1321    if (shift >= 32) {
1322        if (shift == 32)
1323            env->CF = x & 1;
1324        else
1325            env->CF = 0;
1326        return 0;
1327    } else if (shift != 0) {
1328        env->CF = (x >> (32 - shift)) & 1;
1329        return x << shift;
1330    }
1331    return x;
1332}
1333
1334uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1335{
1336    int shift = i & 0xff;
1337    if (shift >= 32) {
1338        if (shift == 32)
1339            env->CF = (x >> 31) & 1;
1340        else
1341            env->CF = 0;
1342        return 0;
1343    } else if (shift != 0) {
1344        env->CF = (x >> (shift - 1)) & 1;
1345        return x >> shift;
1346    }
1347    return x;
1348}
1349
1350uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1351{
1352    int shift = i & 0xff;
1353    if (shift >= 32) {
1354        env->CF = (x >> 31) & 1;
1355        return (int32_t)x >> 31;
1356    } else if (shift != 0) {
1357        env->CF = (x >> (shift - 1)) & 1;
1358        return (int32_t)x >> shift;
1359    }
1360    return x;
1361}
1362
1363uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
1364{
1365    int shift1, shift;
1366    shift1 = i & 0xff;
1367    shift = shift1 & 0x1f;
1368    if (shift == 0) {
1369        if (shift1 != 0)
1370            env->CF = (x >> 31) & 1;
1371        return x;
1372    } else {
1373        env->CF = (x >> (shift - 1)) & 1;
1374        return ((uint32_t)x >> shift) | (x << (32 - shift));
1375    }
1376}
1377