qemu/target-arm/op_helper.c
/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
     * to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

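/* Neon table lookup (VTBL/VTBX): look up each byte of ireg in the table held
 * in the Neon registers starting at rn; any byte whose index is >= maxindex
 * takes the corresponding byte of def instead.
 */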
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

/* Try to fill the TLB and raise an exception on error. If retaddr is
 * NULL, it means the function was called from C code (i.e. not from
 * generated code or from helper.c).
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    bool ret;
    uint32_t fsr = 0;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, is_write, mmu_idx, &fsr, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        uint32_t syn, exc;
        unsigned int target_el;
        bool same_el;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }

        target_el = exception_target_el(env);
        if (fi.stage2) {
            target_el = 2;
            env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
        }
        same_el = arm_current_el(env) == target_el;
        /* AArch64 syndrome does not have an LPAE bit */
        syn = fsr & ~(1 << 9);

        /* For insn and data aborts we assume there is no instruction syndrome
         * information; this is always true for exceptions reported to EL1.
         */
        if (is_write == 2) {
            syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
            exc = EXCP_PREFETCH_ABORT;
        } else {
            syn = syn_data_abort(same_el, 0, 0, fi.s1ptw, is_write == 1, syn);
            if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
                fsr |= (1 << 11);
            }
            exc = EXCP_DATA_ABORT;
        }

        env->exception.vaddress = addr;
        env->exception.fsr = fsr;
        raise_exception(env, exc, syn, target_el);
    }
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, int is_write,
                                 int is_user, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    target_el = exception_target_el(env);
    same_el = (arm_current_el(env) == target_el);

    env->exception.vaddress = vaddr;

    /* the DFSR for an alignment fault depends on whether we're using
     * the LPAE long descriptor format, or the short descriptor format
     */
    if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
        env->exception.fsr = 0x21;
    } else {
        env->exception.fsr = 0x1;
    }

    if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
        env->exception.fsr |= (1 << 11);
    }

    raise_exception(env, EXCP_DATA_ABORT,
                    syn_data_abort(same_el, 0, 0, 0, is_write == 1, 0x21),
                    target_el);
}

#endif /* !defined(CONFIG_USER_ONLY) */

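/* Saturating arithmetic helpers: on signed or unsigned overflow the result
 * is clamped and the sticky saturation flag (CPSR.Q, env->QF) is set.
 */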
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

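/* SETEND: toggle the AArch32 data endianness bit (CPSR.E). */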
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

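/* WFI: halt the CPU and wait for an interrupt, unless the instruction is
 * trapped to a higher EL or there is already work pending.
 */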
void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= 4;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        switch (regno) {
        case 17: /* ELR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

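/* Run-time access checks for coprocessor/system register accesses that could
 * not be fully resolved at translate time; raise the appropriate exception
 * if the access is not permitted.
 */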
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

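/* Thin wrappers that invoke a system register's registered write/read
 * callbacks (32-bit and 64-bit variants).
 */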
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     *  extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* If we have no EL3 then SMC always UNDEFs */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}

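/* Handle an exception return from AArch64 (ERET): restore PSTATE/CPSR from
 * the banked SPSR and the return address from ELR_ELx, applying the
 * architected illegal-return checks.
 */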
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * set PSTATE.IL
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    return false;
}

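/* Return true if breakpoint/watchpoint n matches, taking the current
 * security state and the privilege level of the access into account.
 */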
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal(env, EXCP_DEBUG));
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                    syn_watchpoint(same_el, 0, wnr),
                    arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        if (extended_addresses_enabled(env)) {
            env->exception.fsr = (1 << 9) | 0x22;
        } else {
            env->exception.fsr = 0x2;
        }
        /* FAR is UNKNOWN, so doesn't need setting */
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

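/* The *_cc helpers implement ARM register-controlled shifts: the shift amount
 * is taken from the bottom byte of the operand, the shifted value is returned
 * and the carry flag (env->CF) is updated to the last bit shifted out.
 */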
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}