qemu/target/arm/op_helper.c
/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
                                    uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;

    return cs;
}

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit_restore(cs, ra);
}

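/*
 * Helper for the Neon VTBL/VTBX table lookup: each byte of ireg selects a
 * byte from the table; indexes beyond maxindex take the corresponding byte
 * of def instead.
 */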
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
                          uint32_t maxindex)
{
    uint32_t val, shift;
    uint64_t *table = vn;

    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        uint32_t index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = env_cpu(env);

        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use cpu_restore_state() to
         * get them right before raising the exception.
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}

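/*
 * Add two 32-bit values, setting the Q flag on signed overflow but
 * returning the unsaturated result.
 */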
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

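/*
 * Signed saturating add, subtract and double: on overflow, clamp the
 * result to INT32_MAX or INT32_MIN and set the Q flag.
 */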
uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

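/*
 * Unsigned saturating add and subtract: clamp the result to UINT32_MAX
 * or 0 respectively and set the Q flag on overflow.
 */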
uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

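/* SETEND: toggle the CPSR.E (data endianness) bit. */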
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 are at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check, as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= insn_len;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}

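/* Read the CPSR, with the execution-state and reserved bits masked out. */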
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

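/*
 * MSR (banked): write a banked register of the target mode, after checking
 * for the UNPREDICTABLE cases not already rejected at translate time.
 */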
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

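/* MRS (banked): read a banked register of the target mode. */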
uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

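/*
 * Runtime permission check for a coprocessor/system register access that
 * could not be fully resolved at translate time; raises the appropriate
 * exception if the access is not allowed.
 */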
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

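/*
 * Read/write helpers for 32-bit and 64-bit coprocessor registers.
 * Registers marked ARM_CP_IO must be accessed with the iothread lock held.
 */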
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

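/*
 * Called before executing an HVC instruction to check whether it must UNDEF
 * at the current exception level and security state.
 */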
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     *  extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     *    - PSCI conduit is SMC but we don't have a valid PSCI call,
     *    - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /*
     * Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = env_archcpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /*
         * Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable. So in practice the hostaddr[] array has
         * two entries, given the current setting of TARGET_PAGE_BITS_MIN.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        assert(maxidx <= ARRAY_SIZE(hostaddr));

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /*
                 * If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /*
             * OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /*
         * Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}