qemu/target/arm/translate.h
#ifndef TARGET_ARM_TRANSLATE_H
#define TARGET_ARM_TRANSLATE_H

#include "exec/translator.h"
#include "internals.h"


/* internal defines */
typedef struct DisasContext {
    DisasContextBase base;
    const ARMISARegisters *isar;

    /* The address of the current instruction being translated. */
    target_ulong pc_curr;
    target_ulong page_start;
    uint32_t insn;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    TCGLabel *condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    /* M-profile ECI/ICI exception-continuable instruction state */
    int eci;
    /*
     * trans_ functions for insns which are continuable should set this true
     * after decode (ie after any UNDEF checks)
     */
    bool eci_handled;
    /* TCG op to rewind to if this turns out to be an invalid ECI state */
    TCGOp *insn_eci_rewind;
    int thumb;
    int sctlr_b;
    MemOp be_data;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
    uint8_t tbii;      /* TBI1|TBI0 for insns */
    uint8_t tbid;      /* TBI1|TBI0 for data */
    uint8_t tcma;      /* TCMA1|TCMA0 for MTE */
    bool ns;        /* Use non-secure CPREG bank on access */
    int fp_excp_el; /* FP exception EL or 0 if enabled */
    int sve_excp_el; /* SVE exception EL or 0 if enabled */
    int sve_len;     /* SVE vector length in bytes */
    /* Flag indicating that exceptions from secure mode are routed to EL3. */
    bool secure_routed_to_el3;
    bool vfp_enabled; /* FP enabled via FPSCR.EN */
    int vec_len;
    int vec_stride;
    bool v7m_handler_mode;
    bool v8m_secure; /* true if v8M and we're in Secure mode */
    bool v8m_stackcheck; /* true if we need to perform v8M stack limit checks */
    bool v8m_fpccr_s_wrong; /* true if v8M FPCCR.S != v8m_secure */
    bool v7m_new_fp_ctxt_needed; /* ASPEN set but no active FP context */
    bool v7m_lspact; /* FPCCR.LSPACT set */
    /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
     * so that top level loop can generate correct syndrome information.
     */
    uint32_t svc_imm;
    int aarch64;
    int current_el;
    /* Debug target exception level for single-step exceptions */
    int debug_target_el;
    GHashTable *cp_regs;
    uint64_t features; /* CPU features bits */
    /* Because unallocated encodings generate different exception syndrome
     * information from traps due to FP being disabled, we can't do a single
     * "is fp access disabled" check at a high level in the decode tree.
     * To help in catching bugs where the access check was forgotten in some
     * code path, we set this flag when the access check is done, and assert
     * that it is set at the point where we actually touch the FP regs.
     */
    bool fp_access_checked;
    bool sve_access_checked;
    /* ARMv8 single-step state (this is distinct from the QEMU gdbstub
     * single-step support).
     */
    bool ss_active;
    bool pstate_ss;
    /* True if the insn just emitted was a load-exclusive instruction
     * (necessary for syndrome information for single step exceptions),
     * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
     */
    bool is_ldex;
    /* True if AccType_UNPRIV should be used for LDTR et al */
    bool unpriv;
    /* True if v8.3-PAuth is active.  */
    bool pauth_active;
    /* True if v8.5-MTE access to tags is enabled.  */
    bool ata;
    /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv.  */
    bool mte_active[2];
    /* True with v8.5-BTI and SCTLR_ELx.BT* set.  */
    bool bt;
    /* True if any CP15 access is trapped by HSTR_EL2 */
    bool hstr_active;
    /* True if memory operations require alignment */
    bool align_mem;
    /*
     * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
     *  < 0, set by the current instruction.
     */
    int8_t btype;
    /* A copy of cpu->dcz_blocksize. */
    uint8_t dcz_blocksize;
    /* True if this page is guarded.  */
    bool guarded_page;
    /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
    int c15_cpar;
    /* TCG op of the current insn_start.  */
    TCGOp *insn_start;
#define TMP_A64_MAX 16
    int tmp_a64_count;
    TCGv_i64 tmp_a64[TMP_A64_MAX];
} DisasContext;

typedef struct DisasCompare {
    TCGCond cond;
    TCGv_i32 value;
    bool value_global;
} DisasCompare;

/* Share the TCG temporaries common between 32 and 64 bit modes.  */
extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
extern TCGv_i64 cpu_exclusive_addr;
extern TCGv_i64 cpu_exclusive_val;

/*
 * Constant expanders for the decoders.
 */

static inline int negate(DisasContext *s, int x)
{
    return -x;
}

static inline int plus_1(DisasContext *s, int x)
{
    return x + 1;
}

static inline int plus_2(DisasContext *s, int x)
{
    return x + 2;
}

static inline int times_2(DisasContext *s, int x)
{
    return x * 2;
}

static inline int times_4(DisasContext *s, int x)
{
    return x * 4;
}

static inline int times_2_plus_1(DisasContext *s, int x)
{
    return x * 2 + 1;
}

static inline int rsub_64(DisasContext *s, int x)
{
    return 64 - x;
}

static inline int rsub_32(DisasContext *s, int x)
{
    return 32 - x;
}

static inline int rsub_16(DisasContext *s, int x)
{
    return 16 - x;
}

static inline int rsub_8(DisasContext *s, int x)
{
    return 8 - x;
}

static inline int arm_dc_feature(DisasContext *dc, int feature)
{
    return (dc->features & (1ULL << feature)) != 0;
}
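
/*
 * Example (illustrative only): a trans_ function might gate an encoding on a
 * baseline architecture feature, e.g.
 *
 *     if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
 *         return false;
 *     }
 *
 * where ARM_FEATURE_V8 is one of the ARM_FEATURE_* enum values from cpu.h.
 */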

static inline int get_mem_index(DisasContext *s)
{
    return arm_to_core_mmu_idx(s->mmu_idx);
}

/* Function used to determine the target exception EL when otherwise not known
 * or default.
 */
static inline int default_exception_el(DisasContext *s)
{
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.  Otherwise,
     * exceptions can only be routed to ELs above 1, so we target the higher of
     * 1 or the current EL.
     */
    return (s->mmu_idx == ARMMMUIdx_SE10_0 && s->secure_routed_to_el3)
            ? 3 : MAX(1, s->current_el);
}

static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
{
    /* We don't need to save all of the syndrome so we mask and shift
     * out unneeded bits to help the sleb128 encoder do a better job.
     */
    syn &= ARM_INSN_START_WORD2_MASK;
    syn >>= ARM_INSN_START_WORD2_SHIFT;

    /* We check and clear insn_start_idx to catch multiple updates.  */
    assert(s->insn_start != NULL);
    tcg_set_insn_start_param(s->insn_start, 2, syn);
    s->insn_start = NULL;
}

/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
/* CPU state was modified dynamically; exit to main loop for interrupts. */
#define DISAS_UPDATE_EXIT  DISAS_TARGET_1
/* These instructions trap after executing, so the A32/T32 decoder must
 * defer them until after the conditional execution state has been updated.
 * WFI also needs special handling when single-stepping.
 */
#define DISAS_WFI       DISAS_TARGET_2
#define DISAS_SWI       DISAS_TARGET_3
/* WFE */
#define DISAS_WFE       DISAS_TARGET_4
#define DISAS_HVC       DISAS_TARGET_5
#define DISAS_SMC       DISAS_TARGET_6
#define DISAS_YIELD     DISAS_TARGET_7
/* M profile branch which might be an exception return (and so needs
 * custom end-of-TB code)
 */
#define DISAS_BX_EXCRET DISAS_TARGET_8
/*
 * For instructions which want an immediate exit to the main loop, as opposed
 * to attempting to use lookup_and_goto_ptr.  Unlike DISAS_UPDATE_EXIT, this
 * doesn't write the PC on exiting the translation loop so you need to ensure
 * something (gen_a64_set_pc_im or runtime helper) has done so before we reach
 * return from cpu_tb_exec.
 */
#define DISAS_EXIT      DISAS_TARGET_9
/* CPU state was modified dynamically; no need to exit, but do not chain. */
#define DISAS_UPDATE_NOCHAIN  DISAS_TARGET_10

#ifdef TARGET_AARCH64
void a64_translate_init(void);
void gen_a64_set_pc_im(uint64_t val);
extern const TranslatorOps aarch64_translator_ops;
#else
static inline void a64_translate_init(void)
{
}

static inline void gen_a64_set_pc_im(uint64_t val)
{
}
#endif

void arm_test_cc(DisasCompare *cmp, int cc);
void arm_free_cc(DisasCompare *cmp);
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
void arm_gen_test_cc(int cc, TCGLabel *label);
MemOp pow2_align(unsigned i);
void unallocated_encoding(DisasContext *s);
void gen_exception_insn(DisasContext *s, uint64_t pc, int excp,
                        uint32_t syn, uint32_t target_el);
 274
 275/* Return state of Alternate Half-precision flag, caller frees result */
 276static inline TCGv_i32 get_ahp_flag(void)
 277{
 278    TCGv_i32 ret = tcg_temp_new_i32();
 279
 280    tcg_gen_ld_i32(ret, cpu_env,
 281                   offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPSCR]));
 282    tcg_gen_extract_i32(ret, ret, 26, 1);
 283
 284    return ret;
 285}
 286
 287/* Set bits within PSTATE.  */
 288static inline void set_pstate_bits(uint32_t bits)
 289{
 290    TCGv_i32 p = tcg_temp_new_i32();
 291
 292    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));
 293
 294    tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
 295    tcg_gen_ori_i32(p, p, bits);
 296    tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
 297    tcg_temp_free_i32(p);
 298}
 299
 300/* Clear bits within PSTATE.  */
 301static inline void clear_pstate_bits(uint32_t bits)
 302{
 303    TCGv_i32 p = tcg_temp_new_i32();
 304
 305    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));
 306
 307    tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
 308    tcg_gen_andi_i32(p, p, ~bits);
 309    tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
 310    tcg_temp_free_i32(p);
 311}
 312
 313/* If the singlestep state is Active-not-pending, advance to Active-pending. */
 314static inline void gen_ss_advance(DisasContext *s)
 315{
 316    if (s->ss_active) {
 317        s->pstate_ss = 0;
 318        clear_pstate_bits(PSTATE_SS);
 319    }
 320}
 321
 322static inline void gen_exception(int excp, uint32_t syndrome,
 323                                 uint32_t target_el)
 324{
 325    TCGv_i32 tcg_excp = tcg_const_i32(excp);
 326    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
 327    TCGv_i32 tcg_el = tcg_const_i32(target_el);
 328
 329    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
 330                                       tcg_syn, tcg_el);
 331
 332    tcg_temp_free_i32(tcg_el);
 333    tcg_temp_free_i32(tcg_syn);
 334    tcg_temp_free_i32(tcg_excp);
 335}
 336
 337/* Generate an architectural singlestep exception */
 338static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
 339{
 340    bool same_el = (s->debug_target_el == s->current_el);
 341
 342    /*
 343     * If singlestep is targeting a lower EL than the current one,
 344     * then s->ss_active must be false and we can never get here.
 345     */
 346    assert(s->debug_target_el >= s->current_el);
 347
 348    gen_exception(EXCP_UDEF, syn_swstep(same_el, isv, ex), s->debug_target_el);
 349}
 350
 351/*
 352 * Given a VFP floating point constant encoded into an 8 bit immediate in an
 353 * instruction, expand it to the actual constant value of the specified
 354 * size, as per the VFPExpandImm() pseudocode in the Arm ARM.
 355 */
 356uint64_t vfp_expand_imm(int size, uint8_t imm8);
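
/*
 * Worked example (illustrative): with size == MO_64, the encoded immediate
 * 0x70 expands to 0x3ff0000000000000, i.e. the double-precision value 1.0,
 * following the VFPExpandImm() sign/exponent/fraction unpacking.
 */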

/* Vector operations shared between ARM and AArch64.  */
void gen_gvec_ceq0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_clt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cgt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cle0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cge0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);

void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

/*
 * Forward to the isar_feature_* tests given a DisasContext pointer.
 */
#define dc_isar_feature(name, ctx) \
    ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })
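
/*
 * Example (illustrative; aa32_fp16_arith is one of the isar_feature_* tests
 * defined against the ARMISARegisters fields in cpu.h):
 *
 *     if (!dc_isar_feature(aa32_fp16_arith, s)) {
 *         return false;
 *     }
 */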

/* Note that the gvec expanders operate on offsets + sizes.  */
typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpFn(TCGv_i32, TCGv_i32);
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoOpWidenFn(TCGv_i64, TCGv_i32, TCGv_i32);
typedef void NeonGenOneSingleOpFn(TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoSingleOpFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOne64OpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
typedef void ShiftFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);

/**
 * arm_tbflags_from_tb:
 * @tb: the TranslationBlock
 *
 * Extract the flag values from @tb.
 */
static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
{
    return (CPUARMTBFlags){ tb->flags, tb->cs_base };
}

/*
 * Enum for argument to fpstatus_ptr().
 */
typedef enum ARMFPStatusFlavour {
    FPST_FPCR,
    FPST_FPCR_F16,
    FPST_STD,
    FPST_STD_F16,
} ARMFPStatusFlavour;

/**
 * fpstatus_ptr: return TCGv_ptr to the specified fp_status field
 *
 * We have multiple softfloat float_status fields in the Arm CPU state struct
 * (see the comment in cpu.h for details). Return a TCGv_ptr which has
 * been set up to point to the requested field in the CPU state struct.
 * The options are:
 *
 * FPST_FPCR
 *   for non-FP16 operations controlled by the FPCR
 * FPST_FPCR_F16
 *   for operations controlled by the FPCR where FPCR.FZ16 is to be used
 * FPST_STD
 *   for A32/T32 Neon operations using the "standard FPSCR value"
 * FPST_STD_F16
 *   as FPST_STD, but where FPCR.FZ16 is to be used
 */
static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    switch (flavour) {
    case FPST_FPCR:
        offset = offsetof(CPUARMState, vfp.fp_status);
        break;
    case FPST_FPCR_F16:
        offset = offsetof(CPUARMState, vfp.fp_status_f16);
        break;
    case FPST_STD:
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
        break;
    case FPST_STD_F16:
        offset = offsetof(CPUARMState, vfp.standard_fp_status_f16);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
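
/*
 * Illustrative usage sketch (hypothetical emitter; gen_helper_vfp_adds is the
 * existing helper for single-precision VADD, the i32 temporaries are assumed):
 *
 *     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
 *     gen_helper_vfp_adds(dest, op1, op2, fpst);
 *     tcg_temp_free_ptr(fpst);
 */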

/**
 * finalize_memop:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 *
 * Build the complete MemOp for a memory operation, including alignment
 * and endianness.
 *
 * If (op & MO_AMASK) then the operation already contains the required
 * alignment, e.g. for AccType_ATOMIC.  Otherwise, this is an optionally
 * unaligned operation, e.g. for AccType_NORMAL.
 *
 * In the latter case, there are configuration bits that require alignment,
 * and this is applied here.  Note that there is no way to indicate that
 * no alignment should ever be enforced; this must be handled manually.
 */
static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
{
    if (s->align_mem && !(opc & MO_AMASK)) {
        opc |= MO_ALIGN;
    }
    return opc | s->be_data;
}
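
/*
 * Illustrative sketch (hypothetical load path; tcg_gen_qemu_ld_i32 and
 * get_mem_index() are real, the surrounding variables are assumed):
 *
 *     MemOp memop = finalize_memop(s, MO_UL);
 *     tcg_gen_qemu_ld_i32(dest, addr, get_mem_index(s), memop);
 */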

/**
 * asimd_imm_const: Expand an encoded SIMD constant value
 *
 * Expand a SIMD constant value. This is essentially the pseudocode
 * AdvSIMDExpandImm, except that we also perform the boolean NOT needed for
 * VMVN and VBIC (when cmode < 14 && op == 1).
 *
 * The combination cmode == 15, op == 1 is a reserved encoding for AArch32;
 * callers must catch this; we return the 64-bit constant value defined
 * for AArch64.
 *
 * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
 * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
 * we produce an immediate constant value of 0 in these cases.
 */
uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);
#endif /* TARGET_ARM_TRANSLATE_H */