qemu/accel/tcg/user-exec.c
   1/*
   2 *  User emulator execution
   3 *
   4 *  Copyright (c) 2003-2005 Fabrice Bellard
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2.1 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19#include "qemu/osdep.h"
  20#include "cpu.h"
  21#include "hw/core/tcg-cpu-ops.h"
  22#include "disas/disas.h"
  23#include "exec/exec-all.h"
  24#include "tcg/tcg.h"
  25#include "qemu/bitops.h"
  26#include "exec/cpu_ldst.h"
  27#include "exec/translate-all.h"
  28#include "exec/helper-proto.h"
  29#include "qemu/atomic128.h"
  30#include "trace/trace-root.h"
  31#include "trace/mem.h"
  32
  33#undef EAX
  34#undef ECX
  35#undef EDX
  36#undef EBX
  37#undef ESP
  38#undef EBP
  39#undef ESI
  40#undef EDI
  41#undef EIP
  42#ifdef __linux__
  43#include <sys/ucontext.h>
  44#endif
  45
  46__thread uintptr_t helper_retaddr;
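
/*
 * helper_retaddr is the per-thread hand-off between target helpers and
 * handle_cpu_signal() below.  Its value selects how a host SIGSEGV is
 * unwound:
 *   0      - fault raised directly by generated code
 *   1      - fault raised while reading guest memory for translation
 *   other  - the helper's host return address, i.e. GETPC()
 *
 * A minimal sketch of how a helper brackets a guest access (the
 * cpu_*_data_ra() wrappers later in this file follow this pattern):
 *
 *     set_helper_retaddr(GETPC());
 *     val = ldl_le_p(g2h(env_cpu(env), guest_addr));
 *     clear_helper_retaddr();
 */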
  47
  48//#define DEBUG_SIGNAL
  49
  50/* Exit the current TB from a signal handler. The host registers are
  51   restored in a state compatible with the CPU emulator.
  52 */
  53static void QEMU_NORETURN cpu_exit_tb_from_sighandler(CPUState *cpu,
  54                                                      sigset_t *old_set)
  55{
  56    /* XXX: use siglongjmp ? */
  57    sigprocmask(SIG_SETMASK, old_set, NULL);
  58    cpu_loop_exit_noexc(cpu);
  59}
  60
  61/* 'pc' is the host PC at which the exception was raised. 'address' is
  62   the effective address of the memory exception. 'is_write' is 1 if a
  63   write caused the exception and 0 otherwise. 'old_set' is the
  64   signal set which should be restored */
  65static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
  66                                    int is_write, sigset_t *old_set)
  67{
  68    CPUState *cpu = current_cpu;
  69    CPUClass *cc;
  70    unsigned long address = (unsigned long)info->si_addr;
  71    MMUAccessType access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
  72
  73    switch (helper_retaddr) {
  74    default:
  75        /*
  76         * Fault during host memory operation within a helper function.
  77         * The helper's host return address, saved here, gives us a
  78         * pointer into the generated code that will unwind to the
  79         * correct guest pc.
  80         */
  81        pc = helper_retaddr;
  82        break;
  83
  84    case 0:
  85        /*
  86         * Fault during host memory operation within generated code.
  88         * (Or, an unrelated bug within qemu, but we can't tell from here).
  88         *
  89         * We take the host pc from the signal frame.  However, we cannot
  90         * use that value directly.  Within cpu_restore_state_from_tb, we
  91         * assume PC comes from GETPC(), as used by the helper functions,
  92         * so we adjust the address by -GETPC_ADJ to form an address that
  93         * is within the call insn, so that the address does not accidentally
  94         * match the beginning of the next guest insn.  However, when the
  95         * pc comes from the signal frame it points to the actual faulting
  96         * host memory insn and not the return from a call insn.
  97         *
  98         * Therefore, adjust to compensate for what will be done later
  99         * by cpu_restore_state_from_tb.
 100         */
 101        pc += GETPC_ADJ;
 102        break;
 103
 104    case 1:
 105        /*
 106         * Fault during host read for translation, or loosely, "execution".
 107         *
 108         * The guest pc is already pointing to the start of the TB for which
 109         * code is being generated.  If the guest translator manages the
 110         * page crossings correctly, this is exactly the correct address
 111         * (and if the translator doesn't handle page boundaries correctly
 112         * there's little we can do about that here).  Therefore, do not
 113         * trigger the unwinder.
 114         *
 115         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
 116         */
 117        pc = 0;
 118        access_type = MMU_INST_FETCH;
 119        mmap_unlock();
 120        break;
 121    }
 122
 123    /* For synchronous signals we expect to be coming from the vCPU
 124     * thread (so current_cpu should be valid) and either from running
 125     * code or during translation, which can fault as we cross pages.
 126     *
 127     * If neither is true then something has gone wrong and we should
 128     * abort rather than try to restart the vCPU execution.
 129     */
 130    if (!cpu || !cpu->running) {
 131        printf("qemu:%s received signal outside vCPU context @ pc=0x%"
 132               PRIxPTR "\n",  __func__, pc);
 133        abort();
 134    }
 135
 136#if defined(DEBUG_SIGNAL)
 137    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
 138           pc, address, is_write, *(unsigned long *)old_set);
 139#endif
 140    /* XXX: locking issue */
 141    /* Note that it is important that we don't call page_unprotect() unless
 142     * this is really a "write to nonwriteable page" fault, because
 143     * page_unprotect() assumes that if it is called for an access to
 144     * a page that's writeable, this means we had two threads racing and
 145     * another thread got there first and already made the page writeable;
 146     * so we will retry the access. If we were to call page_unprotect()
 147     * for some other kind of fault that should really be passed to the
 148     * guest, we'd end up in an infinite loop of retrying the faulting
 149     * access.
 150     */
 151    if (is_write && info->si_signo == SIGSEGV && info->si_code == SEGV_ACCERR &&
 152        h2g_valid(address)) {
 153        switch (page_unprotect(h2g(address), pc)) {
 154        case 0:
 155            /* Fault not caused by a page marked unwritable to protect
 156             * cached translations, must be the guest binary's problem.
 157             */
 158            break;
 159        case 1:
 160            /* Fault caused by protection of cached translation; TBs
 161             * invalidated, so resume execution.  Retain helper_retaddr
 162             * for a possible second fault.
 163             */
 164            return 1;
 165        case 2:
 166            /* Fault caused by protection of cached translation, and the
 167             * currently executing TB was modified and must be exited
 168             * immediately.  Clear helper_retaddr for next execution.
 169             */
 170            clear_helper_retaddr();
 171            cpu_exit_tb_from_sighandler(cpu, old_set);
 172            /* NORETURN */
 173
 174        default:
 175            g_assert_not_reached();
 176        }
 177    }
 178
 179    /* Convert forcefully to guest address space; invalid addresses
 180       are still valid segv ones. */
 181    address = h2g_nocheck(address);
 182
 183    /*
 184     * There is no way the target can handle this other than raising
 185     * an exception.  Undo signal and retaddr state prior to longjmp.
 186     */
 187    sigprocmask(SIG_SETMASK, old_set, NULL);
 188    clear_helper_retaddr();
 189
 190    cc = CPU_GET_CLASS(cpu);
 191    cc->tcg_ops->tlb_fill(cpu, address, 0, access_type,
 192                          MMU_USER_IDX, false, pc);
 193    g_assert_not_reached();
 194}
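
/*
 * The cpu_signal_handler() implementations below are not installed here;
 * that is done by the user-mode front end's signal code.  A minimal sketch
 * of the host-side wiring, assuming a wrapper named host_signal_handler
 * (the name is illustrative, not taken from this file):
 *
 *     static void host_signal_handler(int sig, siginfo_t *info, void *uc)
 *     {
 *         if (cpu_signal_handler(sig, info, uc)) {
 *             return;          // fault handled; resume guest execution
 *         }
 *         // otherwise queue the signal for the guest as usual
 *     }
 *
 *     struct sigaction act = { .sa_flags = SA_SIGINFO };
 *     act.sa_sigaction = host_signal_handler;
 *     sigaction(SIGSEGV, &act, NULL);
 */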
 195
 196static int probe_access_internal(CPUArchState *env, target_ulong addr,
 197                                 int fault_size, MMUAccessType access_type,
 198                                 bool nonfault, uintptr_t ra)
 199{
 200    int flags;
 201
 202    switch (access_type) {
 203    case MMU_DATA_STORE:
 204        flags = PAGE_WRITE;
 205        break;
 206    case MMU_DATA_LOAD:
 207        flags = PAGE_READ;
 208        break;
 209    case MMU_INST_FETCH:
 210        flags = PAGE_EXEC;
 211        break;
 212    default:
 213        g_assert_not_reached();
 214    }
 215
 216    if (!guest_addr_valid_untagged(addr) ||
 217        page_check_range(addr, 1, flags) < 0) {
 218        if (nonfault) {
 219            return TLB_INVALID_MASK;
 220        } else {
 221            CPUState *cpu = env_cpu(env);
 222            CPUClass *cc = CPU_GET_CLASS(cpu);
 223            cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
 224                                  MMU_USER_IDX, false, ra);
 225            g_assert_not_reached();
 226        }
 227    }
 228    return 0;
 229}
 230
 231int probe_access_flags(CPUArchState *env, target_ulong addr,
 232                       MMUAccessType access_type, int mmu_idx,
 233                       bool nonfault, void **phost, uintptr_t ra)
 234{
 235    int flags;
 236
 237    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
 238    *phost = flags ? NULL : g2h(env_cpu(env), addr);
 239    return flags;
 240}
 241
 242void *probe_access(CPUArchState *env, target_ulong addr, int size,
 243                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
 244{
 245    int flags;
 246
 247    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
 248    flags = probe_access_internal(env, addr, size, access_type, false, ra);
 249    g_assert(flags == 0);
 250
 251    return size ? g2h(env_cpu(env), addr) : NULL;
 252}
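
/*
 * A minimal usage sketch for the probe API above (variable names are
 * illustrative, not taken from a particular target): a helper that wants
 * to test writability without faulting can pass nonfault=true and check
 * the returned flags before touching the page.
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_STORE,
 *                                    MMU_USER_IDX, true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         // page is not writable: take a slow path instead of faulting
 *     } else {
 *         stl_le_p(host, val);   // host pointer is valid for the access
 *     }
 */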
 253
 254#if defined(__i386__)
 255
 256#if defined(__NetBSD__)
 257#include <ucontext.h>
 258
 259#define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
 260#define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
 261#define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
 262#define MASK_sig(context)    ((context)->uc_sigmask)
 263#elif defined(__FreeBSD__) || defined(__DragonFly__)
 264#include <ucontext.h>
 265
 266#define EIP_sig(context)  (*((unsigned long *)&(context)->uc_mcontext.mc_eip))
 267#define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
 268#define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
 269#define MASK_sig(context)    ((context)->uc_sigmask)
 270#elif defined(__OpenBSD__)
 271#define EIP_sig(context)     ((context)->sc_eip)
 272#define TRAP_sig(context)    ((context)->sc_trapno)
 273#define ERROR_sig(context)   ((context)->sc_err)
 274#define MASK_sig(context)    ((context)->sc_mask)
 275#else
 276#define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
 277#define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
 278#define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
 279#define MASK_sig(context)    ((context)->uc_sigmask)
 280#endif
 281
 282int cpu_signal_handler(int host_signum, void *pinfo,
 283                       void *puc)
 284{
 285    siginfo_t *info = pinfo;
 286#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
 287    ucontext_t *uc = puc;
 288#elif defined(__OpenBSD__)
 289    struct sigcontext *uc = puc;
 290#else
 291    ucontext_t *uc = puc;
 292#endif
 293    unsigned long pc;
 294    int trapno;
 295
 296#ifndef REG_EIP
 297/* for glibc 2.1 */
 298#define REG_EIP    EIP
 299#define REG_ERR    ERR
 300#define REG_TRAPNO TRAPNO
 301#endif
 302    pc = EIP_sig(uc);
 303    trapno = TRAP_sig(uc);
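    /*
     * Trap number 0xe is the x86 page fault (#PF); bit 1 of the
     * page-fault error code is set when the faulting access was a
     * write, which is what (ERROR_sig(uc) >> 1) & 1 extracts below.
     */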
 304    return handle_cpu_signal(pc, info,
 305                             trapno == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
 306                             &MASK_sig(uc));
 307}
 308
 309#elif defined(__x86_64__)
 310
 311#ifdef __NetBSD__
 312#define PC_sig(context)       _UC_MACHINE_PC(context)
 313#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
 314#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
 315#define MASK_sig(context)     ((context)->uc_sigmask)
 316#elif defined(__OpenBSD__)
 317#define PC_sig(context)       ((context)->sc_rip)
 318#define TRAP_sig(context)     ((context)->sc_trapno)
 319#define ERROR_sig(context)    ((context)->sc_err)
 320#define MASK_sig(context)     ((context)->sc_mask)
 321#elif defined(__FreeBSD__) || defined(__DragonFly__)
 322#include <ucontext.h>
 323
 324#define PC_sig(context)  (*((unsigned long *)&(context)->uc_mcontext.mc_rip))
 325#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
 326#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
 327#define MASK_sig(context)     ((context)->uc_sigmask)
 328#else
 329#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
 330#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
 331#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
 332#define MASK_sig(context)     ((context)->uc_sigmask)
 333#endif
 334
 335int cpu_signal_handler(int host_signum, void *pinfo,
 336                       void *puc)
 337{
 338    siginfo_t *info = pinfo;
 339    unsigned long pc;
 340#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
 341    ucontext_t *uc = puc;
 342#elif defined(__OpenBSD__)
 343    struct sigcontext *uc = puc;
 344#else
 345    ucontext_t *uc = puc;
 346#endif
 347
 348    pc = PC_sig(uc);
 349    return handle_cpu_signal(pc, info,
 350                             TRAP_sig(uc) == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
 351                             &MASK_sig(uc));
 352}
 353
 354#elif defined(_ARCH_PPC)
 355
 356/***********************************************************************
 357 * signal context platform-specific definitions
 358 * From Wine
 359 */
 360#ifdef linux
 361/* All Registers access - only for local access */
 362#define REG_sig(reg_name, context)              \
 363    ((context)->uc_mcontext.regs->reg_name)
 364/* Gpr Registers access  */
 365#define GPR_sig(reg_num, context)              REG_sig(gpr[reg_num], context)
 366/* Program counter */
 367#define IAR_sig(context)                       REG_sig(nip, context)
 368/* Machine State Register (Supervisor) */
 369#define MSR_sig(context)                       REG_sig(msr, context)
 370/* Count register */
 371#define CTR_sig(context)                       REG_sig(ctr, context)
 372/* User's integer exception register */
 373#define XER_sig(context)                       REG_sig(xer, context)
 374/* Link register */
 375#define LR_sig(context)                        REG_sig(link, context)
 376/* Condition register */
 377#define CR_sig(context)                        REG_sig(ccr, context)
 378
 379/* Float Registers access  */
 380#define FLOAT_sig(reg_num, context)                                     \
 381    (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
 382#define FPSCR_sig(context) \
 383    (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
 384/* Exception Registers access */
 385#define DAR_sig(context)                       REG_sig(dar, context)
 386#define DSISR_sig(context)                     REG_sig(dsisr, context)
 387#define TRAP_sig(context)                      REG_sig(trap, context)
 388#endif /* linux */
 389
 390#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
 391#include <ucontext.h>
 392#define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
 393#define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
 394#define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
 395#define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
 396#define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
 397#define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
 398/* Exception Registers access */
 399#define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
 400#define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
 401#define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
 402#endif /* __FreeBSD__|| __FreeBSD_kernel__ */
 403
 404int cpu_signal_handler(int host_signum, void *pinfo,
 405                       void *puc)
 406{
 407    siginfo_t *info = pinfo;
 408#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
 409    ucontext_t *uc = puc;
 410#else
 411    ucontext_t *uc = puc;
 412#endif
 413    unsigned long pc;
 414    int is_write;
 415
 416    pc = IAR_sig(uc);
 417    is_write = 0;
 418#if 0
 419    /* ppc 4xx case */
 420    if (DSISR_sig(uc) & 0x00800000) {
 421        is_write = 1;
 422    }
 423#else
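    /*
     * Classic PPC case: trap 0x400 is the instruction storage interrupt
     * (no data fault to decode), otherwise DSISR bit 0x02000000 is set
     * when the faulting data access was a store.
     */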
 424    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000)) {
 425        is_write = 1;
 426    }
 427#endif
 428    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
 429}
 430
 431#elif defined(__alpha__)
 432
 433int cpu_signal_handler(int host_signum, void *pinfo,
 434                           void *puc)
 435{
 436    siginfo_t *info = pinfo;
 437    ucontext_t *uc = puc;
 438    uint32_t *pc = uc->uc_mcontext.sc_pc;
 439    uint32_t insn = *pc;
 440    int is_write = 0;
 441
 442    /* XXX: need kernel patch to get write flag faster */
 443    switch (insn >> 26) {
 444    case 0x0d: /* stw */
 445    case 0x0e: /* stb */
 446    case 0x0f: /* stq_u */
 447    case 0x24: /* stf */
 448    case 0x25: /* stg */
 449    case 0x26: /* sts */
 450    case 0x27: /* stt */
 451    case 0x2c: /* stl */
 452    case 0x2d: /* stq */
 453    case 0x2e: /* stl_c */
 454    case 0x2f: /* stq_c */
 455        is_write = 1;
 456    }
 457
 458    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
 459}
 460#elif defined(__sparc__)
 461
 462int cpu_signal_handler(int host_signum, void *pinfo,
 463                       void *puc)
 464{
 465    siginfo_t *info = pinfo;
 466    int is_write;
 467    uint32_t insn;
 468#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
 469    uint32_t *regs = (uint32_t *)(info + 1);
 470    void *sigmask = (regs + 20);
 471    /* XXX: is there a standard glibc define? */
 472    unsigned long pc = regs[1];
 473#else
 474#ifdef __linux__
 475    struct sigcontext *sc = puc;
 476    unsigned long pc = sc->sigc_regs.tpc;
 477    void *sigmask = (void *)sc->sigc_mask;
 478#elif defined(__OpenBSD__)
 479    struct sigcontext *uc = puc;
 480    unsigned long pc = uc->sc_pc;
 481    void *sigmask = (void *)(long)uc->sc_mask;
 482#elif defined(__NetBSD__)
 483    ucontext_t *uc = puc;
 484    unsigned long pc = _UC_MACHINE_PC(uc);
 485    void *sigmask = (void *)&uc->uc_sigmask;
 486#endif
 487#endif
 488
 489    /* XXX: need kernel patch to get write flag faster */
 490    is_write = 0;
 491    insn = *(uint32_t *)pc;
 492    if ((insn >> 30) == 3) {
 493        switch ((insn >> 19) & 0x3f) {
 494        case 0x05: /* stb */
 495        case 0x15: /* stba */
 496        case 0x06: /* sth */
 497        case 0x16: /* stha */
 498        case 0x04: /* st */
 499        case 0x14: /* sta */
 500        case 0x07: /* std */
 501        case 0x17: /* stda */
 502        case 0x0e: /* stx */
 503        case 0x1e: /* stxa */
 504        case 0x24: /* stf */
 505        case 0x34: /* stfa */
 506        case 0x27: /* stdf */
 507        case 0x37: /* stdfa */
 508        case 0x26: /* stqf */
 509        case 0x36: /* stqfa */
 510        case 0x25: /* stfsr */
 511        case 0x3c: /* casa */
 512        case 0x3e: /* casxa */
 513            is_write = 1;
 514            break;
 515        }
 516    }
 517    return handle_cpu_signal(pc, info, is_write, sigmask);
 518}
 519
 520#elif defined(__arm__)
 521
 522#if defined(__NetBSD__)
 523#include <ucontext.h>
 524#include <sys/siginfo.h>
 525#endif
 526
 527int cpu_signal_handler(int host_signum, void *pinfo,
 528                       void *puc)
 529{
 530    siginfo_t *info = pinfo;
 531#if defined(__NetBSD__)
 532    ucontext_t *uc = puc;
 533    siginfo_t *si = pinfo;
 534#else
 535    ucontext_t *uc = puc;
 536#endif
 537    unsigned long pc;
 538    uint32_t fsr;
 539    int is_write;
 540
 541#if defined(__NetBSD__)
 542    pc = uc->uc_mcontext.__gregs[_REG_R15];
 543#elif defined(__GLIBC__) && (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
 544    pc = uc->uc_mcontext.gregs[R15];
 545#else
 546    pc = uc->uc_mcontext.arm_pc;
 547#endif
 548
 549#ifdef __NetBSD__
 550    fsr = si->si_trap;
 551#else
 552    fsr = uc->uc_mcontext.error_code;
 553#endif
 554    /*
 555     * In the FSR, bit 11 is WnR, assuming a v6 or
 556     * later processor.  On v5 we will always report
 557     * this as a read, which will fail later.
 558     */
 559    is_write = extract32(fsr, 11, 1);
 560    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
 561}
 562
 563#elif defined(__aarch64__)
 564
 565#if defined(__NetBSD__)
 566
 567#include <ucontext.h>
 568#include <sys/siginfo.h>
 569
 570int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
 571{
 572    ucontext_t *uc = puc;
 573    siginfo_t *si = pinfo;
 574    unsigned long pc;
 575    int is_write;
 576    uint32_t esr;
 577
 578    pc = uc->uc_mcontext.__gregs[_REG_PC];
 579    esr = si->si_trap;
 580
 581    /*
 582     * siginfo_t::si_trap is the ESR value, for data aborts ESR.EC
 583     * is 0b10010x: then bit 6 is the WnR bit
 584     */
 585    is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1;
 586    return handle_cpu_signal(pc, si, is_write, &uc->uc_sigmask);
 587}
 588
 589#else
 590
 591#ifndef ESR_MAGIC
 592/* Pre-3.16 kernel headers don't have these, so provide fallback definitions */
 593#define ESR_MAGIC 0x45535201
 594struct esr_context {
 595    struct _aarch64_ctx head;
 596    uint64_t esr;
 597};
 598#endif
 599
 600static inline struct _aarch64_ctx *first_ctx(ucontext_t *uc)
 601{
 602    return (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved;
 603}
 604
 605static inline struct _aarch64_ctx *next_ctx(struct _aarch64_ctx *hdr)
 606{
 607    return (struct _aarch64_ctx *)((char *)hdr + hdr->size);
 608}
 609
 610int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
 611{
 612    siginfo_t *info = pinfo;
 613    ucontext_t *uc = puc;
 614    uintptr_t pc = uc->uc_mcontext.pc;
 615    bool is_write;
 616    struct _aarch64_ctx *hdr;
 617    struct esr_context const *esrctx = NULL;
 618
 619    /* Find the esr_context, which has the WnR bit in it */
 620    for (hdr = first_ctx(uc); hdr->magic; hdr = next_ctx(hdr)) {
 621        if (hdr->magic == ESR_MAGIC) {
 622            esrctx = (struct esr_context const *)hdr;
 623            break;
 624        }
 625    }
 626
 627    if (esrctx) {
 628        /* For data aborts ESR.EC is 0b10010x: then bit 6 is the WnR bit */
 629        uint64_t esr = esrctx->esr;
 630        is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1;
 631    } else {
 632        /*
 633         * Fall back to parsing instructions; will only be needed
 634         * for really ancient (pre-3.16) kernels.
 635         */
 636        uint32_t insn = *(uint32_t *)pc;
 637
 638        is_write = ((insn & 0xbfff0000) == 0x0c000000   /* C3.3.1 */
 639                    || (insn & 0xbfe00000) == 0x0c800000   /* C3.3.2 */
 640                    || (insn & 0xbfdf0000) == 0x0d000000   /* C3.3.3 */
 641                    || (insn & 0xbfc00000) == 0x0d800000   /* C3.3.4 */
 642                    || (insn & 0x3f400000) == 0x08000000   /* C3.3.6 */
 643                    || (insn & 0x3bc00000) == 0x39000000   /* C3.3.13 */
 644                    || (insn & 0x3fc00000) == 0x3d800000   /* ... 128bit */
 645                    /* Ignore bits 10, 11 & 21, controlling indexing.  */
 646                    || (insn & 0x3bc00000) == 0x38000000   /* C3.3.8-12 */
 647                    || (insn & 0x3fe00000) == 0x3c800000   /* ... 128bit */
 648                    /* Ignore bits 23 & 24, controlling indexing.  */
 649                    || (insn & 0x3a400000) == 0x28000000); /* C3.3.7,14-16 */
 650    }
 651    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
 652}
 653#endif
 654
 655#elif defined(__s390__)
 656
 657int cpu_signal_handler(int host_signum, void *pinfo,
 658                       void *puc)
 659{
 660    siginfo_t *info = pinfo;
 661    ucontext_t *uc = puc;
 662    unsigned long pc;
 663    uint16_t *pinsn;
 664    int is_write = 0;
 665
 666    pc = uc->uc_mcontext.psw.addr;
 667
 668    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
 669       of the normal 2 arguments.  The 3rd argument contains the "int_code"
 670       from the hardware which does in fact contain the is_write value.
 671       The rt signal handler, as far as I can tell, does not give this value
 672       at all.  Not that we could get to it from here even if it were.  */
 673    /* ??? This is not even close to complete, since it ignores all
 674       of the read-modify-write instructions.  */
 675    pinsn = (uint16_t *)pc;
 676    switch (pinsn[0] >> 8) {
 677    case 0x50: /* ST */
 678    case 0x42: /* STC */
 679    case 0x40: /* STH */
 680        is_write = 1;
 681        break;
 682    case 0xc4: /* RIL format insns */
 683        switch (pinsn[0] & 0xf) {
 684        case 0xf: /* STRL */
 685        case 0xb: /* STGRL */
 686        case 0x7: /* STHRL */
 687            is_write = 1;
 688        }
 689        break;
 690    case 0xe3: /* RXY format insns */
 691        switch (pinsn[2] & 0xff) {
 692        case 0x50: /* STY */
 693        case 0x24: /* STG */
 694        case 0x72: /* STCY */
 695        case 0x70: /* STHY */
 696        case 0x8e: /* STPQ */
 697        case 0x3f: /* STRVH */
 698        case 0x3e: /* STRV */
 699        case 0x2f: /* STRVG */
 700            is_write = 1;
 701        }
 702        break;
 703    }
 704    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
 705}
 706
 707#elif defined(__mips__)
 708
 709#if defined(__mips16) || defined(__mips_micromips)
 710#error "Unsupported encoding"
 711#endif
 712
 713int cpu_signal_handler(int host_signum, void *pinfo,
 714                       void *puc)
 715{
 716    siginfo_t *info = pinfo;
 717    ucontext_t *uc = puc;
 718    uintptr_t pc = uc->uc_mcontext.pc;
 719    uint32_t insn = *(uint32_t *)pc;
 720    int is_write = 0;
 721
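    /*
     * The switch below keys on the 6-bit major opcode field, insn[31:26];
     * the octal case values are the opcode numbers (e.g. 050 = 0b101000
     * = SB).
     */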
 722    /* Detect all store instructions at the program counter. */
 723    switch ((insn >> 26) & 077) {
 724    case 050: /* SB */
 725    case 051: /* SH */
 726    case 052: /* SWL */
 727    case 053: /* SW */
 728    case 054: /* SDL */
 729    case 055: /* SDR */
 730    case 056: /* SWR */
 731    case 070: /* SC */
 732    case 071: /* SWC1 */
 733    case 074: /* SCD */
 734    case 075: /* SDC1 */
 735    case 077: /* SD */
 736#if !defined(__mips_isa_rev) || __mips_isa_rev < 6
 737    case 072: /* SWC2 */
 738    case 076: /* SDC2 */
 739#endif
 740        is_write = 1;
 741        break;
 742    case 023: /* COP1X */
 743        /* COP1X is present in all versions of MIPS64 since
 744           MIPS64r1, and in MIPS32r2 and later. */
 745        switch (insn & 077) {
 746        case 010: /* SWXC1 */
 747        case 011: /* SDXC1 */
 748        case 015: /* SUXC1 */
 749            is_write = 1;
 750        }
 751        break;
 752    }
 753
 754    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
 755}
 756
 757#elif defined(__riscv)
 758
 759int cpu_signal_handler(int host_signum, void *pinfo,
 760                       void *puc)
 761{
 762    siginfo_t *info = pinfo;
 763    ucontext_t *uc = puc;
 764    greg_t pc = uc->uc_mcontext.__gregs[REG_PC];
 765    uint32_t insn = *(uint32_t *)pc;
 766    int is_write = 0;
 767
 768    /* Detect a store by reading the instruction at the program
 769       counter. Note: we currently only generate 32-bit instructions,
 770       so we only decode the 32-bit store forms here. */
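    /*
     * Field decoding used below: insn[1:0] == 0b11 marks a full 32-bit
     * instruction, insn[6:2] is the major opcode (0x08 = STORE,
     * 0x09 = STORE-FP) and insn[14:12] is funct3, which selects the
     * access width.  The second switch handles the 16-bit compressed
     * store forms, whose funct3 field lives in insn[15:13].
     */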
 771    switch (((insn >> 0) & 0b11)) {
 772    case 3:
 773        switch (((insn >> 2) & 0b11111)) {
 774        case 8:
 775            switch (((insn >> 12) & 0b111)) {
 776            case 0: /* sb */
 777            case 1: /* sh */
 778            case 2: /* sw */
 779            case 3: /* sd */
 780            case 4: /* sq */
 781                is_write = 1;
 782                break;
 783            default:
 784                break;
 785            }
 786            break;
 787        case 9:
 788            switch (((insn >> 12) & 0b111)) {
 789            case 2: /* fsw */
 790            case 3: /* fsd */
 791            case 4: /* fsq */
 792                is_write = 1;
 793                break;
 794            default:
 795                break;
 796            }
 797            break;
 798        default:
 799            break;
 800        }
 801    }
 802
 803    /* Check for compressed instructions */
 804    switch (((insn >> 13) & 0b111)) {
 805    case 7:
 806        switch (insn & 0b11) {
 807        case 0: /* c.sd */
 808        case 2: /* c.sdsp */
 809            is_write = 1;
 810            break;
 811        default:
 812            break;
 813        }
 814        break;
 815    case 6:
 816        switch (insn & 0b11) {
 817        case 0: /* c.sw */
 818        case 3: /* c.swsp */
 819            is_write = 1;
 820            break;
 821        default:
 822            break;
 823        }
 824        break;
 825    default:
 826        break;
 827    }
 828
 829    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
 830}
 831
 832#else
 833
 834#error host CPU specific signal handler needed
 835
 836#endif
 837
 838/* The softmmu versions of these helpers are in cputlb.c.  */
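
/*
 * Each user-mode helper below follows the same pattern: trace the access,
 * perform it directly on the host address returned by g2h(), then invoke
 * the plugin callback.  The _ra variants additionally bracket the access
 * with set_helper_retaddr()/clear_helper_retaddr() so that a fault unwinds
 * to the caller's GETPC() value.
 */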
 839
 840uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
 841{
 842    uint32_t ret;
 843    uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, false);
 844
 845    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
 846    ret = ldub_p(g2h(env_cpu(env), ptr));
 847    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
 848    return ret;
 849}
 850
 851int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
 852{
 853    int ret;
 854    uint16_t meminfo = trace_mem_get_info(MO_SB, MMU_USER_IDX, false);
 855
 856    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
 857    ret = ldsb_p(g2h(env_cpu(env), ptr));
 858    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
 859    return ret;
 860}
 861
 862uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
 863{
 864    uint32_t ret;
 865    uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, false);
 866
 867    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
 868    ret = lduw_be_p(g2h(env_cpu(env), ptr));
 869    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
 870    return ret;
 871}
 872
 873int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
 874{
 875    int ret;
 876    uint16_t meminfo = trace_mem_get_info(MO_BESW, MMU_USER_IDX, false);
 877
 878    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
 879    ret = ldsw_be_p(g2h(env_cpu(env), ptr));
 880    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
 881    return ret;
 882}
 883
 884uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
 885{
 886    uint32_t ret;
 887    uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, false);
 888
 889    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
 890    ret = ldl_be_p(g2h(env_cpu(env), ptr));
 891    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
 892    return ret;
 893}
 894
 895uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
 896{
 897    uint64_t ret;
 898    uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, false);
 899
 900    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
 901    ret = ldq_be_p(g2h(env_cpu(env), ptr));
 902    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
 903    return ret;
 904}
 905
 906uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
 907{
 908    uint32_t ret;
 909    uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, false);
 910
 911    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
 912    ret = lduw_le_p(g2h(env_cpu(env), ptr));
 913    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
 914    return ret;
 915}
 916
 917int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
 918{
 919    int ret;
 920    uint16_t meminfo = trace_mem_get_info(MO_LESW, MMU_USER_IDX, false);
 921
 922    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
 923    ret = ldsw_le_p(g2h(env_cpu(env), ptr));
 924    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
 925    return ret;
 926}
 927
 928uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
 929{
 930    uint32_t ret;
 931    uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, false);
 932
 933    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
 934    ret = ldl_le_p(g2h(env_cpu(env), ptr));
 935    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
 936    return ret;
 937}
 938
 939uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
 940{
 941    uint64_t ret;
 942    uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, false);
 943
 944    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
 945    ret = ldq_le_p(g2h(env_cpu(env), ptr));
 946    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
 947    return ret;
 948}
 949
 950uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
 951{
 952    uint32_t ret;
 953
 954    set_helper_retaddr(retaddr);
 955    ret = cpu_ldub_data(env, ptr);
 956    clear_helper_retaddr();
 957    return ret;
 958}
 959
 960int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
 961{
 962    int ret;
 963
 964    set_helper_retaddr(retaddr);
 965    ret = cpu_ldsb_data(env, ptr);
 966    clear_helper_retaddr();
 967    return ret;
 968}
 969
 970uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
 971{
 972    uint32_t ret;
 973
 974    set_helper_retaddr(retaddr);
 975    ret = cpu_lduw_be_data(env, ptr);
 976    clear_helper_retaddr();
 977    return ret;
 978}
 979
 980int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
 981{
 982    int ret;
 983
 984    set_helper_retaddr(retaddr);
 985    ret = cpu_ldsw_be_data(env, ptr);
 986    clear_helper_retaddr();
 987    return ret;
 988}
 989
 990uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
 991{
 992    uint32_t ret;
 993
 994    set_helper_retaddr(retaddr);
 995    ret = cpu_ldl_be_data(env, ptr);
 996    clear_helper_retaddr();
 997    return ret;
 998}
 999
1000uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1001{
1002    uint64_t ret;
1003
1004    set_helper_retaddr(retaddr);
1005    ret = cpu_ldq_be_data(env, ptr);
1006    clear_helper_retaddr();
1007    return ret;
1008}
1009
1010uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1011{
1012    uint32_t ret;
1013
1014    set_helper_retaddr(retaddr);
1015    ret = cpu_lduw_le_data(env, ptr);
1016    clear_helper_retaddr();
1017    return ret;
1018}
1019
1020int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1021{
1022    int ret;
1023
1024    set_helper_retaddr(retaddr);
1025    ret = cpu_ldsw_le_data(env, ptr);
1026    clear_helper_retaddr();
1027    return ret;
1028}
1029
1030uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1031{
1032    uint32_t ret;
1033
1034    set_helper_retaddr(retaddr);
1035    ret = cpu_ldl_le_data(env, ptr);
1036    clear_helper_retaddr();
1037    return ret;
1038}
1039
1040uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1041{
1042    uint64_t ret;
1043
1044    set_helper_retaddr(retaddr);
1045    ret = cpu_ldq_le_data(env, ptr);
1046    clear_helper_retaddr();
1047    return ret;
1048}
1049
1050void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1051{
1052    uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, true);
1053
1054    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
1055    stb_p(g2h(env_cpu(env), ptr), val);
1056    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
1057}
1058
1059void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1060{
1061    uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, true);
1062
1063    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
1064    stw_be_p(g2h(env_cpu(env), ptr), val);
1065    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
1066}
1067
1068void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1069{
1070    uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, true);
1071
1072    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
1073    stl_be_p(g2h(env_cpu(env), ptr), val);
1074    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
1075}
1076
1077void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
1078{
1079    uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, true);
1080
1081    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
1082    stq_be_p(g2h(env_cpu(env), ptr), val);
1083    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
1084}
1085
1086void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1087{
1088    uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, true);
1089
1090    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
1091    stw_le_p(g2h(env_cpu(env), ptr), val);
1092    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
1093}
1094
1095void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1096{
1097    uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, true);
1098
1099    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
1100    stl_le_p(g2h(env_cpu(env), ptr), val);
1101    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
1102}
1103
1104void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
1105{
1106    uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, true);
1107
1108    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
1109    stq_le_p(g2h(env_cpu(env), ptr), val);
1110    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
1111}
1112
1113void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
1114                     uint32_t val, uintptr_t retaddr)
1115{
1116    set_helper_retaddr(retaddr);
1117    cpu_stb_data(env, ptr, val);
1118    clear_helper_retaddr();
1119}
1120
1121void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
1122                        uint32_t val, uintptr_t retaddr)
1123{
1124    set_helper_retaddr(retaddr);
1125    cpu_stw_be_data(env, ptr, val);
1126    clear_helper_retaddr();
1127}
1128
1129void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
1130                        uint32_t val, uintptr_t retaddr)
1131{
1132    set_helper_retaddr(retaddr);
1133    cpu_stl_be_data(env, ptr, val);
1134    clear_helper_retaddr();
1135}
1136
1137void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
1138                        uint64_t val, uintptr_t retaddr)
1139{
1140    set_helper_retaddr(retaddr);
1141    cpu_stq_be_data(env, ptr, val);
1142    clear_helper_retaddr();
1143}
1144
1145void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
1146                        uint32_t val, uintptr_t retaddr)
1147{
1148    set_helper_retaddr(retaddr);
1149    cpu_stw_le_data(env, ptr, val);
1150    clear_helper_retaddr();
1151}
1152
1153void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
1154                        uint32_t val, uintptr_t retaddr)
1155{
1156    set_helper_retaddr(retaddr);
1157    cpu_stl_le_data(env, ptr, val);
1158    clear_helper_retaddr();
1159}
1160
1161void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
1162                        uint64_t val, uintptr_t retaddr)
1163{
1164    set_helper_retaddr(retaddr);
1165    cpu_stq_le_data(env, ptr, val);
1166    clear_helper_retaddr();
1167}
1168
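/*
 * The cpu_*_code fetchers set helper_retaddr to 1, which handle_cpu_signal
 * above treats as a fault while reading guest memory for translation: the
 * access is reported as MMU_INST_FETCH and no unwinding is attempted.
 */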
1169uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
1170{
1171    uint32_t ret;
1172
1173    set_helper_retaddr(1);
1174    ret = ldub_p(g2h_untagged(ptr));
1175    clear_helper_retaddr();
1176    return ret;
1177}
1178
1179uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
1180{
1181    uint32_t ret;
1182
1183    set_helper_retaddr(1);
1184    ret = lduw_p(g2h_untagged(ptr));
1185    clear_helper_retaddr();
1186    return ret;
1187}
1188
1189uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
1190{
1191    uint32_t ret;
1192
1193    set_helper_retaddr(1);
1194    ret = ldl_p(g2h_untagged(ptr));
1195    clear_helper_retaddr();
1196    return ret;
1197}
1198
1199uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
1200{
1201    uint64_t ret;
1202
1203    set_helper_retaddr(1);
1204    ret = ldq_p(g2h_untagged(ptr));
1205    clear_helper_retaddr();
1206    return ret;
1207}
1208
1209/* Do not allow unaligned operations to proceed.  Return the host address.  */
1210static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1211                               int size, uintptr_t retaddr)
1212{
1213    /* Enforce qemu required alignment.  */
1214    if (unlikely(addr & (size - 1))) {
1215        cpu_loop_exit_atomic(env_cpu(env), retaddr);
1216    }
1217    void *ret = g2h(env_cpu(env), addr);
1218    set_helper_retaddr(retaddr);
1219    return ret;
1220}
1221
1222/* Macro to call the above, with local variables from the use context.  */
1223#define ATOMIC_MMU_DECLS do {} while (0)
1224#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
1225#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
1226#define ATOMIC_MMU_IDX MMU_USER_IDX
1227
1228#define ATOMIC_NAME(X)   HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
1229#define EXTRA_ARGS
1230
1231#include "atomic_common.c.inc"
1232
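/*
 * As a rough sketch only (the real template also handles tracing and
 * plugin callbacks), for DATA_SIZE 4 the template below expands to
 * helpers along the lines of:
 *
 *     uint32_t HELPER(atomic_cmpxchgl_le)(CPUArchState *env,
 *                                         target_ulong addr,
 *                                         uint32_t cmpv, uint32_t newv)
 *     {
 *         uint32_t *haddr = ATOMIC_MMU_LOOKUP;   // sets helper_retaddr
 *         uint32_t ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
 *         ATOMIC_MMU_CLEANUP;                    // clear_helper_retaddr()
 *         return ret;
 *     }
 */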
1233#define DATA_SIZE 1
1234#include "atomic_template.h"
1235
1236#define DATA_SIZE 2
1237#include "atomic_template.h"
1238
1239#define DATA_SIZE 4
1240#include "atomic_template.h"
1241
1242#ifdef CONFIG_ATOMIC64
1243#define DATA_SIZE 8
1244#include "atomic_template.h"
1245#endif
1246
1247/* The following is only callable from other helpers, and matches up
1248   with the softmmu version.  */
1249
1250#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
1251
1252#undef EXTRA_ARGS
1253#undef ATOMIC_NAME
1254#undef ATOMIC_MMU_LOOKUP
1255
1256#define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
1257#define ATOMIC_NAME(X) \
1258    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
1259#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
1260
1261#define DATA_SIZE 16
1262#include "atomic_template.h"
1263#endif
1264