qemu/accel/tcg/user-exec.c
/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "trace/mem.h"

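/*
 * Undefine the x86 register names before including <sys/ucontext.h>:
 * depending on the host and target headers, the same names may already
 * be defined (see the glibc 2.1 REG_EIP fallback further down) and the
 * two sets of definitions must not clash.
 */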
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#ifdef __linux__
#include <sys/ucontext.h>
#endif

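/*
 * helper_retaddr records where a fault taken during a host memory access
 * should unwind to.  handle_cpu_signal() below interprets it as follows:
 * 0 means the fault happened in TCG-generated code, 1 means it happened
 * while reading guest memory for translation, and any other value is the
 * helper's host return address (GETPC()).
 *
 * Typical use from a helper is a sketch like:
 *
 *     set_helper_retaddr(GETPC());
 *     ... direct host access to guest memory that may fault ...
 *     clear_helper_retaddr();
 */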
__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
static void cpu_exit_tb_from_sighandler(CPUState *cpu, sigset_t *old_set)
{
    /* XXX: use siglongjmp ? */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit_noexc(cpu);
}

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
                                    int is_write, sigset_t *old_set)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc;
    unsigned long address = (unsigned long)info->si_addr;
    MMUAccessType access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;

    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         *
         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
         */
        pc = 0;
        access_type = MMU_INST_FETCH;
        mmap_unlock();
        break;
    }

    /* For synchronous signals we expect to be coming from the vCPU
     * thread (so current_cpu should be valid) and either from running
     * code or during translation, which can fault as we cross pages.
     *
     * If neither is true then something has gone wrong and we should
     * abort rather than try to restart the vCPU execution.
     */
    if (!cpu || !cpu->running) {
        printf("qemu:%s received signal outside vCPU context @ pc=0x%"
               PRIxPTR "\n",  __func__, pc);
        abort();
    }

#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* Note that it is important that we don't call page_unprotect() unless
     * this is really a "write to nonwriteable page" fault, because
     * page_unprotect() assumes that if it is called for an access to
     * a page that's writeable this means we had two threads racing and
     * another thread got there first and already made the page writeable;
     * so we will retry the access. If we were to call page_unprotect()
     * for some other kind of fault that should really be passed to the
     * guest, we'd end up in an infinite loop of retrying the faulting
     * access.
     */
    if (is_write && info->si_signo == SIGSEGV && info->si_code == SEGV_ACCERR &&
        h2g_valid(address)) {
        switch (page_unprotect(h2g(address), pc)) {
        case 0:
            /* Fault not caused by a page marked unwritable to protect
             * cached translations, must be the guest binary's problem.
             */
            break;
        case 1:
            /* Fault caused by protection of cached translation; TBs
             * invalidated, so resume execution.  Retain helper_retaddr
             * for a possible second fault.
             */
            return 1;
        case 2:
            /* Fault caused by protection of cached translation, and the
             * currently executing TB was modified and must be exited
             * immediately.  Clear helper_retaddr for next execution.
             */
            clear_helper_retaddr();
            cpu_exit_tb_from_sighandler(cpu, old_set);
            /* NORETURN */

        default:
            g_assert_not_reached();
        }
    }

    /* Convert forcefully to guest address space; invalid addresses
       are still valid segv ones */
    address = h2g_nocheck(address);

    /*
     * There is no way the target can handle this other than raising
     * an exception.  Undo signal and retaddr state prior to longjmp.
     */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    clear_helper_retaddr();

    cc = CPU_GET_CLASS(cpu);
    cc->tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc);
    g_assert_not_reached();
}

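/*
 * Check whether a guest access of @access_type to @addr is permitted by
 * the page flags.  Returns 0 if it is; otherwise either returns
 * TLB_INVALID_MASK (when @nonfault is set) or raises the target fault via
 * tlb_fill, which does not return.
 */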
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int flags;

    switch (access_type) {
    case MMU_DATA_STORE:
        flags = PAGE_WRITE;
        break;
    case MMU_DATA_LOAD:
        flags = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        flags = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (!guest_addr_valid(addr) || page_check_range(addr, 1, flags) < 0) {
        if (nonfault) {
            return TLB_INVALID_MASK;
        } else {
            CPUState *cpu = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->tlb_fill(cpu, addr, fault_size, access_type,
                         MMU_USER_IDX, false, ra);
            g_assert_not_reached();
        }
    }
    return 0;
}

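/*
 * Probe for an access: on success *@phost receives the host address for
 * @addr and 0 is returned.  If the access is not permitted, either
 * TLB_INVALID_MASK is returned with *@phost set to NULL (when @nonfault
 * is set) or the target fault is raised.
 */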
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(addr);
    return flags;
}

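/*
 * Like probe_access_flags, but the access must succeed: any failure raises
 * the target fault.  Returns the host address for @addr, or NULL when
 * @size is 0.
 */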
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(addr) : NULL;
}

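/*
 * The rest of the fault handling is host specific: each cpu_signal_handler
 * implementation below extracts the faulting host PC from the signal
 * context, works out whether the access was a write, and forwards both to
 * handle_cpu_signal above.
 */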
#if defined(__i386__)

#if defined(__NetBSD__)
#include <ucontext.h>

#define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
#define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define EIP_sig(context)  (*((unsigned long *)&(context)->uc_mcontext.mc_eip))
#define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define EIP_sig(context)     ((context)->sc_eip)
#define TRAP_sig(context)    ((context)->sc_trapno)
#define ERROR_sig(context)   ((context)->sc_err)
#define MASK_sig(context)    ((context)->sc_mask)
#else
#define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
#define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    ucontext_t *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, info,
                             trapno == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc));
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long *)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    ucontext_t *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, info,
                             TRAP_sig(uc) == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc));
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
#define REG_sig(reg_name, context)              \
    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
#define GPR_sig(reg_num, context)              REG_sig(gpr[reg_num], context)
/* Program counter */
#define IAR_sig(context)                       REG_sig(nip, context)
/* Machine State Register (Supervisor) */
#define MSR_sig(context)                       REG_sig(msr, context)
/* Count register */
#define CTR_sig(context)                       REG_sig(ctr, context)
/* User's integer exception register */
#define XER_sig(context)                       REG_sig(xer, context)
/* Link register */
#define LR_sig(context)                        REG_sig(link, context)
/* Condition register */
#define CR_sig(context)                        REG_sig(ccr, context)

/* Float Registers access  */
#define FLOAT_sig(reg_num, context)                                     \
    (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
#define FPSCR_sig(context) \
    (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
#define DAR_sig(context)                       REG_sig(dar, context)
#define DSISR_sig(context)                     REG_sig(dsisr, context)
#define TRAP_sig(context)                      REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
#define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
#define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
#define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
#define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
#define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
#define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
#define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
#define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
#define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    ucontext_t *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000) {
        is_write = 1;
    }
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000)) {
        is_write = 1;
    }
#endif
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                           void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#elif defined(__NetBSD__)
    ucontext_t *uc = puc;
    unsigned long pc = _UC_MACHINE_PC(uc);
    void *sigmask = (void *)&uc->uc_sigmask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: /* stb */
        case 0x15: /* stba */
        case 0x06: /* sth */
        case 0x16: /* stha */
        case 0x04: /* st */
        case 0x14: /* sta */
        case 0x07: /* std */
        case 0x17: /* stda */
        case 0x0e: /* stx */
        case 0x1e: /* stxa */
        case 0x24: /* stf */
        case 0x34: /* stfa */
        case 0x27: /* stdf */
        case 0x37: /* stdfa */
        case 0x26: /* stqf */
        case 0x36: /* stqfa */
        case 0x25: /* stfsr */
        case 0x3c: /* casa */
        case 0x3e: /* casxa */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, info, is_write, sigmask);
}

#elif defined(__arm__)

#if defined(__NetBSD__)
#include <ucontext.h>
#include <sys/siginfo.h>
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__)
    ucontext_t *uc = puc;
    siginfo_t *si = pinfo;
#else
    ucontext_t *uc = puc;
#endif
    unsigned long pc;
    uint32_t fsr;
    int is_write;

#if defined(__NetBSD__)
    pc = uc->uc_mcontext.__gregs[_REG_R15];
#elif defined(__GLIBC__) && (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif

#ifdef __NetBSD__
    fsr = si->si_trap;
#else
    fsr = uc->uc_mcontext.error_code;
#endif
    /*
     * In the FSR, bit 11 is WnR, assuming a v6 or
     * later processor.  On v5 we will always report
     * this as a read, which will fail later.
     */
    is_write = extract32(fsr, 11, 1);
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__aarch64__)

#if defined(__NetBSD__)

#include <ucontext.h>
#include <sys/siginfo.h>

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    ucontext_t *uc = puc;
    siginfo_t *si = pinfo;
    unsigned long pc;
    int is_write;
    uint32_t esr;

    pc = uc->uc_mcontext.__gregs[_REG_PC];
    esr = si->si_trap;

    /*
     * siginfo_t::si_trap is the ESR value, for data aborts ESR.EC
     * is 0b10010x: then bit 6 is the WnR bit
     */
    is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1;
    return handle_cpu_signal(pc, si, is_write, &uc->uc_sigmask);
}

#else

#ifndef ESR_MAGIC
/* Pre-3.16 kernel headers don't have these, so provide fallback definitions */
#define ESR_MAGIC 0x45535201
struct esr_context {
    struct _aarch64_ctx head;
    uint64_t esr;
};
#endif

static inline struct _aarch64_ctx *first_ctx(ucontext_t *uc)
{
    return (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved;
}

static inline struct _aarch64_ctx *next_ctx(struct _aarch64_ctx *hdr)
{
    return (struct _aarch64_ctx *)((char *)hdr + hdr->size);
}

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    uintptr_t pc = uc->uc_mcontext.pc;
    bool is_write;
    struct _aarch64_ctx *hdr;
    struct esr_context const *esrctx = NULL;

    /* Find the esr_context, which has the WnR bit in it */
    for (hdr = first_ctx(uc); hdr->magic; hdr = next_ctx(hdr)) {
        if (hdr->magic == ESR_MAGIC) {
            esrctx = (struct esr_context const *)hdr;
            break;
        }
    }

    if (esrctx) {
        /* For data aborts ESR.EC is 0b10010x: then bit 6 is the WnR bit */
        uint64_t esr = esrctx->esr;
        is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1;
    } else {
        /*
         * Fall back to parsing instructions; will only be needed
         * for really ancient (pre-3.16) kernels.
         */
        uint32_t insn = *(uint32_t *)pc;

        is_write = ((insn & 0xbfff0000) == 0x0c000000   /* C3.3.1 */
                    || (insn & 0xbfe00000) == 0x0c800000   /* C3.3.2 */
                    || (insn & 0xbfdf0000) == 0x0d000000   /* C3.3.3 */
                    || (insn & 0xbfc00000) == 0x0d800000   /* C3.3.4 */
                    || (insn & 0x3f400000) == 0x08000000   /* C3.3.6 */
                    || (insn & 0x3bc00000) == 0x39000000   /* C3.3.13 */
                    || (insn & 0x3fc00000) == 0x3d800000   /* ... 128bit */
                    /* Ignore bits 10, 11 & 21, controlling indexing.  */
                    || (insn & 0x3bc00000) == 0x38000000   /* C3.3.8-12 */
                    || (insn & 0x3fe00000) == 0x3c800000   /* ... 128bit */
                    /* Ignore bits 23 & 24, controlling indexing.  */
                    || (insn & 0x3a400000) == 0x28000000); /* C3.3.7,14-16 */
    }
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
#endif

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__mips__)

#if defined(__mips16) || defined(__mips_micromips)
#error "Unsupported encoding"
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    uintptr_t pc = uc->uc_mcontext.pc;
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* Detect all store instructions at the program counter. */
    switch ((insn >> 26) & 077) {
    case 050: /* SB */
    case 051: /* SH */
    case 052: /* SWL */
    case 053: /* SW */
    case 054: /* SDL */
    case 055: /* SDR */
    case 056: /* SWR */
    case 070: /* SC */
    case 071: /* SWC1 */
    case 074: /* SCD */
    case 075: /* SDC1 */
    case 077: /* SD */
#if !defined(__mips_isa_rev) || __mips_isa_rev < 6
    case 072: /* SWC2 */
    case 076: /* SDC2 */
#endif
        is_write = 1;
        break;
    case 023: /* COP1X */
        /* Required in all versions of MIPS64 since
           MIPS64r1 and subsequent versions of MIPS32r2. */
        switch (insn & 077) {
        case 010: /* SWXC1 */
        case 011: /* SDXC1 */
        case 015: /* SUXC1 */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__riscv)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    greg_t pc = uc->uc_mcontext.__gregs[REG_PC];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* Detect a store by reading the instruction at the program
       counter. Note: we currently only generate 32-bit
       instructions, so we only detect 32-bit stores here. */
    switch (((insn >> 0) & 0b11)) {
    case 3:
        switch (((insn >> 2) & 0b11111)) {
        case 8:
            switch (((insn >> 12) & 0b111)) {
            case 0: /* sb */
            case 1: /* sh */
            case 2: /* sw */
            case 3: /* sd */
            case 4: /* sq */
                is_write = 1;
                break;
            default:
                break;
            }
            break;
        case 9:
            switch (((insn >> 12) & 0b111)) {
            case 2: /* fsw */
            case 3: /* fsd */
            case 4: /* fsq */
                is_write = 1;
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }
    }

    /* Check for compressed instructions */
    switch (((insn >> 13) & 0b111)) {
    case 7:
        switch (insn & 0b11) {
        case 0: /* c.sd */
        case 2: /* c.sdsp */
            is_write = 1;
            break;
        default:
            break;
        }
        break;
    case 6:
        switch (insn & 0b11) {
        case 0: /* c.sw */
        case 2: /* c.swsp */
            is_write = 1;
            break;
        default:
            break;
        }
        break;
    default:
        break;
    }

    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#else

#error host CPU specific signal handler needed

#endif

/* The softmmu versions of these helpers are in cputlb.c.  */

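/*
 * These user-only versions access guest memory directly through g2h() and
 * only have to emit the tracing and plugin callbacks around the access.
 */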
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldub_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
{
    int ret;
    uint16_t meminfo = trace_mem_get_info(MO_SB, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldsb_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = lduw_be_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
{
    int ret;
    uint16_t meminfo = trace_mem_get_info(MO_BESW, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldsw_be_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldl_be_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldq_be_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = lduw_le_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
{
    int ret;
    uint16_t meminfo = trace_mem_get_info(MO_LESW, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldsw_le_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldl_le_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldq_le_p(g2h(ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

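/*
 * The _ra variants below wrap the plain accessors with set/clear of
 * helper_retaddr, so that a fault taken during the access unwinds to the
 * caller's return address instead of being treated as a fault in
 * generated code.
 */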
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldub_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    int ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldsb_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_lduw_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    int ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldsw_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldl_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint64_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldq_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_lduw_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    int ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldsw_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldl_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint64_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldq_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stb_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stw_be_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stl_be_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stq_be_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stw_le_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stl_le_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stq_le_p(g2h(ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stb_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stw_be_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stl_be_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stq_be_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stw_le_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stl_le_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stq_le_data(env, ptr, val);
    clear_helper_retaddr();
}

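/*
 * Code loads are used while translating; helper_retaddr is set to 1 so
 * that a fault here is reported as MMU_INST_FETCH by handle_cpu_signal
 * without triggering the unwinder.
 */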
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h(ptr));
    clear_helper_retaddr();
    return ret;
}

/* Do not allow unaligned operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               int size, uintptr_t retaddr)
{
    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }
    void *ret = g2h(addr);
    set_helper_retaddr(retaddr);
    return ret;
}

/* Macro to call the above, with local variables from the use context.  */
#define ATOMIC_MMU_DECLS do {} while (0)
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_MMU_IDX MMU_USER_IDX

#define ATOMIC_NAME(X)   HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define EXTRA_ARGS

#include "atomic_common.c.inc"

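/*
 * Instantiate the generic atomic helpers from atomic_template.h once per
 * operand size, using the ATOMIC_* macros defined above.
 */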
#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* The following is only callable from other helpers, and matches up
   with the softmmu version.  */

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP

#define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)

#define DATA_SIZE 16
#include "atomic_template.h"
#endif
