qemu/accel/tcg/user-exec.c
/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "trace/mem.h"

#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#ifdef __linux__
#include <sys/ucontext.h>
#endif

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
static void QEMU_NORETURN cpu_exit_tb_from_sighandler(CPUState *cpu,
                                                      sigset_t *old_set)
{
    /* XXX: use siglongjmp ? */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit_noexc(cpu);
}

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored. */
static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
                                    int is_write, sigset_t *old_set)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc;
    unsigned long address = (unsigned long)info->si_addr;
    MMUAccessType access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;

    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         *
         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
         */
        pc = 0;
        access_type = MMU_INST_FETCH;
        mmap_unlock();
        break;
    }

    /* For synchronous signals we expect to be coming from the vCPU
     * thread (so current_cpu should be valid) and either from running
     * code or during translation which can fault as we cross pages.
     *
     * If neither is true then something has gone wrong and we should
     * abort rather than try and restart the vCPU execution.
     */
    if (!cpu || !cpu->running) {
        printf("qemu:%s received signal outside vCPU context @ pc=0x%"
               PRIxPTR "\n",  __func__, pc);
        abort();
    }

#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* Note that it is important that we don't call page_unprotect() unless
     * this is really a "write to nonwriteable page" fault, because
     * page_unprotect() assumes that if it is called for an access to
     * a page that's writeable this means we had two threads racing and
     * another thread got there first and already made the page writeable;
     * so we will retry the access. If we were to call page_unprotect()
     * for some other kind of fault that should really be passed to the
     * guest, we'd end up in an infinite loop of retrying the faulting
     * access.
     */
    if (is_write && info->si_signo == SIGSEGV && info->si_code == SEGV_ACCERR &&
        h2g_valid(address)) {
        switch (page_unprotect(h2g(address), pc)) {
        case 0:
            /* Fault not caused by a page marked unwritable to protect
             * cached translations, must be the guest binary's problem.
             */
            break;
        case 1:
            /* Fault caused by protection of cached translation; TBs
             * invalidated, so resume execution.  Retain helper_retaddr
             * for a possible second fault.
             */
            return 1;
        case 2:
            /* Fault caused by protection of cached translation, and the
             * currently executing TB was modified and must be exited
             * immediately.  Clear helper_retaddr for next execution.
             */
            clear_helper_retaddr();
            cpu_exit_tb_from_sighandler(cpu, old_set);
            /* NORETURN */

        default:
            g_assert_not_reached();
        }
    }

    /* Convert forcefully to guest address space, invalid addresses
       are still valid segv ones */
    address = h2g_nocheck(address);

    /*
     * There is no way the target can handle this other than raising
     * an exception.  Undo signal and retaddr state prior to longjmp.
     */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    clear_helper_retaddr();

    cc = CPU_GET_CLASS(cpu);
    cc->tcg_ops->tlb_fill(cpu, address, 0, access_type,
                          MMU_USER_IDX, false, pc);
    g_assert_not_reached();
}

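/*
 * Check whether a guest access of ACCESS_TYPE to ADDR is permitted by
 * the page flags.  Returns 0 if the access is allowed; otherwise, if
 * NONFAULT is set, returns TLB_INVALID_MASK, and if NONFAULT is clear,
 * raises the guest fault via tlb_fill and does not return.
 */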
static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int flags;

    switch (access_type) {
    case MMU_DATA_STORE:
        flags = PAGE_WRITE;
        break;
    case MMU_DATA_LOAD:
        flags = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        flags = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (!guest_addr_valid_untagged(addr) ||
        page_check_range(addr, 1, flags) < 0) {
        if (nonfault) {
            return TLB_INVALID_MASK;
        } else {
            CPUState *cpu = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
                                  MMU_USER_IDX, false, ra);
            g_assert_not_reached();
        }
    }
    return 0;
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

#if defined(__i386__)

#if defined(__NetBSD__)
#include <ucontext.h>
#include <machine/trap.h>

#define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
#define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)    ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP      T_PAGEFLT
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>
#include <machine/trap.h>

#define EIP_sig(context)  (*((unsigned long *)&(context)->uc_mcontext.mc_eip))
#define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)    ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP      T_PAGEFLT
#elif defined(__OpenBSD__)
#include <machine/trap.h>
#define EIP_sig(context)     ((context)->sc_eip)
#define TRAP_sig(context)    ((context)->sc_trapno)
#define ERROR_sig(context)   ((context)->sc_err)
#define MASK_sig(context)    ((context)->sc_mask)
#define PAGE_FAULT_TRAP      T_PAGEFLT
#else
#define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
#define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)    ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP      0xe
#endif

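/*
 * Bit 1 of the x86 page fault error code is set for write accesses, so
 * (ERROR_sig() >> 1) & 1 yields is_write when the trap was a page fault.
 */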
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    ucontext_t *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, info,
                             trapno == PAGE_FAULT_TRAP ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc));
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#include <machine/trap.h>
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP       T_PAGEFLT
#elif defined(__OpenBSD__)
#include <machine/trap.h>
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#define PAGE_FAULT_TRAP       T_PAGEFLT
#elif defined(__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>
#include <machine/trap.h>

#define PC_sig(context)  (*((unsigned long *)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP       T_PAGEFLT
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#define PAGE_FAULT_TRAP       0xe
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    ucontext_t *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, info,
                             TRAP_sig(uc) == PAGE_FAULT_TRAP ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc));
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
#define REG_sig(reg_name, context)              \
    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
#define GPR_sig(reg_num, context)              REG_sig(gpr[reg_num], context)
/* Program counter */
#define IAR_sig(context)                       REG_sig(nip, context)
/* Machine State Register (Supervisor) */
#define MSR_sig(context)                       REG_sig(msr, context)
/* Count register */
#define CTR_sig(context)                       REG_sig(ctr, context)
/* User's integer exception register */
#define XER_sig(context)                       REG_sig(xer, context)
/* Link register */
#define LR_sig(context)                        REG_sig(link, context)
/* Condition register */
#define CR_sig(context)                        REG_sig(ccr, context)

/* Float Registers access  */
#define FLOAT_sig(reg_num, context)                                     \
    (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
#define FPSCR_sig(context) \
    (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
#define DAR_sig(context)                       REG_sig(dar, context)
#define DSISR_sig(context)                     REG_sig(dsisr, context)
#define TRAP_sig(context)                      REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
#define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
#define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
#define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
#define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
#define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
#define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
#define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
#define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
#define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    ucontext_t *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000) {
        is_write = 1;
    }
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000)) {
        is_write = 1;
    }
#endif
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                           void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#elif defined(__NetBSD__)
    ucontext_t *uc = puc;
    unsigned long pc = _UC_MACHINE_PC(uc);
    void *sigmask = (void *)&uc->uc_sigmask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: /* stb */
        case 0x15: /* stba */
        case 0x06: /* sth */
        case 0x16: /* stha */
        case 0x04: /* st */
        case 0x14: /* sta */
        case 0x07: /* std */
        case 0x17: /* stda */
        case 0x0e: /* stx */
        case 0x1e: /* stxa */
        case 0x24: /* stf */
        case 0x34: /* stfa */
        case 0x27: /* stdf */
        case 0x37: /* stdfa */
        case 0x26: /* stqf */
        case 0x36: /* stqfa */
        case 0x25: /* stfsr */
        case 0x3c: /* casa */
        case 0x3e: /* casxa */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, info, is_write, sigmask);
}

#elif defined(__arm__)

#if defined(__NetBSD__)
#include <ucontext.h>
#include <sys/siginfo.h>
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__)
    ucontext_t *uc = puc;
    siginfo_t *si = pinfo;
#else
    ucontext_t *uc = puc;
#endif
    unsigned long pc;
    uint32_t fsr;
    int is_write;

#if defined(__NetBSD__)
    pc = uc->uc_mcontext.__gregs[_REG_R15];
#elif defined(__GLIBC__) && (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif

#ifdef __NetBSD__
    fsr = si->si_trap;
#else
    fsr = uc->uc_mcontext.error_code;
#endif
    /*
     * In the FSR, bit 11 is WnR, assuming a v6 or
     * later processor.  On v5 we will always report
     * this as a read, which will fail later.
     */
    is_write = extract32(fsr, 11, 1);
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__aarch64__)

#if defined(__NetBSD__)

#include <ucontext.h>
#include <sys/siginfo.h>

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    ucontext_t *uc = puc;
    siginfo_t *si = pinfo;
    unsigned long pc;
    int is_write;
    uint32_t esr;

    pc = uc->uc_mcontext.__gregs[_REG_PC];
    esr = si->si_trap;

    /*
     * siginfo_t::si_trap is the ESR value, for data aborts ESR.EC
     * is 0b10010x: then bit 6 is the WnR bit
     */
    is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1;
    return handle_cpu_signal(pc, si, is_write, &uc->uc_sigmask);
}

#else

#ifndef ESR_MAGIC
/* Pre-3.16 kernel headers don't have these, so provide fallback definitions */
#define ESR_MAGIC 0x45535201
struct esr_context {
    struct _aarch64_ctx head;
    uint64_t esr;
};
#endif

static inline struct _aarch64_ctx *first_ctx(ucontext_t *uc)
{
    return (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved;
}

static inline struct _aarch64_ctx *next_ctx(struct _aarch64_ctx *hdr)
{
    return (struct _aarch64_ctx *)((char *)hdr + hdr->size);
}

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    uintptr_t pc = uc->uc_mcontext.pc;
    bool is_write;
    struct _aarch64_ctx *hdr;
    struct esr_context const *esrctx = NULL;

    /* Find the esr_context, which has the WnR bit in it */
    for (hdr = first_ctx(uc); hdr->magic; hdr = next_ctx(hdr)) {
        if (hdr->magic == ESR_MAGIC) {
            esrctx = (struct esr_context const *)hdr;
            break;
        }
    }

    if (esrctx) {
        /* For data aborts ESR.EC is 0b10010x: then bit 6 is the WnR bit */
        uint64_t esr = esrctx->esr;
        is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1;
    } else {
        /*
         * Fall back to parsing instructions; will only be needed
         * for really ancient (pre-3.16) kernels.
         */
        uint32_t insn = *(uint32_t *)pc;

        is_write = ((insn & 0xbfff0000) == 0x0c000000   /* C3.3.1 */
                    || (insn & 0xbfe00000) == 0x0c800000   /* C3.3.2 */
                    || (insn & 0xbfdf0000) == 0x0d000000   /* C3.3.3 */
                    || (insn & 0xbfc00000) == 0x0d800000   /* C3.3.4 */
                    || (insn & 0x3f400000) == 0x08000000   /* C3.3.6 */
                    || (insn & 0x3bc00000) == 0x39000000   /* C3.3.13 */
                    || (insn & 0x3fc00000) == 0x3d800000   /* ... 128bit */
                    /* Ignore bits 10, 11 & 21, controlling indexing.  */
                    || (insn & 0x3bc00000) == 0x38000000   /* C3.3.8-12 */
                    || (insn & 0x3fe00000) == 0x3c800000   /* ... 128bit */
                    /* Ignore bits 23 & 24, controlling indexing.  */
                    || (insn & 0x3a400000) == 0x28000000); /* C3.3.7,14-16 */
    }
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
#endif

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__mips__)

#if defined(__mips16) || defined(__mips_micromips)
#error "Unsupported encoding"
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    uintptr_t pc = uc->uc_mcontext.pc;
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* Detect all store instructions at program counter. */
    switch((insn >> 26) & 077) {
    case 050: /* SB */
    case 051: /* SH */
    case 052: /* SWL */
    case 053: /* SW */
    case 054: /* SDL */
    case 055: /* SDR */
    case 056: /* SWR */
    case 070: /* SC */
    case 071: /* SWC1 */
    case 074: /* SCD */
    case 075: /* SDC1 */
    case 077: /* SD */
#if !defined(__mips_isa_rev) || __mips_isa_rev < 6
    case 072: /* SWC2 */
    case 076: /* SDC2 */
#endif
        is_write = 1;
        break;
    case 023: /* COP1X */
        /* Required in all versions of MIPS64 since
           MIPS64r1 and subsequent versions of MIPS32r2. */
        switch (insn & 077) {
        case 010: /* SWXC1 */
        case 011: /* SDXC1 */
        case 015: /* SUXC1 */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#elif defined(__riscv)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    greg_t pc = uc->uc_mcontext.__gregs[REG_PC];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* Detect a store by reading the instruction at the program
       counter. Note: we currently only generate 32-bit
       instructions, so we only detect 32-bit stores. */
    switch (((insn >> 0) & 0b11)) {
    case 3:
        switch (((insn >> 2) & 0b11111)) {
        case 8:
            switch (((insn >> 12) & 0b111)) {
            case 0: /* sb */
            case 1: /* sh */
            case 2: /* sw */
            case 3: /* sd */
            case 4: /* sq */
                is_write = 1;
                break;
            default:
                break;
            }
            break;
        case 9:
            switch (((insn >> 12) & 0b111)) {
            case 2: /* fsw */
            case 3: /* fsd */
            case 4: /* fsq */
                is_write = 1;
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }
    }

    /* Check for compressed instructions */
    switch (((insn >> 13) & 0b111)) {
    case 7:
        switch (insn & 0b11) {
        case 0: /* c.sd */
        case 2: /* c.sdsp */
            is_write = 1;
            break;
        default:
            break;
        }
        break;
    case 6:
        switch (insn & 0b11) {
        case 0: /* c.sw */
        case 2: /* c.swsp */
            is_write = 1;
            break;
        default:
            break;
        }
        break;
    default:
        break;
    }

    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

#else

#error host CPU specific signal handler needed

#endif

/* The softmmu versions of these helpers are in cputlb.c.  */

uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldub_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
{
    int ret;
    uint16_t meminfo = trace_mem_get_info(MO_SB, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldsb_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = lduw_be_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
{
    int ret;
    uint16_t meminfo = trace_mem_get_info(MO_BESW, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldsw_be_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldl_be_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldq_be_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = lduw_le_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
{
    int ret;
    uint16_t meminfo = trace_mem_get_info(MO_LESW, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldsw_le_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldl_le_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;
    uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, false);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    ret = ldq_le_p(g2h(env_cpu(env), ptr));
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
    return ret;
}

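/*
 * The *_data_ra variants record the caller's return address in
 * helper_retaddr around the access, so that a fault can be unwound
 * to the correct guest pc (see handle_cpu_signal above).
 */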
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldub_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    int ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldsb_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_lduw_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    int ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldsw_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldl_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint64_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldq_be_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_lduw_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    int ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldsw_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint32_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldl_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
    uint64_t ret;

    set_helper_retaddr(retaddr);
    ret = cpu_ldq_le_data(env, ptr);
    clear_helper_retaddr();
    return ret;
}

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stb_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stw_be_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stl_be_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stq_be_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stw_le_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stl_le_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
{
    uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, true);

    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
    stq_le_p(g2h(env_cpu(env), ptr), val);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stb_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stw_be_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stl_be_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stq_be_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stw_le_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stl_le_data(env, ptr, val);
    clear_helper_retaddr();
}

void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t retaddr)
{
    set_helper_retaddr(retaddr);
    cpu_stq_le_data(env, ptr, val);
    clear_helper_retaddr();
}

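/*
 * The code-fetch helpers below set helper_retaddr to 1, which
 * handle_cpu_signal() treats as a fault during translation
 * (MMU_INST_FETCH) rather than a data access.
 */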
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }
    void *ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_MMU_IDX MMU_USER_IDX

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif