/* qemu/linux-user/qemu.h */
#ifndef QEMU_H
#define QEMU_H

#include "hostdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#undef DEBUG_REMAP
#ifdef DEBUG_REMAP
#endif /* DEBUG_REMAP */

#include "exec/user/abitypes.h"

#include "exec/user/thunk.h"
#include "syscall_defs.h"
#include "target_syscall.h"
#include "exec/gdbstub.h"
#include "qemu/queue.h"

#define THREAD __thread

/* This is the size of the host kernel's sigset_t, needed where we make
 * direct system calls that take a sigset_t pointer and a size.
 */
#define SIGSET_T_SIZE (_NSIG / 8)
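
/* Illustrative sketch (not part of the original header): SIGSET_T_SIZE is the
 * size argument passed when we bypass libc and issue a sigset-taking syscall
 * directly.  Assuming <signal.h>, <unistd.h> and <sys/syscall.h> are included:
 *
 *     sigset_t set, oldset;
 *     sigfillset(&set);
 *     syscall(SYS_rt_sigprocmask, SIG_SETMASK, &set, &oldset, SIGSET_T_SIZE);
 */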

/* This struct is used to hold certain information about the image.
 * Basically, it replicates in user space what would be certain
 * task_struct fields in the kernel
 */
struct image_info {
        abi_ulong       load_bias;
        abi_ulong       load_addr;
        abi_ulong       start_code;
        abi_ulong       end_code;
        abi_ulong       start_data;
        abi_ulong       end_data;
        abi_ulong       start_brk;
        abi_ulong       brk;
        abi_ulong       start_mmap;
        abi_ulong       start_stack;
        abi_ulong       stack_limit;
        abi_ulong       entry;
        abi_ulong       code_offset;
        abi_ulong       data_offset;
        abi_ulong       saved_auxv;
        abi_ulong       auxv_len;
        abi_ulong       arg_start;
        abi_ulong       arg_end;
        uint32_t        elf_flags;
        int             personality;
#ifdef CONFIG_USE_FDPIC
        abi_ulong       loadmap_addr;
        uint16_t        nsegs;
        void           *loadsegs;
        abi_ulong       pt_dynamic_addr;
        struct image_info *other_info;
#endif
};

#ifdef TARGET_I386
/* Information about the current linux thread */
struct vm86_saved_state {
    uint32_t eax; /* return code */
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
    uint32_t esi;
    uint32_t edi;
    uint32_t ebp;
    uint32_t esp;
    uint32_t eflags;
    uint32_t eip;
    uint16_t cs, ss, ds, es, fs, gs;
};
#endif

#if defined(TARGET_ARM) && defined(TARGET_ABI32)
/* FPU emulator */
#include "nwfpe/fpa11.h"
#endif

#define MAX_SIGQUEUE_SIZE 1024

struct emulated_sigtable {
    int pending; /* true if signal is pending */
    target_siginfo_t info;
};

/* NOTE: we force a big alignment so that the stack stored after is
   aligned too */
typedef struct TaskState {
    pid_t ts_tid;     /* tid (or pid) of this task */
#ifdef TARGET_ARM
# ifdef TARGET_ABI32
    /* FPA state */
    FPA11 fpa;
# endif
    int swi_errno;
#endif
#ifdef TARGET_UNICORE32
    int swi_errno;
#endif
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
    abi_ulong target_v86;
    struct vm86_saved_state vm86_saved_regs;
    struct target_vm86plus_struct vm86plus;
    uint32_t v86flags;
    uint32_t v86mask;
#endif
    abi_ulong child_tidptr;
#ifdef TARGET_M68K
    int sim_syscalls;
    abi_ulong tp_value;
#endif
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    /* Extra fields for semihosted binaries.  */
    abi_ulong heap_base;
    abi_ulong heap_limit;
#endif
    abi_ulong stack_base;
    int used; /* non zero if used */
    struct image_info *info;
    struct linux_binprm *bprm;

    struct emulated_sigtable sync_signal;
    struct emulated_sigtable sigtab[TARGET_NSIG];
    /* This thread's signal mask, as requested by the guest program.
     * The actual signal mask of this thread may differ:
     *  + we don't let SIGSEGV and SIGBUS be blocked while running guest code
     *  + sometimes we block all signals to avoid races
     */
    sigset_t signal_mask;
    /* The signal mask imposed by a guest sigsuspend syscall, if we are
     * currently in the middle of such a syscall
     */
    sigset_t sigsuspend_mask;
    /* Nonzero if we're leaving a sigsuspend and sigsuspend_mask is valid. */
    int in_sigsuspend;

    /* Nonzero if process_pending_signals() needs to do something (either
     * handle a pending signal or unblock signals).
     * This flag is written from a signal handler so should be accessed via
     * the atomic_read() and atomic_set() functions. (It is not accessed
     * from multiple threads.)
     */
    int signal_pending;

} __attribute__((aligned(16))) TaskState;
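
/* Illustrative sketch (not part of the original header): the per-thread
 * TaskState hangs off CPUState::opaque, so code running on the current
 * thread typically fetches it as:
 *
 *     TaskState *ts = (TaskState *)thread_cpu->opaque;
 *
 * (This is exactly the lookup the safe_syscall() macro further down performs
 * to reach ts->signal_pending.)
 */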

extern char *exec_path;
void init_task_state(TaskState *ts);
void task_settid(TaskState *);
void stop_all_tasks(void);
extern const char *qemu_uname_release;
extern unsigned long mmap_min_addr;

/* ??? See if we can avoid exposing so much of the loader internals.  */

/* Read a good amount of data initially, to hopefully get all the
   program headers loaded.  */
#define BPRM_BUF_SIZE  1024

/*
 * This structure is used to hold the arguments that are
 * used when loading binaries.
 */
struct linux_binprm {
        char buf[BPRM_BUF_SIZE] __attribute__((aligned));
        abi_ulong p;
        int fd;
        int e_uid, e_gid;
        int argc, envc;
        char **argv;
        char **envp;
        char * filename;        /* Name of binary */
        int (*core_dump)(int, const CPUArchState *); /* coredump routine */
};

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop);
abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
                              abi_ulong stringp, int push_ptr);
int loader_exec(int fdexec, const char *filename, char **argv, char **envp,
             struct target_pt_regs * regs, struct image_info *infop,
             struct linux_binprm *);

int load_elf_binary(struct linux_binprm *bprm, struct image_info *info);
int load_flt_binary(struct linux_binprm *bprm, struct image_info *info);

abi_long memcpy_to_target(abi_ulong dest, const void *src,
                          unsigned long len);
void target_set_brk(abi_ulong new_brk);
abi_long do_brk(abi_ulong new_brk);
void syscall_init(void);
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8);
void gemu_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
extern THREAD CPUState *thread_cpu;
void cpu_loop(CPUArchState *env);
const char *target_strerror(int err);
int get_osversion(void);
void init_qemu_uname_release(void);
void fork_start(void);
void fork_end(int child);

/* Creates the initial guest address space in the host memory space using
 * the given host start address hint and size.  The guest_start parameter
 * specifies the start address of the guest space.  guest_base will be the
 * difference between the host start address computed by this function and
 * guest_start.  If fixed is specified, then the mapped address space must
 * start at host_start.  The real start address of the mapped memory space is
 * returned or -1 if there was an error.
 */
unsigned long init_guest_space(unsigned long host_start,
                               unsigned long host_size,
                               unsigned long guest_start,
                               bool fixed);
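
/* Illustrative sketch (not part of the original header): with guest_start 0,
 * the returned host address simply becomes guest_base.  reserve_size is a
 * hypothetical name for the amount of address space to set aside:
 *
 *     unsigned long addr = init_guest_space(0, reserve_size, 0, false);
 *     if (addr == (unsigned long)-1) {
 *         return -1;          // could not reserve the guest address space
 *     }
 *     guest_base = addr;
 */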

#include "qemu/log.h"

/* safe_syscall.S */

/**
 * safe_syscall:
 * @int number: number of system call to make
 * ...: arguments to the system call
 *
 * Call a system call if a guest signal is not pending.
 * This has the same API as the libc syscall() function, except that it
 * may return -1 with errno == TARGET_ERESTARTSYS if a signal was pending.
 *
 * Returns: the system call result, or -1 with an error code in errno
 * (Errnos are host errnos; we rely on TARGET_ERESTARTSYS not clashing
 * with any of the host errno values.)
 */

/* A guide to using safe_syscall() to handle interactions between guest
 * syscalls and guest signals:
 *
 * Guest syscalls come in two flavours:
 *
 * (1) Non-interruptible syscalls
 *
 * These are guest syscalls that never get interrupted by signals and
 * so never return EINTR. They can be implemented straightforwardly in
 * QEMU: just make sure that if the implementation code has to make any
 * blocking calls that those calls are retried if they return EINTR.
 * It's also OK to implement these with safe_syscall, though it will be
 * a little less efficient if a signal is delivered at the 'wrong' moment.
 *
 * Some non-interruptible syscalls need to be handled using block_signals()
 * to block signals for the duration of the syscall. This mainly applies
 * to code which needs to modify the data structures used by the
 * host_signal_handler() function and the functions it calls, including
 * all syscalls which change the thread's signal mask.
 *
 * (2) Interruptible syscalls
 *
 * These are guest syscalls that can be interrupted by signals and
 * for which we need to either return EINTR or arrange for the guest
 * syscall to be restarted. This category includes both syscalls which
 * always restart (and in the kernel return -ERESTARTNOINTR), ones
 * which only restart if there is no handler (kernel returns -ERESTARTNOHAND
 * or -ERESTART_RESTARTBLOCK), and the most common kind which restart
 * if the handler was registered with SA_RESTART (kernel returns
 * -ERESTARTSYS). System calls which are only interruptible in some
 * situations (like 'open') also need to be handled this way.
 *
 * Here it is important that the host syscall is made
 * via this safe_syscall() function, and *not* via the host libc.
 * If the host libc is used then the implementation will appear to work
 * most of the time, but there will be a race condition where a
 * signal could arrive just before we make the host syscall inside libc,
 * and then the guest syscall will not correctly be interrupted.
 * Instead the implementation of the guest syscall can use the safe_syscall
 * function but otherwise just return the result or errno in the usual
 * way; the main loop code will take care of restarting the syscall
 * if appropriate.
 *
 * (If the implementation needs to make multiple host syscalls this is
 * OK; any which might really block must be via safe_syscall(); for those
 * which are only technically blocking (i.e. which we know in practice won't
 * stay in the host kernel indefinitely) it's OK to use libc if necessary.
 * You must be able to cope with backing out correctly if some safe_syscall
 * you make in the implementation returns either -TARGET_ERESTARTSYS or
 * EINTR though.)
 *
 * block_signals() cannot be used for interruptible syscalls.
 *
 *
 * How and why the safe_syscall implementation works:
 *
 * The basic setup is that we make the host syscall via a known
 * section of host native assembly. If a signal occurs, our signal
 * handler checks the interrupted host PC against the address of that
 * known section. If the PC is before or at the address of the syscall
 * instruction then we change the PC to point at a "return
 * -TARGET_ERESTARTSYS" code path instead, and then exit the signal handler
 * (causing the safe_syscall() call to immediately return that value).
 * Then in the main.c loop if we see this magic return value we adjust
 * the guest PC to wind it back to before the system call, and invoke
 * the guest signal handler as usual.
 *
 * This winding-back will happen in two cases:
 * (1) signal came in just before we took the host syscall (a race);
 *   in this case we'll take the guest signal and have another go
 *   at the syscall afterwards, and this is indistinguishable for the
 *   guest from the timing having been different such that the guest
 *   signal really did win the race
 * (2) signal came in while the host syscall was blocking, and the
 *   host kernel decided the syscall should be restarted;
 *   in this case we want to restart the guest syscall also, and so
 *   rewinding is the right thing. (Note that "restart" semantics mean
 *   "first call the signal handler, then reattempt the syscall".)
 * The other situation to consider is when a signal came in while the
 * host syscall was blocking, and the host kernel decided that the syscall
 * should not be restarted; in this case QEMU's host signal handler will
 * be invoked with the PC pointing just after the syscall instruction,
 * with registers indicating an EINTR return; the special code in the
 * handler will not kick in, and we will return EINTR to the guest as
 * we should.
 *
 * Notice that we can leave the host kernel to make the decision for
 * us about whether to do a restart of the syscall or not; we do not
 * need to check SA_RESTART flags in QEMU or distinguish the various
 * kinds of restartability.
 */
#ifdef HAVE_SAFE_SYSCALL
/* The core part of this function is implemented in assembly */
extern long safe_syscall_base(int *pending, long number, ...);

#define safe_syscall(...)                                               \
    ({                                                                  \
        long ret_;                                                      \
        int *psp_ = &((TaskState *)thread_cpu->opaque)->signal_pending; \
        ret_ = safe_syscall_base(psp_, __VA_ARGS__);                    \
        if (is_error(ret_)) {                                           \
            errno = -ret_;                                              \
            ret_ = -1;                                                  \
        }                                                               \
        ret_;                                                           \
    })

#else

/* Fallback for architectures which don't yet provide a safe-syscall assembly
 * fragment; note that this is racy!
 * This should go away when all host architectures have been updated.
 */
#define safe_syscall syscall

#endif
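
/* Illustrative sketch (not part of the original header): how a guest syscall
 * implementation typically uses safe_syscall().  The helper name
 * do_guest_read() is hypothetical; get_errno() is the usual syscall.c helper
 * that converts a -1/errno result into a negative host errno value:
 *
 *     static abi_long do_guest_read(int fd, abi_ulong buf, abi_ulong count)
 *     {
 *         abi_long ret;
 *         void *p = lock_user(VERIFY_WRITE, buf, count, 0);
 *
 *         if (!p) {
 *             return -TARGET_EFAULT;
 *         }
 *         ret = get_errno(safe_syscall(__NR_read, fd, p, count));
 *         unlock_user(p, buf, ret > 0 ? ret : 0);
 *         return ret;
 *     }
 *
 * If a guest signal was pending, safe_syscall() fails with
 * errno == TARGET_ERESTARTSYS, the helper returns -TARGET_ERESTARTSYS, and
 * the main loop rewinds the guest PC so the syscall is retried after the
 * signal has been delivered.
 */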

/* syscall.c */
int host_to_target_waitstatus(int status);

/* strace.c */
void print_syscall(int num,
                   abi_long arg1, abi_long arg2, abi_long arg3,
                   abi_long arg4, abi_long arg5, abi_long arg6);
void print_syscall_ret(int num, abi_long arg1);
/**
 * print_taken_signal:
 * @target_signum: target signal being taken
 * @tinfo: target_siginfo_t which will be passed to the guest for the signal
 *
 * Print strace output indicating that this signal is being taken by the guest,
 * in a format similar to:
 * --- SIGSEGV {si_signo=SIGSEGV, si_code=SI_KERNEL, si_addr=0} ---
 */
void print_taken_signal(int target_signum, const target_siginfo_t *tinfo);
extern int do_strace;

/* signal.c */
void process_pending_signals(CPUArchState *cpu_env);
void signal_init(void);
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info);
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info);
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo);
int target_to_host_signal(int sig);
int host_to_target_signal(int sig);
long do_sigreturn(CPUArchState *env);
long do_rt_sigreturn(CPUArchState *env);
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp);
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset);
/**
 * block_signals: block all signals while handling this guest syscall
 *
 * Block all signals, and arrange that the signal mask is returned to
 * its correct value for the guest before we resume execution of guest code.
 * If this function returns non-zero, then the caller should immediately
 * return -TARGET_ERESTARTSYS to the main loop, which will take the pending
 * signal and restart execution of the syscall.
 * If block_signals() returns zero, then the caller can continue with
 * emulation of the system call knowing that no signals can be taken
 * (and therefore that no race conditions will result).
 * This should only be called once, because if it is called a second time
 * it will always return non-zero. (Think of it like a mutex that can't
 * be recursively locked.)
 * Signals will be unblocked again by process_pending_signals().
 *
 * Return value: non-zero if there was a pending signal, zero if not.
 */
int block_signals(void); /* Returns non zero if signal pending */
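
/* Illustrative sketch (not part of the original header): the usual calling
 * pattern inside a syscall implementation that must not race with signal
 * delivery:
 *
 *     if (block_signals()) {
 *         return -TARGET_ERESTARTSYS;
 *     }
 *     // ... modify per-thread signal state; no guest signal can be taken
 *     // here; the guest mask is restored by process_pending_signals() ...
 */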

#ifdef TARGET_I386
/* vm86.c */
void save_v86_state(CPUX86State *env);
void handle_vm86_trap(CPUX86State *env, int trapno);
void handle_vm86_fault(CPUX86State *env);
int do_vm86(CPUX86State *env, long subfunction, abi_ulong v86_addr);
#elif defined(TARGET_SPARC64)
void sparc64_set_context(CPUSPARCState *env);
void sparc64_get_context(CPUSPARCState *env);
#endif

/* mmap.c */
int target_mprotect(abi_ulong start, abi_ulong len, int prot);
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset);
int target_munmap(abi_ulong start, abi_ulong len);
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr);
int target_msync(abi_ulong start, abi_ulong len, int flags);
extern unsigned long last_brk;
extern abi_ulong mmap_next_start;
abi_ulong mmap_find_vma(abi_ulong, abi_ulong);
void mmap_fork_start(void);
void mmap_fork_end(int child);

/* main.c */
extern unsigned long guest_stack_size;

/* user access */

#define VERIFY_READ 0
#define VERIFY_WRITE 1 /* implies read access */

static inline int access_ok(int type, abi_ulong addr, abi_ulong size)
{
    return page_check_range((target_ulong)addr, size,
                            (type == VERIFY_READ) ? PAGE_READ : (PAGE_READ | PAGE_WRITE)) == 0;
}

/* NOTE __get_user and __put_user use host pointers and don't check access.
   These are usually used to access struct data members once the struct has
   been locked - usually with lock_user_struct.  */

/* Tricky points:
   - Use __builtin_choose_expr to avoid type promotion from ?:,
   - Invalid sizes result in a compile time error stemming from
     the fact that abort has no parameters.
   - It's easier to use the endian-specific unaligned load/store
     functions than host-endian unaligned load/store plus tswapN.  */

#define __put_user_e(x, hptr, e)                                        \
  (__builtin_choose_expr(sizeof(*(hptr)) == 1, stb_p,                   \
   __builtin_choose_expr(sizeof(*(hptr)) == 2, stw_##e##_p,             \
   __builtin_choose_expr(sizeof(*(hptr)) == 4, stl_##e##_p,             \
   __builtin_choose_expr(sizeof(*(hptr)) == 8, stq_##e##_p, abort))))   \
     ((hptr), (x)), (void)0)

#define __get_user_e(x, hptr, e)                                        \
  ((x) = (typeof(*hptr))(                                               \
   __builtin_choose_expr(sizeof(*(hptr)) == 1, ldub_p,                  \
   __builtin_choose_expr(sizeof(*(hptr)) == 2, lduw_##e##_p,            \
   __builtin_choose_expr(sizeof(*(hptr)) == 4, ldl_##e##_p,             \
   __builtin_choose_expr(sizeof(*(hptr)) == 8, ldq_##e##_p, abort))))   \
     (hptr)), (void)0)

#ifdef TARGET_WORDS_BIGENDIAN
# define __put_user(x, hptr)  __put_user_e(x, hptr, be)
# define __get_user(x, hptr)  __get_user_e(x, hptr, be)
#else
# define __put_user(x, hptr)  __put_user_e(x, hptr, le)
# define __get_user(x, hptr)  __get_user_e(x, hptr, le)
#endif
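
/* Illustrative sketch (not part of the original header): __get_user() loads a
 * value in target byte order from an already-locked host pointer into a host
 * variable, and __put_user() does the reverse, e.g. with a locked
 * struct target_timeval:
 *
 *     __get_user(host_sec, &target_tv->tv_sec);
 *     __get_user(host_usec, &target_tv->tv_usec);
 */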

/* put_user()/get_user() take a guest address and check access */
/* These are usually used to access an atomic data type, such as an int,
 * that has been passed by address.  These internally perform locking
 * and unlocking on the data type.
 */
#define put_user(x, gaddr, target_type)                                 \
({                                                                      \
    abi_ulong __gaddr = (gaddr);                                        \
    target_type *__hptr;                                                \
    abi_long __ret = 0;                                                 \
    if ((__hptr = lock_user(VERIFY_WRITE, __gaddr, sizeof(target_type), 0))) { \
        __put_user((x), __hptr);                                \
        unlock_user(__hptr, __gaddr, sizeof(target_type));              \
    } else                                                              \
        __ret = -TARGET_EFAULT;                                         \
    __ret;                                                              \
})

#define get_user(x, gaddr, target_type)                                 \
({                                                                      \
    abi_ulong __gaddr = (gaddr);                                        \
    target_type *__hptr;                                                \
    abi_long __ret = 0;                                                 \
    if ((__hptr = lock_user(VERIFY_READ, __gaddr, sizeof(target_type), 1))) { \
        __get_user((x), __hptr);                                \
        unlock_user(__hptr, __gaddr, 0);                                \
    } else {                                                            \
        /* avoid warning */                                             \
        (x) = 0;                                                        \
        __ret = -TARGET_EFAULT;                                         \
    }                                                                   \
    __ret;                                                              \
})

#define put_user_ual(x, gaddr) put_user((x), (gaddr), abi_ulong)
#define put_user_sal(x, gaddr) put_user((x), (gaddr), abi_long)
#define put_user_u64(x, gaddr) put_user((x), (gaddr), uint64_t)
#define put_user_s64(x, gaddr) put_user((x), (gaddr), int64_t)
#define put_user_u32(x, gaddr) put_user((x), (gaddr), uint32_t)
#define put_user_s32(x, gaddr) put_user((x), (gaddr), int32_t)
#define put_user_u16(x, gaddr) put_user((x), (gaddr), uint16_t)
#define put_user_s16(x, gaddr) put_user((x), (gaddr), int16_t)
#define put_user_u8(x, gaddr)  put_user((x), (gaddr), uint8_t)
#define put_user_s8(x, gaddr)  put_user((x), (gaddr), int8_t)

#define get_user_ual(x, gaddr) get_user((x), (gaddr), abi_ulong)
#define get_user_sal(x, gaddr) get_user((x), (gaddr), abi_long)
#define get_user_u64(x, gaddr) get_user((x), (gaddr), uint64_t)
#define get_user_s64(x, gaddr) get_user((x), (gaddr), int64_t)
#define get_user_u32(x, gaddr) get_user((x), (gaddr), uint32_t)
#define get_user_s32(x, gaddr) get_user((x), (gaddr), int32_t)
#define get_user_u16(x, gaddr) get_user((x), (gaddr), uint16_t)
#define get_user_s16(x, gaddr) get_user((x), (gaddr), int16_t)
#define get_user_u8(x, gaddr)  get_user((x), (gaddr), uint8_t)
#define get_user_s8(x, gaddr)  get_user((x), (gaddr), int8_t)
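
/* Illustrative sketch (not part of the original header): reading and
 * rewriting a single abi_ulong at a guest address, propagating
 * -TARGET_EFAULT if the address is not accessible:
 *
 *     abi_ulong val;
 *     if (get_user_ual(val, guest_addr)) {
 *         return -TARGET_EFAULT;
 *     }
 *     if (put_user_ual(val + 1, guest_addr)) {
 *         return -TARGET_EFAULT;
 *     }
 */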

/* copy_from_user() and copy_to_user() are usually used to copy data
 * buffers between the target and host.  These internally perform
 * locking/unlocking of the memory.
 */
abi_long copy_from_user(void *hptr, abi_ulong gaddr, size_t len);
abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len);
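
/* Illustrative sketch (not part of the original header): pulling a fixed-size
 * block of guest memory into a host buffer:
 *
 *     uint64_t buf[2];
 *     if (copy_from_user(buf, guest_addr, sizeof(buf))) {
 *         return -TARGET_EFAULT;
 *     }
 */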

/* Functions for accessing guest memory.  The tget and tput functions
   read/write single values, byteswapping as necessary.  The lock_user function
   gets a pointer to a contiguous area of guest memory, but does not perform
   any byteswapping.  lock_user may return either a pointer to the guest
   memory, or a temporary buffer.  */

/* Lock an area of guest memory into the host.  If copy is true then the
   host area will have the same contents as the guest.  */
static inline void *lock_user(int type, abi_ulong guest_addr, long len, int copy)
{
    if (!access_ok(type, guest_addr, len))
        return NULL;
#ifdef DEBUG_REMAP
    {
        void *addr;
        addr = g_malloc(len);
        if (copy)
            memcpy(addr, g2h(guest_addr), len);
        else
            memset(addr, 0, len);
        return addr;
    }
#else
    return g2h(guest_addr);
#endif
}

/* Unlock an area of guest memory.  The first LEN bytes must be
   flushed back to guest memory. host_ptr = NULL is explicitly
   allowed and does nothing. */
static inline void unlock_user(void *host_ptr, abi_ulong guest_addr,
                               long len)
{

#ifdef DEBUG_REMAP
    if (!host_ptr)
        return;
    if (host_ptr == g2h(guest_addr))
        return;
    if (len > 0)
        memcpy(g2h(guest_addr), host_ptr, len);
    g_free(host_ptr);
#endif
}
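
/* Illustrative sketch (not part of the original header): the usual pairing.
 * Lock with copy-in when the host needs to read the data, and pass to
 * unlock_user() the number of bytes that must be copied back to the guest
 * (0 if the host buffer was not modified):
 *
 *     void *p = lock_user(VERIFY_READ, guest_addr, len, 1);
 *     if (!p) {
 *         return -TARGET_EFAULT;
 *     }
 *     // ... read from p ...
 *     unlock_user(p, guest_addr, 0);
 */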

/* Return the length of a string in target memory or -TARGET_EFAULT if
   access error. */
abi_long target_strlen(abi_ulong gaddr);

/* Like lock_user but for null terminated strings.  */
static inline void *lock_user_string(abi_ulong guest_addr)
{
    abi_long len;
    len = target_strlen(guest_addr);
    if (len < 0)
        return NULL;
    return lock_user(VERIFY_READ, guest_addr, (long)(len + 1), 1);
}

/* Helper macros for locking/unlocking a target struct.  */
#define lock_user_struct(type, host_ptr, guest_addr, copy)      \
    (host_ptr = lock_user(type, guest_addr, sizeof(*host_ptr), copy))
#define unlock_user_struct(host_ptr, guest_addr, copy)          \
    unlock_user(host_ptr, guest_addr, (copy) ? sizeof(*host_ptr) : 0)
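
/* Illustrative sketch (not part of the original header): copying a host
 * struct timeval out to a guest struct target_timeval:
 *
 *     struct target_timeval *target_tv;
 *
 *     if (!lock_user_struct(VERIFY_WRITE, target_tv, guest_addr, 0)) {
 *         return -TARGET_EFAULT;
 *     }
 *     __put_user(tv->tv_sec, &target_tv->tv_sec);
 *     __put_user(tv->tv_usec, &target_tv->tv_usec);
 *     unlock_user_struct(target_tv, guest_addr, 1);
 */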

#include <pthread.h>

/* Include target-specific struct and function definitions;
 * they may need access to the target-independent structures
 * above, so include them last.
 */
#include "target_cpu.h"
#include "target_signal.h"
#include "target_structs.h"

#endif /* QEMU_H */