qemu/linux-user/syscall.c
   1/*
   2 *  Linux syscalls
   3 *
   4 *  Copyright (c) 2003 Fabrice Bellard
   5 *
   6 *  This program is free software; you can redistribute it and/or modify
   7 *  it under the terms of the GNU General Public License as published by
   8 *  the Free Software Foundation; either version 2 of the License, or
   9 *  (at your option) any later version.
  10 *
  11 *  This program is distributed in the hope that it will be useful,
  12 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 *  GNU General Public License for more details.
  15 *
  16 *  You should have received a copy of the GNU General Public License
  17 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19#define _ATFILE_SOURCE
  20#include "qemu/osdep.h"
  21#include "qemu/cutils.h"
  22#include "qemu/path.h"
  23#include <elf.h>
  24#include <endian.h>
  25#include <grp.h>
  26#include <sys/ipc.h>
  27#include <sys/msg.h>
  28#include <sys/wait.h>
  29#include <sys/mount.h>
  30#include <sys/file.h>
  31#include <sys/fsuid.h>
  32#include <sys/personality.h>
  33#include <sys/prctl.h>
  34#include <sys/resource.h>
  35#include <sys/swap.h>
  36#include <linux/capability.h>
  37#include <sched.h>
  38#include <sys/timex.h>
  39#include <sys/socket.h>
  40#include <sys/un.h>
  41#include <sys/uio.h>
  42#include <poll.h>
  43#include <sys/times.h>
  44#include <sys/shm.h>
  45#include <sys/sem.h>
  46#include <sys/statfs.h>
  47#include <utime.h>
  48#include <sys/sysinfo.h>
  49#include <sys/signalfd.h>
  50//#include <sys/user.h>
  51#include <netinet/ip.h>
  52#include <netinet/tcp.h>
  53#include <linux/wireless.h>
  54#include <linux/icmp.h>
  55#include <linux/icmpv6.h>
  56#include <linux/errqueue.h>
  57#include <linux/random.h>
  58#include "qemu-common.h"
  59#ifdef CONFIG_TIMERFD
  60#include <sys/timerfd.h>
  61#endif
  62#ifdef TARGET_GPROF
  63#include <sys/gmon.h>
  64#endif
  65#ifdef CONFIG_EVENTFD
  66#include <sys/eventfd.h>
  67#endif
  68#ifdef CONFIG_EPOLL
  69#include <sys/epoll.h>
  70#endif
  71#ifdef CONFIG_ATTR
  72#include "qemu/xattr.h"
  73#endif
  74#ifdef CONFIG_SENDFILE
  75#include <sys/sendfile.h>
  76#endif
  77
  78#define termios host_termios
  79#define winsize host_winsize
  80#define termio host_termio
  81#define sgttyb host_sgttyb /* same as target */
  82#define tchars host_tchars /* same as target */
  83#define ltchars host_ltchars /* same as target */
  84
  85#include <linux/termios.h>
  86#include <linux/unistd.h>
  87#include <linux/cdrom.h>
  88#include <linux/hdreg.h>
  89#include <linux/soundcard.h>
  90#include <linux/kd.h>
  91#include <linux/mtio.h>
  92#include <linux/fs.h>
  93#if defined(CONFIG_FIEMAP)
  94#include <linux/fiemap.h>
  95#endif
  96#include <linux/fb.h>
  97#if defined(CONFIG_USBFS)
  98#include <linux/usbdevice_fs.h>
  99#include <linux/usb/ch9.h>
 100#endif
 101#include <linux/vt.h>
 102#include <linux/dm-ioctl.h>
 103#include <linux/reboot.h>
 104#include <linux/route.h>
 105#include <linux/filter.h>
 106#include <linux/blkpg.h>
 107#include <netpacket/packet.h>
 108#include <linux/netlink.h>
 109#include "linux_loop.h"
 110#include "uname.h"
 111
 112#include "qemu.h"
 113#include "fd-trans.h"
 114
 115#ifndef CLONE_IO
 116#define CLONE_IO                0x80000000      /* Clone io context */
 117#endif
 118
 119/* We can't directly call the host clone syscall, because this will
 120 * badly confuse libc (breaking mutexes, for example). So we must
 121 * divide clone flags into:
 122 *  * flag combinations that look like pthread_create()
 123 *  * flag combinations that look like fork()
 124 *  * flags we can implement within QEMU itself
 125 *  * flags we can't support and will return an error for
 126 */
 127/* For thread creation, all these flags must be present; for
 128 * fork, none must be present.
 129 */
 130#define CLONE_THREAD_FLAGS                              \
 131    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
 132     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
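/* For reference: glibc's pthread_create() is generally observed to pass
 * CLONE_THREAD_FLAGS plus CLONE_SETTLS | CLONE_PARENT_SETTID |
 * CLONE_CHILD_CLEARTID (all covered by CLONE_OPTIONAL_THREAD_FLAGS below),
 * whereas a fork()-style clone passes none of CLONE_THREAD_FLAGS. This
 * reflects observed libc behaviour rather than a guarantee made here.
 */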
 133
 134/* These flags are ignored:
 135 * CLONE_DETACHED is now ignored by the kernel;
 136 * CLONE_IO is just an optimisation hint to the I/O scheduler
 137 */
 138#define CLONE_IGNORED_FLAGS                     \
 139    (CLONE_DETACHED | CLONE_IO)
 140
 141/* Flags for fork which we can implement within QEMU itself */
 142#define CLONE_OPTIONAL_FORK_FLAGS               \
 143    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
 144     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
 145
 146/* Flags for thread creation which we can implement within QEMU itself */
 147#define CLONE_OPTIONAL_THREAD_FLAGS                             \
 148    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
 149     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
 150
 151#define CLONE_INVALID_FORK_FLAGS                                        \
 152    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
 153
 154#define CLONE_INVALID_THREAD_FLAGS                                      \
 155    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
 156       CLONE_IGNORED_FLAGS))
 157
 158/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 159 * have almost all been allocated. We cannot support any of
 160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 162 * The checks against the invalid thread masks above will catch these.
 163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 164 */
 165
 166/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 167 * once. This exercises the codepaths for restart.
 168 */
 169//#define DEBUG_ERESTARTSYS
 170
 171//#include <linux/msdos_fs.h>
 172#define VFAT_IOCTL_READDIR_BOTH         _IOR('r', 1, struct linux_dirent [2])
 173#define VFAT_IOCTL_READDIR_SHORT        _IOR('r', 2, struct linux_dirent [2])
 174
 175#undef _syscall0
 176#undef _syscall1
 177#undef _syscall2
 178#undef _syscall3
 179#undef _syscall4
 180#undef _syscall5
 181#undef _syscall6
 182
 183#define _syscall0(type,name)            \
 184static type name (void)                 \
 185{                                       \
 186        return syscall(__NR_##name);    \
 187}
 188
 189#define _syscall1(type,name,type1,arg1)         \
 190static type name (type1 arg1)                   \
 191{                                               \
 192        return syscall(__NR_##name, arg1);      \
 193}
 194
 195#define _syscall2(type,name,type1,arg1,type2,arg2)      \
 196static type name (type1 arg1,type2 arg2)                \
 197{                                                       \
 198        return syscall(__NR_##name, arg1, arg2);        \
 199}
 200
 201#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
 202static type name (type1 arg1,type2 arg2,type3 arg3)             \
 203{                                                               \
 204        return syscall(__NR_##name, arg1, arg2, arg3);          \
 205}
 206
 207#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)        \
 208static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)                  \
 209{                                                                               \
 210        return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
 211}
 212
 213#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,        \
 214                  type5,arg5)                                                   \
 215static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)       \
 216{                                                                               \
 217        return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
 218}
 219
 220
 221#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,        \
 222                  type5,arg5,type6,arg6)                                        \
 223static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,       \
 224                  type6 arg6)                                                   \
 225{                                                                               \
 226        return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
 227}
 228
 229
 230#define __NR_sys_uname __NR_uname
 231#define __NR_sys_getcwd1 __NR_getcwd
 232#define __NR_sys_getdents __NR_getdents
 233#define __NR_sys_getdents64 __NR_getdents64
 234#define __NR_sys_getpriority __NR_getpriority
 235#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
 236#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
 237#define __NR_sys_syslog __NR_syslog
 238#define __NR_sys_futex __NR_futex
 239#define __NR_sys_inotify_init __NR_inotify_init
 240#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
 241#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
 242
 243#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
 244#define __NR__llseek __NR_lseek
 245#endif
 246
 247/* Newer kernel ports have llseek() instead of _llseek() */
 248#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
 249#define TARGET_NR__llseek TARGET_NR_llseek
 250#endif
 251
 252#define __NR_sys_gettid __NR_gettid
 253_syscall0(int, sys_gettid)
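/* As an illustration, the _syscall0() invocation above expands to:
 *
 *   static int sys_gettid(void)
 *   {
 *       return syscall(__NR_sys_gettid);
 *   }
 *
 * with __NR_sys_gettid #defined to __NR_gettid just above, i.e. a direct
 * host syscall (useful where libc does not export a wrapper of its own).
 */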
 254
 255/* For the 64-bit guest on 32-bit host case we must emulate
 256 * getdents using getdents64, because otherwise the host
 257 * might hand us back more dirent records than we can fit
 258 * into the guest buffer after structure format conversion.
 259 * Otherwise we emulate getdents with getdents if the host has it.
 260 */
 261#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
 262#define EMULATE_GETDENTS_WITH_GETDENTS
 263#endif
 264
 265#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
 266_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
 267#endif
 268#if (defined(TARGET_NR_getdents) && \
 269      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
 270    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
 271_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
 272#endif
 273#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
 274_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
 275          loff_t *, res, uint, wh);
 276#endif
 277_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
 278_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
 279          siginfo_t *, uinfo)
 280_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
 281#ifdef __NR_exit_group
 282_syscall1(int,exit_group,int,error_code)
 283#endif
 284#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
 285_syscall1(int,set_tid_address,int *,tidptr)
 286#endif
 287#if defined(TARGET_NR_futex) && defined(__NR_futex)
 288_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
 289          const struct timespec *,timeout,int *,uaddr2,int,val3)
 290#endif
 291#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
 292_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
 293          unsigned long *, user_mask_ptr);
 294#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
 295_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
 296          unsigned long *, user_mask_ptr);
 297#define __NR_sys_getcpu __NR_getcpu
 298_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
 299_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
 300          void *, arg);
 301_syscall2(int, capget, struct __user_cap_header_struct *, header,
 302          struct __user_cap_data_struct *, data);
 303_syscall2(int, capset, struct __user_cap_header_struct *, header,
 304          struct __user_cap_data_struct *, data);
 305#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
 306_syscall2(int, ioprio_get, int, which, int, who)
 307#endif
 308#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
 309_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
 310#endif
 311#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
 312_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
 313#endif
 314
 315#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
 316_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
 317          unsigned long, idx1, unsigned long, idx2)
 318#endif
 319
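/* Translation table for open()/fcntl() flag bits. Each row is
 * { target_mask, target_bits, host_mask, host_bits }: when
 * (flags & target_mask) == target_bits the host_bits are set in the
 * converted value, and the reverse mapping applies for host-to-target
 * conversion. The table is consumed by the target_to_host_bitmask() and
 * host_to_target_bitmask() helpers.
 */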
 320static bitmask_transtbl fcntl_flags_tbl[] = {
 321  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
 322  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
 323  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
 324  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
 325  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
 326  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
 327  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
 328  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
 329  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
 330  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
 331  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
 332  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
 333  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
 334#if defined(O_DIRECT)
 335  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
 336#endif
 337#if defined(O_NOATIME)
 338  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
 339#endif
 340#if defined(O_CLOEXEC)
 341  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
 342#endif
 343#if defined(O_PATH)
 344  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
 345#endif
 346#if defined(O_TMPFILE)
 347  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
 348#endif
 349  /* Don't terminate the list prematurely on 64-bit host+guest.  */
 350#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
 351  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
 352#endif
 353  { 0, 0, 0, 0 }
 354};
 355
 356static int sys_getcwd1(char *buf, size_t size)
 357{
 358  if (getcwd(buf, size) == NULL) {
 359      /* getcwd() sets errno */
 360      return (-1);
 361  }
 362  return strlen(buf)+1;
 363}
 364
 365#ifdef TARGET_NR_utimensat
 366#if defined(__NR_utimensat)
 367#define __NR_sys_utimensat __NR_utimensat
 368_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
 369          const struct timespec *,tsp,int,flags)
 370#else
 371static int sys_utimensat(int dirfd, const char *pathname,
 372                         const struct timespec times[2], int flags)
 373{
 374    errno = ENOSYS;
 375    return -1;
 376}
 377#endif
 378#endif /* TARGET_NR_utimensat */
 379
 380#ifdef TARGET_NR_renameat2
 381#if defined(__NR_renameat2)
 382#define __NR_sys_renameat2 __NR_renameat2
 383_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
 384          const char *, new, unsigned int, flags)
 385#else
 386static int sys_renameat2(int oldfd, const char *old,
 387                         int newfd, const char *new, int flags)
 388{
 389    if (flags == 0) {
 390        return renameat(oldfd, old, newfd, new);
 391    }
 392    errno = ENOSYS;
 393    return -1;
 394}
 395#endif
 396#endif /* TARGET_NR_renameat2 */
 397
 398#ifdef CONFIG_INOTIFY
 399#include <sys/inotify.h>
 400
 401#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
 402static int sys_inotify_init(void)
 403{
 404  return (inotify_init());
 405}
 406#endif
 407#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
 408static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
 409{
 410  return (inotify_add_watch(fd, pathname, mask));
 411}
 412#endif
 413#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
 414static int sys_inotify_rm_watch(int fd, int32_t wd)
 415{
 416  return (inotify_rm_watch(fd, wd));
 417}
 418#endif
 419#ifdef CONFIG_INOTIFY1
 420#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
 421static int sys_inotify_init1(int flags)
 422{
 423  return (inotify_init1(flags));
 424}
 425#endif
 426#endif
 427#else
 428/* Userspace can usually survive runtime without inotify */
 429#undef TARGET_NR_inotify_init
 430#undef TARGET_NR_inotify_init1
 431#undef TARGET_NR_inotify_add_watch
 432#undef TARGET_NR_inotify_rm_watch
 433#endif /* CONFIG_INOTIFY  */
 434
 435#if defined(TARGET_NR_prlimit64)
 436#ifndef __NR_prlimit64
 437# define __NR_prlimit64 -1
 438#endif
 439#define __NR_sys_prlimit64 __NR_prlimit64
  440/* The glibc rlimit structure may not be the one used by the underlying syscall */
 441struct host_rlimit64 {
 442    uint64_t rlim_cur;
 443    uint64_t rlim_max;
 444};
 445_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
 446          const struct host_rlimit64 *, new_limit,
 447          struct host_rlimit64 *, old_limit)
 448#endif
 449
 450
 451#if defined(TARGET_NR_timer_create)
  452/* Maximum of 32 active POSIX timers allowed at any one time. */
 453static timer_t g_posix_timers[32] = { 0, } ;
 454
 455static inline int next_free_host_timer(void)
 456{
 457    int k ;
 458    /* FIXME: Does finding the next free slot require a lock? */
 459    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
 460        if (g_posix_timers[k] == 0) {
 461            g_posix_timers[k] = (timer_t) 1;
 462            return k;
 463        }
 464    }
 465    return -1;
 466}
 467#endif
 468
  469/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
 470#ifdef TARGET_ARM
 471static inline int regpairs_aligned(void *cpu_env, int num)
 472{
 473    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
 474}
 475#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
 476static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
 477#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
  478/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 479 * of registers which translates to the same as ARM/MIPS, because we start with
 480 * r3 as arg1 */
 481static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
 482#elif defined(TARGET_SH4)
 483/* SH4 doesn't align register pairs, except for p{read,write}64 */
 484static inline int regpairs_aligned(void *cpu_env, int num)
 485{
 486    switch (num) {
 487    case TARGET_NR_pread64:
 488    case TARGET_NR_pwrite64:
 489        return 1;
 490
 491    default:
 492        return 0;
 493    }
 494}
 495#elif defined(TARGET_XTENSA)
 496static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
 497#else
 498static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
 499#endif
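/* Concrete example of the alignment rule: for pread64(fd, buf, count, pos)
 * on 32-bit ARM EABI the 64-bit pos is passed in an aligned register pair
 * (r4/r5) with r3 left unused, so when regpairs_aligned() returns 1 the
 * syscall emulation has to skip one argument slot before reassembling the
 * 64-bit value. (The register numbers are illustrative of the guest ABI,
 * not something this file defines.)
 */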
 500
 501#define ERRNO_TABLE_SIZE 1200
 502
 503/* target_to_host_errno_table[] is initialized from
 504 * host_to_target_errno_table[] in syscall_init(). */
 505static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
 506};
 507
 508/*
 509 * This list is the union of errno values overridden in asm-<arch>/errno.h
 510 * minus the errnos that are not actually generic to all archs.
 511 */
 512static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
 513    [EAGAIN]            = TARGET_EAGAIN,
 514    [EIDRM]             = TARGET_EIDRM,
 515    [ECHRNG]            = TARGET_ECHRNG,
 516    [EL2NSYNC]          = TARGET_EL2NSYNC,
 517    [EL3HLT]            = TARGET_EL3HLT,
 518    [EL3RST]            = TARGET_EL3RST,
 519    [ELNRNG]            = TARGET_ELNRNG,
 520    [EUNATCH]           = TARGET_EUNATCH,
 521    [ENOCSI]            = TARGET_ENOCSI,
 522    [EL2HLT]            = TARGET_EL2HLT,
 523    [EDEADLK]           = TARGET_EDEADLK,
 524    [ENOLCK]            = TARGET_ENOLCK,
 525    [EBADE]             = TARGET_EBADE,
 526    [EBADR]             = TARGET_EBADR,
 527    [EXFULL]            = TARGET_EXFULL,
 528    [ENOANO]            = TARGET_ENOANO,
 529    [EBADRQC]           = TARGET_EBADRQC,
 530    [EBADSLT]           = TARGET_EBADSLT,
 531    [EBFONT]            = TARGET_EBFONT,
 532    [ENOSTR]            = TARGET_ENOSTR,
 533    [ENODATA]           = TARGET_ENODATA,
 534    [ETIME]             = TARGET_ETIME,
 535    [ENOSR]             = TARGET_ENOSR,
 536    [ENONET]            = TARGET_ENONET,
 537    [ENOPKG]            = TARGET_ENOPKG,
 538    [EREMOTE]           = TARGET_EREMOTE,
 539    [ENOLINK]           = TARGET_ENOLINK,
 540    [EADV]              = TARGET_EADV,
 541    [ESRMNT]            = TARGET_ESRMNT,
 542    [ECOMM]             = TARGET_ECOMM,
 543    [EPROTO]            = TARGET_EPROTO,
 544    [EDOTDOT]           = TARGET_EDOTDOT,
 545    [EMULTIHOP]         = TARGET_EMULTIHOP,
 546    [EBADMSG]           = TARGET_EBADMSG,
 547    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
 548    [EOVERFLOW]         = TARGET_EOVERFLOW,
 549    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
 550    [EBADFD]            = TARGET_EBADFD,
 551    [EREMCHG]           = TARGET_EREMCHG,
 552    [ELIBACC]           = TARGET_ELIBACC,
 553    [ELIBBAD]           = TARGET_ELIBBAD,
 554    [ELIBSCN]           = TARGET_ELIBSCN,
 555    [ELIBMAX]           = TARGET_ELIBMAX,
 556    [ELIBEXEC]          = TARGET_ELIBEXEC,
 557    [EILSEQ]            = TARGET_EILSEQ,
 558    [ENOSYS]            = TARGET_ENOSYS,
 559    [ELOOP]             = TARGET_ELOOP,
 560    [ERESTART]          = TARGET_ERESTART,
 561    [ESTRPIPE]          = TARGET_ESTRPIPE,
 562    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
 563    [EUSERS]            = TARGET_EUSERS,
 564    [ENOTSOCK]          = TARGET_ENOTSOCK,
 565    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
 566    [EMSGSIZE]          = TARGET_EMSGSIZE,
 567    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
 568    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
 569    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
 570    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
 571    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
 572    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
 573    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
 574    [EADDRINUSE]        = TARGET_EADDRINUSE,
 575    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
 576    [ENETDOWN]          = TARGET_ENETDOWN,
 577    [ENETUNREACH]       = TARGET_ENETUNREACH,
 578    [ENETRESET]         = TARGET_ENETRESET,
 579    [ECONNABORTED]      = TARGET_ECONNABORTED,
 580    [ECONNRESET]        = TARGET_ECONNRESET,
 581    [ENOBUFS]           = TARGET_ENOBUFS,
 582    [EISCONN]           = TARGET_EISCONN,
 583    [ENOTCONN]          = TARGET_ENOTCONN,
 584    [EUCLEAN]           = TARGET_EUCLEAN,
 585    [ENOTNAM]           = TARGET_ENOTNAM,
 586    [ENAVAIL]           = TARGET_ENAVAIL,
 587    [EISNAM]            = TARGET_EISNAM,
 588    [EREMOTEIO]         = TARGET_EREMOTEIO,
 589    [EDQUOT]            = TARGET_EDQUOT,
 590    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
 591    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
 592    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
 593    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
 594    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
 595    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
 596    [EALREADY]          = TARGET_EALREADY,
 597    [EINPROGRESS]       = TARGET_EINPROGRESS,
 598    [ESTALE]            = TARGET_ESTALE,
 599    [ECANCELED]         = TARGET_ECANCELED,
 600    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
 601    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
 602#ifdef ENOKEY
 603    [ENOKEY]            = TARGET_ENOKEY,
 604#endif
 605#ifdef EKEYEXPIRED
 606    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
 607#endif
 608#ifdef EKEYREVOKED
 609    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
 610#endif
 611#ifdef EKEYREJECTED
 612    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
 613#endif
 614#ifdef EOWNERDEAD
 615    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
 616#endif
 617#ifdef ENOTRECOVERABLE
 618    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
 619#endif
 620#ifdef ENOMSG
 621    [ENOMSG]            = TARGET_ENOMSG,
 622#endif
  623#ifdef ERFKILL
 624    [ERFKILL]           = TARGET_ERFKILL,
 625#endif
 626#ifdef EHWPOISON
 627    [EHWPOISON]         = TARGET_EHWPOISON,
 628#endif
 629};
 630
 631static inline int host_to_target_errno(int err)
 632{
 633    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
 634        host_to_target_errno_table[err]) {
 635        return host_to_target_errno_table[err];
 636    }
 637    return err;
 638}
 639
 640static inline int target_to_host_errno(int err)
 641{
 642    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
 643        target_to_host_errno_table[err]) {
 644        return target_to_host_errno_table[err];
 645    }
 646    return err;
 647}
 648
 649static inline abi_long get_errno(abi_long ret)
 650{
 651    if (ret == -1)
 652        return -host_to_target_errno(errno);
 653    else
 654        return ret;
 655}
 656
 657const char *target_strerror(int err)
 658{
 659    if (err == TARGET_ERESTARTSYS) {
 660        return "To be restarted";
 661    }
 662    if (err == TARGET_QEMU_ESIGRETURN) {
 663        return "Successful exit from sigreturn";
 664    }
 665
 666    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
 667        return NULL;
 668    }
 669    return strerror(target_to_host_errno(err));
 670}
 671
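/* The safe_syscallN() macros below wrap safe_syscall(), which is provided
 * by the linux-user safe-syscall support (not in this file). Unlike a raw
 * syscall() it is written so that guest signal delivery cannot race with
 * entering the host syscall, and it keeps the usual "-1 and set errno"
 * convention, so the generated safe_* wrappers can be passed directly to
 * get_errno(); a call interrupted by a guest signal is reported in a way
 * that lets the guest syscall be restarted (TARGET_ERESTARTSYS). See the
 * safe-syscall support for the exact contract.
 */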
 672#define safe_syscall0(type, name) \
 673static type safe_##name(void) \
 674{ \
 675    return safe_syscall(__NR_##name); \
 676}
 677
 678#define safe_syscall1(type, name, type1, arg1) \
 679static type safe_##name(type1 arg1) \
 680{ \
 681    return safe_syscall(__NR_##name, arg1); \
 682}
 683
 684#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
 685static type safe_##name(type1 arg1, type2 arg2) \
 686{ \
 687    return safe_syscall(__NR_##name, arg1, arg2); \
 688}
 689
 690#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
 691static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
 692{ \
 693    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
 694}
 695
 696#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
 697    type4, arg4) \
 698static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
 699{ \
 700    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
 701}
 702
 703#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
 704    type4, arg4, type5, arg5) \
 705static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
 706    type5 arg5) \
 707{ \
 708    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
 709}
 710
 711#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
 712    type4, arg4, type5, arg5, type6, arg6) \
 713static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
 714    type5 arg5, type6 arg6) \
 715{ \
 716    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
 717}
 718
 719safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 720safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
 721safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
 722              int, flags, mode_t, mode)
 723safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
 724              struct rusage *, rusage)
 725safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
 726              int, options, struct rusage *, rusage)
 727safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
 728safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
 729              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
 730safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
 731              struct timespec *, tsp, const sigset_t *, sigmask,
 732              size_t, sigsetsize)
 733safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
 734              int, maxevents, int, timeout, const sigset_t *, sigmask,
 735              size_t, sigsetsize)
 736safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
 737              const struct timespec *,timeout,int *,uaddr2,int,val3)
 738safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
 739safe_syscall2(int, kill, pid_t, pid, int, sig)
 740safe_syscall2(int, tkill, int, tid, int, sig)
 741safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
 742safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
 743safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
 744safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
 745              unsigned long, pos_l, unsigned long, pos_h)
 746safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
 747              unsigned long, pos_l, unsigned long, pos_h)
 748safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
 749              socklen_t, addrlen)
 750safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
 751              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
 752safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
 753              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
 754safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
 755safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
 756safe_syscall2(int, flock, int, fd, int, operation)
 757safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
 758              const struct timespec *, uts, size_t, sigsetsize)
 759safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
 760              int, flags)
 761safe_syscall2(int, nanosleep, const struct timespec *, req,
 762              struct timespec *, rem)
 763#ifdef TARGET_NR_clock_nanosleep
 764safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
 765              const struct timespec *, req, struct timespec *, rem)
 766#endif
 767#ifdef __NR_msgsnd
 768safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
 769              int, flags)
 770safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
 771              long, msgtype, int, flags)
 772safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
 773              unsigned, nsops, const struct timespec *, timeout)
 774#else
 775/* This host kernel architecture uses a single ipc syscall; fake up
 776 * wrappers for the sub-operations to hide this implementation detail.
 777 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 778 * for the call parameter because some structs in there conflict with the
 779 * sys/ipc.h ones. So we just define them here, and rely on them being
 780 * the same for all host architectures.
 781 */
 782#define Q_SEMTIMEDOP 4
 783#define Q_MSGSND 11
 784#define Q_MSGRCV 12
 785#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
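/* These are expected to match the call numbers used by the kernel's ipc()
 * demultiplexer (SEMTIMEDOP == 4, MSGSND == 11, MSGRCV == 12). Note that
 * safe_msgrcv() below passes version 1: the version-0 MSGRCV variant
 * expects ptr to point at a structure bundling msgp and msgtyp, whereas
 * version 1 passes msgp in ptr and msgtyp as the fifth argument, which is
 * what we want here.
 */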
 786
 787safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
 788              void *, ptr, long, fifth)
 789static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
 790{
 791    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
 792}
 793static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
 794{
 795    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
 796}
 797static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
 798                           const struct timespec *timeout)
 799{
 800    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
 801                    (long)timeout);
 802}
 803#endif
 804#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
 805safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
 806              size_t, len, unsigned, prio, const struct timespec *, timeout)
 807safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
 808              size_t, len, unsigned *, prio, const struct timespec *, timeout)
 809#endif
 810/* We do ioctl like this rather than via safe_syscall3 to preserve the
 811 * "third argument might be integer or pointer or not present" behaviour of
 812 * the libc function.
 813 */
 814#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
 815/* Similarly for fcntl. Note that callers must always:
 816 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 817 *  use the flock64 struct rather than unsuffixed flock
 818 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 819 */
 820#ifdef __NR_fcntl64
 821#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
 822#else
 823#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
 824#endif
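/* So a typical caller looks like:
 *
 *   struct flock64 fl;
 *   ...
 *   ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl));
 *
 * i.e. always the 64-bit command constants and struct flock64, as noted
 * above, regardless of whether the host is 32-bit or 64-bit.
 */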
 825
 826static inline int host_to_target_sock_type(int host_type)
 827{
 828    int target_type;
 829
 830    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
 831    case SOCK_DGRAM:
 832        target_type = TARGET_SOCK_DGRAM;
 833        break;
 834    case SOCK_STREAM:
 835        target_type = TARGET_SOCK_STREAM;
 836        break;
 837    default:
 838        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
 839        break;
 840    }
 841
 842#if defined(SOCK_CLOEXEC)
 843    if (host_type & SOCK_CLOEXEC) {
 844        target_type |= TARGET_SOCK_CLOEXEC;
 845    }
 846#endif
 847
 848#if defined(SOCK_NONBLOCK)
 849    if (host_type & SOCK_NONBLOCK) {
 850        target_type |= TARGET_SOCK_NONBLOCK;
 851    }
 852#endif
 853
 854    return target_type;
 855}
 856
 857static abi_ulong target_brk;
 858static abi_ulong target_original_brk;
 859static abi_ulong brk_page;
 860
 861void target_set_brk(abi_ulong new_brk)
 862{
 863    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
 864    brk_page = HOST_PAGE_ALIGN(target_brk);
 865}
 866
 867//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
 868#define DEBUGF_BRK(message, args...)
 869
 870/* do_brk() must return target values and target errnos. */
 871abi_long do_brk(abi_ulong new_brk)
 872{
 873    abi_long mapped_addr;
 874    abi_ulong new_alloc_size;
 875
 876    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
 877
 878    if (!new_brk) {
 879        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
 880        return target_brk;
 881    }
 882    if (new_brk < target_original_brk) {
 883        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
 884                   target_brk);
 885        return target_brk;
 886    }
 887
 888    /* If the new brk is less than the highest page reserved to the
 889     * target heap allocation, set it and we're almost done...  */
 890    if (new_brk <= brk_page) {
 891        /* Heap contents are initialized to zero, as for anonymous
 892         * mapped pages.  */
 893        if (new_brk > target_brk) {
 894            memset(g2h(target_brk), 0, new_brk - target_brk);
 895        }
 896        target_brk = new_brk;
 897        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
 898        return target_brk;
 899    }
 900
 901    /* We need to allocate more memory after the brk... Note that
 902     * we don't use MAP_FIXED because that will map over the top of
 903     * any existing mapping (like the one with the host libc or qemu
 904     * itself); instead we treat "mapped but at wrong address" as
 905     * a failure and unmap again.
 906     */
 907    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
 908    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
 909                                        PROT_READ|PROT_WRITE,
 910                                        MAP_ANON|MAP_PRIVATE, 0, 0));
 911
 912    if (mapped_addr == brk_page) {
 913        /* Heap contents are initialized to zero, as for anonymous
 914         * mapped pages.  Technically the new pages are already
 915         * initialized to zero since they *are* anonymous mapped
 916         * pages, however we have to take care with the contents that
 917         * come from the remaining part of the previous page: it may
  918     * contain garbage data due to a previous heap usage (grown
 919         * then shrunken).  */
 920        memset(g2h(target_brk), 0, brk_page - target_brk);
 921
 922        target_brk = new_brk;
 923        brk_page = HOST_PAGE_ALIGN(target_brk);
 924        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
 925            target_brk);
 926        return target_brk;
 927    } else if (mapped_addr != -1) {
 928        /* Mapped but at wrong address, meaning there wasn't actually
 929         * enough space for this brk.
 930         */
 931        target_munmap(mapped_addr, new_alloc_size);
 932        mapped_addr = -1;
 933        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
 934    }
 935    else {
 936        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
 937    }
 938
 939#if defined(TARGET_ALPHA)
 940    /* We (partially) emulate OSF/1 on Alpha, which requires we
 941       return a proper errno, not an unchanged brk value.  */
 942    return -TARGET_ENOMEM;
 943#endif
 944    /* For everything else, return the previous break. */
 945    return target_brk;
 946}
 947
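/* The guest fd_set is laid out as an array of abi_ulong words in guest
 * byte order, with bit j of word i standing for file descriptor
 * i * TARGET_ABI_BITS + j. The helpers below convert between that layout
 * and the host fd_set.
 */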
 948static inline abi_long copy_from_user_fdset(fd_set *fds,
 949                                            abi_ulong target_fds_addr,
 950                                            int n)
 951{
 952    int i, nw, j, k;
 953    abi_ulong b, *target_fds;
 954
 955    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
 956    if (!(target_fds = lock_user(VERIFY_READ,
 957                                 target_fds_addr,
 958                                 sizeof(abi_ulong) * nw,
 959                                 1)))
 960        return -TARGET_EFAULT;
 961
 962    FD_ZERO(fds);
 963    k = 0;
 964    for (i = 0; i < nw; i++) {
 965        /* grab the abi_ulong */
 966        __get_user(b, &target_fds[i]);
 967        for (j = 0; j < TARGET_ABI_BITS; j++) {
 968            /* check the bit inside the abi_ulong */
 969            if ((b >> j) & 1)
 970                FD_SET(k, fds);
 971            k++;
 972        }
 973    }
 974
 975    unlock_user(target_fds, target_fds_addr, 0);
 976
 977    return 0;
 978}
 979
 980static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
 981                                                 abi_ulong target_fds_addr,
 982                                                 int n)
 983{
 984    if (target_fds_addr) {
 985        if (copy_from_user_fdset(fds, target_fds_addr, n))
 986            return -TARGET_EFAULT;
 987        *fds_ptr = fds;
 988    } else {
 989        *fds_ptr = NULL;
 990    }
 991    return 0;
 992}
 993
 994static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
 995                                          const fd_set *fds,
 996                                          int n)
 997{
 998    int i, nw, j, k;
 999    abi_long v;
1000    abi_ulong *target_fds;
1001
1002    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1003    if (!(target_fds = lock_user(VERIFY_WRITE,
1004                                 target_fds_addr,
1005                                 sizeof(abi_ulong) * nw,
1006                                 0)))
1007        return -TARGET_EFAULT;
1008
1009    k = 0;
1010    for (i = 0; i < nw; i++) {
1011        v = 0;
1012        for (j = 0; j < TARGET_ABI_BITS; j++) {
1013            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1014            k++;
1015        }
1016        __put_user(v, &target_fds[i]);
1017    }
1018
1019    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1020
1021    return 0;
1022}
1023
1024#if defined(__alpha__)
1025#define HOST_HZ 1024
1026#else
1027#define HOST_HZ 100
1028#endif
1029
1030static inline abi_long host_to_target_clock_t(long ticks)
1031{
1032#if HOST_HZ == TARGET_HZ
1033    return ticks;
1034#else
1035    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1036#endif
1037}
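/* Worked example for the conversion branch: with HOST_HZ == 100 and a
 * (hypothetical) guest TARGET_HZ of 250, 100 host ticks (one second)
 * convert to (100 * 250) / 100 == 250 guest ticks.
 */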
1038
1039static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1040                                             const struct rusage *rusage)
1041{
1042    struct target_rusage *target_rusage;
1043
1044    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1045        return -TARGET_EFAULT;
1046    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1047    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1048    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1049    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1050    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1051    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1052    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1053    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1054    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1055    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1056    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1057    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1058    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1059    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1060    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1061    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1062    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1063    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1064    unlock_user_struct(target_rusage, target_addr, 1);
1065
1066    return 0;
1067}
1068
1069static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1070{
1071    abi_ulong target_rlim_swap;
1072    rlim_t result;
1073    
1074    target_rlim_swap = tswapal(target_rlim);
1075    if (target_rlim_swap == TARGET_RLIM_INFINITY)
1076        return RLIM_INFINITY;
1077
1078    result = target_rlim_swap;
1079    if (target_rlim_swap != (rlim_t)result)
1080        return RLIM_INFINITY;
1081    
1082    return result;
1083}
1084
1085static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1086{
1087    abi_ulong target_rlim_swap;
1088    abi_ulong result;
1089    
1090    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1091        target_rlim_swap = TARGET_RLIM_INFINITY;
1092    else
1093        target_rlim_swap = rlim;
1094    result = tswapal(target_rlim_swap);
1095    
1096    return result;
1097}
1098
1099static inline int target_to_host_resource(int code)
1100{
1101    switch (code) {
1102    case TARGET_RLIMIT_AS:
1103        return RLIMIT_AS;
1104    case TARGET_RLIMIT_CORE:
1105        return RLIMIT_CORE;
1106    case TARGET_RLIMIT_CPU:
1107        return RLIMIT_CPU;
1108    case TARGET_RLIMIT_DATA:
1109        return RLIMIT_DATA;
1110    case TARGET_RLIMIT_FSIZE:
1111        return RLIMIT_FSIZE;
1112    case TARGET_RLIMIT_LOCKS:
1113        return RLIMIT_LOCKS;
1114    case TARGET_RLIMIT_MEMLOCK:
1115        return RLIMIT_MEMLOCK;
1116    case TARGET_RLIMIT_MSGQUEUE:
1117        return RLIMIT_MSGQUEUE;
1118    case TARGET_RLIMIT_NICE:
1119        return RLIMIT_NICE;
1120    case TARGET_RLIMIT_NOFILE:
1121        return RLIMIT_NOFILE;
1122    case TARGET_RLIMIT_NPROC:
1123        return RLIMIT_NPROC;
1124    case TARGET_RLIMIT_RSS:
1125        return RLIMIT_RSS;
1126    case TARGET_RLIMIT_RTPRIO:
1127        return RLIMIT_RTPRIO;
1128    case TARGET_RLIMIT_SIGPENDING:
1129        return RLIMIT_SIGPENDING;
1130    case TARGET_RLIMIT_STACK:
1131        return RLIMIT_STACK;
1132    default:
1133        return code;
1134    }
1135}
1136
1137static inline abi_long copy_from_user_timeval(struct timeval *tv,
1138                                              abi_ulong target_tv_addr)
1139{
1140    struct target_timeval *target_tv;
1141
1142    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1143        return -TARGET_EFAULT;
1144
1145    __get_user(tv->tv_sec, &target_tv->tv_sec);
1146    __get_user(tv->tv_usec, &target_tv->tv_usec);
1147
1148    unlock_user_struct(target_tv, target_tv_addr, 0);
1149
1150    return 0;
1151}
1152
1153static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1154                                            const struct timeval *tv)
1155{
1156    struct target_timeval *target_tv;
1157
1158    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1159        return -TARGET_EFAULT;
1160
1161    __put_user(tv->tv_sec, &target_tv->tv_sec);
1162    __put_user(tv->tv_usec, &target_tv->tv_usec);
1163
1164    unlock_user_struct(target_tv, target_tv_addr, 1);
1165
1166    return 0;
1167}
1168
1169static inline abi_long copy_from_user_timezone(struct timezone *tz,
1170                                               abi_ulong target_tz_addr)
1171{
1172    struct target_timezone *target_tz;
1173
1174    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1175        return -TARGET_EFAULT;
1176    }
1177
1178    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1179    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1180
1181    unlock_user_struct(target_tz, target_tz_addr, 0);
1182
1183    return 0;
1184}
1185
1186#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1187#include <mqueue.h>
1188
1189static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1190                                              abi_ulong target_mq_attr_addr)
1191{
1192    struct target_mq_attr *target_mq_attr;
1193
1194    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1195                          target_mq_attr_addr, 1))
1196        return -TARGET_EFAULT;
1197
1198    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1199    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1200    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1201    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1202
1203    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1204
1205    return 0;
1206}
1207
1208static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1209                                            const struct mq_attr *attr)
1210{
1211    struct target_mq_attr *target_mq_attr;
1212
1213    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1214                          target_mq_attr_addr, 0))
1215        return -TARGET_EFAULT;
1216
1217    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1218    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1219    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1220    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1221
1222    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1223
1224    return 0;
1225}
1226#endif
1227
1228#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1229/* do_select() must return target values and target errnos. */
1230static abi_long do_select(int n,
1231                          abi_ulong rfd_addr, abi_ulong wfd_addr,
1232                          abi_ulong efd_addr, abi_ulong target_tv_addr)
1233{
1234    fd_set rfds, wfds, efds;
1235    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1236    struct timeval tv;
1237    struct timespec ts, *ts_ptr;
1238    abi_long ret;
1239
1240    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1241    if (ret) {
1242        return ret;
1243    }
1244    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1245    if (ret) {
1246        return ret;
1247    }
1248    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1249    if (ret) {
1250        return ret;
1251    }
1252
1253    if (target_tv_addr) {
1254        if (copy_from_user_timeval(&tv, target_tv_addr))
1255            return -TARGET_EFAULT;
1256        ts.tv_sec = tv.tv_sec;
1257        ts.tv_nsec = tv.tv_usec * 1000;
1258        ts_ptr = &ts;
1259    } else {
1260        ts_ptr = NULL;
1261    }
1262
1263    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1264                                  ts_ptr, NULL));
1265
1266    if (!is_error(ret)) {
1267        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1268            return -TARGET_EFAULT;
1269        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1270            return -TARGET_EFAULT;
1271        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1272            return -TARGET_EFAULT;
1273
1274        if (target_tv_addr) {
1275            tv.tv_sec = ts.tv_sec;
1276            tv.tv_usec = ts.tv_nsec / 1000;
1277            if (copy_to_user_timeval(target_tv_addr, &tv)) {
1278                return -TARGET_EFAULT;
1279            }
1280        }
1281    }
1282
1283    return ret;
1284}
1285
1286#if defined(TARGET_WANT_OLD_SYS_SELECT)
1287static abi_long do_old_select(abi_ulong arg1)
1288{
1289    struct target_sel_arg_struct *sel;
1290    abi_ulong inp, outp, exp, tvp;
1291    long nsel;
1292
1293    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1294        return -TARGET_EFAULT;
1295    }
1296
1297    nsel = tswapal(sel->n);
1298    inp = tswapal(sel->inp);
1299    outp = tswapal(sel->outp);
1300    exp = tswapal(sel->exp);
1301    tvp = tswapal(sel->tvp);
1302
1303    unlock_user_struct(sel, arg1, 0);
1304
1305    return do_select(nsel, inp, outp, exp, tvp);
1306}
1307#endif
1308#endif
1309
1310static abi_long do_pipe2(int host_pipe[], int flags)
1311{
1312#ifdef CONFIG_PIPE2
1313    return pipe2(host_pipe, flags);
1314#else
1315    return -ENOSYS;
1316#endif
1317}
1318
1319static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1320                        int flags, int is_pipe2)
1321{
1322    int host_pipe[2];
1323    abi_long ret;
1324    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1325
1326    if (is_error(ret))
1327        return get_errno(ret);
1328
1329    /* Several targets have special calling conventions for the original
1330       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1331    if (!is_pipe2) {
1332#if defined(TARGET_ALPHA)
1333        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1334        return host_pipe[0];
1335#elif defined(TARGET_MIPS)
1336        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1337        return host_pipe[0];
1338#elif defined(TARGET_SH4)
1339        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1340        return host_pipe[0];
1341#elif defined(TARGET_SPARC)
1342        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1343        return host_pipe[0];
1344#endif
1345    }
1346
1347    if (put_user_s32(host_pipe[0], pipedes)
1348        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1349        return -TARGET_EFAULT;
1350    return get_errno(ret);
1351}
1352
1353static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1354                                              abi_ulong target_addr,
1355                                              socklen_t len)
1356{
1357    struct target_ip_mreqn *target_smreqn;
1358
1359    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1360    if (!target_smreqn)
1361        return -TARGET_EFAULT;
1362    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1363    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1364    if (len == sizeof(struct target_ip_mreqn))
1365        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1366    unlock_user(target_smreqn, target_addr, 0);
1367
1368    return 0;
1369}
1370
1371static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1372                                               abi_ulong target_addr,
1373                                               socklen_t len)
1374{
1375    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1376    sa_family_t sa_family;
1377    struct target_sockaddr *target_saddr;
1378
1379    if (fd_trans_target_to_host_addr(fd)) {
1380        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1381    }
1382
1383    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1384    if (!target_saddr)
1385        return -TARGET_EFAULT;
1386
1387    sa_family = tswap16(target_saddr->sa_family);
1388
 1389    /* Oops. The caller might send an incomplete sun_path; sun_path
1390     * must be terminated by \0 (see the manual page), but
1391     * unfortunately it is quite common to specify sockaddr_un
1392     * length as "strlen(x->sun_path)" while it should be
1393     * "strlen(...) + 1". We'll fix that here if needed.
 1394     * The Linux kernel has a similar feature.
1395     */
1396
1397    if (sa_family == AF_UNIX) {
1398        if (len < unix_maxlen && len > 0) {
1399            char *cp = (char*)target_saddr;
1400
1401            if ( cp[len-1] && !cp[len] )
1402                len++;
1403        }
1404        if (len > unix_maxlen)
1405            len = unix_maxlen;
1406    }
1407
1408    memcpy(addr, target_saddr, len);
1409    addr->sa_family = sa_family;
1410    if (sa_family == AF_NETLINK) {
1411        struct sockaddr_nl *nladdr;
1412
1413        nladdr = (struct sockaddr_nl *)addr;
1414        nladdr->nl_pid = tswap32(nladdr->nl_pid);
1415        nladdr->nl_groups = tswap32(nladdr->nl_groups);
1416    } else if (sa_family == AF_PACKET) {
1417        struct target_sockaddr_ll *lladdr;
1418
1419        lladdr = (struct target_sockaddr_ll *)addr;
1420        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1421        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1422    }
1423    unlock_user(target_saddr, target_addr, 0);
1424
1425    return 0;
1426}
1427
1428static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1429                                               struct sockaddr *addr,
1430                                               socklen_t len)
1431{
1432    struct target_sockaddr *target_saddr;
1433
1434    if (len == 0) {
1435        return 0;
1436    }
1437    assert(addr);
1438
1439    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1440    if (!target_saddr)
1441        return -TARGET_EFAULT;
1442    memcpy(target_saddr, addr, len);
1443    if (len >= offsetof(struct target_sockaddr, sa_family) +
1444        sizeof(target_saddr->sa_family)) {
1445        target_saddr->sa_family = tswap16(addr->sa_family);
1446    }
1447    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1448        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1449        target_nl->nl_pid = tswap32(target_nl->nl_pid);
1450        target_nl->nl_groups = tswap32(target_nl->nl_groups);
1451    } else if (addr->sa_family == AF_PACKET) {
1452        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1453        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1454        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1455    } else if (addr->sa_family == AF_INET6 &&
1456               len >= sizeof(struct target_sockaddr_in6)) {
1457        struct target_sockaddr_in6 *target_in6 =
1458               (struct target_sockaddr_in6 *)target_saddr;
1459        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1460    }
1461    unlock_user(target_saddr, target_addr, len);
1462
1463    return 0;
1464}
1465
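/* Convert the ancillary data (control messages) attached to a guest
 * msghdr into host format. Headers are byte-swapped and re-laid-out for
 * the host cmsghdr alignment; SCM_RIGHTS file descriptors and
 * SCM_CREDENTIALS payloads are converted field by field, while any other
 * payload type is copied through unchanged with a warning.
 */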
1466static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1467                                           struct target_msghdr *target_msgh)
1468{
1469    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1470    abi_long msg_controllen;
1471    abi_ulong target_cmsg_addr;
1472    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1473    socklen_t space = 0;
1474    
1475    msg_controllen = tswapal(target_msgh->msg_controllen);
1476    if (msg_controllen < sizeof (struct target_cmsghdr)) 
1477        goto the_end;
1478    target_cmsg_addr = tswapal(target_msgh->msg_control);
1479    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1480    target_cmsg_start = target_cmsg;
1481    if (!target_cmsg)
1482        return -TARGET_EFAULT;
1483
1484    while (cmsg && target_cmsg) {
1485        void *data = CMSG_DATA(cmsg);
1486        void *target_data = TARGET_CMSG_DATA(target_cmsg);
1487
1488        int len = tswapal(target_cmsg->cmsg_len)
1489            - sizeof(struct target_cmsghdr);
1490
1491        space += CMSG_SPACE(len);
1492        if (space > msgh->msg_controllen) {
1493            space -= CMSG_SPACE(len);
1494            /* This is a QEMU bug, since we allocated the payload
1495             * area ourselves (unlike overflow in host-to-target
1496             * conversion, which is just the guest giving us a buffer
1497             * that's too small). It can't happen for the payload types
1498             * we currently support; if it becomes an issue in future
1499             * we would need to improve our allocation strategy to
1500             * something more intelligent than "twice the size of the
1501             * target buffer we're reading from".
1502             */
1503            gemu_log("Host cmsg overflow\n");
1504            break;
1505        }
1506
1507        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1508            cmsg->cmsg_level = SOL_SOCKET;
1509        } else {
1510            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1511        }
1512        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1513        cmsg->cmsg_len = CMSG_LEN(len);
1514
1515        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1516            int *fd = (int *)data;
1517            int *target_fd = (int *)target_data;
1518            int i, numfds = len / sizeof(int);
1519
1520            for (i = 0; i < numfds; i++) {
1521                __get_user(fd[i], target_fd + i);
1522            }
1523        } else if (cmsg->cmsg_level == SOL_SOCKET
1524               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1525            struct ucred *cred = (struct ucred *)data;
1526            struct target_ucred *target_cred =
1527                (struct target_ucred *)target_data;
1528
1529            __get_user(cred->pid, &target_cred->pid);
1530            __get_user(cred->uid, &target_cred->uid);
1531            __get_user(cred->gid, &target_cred->gid);
1532        } else {
1533            gemu_log("Unsupported ancillary data: %d/%d\n",
1534                                        cmsg->cmsg_level, cmsg->cmsg_type);
1535            memcpy(data, target_data, len);
1536        }
1537
1538        cmsg = CMSG_NXTHDR(msgh, cmsg);
1539        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1540                                         target_cmsg_start);
1541    }
1542    unlock_user(target_cmsg, target_cmsg_addr, 0);
1543 the_end:
1544    msgh->msg_controllen = space;
1545    return 0;
1546}
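
/*
 * Worked example (illustrative, not from the original source): a guest
 * sendmsg() passing two file descriptors as SCM_RIGHTS supplies a
 * control buffer of TARGET_CMSG_LEN(2 * sizeof(int)) bytes.  The loop
 * above rebuilds it as a host cmsghdr of CMSG_LEN(2 * sizeof(int))
 * bytes, reading each fd with __get_user() so the values are
 * byte-swapped when guest and host endianness differ.  Because
 * struct target_cmsghdr and struct cmsghdr may have different sizes
 * and alignment, the payload length is recomputed with CMSG_LEN()
 * rather than copied verbatim, and the caller's control buffer is
 * allocated at twice the guest size so the host headers always fit.
 */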
1547
1548static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1549                                           struct msghdr *msgh)
1550{
1551    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1552    abi_long msg_controllen;
1553    abi_ulong target_cmsg_addr;
1554    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1555    socklen_t space = 0;
1556
1557    msg_controllen = tswapal(target_msgh->msg_controllen);
1558    if (msg_controllen < sizeof(struct target_cmsghdr))
1559        goto the_end;
1560    target_cmsg_addr = tswapal(target_msgh->msg_control);
1561    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1562    target_cmsg_start = target_cmsg;
1563    if (!target_cmsg)
1564        return -TARGET_EFAULT;
1565
1566    while (cmsg && target_cmsg) {
1567        void *data = CMSG_DATA(cmsg);
1568        void *target_data = TARGET_CMSG_DATA(target_cmsg);
1569
1570        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1571        int tgt_len, tgt_space;
1572
1573        /* We never copy a half-header but may copy half-data;
1574         * this is Linux's behaviour in put_cmsg(). Note that
1575         * truncation here is a guest problem (which we report
1576         * to the guest via the CTRUNC bit), unlike truncation
1577         * in target_to_host_cmsg, which is a QEMU bug.
1578         */
1579        if (msg_controllen < sizeof(struct target_cmsghdr)) {
1580            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1581            break;
1582        }
1583
1584        if (cmsg->cmsg_level == SOL_SOCKET) {
1585            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1586        } else {
1587            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1588        }
1589        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1590
1591        /* Payload types which need a different size of payload on
1592         * the target must adjust tgt_len here.
1593         */
1594        tgt_len = len;
1595        switch (cmsg->cmsg_level) {
1596        case SOL_SOCKET:
1597            switch (cmsg->cmsg_type) {
1598            case SO_TIMESTAMP:
1599                tgt_len = sizeof(struct target_timeval);
1600                break;
1601            default:
1602                break;
1603            }
1604            break;
1605        default:
1606            break;
1607        }
1608
1609        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1610            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1611            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1612        }
1613
1614        /* We must now copy-and-convert len bytes of payload
1615         * into tgt_len bytes of destination space. Bear in mind
1616         * that in both source and destination we may be dealing
1617         * with a truncated value!
1618         */
1619        switch (cmsg->cmsg_level) {
1620        case SOL_SOCKET:
1621            switch (cmsg->cmsg_type) {
1622            case SCM_RIGHTS:
1623            {
1624                int *fd = (int *)data;
1625                int *target_fd = (int *)target_data;
1626                int i, numfds = tgt_len / sizeof(int);
1627
1628                for (i = 0; i < numfds; i++) {
1629                    __put_user(fd[i], target_fd + i);
1630                }
1631                break;
1632            }
1633            case SO_TIMESTAMP:
1634            {
1635                struct timeval *tv = (struct timeval *)data;
1636                struct target_timeval *target_tv =
1637                    (struct target_timeval *)target_data;
1638
1639                if (len != sizeof(struct timeval) ||
1640                    tgt_len != sizeof(struct target_timeval)) {
1641                    goto unimplemented;
1642                }
1643
1644                /* copy struct timeval to target */
1645                __put_user(tv->tv_sec, &target_tv->tv_sec);
1646                __put_user(tv->tv_usec, &target_tv->tv_usec);
1647                break;
1648            }
1649            case SCM_CREDENTIALS:
1650            {
1651                struct ucred *cred = (struct ucred *)data;
1652                struct target_ucred *target_cred =
1653                    (struct target_ucred *)target_data;
1654
1655                __put_user(cred->pid, &target_cred->pid);
1656                __put_user(cred->uid, &target_cred->uid);
1657                __put_user(cred->gid, &target_cred->gid);
1658                break;
1659            }
1660            default:
1661                goto unimplemented;
1662            }
1663            break;
1664
1665        case SOL_IP:
1666            switch (cmsg->cmsg_type) {
1667            case IP_TTL:
1668            {
1669                uint32_t *v = (uint32_t *)data;
1670                uint32_t *t_int = (uint32_t *)target_data;
1671
1672                if (len != sizeof(uint32_t) ||
1673                    tgt_len != sizeof(uint32_t)) {
1674                    goto unimplemented;
1675                }
1676                __put_user(*v, t_int);
1677                break;
1678            }
1679            case IP_RECVERR:
1680            {
1681                struct errhdr_t {
1682                   struct sock_extended_err ee;
1683                   struct sockaddr_in offender;
1684                };
1685                struct errhdr_t *errh = (struct errhdr_t *)data;
1686                struct errhdr_t *target_errh =
1687                    (struct errhdr_t *)target_data;
1688
1689                if (len != sizeof(struct errhdr_t) ||
1690                    tgt_len != sizeof(struct errhdr_t)) {
1691                    goto unimplemented;
1692                }
1693                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1694                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1695                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1696                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1697                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1698                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1699                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1700                host_to_target_sockaddr((unsigned long) &target_errh->offender,
1701                    (void *) &errh->offender, sizeof(errh->offender));
1702                break;
1703            }
1704            default:
1705                goto unimplemented;
1706            }
1707            break;
1708
1709        case SOL_IPV6:
1710            switch (cmsg->cmsg_type) {
1711            case IPV6_HOPLIMIT:
1712            {
1713                uint32_t *v = (uint32_t *)data;
1714                uint32_t *t_int = (uint32_t *)target_data;
1715
1716                if (len != sizeof(uint32_t) ||
1717                    tgt_len != sizeof(uint32_t)) {
1718                    goto unimplemented;
1719                }
1720                __put_user(*v, t_int);
1721                break;
1722            }
1723            case IPV6_RECVERR:
1724            {
1725                struct errhdr6_t {
1726                   struct sock_extended_err ee;
1727                   struct sockaddr_in6 offender;
1728                };
1729                struct errhdr6_t *errh = (struct errhdr6_t *)data;
1730                struct errhdr6_t *target_errh =
1731                    (struct errhdr6_t *)target_data;
1732
1733                if (len != sizeof(struct errhdr6_t) ||
1734                    tgt_len != sizeof(struct errhdr6_t)) {
1735                    goto unimplemented;
1736                }
1737                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1738                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1739                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1740                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1741                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1742                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1743                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1744                host_to_target_sockaddr((unsigned long) &target_errh->offender,
1745                    (void *) &errh->offender, sizeof(errh->offender));
1746                break;
1747            }
1748            default:
1749                goto unimplemented;
1750            }
1751            break;
1752
1753        default:
1754        unimplemented:
1755            gemu_log("Unsupported ancillary data: %d/%d\n",
1756                                        cmsg->cmsg_level, cmsg->cmsg_type);
1757            memcpy(target_data, data, MIN(len, tgt_len));
1758            if (tgt_len > len) {
1759                memset(target_data + len, 0, tgt_len - len);
1760            }
1761        }
1762
1763        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1764        tgt_space = TARGET_CMSG_SPACE(tgt_len);
1765        if (msg_controllen < tgt_space) {
1766            tgt_space = msg_controllen;
1767        }
1768        msg_controllen -= tgt_space;
1769        space += tgt_space;
1770        cmsg = CMSG_NXTHDR(msgh, cmsg);
1771        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1772                                         target_cmsg_start);
1773    }
1774    unlock_user(target_cmsg, target_cmsg_addr, space);
1775 the_end:
1776    target_msgh->msg_controllen = tswapal(space);
1777    return 0;
1778}
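
/*
 * Worked example (illustrative): with a 32-bit guest on a 64-bit host
 * and SO_TIMESTAMP enabled, the host kernel delivers a 16-byte
 * struct timeval (two 8-byte fields) while the guest expects an
 * 8-byte struct target_timeval (two 4-byte abi_longs).  The tgt_len
 * adjustment above shrinks the payload accordingly and the
 * SO_TIMESTAMP case copies tv_sec/tv_usec field by field with
 * __put_user(), byte-swapping and narrowing as needed, instead of
 * attempting a raw memcpy() of mismatched sizes.
 */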
1779
1780/* do_setsockopt() Must return target values and target errnos. */
1781static abi_long do_setsockopt(int sockfd, int level, int optname,
1782                              abi_ulong optval_addr, socklen_t optlen)
1783{
1784    abi_long ret;
1785    int val;
1786    struct ip_mreqn *ip_mreq;
1787    struct ip_mreq_source *ip_mreq_source;
1788
1789    switch (level) {
1790    case SOL_TCP:
1791        /* TCP options all take an 'int' value.  */
1792        if (optlen < sizeof(uint32_t))
1793            return -TARGET_EINVAL;
1794
1795        if (get_user_u32(val, optval_addr))
1796            return -TARGET_EFAULT;
1797        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1798        break;
1799    case SOL_IP:
1800        switch (optname) {
1801        case IP_TOS:
1802        case IP_TTL:
1803        case IP_HDRINCL:
1804        case IP_ROUTER_ALERT:
1805        case IP_RECVOPTS:
1806        case IP_RETOPTS:
1807        case IP_PKTINFO:
1808        case IP_MTU_DISCOVER:
1809        case IP_RECVERR:
1810        case IP_RECVTTL:
1811        case IP_RECVTOS:
1812#ifdef IP_FREEBIND
1813        case IP_FREEBIND:
1814#endif
1815        case IP_MULTICAST_TTL:
1816        case IP_MULTICAST_LOOP:
1817            val = 0;
1818            if (optlen >= sizeof(uint32_t)) {
1819                if (get_user_u32(val, optval_addr))
1820                    return -TARGET_EFAULT;
1821            } else if (optlen >= 1) {
1822                if (get_user_u8(val, optval_addr))
1823                    return -TARGET_EFAULT;
1824            }
1825            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1826            break;
1827        case IP_ADD_MEMBERSHIP:
1828        case IP_DROP_MEMBERSHIP:
1829            if (optlen < sizeof(struct target_ip_mreq) ||
1830                optlen > sizeof(struct target_ip_mreqn))
1831                return -TARGET_EINVAL;
1832
1833            ip_mreq = (struct ip_mreqn *) alloca(optlen);
1834            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1835            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1836            break;
1837
1838        case IP_BLOCK_SOURCE:
1839        case IP_UNBLOCK_SOURCE:
1840        case IP_ADD_SOURCE_MEMBERSHIP:
1841        case IP_DROP_SOURCE_MEMBERSHIP:
1842            if (optlen != sizeof(struct target_ip_mreq_source))
1843                return -TARGET_EINVAL;
1844
1845            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1846            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1847            unlock_user(ip_mreq_source, optval_addr, 0);
1848            break;
1849
1850        default:
1851            goto unimplemented;
1852        }
1853        break;
1854    case SOL_IPV6:
1855        switch (optname) {
1856        case IPV6_MTU_DISCOVER:
1857        case IPV6_MTU:
1858        case IPV6_V6ONLY:
1859        case IPV6_RECVPKTINFO:
1860        case IPV6_UNICAST_HOPS:
1861        case IPV6_MULTICAST_HOPS:
1862        case IPV6_MULTICAST_LOOP:
1863        case IPV6_RECVERR:
1864        case IPV6_RECVHOPLIMIT:
1865        case IPV6_2292HOPLIMIT:
1866        case IPV6_CHECKSUM:
1867            val = 0;
1868            if (optlen < sizeof(uint32_t)) {
1869                return -TARGET_EINVAL;
1870            }
1871            if (get_user_u32(val, optval_addr)) {
1872                return -TARGET_EFAULT;
1873            }
1874            ret = get_errno(setsockopt(sockfd, level, optname,
1875                                       &val, sizeof(val)));
1876            break;
1877        case IPV6_PKTINFO:
1878        {
1879            struct in6_pktinfo pki;
1880
1881            if (optlen < sizeof(pki)) {
1882                return -TARGET_EINVAL;
1883            }
1884
1885            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1886                return -TARGET_EFAULT;
1887            }
1888
1889            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1890
1891            ret = get_errno(setsockopt(sockfd, level, optname,
1892                                       &pki, sizeof(pki)));
1893            break;
1894        }
1895        default:
1896            goto unimplemented;
1897        }
1898        break;
1899    case SOL_ICMPV6:
1900        switch (optname) {
1901        case ICMPV6_FILTER:
1902        {
1903            struct icmp6_filter icmp6f;
1904
1905            if (optlen > sizeof(icmp6f)) {
1906                optlen = sizeof(icmp6f);
1907            }
1908
1909            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1910                return -TARGET_EFAULT;
1911            }
1912
1913            for (val = 0; val < 8; val++) {
1914                icmp6f.data[val] = tswap32(icmp6f.data[val]);
1915            }
1916
1917            ret = get_errno(setsockopt(sockfd, level, optname,
1918                                       &icmp6f, optlen));
1919            break;
1920        }
1921        default:
1922            goto unimplemented;
1923        }
1924        break;
1925    case SOL_RAW:
1926        switch (optname) {
1927        case ICMP_FILTER:
1928        case IPV6_CHECKSUM:
1929            /* These take a u32 value */
1930            if (optlen < sizeof(uint32_t)) {
1931                return -TARGET_EINVAL;
1932            }
1933
1934            if (get_user_u32(val, optval_addr)) {
1935                return -TARGET_EFAULT;
1936            }
1937            ret = get_errno(setsockopt(sockfd, level, optname,
1938                                       &val, sizeof(val)));
1939            break;
1940
1941        default:
1942            goto unimplemented;
1943        }
1944        break;
1945    case TARGET_SOL_SOCKET:
1946        switch (optname) {
1947        case TARGET_SO_RCVTIMEO:
1948        {
1949                struct timeval tv;
1950
1951                optname = SO_RCVTIMEO;
1952
1953set_timeout:
1954                if (optlen != sizeof(struct target_timeval)) {
1955                    return -TARGET_EINVAL;
1956                }
1957
1958                if (copy_from_user_timeval(&tv, optval_addr)) {
1959                    return -TARGET_EFAULT;
1960                }
1961
1962                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1963                                &tv, sizeof(tv)));
1964                return ret;
1965        }
1966        case TARGET_SO_SNDTIMEO:
1967                optname = SO_SNDTIMEO;
1968                goto set_timeout;
1969        case TARGET_SO_ATTACH_FILTER:
1970        {
1971                struct target_sock_fprog *tfprog;
1972                struct target_sock_filter *tfilter;
1973                struct sock_fprog fprog;
1974                struct sock_filter *filter;
1975                int i;
1976
1977                if (optlen != sizeof(*tfprog)) {
1978                    return -TARGET_EINVAL;
1979                }
1980                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1981                    return -TARGET_EFAULT;
1982                }
1983                if (!lock_user_struct(VERIFY_READ, tfilter,
1984                                      tswapal(tfprog->filter), 0)) {
1985                    unlock_user_struct(tfprog, optval_addr, 1);
1986                    return -TARGET_EFAULT;
1987                }
1988
1989                fprog.len = tswap16(tfprog->len);
1990                filter = g_try_new(struct sock_filter, fprog.len);
1991                if (filter == NULL) {
1992                    unlock_user_struct(tfilter, tfprog->filter, 1);
1993                    unlock_user_struct(tfprog, optval_addr, 1);
1994                    return -TARGET_ENOMEM;
1995                }
1996                for (i = 0; i < fprog.len; i++) {
1997                    filter[i].code = tswap16(tfilter[i].code);
1998                    filter[i].jt = tfilter[i].jt;
1999                    filter[i].jf = tfilter[i].jf;
2000                    filter[i].k = tswap32(tfilter[i].k);
2001                }
2002                fprog.filter = filter;
2003
2004                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2005                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2006                g_free(filter);
2007
2008                unlock_user_struct(tfilter, tfprog->filter, 1);
2009                unlock_user_struct(tfprog, optval_addr, 1);
2010                return ret;
2011        }
2012        case TARGET_SO_BINDTODEVICE:
2013        {
2014                char *dev_ifname, *addr_ifname;
2015
2016                if (optlen > IFNAMSIZ - 1) {
2017                    optlen = IFNAMSIZ - 1;
2018                }
2019                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2020                if (!dev_ifname) {
2021                    return -TARGET_EFAULT;
2022                }
2023                optname = SO_BINDTODEVICE;
2024                addr_ifname = alloca(IFNAMSIZ);
2025                memcpy(addr_ifname, dev_ifname, optlen);
2026                addr_ifname[optlen] = 0;
2027                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2028                                           addr_ifname, optlen));
2029                unlock_user(dev_ifname, optval_addr, 0);
2030                return ret;
2031        }
2032        case TARGET_SO_LINGER:
2033        {
2034                struct linger lg;
2035                struct target_linger *tlg;
2036
2037                if (optlen != sizeof(struct target_linger)) {
2038                    return -TARGET_EINVAL;
2039                }
2040                if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2041                    return -TARGET_EFAULT;
2042                }
2043                __get_user(lg.l_onoff, &tlg->l_onoff);
2044                __get_user(lg.l_linger, &tlg->l_linger);
2045                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2046                                &lg, sizeof(lg)));
2047                unlock_user_struct(tlg, optval_addr, 0);
2048                return ret;
2049        }
2050            /* Options with 'int' argument.  */
2051        case TARGET_SO_DEBUG:
2052                optname = SO_DEBUG;
2053                break;
2054        case TARGET_SO_REUSEADDR:
2055                optname = SO_REUSEADDR;
2056                break;
2057#ifdef SO_REUSEPORT
2058        case TARGET_SO_REUSEPORT:
2059                optname = SO_REUSEPORT;
2060                break;
2061#endif
2062        case TARGET_SO_TYPE:
2063                optname = SO_TYPE;
2064                break;
2065        case TARGET_SO_ERROR:
2066                optname = SO_ERROR;
2067                break;
2068        case TARGET_SO_DONTROUTE:
2069                optname = SO_DONTROUTE;
2070                break;
2071        case TARGET_SO_BROADCAST:
2072                optname = SO_BROADCAST;
2073                break;
2074        case TARGET_SO_SNDBUF:
2075                optname = SO_SNDBUF;
2076                break;
2077        case TARGET_SO_SNDBUFFORCE:
2078                optname = SO_SNDBUFFORCE;
2079                break;
2080        case TARGET_SO_RCVBUF:
2081                optname = SO_RCVBUF;
2082                break;
2083        case TARGET_SO_RCVBUFFORCE:
2084                optname = SO_RCVBUFFORCE;
2085                break;
2086        case TARGET_SO_KEEPALIVE:
2087                optname = SO_KEEPALIVE;
2088                break;
2089        case TARGET_SO_OOBINLINE:
2090                optname = SO_OOBINLINE;
2091                break;
2092        case TARGET_SO_NO_CHECK:
2093                optname = SO_NO_CHECK;
2094                break;
2095        case TARGET_SO_PRIORITY:
2096                optname = SO_PRIORITY;
2097                break;
2098#ifdef SO_BSDCOMPAT
2099        case TARGET_SO_BSDCOMPAT:
2100                optname = SO_BSDCOMPAT;
2101                break;
2102#endif
2103        case TARGET_SO_PASSCRED:
2104                optname = SO_PASSCRED;
2105                break;
2106        case TARGET_SO_PASSSEC:
2107                optname = SO_PASSSEC;
2108                break;
2109        case TARGET_SO_TIMESTAMP:
2110                optname = SO_TIMESTAMP;
2111                break;
2112        case TARGET_SO_RCVLOWAT:
2113                optname = SO_RCVLOWAT;
2114                break;
2115        default:
2116            goto unimplemented;
2117        }
2118        if (optlen < sizeof(uint32_t))
2119            return -TARGET_EINVAL;
2120
2121        if (get_user_u32(val, optval_addr))
2122            return -TARGET_EFAULT;
2123        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2124        break;
2125    default:
2126    unimplemented:
2127        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2128        ret = -TARGET_ENOPROTOOPT;
2129    }
2130    return ret;
2131}
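
/*
 * Usage sketch (illustrative only): a guest call such as
 *
 *     struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
 *     setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * arrives here with level == TARGET_SOL_SOCKET and
 * optname == TARGET_SO_RCVTIMEO; the timeout is converted with
 * copy_from_user_timeval() into a host struct timeval before the real
 * setsockopt() is issued.  The plain integer options at the end of the
 * switch only need optname renamed to its host value, since a
 * byte-swapped int is read with get_user_u32() either way.
 */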
2132
2133/* do_getsockopt() Must return target values and target errnos. */
2134static abi_long do_getsockopt(int sockfd, int level, int optname,
2135                              abi_ulong optval_addr, abi_ulong optlen)
2136{
2137    abi_long ret;
2138    int len, val;
2139    socklen_t lv;
2140
2141    switch (level) {
2142    case TARGET_SOL_SOCKET:
2143        level = SOL_SOCKET;
2144        switch (optname) {
2145        /* These don't just return a single integer */
2146        case TARGET_SO_RCVTIMEO:
2147        case TARGET_SO_SNDTIMEO:
2148        case TARGET_SO_PEERNAME:
2149            goto unimplemented;
2150        case TARGET_SO_PEERCRED: {
2151            struct ucred cr;
2152            socklen_t crlen;
2153            struct target_ucred *tcr;
2154
2155            if (get_user_u32(len, optlen)) {
2156                return -TARGET_EFAULT;
2157            }
2158            if (len < 0) {
2159                return -TARGET_EINVAL;
2160            }
2161
2162            crlen = sizeof(cr);
2163            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2164                                       &cr, &crlen));
2165            if (ret < 0) {
2166                return ret;
2167            }
2168            if (len > crlen) {
2169                len = crlen;
2170            }
2171            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2172                return -TARGET_EFAULT;
2173            }
2174            __put_user(cr.pid, &tcr->pid);
2175            __put_user(cr.uid, &tcr->uid);
2176            __put_user(cr.gid, &tcr->gid);
2177            unlock_user_struct(tcr, optval_addr, 1);
2178            if (put_user_u32(len, optlen)) {
2179                return -TARGET_EFAULT;
2180            }
2181            break;
2182        }
2183        case TARGET_SO_LINGER:
2184        {
2185            struct linger lg;
2186            socklen_t lglen;
2187            struct target_linger *tlg;
2188
2189            if (get_user_u32(len, optlen)) {
2190                return -TARGET_EFAULT;
2191            }
2192            if (len < 0) {
2193                return -TARGET_EINVAL;
2194            }
2195
2196            lglen = sizeof(lg);
2197            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2198                                       &lg, &lglen));
2199            if (ret < 0) {
2200                return ret;
2201            }
2202            if (len > lglen) {
2203                len = lglen;
2204            }
2205            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2206                return -TARGET_EFAULT;
2207            }
2208            __put_user(lg.l_onoff, &tlg->l_onoff);
2209            __put_user(lg.l_linger, &tlg->l_linger);
2210            unlock_user_struct(tlg, optval_addr, 1);
2211            if (put_user_u32(len, optlen)) {
2212                return -TARGET_EFAULT;
2213            }
2214            break;
2215        }
2216        /* Options with 'int' argument.  */
2217        case TARGET_SO_DEBUG:
2218            optname = SO_DEBUG;
2219            goto int_case;
2220        case TARGET_SO_REUSEADDR:
2221            optname = SO_REUSEADDR;
2222            goto int_case;
2223#ifdef SO_REUSEPORT
2224        case TARGET_SO_REUSEPORT:
2225            optname = SO_REUSEPORT;
2226            goto int_case;
2227#endif
2228        case TARGET_SO_TYPE:
2229            optname = SO_TYPE;
2230            goto int_case;
2231        case TARGET_SO_ERROR:
2232            optname = SO_ERROR;
2233            goto int_case;
2234        case TARGET_SO_DONTROUTE:
2235            optname = SO_DONTROUTE;
2236            goto int_case;
2237        case TARGET_SO_BROADCAST:
2238            optname = SO_BROADCAST;
2239            goto int_case;
2240        case TARGET_SO_SNDBUF:
2241            optname = SO_SNDBUF;
2242            goto int_case;
2243        case TARGET_SO_RCVBUF:
2244            optname = SO_RCVBUF;
2245            goto int_case;
2246        case TARGET_SO_KEEPALIVE:
2247            optname = SO_KEEPALIVE;
2248            goto int_case;
2249        case TARGET_SO_OOBINLINE:
2250            optname = SO_OOBINLINE;
2251            goto int_case;
2252        case TARGET_SO_NO_CHECK:
2253            optname = SO_NO_CHECK;
2254            goto int_case;
2255        case TARGET_SO_PRIORITY:
2256            optname = SO_PRIORITY;
2257            goto int_case;
2258#ifdef SO_BSDCOMPAT
2259        case TARGET_SO_BSDCOMPAT:
2260            optname = SO_BSDCOMPAT;
2261            goto int_case;
2262#endif
2263        case TARGET_SO_PASSCRED:
2264            optname = SO_PASSCRED;
2265            goto int_case;
2266        case TARGET_SO_TIMESTAMP:
2267            optname = SO_TIMESTAMP;
2268            goto int_case;
2269        case TARGET_SO_RCVLOWAT:
2270            optname = SO_RCVLOWAT;
2271            goto int_case;
2272        case TARGET_SO_ACCEPTCONN:
2273            optname = SO_ACCEPTCONN;
2274            goto int_case;
2275        default:
2276            goto int_case;
2277        }
2278        break;
2279    case SOL_TCP:
2280        /* TCP options all take an 'int' value.  */
2281    int_case:
2282        if (get_user_u32(len, optlen))
2283            return -TARGET_EFAULT;
2284        if (len < 0)
2285            return -TARGET_EINVAL;
2286        lv = sizeof(val);
2287        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2288        if (ret < 0)
2289            return ret;
2290        if (optname == SO_TYPE) {
2291            val = host_to_target_sock_type(val);
2292        }
2293        if (len > lv)
2294            len = lv;
2295        if (len == 4) {
2296            if (put_user_u32(val, optval_addr))
2297                return -TARGET_EFAULT;
2298        } else {
2299            if (put_user_u8(val, optval_addr))
2300                return -TARGET_EFAULT;
2301        }
2302        if (put_user_u32(len, optlen))
2303            return -TARGET_EFAULT;
2304        break;
2305    case SOL_IP:
2306        switch (optname) {
2307        case IP_TOS:
2308        case IP_TTL:
2309        case IP_HDRINCL:
2310        case IP_ROUTER_ALERT:
2311        case IP_RECVOPTS:
2312        case IP_RETOPTS:
2313        case IP_PKTINFO:
2314        case IP_MTU_DISCOVER:
2315        case IP_RECVERR:
2316        case IP_RECVTOS:
2317#ifdef IP_FREEBIND
2318        case IP_FREEBIND:
2319#endif
2320        case IP_MULTICAST_TTL:
2321        case IP_MULTICAST_LOOP:
2322            if (get_user_u32(len, optlen))
2323                return -TARGET_EFAULT;
2324            if (len < 0)
2325                return -TARGET_EINVAL;
2326            lv = sizeof(val);
2327            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2328            if (ret < 0)
2329                return ret;
2330            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2331                len = 1;
2332                if (put_user_u32(len, optlen)
2333                    || put_user_u8(val, optval_addr))
2334                    return -TARGET_EFAULT;
2335            } else {
2336                if (len > sizeof(int))
2337                    len = sizeof(int);
2338                if (put_user_u32(len, optlen)
2339                    || put_user_u32(val, optval_addr))
2340                    return -TARGET_EFAULT;
2341            }
2342            break;
2343        default:
2344            ret = -TARGET_ENOPROTOOPT;
2345            break;
2346        }
2347        break;
2348    case SOL_IPV6:
2349        switch (optname) {
2350        case IPV6_MTU_DISCOVER:
2351        case IPV6_MTU:
2352        case IPV6_V6ONLY:
2353        case IPV6_RECVPKTINFO:
2354        case IPV6_UNICAST_HOPS:
2355        case IPV6_MULTICAST_HOPS:
2356        case IPV6_MULTICAST_LOOP:
2357        case IPV6_RECVERR:
2358        case IPV6_RECVHOPLIMIT:
2359        case IPV6_2292HOPLIMIT:
2360        case IPV6_CHECKSUM:
2361            if (get_user_u32(len, optlen))
2362                return -TARGET_EFAULT;
2363            if (len < 0)
2364                return -TARGET_EINVAL;
2365            lv = sizeof(val);
2366            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2367            if (ret < 0)
2368                return ret;
2369            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2370                len = 1;
2371                if (put_user_u32(len, optlen)
2372                    || put_user_u8(val, optval_addr))
2373                    return -TARGET_EFAULT;
2374            } else {
2375                if (len > sizeof(int))
2376                    len = sizeof(int);
2377                if (put_user_u32(len, optlen)
2378                    || put_user_u32(val, optval_addr))
2379                    return -TARGET_EFAULT;
2380            }
2381            break;
2382        default:
2383            ret = -TARGET_ENOPROTOOPT;
2384            break;
2385        }
2386        break;
2387    default:
2388    unimplemented:
2389        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2390                 level, optname);
2391        ret = -TARGET_EOPNOTSUPP;
2392        break;
2393    }
2394    return ret;
2395}
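
/*
 * Illustrative note: the SOL_IP branch above mirrors the kernel's
 * handling of short option buffers.  A guest calling
 *
 *     unsigned char ttl; socklen_t len = 1;
 *     getsockopt(s, IPPROTO_IP, IP_TTL, &ttl, &len);
 *
 * gets a single byte written back with put_user_u8() and *optlen set
 * to 1 (for values that fit in a byte), while a request with
 * len >= sizeof(int) receives the full 32-bit value via put_user_u32().
 */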
2396
2397/* Convert target low/high pair representing file offset into the host
2398 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2399 * as the kernel doesn't handle them either.
2400 */
2401static void target_to_host_low_high(abi_ulong tlow,
2402                                    abi_ulong thigh,
2403                                    unsigned long *hlow,
2404                                    unsigned long *hhigh)
2405{
2406    uint64_t off = tlow |
2407        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2408        TARGET_LONG_BITS / 2;
2409
2410    *hlow = off;
2411    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2412}
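
/*
 * Worked example (illustrative): for a 32-bit target,
 * TARGET_LONG_BITS / 2 == 16, so tlow = 0x89abcdef and
 * thigh = 0x01234567 combine into off = 0x0123456789abcdef.  On a
 * 64-bit host *hlow then receives the whole value and *hhigh becomes
 * 0, while on a 32-bit host *hlow is 0x89abcdef and *hhigh is
 * 0x01234567.  The shifts are done in two halves so that no single
 * shift ever equals the full width of the type, which would be
 * undefined behaviour.
 */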
2413
2414static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2415                                abi_ulong count, int copy)
2416{
2417    struct target_iovec *target_vec;
2418    struct iovec *vec;
2419    abi_ulong total_len, max_len;
2420    int i;
2421    int err = 0;
2422    bool bad_address = false;
2423
2424    if (count == 0) {
2425        errno = 0;
2426        return NULL;
2427    }
2428    if (count > IOV_MAX) {
2429        errno = EINVAL;
2430        return NULL;
2431    }
2432
2433    vec = g_try_new0(struct iovec, count);
2434    if (vec == NULL) {
2435        errno = ENOMEM;
2436        return NULL;
2437    }
2438
2439    target_vec = lock_user(VERIFY_READ, target_addr,
2440                           count * sizeof(struct target_iovec), 1);
2441    if (target_vec == NULL) {
2442        err = EFAULT;
2443        goto fail2;
2444    }
2445
2446    /* ??? If host page size > target page size, this will result in a
2447       value larger than what we can actually support.  */
2448    max_len = 0x7fffffff & TARGET_PAGE_MASK;
2449    total_len = 0;
2450
2451    for (i = 0; i < count; i++) {
2452        abi_ulong base = tswapal(target_vec[i].iov_base);
2453        abi_long len = tswapal(target_vec[i].iov_len);
2454
2455        if (len < 0) {
2456            err = EINVAL;
2457            goto fail;
2458        } else if (len == 0) {
2459            /* Zero length pointer is ignored.  */
2460            vec[i].iov_base = 0;
2461        } else {
2462            vec[i].iov_base = lock_user(type, base, len, copy);
2463            /* If the first buffer pointer is bad, this is a fault.  But
2464             * subsequent bad buffers will result in a partial write; this
2465             * is realized by filling the vector with null pointers and
2466             * zero lengths. */
2467            if (!vec[i].iov_base) {
2468                if (i == 0) {
2469                    err = EFAULT;
2470                    goto fail;
2471                } else {
2472                    bad_address = true;
2473                }
2474            }
2475            if (bad_address) {
2476                len = 0;
2477            }
2478            if (len > max_len - total_len) {
2479                len = max_len - total_len;
2480            }
2481        }
2482        vec[i].iov_len = len;
2483        total_len += len;
2484    }
2485
2486    unlock_user(target_vec, target_addr, 0);
2487    return vec;
2488
2489 fail:
2490    while (--i >= 0) {
2491        if (tswapal(target_vec[i].iov_len) > 0) {
2492            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2493        }
2494    }
2495    unlock_user(target_vec, target_addr, 0);
2496 fail2:
2497    g_free(vec);
2498    errno = err;
2499    return NULL;
2500}
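
/*
 * Illustrative example (not part of the original code): if a guest
 * writev() passes three iovecs and only the second one points at an
 * unmapped address, the loop above keeps vec[0] intact, records the
 * bad second buffer as a NULL base with zero length, and forces the
 * length of every later element to zero as well.  The host writev()
 * then performs a short write covering just the first buffer,
 * mirroring Linux's partial-write behaviour; only a fault on the very
 * first buffer is reported to the guest as EFAULT.
 */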
2501
2502static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2503                         abi_ulong count, int copy)
2504{
2505    struct target_iovec *target_vec;
2506    int i;
2507
2508    target_vec = lock_user(VERIFY_READ, target_addr,
2509                           count * sizeof(struct target_iovec), 1);
2510    if (target_vec) {
2511        for (i = 0; i < count; i++) {
2512            abi_ulong base = tswapal(target_vec[i].iov_base);
2513            abi_long len = tswapal(target_vec[i].iov_len);
2514            if (len < 0) {
2515                break;
2516            }
2517            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2518        }
2519        unlock_user(target_vec, target_addr, 0);
2520    }
2521
2522    g_free(vec);
2523}
2524
2525static inline int target_to_host_sock_type(int *type)
2526{
2527    int host_type = 0;
2528    int target_type = *type;
2529
2530    switch (target_type & TARGET_SOCK_TYPE_MASK) {
2531    case TARGET_SOCK_DGRAM:
2532        host_type = SOCK_DGRAM;
2533        break;
2534    case TARGET_SOCK_STREAM:
2535        host_type = SOCK_STREAM;
2536        break;
2537    default:
2538        host_type = target_type & TARGET_SOCK_TYPE_MASK;
2539        break;
2540    }
2541    if (target_type & TARGET_SOCK_CLOEXEC) {
2542#if defined(SOCK_CLOEXEC)
2543        host_type |= SOCK_CLOEXEC;
2544#else
2545        return -TARGET_EINVAL;
2546#endif
2547    }
2548    if (target_type & TARGET_SOCK_NONBLOCK) {
2549#if defined(SOCK_NONBLOCK)
2550        host_type |= SOCK_NONBLOCK;
2551#elif !defined(O_NONBLOCK)
2552        return -TARGET_EINVAL;
2553#endif
2554    }
2555    *type = host_type;
2556    return 0;
2557}
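
/*
 * Illustrative example: a guest socket(AF_INET, SOCK_STREAM |
 * SOCK_NONBLOCK | SOCK_CLOEXEC, 0) arrives with its architecture's
 * TARGET_SOCK_* encoding; this helper rewrites the base type and ORs
 * in the host SOCK_NONBLOCK/SOCK_CLOEXEC values where they exist.  On
 * hosts without SOCK_NONBLOCK the flag is deliberately dropped here
 * and emulated after socket creation by sock_flags_fixup() below via
 * fcntl(F_SETFL, O_NONBLOCK).
 */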
2558
2559/* Try to emulate socket type flags after socket creation.  */
2560static int sock_flags_fixup(int fd, int target_type)
2561{
2562#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2563    if (target_type & TARGET_SOCK_NONBLOCK) {
2564        int flags = fcntl(fd, F_GETFL);
2565        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2566            close(fd);
2567            return -TARGET_EINVAL;
2568        }
2569    }
2570#endif
2571    return fd;
2572}
2573
2574/* do_socket() Must return target values and target errnos. */
2575static abi_long do_socket(int domain, int type, int protocol)
2576{
2577    int target_type = type;
2578    int ret;
2579
2580    ret = target_to_host_sock_type(&type);
2581    if (ret) {
2582        return ret;
2583    }
2584
2585    if (domain == PF_NETLINK && !(
2586#ifdef CONFIG_RTNETLINK
2587         protocol == NETLINK_ROUTE ||
2588#endif
2589         protocol == NETLINK_KOBJECT_UEVENT ||
2590         protocol == NETLINK_AUDIT)) {
2591        return -EPFNOSUPPORT;
2592    }
2593
2594    if (domain == AF_PACKET ||
2595        (domain == AF_INET && type == SOCK_PACKET)) {
2596        protocol = tswap16(protocol);
2597    }
2598
2599    ret = get_errno(socket(domain, type, protocol));
2600    if (ret >= 0) {
2601        ret = sock_flags_fixup(ret, target_type);
2602        if (type == SOCK_PACKET) {
2603            /* Handle an obsolete case:
2604             * if the socket type is SOCK_PACKET, packets are bound by device name.
2605             */
2606            fd_trans_register(ret, &target_packet_trans);
2607        } else if (domain == PF_NETLINK) {
2608            switch (protocol) {
2609#ifdef CONFIG_RTNETLINK
2610            case NETLINK_ROUTE:
2611                fd_trans_register(ret, &target_netlink_route_trans);
2612                break;
2613#endif
2614            case NETLINK_KOBJECT_UEVENT:
2615                /* nothing to do: messages are strings */
2616                break;
2617            case NETLINK_AUDIT:
2618                fd_trans_register(ret, &target_netlink_audit_trans);
2619                break;
2620            default:
2621                g_assert_not_reached();
2622            }
2623        }
2624    }
2625    return ret;
2626}
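
/*
 * Usage sketch (illustrative only): a guest
 *
 *     socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE)
 *
 * is only accepted when CONFIG_RTNETLINK is built in; the resulting
 * host fd is then tagged with target_netlink_route_trans so the
 * fd-trans layer can byte-swap netlink message headers on every later
 * send/receive.  Unsupported netlink protocols are rejected up front
 * with EPFNOSUPPORT rather than letting wrongly-ordered messages
 * through to the kernel.
 */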
2627
2628/* do_bind() Must return target values and target errnos. */
2629static abi_long do_bind(int sockfd, abi_ulong target_addr,
2630                        socklen_t addrlen)
2631{
2632    void *addr;
2633    abi_long ret;
2634
2635    if ((int)addrlen < 0) {
2636        return -TARGET_EINVAL;
2637    }
2638
2639    addr = alloca(addrlen+1);
2640
2641    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2642    if (ret)
2643        return ret;
2644
2645    return get_errno(bind(sockfd, addr, addrlen));
2646}
2647
2648/* do_connect() Must return target values and target errnos. */
2649static abi_long do_connect(int sockfd, abi_ulong target_addr,
2650                           socklen_t addrlen)
2651{
2652    void *addr;
2653    abi_long ret;
2654
2655    if ((int)addrlen < 0) {
2656        return -TARGET_EINVAL;
2657    }
2658
2659    addr = alloca(addrlen+1);
2660
2661    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2662    if (ret)
2663        return ret;
2664
2665    return get_errno(safe_connect(sockfd, addr, addrlen));
2666}
2667
2668/* do_sendrecvmsg_locked() Must return target values and target errnos. */
2669static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2670                                      int flags, int send)
2671{
2672    abi_long ret, len;
2673    struct msghdr msg;
2674    abi_ulong count;
2675    struct iovec *vec;
2676    abi_ulong target_vec;
2677
2678    if (msgp->msg_name) {
2679        msg.msg_namelen = tswap32(msgp->msg_namelen);
2680        msg.msg_name = alloca(msg.msg_namelen+1);
2681        ret = target_to_host_sockaddr(fd, msg.msg_name,
2682                                      tswapal(msgp->msg_name),
2683                                      msg.msg_namelen);
2684        if (ret == -TARGET_EFAULT) {
2685            /* For connected sockets msg_name and msg_namelen must
2686             * be ignored, so returning EFAULT immediately is wrong.
2687             * Instead, pass a bad msg_name to the host kernel, and
2688             * let it decide whether to return EFAULT or not.
2689             */
2690            msg.msg_name = (void *)-1;
2691        } else if (ret) {
2692            goto out2;
2693        }
2694    } else {
2695        msg.msg_name = NULL;
2696        msg.msg_namelen = 0;
2697    }
2698    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2699    msg.msg_control = alloca(msg.msg_controllen);
2700    memset(msg.msg_control, 0, msg.msg_controllen);
2701
2702    msg.msg_flags = tswap32(msgp->msg_flags);
2703
2704    count = tswapal(msgp->msg_iovlen);
2705    target_vec = tswapal(msgp->msg_iov);
2706
2707    if (count > IOV_MAX) {
2708        /* sendmsg/recvmsg return a different errno for this condition than
2709         * readv/writev, so we must catch it here before lock_iovec() does.
2710         */
2711        ret = -TARGET_EMSGSIZE;
2712        goto out2;
2713    }
2714
2715    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2716                     target_vec, count, send);
2717    if (vec == NULL) {
2718        ret = -host_to_target_errno(errno);
2719        goto out2;
2720    }
2721    msg.msg_iovlen = count;
2722    msg.msg_iov = vec;
2723
2724    if (send) {
2725        if (fd_trans_target_to_host_data(fd)) {
2726            void *host_msg;
2727
2728            host_msg = g_malloc(msg.msg_iov->iov_len);
2729            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
2730            ret = fd_trans_target_to_host_data(fd)(host_msg,
2731                                                   msg.msg_iov->iov_len);
2732            if (ret >= 0) {
2733                msg.msg_iov->iov_base = host_msg;
2734                ret = get_errno(safe_sendmsg(fd, &msg, flags));
2735            }
2736            g_free(host_msg);
2737        } else {
2738            ret = target_to_host_cmsg(&msg, msgp);
2739            if (ret == 0) {
2740                ret = get_errno(safe_sendmsg(fd, &msg, flags));
2741            }
2742        }
2743    } else {
2744        ret = get_errno(safe_recvmsg(fd, &msg, flags));
2745        if (!is_error(ret)) {
2746            len = ret;
2747            if (fd_trans_host_to_target_data(fd)) {
2748                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2749                                               MIN(msg.msg_iov->iov_len, len));
2750            } else {
2751                ret = host_to_target_cmsg(msgp, &msg);
2752            }
2753            if (!is_error(ret)) {
2754                msgp->msg_namelen = tswap32(msg.msg_namelen);
2755                msgp->msg_flags = tswap32(msg.msg_flags);
2756                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
2757                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2758                                    msg.msg_name, msg.msg_namelen);
2759                    if (ret) {
2760                        goto out;
2761                    }
2762                }
2763
2764                ret = len;
2765            }
2766        }
2767    }
2768
2769out:
2770    unlock_iovec(vec, target_vec, count, !send);
2771out2:
2772    return ret;
2773}
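
/*
 * Illustrative summary (not part of the original code) of the receive
 * path above: for a guest recvmsg() the iovec is locked for writing,
 * safe_recvmsg() fills it directly in guest memory, the ancillary
 * data is rewritten by host_to_target_cmsg(), and only then are
 * msg_namelen, msg_flags and the peer address copied back; a failure
 * in any conversion step is returned to the guest instead of the
 * byte count.
 */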
2774
2775static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2776                               int flags, int send)
2777{
2778    abi_long ret;
2779    struct target_msghdr *msgp;
2780
2781    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2782                          msgp,
2783                          target_msg,
2784                          send ? 1 : 0)) {
2785        return -TARGET_EFAULT;
2786    }
2787    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2788    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2789    return ret;
2790}
2791
2792/* We don't rely on the C library to have sendmmsg/recvmmsg support,
2793 * so it might not have this *mmsg-specific flag either.
2794 */
2795#ifndef MSG_WAITFORONE
2796#define MSG_WAITFORONE 0x10000
2797#endif
2798
2799static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2800                                unsigned int vlen, unsigned int flags,
2801                                int send)
2802{
2803    struct target_mmsghdr *mmsgp;
2804    abi_long ret = 0;
2805    int i;
2806
2807    if (vlen > UIO_MAXIOV) {
2808        vlen = UIO_MAXIOV;
2809    }
2810
2811    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2812    if (!mmsgp) {
2813        return -TARGET_EFAULT;
2814    }
2815
2816    for (i = 0; i < vlen; i++) {
2817        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2818        if (is_error(ret)) {
2819            break;
2820        }
2821        mmsgp[i].msg_len = tswap32(ret);
2822        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2823        if (flags & MSG_WAITFORONE) {
2824            flags |= MSG_DONTWAIT;
2825        }
2826    }
2827
2828    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2829
2830    /* Return number of datagrams sent if we sent any at all;
2831     * otherwise return the error.
2832     */
2833    if (i) {
2834        return i;
2835    }
2836    return ret;
2837}
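
/*
 * Illustrative example: if a guest sendmmsg() passes vlen == 4 and the
 * third datagram fails with EAGAIN, the loop above stops with i == 2
 * and the function returns 2; the guest sees two datagrams sent and is
 * expected to retry the rest, exactly as with the host syscall.  The
 * error code itself is only returned when not even the first message
 * could be transferred.
 */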
2838
2839/* do_accept4() Must return target values and target errnos. */
2840static abi_long do_accept4(int fd, abi_ulong target_addr,
2841                           abi_ulong target_addrlen_addr, int flags)
2842{
2843    socklen_t addrlen, ret_addrlen;
2844    void *addr;
2845    abi_long ret;
2846    int host_flags;
2847
2848    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2849
2850    if (target_addr == 0) {
2851        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
2852    }
2853
2854    /* Linux returns EINVAL if the addrlen pointer is invalid */
2855    if (get_user_u32(addrlen, target_addrlen_addr))
2856        return -TARGET_EINVAL;
2857
2858    if ((int)addrlen < 0) {
2859        return -TARGET_EINVAL;
2860    }
2861
2862    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2863        return -TARGET_EINVAL;
2864
2865    addr = alloca(addrlen);
2866
2867    ret_addrlen = addrlen;
2868    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
2869    if (!is_error(ret)) {
2870        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2871        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2872            ret = -TARGET_EFAULT;
2873        }
2874    }
2875    return ret;
2876}
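
/*
 * Illustrative note: on Linux the SOCK_NONBLOCK and SOCK_CLOEXEC bits
 * accepted by accept4() share their values with O_NONBLOCK and
 * O_CLOEXEC, which is why the guest flags can be translated through
 * fcntl_flags_tbl above.  A guest accept4(s, NULL, NULL, SOCK_CLOEXEC)
 * therefore reaches the host kernel with the host's SOCK_CLOEXEC value
 * even when the two architectures encode the flag differently.
 */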
2877
2878/* do_getpeername() Must return target values and target errnos. */
2879static abi_long do_getpeername(int fd, abi_ulong target_addr,
2880                               abi_ulong target_addrlen_addr)
2881{
2882    socklen_t addrlen, ret_addrlen;
2883    void *addr;
2884    abi_long ret;
2885
2886    if (get_user_u32(addrlen, target_addrlen_addr))
2887        return -TARGET_EFAULT;
2888
2889    if ((int)addrlen < 0) {
2890        return -TARGET_EINVAL;
2891    }
2892
2893    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2894        return -TARGET_EFAULT;
2895
2896    addr = alloca(addrlen);
2897
2898    ret_addrlen = addrlen;
2899    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
2900    if (!is_error(ret)) {
2901        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2902        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2903            ret = -TARGET_EFAULT;
2904        }
2905    }
2906    return ret;
2907}
2908
2909/* do_getsockname() Must return target values and target errnos. */
2910static abi_long do_getsockname(int fd, abi_ulong target_addr,
2911                               abi_ulong target_addrlen_addr)
2912{
2913    socklen_t addrlen, ret_addrlen;
2914    void *addr;
2915    abi_long ret;
2916
2917    if (get_user_u32(addrlen, target_addrlen_addr))
2918        return -TARGET_EFAULT;
2919
2920    if ((int)addrlen < 0) {
2921        return -TARGET_EINVAL;
2922    }
2923
2924    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2925        return -TARGET_EFAULT;
2926
2927    addr = alloca(addrlen);
2928
2929    ret_addrlen = addrlen;
2930    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
2931    if (!is_error(ret)) {
2932        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
2933        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
2934            ret = -TARGET_EFAULT;
2935        }
2936    }
2937    return ret;
2938}
2939
2940/* do_socketpair() Must return target values and target errnos. */
2941static abi_long do_socketpair(int domain, int type, int protocol,
2942                              abi_ulong target_tab_addr)
2943{
2944    int tab[2];
2945    abi_long ret;
2946
2947    target_to_host_sock_type(&type);
2948
2949    ret = get_errno(socketpair(domain, type, protocol, tab));
2950    if (!is_error(ret)) {
2951        if (put_user_s32(tab[0], target_tab_addr)
2952            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2953            ret = -TARGET_EFAULT;
2954    }
2955    return ret;
2956}
2957
2958/* do_sendto() Must return target values and target errnos. */
2959static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2960                          abi_ulong target_addr, socklen_t addrlen)
2961{
2962    void *addr;
2963    void *host_msg;
2964    void *copy_msg = NULL;
2965    abi_long ret;
2966
2967    if ((int)addrlen < 0) {
2968        return -TARGET_EINVAL;
2969    }
2970
2971    host_msg = lock_user(VERIFY_READ, msg, len, 1);
2972    if (!host_msg)
2973        return -TARGET_EFAULT;
2974    if (fd_trans_target_to_host_data(fd)) {
2975        copy_msg = host_msg;
2976        host_msg = g_malloc(len);
2977        memcpy(host_msg, copy_msg, len);
2978        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
2979        if (ret < 0) {
2980            goto fail;
2981        }
2982    }
2983    if (target_addr) {
2984        addr = alloca(addrlen+1);
2985        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2986        if (ret) {
2987            goto fail;
2988        }
2989        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
2990    } else {
2991        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
2992    }
2993fail:
2994    if (copy_msg) {
2995        g_free(host_msg);
2996        host_msg = copy_msg;
2997    }
2998    unlock_user(host_msg, msg, 0);
2999    return ret;
3000}
3001
3002/* do_recvfrom() Must return target values and target errnos. */
3003static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3004                            abi_ulong target_addr,
3005                            abi_ulong target_addrlen)
3006{
3007    socklen_t addrlen, ret_addrlen;
3008    void *addr;
3009    void *host_msg;
3010    abi_long ret;
3011
3012    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3013    if (!host_msg)
3014        return -TARGET_EFAULT;
3015    if (target_addr) {
3016        if (get_user_u32(addrlen, target_addrlen)) {
3017            ret = -TARGET_EFAULT;
3018            goto fail;
3019        }
3020        if ((int)addrlen < 0) {
3021            ret = -TARGET_EINVAL;
3022            goto fail;
3023        }
3024        addr = alloca(addrlen);
3025        ret_addrlen = addrlen;
3026        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3027                                      addr, &ret_addrlen));
3028    } else {
3029        addr = NULL; /* To keep compiler quiet.  */
3030        addrlen = 0; /* To keep compiler quiet.  */
3031        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3032    }
3033    if (!is_error(ret)) {
3034        if (fd_trans_host_to_target_data(fd)) {
3035            abi_long trans;
3036            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3037            if (is_error(trans)) {
3038                ret = trans;
3039                goto fail;
3040            }
3041        }
3042        if (target_addr) {
3043            host_to_target_sockaddr(target_addr, addr,
3044                                    MIN(addrlen, ret_addrlen));
3045            if (put_user_u32(ret_addrlen, target_addrlen)) {
3046                ret = -TARGET_EFAULT;
3047                goto fail;
3048            }
3049        }
3050        unlock_user(host_msg, msg, len);
3051    } else {
3052fail:
3053        unlock_user(host_msg, msg, 0);
3054    }
3055    return ret;
3056}
3057
3058#ifdef TARGET_NR_socketcall
3059/* do_socketcall() must return target values and target errnos. */
3060static abi_long do_socketcall(int num, abi_ulong vptr)
3061{
3062    static const unsigned nargs[] = { /* number of arguments per operation */
3063        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3064        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3065        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3066        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3067        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3068        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3069        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3070        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3071        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3072        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3073        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3074        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3075        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3076        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3077        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3078        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3079        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3080        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3081        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3082        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3083    };
3084    abi_long a[6]; /* max 6 args */
3085    unsigned i;
3086
3087    /* check the range of the first argument num */
3088    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3089    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3090        return -TARGET_EINVAL;
3091    }
3092    /* ensure we have space for args */
3093    if (nargs[num] > ARRAY_SIZE(a)) {
3094        return -TARGET_EINVAL;
3095    }
3096    /* collect the arguments in a[] according to nargs[] */
3097    for (i = 0; i < nargs[num]; ++i) {
3098        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3099            return -TARGET_EFAULT;
3100        }
3101    }
3102    /* now when we have the args, invoke the appropriate underlying function */
3103    switch (num) {
3104    case TARGET_SYS_SOCKET: /* domain, type, protocol */
3105        return do_socket(a[0], a[1], a[2]);
3106    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3107        return do_bind(a[0], a[1], a[2]);
3108    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3109        return do_connect(a[0], a[1], a[2]);
3110    case TARGET_SYS_LISTEN: /* sockfd, backlog */
3111        return get_errno(listen(a[0], a[1]));
3112    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3113        return do_accept4(a[0], a[1], a[2], 0);
3114    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3115        return do_getsockname(a[0], a[1], a[2]);
3116    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3117        return do_getpeername(a[0], a[1], a[2]);
3118    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3119        return do_socketpair(a[0], a[1], a[2], a[3]);
3120    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3121        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3122    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3123        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3124    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3125        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3126    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3127        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3128    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3129        return get_errno(shutdown(a[0], a[1]));
3130    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3131        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3132    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3133        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3134    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3135        return do_sendrecvmsg(a[0], a[1], a[2], 1);
3136    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3137        return do_sendrecvmsg(a[0], a[1], a[2], 0);
3138    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3139        return do_accept4(a[0], a[1], a[2], a[3]);
3140    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3141        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3142    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3143        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3144    default:
3145        gemu_log("Unsupported socketcall: %d\n", num);
3146        return -TARGET_EINVAL;
3147    }
3148}
3149#endif
3150
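/* Table of guest shmat() attachments, so that do_shmdt() can recover the
 * segment size and clear the guest page flags again on detach.
 */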
3151#define N_SHM_REGIONS   32
3152
3153static struct shm_region {
3154    abi_ulong start;
3155    abi_ulong size;
3156    bool in_use;
3157} shm_regions[N_SHM_REGIONS];
3158
3159#ifndef TARGET_SEMID64_DS
3160/* asm-generic version of this struct */
3161struct target_semid64_ds
3162{
3163  struct target_ipc_perm sem_perm;
3164  abi_ulong sem_otime;
3165#if TARGET_ABI_BITS == 32
3166  abi_ulong __unused1;
3167#endif
3168  abi_ulong sem_ctime;
3169#if TARGET_ABI_BITS == 32
3170  abi_ulong __unused2;
3171#endif
3172  abi_ulong sem_nsems;
3173  abi_ulong __unused3;
3174  abi_ulong __unused4;
3175};
3176#endif
3177
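/* Convert a struct ipc_perm between guest and host layout.  The permission
 * structure is read from / written to the start of the guest *id_ds buffer
 * at target_addr; mode and __seq are 16 bits wide on most targets and
 * 32 bits on the targets listed in the ifdefs below.
 */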
3178static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3179                                               abi_ulong target_addr)
3180{
3181    struct target_ipc_perm *target_ip;
3182    struct target_semid64_ds *target_sd;
3183
3184    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3185        return -TARGET_EFAULT;
3186    target_ip = &(target_sd->sem_perm);
3187    host_ip->__key = tswap32(target_ip->__key);
3188    host_ip->uid = tswap32(target_ip->uid);
3189    host_ip->gid = tswap32(target_ip->gid);
3190    host_ip->cuid = tswap32(target_ip->cuid);
3191    host_ip->cgid = tswap32(target_ip->cgid);
3192#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3193    host_ip->mode = tswap32(target_ip->mode);
3194#else
3195    host_ip->mode = tswap16(target_ip->mode);
3196#endif
3197#if defined(TARGET_PPC)
3198    host_ip->__seq = tswap32(target_ip->__seq);
3199#else
3200    host_ip->__seq = tswap16(target_ip->__seq);
3201#endif
3202    unlock_user_struct(target_sd, target_addr, 0);
3203    return 0;
3204}
3205
3206static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3207                                               struct ipc_perm *host_ip)
3208{
3209    struct target_ipc_perm *target_ip;
3210    struct target_semid64_ds *target_sd;
3211
3212    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3213        return -TARGET_EFAULT;
3214    target_ip = &(target_sd->sem_perm);
3215    target_ip->__key = tswap32(host_ip->__key);
3216    target_ip->uid = tswap32(host_ip->uid);
3217    target_ip->gid = tswap32(host_ip->gid);
3218    target_ip->cuid = tswap32(host_ip->cuid);
3219    target_ip->cgid = tswap32(host_ip->cgid);
3220#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3221    target_ip->mode = tswap32(host_ip->mode);
3222#else
3223    target_ip->mode = tswap16(host_ip->mode);
3224#endif
3225#if defined(TARGET_PPC)
3226    target_ip->__seq = tswap32(host_ip->__seq);
3227#else
3228    target_ip->__seq = tswap16(host_ip->__seq);
3229#endif
3230    unlock_user_struct(target_sd, target_addr, 1);
3231    return 0;
3232}
3233
3234static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3235                                               abi_ulong target_addr)
3236{
3237    struct target_semid64_ds *target_sd;
3238
3239    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3240        return -TARGET_EFAULT;
3241    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
3242        return -TARGET_EFAULT;
3243    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3244    host_sd->sem_otime = tswapal(target_sd->sem_otime);
3245    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3246    unlock_user_struct(target_sd, target_addr, 0);
3247    return 0;
3248}
3249
3250static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3251                                               struct semid_ds *host_sd)
3252{
3253    struct target_semid64_ds *target_sd;
3254
3255    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3256        return -TARGET_EFAULT;
3257    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
3258        return -TARGET_EFAULT;
3259    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3260    target_sd->sem_otime = tswapal(host_sd->sem_otime);
3261    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3262    unlock_user_struct(target_sd, target_addr, 1);
3263    return 0;
3264}
3265
3266struct target_seminfo {
3267    int semmap;
3268    int semmni;
3269    int semmns;
3270    int semmnu;
3271    int semmsl;
3272    int semopm;
3273    int semume;
3274    int semusz;
3275    int semvmx;
3276    int semaem;
3277};
3278
3279static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3280                                              struct seminfo *host_seminfo)
3281{
3282    struct target_seminfo *target_seminfo;
3283    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3284        return -TARGET_EFAULT;
3285    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3286    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3287    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3288    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3289    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3290    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3291    __put_user(host_seminfo->semume, &target_seminfo->semume);
3292    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3293    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3294    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3295    unlock_user_struct(target_seminfo, target_addr, 1);
3296    return 0;
3297}
3298
3299union semun {
3300        int val;
3301        struct semid_ds *buf;
3302        unsigned short *array;
3303        struct seminfo *__buf;
3304};
3305
3306union target_semun {
3307        int val;
3308        abi_ulong buf;
3309        abi_ulong array;
3310        abi_ulong __buf;
3311};
3312
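/* Helpers for the GETALL/SETALL array argument of semctl.  On the way in,
 * target_to_host_semarray() allocates *host_array with g_try_new(); on the
 * way out, host_to_target_semarray() copies it back to the guest and frees it.
 */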
3313static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3314                                               abi_ulong target_addr)
3315{
3316    int nsems;
3317    unsigned short *array;
3318    union semun semun;
3319    struct semid_ds semid_ds;
3320    int i, ret;
3321
3322    semun.buf = &semid_ds;
3323
3324    ret = semctl(semid, 0, IPC_STAT, semun);
3325    if (ret == -1)
3326        return get_errno(ret);
3327
3328    nsems = semid_ds.sem_nsems;
3329
3330    *host_array = g_try_new(unsigned short, nsems);
3331    if (!*host_array) {
3332        return -TARGET_ENOMEM;
3333    }
3334    array = lock_user(VERIFY_READ, target_addr,
3335                      nsems*sizeof(unsigned short), 1);
3336    if (!array) {
3337        g_free(*host_array);
3338        return -TARGET_EFAULT;
3339    }
3340
3341    for (i = 0; i < nsems; i++) {
3342        __get_user((*host_array)[i], &array[i]);
3343    }
3344    unlock_user(array, target_addr, 0);
3345
3346    return 0;
3347}
3348
3349static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3350                                               unsigned short **host_array)
3351{
3352    int nsems;
3353    unsigned short *array;
3354    union semun semun;
3355    struct semid_ds semid_ds;
3356    int i, ret;
3357
3358    semun.buf = &semid_ds;
3359
3360    ret = semctl(semid, 0, IPC_STAT, semun);
3361    if (ret == -1)
3362        return get_errno(ret);
3363
3364    nsems = semid_ds.sem_nsems;
3365
3366    array = lock_user(VERIFY_WRITE, target_addr,
3367                      nsems*sizeof(unsigned short), 0);
3368    if (!array)
3369        return -TARGET_EFAULT;
3370
3371    for (i = 0; i < nsems; i++) {
3372        __put_user((*host_array)[i], &array[i]);
3373    }
3374    g_free(*host_array);
3375    unlock_user(array, target_addr, 1);
3376
3377    return 0;
3378}
3379
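/* do_semctl() must return target values and target errnos. */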
3380static inline abi_long do_semctl(int semid, int semnum, int cmd,
3381                                 abi_ulong target_arg)
3382{
3383    union target_semun target_su = { .buf = target_arg };
3384    union semun arg;
3385    struct semid_ds dsarg;
3386    unsigned short *array = NULL;
3387    struct seminfo seminfo;
3388    abi_long ret = -TARGET_EINVAL;
3389    abi_long err;
3390    cmd &= 0xff;
3391
3392    switch (cmd) {
3393        case GETVAL:
3394        case SETVAL:
3395            /* In 64 bit cross-endian situations, we will erroneously pick up
3396             * the wrong half of the union for the "val" element.  To rectify
3397             * this, the entire 8-byte structure is byteswapped, followed by
3398             * a swap of the 4 byte val field. In other cases, the data is
3399             * already in proper host byte order. */
3400            if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3401                target_su.buf = tswapal(target_su.buf);
3402                arg.val = tswap32(target_su.val);
3403            } else {
3404                arg.val = target_su.val;
3405            }
3406            ret = get_errno(semctl(semid, semnum, cmd, arg));
3407            break;
3408        case GETALL:
3409        case SETALL:
3410            err = target_to_host_semarray(semid, &array, target_su.array);
3411            if (err)
3412                return err;
3413            arg.array = array;
3414            ret = get_errno(semctl(semid, semnum, cmd, arg));
3415            err = host_to_target_semarray(semid, target_su.array, &array);
3416            if (err)
3417                return err;
3418            break;
3419        case IPC_STAT:
3420        case IPC_SET:
3421        case SEM_STAT:
3422            err = target_to_host_semid_ds(&dsarg, target_su.buf);
3423            if (err)
3424                return err;
3425            arg.buf = &dsarg;
3426            ret = get_errno(semctl(semid, semnum, cmd, arg));
3427            err = host_to_target_semid_ds(target_su.buf, &dsarg);
3428            if (err)
3429                return err;
3430            break;
3431        case IPC_INFO:
3432        case SEM_INFO:
3433            arg.__buf = &seminfo;
3434            ret = get_errno(semctl(semid, semnum, cmd, arg));
3435            err = host_to_target_seminfo(target_su.__buf, &seminfo);
3436            if (err)
3437                return err;
3438            break;
3439        case IPC_RMID:
3440        case GETPID:
3441        case GETNCNT:
3442        case GETZCNT:
3443            ret = get_errno(semctl(semid, semnum, cmd, NULL));
3444            break;
3445    }
3446
3447    return ret;
3448}
3449
3450struct target_sembuf {
3451    unsigned short sem_num;
3452    short sem_op;
3453    short sem_flg;
3454};
3455
3456static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3457                                             abi_ulong target_addr,
3458                                             unsigned nsops)
3459{
3460    struct target_sembuf *target_sembuf;
3461    int i;
3462
3463    target_sembuf = lock_user(VERIFY_READ, target_addr,
3464                              nsops*sizeof(struct target_sembuf), 1);
3465    if (!target_sembuf)
3466        return -TARGET_EFAULT;
3467
3468    for (i = 0; i < nsops; i++) {
3469        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3470        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3471        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3472    }
3473
3474    unlock_user(target_sembuf, target_addr, 0);
3475
3476    return 0;
3477}
3478
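/* do_semop() must return target values and target errnos.  The guest sembuf
 * array is converted on the stack and handed to safe_semtimedop() with a
 * NULL timeout.
 */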
3479static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3480{
3481    struct sembuf sops[nsops];
3482
3483    if (target_to_host_sembuf(sops, ptr, nsops))
3484        return -TARGET_EFAULT;
3485
3486    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3487}
3488
3489struct target_msqid_ds
3490{
3491    struct target_ipc_perm msg_perm;
3492    abi_ulong msg_stime;
3493#if TARGET_ABI_BITS == 32
3494    abi_ulong __unused1;
3495#endif
3496    abi_ulong msg_rtime;
3497#if TARGET_ABI_BITS == 32
3498    abi_ulong __unused2;
3499#endif
3500    abi_ulong msg_ctime;
3501#if TARGET_ABI_BITS == 32
3502    abi_ulong __unused3;
3503#endif
3504    abi_ulong __msg_cbytes;
3505    abi_ulong msg_qnum;
3506    abi_ulong msg_qbytes;
3507    abi_ulong msg_lspid;
3508    abi_ulong msg_lrpid;
3509    abi_ulong __unused4;
3510    abi_ulong __unused5;
3511};
3512
3513static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3514                                               abi_ulong target_addr)
3515{
3516    struct target_msqid_ds *target_md;
3517
3518    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3519        return -TARGET_EFAULT;
3520    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
3521        return -TARGET_EFAULT;
3522    host_md->msg_stime = tswapal(target_md->msg_stime);
3523    host_md->msg_rtime = tswapal(target_md->msg_rtime);
3524    host_md->msg_ctime = tswapal(target_md->msg_ctime);
3525    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3526    host_md->msg_qnum = tswapal(target_md->msg_qnum);
3527    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3528    host_md->msg_lspid = tswapal(target_md->msg_lspid);
3529    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3530    unlock_user_struct(target_md, target_addr, 0);
3531    return 0;
3532}
3533
3534static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3535                                               struct msqid_ds *host_md)
3536{
3537    struct target_msqid_ds *target_md;
3538
3539    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3540        return -TARGET_EFAULT;
3541    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
3542        return -TARGET_EFAULT;
3543    target_md->msg_stime = tswapal(host_md->msg_stime);
3544    target_md->msg_rtime = tswapal(host_md->msg_rtime);
3545    target_md->msg_ctime = tswapal(host_md->msg_ctime);
3546    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3547    target_md->msg_qnum = tswapal(host_md->msg_qnum);
3548    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3549    target_md->msg_lspid = tswapal(host_md->msg_lspid);
3550    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3551    unlock_user_struct(target_md, target_addr, 1);
3552    return 0;
3553}
3554
3555struct target_msginfo {
3556    int msgpool;
3557    int msgmap;
3558    int msgmax;
3559    int msgmnb;
3560    int msgmni;
3561    int msgssz;
3562    int msgtql;
3563    unsigned short int msgseg;
3564};
3565
3566static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3567                                              struct msginfo *host_msginfo)
3568{
3569    struct target_msginfo *target_msginfo;
3570    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3571        return -TARGET_EFAULT;
3572    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3573    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3574    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3575    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3576    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3577    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3578    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3579    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3580    unlock_user_struct(target_msginfo, target_addr, 1);
3581    return 0;
3582}
3583
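/* do_msgctl() must return target values and target errnos. */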
3584static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3585{
3586    struct msqid_ds dsarg;
3587    struct msginfo msginfo;
3588    abi_long ret = -TARGET_EINVAL;
3589
3590    cmd &= 0xff;
3591
3592    switch (cmd) {
3593    case IPC_STAT:
3594    case IPC_SET:
3595    case MSG_STAT:
3596        if (target_to_host_msqid_ds(&dsarg, ptr))
3597            return -TARGET_EFAULT;
3598        ret = get_errno(msgctl(msgid, cmd, &dsarg));
3599        if (host_to_target_msqid_ds(ptr, &dsarg))
3600            return -TARGET_EFAULT;
3601        break;
3602    case IPC_RMID:
3603        ret = get_errno(msgctl(msgid, cmd, NULL));
3604        break;
3605    case IPC_INFO:
3606    case MSG_INFO:
3607        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3608        if (host_to_target_msginfo(ptr, &msginfo))
3609            return -TARGET_EFAULT;
3610        break;
3611    }
3612
3613    return ret;
3614}
3615
3616struct target_msgbuf {
3617    abi_long mtype;
3618    char        mtext[1];
3619};
3620
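/* do_msgsnd() must return target values and target errnos.  The guest msgbuf
 * is copied into a freshly allocated host msgbuf so that mtype can be
 * byteswapped independently of the message text.
 */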
3621static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3622                                 ssize_t msgsz, int msgflg)
3623{
3624    struct target_msgbuf *target_mb;
3625    struct msgbuf *host_mb;
3626    abi_long ret = 0;
3627
3628    if (msgsz < 0) {
3629        return -TARGET_EINVAL;
3630    }
3631
3632    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3633        return -TARGET_EFAULT;
3634    host_mb = g_try_malloc(msgsz + sizeof(long));
3635    if (!host_mb) {
3636        unlock_user_struct(target_mb, msgp, 0);
3637        return -TARGET_ENOMEM;
3638    }
3639    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3640    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3641    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3642    g_free(host_mb);
3643    unlock_user_struct(target_mb, msgp, 0);
3644
3645    return ret;
3646}
3647
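/* do_msgrcv() must return target values and target errnos.  On success the
 * received text is copied back into the guest msgbuf and mtype is swapped
 * into guest byte order.
 */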
3648static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3649                                 ssize_t msgsz, abi_long msgtyp,
3650                                 int msgflg)
3651{
3652    struct target_msgbuf *target_mb;
3653    char *target_mtext;
3654    struct msgbuf *host_mb;
3655    abi_long ret = 0;
3656
3657    if (msgsz < 0) {
3658        return -TARGET_EINVAL;
3659    }
3660
3661    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3662        return -TARGET_EFAULT;
3663
3664    host_mb = g_try_malloc(msgsz + sizeof(long));
3665    if (!host_mb) {
3666        ret = -TARGET_ENOMEM;
3667        goto end;
3668    }
3669    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3670
3671    if (ret > 0) {
3672        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3673        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3674        if (!target_mtext) {
3675            ret = -TARGET_EFAULT;
3676            goto end;
3677        }
3678        memcpy(target_mb->mtext, host_mb->mtext, ret);
3679        unlock_user(target_mtext, target_mtext_addr, ret);
3680    }
3681
3682    target_mb->mtype = tswapal(host_mb->mtype);
3683
3684end:
3685    if (target_mb)
3686        unlock_user_struct(target_mb, msgp, 1);
3687    g_free(host_mb);
3688    return ret;
3689}
3690
3691static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3692                                               abi_ulong target_addr)
3693{
3694    struct target_shmid_ds *target_sd;
3695
3696    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3697        return -TARGET_EFAULT;
3698    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3699        return -TARGET_EFAULT;
3700    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3701    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3702    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3703    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3704    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3705    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3706    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3707    unlock_user_struct(target_sd, target_addr, 0);
3708    return 0;
3709}
3710
3711static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3712                                               struct shmid_ds *host_sd)
3713{
3714    struct target_shmid_ds *target_sd;
3715
3716    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3717        return -TARGET_EFAULT;
3718    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3719        return -TARGET_EFAULT;
3720    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3721    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3722    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3723    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3724    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3725    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3726    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3727    unlock_user_struct(target_sd, target_addr, 1);
3728    return 0;
3729}
3730
3731struct target_shminfo {
3732    abi_ulong shmmax;
3733    abi_ulong shmmin;
3734    abi_ulong shmmni;
3735    abi_ulong shmseg;
3736    abi_ulong shmall;
3737};
3738
3739static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3740                                              struct shminfo *host_shminfo)
3741{
3742    struct target_shminfo *target_shminfo;
3743    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3744        return -TARGET_EFAULT;
3745    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3746    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3747    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3748    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3749    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3750    unlock_user_struct(target_shminfo, target_addr, 1);
3751    return 0;
3752}
3753
3754struct target_shm_info {
3755    int used_ids;
3756    abi_ulong shm_tot;
3757    abi_ulong shm_rss;
3758    abi_ulong shm_swp;
3759    abi_ulong swap_attempts;
3760    abi_ulong swap_successes;
3761};
3762
3763static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3764                                               struct shm_info *host_shm_info)
3765{
3766    struct target_shm_info *target_shm_info;
3767    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3768        return -TARGET_EFAULT;
3769    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3770    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3771    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3772    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3773    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3774    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3775    unlock_user_struct(target_shm_info, target_addr, 1);
3776    return 0;
3777}
3778
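/* do_shmctl() must return target values and target errnos. */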
3779static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3780{
3781    struct shmid_ds dsarg;
3782    struct shminfo shminfo;
3783    struct shm_info shm_info;
3784    abi_long ret = -TARGET_EINVAL;
3785
3786    cmd &= 0xff;
3787
3788    switch (cmd) {
3789    case IPC_STAT:
3790    case IPC_SET:
3791    case SHM_STAT:
3792        if (target_to_host_shmid_ds(&dsarg, buf))
3793            return -TARGET_EFAULT;
3794        ret = get_errno(shmctl(shmid, cmd, &dsarg));
3795        if (host_to_target_shmid_ds(buf, &dsarg))
3796            return -TARGET_EFAULT;
3797        break;
3798    case IPC_INFO:
3799        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3800        if (host_to_target_shminfo(buf, &shminfo))
3801            return -TARGET_EFAULT;
3802        break;
3803    case SHM_INFO:
3804        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3805        if (host_to_target_shm_info(buf, &shm_info))
3806            return -TARGET_EFAULT;
3807        break;
3808    case IPC_RMID:
3809    case SHM_LOCK:
3810    case SHM_UNLOCK:
3811        ret = get_errno(shmctl(shmid, cmd, NULL));
3812        break;
3813    }
3814
3815    return ret;
3816}
3817
3818#ifndef TARGET_FORCE_SHMLBA
3819/* For most architectures, SHMLBA is the same as the page size;
3820 * some architectures have larger values, in which case they should
3821 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3822 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3823 * and defining its own value for SHMLBA.
3824 *
3825 * The kernel also permits SHMLBA to be set by the architecture to a
3826 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3827 * this means that addresses are rounded to the large size if
3828 * SHM_RND is set but addresses not aligned to that size are not rejected
3829 * as long as they are at least page-aligned. Since the only architecture
3830 * which uses this is ia64 this code doesn't provide for that oddity.
3831 */
3832static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3833{
3834    return TARGET_PAGE_SIZE;
3835}
3836#endif
3837
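/* do_shmat() must return target values and target errnos.  The attach
 * address is checked against SHMLBA, the segment is mapped into the guest
 * address space (at a freshly found range when shmaddr is 0), the guest
 * page flags are updated and the mapping is recorded in shm_regions[].
 */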
3838static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3839                                 int shmid, abi_ulong shmaddr, int shmflg)
3840{
3841    abi_long raddr;
3842    void *host_raddr;
3843    struct shmid_ds shm_info;
3844    int i, ret;
3845    abi_ulong shmlba;
3846
3847    /* find out the length of the shared memory segment */
3848    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3849    if (is_error(ret)) {
3850        /* can't get length, bail out */
3851        return ret;
3852    }
3853
3854    shmlba = target_shmlba(cpu_env);
3855
3856    if (shmaddr & (shmlba - 1)) {
3857        if (shmflg & SHM_RND) {
3858            shmaddr &= ~(shmlba - 1);
3859        } else {
3860            return -TARGET_EINVAL;
3861        }
3862    }
3863    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3864        return -TARGET_EINVAL;
3865    }
3866
3867    mmap_lock();
3868
3869    if (shmaddr)
3870        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3871    else {
3872        abi_ulong mmap_start;
3873
3874        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3875
3876        if (mmap_start == -1) {
3877            errno = ENOMEM;
3878            host_raddr = (void *)-1;
3879        } else
3880            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3881    }
3882
3883    if (host_raddr == (void *)-1) {
3884        mmap_unlock();
3885        return get_errno((long)host_raddr);
3886    }
3887    raddr = h2g((unsigned long)host_raddr);
3888
3889    page_set_flags(raddr, raddr + shm_info.shm_segsz,
3890                   PAGE_VALID | PAGE_READ |
3891                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3892
3893    for (i = 0; i < N_SHM_REGIONS; i++) {
3894        if (!shm_regions[i].in_use) {
3895            shm_regions[i].in_use = true;
3896            shm_regions[i].start = raddr;
3897            shm_regions[i].size = shm_info.shm_segsz;
3898            break;
3899        }
3900    }
3901
3902    mmap_unlock();
3903    return raddr;
3904
3905}
3906
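/* do_shmdt() must return target values and target errnos.  The matching
 * shm_regions[] entry is released and the guest page flags for the region
 * are cleared before the host shmdt() is issued.
 */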
3907static inline abi_long do_shmdt(abi_ulong shmaddr)
3908{
3909    int i;
3910    abi_long rv;
3911
3912    mmap_lock();
3913
3914    for (i = 0; i < N_SHM_REGIONS; ++i) {
3915        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3916            shm_regions[i].in_use = false;
3917            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3918            break;
3919        }
3920    }
3921    rv = get_errno(shmdt(g2h(shmaddr)));
3922
3923    mmap_unlock();
3924
3925    return rv;
3926}
3927
3928#ifdef TARGET_NR_ipc
3929/* ??? This only works with linear mappings.  */
3930/* do_ipc() must return target values and target errnos. */
3931static abi_long do_ipc(CPUArchState *cpu_env,
3932                       unsigned int call, abi_long first,
3933                       abi_long second, abi_long third,
3934                       abi_long ptr, abi_long fifth)
3935{
3936    int version;
3937    abi_long ret = 0;
3938
3939    version = call >> 16;
3940    call &= 0xffff;
3941
3942    switch (call) {
3943    case IPCOP_semop:
3944        ret = do_semop(first, ptr, second);
3945        break;
3946
3947    case IPCOP_semget:
3948        ret = get_errno(semget(first, second, third));
3949        break;
3950
3951    case IPCOP_semctl: {
3952        /* The semun argument to semctl is passed by value, so dereference the
3953         * ptr argument. */
3954        abi_ulong atptr;
3955        get_user_ual(atptr, ptr);
3956        ret = do_semctl(first, second, third, atptr);
3957        break;
3958    }
3959
3960    case IPCOP_msgget:
3961        ret = get_errno(msgget(first, second));
3962        break;
3963
3964    case IPCOP_msgsnd:
3965        ret = do_msgsnd(first, ptr, second, third);
3966        break;
3967
3968    case IPCOP_msgctl:
3969        ret = do_msgctl(first, second, ptr);
3970        break;
3971
3972    case IPCOP_msgrcv:
3973        switch (version) {
3974        case 0:
3975            {
3976                struct target_ipc_kludge {
3977                    abi_long msgp;
3978                    abi_long msgtyp;
3979                } *tmp;
3980
3981                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3982                    ret = -TARGET_EFAULT;
3983                    break;
3984                }
3985
3986                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3987
3988                unlock_user_struct(tmp, ptr, 0);
3989                break;
3990            }
3991        default:
3992            ret = do_msgrcv(first, ptr, second, fifth, third);
3993        }
3994        break;
3995
3996    case IPCOP_shmat:
3997        switch (version) {
3998        default:
3999        {
4000            abi_ulong raddr;
4001            raddr = do_shmat(cpu_env, first, ptr, second);
4002            if (is_error(raddr))
4003                return get_errno(raddr);
4004            if (put_user_ual(raddr, third))
4005                return -TARGET_EFAULT;
4006            break;
4007        }
4008        case 1:
4009            ret = -TARGET_EINVAL;
4010            break;
4011        }
4012        break;
4013    case IPCOP_shmdt:
4014        ret = do_shmdt(ptr);
4015        break;
4016
4017    case IPCOP_shmget:
4018        /* IPC_* flag values are the same on all linux platforms */
4019        ret = get_errno(shmget(first, second, third));
4020        break;
4021
4022        /* IPC_* and SHM_* command values are the same on all linux platforms */
4023    case IPCOP_shmctl:
4024        ret = do_shmctl(first, second, ptr);
4025        break;
4026    default:
4027        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4028        ret = -TARGET_ENOSYS;
4029        break;
4030    }
4031    return ret;
4032}
4033#endif
4034
4035/* kernel structure types definitions */
4036
4037#define STRUCT(name, ...) STRUCT_ ## name,
4038#define STRUCT_SPECIAL(name) STRUCT_ ## name,
4039enum {
4040#include "syscall_types.h"
4041STRUCT_MAX
4042};
4043#undef STRUCT
4044#undef STRUCT_SPECIAL
4045
4046#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4047#define STRUCT_SPECIAL(name)
4048#include "syscall_types.h"
4049#undef STRUCT
4050#undef STRUCT_SPECIAL
4051
4052typedef struct IOCTLEntry IOCTLEntry;
4053
4054typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4055                             int fd, int cmd, abi_long arg);
4056
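/* Describes how a single ioctl is translated: the target and host command
 * numbers, a printable name, the data direction, an optional custom handler
 * and a thunk description of the argument type.
 */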
4057struct IOCTLEntry {
4058    int target_cmd;
4059    unsigned int host_cmd;
4060    const char *name;
4061    int access;
4062    do_ioctl_fn *do_ioctl;
4063    const argtype arg_type[5];
4064};
4065
4066#define IOC_R 0x0001
4067#define IOC_W 0x0002
4068#define IOC_RW (IOC_R | IOC_W)
4069
4070#define MAX_STRUCT_SIZE 4096
4071
4072#ifdef CONFIG_FIEMAP
4073/* So fiemap access checks don't overflow on 32 bit systems.
4074 * This is very slightly smaller than the limit imposed by
4075 * the underlying kernel.
4076 */
4077#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4078                            / sizeof(struct fiemap_extent))
4079
4080static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4081                                       int fd, int cmd, abi_long arg)
4082{
4083    /* The parameter for this ioctl is a struct fiemap followed
4084     * by an array of struct fiemap_extent whose size is set
4085     * in fiemap->fm_extent_count. The array is filled in by the
4086     * ioctl.
4087     */
4088    int target_size_in, target_size_out;
4089    struct fiemap *fm;
4090    const argtype *arg_type = ie->arg_type;
4091    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4092    void *argptr, *p;
4093    abi_long ret;
4094    int i, extent_size = thunk_type_size(extent_arg_type, 0);
4095    uint32_t outbufsz;
4096    int free_fm = 0;
4097
4098    assert(arg_type[0] == TYPE_PTR);
4099    assert(ie->access == IOC_RW);
4100    arg_type++;
4101    target_size_in = thunk_type_size(arg_type, 0);
4102    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4103    if (!argptr) {
4104        return -TARGET_EFAULT;
4105    }
4106    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4107    unlock_user(argptr, arg, 0);
4108    fm = (struct fiemap *)buf_temp;
4109    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4110        return -TARGET_EINVAL;
4111    }
4112
4113    outbufsz = sizeof (*fm) +
4114        (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4115
4116    if (outbufsz > MAX_STRUCT_SIZE) {
4117        /* We can't fit all the extents into the fixed size buffer.
4118         * Allocate one that is large enough and use it instead.
4119         */
4120        fm = g_try_malloc(outbufsz);
4121        if (!fm) {
4122            return -TARGET_ENOMEM;
4123        }
4124        memcpy(fm, buf_temp, sizeof(struct fiemap));
4125        free_fm = 1;
4126    }
4127    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4128    if (!is_error(ret)) {
4129        target_size_out = target_size_in;
4130        /* An extent_count of 0 means we were only counting the extents
4131         * so there are no structs to copy
4132         */
4133        if (fm->fm_extent_count != 0) {
4134            target_size_out += fm->fm_mapped_extents * extent_size;
4135        }
4136        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4137        if (!argptr) {
4138            ret = -TARGET_EFAULT;
4139        } else {
4140            /* Convert the struct fiemap */
4141            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4142            if (fm->fm_extent_count != 0) {
4143                p = argptr + target_size_in;
4144                /* ...and then all the struct fiemap_extents */
4145                for (i = 0; i < fm->fm_mapped_extents; i++) {
4146                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4147                                  THUNK_TARGET);
4148                    p += extent_size;
4149                }
4150            }
4151            unlock_user(argptr, arg, target_size_out);
4152        }
4153    }
4154    if (free_fm) {
4155        g_free(fm);
4156    }
4157    return ret;
4158}
4159#endif
4160
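/* Handle the ifconf ioctl (SIOCGIFCONF): ifc_buf and the size of struct
 * ifreq differ between guest and host, so the request is redirected to a
 * host buffer and each returned ifreq is converted back individually.
 */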
4161static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4162                                int fd, int cmd, abi_long arg)
4163{
4164    const argtype *arg_type = ie->arg_type;
4165    int target_size;
4166    void *argptr;
4167    int ret;
4168    struct ifconf *host_ifconf;
4169    uint32_t outbufsz;
4170    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4171    int target_ifreq_size;
4172    int nb_ifreq;
4173    int free_buf = 0;
4174    int i;
4175    int target_ifc_len;
4176    abi_long target_ifc_buf;
4177    int host_ifc_len;
4178    char *host_ifc_buf;
4179
4180    assert(arg_type[0] == TYPE_PTR);
4181    assert(ie->access == IOC_RW);
4182
4183    arg_type++;
4184    target_size = thunk_type_size(arg_type, 0);
4185
4186    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4187    if (!argptr)
4188        return -TARGET_EFAULT;
4189    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4190    unlock_user(argptr, arg, 0);
4191
4192    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4193    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4194    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4195
4196    if (target_ifc_buf != 0) {
4197        target_ifc_len = host_ifconf->ifc_len;
4198        nb_ifreq = target_ifc_len / target_ifreq_size;
4199        host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4200
4201        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4202        if (outbufsz > MAX_STRUCT_SIZE) {
4203            /*
4204             * We can't fit all the extents into the fixed size buffer.
4205             * Allocate one that is large enough and use it instead.
4206             */
4207            host_ifconf = malloc(outbufsz);
4208            if (!host_ifconf) {
4209                return -TARGET_ENOMEM;
4210            }
4211            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4212            free_buf = 1;
4213        }
4214        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4215
4216        host_ifconf->ifc_len = host_ifc_len;
4217    } else {
4218        host_ifc_buf = NULL;
4219    }
4220    host_ifconf->ifc_buf = host_ifc_buf;
4221
4222    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4223    if (!is_error(ret)) {
4224        /* convert host ifc_len to target ifc_len */
4225
4226        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4227        target_ifc_len = nb_ifreq * target_ifreq_size;
4228        host_ifconf->ifc_len = target_ifc_len;
4229
4230        /* restore target ifc_buf */
4231
4232        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4233
4234        /* copy struct ifconf to target user */
4235
4236        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4237        if (!argptr)
4238            return -TARGET_EFAULT;
4239        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4240        unlock_user(argptr, arg, target_size);
4241
4242        if (target_ifc_buf != 0) {
4243            /* copy ifreq[] to target user */
4244            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4245            for (i = 0; i < nb_ifreq ; i++) {
4246                thunk_convert(argptr + i * target_ifreq_size,
4247                              host_ifc_buf + i * sizeof(struct ifreq),
4248                              ifreq_arg_type, THUNK_TARGET);
4249            }
4250            unlock_user(argptr, target_ifc_buf, target_ifc_len);
4251        }
4252    }
4253
4254    if (free_buf) {
4255        free(host_ifconf);
4256    }
4257
4258    return ret;
4259}
4260
4261#if defined(CONFIG_USBFS)
4262#if HOST_LONG_BITS > 64
4263#error USBDEVFS thunks do not support >64 bit hosts yet.
4264#endif
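/* Each guest USBDEVFS_SUBMITURB is shadowed by a live_urb, which keeps the
 * host urb handed to the kernel together with the guest addresses needed
 * to write the result back when the urb is reaped or discarded.
 */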
4265struct live_urb {
4266    uint64_t target_urb_adr;
4267    uint64_t target_buf_adr;
4268    char *target_buf_ptr;
4269    struct usbdevfs_urb host_urb;
4270};
4271
4272static GHashTable *usbdevfs_urb_hashtable(void)
4273{
4274    static GHashTable *urb_hashtable;
4275
4276    if (!urb_hashtable) {
4277        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4278    }
4279    return urb_hashtable;
4280}
4281
4282static void urb_hashtable_insert(struct live_urb *urb)
4283{
4284    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4285    g_hash_table_insert(urb_hashtable, urb, urb);
4286}
4287
4288static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4289{
4290    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4291    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4292}
4293
4294static void urb_hashtable_remove(struct live_urb *urb)
4295{
4296    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4297    g_hash_table_remove(urb_hashtable, urb);
4298}
4299
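/* Reap a completed urb: the kernel returns the host urb pointer, from which
 * the owning live_urb is recovered; the guest data buffer is unlocked and
 * the completed urb plus its guest handle are copied back to the guest.
 */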
4300static abi_long
4301do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4302                          int fd, int cmd, abi_long arg)
4303{
4304    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4305    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4306    struct live_urb *lurb;
4307    void *argptr;
4308    uint64_t hurb;
4309    int target_size;
4310    uintptr_t target_urb_adr;
4311    abi_long ret;
4312
4313    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4314
4315    memset(buf_temp, 0, sizeof(uint64_t));
4316    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4317    if (is_error(ret)) {
4318        return ret;
4319    }
4320
4321    memcpy(&hurb, buf_temp, sizeof(uint64_t));
4322    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4323    if (!lurb->target_urb_adr) {
4324        return -TARGET_EFAULT;
4325    }
4326    urb_hashtable_remove(lurb);
4327    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4328        lurb->host_urb.buffer_length);
4329    lurb->target_buf_ptr = NULL;
4330
4331    /* restore the guest buffer pointer */
4332    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4333
4334    /* update the guest urb struct */
4335    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4336    if (!argptr) {
4337        g_free(lurb);
4338        return -TARGET_EFAULT;
4339    }
4340    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4341    unlock_user(argptr, lurb->target_urb_adr, target_size);
4342
4343    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4344    /* write back the urb handle */
4345    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4346    if (!argptr) {
4347        g_free(lurb);
4348        return -TARGET_EFAULT;
4349    }
4350
4351    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4352    target_urb_adr = lurb->target_urb_adr;
4353    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4354    unlock_user(argptr, arg, target_size);
4355
4356    g_free(lurb);
4357    return ret;
4358}
4359
4360static abi_long
4361do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4362                             uint8_t *buf_temp __attribute__((unused)),
4363                             int fd, int cmd, abi_long arg)
4364{
4365    struct live_urb *lurb;
4366
4367    /* map target address back to host URB with metadata. */
4368    lurb = urb_hashtable_lookup(arg);
4369    if (!lurb) {
4370        return -TARGET_EFAULT;
4371    }
4372    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4373}
4374
4375static abi_long
4376do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4377                            int fd, int cmd, abi_long arg)
4378{
4379    const argtype *arg_type = ie->arg_type;
4380    int target_size;
4381    abi_long ret;
4382    void *argptr;
4383    int rw_dir;
4384    struct live_urb *lurb;
4385
4386    /*
4387     * each submitted URB needs to map to a unique ID for the
4388     * kernel, and that unique ID needs to be a pointer to
4389     * host memory.  hence, we need to malloc for each URB.
4390     * isochronous transfers have a variable length struct.
4391     */
4392    arg_type++;
4393    target_size = thunk_type_size(arg_type, THUNK_TARGET);
4394
4395    /* construct host copy of urb and metadata */
4396    lurb = g_try_malloc0(sizeof(struct live_urb));
4397    if (!lurb) {
4398        return -TARGET_ENOMEM;
4399    }
4400
4401    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4402    if (!argptr) {
4403        g_free(lurb);
4404        return -TARGET_EFAULT;
4405    }
4406    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4407    unlock_user(argptr, arg, 0);
4408
4409    lurb->target_urb_adr = arg;
4410    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4411
4412    /* buffer space used depends on endpoint type so lock the entire buffer */
4413    /* control type urbs should check the buffer contents for true direction */
4414    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4415    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4416        lurb->host_urb.buffer_length, 1);
4417    if (lurb->target_buf_ptr == NULL) {
4418        g_free(lurb);
4419        return -TARGET_EFAULT;
4420    }
4421
4422    /* update buffer pointer in host copy */
4423    lurb->host_urb.buffer = lurb->target_buf_ptr;
4424
4425    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4426    if (is_error(ret)) {
4427        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4428        g_free(lurb);
4429    } else {
4430        urb_hashtable_insert(lurb);
4431    }
4432
4433    return ret;
4434}
4435#endif /* CONFIG_USBFS */
4436
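/* Device-mapper ioctls carry a variable sized payload after the fixed
 * struct dm_ioctl header.  buf_temp is too small for that, so the request
 * is staged in a larger temporary buffer and the per-command payload is
 * converted explicitly in each direction.
 */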
4437static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4438                            int cmd, abi_long arg)
4439{
4440    void *argptr;
4441    struct dm_ioctl *host_dm;
4442    abi_long guest_data;
4443    uint32_t guest_data_size;
4444    int target_size;
4445    const argtype *arg_type = ie->arg_type;
4446    abi_long ret;
4447    void *big_buf = NULL;
4448    char *host_data;
4449
4450    arg_type++;
4451    target_size = thunk_type_size(arg_type, 0);
4452    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4453    if (!argptr) {
4454        ret = -TARGET_EFAULT;
4455        goto out;
4456    }
4457    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4458    unlock_user(argptr, arg, 0);
4459
4460    /* buf_temp is too small, so fetch things into a bigger buffer */
4461    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4462    memcpy(big_buf, buf_temp, target_size);
4463    buf_temp = big_buf;
4464    host_dm = big_buf;
4465
4466    guest_data = arg + host_dm->data_start;
4467    if ((guest_data - arg) < 0) {
4468        ret = -TARGET_EINVAL;
4469        goto out;
4470    }
4471    guest_data_size = host_dm->data_size - host_dm->data_start;
4472    host_data = (char*)host_dm + host_dm->data_start;
4473
4474    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4475    if (!argptr) {
4476        ret = -TARGET_EFAULT;
4477        goto out;
4478    }
4479
4480    switch (ie->host_cmd) {
4481    case DM_REMOVE_ALL:
4482    case DM_LIST_DEVICES:
4483    case DM_DEV_CREATE:
4484    case DM_DEV_REMOVE:
4485    case DM_DEV_SUSPEND:
4486    case DM_DEV_STATUS:
4487    case DM_DEV_WAIT:
4488    case DM_TABLE_STATUS:
4489    case DM_TABLE_CLEAR:
4490    case DM_TABLE_DEPS:
4491    case DM_LIST_VERSIONS:
4492        /* no input data */
4493        break;
4494    case DM_DEV_RENAME:
4495    case DM_DEV_SET_GEOMETRY:
4496        /* data contains only strings */
4497        memcpy(host_data, argptr, guest_data_size);
4498        break;
4499    case DM_TARGET_MSG:
4500        memcpy(host_data, argptr, guest_data_size);
4501        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4502        break;
4503    case DM_TABLE_LOAD:
4504    {
4505        void *gspec = argptr;
4506        void *cur_data = host_data;
4507        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4508        int spec_size = thunk_type_size(arg_type, 0);
4509        int i;
4510
4511        for (i = 0; i < host_dm->target_count; i++) {
4512            struct dm_target_spec *spec = cur_data;
4513            uint32_t next;
4514            int slen;
4515
4516            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4517            slen = strlen((char*)gspec + spec_size) + 1;
4518            next = spec->next;
4519            spec->next = sizeof(*spec) + slen;
4520            strcpy((char*)&spec[1], gspec + spec_size);
4521            gspec += next;
4522            cur_data += spec->next;
4523        }
4524        break;
4525    }
4526    default:
4527        ret = -TARGET_EINVAL;
4528        unlock_user(argptr, guest_data, 0);
4529        goto out;
4530    }
4531    unlock_user(argptr, guest_data, 0);
4532
4533    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4534    if (!is_error(ret)) {
4535        guest_data = arg + host_dm->data_start;
4536        guest_data_size = host_dm->data_size - host_dm->data_start;
4537        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4538        switch (ie->host_cmd) {
4539        case DM_REMOVE_ALL:
4540        case DM_DEV_CREATE:
4541        case DM_DEV_REMOVE:
4542        case DM_DEV_RENAME:
4543        case DM_DEV_SUSPEND:
4544        case DM_DEV_STATUS:
4545        case DM_TABLE_LOAD:
4546        case DM_TABLE_CLEAR:
4547        case DM_TARGET_MSG:
4548        case DM_DEV_SET_GEOMETRY:
4549            /* no return data */
4550            break;
4551        case DM_LIST_DEVICES:
4552        {
4553            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4554            uint32_t remaining_data = guest_data_size;
4555            void *cur_data = argptr;
4556            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4557            int nl_size = 12; /* can't use thunk_size due to alignment */
4558
4559            while (1) {
4560                uint32_t next = nl->next;
4561                if (next) {
4562                    nl->next = nl_size + (strlen(nl->name) + 1);
4563                }
4564                if (remaining_data < nl->next) {
4565                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
4566                    break;
4567                }
4568                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4569                strcpy(cur_data + nl_size, nl->name);
4570                cur_data += nl->next;
4571                remaining_data -= nl->next;
4572                if (!next) {
4573                    break;
4574                }
4575                nl = (void*)nl + next;
4576            }
4577            break;
4578        }
4579        case DM_DEV_WAIT:
4580        case DM_TABLE_STATUS:
4581        {
4582            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4583            void *cur_data = argptr;
4584            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4585            int spec_size = thunk_type_size(arg_type, 0);
4586            int i;
4587
4588            for (i = 0; i < host_dm->target_count; i++) {
4589                uint32_t next = spec->next;
4590                int slen = strlen((char*)&spec[1]) + 1;
4591                spec->next = (cur_data - argptr) + spec_size + slen;
4592                if (guest_data_size < spec->next) {
4593                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
4594                    break;
4595                }
4596                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4597                strcpy(cur_data + spec_size, (char*)&spec[1]);
4598                cur_data = argptr + spec->next;
4599                spec = (void*)host_dm + host_dm->data_start + next;
4600            }
4601            break;
4602        }
4603        case DM_TABLE_DEPS:
4604        {
4605            void *hdata = (void*)host_dm + host_dm->data_start;
4606            int count = *(uint32_t*)hdata;
4607            uint64_t *hdev = hdata + 8;
4608            uint64_t *gdev = argptr + 8;
4609            int i;
4610
4611            *(uint32_t*)argptr = tswap32(count);
4612            for (i = 0; i < count; i++) {
4613                *gdev = tswap64(*hdev);
4614                gdev++;
4615                hdev++;
4616            }
4617            break;
4618        }
4619        case DM_LIST_VERSIONS:
4620        {
4621            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4622            uint32_t remaining_data = guest_data_size;
4623            void *cur_data = argptr;
4624            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4625            int vers_size = thunk_type_size(arg_type, 0);
4626
4627            while (1) {
4628                uint32_t next = vers->next;
4629                if (next) {
4630                    vers->next = vers_size + (strlen(vers->name) + 1);
4631                }
4632                if (remaining_data < vers->next) {
4633                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
4634                    break;
4635                }
4636                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4637                strcpy(cur_data + vers_size, vers->name);
4638                cur_data += vers->next;
4639                remaining_data -= vers->next;
4640                if (!next) {
4641                    break;
4642                }
4643                vers = (void*)vers + next;
4644            }
4645            break;
4646        }
4647        default:
4648            unlock_user(argptr, guest_data, 0);
4649            ret = -TARGET_EINVAL;
4650            goto out;
4651        }
4652        unlock_user(argptr, guest_data, guest_data_size);
4653
4654        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4655        if (!argptr) {
4656            ret = -TARGET_EFAULT;
4657            goto out;
4658        }
4659        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4660        unlock_user(argptr, arg, target_size);
4661    }
4662out:
4663    g_free(big_buf);
4664    return ret;
4665}
4666
4667static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4668                               int cmd, abi_long arg)
4669{
4670    void *argptr;
4671    int target_size;
4672    const argtype *arg_type = ie->arg_type;
4673    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4674    abi_long ret;
4675
4676    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4677    struct blkpg_partition host_part;
4678
4679    /* Read and convert blkpg */
4680    arg_type++;
4681    target_size = thunk_type_size(arg_type, 0);
4682    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4683    if (!argptr) {
4684        ret = -TARGET_EFAULT;
4685        goto out;
4686    }
4687    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4688    unlock_user(argptr, arg, 0);
4689
4690    switch (host_blkpg->op) {
4691    case BLKPG_ADD_PARTITION:
4692    case BLKPG_DEL_PARTITION:
4693        /* payload is struct blkpg_partition */
4694        break;
4695    default:
4696        /* Unknown opcode */
4697        ret = -TARGET_EINVAL;
4698        goto out;
4699    }
4700
4701    /* Read and convert blkpg->data */
4702    arg = (abi_long)(uintptr_t)host_blkpg->data;
4703    target_size = thunk_type_size(part_arg_type, 0);
4704    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4705    if (!argptr) {
4706        ret = -TARGET_EFAULT;
4707        goto out;
4708    }
4709    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4710    unlock_user(argptr, arg, 0);
4711
4712    /* Swizzle the data pointer to our local copy and call! */
4713    host_blkpg->data = &host_part;
4714    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4715
4716out:
4717    return ret;
4718}
4719
4720static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4721                                int fd, int cmd, abi_long arg)
4722{
4723    const argtype *arg_type = ie->arg_type;
4724    const StructEntry *se;
4725    const argtype *field_types;
4726    const int *dst_offsets, *src_offsets;
4727    int target_size;
4728    void *argptr;
4729    abi_ulong *target_rt_dev_ptr = NULL;
4730    unsigned long *host_rt_dev_ptr = NULL;
4731    abi_long ret;
4732    int i;
4733
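        /*
         * struct rtentry contains a rt_dev pointer to a device name string in
         * guest memory.  The generic thunk conversion cannot follow that
         * pointer, so convert the structure field by field here and lock the
         * string separately.
         */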
4734    assert(ie->access == IOC_W);
4735    assert(*arg_type == TYPE_PTR);
4736    arg_type++;
4737    assert(*arg_type == TYPE_STRUCT);
4738    target_size = thunk_type_size(arg_type, 0);
4739    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4740    if (!argptr) {
4741        return -TARGET_EFAULT;
4742    }
4743    arg_type++;
4744    assert(*arg_type == (int)STRUCT_rtentry);
4745    se = struct_entries + *arg_type++;
4746    assert(se->convert[0] == NULL);
4747    /* convert the struct here so that we can catch the rt_dev string */
4748    field_types = se->field_types;
4749    dst_offsets = se->field_offsets[THUNK_HOST];
4750    src_offsets = se->field_offsets[THUNK_TARGET];
4751    for (i = 0; i < se->nb_fields; i++) {
4752        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4753            assert(*field_types == TYPE_PTRVOID);
4754            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4755            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4756            if (*target_rt_dev_ptr != 0) {
4757                *host_rt_dev_ptr = (unsigned long)lock_user_string(
4758                                                  tswapal(*target_rt_dev_ptr));
4759                if (!*host_rt_dev_ptr) {
4760                    unlock_user(argptr, arg, 0);
4761                    return -TARGET_EFAULT;
4762                }
4763            } else {
4764                *host_rt_dev_ptr = 0;
4765            }
4766            field_types++;
4767            continue;
4768        }
4769        field_types = thunk_convert(buf_temp + dst_offsets[i],
4770                                    argptr + src_offsets[i],
4771                                    field_types, THUNK_HOST);
4772    }
4773    unlock_user(argptr, arg, 0);
4774
4775    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4776
4777    assert(host_rt_dev_ptr != NULL);
4778    assert(target_rt_dev_ptr != NULL);
4779    if (*host_rt_dev_ptr != 0) {
4780        unlock_user((void *)*host_rt_dev_ptr,
4781                    *target_rt_dev_ptr, 0);
4782    }
4783    return ret;
4784}
4785
4786static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4787                                     int fd, int cmd, abi_long arg)
4788{
4789    int sig = target_to_host_signal(arg);
4790    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
4791}
4792
4793#ifdef TIOCGPTPEER
4794static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
4795                                     int fd, int cmd, abi_long arg)
4796{
4797    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
4798    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
4799}
4800#endif
4801
4802static IOCTLEntry ioctl_entries[] = {
4803#define IOCTL(cmd, access, ...) \
4804    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4805#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
4806    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4807#define IOCTL_IGNORE(cmd) \
4808    { TARGET_ ## cmd, 0, #cmd },
4809#include "ioctls.h"
4810    { 0, 0, },
4811};
4812
4813/* ??? Implement proper locking for ioctls.  */
4814    /* do_ioctl() must return target values and target errnos. */
4815static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4816{
4817    const IOCTLEntry *ie;
4818    const argtype *arg_type;
4819    abi_long ret;
4820    uint8_t buf_temp[MAX_STRUCT_SIZE];
4821    int target_size;
4822    void *argptr;
4823
4824    ie = ioctl_entries;
4825    for(;;) {
4826        if (ie->target_cmd == 0) {
4827            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4828            return -TARGET_ENOSYS;
4829        }
4830        if (ie->target_cmd == cmd)
4831            break;
4832        ie++;
4833    }
4834    arg_type = ie->arg_type;
4835    if (ie->do_ioctl) {
4836        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4837    } else if (!ie->host_cmd) {
4838        /* Some architectures define BSD ioctls in their headers
4839           that are not implemented in Linux.  */
4840        return -TARGET_ENOSYS;
4841    }
4842
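        /*
         * Generic marshalling driven by the argument type description:
         * plain integers are passed straight through, while pointer arguments
         * are converted via buf_temp.  For IOC_R run the ioctl on buf_temp and
         * copy the result out to the guest, for IOC_W copy the guest argument
         * in first, and for IOC_RW do both.
         */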
4843    switch(arg_type[0]) {
4844    case TYPE_NULL:
4845        /* no argument */
4846        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
4847        break;
4848    case TYPE_PTRVOID:
4849    case TYPE_INT:
4850        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
4851        break;
4852    case TYPE_PTR:
4853        arg_type++;
4854        target_size = thunk_type_size(arg_type, 0);
4855        switch(ie->access) {
4856        case IOC_R:
4857            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4858            if (!is_error(ret)) {
4859                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4860                if (!argptr)
4861                    return -TARGET_EFAULT;
4862                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4863                unlock_user(argptr, arg, target_size);
4864            }
4865            break;
4866        case IOC_W:
4867            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4868            if (!argptr)
4869                return -TARGET_EFAULT;
4870            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4871            unlock_user(argptr, arg, 0);
4872            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4873            break;
4874        default:
4875        case IOC_RW:
4876            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4877            if (!argptr)
4878                return -TARGET_EFAULT;
4879            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4880            unlock_user(argptr, arg, 0);
4881            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4882            if (!is_error(ret)) {
4883                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4884                if (!argptr)
4885                    return -TARGET_EFAULT;
4886                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4887                unlock_user(argptr, arg, target_size);
4888            }
4889            break;
4890        }
4891        break;
4892    default:
4893        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4894                 (long)cmd, arg_type[0]);
4895        ret = -TARGET_ENOSYS;
4896        break;
4897    }
4898    return ret;
4899}
4900
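        /*
         * Each entry below is { target mask, target bits, host mask, host bits }:
         * a field is translated when the masked input value equals the listed
         * bit pattern for its side.
         */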
4901static const bitmask_transtbl iflag_tbl[] = {
4902        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4903        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4904        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4905        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4906        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4907        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4908        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4909        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4910        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4911        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4912        { TARGET_IXON, TARGET_IXON, IXON, IXON },
4913        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4914        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4915        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4916        { 0, 0, 0, 0 }
4917};
4918
4919static const bitmask_transtbl oflag_tbl[] = {
4920        { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4921        { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4922        { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4923        { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4924        { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4925        { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4926        { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4927        { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4928        { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4929        { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4930        { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4931        { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4932        { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4933        { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4934        { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4935        { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4936        { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4937        { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4938        { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4939        { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4940        { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4941        { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4942        { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4943        { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4944        { 0, 0, 0, 0 }
4945};
4946
4947static const bitmask_transtbl cflag_tbl[] = {
4948        { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4949        { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4950        { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4951        { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4952        { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4953        { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4954        { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4955        { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4956        { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4957        { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4958        { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4959        { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4960        { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4961        { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4962        { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4963        { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4964        { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4965        { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4966        { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4967        { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4968        { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4969        { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4970        { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4971        { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4972        { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4973        { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4974        { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4975        { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4976        { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4977        { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4978        { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4979        { 0, 0, 0, 0 }
4980};
4981
4982static const bitmask_transtbl lflag_tbl[] = {
4983        { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4984        { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4985        { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4986        { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4987        { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4988        { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4989        { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4990        { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4991        { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4992        { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4993        { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4994        { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4995        { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4996        { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4997        { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4998        { 0, 0, 0, 0 }
4999};
5000
5001static void target_to_host_termios (void *dst, const void *src)
5002{
5003    struct host_termios *host = dst;
5004    const struct target_termios *target = src;
5005
5006    host->c_iflag =
5007        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5008    host->c_oflag =
5009        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5010    host->c_cflag =
5011        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5012    host->c_lflag =
5013        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5014    host->c_line = target->c_line;
5015
5016    memset(host->c_cc, 0, sizeof(host->c_cc));
5017    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5018    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5019    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5020    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5021    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5022    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5023    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5024    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5025    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5026    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5027    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5028    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5029    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5030    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5031    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5032    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5033    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5034}
5035
5036static void host_to_target_termios (void *dst, const void *src)
5037{
5038    struct target_termios *target = dst;
5039    const struct host_termios *host = src;
5040
5041    target->c_iflag =
5042        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5043    target->c_oflag =
5044        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5045    target->c_cflag =
5046        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5047    target->c_lflag =
5048        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5049    target->c_line = host->c_line;
5050
5051    memset(target->c_cc, 0, sizeof(target->c_cc));
5052    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5053    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5054    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5055    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5056    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5057    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5058    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5059    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5060    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5061    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5062    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5063    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5064    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5065    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5066    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5067    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5068    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5069}
5070
5071static const StructEntry struct_termios_def = {
5072    .convert = { host_to_target_termios, target_to_host_termios },
5073    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5074    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5075};
5076
5077static bitmask_transtbl mmap_flags_tbl[] = {
5078    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5079    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5080    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5081    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5082      MAP_ANONYMOUS, MAP_ANONYMOUS },
5083    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5084      MAP_GROWSDOWN, MAP_GROWSDOWN },
5085    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5086      MAP_DENYWRITE, MAP_DENYWRITE },
5087    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5088      MAP_EXECUTABLE, MAP_EXECUTABLE },
5089    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5090    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5091      MAP_NORESERVE, MAP_NORESERVE },
5092    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5093    /* MAP_STACK had been ignored by the kernel for quite some time.
5094       Recognize it for the target insofar as we do not want to pass
5095       it through to the host.  */
5096    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5097    { 0, 0, 0, 0 }
5098};
5099
5100#if defined(TARGET_I386)
5101
5102    /* NOTE: there is really only one LDT shared by all the threads */
5103static uint8_t *ldt_table;
5104
5105static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5106{
5107    int size;
5108    void *p;
5109
5110    if (!ldt_table)
5111        return 0;
5112    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5113    if (size > bytecount)
5114        size = bytecount;
5115    p = lock_user(VERIFY_WRITE, ptr, size, 0);
5116    if (!p)
5117        return -TARGET_EFAULT;
5118    /* ??? Should this be byteswapped?  */
5119    memcpy(p, ldt_table, size);
5120    unlock_user(p, ptr, size);
5121    return size;
5122}
5123
5124/* XXX: add locking support */
5125static abi_long write_ldt(CPUX86State *env,
5126                          abi_ulong ptr, unsigned long bytecount, int oldmode)
5127{
5128    struct target_modify_ldt_ldt_s ldt_info;
5129    struct target_modify_ldt_ldt_s *target_ldt_info;
5130    int seg_32bit, contents, read_exec_only, limit_in_pages;
5131    int seg_not_present, useable, lm;
5132    uint32_t *lp, entry_1, entry_2;
5133
5134    if (bytecount != sizeof(ldt_info))
5135        return -TARGET_EINVAL;
5136    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5137        return -TARGET_EFAULT;
5138    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5139    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5140    ldt_info.limit = tswap32(target_ldt_info->limit);
5141    ldt_info.flags = tswap32(target_ldt_info->flags);
5142    unlock_user_struct(target_ldt_info, ptr, 0);
5143
5144    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5145        return -TARGET_EINVAL;
5146    seg_32bit = ldt_info.flags & 1;
5147    contents = (ldt_info.flags >> 1) & 3;
5148    read_exec_only = (ldt_info.flags >> 3) & 1;
5149    limit_in_pages = (ldt_info.flags >> 4) & 1;
5150    seg_not_present = (ldt_info.flags >> 5) & 1;
5151    useable = (ldt_info.flags >> 6) & 1;
5152#ifdef TARGET_ABI32
5153    lm = 0;
5154#else
5155    lm = (ldt_info.flags >> 7) & 1;
5156#endif
5157    if (contents == 3) {
5158        if (oldmode)
5159            return -TARGET_EINVAL;
5160        if (seg_not_present == 0)
5161            return -TARGET_EINVAL;
5162    }
5163    /* allocate the LDT */
5164    if (!ldt_table) {
5165        env->ldt.base = target_mmap(0,
5166                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5167                                    PROT_READ|PROT_WRITE,
5168                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5169        if (env->ldt.base == -1)
5170            return -TARGET_ENOMEM;
5171        memset(g2h(env->ldt.base), 0,
5172               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5173        env->ldt.limit = 0xffff;
5174        ldt_table = g2h(env->ldt.base);
5175    }
5176
5177    /* NOTE: same code as Linux kernel */
5178    /* Allow LDTs to be cleared by the user. */
5179    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5180        if (oldmode ||
5181            (contents == 0              &&
5182             read_exec_only == 1        &&
5183             seg_32bit == 0             &&
5184             limit_in_pages == 0        &&
5185             seg_not_present == 1       &&
5186             useable == 0 )) {
5187            entry_1 = 0;
5188            entry_2 = 0;
5189            goto install;
5190        }
5191    }
5192
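        /*
         * Build the two 32-bit words of an x86 segment descriptor:
         * entry_1 holds base[15:0] and limit[15:0]; entry_2 holds base[31:24],
         * base[23:16], limit[19:16] and the access/flag bits (0x7000 sets the
         * S bit and DPL 3).
         */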
5193    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5194        (ldt_info.limit & 0x0ffff);
5195    entry_2 = (ldt_info.base_addr & 0xff000000) |
5196        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5197        (ldt_info.limit & 0xf0000) |
5198        ((read_exec_only ^ 1) << 9) |
5199        (contents << 10) |
5200        ((seg_not_present ^ 1) << 15) |
5201        (seg_32bit << 22) |
5202        (limit_in_pages << 23) |
5203        (lm << 21) |
5204        0x7000;
5205    if (!oldmode)
5206        entry_2 |= (useable << 20);
5207
5208    /* Install the new entry ...  */
5209install:
5210    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5211    lp[0] = tswap32(entry_1);
5212    lp[1] = tswap32(entry_2);
5213    return 0;
5214}
5215
5216/* specific and weird i386 syscalls */
5217static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5218                              unsigned long bytecount)
5219{
5220    abi_long ret;
5221
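        /*
         * The function codes follow the kernel's modify_ldt(): 0 reads the
         * LDT, 1 writes an entry using the old-mode semantics, 0x11 writes
         * using the newer semantics.
         */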
5222    switch (func) {
5223    case 0:
5224        ret = read_ldt(ptr, bytecount);
5225        break;
5226    case 1:
5227        ret = write_ldt(env, ptr, bytecount, 1);
5228        break;
5229    case 0x11:
5230        ret = write_ldt(env, ptr, bytecount, 0);
5231        break;
5232    default:
5233        ret = -TARGET_ENOSYS;
5234        break;
5235    }
5236    return ret;
5237}
5238
5239#if defined(TARGET_I386) && defined(TARGET_ABI32)
5240abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5241{
5242    uint64_t *gdt_table = g2h(env->gdt.base);
5243    struct target_modify_ldt_ldt_s ldt_info;
5244    struct target_modify_ldt_ldt_s *target_ldt_info;
5245    int seg_32bit, contents, read_exec_only, limit_in_pages;
5246    int seg_not_present, useable, lm;
5247    uint32_t *lp, entry_1, entry_2;
5248    int i;
5249
5250    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5251    if (!target_ldt_info)
5252        return -TARGET_EFAULT;
5253    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5254    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5255    ldt_info.limit = tswap32(target_ldt_info->limit);
5256    ldt_info.flags = tswap32(target_ldt_info->flags);
5257    if (ldt_info.entry_number == -1) {
5258        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5259            if (gdt_table[i] == 0) {
5260                ldt_info.entry_number = i;
5261                target_ldt_info->entry_number = tswap32(i);
5262                break;
5263            }
5264        }
5265    }
5266    unlock_user_struct(target_ldt_info, ptr, 1);
5267
5268        if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5269            ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5270            return -TARGET_EINVAL;
5271    seg_32bit = ldt_info.flags & 1;
5272    contents = (ldt_info.flags >> 1) & 3;
5273    read_exec_only = (ldt_info.flags >> 3) & 1;
5274    limit_in_pages = (ldt_info.flags >> 4) & 1;
5275    seg_not_present = (ldt_info.flags >> 5) & 1;
5276    useable = (ldt_info.flags >> 6) & 1;
5277#ifdef TARGET_ABI32
5278    lm = 0;
5279#else
5280    lm = (ldt_info.flags >> 7) & 1;
5281#endif
5282
5283    if (contents == 3) {
5284        if (seg_not_present == 0)
5285            return -TARGET_EINVAL;
5286    }
5287
5288    /* NOTE: same code as Linux kernel */
5289    /* Allow LDTs to be cleared by the user. */
5290    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5291        if ((contents == 0             &&
5292             read_exec_only == 1       &&
5293             seg_32bit == 0            &&
5294             limit_in_pages == 0       &&
5295             seg_not_present == 1      &&
5296             useable == 0 )) {
5297            entry_1 = 0;
5298            entry_2 = 0;
5299            goto install;
5300        }
5301    }
5302
5303    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5304        (ldt_info.limit & 0x0ffff);
5305    entry_2 = (ldt_info.base_addr & 0xff000000) |
5306        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5307        (ldt_info.limit & 0xf0000) |
5308        ((read_exec_only ^ 1) << 9) |
5309        (contents << 10) |
5310        ((seg_not_present ^ 1) << 15) |
5311        (seg_32bit << 22) |
5312        (limit_in_pages << 23) |
5313        (useable << 20) |
5314        (lm << 21) |
5315        0x7000;
5316
5317    /* Install the new entry ...  */
5318install:
5319    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5320    lp[0] = tswap32(entry_1);
5321    lp[1] = tswap32(entry_2);
5322    return 0;
5323}
5324
5325static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5326{
5327    struct target_modify_ldt_ldt_s *target_ldt_info;
5328    uint64_t *gdt_table = g2h(env->gdt.base);
5329    uint32_t base_addr, limit, flags;
5330    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5331    int seg_not_present, useable, lm;
5332    uint32_t *lp, entry_1, entry_2;
5333
5334    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5335    if (!target_ldt_info)
5336        return -TARGET_EFAULT;
5337    idx = tswap32(target_ldt_info->entry_number);
5338    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5339        idx > TARGET_GDT_ENTRY_TLS_MAX) {
5340        unlock_user_struct(target_ldt_info, ptr, 1);
5341        return -TARGET_EINVAL;
5342    }
5343    lp = (uint32_t *)(gdt_table + idx);
5344    entry_1 = tswap32(lp[0]);
5345    entry_2 = tswap32(lp[1]);
5346
5347    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5348    contents = (entry_2 >> 10) & 3;
5349    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5350    seg_32bit = (entry_2 >> 22) & 1;
5351    limit_in_pages = (entry_2 >> 23) & 1;
5352    useable = (entry_2 >> 20) & 1;
5353#ifdef TARGET_ABI32
5354    lm = 0;
5355#else
5356    lm = (entry_2 >> 21) & 1;
5357#endif
5358    flags = (seg_32bit << 0) | (contents << 1) |
5359        (read_exec_only << 3) | (limit_in_pages << 4) |
5360        (seg_not_present << 5) | (useable << 6) | (lm << 7);
5361    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
5362    base_addr = (entry_1 >> 16) |
5363        (entry_2 & 0xff000000) |
5364        ((entry_2 & 0xff) << 16);
5365    target_ldt_info->base_addr = tswapal(base_addr);
5366    target_ldt_info->limit = tswap32(limit);
5367    target_ldt_info->flags = tswap32(flags);
5368    unlock_user_struct(target_ldt_info, ptr, 1);
5369    return 0;
5370}
5371#endif /* TARGET_I386 && TARGET_ABI32 */
5372
5373#ifndef TARGET_ABI32
5374abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5375{
5376    abi_long ret = 0;
5377    abi_ulong val;
5378    int idx;
5379
5380    switch(code) {
5381    case TARGET_ARCH_SET_GS:
5382    case TARGET_ARCH_SET_FS:
5383        if (code == TARGET_ARCH_SET_GS)
5384            idx = R_GS;
5385        else
5386            idx = R_FS;
5387        cpu_x86_load_seg(env, idx, 0);
5388        env->segs[idx].base = addr;
5389        break;
5390    case TARGET_ARCH_GET_GS:
5391    case TARGET_ARCH_GET_FS:
5392        if (code == TARGET_ARCH_GET_GS)
5393            idx = R_GS;
5394        else
5395            idx = R_FS;
5396        val = env->segs[idx].base;
5397        if (put_user(val, addr, abi_ulong))
5398            ret = -TARGET_EFAULT;
5399        break;
5400    default:
5401        ret = -TARGET_EINVAL;
5402        break;
5403    }
5404    return ret;
5405}
5406#endif
5407
5408#endif /* defined(TARGET_I386) */
5409
5410#define NEW_STACK_SIZE 0x40000
5411
5412
5413static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5414typedef struct {
5415    CPUArchState *env;
5416    pthread_mutex_t mutex;
5417    pthread_cond_t cond;
5418    pthread_t thread;
5419    uint32_t tid;
5420    abi_ulong child_tidptr;
5421    abi_ulong parent_tidptr;
5422    sigset_t sigmask;
5423} new_thread_info;
5424
5425static void *clone_func(void *arg)
5426{
5427    new_thread_info *info = arg;
5428    CPUArchState *env;
5429    CPUState *cpu;
5430    TaskState *ts;
5431
5432    rcu_register_thread();
5433    tcg_register_thread();
5434    env = info->env;
5435    cpu = ENV_GET_CPU(env);
5436    thread_cpu = cpu;
5437    ts = (TaskState *)cpu->opaque;
5438    info->tid = sys_gettid();
5439    task_settid(ts);
5440    if (info->child_tidptr)
5441        put_user_u32(info->tid, info->child_tidptr);
5442    if (info->parent_tidptr)
5443        put_user_u32(info->tid, info->parent_tidptr);
5444    /* Enable signals.  */
5445    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5446    /* Signal to the parent that we're ready.  */
5447    pthread_mutex_lock(&info->mutex);
5448    pthread_cond_broadcast(&info->cond);
5449    pthread_mutex_unlock(&info->mutex);
5450    /* Wait until the parent has finished initializing the tls state.  */
5451    pthread_mutex_lock(&clone_lock);
5452    pthread_mutex_unlock(&clone_lock);
5453    cpu_loop(env);
5454    /* never exits */
5455    return NULL;
5456}
5457
5458    /* do_fork() must return host values and target errnos (unlike most
5459   do_*() functions). */
5460static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5461                   abi_ulong parent_tidptr, target_ulong newtls,
5462                   abi_ulong child_tidptr)
5463{
5464    CPUState *cpu = ENV_GET_CPU(env);
5465    int ret;
5466    TaskState *ts;
5467    CPUState *new_cpu;
5468    CPUArchState *new_env;
5469    sigset_t sigmask;
5470
5471    flags &= ~CLONE_IGNORED_FLAGS;
5472
5473    /* Emulate vfork() with fork() */
5474    if (flags & CLONE_VFORK)
5475        flags &= ~(CLONE_VFORK | CLONE_VM);
5476
5477    if (flags & CLONE_VM) {
5478        TaskState *parent_ts = (TaskState *)cpu->opaque;
5479        new_thread_info info;
5480        pthread_attr_t attr;
5481
5482        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5483            (flags & CLONE_INVALID_THREAD_FLAGS)) {
5484            return -TARGET_EINVAL;
5485        }
5486
5487        ts = g_new0(TaskState, 1);
5488        init_task_state(ts);
5489
5490        /* Grab a mutex so that thread setup appears atomic.  */
5491        pthread_mutex_lock(&clone_lock);
5492
5493        /* we create a new CPU instance. */
5494        new_env = cpu_copy(env);
5495        /* Init regs that differ from the parent.  */
5496        cpu_clone_regs(new_env, newsp);
5497        new_cpu = ENV_GET_CPU(new_env);
5498        new_cpu->opaque = ts;
5499        ts->bprm = parent_ts->bprm;
5500        ts->info = parent_ts->info;
5501        ts->signal_mask = parent_ts->signal_mask;
5502
5503        if (flags & CLONE_CHILD_CLEARTID) {
5504            ts->child_tidptr = child_tidptr;
5505        }
5506
5507        if (flags & CLONE_SETTLS) {
5508            cpu_set_tls (new_env, newtls);
5509        }
5510
5511        memset(&info, 0, sizeof(info));
5512        pthread_mutex_init(&info.mutex, NULL);
5513        pthread_mutex_lock(&info.mutex);
5514        pthread_cond_init(&info.cond, NULL);
5515        info.env = new_env;
5516        if (flags & CLONE_CHILD_SETTID) {
5517            info.child_tidptr = child_tidptr;
5518        }
5519        if (flags & CLONE_PARENT_SETTID) {
5520            info.parent_tidptr = parent_tidptr;
5521        }
5522
5523        ret = pthread_attr_init(&attr);
5524        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5525        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5526        /* It is not safe to deliver signals until the child has finished
5527           initializing, so temporarily block all signals.  */
5528        sigfillset(&sigmask);
5529        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5530
5531        /* If this is our first additional thread, we need to ensure we
5532         * generate code for parallel execution and flush old translations.
5533         */
5534        if (!parallel_cpus) {
5535            parallel_cpus = true;
5536            tb_flush(cpu);
5537        }
5538
5539        ret = pthread_create(&info.thread, &attr, clone_func, &info);
5540        /* TODO: Free new CPU state if thread creation failed.  */
5541
5542        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5543        pthread_attr_destroy(&attr);
5544        if (ret == 0) {
5545            /* Wait for the child to initialize.  */
5546            pthread_cond_wait(&info.cond, &info.mutex);
5547            ret = info.tid;
5548        } else {
5549            ret = -1;
5550        }
5551        pthread_mutex_unlock(&info.mutex);
5552        pthread_cond_destroy(&info.cond);
5553        pthread_mutex_destroy(&info.mutex);
5554        pthread_mutex_unlock(&clone_lock);
5555    } else {
5556        /* if there is no CLONE_VM, we consider it a fork */
5557        if (flags & CLONE_INVALID_FORK_FLAGS) {
5558            return -TARGET_EINVAL;
5559        }
5560
5561        /* We can't support custom termination signals */
5562        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5563            return -TARGET_EINVAL;
5564        }
5565
5566        if (block_signals()) {
5567            return -TARGET_ERESTARTSYS;
5568        }
5569
5570        fork_start();
5571        ret = fork();
5572        if (ret == 0) {
5573            /* Child Process.  */
5574            cpu_clone_regs(env, newsp);
5575            fork_end(1);
5576            /* There is a race condition here.  The parent process could
5577               theoretically read the TID in the child process before the child
5578               tid is set.  Fixing this would require using either ptrace
5579               (not implemented) or having *_tidptr point at a shared memory
5580               mapping.  We can't repeat the spinlock hack used above because
5581               the child process gets its own copy of the lock.  */
5582            if (flags & CLONE_CHILD_SETTID)
5583                put_user_u32(sys_gettid(), child_tidptr);
5584            if (flags & CLONE_PARENT_SETTID)
5585                put_user_u32(sys_gettid(), parent_tidptr);
5586            ts = (TaskState *)cpu->opaque;
5587            if (flags & CLONE_SETTLS)
5588                cpu_set_tls (env, newtls);
5589            if (flags & CLONE_CHILD_CLEARTID)
5590                ts->child_tidptr = child_tidptr;
5591        } else {
5592            fork_end(0);
5593        }
5594    }
5595    return ret;
5596}
5597
5598    /* Warning: does not handle Linux-specific flags... */
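        /*
         * Note that plain F_GETLK/F_SETLK/F_SETLKW are mapped to the 64-bit
         * host commands, since do_fcntl() always works with a host
         * struct flock64.
         */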
5599static int target_to_host_fcntl_cmd(int cmd)
5600{
5601    int ret;
5602
5603    switch(cmd) {
5604    case TARGET_F_DUPFD:
5605    case TARGET_F_GETFD:
5606    case TARGET_F_SETFD:
5607    case TARGET_F_GETFL:
5608    case TARGET_F_SETFL:
5609        ret = cmd;
5610        break;
5611    case TARGET_F_GETLK:
5612        ret = F_GETLK64;
5613        break;
5614    case TARGET_F_SETLK:
5615        ret = F_SETLK64;
5616        break;
5617    case TARGET_F_SETLKW:
5618        ret = F_SETLKW64;
5619        break;
5620    case TARGET_F_GETOWN:
5621        ret = F_GETOWN;
5622        break;
5623    case TARGET_F_SETOWN:
5624        ret = F_SETOWN;
5625        break;
5626    case TARGET_F_GETSIG:
5627        ret = F_GETSIG;
5628        break;
5629    case TARGET_F_SETSIG:
5630        ret = F_SETSIG;
5631        break;
5632#if TARGET_ABI_BITS == 32
5633    case TARGET_F_GETLK64:
5634        ret = F_GETLK64;
5635        break;
5636    case TARGET_F_SETLK64:
5637        ret = F_SETLK64;
5638        break;
5639    case TARGET_F_SETLKW64:
5640        ret = F_SETLKW64;
5641        break;
5642#endif
5643    case TARGET_F_SETLEASE:
5644        ret = F_SETLEASE;
5645        break;
5646    case TARGET_F_GETLEASE:
5647        ret = F_GETLEASE;
5648        break;
5649#ifdef F_DUPFD_CLOEXEC
5650    case TARGET_F_DUPFD_CLOEXEC:
5651        ret = F_DUPFD_CLOEXEC;
5652        break;
5653#endif
5654    case TARGET_F_NOTIFY:
5655        ret = F_NOTIFY;
5656        break;
5657#ifdef F_GETOWN_EX
5658    case TARGET_F_GETOWN_EX:
5659        ret = F_GETOWN_EX;
5660        break;
5661#endif
5662#ifdef F_SETOWN_EX
5663    case TARGET_F_SETOWN_EX:
5664        ret = F_SETOWN_EX;
5665        break;
5666#endif
5667#ifdef F_SETPIPE_SZ
5668    case TARGET_F_SETPIPE_SZ:
5669        ret = F_SETPIPE_SZ;
5670        break;
5671    case TARGET_F_GETPIPE_SZ:
5672        ret = F_GETPIPE_SZ;
5673        break;
5674#endif
5675    default:
5676        ret = -TARGET_EINVAL;
5677        break;
5678    }
5679
5680#if defined(__powerpc64__)
5681    /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
5682     * which are not supported by the kernel. The glibc fcntl call actually
5683     * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
5684     * the syscall directly, adjust to what is supported by the kernel.
5685     */
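        /* With F_GETLK64 == 12 this maps 12 -> 5, 13 -> 6 and 14 -> 7. */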
5686    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5687        ret -= F_GETLK64 - 5;
5688    }
5689#endif
5690
5691    return ret;
5692}
5693
5694#define FLOCK_TRANSTBL \
5695    switch (type) { \
5696    TRANSTBL_CONVERT(F_RDLCK); \
5697    TRANSTBL_CONVERT(F_WRLCK); \
5698    TRANSTBL_CONVERT(F_UNLCK); \
5699    TRANSTBL_CONVERT(F_EXLCK); \
5700    TRANSTBL_CONVERT(F_SHLCK); \
5701    }
5702
5703static int target_to_host_flock(int type)
5704{
5705#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5706    FLOCK_TRANSTBL
5707#undef  TRANSTBL_CONVERT
5708    return -TARGET_EINVAL;
5709}
5710
5711static int host_to_target_flock(int type)
5712{
5713#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5714    FLOCK_TRANSTBL
5715#undef  TRANSTBL_CONVERT
5716    /* if we don't know how to convert the value coming
5717     * from the host, we copy it to the target field as-is
5718     */
5719    return type;
5720}
5721
5722static inline abi_long copy_from_user_flock(struct flock64 *fl,
5723                                            abi_ulong target_flock_addr)
5724{
5725    struct target_flock *target_fl;
5726    int l_type;
5727
5728    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5729        return -TARGET_EFAULT;
5730    }
5731
5732    __get_user(l_type, &target_fl->l_type);
5733    l_type = target_to_host_flock(l_type);
5734    if (l_type < 0) {
5735        return l_type;
5736    }
5737    fl->l_type = l_type;
5738    __get_user(fl->l_whence, &target_fl->l_whence);
5739    __get_user(fl->l_start, &target_fl->l_start);
5740    __get_user(fl->l_len, &target_fl->l_len);
5741    __get_user(fl->l_pid, &target_fl->l_pid);
5742    unlock_user_struct(target_fl, target_flock_addr, 0);
5743    return 0;
5744}
5745
5746static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5747                                          const struct flock64 *fl)
5748{
5749    struct target_flock *target_fl;
5750    short l_type;
5751
5752    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5753        return -TARGET_EFAULT;
5754    }
5755
5756    l_type = host_to_target_flock(fl->l_type);
5757    __put_user(l_type, &target_fl->l_type);
5758    __put_user(fl->l_whence, &target_fl->l_whence);
5759    __put_user(fl->l_start, &target_fl->l_start);
5760    __put_user(fl->l_len, &target_fl->l_len);
5761    __put_user(fl->l_pid, &target_fl->l_pid);
5762    unlock_user_struct(target_fl, target_flock_addr, 1);
5763    return 0;
5764}
5765
5766typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5767typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5768
5769#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
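        /*
         * The ARM OABI aligns 64-bit members only to 4 bytes, so its flock64
         * layout differs from the EABI one (no padding before l_start) and
         * needs these separate copy helpers.
         */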
5770static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5771                                                   abi_ulong target_flock_addr)
5772{
5773    struct target_oabi_flock64 *target_fl;
5774    int l_type;
5775
5776    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5777        return -TARGET_EFAULT;
5778    }
5779
5780    __get_user(l_type, &target_fl->l_type);
5781    l_type = target_to_host_flock(l_type);
5782    if (l_type < 0) {
5783        return l_type;
5784    }
5785    fl->l_type = l_type;
5786    __get_user(fl->l_whence, &target_fl->l_whence);
5787    __get_user(fl->l_start, &target_fl->l_start);
5788    __get_user(fl->l_len, &target_fl->l_len);
5789    __get_user(fl->l_pid, &target_fl->l_pid);
5790    unlock_user_struct(target_fl, target_flock_addr, 0);
5791    return 0;
5792}
5793
5794static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5795                                                 const struct flock64 *fl)
5796{
5797    struct target_oabi_flock64 *target_fl;
5798    short l_type;
5799
5800    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5801        return -TARGET_EFAULT;
5802    }
5803
5804    l_type = host_to_target_flock(fl->l_type);
5805    __put_user(l_type, &target_fl->l_type);
5806    __put_user(fl->l_whence, &target_fl->l_whence);
5807    __put_user(fl->l_start, &target_fl->l_start);
5808    __put_user(fl->l_len, &target_fl->l_len);
5809    __put_user(fl->l_pid, &target_fl->l_pid);
5810    unlock_user_struct(target_fl, target_flock_addr, 1);
5811    return 0;
5812}
5813#endif
5814
5815static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5816                                              abi_ulong target_flock_addr)
5817{
5818    struct target_flock64 *target_fl;
5819    int l_type;
5820
5821    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5822        return -TARGET_EFAULT;
5823    }
5824
5825    __get_user(l_type, &target_fl->l_type);
5826    l_type = target_to_host_flock(l_type);
5827    if (l_type < 0) {
5828        return l_type;
5829    }
5830    fl->l_type = l_type;
5831    __get_user(fl->l_whence, &target_fl->l_whence);
5832    __get_user(fl->l_start, &target_fl->l_start);
5833    __get_user(fl->l_len, &target_fl->l_len);
5834    __get_user(fl->l_pid, &target_fl->l_pid);
5835    unlock_user_struct(target_fl, target_flock_addr, 0);
5836    return 0;
5837}
5838
5839static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5840                                            const struct flock64 *fl)
5841{
5842    struct target_flock64 *target_fl;
5843    short l_type;
5844
5845    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5846        return -TARGET_EFAULT;
5847    }
5848
5849    l_type = host_to_target_flock(fl->l_type);
5850    __put_user(l_type, &target_fl->l_type);
5851    __put_user(fl->l_whence, &target_fl->l_whence);
5852    __put_user(fl->l_start, &target_fl->l_start);
5853    __put_user(fl->l_len, &target_fl->l_len);
5854    __put_user(fl->l_pid, &target_fl->l_pid);
5855    unlock_user_struct(target_fl, target_flock_addr, 1);
5856    return 0;
5857}
5858
5859static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5860{
5861    struct flock64 fl64;
5862#ifdef F_GETOWN_EX
5863    struct f_owner_ex fox;
5864    struct target_f_owner_ex *target_fox;
5865#endif
5866    abi_long ret;
5867    int host_cmd = target_to_host_fcntl_cmd(cmd);
5868
5869    if (host_cmd == -TARGET_EINVAL)
5870            return host_cmd;
5871
5872    switch(cmd) {
5873    case TARGET_F_GETLK:
5874        ret = copy_from_user_flock(&fl64, arg);
5875        if (ret) {
5876            return ret;
5877        }
5878        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5879        if (ret == 0) {
5880            ret = copy_to_user_flock(arg, &fl64);
5881        }
5882        break;
5883
5884    case TARGET_F_SETLK:
5885    case TARGET_F_SETLKW:
5886        ret = copy_from_user_flock(&fl64, arg);
5887        if (ret) {
5888            return ret;
5889        }
5890        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5891        break;
5892
5893    case TARGET_F_GETLK64:
5894        ret = copy_from_user_flock64(&fl64, arg);
5895        if (ret) {
5896            return ret;
5897        }
5898        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5899        if (ret == 0) {
5900            ret = copy_to_user_flock64(arg, &fl64);
5901        }
5902        break;
5903    case TARGET_F_SETLK64:
5904    case TARGET_F_SETLKW64:
5905        ret = copy_from_user_flock64(&fl64, arg);
5906        if (ret) {
5907            return ret;
5908        }
5909        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5910        break;
5911
5912    case TARGET_F_GETFL:
5913        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5914        if (ret >= 0) {
5915            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5916        }
5917        break;
5918
5919    case TARGET_F_SETFL:
5920        ret = get_errno(safe_fcntl(fd, host_cmd,
5921                                   target_to_host_bitmask(arg,
5922                                                          fcntl_flags_tbl)));
5923        break;
5924
5925#ifdef F_GETOWN_EX
5926    case TARGET_F_GETOWN_EX:
5927        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5928        if (ret >= 0) {
5929            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5930                return -TARGET_EFAULT;
5931            target_fox->type = tswap32(fox.type);
5932            target_fox->pid = tswap32(fox.pid);
5933            unlock_user_struct(target_fox, arg, 1);
5934        }
5935        break;
5936#endif
5937
5938#ifdef F_SETOWN_EX
5939    case TARGET_F_SETOWN_EX:
5940        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5941            return -TARGET_EFAULT;
5942        fox.type = tswap32(target_fox->type);
5943        fox.pid = tswap32(target_fox->pid);
5944        unlock_user_struct(target_fox, arg, 0);
5945        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5946        break;
5947#endif
5948
5949    case TARGET_F_SETOWN:
5950    case TARGET_F_GETOWN:
5951    case TARGET_F_SETSIG:
5952    case TARGET_F_GETSIG:
5953    case TARGET_F_SETLEASE:
5954    case TARGET_F_GETLEASE:
5955    case TARGET_F_SETPIPE_SZ:
5956    case TARGET_F_GETPIPE_SZ:
5957        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5958        break;
5959
5960    default:
5961        ret = get_errno(safe_fcntl(fd, cmd, arg));
5962        break;
5963    }
5964    return ret;
5965}
5966
5967#ifdef USE_UID16
5968
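        /*
         * Helpers for the 16-bit UID/GID ABI: IDs that do not fit are
         * reported as 65534 (the kernel's default overflowuid/overflowgid),
         * and 0xffff must be mapped back to -1 so that "leave unchanged"
         * arguments keep their meaning.
         */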
5969static inline int high2lowuid(int uid)
5970{
5971    if (uid > 65535)
5972        return 65534;
5973    else
5974        return uid;
5975}
5976
5977static inline int high2lowgid(int gid)
5978{
5979    if (gid > 65535)
5980        return 65534;
5981    else
5982        return gid;
5983}
5984
5985static inline int low2highuid(int uid)
5986{
5987    if ((int16_t)uid == -1)
5988        return -1;
5989    else
5990        return uid;
5991}
5992
5993static inline int low2highgid(int gid)
5994{
5995    if ((int16_t)gid == -1)
5996        return -1;
5997    else
5998        return gid;
5999}
6000static inline int tswapid(int id)
6001{
6002    return tswap16(id);
6003}
6004
6005#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6006
6007#else /* !USE_UID16 */
6008static inline int high2lowuid(int uid)
6009{
6010    return uid;
6011}
6012static inline int high2lowgid(int gid)
6013{
6014    return gid;
6015}
6016static inline int low2highuid(int uid)
6017{
6018    return uid;
6019}
6020static inline int low2highgid(int gid)
6021{
6022    return gid;
6023}
6024static inline int tswapid(int id)
6025{
6026    return tswap32(id);
6027}
6028
6029#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6030
6031#endif /* USE_UID16 */
6032
6033/* We must do direct syscalls for setting UID/GID, because we want to
6034 * implement the Linux system call semantics of "change only for this thread",
6035 * not the libc/POSIX semantics of "change for all threads in process".
6036 * (See http://ewontfix.com/17/ for more details.)
6037 * We use the 32-bit version of the syscalls if present; if it is not
6038 * then either the host architecture supports 32-bit UIDs natively with
6039 * the standard syscall, or the 16-bit UID is the best we can do.
6040 */
6041#ifdef __NR_setuid32
6042#define __NR_sys_setuid __NR_setuid32
6043#else
6044#define __NR_sys_setuid __NR_setuid
6045#endif
6046#ifdef __NR_setgid32
6047#define __NR_sys_setgid __NR_setgid32
6048#else
6049#define __NR_sys_setgid __NR_setgid
6050#endif
6051#ifdef __NR_setresuid32
6052#define __NR_sys_setresuid __NR_setresuid32
6053#else
6054#define __NR_sys_setresuid __NR_setresuid
6055#endif
6056#ifdef __NR_setresgid32
6057#define __NR_sys_setresgid __NR_setresgid32
6058#else
6059#define __NR_sys_setresgid __NR_setresgid
6060#endif
6061
6062_syscall1(int, sys_setuid, uid_t, uid)
6063_syscall1(int, sys_setgid, gid_t, gid)
6064_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6065_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6066
6067void syscall_init(void)
6068{
6069    IOCTLEntry *ie;
6070    const argtype *arg_type;
6071    int size;
6072    int i;
6073
6074    thunk_init(STRUCT_MAX);
6075
6076#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6077#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6078#include "syscall_types.h"
6079#undef STRUCT
6080#undef STRUCT_SPECIAL
6081
6082    /* Build target_to_host_errno_table[] table from
6083     * host_to_target_errno_table[]. */
6084    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6085        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6086    }
6087
6088    /* We patch the ioctl size if necessary.  We rely on the fact that
6089       no ioctl has all bits set in its size field. */
6090    ie = ioctl_entries;
6091    while (ie->target_cmd != 0) {
6092        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6093            TARGET_IOC_SIZEMASK) {
6094            arg_type = ie->arg_type;
6095            if (arg_type[0] != TYPE_PTR) {
6096                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6097                        ie->target_cmd);
6098                exit(1);
6099            }
6100            arg_type++;
6101            size = thunk_type_size(arg_type, 0);
6102            ie->target_cmd = (ie->target_cmd &
6103                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6104                (size << TARGET_IOC_SIZESHIFT);
6105        }
6106
6107        /* automatic consistency check if same arch */
6108#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6109    (defined(__x86_64__) && defined(TARGET_X86_64))
6110        if (unlikely(ie->target_cmd != ie->host_cmd)) {
6111            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6112                    ie->name, ie->target_cmd, ie->host_cmd);
6113        }
6114#endif
6115        ie++;
6116    }
6117}
6118
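        /*
         * On 32-bit targets a 64-bit syscall argument (e.g. a file offset)
         * arrives split across two registers; reassemble it in the target's
         * word order.  On 64-bit targets the first word already holds the
         * whole value.
         */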
6119#if TARGET_ABI_BITS == 32
6120static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6121{
6122#ifdef TARGET_WORDS_BIGENDIAN
6123    return ((uint64_t)word0 << 32) | word1;
6124#else
6125    return ((uint64_t)word1 << 32) | word0;
6126#endif
6127}
6128#else /* TARGET_ABI_BITS == 32 */
6129static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6130{
6131    return word0;
6132}
6133#endif /* TARGET_ABI_BITS != 32 */
6134
6135#ifdef TARGET_NR_truncate64
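        /*
         * Some target ABIs pass 64-bit syscall arguments in an aligned
         * register pair, which inserts one register of padding before the
         * pair; regpairs_aligned() tells us whether to skip it.
         */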
6136static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6137                                         abi_long arg2,
6138                                         abi_long arg3,
6139                                         abi_long arg4)
6140{
6141    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6142        arg2 = arg3;
6143        arg3 = arg4;
6144    }
6145    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6146}
6147#endif
6148
6149#ifdef TARGET_NR_ftruncate64
6150static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6151                                          abi_long arg2,
6152                                          abi_long arg3,
6153                                          abi_long arg4)
6154{
6155    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6156        arg2 = arg3;
6157        arg3 = arg4;
6158    }
6159    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6160}
6161#endif
6162
6163static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6164                                               abi_ulong target_addr)
6165{
6166    struct target_timespec *target_ts;
6167
6168    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6169        return -TARGET_EFAULT;
6170    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6171    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6172    unlock_user_struct(target_ts, target_addr, 0);
6173    return 0;
6174}
6175
6176static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6177                                               struct timespec *host_ts)
6178{
6179    struct target_timespec *target_ts;
6180
6181    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6182        return -TARGET_EFAULT;
6183    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6184    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6185    unlock_user_struct(target_ts, target_addr, 1);
6186    return 0;
6187}
6188
6189static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6190                                                 abi_ulong target_addr)
6191{
6192    struct target_itimerspec *target_itspec;
6193
6194    if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6195        return -TARGET_EFAULT;
6196    }
6197
6198    host_itspec->it_interval.tv_sec =
6199                            tswapal(target_itspec->it_interval.tv_sec);
6200    host_itspec->it_interval.tv_nsec =
6201                            tswapal(target_itspec->it_interval.tv_nsec);
6202    host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6203    host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6204
6205    unlock_user_struct(target_itspec, target_addr, 1);
6206    return 0;
6207}
6208
6209static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6210                                               struct itimerspec *host_its)
6211{
6212    struct target_itimerspec *target_itspec;
6213
6214    if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6215        return -TARGET_EFAULT;
6216    }
6217
6218    target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6219    target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6220
6221    target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6222    target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6223
6224    unlock_user_struct(target_itspec, target_addr, 0);
6225    return 0;
6226}
6227
6228static inline abi_long target_to_host_timex(struct timex *host_tx,
6229                                            abi_long target_addr)
6230{
6231    struct target_timex *target_tx;
6232
6233    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6234        return -TARGET_EFAULT;
6235    }
6236
6237    __get_user(host_tx->modes, &target_tx->modes);
6238    __get_user(host_tx->offset, &target_tx->offset);
6239    __get_user(host_tx->freq, &target_tx->freq);
6240    __get_user(host_tx->maxerror, &target_tx->maxerror);
6241    __get_user(host_tx->esterror, &target_tx->esterror);
6242    __get_user(host_tx->status, &target_tx->status);
6243    __get_user(host_tx->constant, &target_tx->constant);
6244    __get_user(host_tx->precision, &target_tx->precision);
6245    __get_user(host_tx->tolerance, &target_tx->tolerance);
6246    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6247    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6248    __get_user(host_tx->tick, &target_tx->tick);
6249    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6250    __get_user(host_tx->jitter, &target_tx->jitter);
6251    __get_user(host_tx->shift, &target_tx->shift);
6252    __get_user(host_tx->stabil, &target_tx->stabil);
6253    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6254    __get_user(host_tx->calcnt, &target_tx->calcnt);
6255    __get_user(host_tx->errcnt, &target_tx->errcnt);
6256    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6257    __get_user(host_tx->tai, &target_tx->tai);
6258
6259    unlock_user_struct(target_tx, target_addr, 0);
6260    return 0;
6261}
6262
6263static inline abi_long host_to_target_timex(abi_long target_addr,
6264                                            struct timex *host_tx)
6265{
6266    struct target_timex *target_tx;
6267
6268    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6269        return -TARGET_EFAULT;
6270    }
6271
6272    __put_user(host_tx->modes, &target_tx->modes);
6273    __put_user(host_tx->offset, &target_tx->offset);
6274    __put_user(host_tx->freq, &target_tx->freq);
6275    __put_user(host_tx->maxerror, &target_tx->maxerror);
6276    __put_user(host_tx->esterror, &target_tx->esterror);
6277    __put_user(host_tx->status, &target_tx->status);
6278    __put_user(host_tx->constant, &target_tx->constant);
6279    __put_user(host_tx->precision, &target_tx->precision);
6280    __put_user(host_tx->tolerance, &target_tx->tolerance);
6281    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6282    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6283    __put_user(host_tx->tick, &target_tx->tick);
6284    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6285    __put_user(host_tx->jitter, &target_tx->jitter);
6286    __put_user(host_tx->shift, &target_tx->shift);
6287    __put_user(host_tx->stabil, &target_tx->stabil);
6288    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6289    __put_user(host_tx->calcnt, &target_tx->calcnt);
6290    __put_user(host_tx->errcnt, &target_tx->errcnt);
6291    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6292    __put_user(host_tx->tai, &target_tx->tai);
6293
6294    unlock_user_struct(target_tx, target_addr, 1);
6295    return 0;
6296}
6297
6298
6299static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6300                                               abi_ulong target_addr)
6301{
6302    struct target_sigevent *target_sevp;
6303
6304    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6305        return -TARGET_EFAULT;
6306    }
6307
6308    /* This union is awkward on 64 bit systems because it has a 32 bit
6309     * integer and a pointer in it; we follow the conversion approach
6310     * used for handling sigval types in signal.c so the guest should get
6311     * the correct value back even if we did a 64 bit byteswap and it's
6312     * using the 32 bit integer.
6313     */
6314    host_sevp->sigev_value.sival_ptr =
6315        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6316    host_sevp->sigev_signo =
6317        target_to_host_signal(tswap32(target_sevp->sigev_signo));
6318    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6319    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6320
6321    unlock_user_struct(target_sevp, target_addr, 1);
6322    return 0;
6323}
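
/*
 * A minimal sketch of the round trip described in the comment above:
 * whatever the guest stored in sigev_value (the 32-bit integer or the
 * pointer member), the word-sized tswapal() here is undone by the
 * matching sigval conversion in signal.c when the notification signal
 * is delivered, so the guest reads back exactly the bytes it wrote
 * even if the swap width did not match the member it actually used
 * (the host-side value may look scrambled in that case, but the round
 * trip is the identity).
 */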
6324
6325#if defined(TARGET_NR_mlockall)
6326static inline int target_to_host_mlockall_arg(int arg)
6327{
6328    int result = 0;
6329
6330    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6331        result |= MCL_CURRENT;
6332    }
6333    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6334        result |= MCL_FUTURE;
6335    }
6336    return result;
6337}
6338#endif
6339
6340#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
6341     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
6342     defined(TARGET_NR_newfstatat))
6343static inline abi_long host_to_target_stat64(void *cpu_env,
6344                                             abi_ulong target_addr,
6345                                             struct stat *host_st)
6346{
6347#if defined(TARGET_ARM) && defined(TARGET_ABI32)
6348    if (((CPUARMState *)cpu_env)->eabi) {
6349        struct target_eabi_stat64 *target_st;
6350
6351        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6352            return -TARGET_EFAULT;
6353        memset(target_st, 0, sizeof(struct target_eabi_stat64));
6354        __put_user(host_st->st_dev, &target_st->st_dev);
6355        __put_user(host_st->st_ino, &target_st->st_ino);
6356#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6357        __put_user(host_st->st_ino, &target_st->__st_ino);
6358#endif
6359        __put_user(host_st->st_mode, &target_st->st_mode);
6360        __put_user(host_st->st_nlink, &target_st->st_nlink);
6361        __put_user(host_st->st_uid, &target_st->st_uid);
6362        __put_user(host_st->st_gid, &target_st->st_gid);
6363        __put_user(host_st->st_rdev, &target_st->st_rdev);
6364        __put_user(host_st->st_size, &target_st->st_size);
6365        __put_user(host_st->st_blksize, &target_st->st_blksize);
6366        __put_user(host_st->st_blocks, &target_st->st_blocks);
6367        __put_user(host_st->st_atime, &target_st->target_st_atime);
6368        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6369        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6370        unlock_user_struct(target_st, target_addr, 1);
6371    } else
6372#endif
6373    {
6374#if defined(TARGET_HAS_STRUCT_STAT64)
6375        struct target_stat64 *target_st;
6376#else
6377        struct target_stat *target_st;
6378#endif
6379
6380        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6381            return -TARGET_EFAULT;
6382        memset(target_st, 0, sizeof(*target_st));
6383        __put_user(host_st->st_dev, &target_st->st_dev);
6384        __put_user(host_st->st_ino, &target_st->st_ino);
6385#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6386        __put_user(host_st->st_ino, &target_st->__st_ino);
6387#endif
6388        __put_user(host_st->st_mode, &target_st->st_mode);
6389        __put_user(host_st->st_nlink, &target_st->st_nlink);
6390        __put_user(host_st->st_uid, &target_st->st_uid);
6391        __put_user(host_st->st_gid, &target_st->st_gid);
6392        __put_user(host_st->st_rdev, &target_st->st_rdev);
6393        /* XXX: better use of kernel struct */
6394        __put_user(host_st->st_size, &target_st->st_size);
6395        __put_user(host_st->st_blksize, &target_st->st_blksize);
6396        __put_user(host_st->st_blocks, &target_st->st_blocks);
6397        __put_user(host_st->st_atime, &target_st->target_st_atime);
6398        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6399        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6400        unlock_user_struct(target_st, target_addr, 1);
6401    }
6402
6403    return 0;
6404}
6405#endif
6406
6407/* ??? Using host futex calls even when target atomic operations
6408   are not really atomic probably breaks things.  However, implementing
6409   futexes locally would make futexes shared between multiple processes
6410   tricky.  That said, such shared futexes are probably useless anyway,
6411   because guest atomic operations won't work either.  */
6412static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6413                    target_ulong uaddr2, int val3)
6414{
6415    struct timespec ts, *pts;
6416    int base_op;
6417
6418    /* ??? We assume FUTEX_* constants are the same on both host
6419       and target.  */
6420#ifdef FUTEX_CMD_MASK
6421    base_op = op & FUTEX_CMD_MASK;
6422#else
6423    base_op = op;
6424#endif
6425    switch (base_op) {
6426    case FUTEX_WAIT:
6427    case FUTEX_WAIT_BITSET:
6428        if (timeout) {
6429            pts = &ts;
6430            if (target_to_host_timespec(pts, timeout)) {
                    return -TARGET_EFAULT;
                }
6431        } else {
6432            pts = NULL;
6433        }
6434        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6435                         pts, NULL, val3));
6436    case FUTEX_WAKE:
6437        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6438    case FUTEX_FD:
6439        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6440    case FUTEX_REQUEUE:
6441    case FUTEX_CMP_REQUEUE:
6442    case FUTEX_WAKE_OP:
6443        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6444           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6445           But the prototype takes a `struct timespec *'; insert casts
6446           to satisfy the compiler.  We do not need to tswap TIMEOUT
6447           since it's not compared to guest memory.  */
6448        pts = (struct timespec *)(uintptr_t) timeout;
6449        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6450                                    g2h(uaddr2),
6451                                    (base_op == FUTEX_CMP_REQUEUE
6452                                     ? tswap32(val3)
6453                                     : val3)));
6454    default:
6455        return -TARGET_ENOSYS;
6456    }
6457}
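
/*
 * A minimal sketch of the byte-order handling above: the kernel
 * compares the expected value against the 32-bit futex word at
 * g2h(uaddr), which sits in guest byte order.  For a big-endian guest
 * on a little-endian host, a guest futex word of 1 is the host bytes
 * 00 00 00 01, i.e. host value 0x01000000 == tswap32(1), so FUTEX_WAIT
 * must pass tswap32(val) for the comparison to match, while FUTEX_WAKE
 * takes a plain wakeup count and passes val through unswapped.
 */
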
6458#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6459static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6460                                     abi_long handle, abi_long mount_id,
6461                                     abi_long flags)
6462{
6463    struct file_handle *target_fh;
6464    struct file_handle *fh;
6465    int mid = 0;
6466    abi_long ret;
6467    char *name;
6468    unsigned int size, total_size;
6469
6470    if (get_user_s32(size, handle)) {
6471        return -TARGET_EFAULT;
6472    }
6473
6474    name = lock_user_string(pathname);
6475    if (!name) {
6476        return -TARGET_EFAULT;
6477    }
6478
6479    total_size = sizeof(struct file_handle) + size;
6480    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6481    if (!target_fh) {
6482        unlock_user(name, pathname, 0);
6483        return -TARGET_EFAULT;
6484    }
6485
6486    fh = g_malloc0(total_size);
6487    fh->handle_bytes = size;
6488
6489    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6490    unlock_user(name, pathname, 0);
6491
6492    /* man name_to_handle_at(2):
6493     * Other than the use of the handle_bytes field, the caller should treat
6494     * the file_handle structure as an opaque data type
6495     */
6496
6497    memcpy(target_fh, fh, total_size);
6498    target_fh->handle_bytes = tswap32(fh->handle_bytes);
6499    target_fh->handle_type = tswap32(fh->handle_type);
6500    g_free(fh);
6501    unlock_user(target_fh, handle, total_size);
6502
6503    if (put_user_s32(mid, mount_id)) {
6504        return -TARGET_EFAULT;
6505    }
6506
6507    return ret;
6508
6509}
6510#endif
6511
6512#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6513static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6514                                     abi_long flags)
6515{
6516    struct file_handle *target_fh;
6517    struct file_handle *fh;
6518    unsigned int size, total_size;
6519    abi_long ret;
6520
6521    if (get_user_s32(size, handle)) {
6522        return -TARGET_EFAULT;
6523    }
6524
6525    total_size = sizeof(struct file_handle) + size;
6526    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6527    if (!target_fh) {
6528        return -TARGET_EFAULT;
6529    }
6530
6531    fh = g_memdup(target_fh, total_size);
6532    fh->handle_bytes = size;
6533    fh->handle_type = tswap32(target_fh->handle_type);
6534
6535    ret = get_errno(open_by_handle_at(mount_fd, fh,
6536                    target_to_host_bitmask(flags, fcntl_flags_tbl)));
6537
6538    g_free(fh);
6539
6540    unlock_user(target_fh, handle, total_size);
6541
6542    return ret;
6543}
6544#endif
6545
6546#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6547
6548static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6549{
6550    int host_flags;
6551    target_sigset_t *target_mask;
6552    sigset_t host_mask;
6553    abi_long ret;
6554
6555    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6556        return -TARGET_EINVAL;
6557    }
6558    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6559        return -TARGET_EFAULT;
6560    }
6561
6562    target_to_host_sigset(&host_mask, target_mask);
6563
6564    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6565
6566    ret = get_errno(signalfd(fd, &host_mask, host_flags));
6567    if (ret >= 0) {
6568        fd_trans_register(ret, &target_signalfd_trans);
6569    }
6570
6571    unlock_user_struct(target_mask, mask, 0);
6572
6573    return ret;
6574}
6575#endif
6576
6577/* Map host to target signal numbers for the wait family of syscalls.
6578   Assume all other status bits are the same.  */
6579int host_to_target_waitstatus(int status)
6580{
6581    if (WIFSIGNALED(status)) {
6582        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6583    }
6584    if (WIFSTOPPED(status)) {
6585        return (host_to_target_signal(WSTOPSIG(status)) << 8)
6586               | (status & 0xff);
6587    }
6588    return status;
6589}
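
/*
 * A worked example of the layout relied on here (assuming host
 * SIGUSR1 == 10): a child killed by SIGUSR1 has host status 0x000a,
 * with the terminating signal in the low 7 bits, so only that field is
 * remapped and the remaining bits, such as the core-dump flag kept by
 * (status & ~0x7f), pass through untouched.  For a stopped child the
 * signal lives in bits 8-15 above a 0x7f low byte, which is why the
 * converted stop signal is shifted left by eight.
 */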
6590
6591static int open_self_cmdline(void *cpu_env, int fd)
6592{
6593    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6594    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6595    int i;
6596
6597    for (i = 0; i < bprm->argc; i++) {
6598        size_t len = strlen(bprm->argv[i]) + 1;
6599
6600        if (write(fd, bprm->argv[i], len) != len) {
6601            return -1;
6602        }
6603    }
6604
6605    return 0;
6606}
6607
6608static int open_self_maps(void *cpu_env, int fd)
6609{
6610    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6611    TaskState *ts = cpu->opaque;
6612    FILE *fp;
6613    char *line = NULL;
6614    size_t len = 0;
6615    ssize_t read;
6616
6617    fp = fopen("/proc/self/maps", "r");
6618    if (fp == NULL) {
6619        return -1;
6620    }
6621
6622    while ((read = getline(&line, &len, fp)) != -1) {
6623        int fields, dev_maj, dev_min, inode;
6624        uint64_t min, max, offset;
6625        char flag_r, flag_w, flag_x, flag_p;
6626        char path[512] = "";
6627        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6628                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6629                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6630
6631        if ((fields < 10) || (fields > 11)) {
6632            continue;
6633        }
6634        if (h2g_valid(min)) {
6635            int flags = page_get_flags(h2g(min));
6636            max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6637            if (page_check_range(h2g(min), max - min, flags) == -1) {
6638                continue;
6639            }
6640            if (h2g(min) == ts->info->stack_limit) {
6641                pstrcpy(path, sizeof(path), "      [stack]");
6642            }
6643            dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6644                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6645                    h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6646                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
6647                    path[0] ? "         " : "", path);
6648        }
6649    }
6650
6651    free(line);
6652    fclose(fp);
6653
6654    return 0;
6655}
6656
6657static int open_self_stat(void *cpu_env, int fd)
6658{
6659    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6660    TaskState *ts = cpu->opaque;
6661    abi_ulong start_stack = ts->info->start_stack;
6662    int i;
6663
6664    for (i = 0; i < 44; i++) {
6665      char buf[128];
6666      int len;
6667      uint64_t val = 0;
6668
6669      if (i == 0) {
6670        /* pid */
6671        val = getpid();
6672        snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6673      } else if (i == 1) {
6674        /* app name */
6675        snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6676      } else if (i == 27) {
6677        /* stack bottom */
6678        val = start_stack;
6679        snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6680      } else {
6681        /* all remaining fields are simply reported as zero */
6682        snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6683      }
6684
6685      len = strlen(buf);
6686      if (write(fd, buf, len) != len) {
6687          return -1;
6688      }
6689    }
6690
6691    return 0;
6692}
6693
6694static int open_self_auxv(void *cpu_env, int fd)
6695{
6696    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6697    TaskState *ts = cpu->opaque;
6698    abi_ulong auxv = ts->info->saved_auxv;
6699    abi_ulong len = ts->info->auxv_len;
6700    char *ptr;
6701
6702    /*
6703     * The auxiliary vector is stored on the target process's stack;
6704     * read in the whole auxv vector and copy it to the file.
6705     */
6706    ptr = lock_user(VERIFY_READ, auxv, len, 0);
6707    if (ptr != NULL) {
6708        while (len > 0) {
6709            ssize_t r;
6710            r = write(fd, ptr, len);
6711            if (r <= 0) {
6712                break;
6713            }
6714            len -= r;
6715            ptr += r;
6716        }
6717        lseek(fd, 0, SEEK_SET);
6718        unlock_user(ptr, auxv, len);
6719    }
6720
6721    return 0;
6722}
6723
6724static int is_proc_myself(const char *filename, const char *entry)
6725{
6726    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
6727        filename += strlen("/proc/");
6728        if (!strncmp(filename, "self/", strlen("self/"))) {
6729            filename += strlen("self/");
6730        } else if (*filename >= '1' && *filename <= '9') {
6731            char myself[80];
6732            snprintf(myself, sizeof(myself), "%d/", getpid());
6733            if (!strncmp(filename, myself, strlen(myself))) {
6734                filename += strlen(myself);
6735            } else {
6736                return 0;
6737            }
6738        } else {
6739            return 0;
6740        }
6741        if (!strcmp(filename, entry)) {
6742            return 1;
6743        }
6744    }
6745    return 0;
6746}
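
/*
 * A quick illustration of the matching above (assuming getpid() is
 * 1234 in the running emulator):
 *
 *     is_proc_myself("/proc/self/maps", "maps")  -> 1
 *     is_proc_myself("/proc/1234/maps", "maps")  -> 1
 *     is_proc_myself("/proc/999/maps",  "maps")  -> 0
 *     is_proc_myself("/etc/passwd",     "maps")  -> 0
 */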
6747
6748#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6749static int is_proc(const char *filename, const char *entry)
6750{
6751    return strcmp(filename, entry) == 0;
6752}
6753
6754static int open_net_route(void *cpu_env, int fd)
6755{
6756    FILE *fp;
6757    char *line = NULL;
6758    size_t len = 0;
6759    ssize_t read;
6760
6761    fp = fopen("/proc/net/route", "r");
6762    if (fp == NULL) {
6763        return -1;
6764    }
6765
6766    /* read header */
6767
6768    read = getline(&line, &len, fp);
6769    dprintf(fd, "%s", line);
6770
6771    /* read routes */
6772
6773    while ((read = getline(&line, &len, fp)) != -1) {
6774        char iface[16];
6775        uint32_t dest, gw, mask;
6776        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6777        int fields;
6778
6779        fields = sscanf(line,
6780                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6781                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6782                        &mask, &mtu, &window, &irtt);
6783        if (fields != 11) {
6784            continue;
6785        }
6786        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6787                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6788                metric, tswap32(mask), mtu, window, irtt);
6789    }
6790
6791    free(line);
6792    fclose(fp);
6793
6794    return 0;
6795}
6796#endif
6797
6798static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6799{
6800    struct fake_open {
6801        const char *filename;
6802        int (*fill)(void *cpu_env, int fd);
6803        int (*cmp)(const char *s1, const char *s2);
6804    };
6805    const struct fake_open *fake_open;
6806    static const struct fake_open fakes[] = {
6807        { "maps", open_self_maps, is_proc_myself },
6808        { "stat", open_self_stat, is_proc_myself },
6809        { "auxv", open_self_auxv, is_proc_myself },
6810        { "cmdline", open_self_cmdline, is_proc_myself },
6811#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6812        { "/proc/net/route", open_net_route, is_proc },
6813#endif
6814        { NULL, NULL, NULL }
6815    };
6816
6817    if (is_proc_myself(pathname, "exe")) {
6818        int execfd = qemu_getauxval(AT_EXECFD);
6819        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6820    }
6821
6822    for (fake_open = fakes; fake_open->filename; fake_open++) {
6823        if (fake_open->cmp(pathname, fake_open->filename)) {
6824            break;
6825        }
6826    }
6827
6828    if (fake_open->filename) {
6829        const char *tmpdir;
6830        char filename[PATH_MAX];
6831        int fd, r;
6832
6833        /* create a temporary file to hold the synthesized contents */
6834        tmpdir = getenv("TMPDIR");
6835        if (!tmpdir)
6836            tmpdir = "/tmp";
6837        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6838        fd = mkstemp(filename);
6839        if (fd < 0) {
6840            return fd;
6841        }
6842        unlink(filename);
6843
6844        if ((r = fake_open->fill(cpu_env, fd))) {
6845            int e = errno;
6846            close(fd);
6847            errno = e;
6848            return r;
6849        }
6850        lseek(fd, 0, SEEK_SET);
6851
6852        return fd;
6853    }
6854
6855    return safe_openat(dirfd, path(pathname), flags, mode);
6856}
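
/*
 * A minimal sketch of the fake-open path above: a guest
 * open("/proc/self/maps", O_RDONLY) is routed here, matched against
 * the fakes[] table, and served from an unlinked temporary file that
 * open_self_maps() fills with the guest's view of the mappings; the
 * rewound descriptor of that file is what the guest receives, so it
 * never sees the host's own /proc contents for these entries.
 */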
6857
6858#define TIMER_MAGIC 0x0caf0000
6859#define TIMER_MAGIC_MASK 0xffff0000
6860
6861/* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
6862static target_timer_t get_timer_id(abi_long arg)
6863{
6864    target_timer_t timerid = arg;
6865
6866    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6867        return -TARGET_EINVAL;
6868    }
6869
6870    timerid &= 0xffff;
6871
6872    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6873        return -TARGET_EINVAL;
6874    }
6875
6876    return timerid;
6877}
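
/*
 * A worked example of the encoding (assuming g_posix_timers has more
 * than three and fewer than 0xfffe slots):
 *
 *     guest timer ID 0x0caf0003  ->  index 3
 *     guest timer ID 0x00000003  ->  -TARGET_EINVAL (magic missing)
 *     guest timer ID 0x0caffffe  ->  -TARGET_EINVAL (index out of range)
 */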
6878
6879static int target_to_host_cpu_mask(unsigned long *host_mask,
6880                                   size_t host_size,
6881                                   abi_ulong target_addr,
6882                                   size_t target_size)
6883{
6884    unsigned target_bits = sizeof(abi_ulong) * 8;
6885    unsigned host_bits = sizeof(*host_mask) * 8;
6886    abi_ulong *target_mask;
6887    unsigned i, j;
6888
6889    assert(host_size >= target_size);
6890
6891    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
6892    if (!target_mask) {
6893        return -TARGET_EFAULT;
6894    }
6895    memset(host_mask, 0, host_size);
6896
6897    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6898        unsigned bit = i * target_bits;
6899        abi_ulong val;
6900
6901        __get_user(val, &target_mask[i]);
6902        for (j = 0; j < target_bits; j++, bit++) {
6903            if (val & (1UL << j)) {
6904                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
6905            }
6906        }
6907    }
6908
6909    unlock_user(target_mask, target_addr, 0);
6910    return 0;
6911}
6912
6913static int host_to_target_cpu_mask(const unsigned long *host_mask,
6914                                   size_t host_size,
6915                                   abi_ulong target_addr,
6916                                   size_t target_size)
6917{
6918    unsigned target_bits = sizeof(abi_ulong) * 8;
6919    unsigned host_bits = sizeof(*host_mask) * 8;
6920    abi_ulong *target_mask;
6921    unsigned i, j;
6922
6923    assert(host_size >= target_size);
6924
6925    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
6926    if (!target_mask) {
6927        return -TARGET_EFAULT;
6928    }
6929
6930    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
6931        unsigned bit = i * target_bits;
6932        abi_ulong val = 0;
6933
6934        for (j = 0; j < target_bits; j++, bit++) {
6935            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
6936                val |= 1UL << j;
6937            }
6938        }
6939        __put_user(val, &target_mask[i]);
6940    }
6941
6942    unlock_user(target_mask, target_addr, target_size);
6943    return 0;
6944}
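
/*
 * A worked example of the repacking done by the two helpers above,
 * assuming a 32-bit guest on a 64-bit host: guest word i bit j is
 * absolute CPU number i * 32 + j, which lands in host word
 * (i * 32 + j) / 64 at bit (i * 32 + j) % 64, so CPU 40 is guest word
 * 1 bit 8 but host word 0 bit 40.  The __get_user()/__put_user()
 * accessors handle the guest byte order of each abi_ulong word.
 */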
6945
6946/* This is an internal helper for do_syscall so that it is easier
6947 * to have a single return point, which allows actions such as
6948 * logging of syscall results to be performed.
6949 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6950 */
6951static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6952                            abi_long arg2, abi_long arg3, abi_long arg4,
6953                            abi_long arg5, abi_long arg6, abi_long arg7,
6954                            abi_long arg8)
6955{
6956    CPUState *cpu = ENV_GET_CPU(cpu_env);
6957    abi_long ret;
6958#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6959    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6960    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6961    struct stat st;
6962#endif
6963#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6964    || defined(TARGET_NR_fstatfs)
6965    struct statfs stfs;
6966#endif
6967    void *p;
6968
6969    switch(num) {
6970    case TARGET_NR_exit:
6971        /* In old applications this may be used to implement _exit(2).
6972           However, in threaded applications it is used for thread termination,
6973           and _exit_group is used for application termination.
6974           Do thread termination if we have more than one thread.  */
6975
6976        if (block_signals()) {
6977            return -TARGET_ERESTARTSYS;
6978        }
6979
6980        cpu_list_lock();
6981
6982        if (CPU_NEXT(first_cpu)) {
6983            TaskState *ts;
6984
6985            /* Remove the CPU from the list.  */
6986            QTAILQ_REMOVE_RCU(&cpus, cpu, node);
6987
6988            cpu_list_unlock();
6989
6990            ts = cpu->opaque;
6991            if (ts->child_tidptr) {
6992                put_user_u32(0, ts->child_tidptr);
6993                sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6994                          NULL, NULL, 0);
6995            }
6996            thread_cpu = NULL;
6997            object_unref(OBJECT(cpu));
6998            g_free(ts);
6999            rcu_unregister_thread();
7000            pthread_exit(NULL);
7001        }
7002
7003        cpu_list_unlock();
7004        preexit_cleanup(cpu_env, arg1);
7005        _exit(arg1);
7006        return 0; /* avoid warning */
7007    case TARGET_NR_read:
7008        if (arg2 == 0 && arg3 == 0) {
7009            return get_errno(safe_read(arg1, 0, 0));
7010        } else {
7011            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7012                return -TARGET_EFAULT;
7013            ret = get_errno(safe_read(arg1, p, arg3));
7014            if (ret >= 0 &&
7015                fd_trans_host_to_target_data(arg1)) {
7016                ret = fd_trans_host_to_target_data(arg1)(p, ret);
7017            }
7018            unlock_user(p, arg2, ret);
7019        }
7020        return ret;
7021    case TARGET_NR_write:
7022        if (arg2 == 0 && arg3 == 0) {
7023            return get_errno(safe_write(arg1, 0, 0));
7024        }
7025        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7026            return -TARGET_EFAULT;
7027        if (fd_trans_target_to_host_data(arg1)) {
7028            void *copy = g_malloc(arg3);
7029            memcpy(copy, p, arg3);
7030            ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7031            if (ret >= 0) {
7032                ret = get_errno(safe_write(arg1, copy, ret));
7033            }
7034            g_free(copy);
7035        } else {
7036            ret = get_errno(safe_write(arg1, p, arg3));
7037        }
7038        unlock_user(p, arg2, 0);
7039        return ret;
7040
7041#ifdef TARGET_NR_open
7042    case TARGET_NR_open:
7043        if (!(p = lock_user_string(arg1)))
7044            return -TARGET_EFAULT;
7045        ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7046                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
7047                                  arg3));
7048        fd_trans_unregister(ret);
7049        unlock_user(p, arg1, 0);
7050        return ret;
7051#endif
7052    case TARGET_NR_openat:
7053        if (!(p = lock_user_string(arg2)))
7054            return -TARGET_EFAULT;
7055        ret = get_errno(do_openat(cpu_env, arg1, p,
7056                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
7057                                  arg4));
7058        fd_trans_unregister(ret);
7059        unlock_user(p, arg2, 0);
7060        return ret;
7061#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7062    case TARGET_NR_name_to_handle_at:
7063        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7064        return ret;
7065#endif
7066#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7067    case TARGET_NR_open_by_handle_at:
7068        ret = do_open_by_handle_at(arg1, arg2, arg3);
7069        fd_trans_unregister(ret);
7070        return ret;
7071#endif
7072    case TARGET_NR_close:
7073        fd_trans_unregister(arg1);
7074        return get_errno(close(arg1));
7075
7076    case TARGET_NR_brk:
7077        return do_brk(arg1);
7078#ifdef TARGET_NR_fork
7079    case TARGET_NR_fork:
7080        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7081#endif
7082#ifdef TARGET_NR_waitpid
7083    case TARGET_NR_waitpid:
7084        {
7085            int status;
7086            ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7087            if (!is_error(ret) && arg2 && ret
7088                && put_user_s32(host_to_target_waitstatus(status), arg2))
7089                return -TARGET_EFAULT;
7090        }
7091        return ret;
7092#endif
7093#ifdef TARGET_NR_waitid
7094    case TARGET_NR_waitid:
7095        {
7096            siginfo_t info;
7097            info.si_pid = 0;
7098            ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7099            if (!is_error(ret) && arg3 && info.si_pid != 0) {
7100                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7101                    return -TARGET_EFAULT;
7102                host_to_target_siginfo(p, &info);
7103                unlock_user(p, arg3, sizeof(target_siginfo_t));
7104            }
7105        }
7106        return ret;
7107#endif
7108#ifdef TARGET_NR_creat /* not on alpha */
7109    case TARGET_NR_creat:
7110        if (!(p = lock_user_string(arg1)))
7111            return -TARGET_EFAULT;
7112        ret = get_errno(creat(p, arg2));
7113        fd_trans_unregister(ret);
7114        unlock_user(p, arg1, 0);
7115        return ret;
7116#endif
7117#ifdef TARGET_NR_link
7118    case TARGET_NR_link:
7119        {
7120            void * p2;
7121            p = lock_user_string(arg1);
7122            p2 = lock_user_string(arg2);
7123            if (!p || !p2)
7124                ret = -TARGET_EFAULT;
7125            else
7126                ret = get_errno(link(p, p2));
7127            unlock_user(p2, arg2, 0);
7128            unlock_user(p, arg1, 0);
7129        }
7130        return ret;
7131#endif
7132#if defined(TARGET_NR_linkat)
7133    case TARGET_NR_linkat:
7134        {
7135            void * p2 = NULL;
7136            if (!arg2 || !arg4)
7137                return -TARGET_EFAULT;
7138            p  = lock_user_string(arg2);
7139            p2 = lock_user_string(arg4);
7140            if (!p || !p2)
7141                ret = -TARGET_EFAULT;
7142            else
7143                ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7144            unlock_user(p, arg2, 0);
7145            unlock_user(p2, arg4, 0);
7146        }
7147        return ret;
7148#endif
7149#ifdef TARGET_NR_unlink
7150    case TARGET_NR_unlink:
7151        if (!(p = lock_user_string(arg1)))
7152            return -TARGET_EFAULT;
7153        ret = get_errno(unlink(p));
7154        unlock_user(p, arg1, 0);
7155        return ret;
7156#endif
7157#if defined(TARGET_NR_unlinkat)
7158    case TARGET_NR_unlinkat:
7159        if (!(p = lock_user_string(arg2)))
7160            return -TARGET_EFAULT;
7161        ret = get_errno(unlinkat(arg1, p, arg3));
7162        unlock_user(p, arg2, 0);
7163        return ret;
7164#endif
7165    case TARGET_NR_execve:
7166        {
7167            char **argp, **envp;
7168            int argc, envc;
7169            abi_ulong gp;
7170            abi_ulong guest_argp;
7171            abi_ulong guest_envp;
7172            abi_ulong addr;
7173            char **q;
7174            int total_size = 0;
7175
7176            argc = 0;
7177            guest_argp = arg2;
7178            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7179                if (get_user_ual(addr, gp))
7180                    return -TARGET_EFAULT;
7181                if (!addr)
7182                    break;
7183                argc++;
7184            }
7185            envc = 0;
7186            guest_envp = arg3;
7187            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7188                if (get_user_ual(addr, gp))
7189                    return -TARGET_EFAULT;
7190                if (!addr)
7191                    break;
7192                envc++;
7193            }
7194
7195            argp = g_new0(char *, argc + 1);
7196            envp = g_new0(char *, envc + 1);
7197
7198            for (gp = guest_argp, q = argp; gp;
7199                  gp += sizeof(abi_ulong), q++) {
7200                if (get_user_ual(addr, gp))
7201                    goto execve_efault;
7202                if (!addr)
7203                    break;
7204                if (!(*q = lock_user_string(addr)))
7205                    goto execve_efault;
7206                total_size += strlen(*q) + 1;
7207            }
7208            *q = NULL;
7209
7210            for (gp = guest_envp, q = envp; gp;
7211                  gp += sizeof(abi_ulong), q++) {
7212                if (get_user_ual(addr, gp))
7213                    goto execve_efault;
7214                if (!addr)
7215                    break;
7216                if (!(*q = lock_user_string(addr)))
7217                    goto execve_efault;
7218                total_size += strlen(*q) + 1;
7219            }
7220            *q = NULL;
7221
7222            if (!(p = lock_user_string(arg1)))
7223                goto execve_efault;
7224            /* Although execve() is not an interruptible syscall it is
7225             * a special case where we must use the safe_syscall wrapper:
7226             * if we allow a signal to happen before we make the host
7227             * syscall then we will 'lose' it, because at the point of
7228             * execve the process leaves QEMU's control. So we use the
7229             * safe syscall wrapper to ensure that we either take the
7230             * signal as a guest signal, or else it does not happen
7231             * before the execve completes and makes it the other
7232             * program's problem.
7233             */
7234            ret = get_errno(safe_execve(p, argp, envp));
7235            unlock_user(p, arg1, 0);
7236
7237            goto execve_end;
7238
7239        execve_efault:
7240            ret = -TARGET_EFAULT;
7241
7242        execve_end:
7243            for (gp = guest_argp, q = argp; *q;
7244                  gp += sizeof(abi_ulong), q++) {
7245                if (get_user_ual(addr, gp)
7246                    || !addr)
7247                    break;
7248                unlock_user(*q, addr, 0);
7249            }
7250            for (gp = guest_envp, q = envp; *q;
7251                  gp += sizeof(abi_ulong), q++) {
7252                if (get_user_ual(addr, gp)
7253                    || !addr)
7254                    break;
7255                unlock_user(*q, addr, 0);
7256            }
7257
7258            g_free(argp);
7259            g_free(envp);
7260        }
7261        return ret;
7262    case TARGET_NR_chdir:
7263        if (!(p = lock_user_string(arg1)))
7264            return -TARGET_EFAULT;
7265        ret = get_errno(chdir(p));
7266        unlock_user(p, arg1, 0);
7267        return ret;
7268#ifdef TARGET_NR_time
7269    case TARGET_NR_time:
7270        {
7271            time_t host_time;
7272            ret = get_errno(time(&host_time));
7273            if (!is_error(ret)
7274                && arg1
7275                && put_user_sal(host_time, arg1))
7276                return -TARGET_EFAULT;
7277        }
7278        return ret;
7279#endif
7280#ifdef TARGET_NR_mknod
7281    case TARGET_NR_mknod:
7282        if (!(p = lock_user_string(arg1)))
7283            return -TARGET_EFAULT;
7284        ret = get_errno(mknod(p, arg2, arg3));
7285        unlock_user(p, arg1, 0);
7286        return ret;
7287#endif
7288#if defined(TARGET_NR_mknodat)
7289    case TARGET_NR_mknodat:
7290        if (!(p = lock_user_string(arg2)))
7291            return -TARGET_EFAULT;
7292        ret = get_errno(mknodat(arg1, p, arg3, arg4));
7293        unlock_user(p, arg2, 0);
7294        return ret;
7295#endif
7296#ifdef TARGET_NR_chmod
7297    case TARGET_NR_chmod:
7298        if (!(p = lock_user_string(arg1)))
7299            return -TARGET_EFAULT;
7300        ret = get_errno(chmod(p, arg2));
7301        unlock_user(p, arg1, 0);
7302        return ret;
7303#endif
7304#ifdef TARGET_NR_lseek
7305    case TARGET_NR_lseek:
7306        return get_errno(lseek(arg1, arg2, arg3));
7307#endif
7308#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7309    /* Alpha specific */
7310    case TARGET_NR_getxpid:
7311        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7312        return get_errno(getpid());
7313#endif
7314#ifdef TARGET_NR_getpid
7315    case TARGET_NR_getpid:
7316        return get_errno(getpid());
7317#endif
7318    case TARGET_NR_mount:
7319        {
7320            /* need to look at the data field */
7321            void *p2, *p3;
7322
7323            if (arg1) {
7324                p = lock_user_string(arg1);
7325                if (!p) {
7326                    return -TARGET_EFAULT;
7327                }
7328            } else {
7329                p = NULL;
7330            }
7331
7332            p2 = lock_user_string(arg2);
7333            if (!p2) {
7334                if (arg1) {
7335                    unlock_user(p, arg1, 0);
7336                }
7337                return -TARGET_EFAULT;
7338            }
7339
7340            if (arg3) {
7341                p3 = lock_user_string(arg3);
7342                if (!p3) {
7343                    if (arg1) {
7344                        unlock_user(p, arg1, 0);
7345                    }
7346                    unlock_user(p2, arg2, 0);
7347                    return -TARGET_EFAULT;
7348                }
7349            } else {
7350                p3 = NULL;
7351            }
7352
7353            /* FIXME - arg5 should be locked, but it isn't clear how to
7354             * do that since it's not guaranteed to be a NULL-terminated
7355             * string.
7356             */
7357            if (!arg5) {
7358                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7359            } else {
7360                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7361            }
7362            ret = get_errno(ret);
7363
7364            if (arg1) {
7365                unlock_user(p, arg1, 0);
7366            }
7367            unlock_user(p2, arg2, 0);
7368            if (arg3) {
7369                unlock_user(p3, arg3, 0);
7370            }
7371        }
7372        return ret;
7373#ifdef TARGET_NR_umount
7374    case TARGET_NR_umount:
7375        if (!(p = lock_user_string(arg1)))
7376            return -TARGET_EFAULT;
7377        ret = get_errno(umount(p));
7378        unlock_user(p, arg1, 0);
7379        return ret;
7380#endif
7381#ifdef TARGET_NR_stime /* not on alpha */
7382    case TARGET_NR_stime:
7383        {
7384            time_t host_time;
7385            if (get_user_sal(host_time, arg1))
7386                return -TARGET_EFAULT;
7387            return get_errno(stime(&host_time));
7388        }
7389#endif
7390#ifdef TARGET_NR_alarm /* not on alpha */
7391    case TARGET_NR_alarm:
7392        return alarm(arg1);
7393#endif
7394#ifdef TARGET_NR_pause /* not on alpha */
7395    case TARGET_NR_pause:
7396        if (!block_signals()) {
7397            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7398        }
7399        return -TARGET_EINTR;
7400#endif
7401#ifdef TARGET_NR_utime
7402    case TARGET_NR_utime:
7403        {
7404            struct utimbuf tbuf, *host_tbuf;
7405            struct target_utimbuf *target_tbuf;
7406            if (arg2) {
7407                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7408                    return -TARGET_EFAULT;
7409                tbuf.actime = tswapal(target_tbuf->actime);
7410                tbuf.modtime = tswapal(target_tbuf->modtime);
7411                unlock_user_struct(target_tbuf, arg2, 0);
7412                host_tbuf = &tbuf;
7413            } else {
7414                host_tbuf = NULL;
7415            }
7416            if (!(p = lock_user_string(arg1)))
7417                return -TARGET_EFAULT;
7418            ret = get_errno(utime(p, host_tbuf));
7419            unlock_user(p, arg1, 0);
7420        }
7421        return ret;
7422#endif
7423#ifdef TARGET_NR_utimes
7424    case TARGET_NR_utimes:
7425        {
7426            struct timeval *tvp, tv[2];
7427            if (arg2) {
7428                if (copy_from_user_timeval(&tv[0], arg2)
7429                    || copy_from_user_timeval(&tv[1],
7430                                              arg2 + sizeof(struct target_timeval)))
7431                    return -TARGET_EFAULT;
7432                tvp = tv;
7433            } else {
7434                tvp = NULL;
7435            }
7436            if (!(p = lock_user_string(arg1)))
7437                return -TARGET_EFAULT;
7438            ret = get_errno(utimes(p, tvp));
7439            unlock_user(p, arg1, 0);
7440        }
7441        return ret;
7442#endif
7443#if defined(TARGET_NR_futimesat)
7444    case TARGET_NR_futimesat:
7445        {
7446            struct timeval *tvp, tv[2];
7447            if (arg3) {
7448                if (copy_from_user_timeval(&tv[0], arg3)
7449                    || copy_from_user_timeval(&tv[1],
7450                                              arg3 + sizeof(struct target_timeval)))
7451                    return -TARGET_EFAULT;
7452                tvp = tv;
7453            } else {
7454                tvp = NULL;
7455            }
7456            if (!(p = lock_user_string(arg2))) {
7457                return -TARGET_EFAULT;
7458            }
7459            ret = get_errno(futimesat(arg1, path(p), tvp));
7460            unlock_user(p, arg2, 0);
7461        }
7462        return ret;
7463#endif
7464#ifdef TARGET_NR_access
7465    case TARGET_NR_access:
7466        if (!(p = lock_user_string(arg1))) {
7467            return -TARGET_EFAULT;
7468        }
7469        ret = get_errno(access(path(p), arg2));
7470        unlock_user(p, arg1, 0);
7471        return ret;
7472#endif
7473#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7474    case TARGET_NR_faccessat:
7475        if (!(p = lock_user_string(arg2))) {
7476            return -TARGET_EFAULT;
7477        }
7478        ret = get_errno(faccessat(arg1, p, arg3, 0));
7479        unlock_user(p, arg2, 0);
7480        return ret;
7481#endif
7482#ifdef TARGET_NR_nice /* not on alpha */
7483    case TARGET_NR_nice:
7484        return get_errno(nice(arg1));
7485#endif
7486    case TARGET_NR_sync:
7487        sync();
7488        return 0;
7489#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7490    case TARGET_NR_syncfs:
7491        return get_errno(syncfs(arg1));
7492#endif
7493    case TARGET_NR_kill:
7494        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7495#ifdef TARGET_NR_rename
7496    case TARGET_NR_rename:
7497        {
7498            void *p2;
7499            p = lock_user_string(arg1);
7500            p2 = lock_user_string(arg2);
7501            if (!p || !p2)
7502                ret = -TARGET_EFAULT;
7503            else
7504                ret = get_errno(rename(p, p2));
7505            unlock_user(p2, arg2, 0);
7506            unlock_user(p, arg1, 0);
7507        }
7508        return ret;
7509#endif
7510#if defined(TARGET_NR_renameat)
7511    case TARGET_NR_renameat:
7512        {
7513            void *p2;
7514            p  = lock_user_string(arg2);
7515            p2 = lock_user_string(arg4);
7516            if (!p || !p2)
7517                ret = -TARGET_EFAULT;
7518            else
7519                ret = get_errno(renameat(arg1, p, arg3, p2));
7520            unlock_user(p2, arg4, 0);
7521            unlock_user(p, arg2, 0);
7522        }
7523        return ret;
7524#endif
7525#if defined(TARGET_NR_renameat2)
7526    case TARGET_NR_renameat2:
7527        {
7528            void *p2;
7529            p  = lock_user_string(arg2);
7530            p2 = lock_user_string(arg4);
7531            if (!p || !p2) {
7532                ret = -TARGET_EFAULT;
7533            } else {
7534                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7535            }
7536            unlock_user(p2, arg4, 0);
7537            unlock_user(p, arg2, 0);
7538        }
7539        return ret;
7540#endif
7541#ifdef TARGET_NR_mkdir
7542    case TARGET_NR_mkdir:
7543        if (!(p = lock_user_string(arg1)))
7544            return -TARGET_EFAULT;
7545        ret = get_errno(mkdir(p, arg2));
7546        unlock_user(p, arg1, 0);
7547        return ret;
7548#endif
7549#if defined(TARGET_NR_mkdirat)
7550    case TARGET_NR_mkdirat:
7551        if (!(p = lock_user_string(arg2)))
7552            return -TARGET_EFAULT;
7553        ret = get_errno(mkdirat(arg1, p, arg3));
7554        unlock_user(p, arg2, 0);
7555        return ret;
7556#endif
7557#ifdef TARGET_NR_rmdir
7558    case TARGET_NR_rmdir:
7559        if (!(p = lock_user_string(arg1)))
7560            return -TARGET_EFAULT;
7561        ret = get_errno(rmdir(p));
7562        unlock_user(p, arg1, 0);
7563        return ret;
7564#endif
7565    case TARGET_NR_dup:
7566        ret = get_errno(dup(arg1));
7567        if (ret >= 0) {
7568            fd_trans_dup(arg1, ret);
7569        }
7570        return ret;
7571#ifdef TARGET_NR_pipe
7572    case TARGET_NR_pipe:
7573        return do_pipe(cpu_env, arg1, 0, 0);
7574#endif
7575#ifdef TARGET_NR_pipe2
7576    case TARGET_NR_pipe2:
7577        return do_pipe(cpu_env, arg1,
7578                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7579#endif
7580    case TARGET_NR_times:
7581        {
7582            struct target_tms *tmsp;
7583            struct tms tms;
7584            ret = get_errno(times(&tms));
7585            if (arg1) {
7586                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7587                if (!tmsp)
7588                    return -TARGET_EFAULT;
7589                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7590                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7591                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7592                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7593            }
7594            if (!is_error(ret))
7595                ret = host_to_target_clock_t(ret);
7596        }
7597        return ret;
7598    case TARGET_NR_acct:
7599        if (arg1 == 0) {
7600            ret = get_errno(acct(NULL));
7601        } else {
7602            if (!(p = lock_user_string(arg1))) {
7603                return -TARGET_EFAULT;
7604            }
7605            ret = get_errno(acct(path(p)));
7606            unlock_user(p, arg1, 0);
7607        }
7608        return ret;
7609#ifdef TARGET_NR_umount2
7610    case TARGET_NR_umount2:
7611        if (!(p = lock_user_string(arg1)))
7612            return -TARGET_EFAULT;
7613        ret = get_errno(umount2(p, arg2));
7614        unlock_user(p, arg1, 0);
7615        return ret;
7616#endif
7617    case TARGET_NR_ioctl:
7618        return do_ioctl(arg1, arg2, arg3);
7619#ifdef TARGET_NR_fcntl
7620    case TARGET_NR_fcntl:
7621        return do_fcntl(arg1, arg2, arg3);
7622#endif
7623    case TARGET_NR_setpgid:
7624        return get_errno(setpgid(arg1, arg2));
7625    case TARGET_NR_umask:
7626        return get_errno(umask(arg1));
7627    case TARGET_NR_chroot:
7628        if (!(p = lock_user_string(arg1)))
7629            return -TARGET_EFAULT;
7630        ret = get_errno(chroot(p));
7631        unlock_user(p, arg1, 0);
7632        return ret;
7633#ifdef TARGET_NR_dup2
7634    case TARGET_NR_dup2:
7635        ret = get_errno(dup2(arg1, arg2));
7636        if (ret >= 0) {
7637            fd_trans_dup(arg1, arg2);
7638        }
7639        return ret;
7640#endif
7641#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7642    case TARGET_NR_dup3:
7643    {
7644        int host_flags;
7645
7646        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7647            return -EINVAL;
7648        }
7649        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7650        ret = get_errno(dup3(arg1, arg2, host_flags));
7651        if (ret >= 0) {
7652            fd_trans_dup(arg1, arg2);
7653        }
7654        return ret;
7655    }
7656#endif
7657#ifdef TARGET_NR_getppid /* not on alpha */
7658    case TARGET_NR_getppid:
7659        return get_errno(getppid());
7660#endif
7661#ifdef TARGET_NR_getpgrp
7662    case TARGET_NR_getpgrp:
7663        return get_errno(getpgrp());
7664#endif
7665    case TARGET_NR_setsid:
7666        return get_errno(setsid());
7667#ifdef TARGET_NR_sigaction
7668    case TARGET_NR_sigaction:
7669        {
7670#if defined(TARGET_ALPHA)
7671            struct target_sigaction act, oact, *pact = 0;
7672            struct target_old_sigaction *old_act;
7673            if (arg2) {
7674                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7675                    return -TARGET_EFAULT;
7676                act._sa_handler = old_act->_sa_handler;
7677                target_siginitset(&act.sa_mask, old_act->sa_mask);
7678                act.sa_flags = old_act->sa_flags;
7679                act.sa_restorer = 0;
7680                unlock_user_struct(old_act, arg2, 0);
7681                pact = &act;
7682            }
7683            ret = get_errno(do_sigaction(arg1, pact, &oact));
7684            if (!is_error(ret) && arg3) {
7685                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7686                    return -TARGET_EFAULT;
7687                old_act->_sa_handler = oact._sa_handler;
7688                old_act->sa_mask = oact.sa_mask.sig[0];
7689                old_act->sa_flags = oact.sa_flags;
7690                unlock_user_struct(old_act, arg3, 1);
7691            }
7692#elif defined(TARGET_MIPS)
7693            struct target_sigaction act, oact, *pact, *old_act;
7694
7695            if (arg2) {
7696                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7697                    return -TARGET_EFAULT;
7698                act._sa_handler = old_act->_sa_handler;
7699                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7700                act.sa_flags = old_act->sa_flags;
7701                unlock_user_struct(old_act, arg2, 0);
7702                pact = &act;
7703            } else {
7704                pact = NULL;
7705            }
7706
7707            ret = get_errno(do_sigaction(arg1, pact, &oact));
7708
7709            if (!is_error(ret) && arg3) {
7710                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7711                    return -TARGET_EFAULT;
7712                old_act->_sa_handler = oact._sa_handler;
7713                old_act->sa_flags = oact.sa_flags;
7714                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7715                old_act->sa_mask.sig[1] = 0;
7716                old_act->sa_mask.sig[2] = 0;
7717                old_act->sa_mask.sig[3] = 0;
7718                unlock_user_struct(old_act, arg3, 1);
7719            }
7720#else
7721            struct target_old_sigaction *old_act;
7722            struct target_sigaction act, oact, *pact;
7723            if (arg2) {
7724                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7725                    return -TARGET_EFAULT;
7726                act._sa_handler = old_act->_sa_handler;
7727                target_siginitset(&act.sa_mask, old_act->sa_mask);
7728                act.sa_flags = old_act->sa_flags;
7729                act.sa_restorer = old_act->sa_restorer;
7730#ifdef TARGET_ARCH_HAS_KA_RESTORER
7731                act.ka_restorer = 0;
7732#endif
7733                unlock_user_struct(old_act, arg2, 0);
7734                pact = &act;
7735            } else {
7736                pact = NULL;
7737            }
7738            ret = get_errno(do_sigaction(arg1, pact, &oact));
7739            if (!is_error(ret) && arg3) {
7740                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7741                    return -TARGET_EFAULT;
7742                old_act->_sa_handler = oact._sa_handler;
7743                old_act->sa_mask = oact.sa_mask.sig[0];
7744                old_act->sa_flags = oact.sa_flags;
7745                old_act->sa_restorer = oact.sa_restorer;
7746                unlock_user_struct(old_act, arg3, 1);
7747            }
7748#endif
7749        }
7750        return ret;
7751#endif
7752    case TARGET_NR_rt_sigaction:
7753        {
7754#if defined(TARGET_ALPHA)
7755            /* For Alpha and SPARC this is a 5-argument syscall, with
7756             * a 'restorer' parameter which must be copied into the
7757             * sa_restorer field of the sigaction struct.
7758             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7759             * and arg5 is the sigsetsize.
7760             * Alpha also has a separate rt_sigaction struct that it uses
7761             * here; SPARC uses the usual sigaction struct.
7762             */
7763            struct target_rt_sigaction *rt_act;
7764            struct target_sigaction act, oact, *pact = 0;
7765
7766            if (arg4 != sizeof(target_sigset_t)) {
7767                return -TARGET_EINVAL;
7768            }
7769            if (arg2) {
7770                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7771                    return -TARGET_EFAULT;
7772                act._sa_handler = rt_act->_sa_handler;
7773                act.sa_mask = rt_act->sa_mask;
7774                act.sa_flags = rt_act->sa_flags;
7775                act.sa_restorer = arg5;
7776                unlock_user_struct(rt_act, arg2, 0);
7777                pact = &act;
7778            }
7779            ret = get_errno(do_sigaction(arg1, pact, &oact));
7780            if (!is_error(ret) && arg3) {
7781                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7782                    return -TARGET_EFAULT;
7783                rt_act->_sa_handler = oact._sa_handler;
7784                rt_act->sa_mask = oact.sa_mask;
7785                rt_act->sa_flags = oact.sa_flags;
7786                unlock_user_struct(rt_act, arg3, 1);
7787            }
7788#else
7789#ifdef TARGET_SPARC
7790            target_ulong restorer = arg4;
7791            target_ulong sigsetsize = arg5;
7792#else
7793            target_ulong sigsetsize = arg4;
7794#endif
7795            struct target_sigaction *act;
7796            struct target_sigaction *oact;
7797
7798            if (sigsetsize != sizeof(target_sigset_t)) {
7799                return -TARGET_EINVAL;
7800            }
7801            if (arg2) {
7802                if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7803                    return -TARGET_EFAULT;
7804                }
7805#ifdef TARGET_ARCH_HAS_KA_RESTORER
7806                act->ka_restorer = restorer;
7807#endif
7808            } else {
7809                act = NULL;
7810            }
7811            if (arg3) {
7812                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7813                    ret = -TARGET_EFAULT;
7814                    goto rt_sigaction_fail;
7815                }
7816            } else
7817                oact = NULL;
7818            ret = get_errno(do_sigaction(arg1, act, oact));
7819        rt_sigaction_fail:
7820            if (act)
7821                unlock_user_struct(act, arg2, 0);
7822            if (oact)
7823                unlock_user_struct(oact, arg3, 1);
7824#endif
7825        }
7826        return ret;
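        /*
         * Illustrative sketch of the guest-facing forms handled above: on
         * most targets this is the plain 4-argument
         *     rt_sigaction(sig, act, oact, sigsetsize);
         * while Alpha and SPARC pass an extra restorer pointer (arg5 and
         * arg4 respectively).  Where TARGET_ARCH_HAS_KA_RESTORER is set,
         * that pointer is kept in ka_restorer so the signal-frame code can
         * return through the guest-supplied trampoline.
         */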
7827#ifdef TARGET_NR_sgetmask /* not on alpha */
7828    case TARGET_NR_sgetmask:
7829        {
7830            sigset_t cur_set;
7831            abi_ulong target_set;
7832            ret = do_sigprocmask(0, NULL, &cur_set);
7833            if (!ret) {
7834                host_to_target_old_sigset(&target_set, &cur_set);
7835                ret = target_set;
7836            }
7837        }
7838        return ret;
7839#endif
7840#ifdef TARGET_NR_ssetmask /* not on alpha */
7841    case TARGET_NR_ssetmask:
7842        {
7843            sigset_t set, oset;
7844            abi_ulong target_set = arg1;
7845            target_to_host_old_sigset(&set, &target_set);
7846            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7847            if (!ret) {
7848                host_to_target_old_sigset(&target_set, &oset);
7849                ret = target_set;
7850            }
7851        }
7852        return ret;
7853#endif
7854#ifdef TARGET_NR_sigprocmask
7855    case TARGET_NR_sigprocmask:
7856        {
7857#if defined(TARGET_ALPHA)
7858            sigset_t set, oldset;
7859            abi_ulong mask;
7860            int how;
7861
7862            switch (arg1) {
7863            case TARGET_SIG_BLOCK:
7864                how = SIG_BLOCK;
7865                break;
7866            case TARGET_SIG_UNBLOCK:
7867                how = SIG_UNBLOCK;
7868                break;
7869            case TARGET_SIG_SETMASK:
7870                how = SIG_SETMASK;
7871                break;
7872            default:
7873                return -TARGET_EINVAL;
7874            }
7875            mask = arg2;
7876            target_to_host_old_sigset(&set, &mask);
7877
7878            ret = do_sigprocmask(how, &set, &oldset);
7879            if (!is_error(ret)) {
7880                host_to_target_old_sigset(&mask, &oldset);
7881                ret = mask;
7882                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7883            }
7884#else
7885            sigset_t set, oldset, *set_ptr;
7886            int how;
7887
7888            if (arg2) {
7889                switch (arg1) {
7890                case TARGET_SIG_BLOCK:
7891                    how = SIG_BLOCK;
7892                    break;
7893                case TARGET_SIG_UNBLOCK:
7894                    how = SIG_UNBLOCK;
7895                    break;
7896                case TARGET_SIG_SETMASK:
7897                    how = SIG_SETMASK;
7898                    break;
7899                default:
7900                    return -TARGET_EINVAL;
7901                }
7902                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7903                    return -TARGET_EFAULT;
7904                target_to_host_old_sigset(&set, p);
7905                unlock_user(p, arg2, 0);
7906                set_ptr = &set;
7907            } else {
7908                how = 0;
7909                set_ptr = NULL;
7910            }
7911            ret = do_sigprocmask(how, set_ptr, &oldset);
7912            if (!is_error(ret) && arg3) {
7913                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7914                    return -TARGET_EFAULT;
7915                host_to_target_old_sigset(p, &oldset);
7916                unlock_user(p, arg3, sizeof(target_sigset_t));
7917            }
7918#endif
7919        }
7920        return ret;
7921#endif
7922    case TARGET_NR_rt_sigprocmask:
7923        {
7924            int how = arg1;
7925            sigset_t set, oldset, *set_ptr;
7926
7927            if (arg4 != sizeof(target_sigset_t)) {
7928                return -TARGET_EINVAL;
7929            }
7930
7931            if (arg2) {
7932                switch(how) {
7933                case TARGET_SIG_BLOCK:
7934                    how = SIG_BLOCK;
7935                    break;
7936                case TARGET_SIG_UNBLOCK:
7937                    how = SIG_UNBLOCK;
7938                    break;
7939                case TARGET_SIG_SETMASK:
7940                    how = SIG_SETMASK;
7941                    break;
7942                default:
7943                    return -TARGET_EINVAL;
7944                }
7945                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7946                    return -TARGET_EFAULT;
7947                target_to_host_sigset(&set, p);
7948                unlock_user(p, arg2, 0);
7949                set_ptr = &set;
7950            } else {
7951                how = 0;
7952                set_ptr = NULL;
7953            }
7954            ret = do_sigprocmask(how, set_ptr, &oldset);
7955            if (!is_error(ret) && arg3) {
7956                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7957                    return -TARGET_EFAULT;
7958                host_to_target_sigset(p, &oldset);
7959                unlock_user(p, arg3, sizeof(target_sigset_t));
7960            }
7961        }
7962        return ret;
7963#ifdef TARGET_NR_sigpending
7964    case TARGET_NR_sigpending:
7965        {
7966            sigset_t set;
7967            ret = get_errno(sigpending(&set));
7968            if (!is_error(ret)) {
7969                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7970                    return -TARGET_EFAULT;
7971                host_to_target_old_sigset(p, &set);
7972                unlock_user(p, arg1, sizeof(target_sigset_t));
7973            }
7974        }
7975        return ret;
7976#endif
7977    case TARGET_NR_rt_sigpending:
7978        {
7979            sigset_t set;
7980
7981            /* Yes, this check is >, not != like most. We follow the kernel's
7982             * logic here: the kernel implements NR_sigpending through the
7983             * same code path, and in that case the old_sigset_t is smaller
7984             * in size.
7985             */
7986            if (arg2 > sizeof(target_sigset_t)) {
7987                return -TARGET_EINVAL;
7988            }
7989
7990            ret = get_errno(sigpending(&set));
7991            if (!is_error(ret)) {
7992                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7993                    return -TARGET_EFAULT;
7994                host_to_target_sigset(p, &set);
7995                unlock_user(p, arg1, sizeof(target_sigset_t));
7996            }
7997        }
7998        return ret;
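        /*
         * Example of why the '>' check above is deliberate: a guest libc
         * that implements the old sigpending() on top of rt_sigpending()
         * may pass sizeof(old_sigset_t), which is smaller than
         * sizeof(sigset_t); like the kernel, we accept any size up to the
         * full target_sigset_t.
         */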
7999#ifdef TARGET_NR_sigsuspend
8000    case TARGET_NR_sigsuspend:
8001        {
8002            TaskState *ts = cpu->opaque;
8003#if defined(TARGET_ALPHA)
8004            abi_ulong mask = arg1;
8005            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8006#else
8007            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8008                return -TARGET_EFAULT;
8009            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8010            unlock_user(p, arg1, 0);
8011#endif
8012            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8013                                               SIGSET_T_SIZE));
8014            if (ret != -TARGET_ERESTARTSYS) {
8015                ts->in_sigsuspend = 1;
8016            }
8017        }
8018        return ret;
8019#endif
8020    case TARGET_NR_rt_sigsuspend:
8021        {
8022            TaskState *ts = cpu->opaque;
8023
8024            if (arg2 != sizeof(target_sigset_t)) {
8025                return -TARGET_EINVAL;
8026            }
8027            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8028                return -TARGET_EFAULT;
8029            target_to_host_sigset(&ts->sigsuspend_mask, p);
8030            unlock_user(p, arg1, 0);
8031            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8032                                               SIGSET_T_SIZE));
8033            if (ret != -TARGET_ERESTARTSYS) {
8034                ts->in_sigsuspend = 1;
8035            }
8036        }
8037        return ret;
8038    case TARGET_NR_rt_sigtimedwait:
8039        {
8040            sigset_t set;
8041            struct timespec uts, *puts;
8042            siginfo_t uinfo;
8043
8044            if (arg4 != sizeof(target_sigset_t)) {
8045                return -TARGET_EINVAL;
8046            }
8047
8048            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8049                return -TARGET_EFAULT;
8050            target_to_host_sigset(&set, p);
8051            unlock_user(p, arg1, 0);
8052            if (arg3) {
8053                puts = &uts;
8054                target_to_host_timespec(puts, arg3);
8055            } else {
8056                puts = NULL;
8057            }
8058            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8059                                                 SIGSET_T_SIZE));
8060            if (!is_error(ret)) {
8061                if (arg2) {
8062                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8063                                  0);
8064                    if (!p) {
8065                        return -TARGET_EFAULT;
8066                    }
8067                    host_to_target_siginfo(p, &uinfo);
8068                    unlock_user(p, arg2, sizeof(target_siginfo_t));
8069                }
8070                ret = host_to_target_signal(ret);
8071            }
8072        }
8073        return ret;
8074    case TARGET_NR_rt_sigqueueinfo:
8075        {
8076            siginfo_t uinfo;
8077
8078            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8079            if (!p) {
8080                return -TARGET_EFAULT;
8081            }
8082            target_to_host_siginfo(&uinfo, p);
8083            unlock_user(p, arg3, 0);
8084            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8085        }
8086        return ret;
8087    case TARGET_NR_rt_tgsigqueueinfo:
8088        {
8089            siginfo_t uinfo;
8090
8091            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8092            if (!p) {
8093                return -TARGET_EFAULT;
8094            }
8095            target_to_host_siginfo(&uinfo, p);
8096            unlock_user(p, arg4, 0);
8097            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8098        }
8099        return ret;
8100#ifdef TARGET_NR_sigreturn
8101    case TARGET_NR_sigreturn:
8102        if (block_signals()) {
8103            return -TARGET_ERESTARTSYS;
8104        }
8105        return do_sigreturn(cpu_env);
8106#endif
8107    case TARGET_NR_rt_sigreturn:
8108        if (block_signals()) {
8109            return -TARGET_ERESTARTSYS;
8110        }
8111        return do_rt_sigreturn(cpu_env);
8112    case TARGET_NR_sethostname:
8113        if (!(p = lock_user_string(arg1)))
8114            return -TARGET_EFAULT;
8115        ret = get_errno(sethostname(p, arg2));
8116        unlock_user(p, arg1, 0);
8117        return ret;
8118#ifdef TARGET_NR_setrlimit
8119    case TARGET_NR_setrlimit:
8120        {
8121            int resource = target_to_host_resource(arg1);
8122            struct target_rlimit *target_rlim;
8123            struct rlimit rlim;
8124            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8125                return -TARGET_EFAULT;
8126            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8127            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8128            unlock_user_struct(target_rlim, arg2, 0);
8129            /*
8130             * If we just passed through resource limit settings for memory then
8131             * they would also apply to QEMU's own allocations, and QEMU will
8132             * crash or hang or die if its allocations fail. Ideally we would
8133             * track the guest allocations in QEMU and apply the limits ourselves.
8134             * For now, just tell the guest the call succeeded but don't actually
8135             * limit anything.
8136             */
8137            if (resource != RLIMIT_AS &&
8138                resource != RLIMIT_DATA &&
8139                resource != RLIMIT_STACK) {
8140                return get_errno(setrlimit(resource, &rlim));
8141            } else {
8142                return 0;
8143            }
8144        }
8145#endif
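        /*
         * Behavioural sketch of the setrlimit case above: a guest call like
         *     struct rlimit rl = { 1 << 20, 1 << 20 };
         *     setrlimit(RLIMIT_DATA, &rl);
         * is reported as successful but enforces nothing, whereas a
         * non-memory resource such as RLIMIT_NOFILE is forwarded to the
         * host setrlimit() unchanged.
         */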
8146#ifdef TARGET_NR_getrlimit
8147    case TARGET_NR_getrlimit:
8148        {
8149            int resource = target_to_host_resource(arg1);
8150            struct target_rlimit *target_rlim;
8151            struct rlimit rlim;
8152
8153            ret = get_errno(getrlimit(resource, &rlim));
8154            if (!is_error(ret)) {
8155                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8156                    return -TARGET_EFAULT;
8157                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8158                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8159                unlock_user_struct(target_rlim, arg2, 1);
8160            }
8161        }
8162        return ret;
8163#endif
8164    case TARGET_NR_getrusage:
8165        {
8166            struct rusage rusage;
8167            ret = get_errno(getrusage(arg1, &rusage));
8168            if (!is_error(ret)) {
8169                ret = host_to_target_rusage(arg2, &rusage);
8170            }
8171        }
8172        return ret;
8173    case TARGET_NR_gettimeofday:
8174        {
8175            struct timeval tv;
8176            ret = get_errno(gettimeofday(&tv, NULL));
8177            if (!is_error(ret)) {
8178                if (copy_to_user_timeval(arg1, &tv))
8179                    return -TARGET_EFAULT;
8180            }
8181        }
8182        return ret;
8183    case TARGET_NR_settimeofday:
8184        {
8185            struct timeval tv, *ptv = NULL;
8186            struct timezone tz, *ptz = NULL;
8187
8188            if (arg1) {
8189                if (copy_from_user_timeval(&tv, arg1)) {
8190                    return -TARGET_EFAULT;
8191                }
8192                ptv = &tv;
8193            }
8194
8195            if (arg2) {
8196                if (copy_from_user_timezone(&tz, arg2)) {
8197                    return -TARGET_EFAULT;
8198                }
8199                ptz = &tz;
8200            }
8201
8202            return get_errno(settimeofday(ptv, ptz));
8203        }
8204#if defined(TARGET_NR_select)
8205    case TARGET_NR_select:
8206#if defined(TARGET_WANT_NI_OLD_SELECT)
8207        /* some architectures used to have old_select here
8208         * but now fail it with ENOSYS.
8209         */
8210        ret = -TARGET_ENOSYS;
8211#elif defined(TARGET_WANT_OLD_SYS_SELECT)
8212        ret = do_old_select(arg1);
8213#else
8214        ret = do_select(arg1, arg2, arg3, arg4, arg5);
8215#endif
8216        return ret;
8217#endif
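        /*
         * Sketch of the old-select convention assumed by do_old_select()
         * above: arg1 is a single guest pointer to a block holding the five
         * real arguments {n, readfds, writefds, exceptfds, timeout}, rather
         * than five separate syscall arguments.
         */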
8218#ifdef TARGET_NR_pselect6
8219    case TARGET_NR_pselect6:
8220        {
8221            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8222            fd_set rfds, wfds, efds;
8223            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8224            struct timespec ts, *ts_ptr;
8225
8226            /*
8227             * The 6th arg is actually two args smashed together,
8228             * so we cannot use the C library.
8229             */
8230            sigset_t set;
8231            struct {
8232                sigset_t *set;
8233                size_t size;
8234            } sig, *sig_ptr;
8235
8236            abi_ulong arg_sigset, arg_sigsize, *arg7;
8237            target_sigset_t *target_sigset;
8238
8239            n = arg1;
8240            rfd_addr = arg2;
8241            wfd_addr = arg3;
8242            efd_addr = arg4;
8243            ts_addr = arg5;
8244
8245            ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8246            if (ret) {
8247                return ret;
8248            }
8249            ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8250            if (ret) {
8251                return ret;
8252            }
8253            ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8254            if (ret) {
8255                return ret;
8256            }
8257
8258            /*
8259             * This takes a timespec, and not a timeval, so we cannot
8260             * use the do_select() helper ...
8261             */
8262            if (ts_addr) {
8263                if (target_to_host_timespec(&ts, ts_addr)) {
8264                    return -TARGET_EFAULT;
8265                }
8266                ts_ptr = &ts;
8267            } else {
8268                ts_ptr = NULL;
8269            }
8270
8271            /* Extract the two packed args for the sigset */
8272            if (arg6) {
8273                sig_ptr = &sig;
8274                sig.size = SIGSET_T_SIZE;
8275
8276                arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8277                if (!arg7) {
8278                    return -TARGET_EFAULT;
8279                }
8280                arg_sigset = tswapal(arg7[0]);
8281                arg_sigsize = tswapal(arg7[1]);
8282                unlock_user(arg7, arg6, 0);
8283
8284                if (arg_sigset) {
8285                    sig.set = &set;
8286                    if (arg_sigsize != sizeof(*target_sigset)) {
8287                        /* Like the kernel, we enforce correct size sigsets */
8288                        return -TARGET_EINVAL;
8289                    }
8290                    target_sigset = lock_user(VERIFY_READ, arg_sigset,
8291                                              sizeof(*target_sigset), 1);
8292                    if (!target_sigset) {
8293                        return -TARGET_EFAULT;
8294                    }
8295                    target_to_host_sigset(&set, target_sigset);
8296                    unlock_user(target_sigset, arg_sigset, 0);
8297                } else {
8298                    sig.set = NULL;
8299                }
8300            } else {
8301                sig_ptr = NULL;
8302            }
8303
8304            ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8305                                          ts_ptr, sig_ptr));
8306
8307            if (!is_error(ret)) {
8308                if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8309                    return -TARGET_EFAULT;
8310                if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8311                    return -TARGET_EFAULT;
8312                if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8313                    return -TARGET_EFAULT;
8314
8315                if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8316                    return -TARGET_EFAULT;
8317            }
8318        }
8319        return ret;
8320#endif
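        /*
         * Illustrative layout of the packed 6th argument unpacked above:
         * the guest hands pselect6() a pointer to roughly
         *     struct { abi_ulong sigset_ptr; abi_ulong sigset_size; };
         * which is why two abi_ulongs are read from arg6 and converted with
         * tswapal() before the sigset itself is fetched and translated.
         */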
8321#ifdef TARGET_NR_symlink
8322    case TARGET_NR_symlink:
8323        {
8324            void *p2;
8325            p = lock_user_string(arg1);
8326            p2 = lock_user_string(arg2);
8327            if (!p || !p2)
8328                ret = -TARGET_EFAULT;
8329            else
8330                ret = get_errno(symlink(p, p2));
8331            unlock_user(p2, arg2, 0);
8332            unlock_user(p, arg1, 0);
8333        }
8334        return ret;
8335#endif
8336#if defined(TARGET_NR_symlinkat)
8337    case TARGET_NR_symlinkat:
8338        {
8339            void *p2;
8340            p  = lock_user_string(arg1);
8341            p2 = lock_user_string(arg3);
8342            if (!p || !p2)
8343                ret = -TARGET_EFAULT;
8344            else
8345                ret = get_errno(symlinkat(p, arg2, p2));
8346            unlock_user(p2, arg3, 0);
8347            unlock_user(p, arg1, 0);
8348        }
8349        return ret;
8350#endif
8351#ifdef TARGET_NR_readlink
8352    case TARGET_NR_readlink:
8353        {
8354            void *p2;
8355            p = lock_user_string(arg1);
8356            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8357            if (!p || !p2) {
8358                ret = -TARGET_EFAULT;
8359            } else if (!arg3) {
8360                /* Short circuit this for the magic exe check. */
8361                ret = -TARGET_EINVAL;
8362            } else if (is_proc_myself((const char *)p, "exe")) {
8363                char real[PATH_MAX], *temp;
8364                temp = realpath(exec_path, real);
8365                /* Return value is # of bytes that we wrote to the buffer. */
8366                if (temp == NULL) {
8367                    ret = get_errno(-1);
8368                } else {
8369                    /* Don't worry about sign mismatch as earlier mapping
8370                     * logic would have thrown a bad address error. */
8371                    ret = MIN(strlen(real), arg3);
8372                    /* We cannot NUL terminate the string. */
8373                    memcpy(p2, real, ret);
8374                }
8375            } else {
8376                ret = get_errno(readlink(path(p), p2, arg3));
8377            }
8378            unlock_user(p2, arg2, ret);
8379            unlock_user(p, arg1, 0);
8380        }
8381        return ret;
8382#endif
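        /*
         * Example of the /proc/self/exe special case above: a guest doing
         *     char buf[PATH_MAX];
         *     ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf));
         * receives the path of the emulated binary (exec_path run through
         * realpath()), truncated to the buffer size and, like the real
         * syscall, not NUL-terminated.
         */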
8383#if defined(TARGET_NR_readlinkat)
8384    case TARGET_NR_readlinkat:
8385        {
8386            void *p2;
8387            p  = lock_user_string(arg2);
8388            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8389            if (!p || !p2) {
8390                ret = -TARGET_EFAULT;
8391            } else if (is_proc_myself((const char *)p, "exe")) {
8392                char real[PATH_MAX], *temp;
8393                temp = realpath(exec_path, real);
8394                ret = temp == NULL ? get_errno(-1) : strlen(real);
8395                snprintf((char *)p2, arg4, "%s", real);
8396            } else {
8397                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8398            }
8399            unlock_user(p2, arg3, ret);
8400            unlock_user(p, arg2, 0);
8401        }
8402        return ret;
8403#endif
8404#ifdef TARGET_NR_swapon
8405    case TARGET_NR_swapon:
8406        if (!(p = lock_user_string(arg1)))
8407            return -TARGET_EFAULT;
8408        ret = get_errno(swapon(p, arg2));
8409        unlock_user(p, arg1, 0);
8410        return ret;
8411#endif
8412    case TARGET_NR_reboot:
8413        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8414           /* arg4 is only valid for LINUX_REBOOT_CMD_RESTART2 */
8415           p = lock_user_string(arg4);
8416           if (!p) {
8417               return -TARGET_EFAULT;
8418           }
8419           ret = get_errno(reboot(arg1, arg2, arg3, p));
8420           unlock_user(p, arg4, 0);
8421        } else {
8422           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8423        }
8424        return ret;
8425#ifdef TARGET_NR_mmap
8426    case TARGET_NR_mmap:
8427#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8428    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8429    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8430    || defined(TARGET_S390X)
8431        {
8432            abi_ulong *v;
8433            abi_ulong v1, v2, v3, v4, v5, v6;
8434            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8435                return -TARGET_EFAULT;
8436            v1 = tswapal(v[0]);
8437            v2 = tswapal(v[1]);
8438            v3 = tswapal(v[2]);
8439            v4 = tswapal(v[3]);
8440            v5 = tswapal(v[4]);
8441            v6 = tswapal(v[5]);
8442            unlock_user(v, arg1, 0);
8443            ret = get_errno(target_mmap(v1, v2, v3,
8444                                        target_to_host_bitmask(v4, mmap_flags_tbl),
8445                                        v5, v6));
8446        }
8447#else
8448        ret = get_errno(target_mmap(arg1, arg2, arg3,
8449                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
8450                                    arg5,
8451                                    arg6));
8452#endif
8453        return ret;
8454#endif
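        /*
         * Sketch of the old mmap ABI handled by the first branch above: on
         * those 32-bit targets the guest passes one pointer to six
         * abi_ulong values {addr, len, prot, flags, fd, offset} in memory
         * instead of six register arguments, so they are read with
         * lock_user() and byte-swapped individually before target_mmap().
         */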
8455#ifdef TARGET_NR_mmap2
8456    case TARGET_NR_mmap2:
8457#ifndef MMAP_SHIFT
8458#define MMAP_SHIFT 12
8459#endif
8460        ret = target_mmap(arg1, arg2, arg3,
8461                          target_to_host_bitmask(arg4, mmap_flags_tbl),
8462                          arg5, arg6 << MMAP_SHIFT);
8463        return get_errno(ret);
8464#endif
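        /*
         * mmap2 takes its file offset in units of 1 << MMAP_SHIFT bytes
         * (4096 unless a target overrides it), hence 'arg6 << MMAP_SHIFT'
         * above: e.g. a guest offset argument of 3 maps from byte 12288.
         */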
8465    case TARGET_NR_munmap:
8466        return get_errno(target_munmap(arg1, arg2));
8467    case TARGET_NR_mprotect:
8468        {
8469            TaskState *ts = cpu->opaque;
8470            /* Special hack to detect libc making the stack executable.  */
8471            if ((arg3 & PROT_GROWSDOWN)
8472                && arg1 >= ts->info->stack_limit
8473                && arg1 <= ts->info->start_stack) {
8474                arg3 &= ~PROT_GROWSDOWN;
8475                arg2 = arg2 + arg1 - ts->info->stack_limit;
8476                arg1 = ts->info->stack_limit;
8477            }
8478        }
8479        return get_errno(target_mprotect(arg1, arg2, arg3));
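        /*
         * Illustrative effect of the PROT_GROWSDOWN hack above: if the
         * guest libc calls mprotect() with PROT_GROWSDOWN on an address
         * inside the initial stack, the request is extended downwards to
         * the recorded stack_limit and the PROT_GROWSDOWN bit is dropped
         * before calling target_mprotect().
         */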
8480#ifdef TARGET_NR_mremap
8481    case TARGET_NR_mremap:
8482        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8483#endif
8484        /* ??? msync/mlock/munlock are broken for softmmu.  */
8485#ifdef TARGET_NR_msync
8486    case TARGET_NR_msync:
8487        return get_errno(msync(g2h(arg1), arg2, arg3));
8488#endif
8489#ifdef TARGET_NR_mlock
8490    case TARGET_NR_mlock:
8491        return get_errno(mlock(g2h(arg1), arg2));
8492#endif
8493#ifdef TARGET_NR_munlock
8494    case TARGET_NR_munlock:
8495        return get_errno(munlock(g2h(arg1), arg2));
8496#endif
8497#ifdef TARGET_NR_mlockall
8498    case TARGET_NR_mlockall:
8499        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8500#endif
8501#ifdef TARGET_NR_munlockall
8502    case TARGET_NR_munlockall:
8503        return get_errno(munlockall());
8504#endif
8505#ifdef TARGET_NR_truncate
8506    case TARGET_NR_truncate:
8507        if (!(p = lock_user_string(arg1)))
8508            return -TARGET_EFAULT;
8509        ret = get_errno(truncate(p, arg2));
8510        unlock_user(p, arg1, 0);
8511        return ret;
8512#endif
8513#ifdef TARGET_NR_ftruncate
8514    case TARGET_NR_ftruncate:
8515        return get_errno(ftruncate(arg1, arg2));
8516#endif
8517    case TARGET_NR_fchmod:
8518        return get_errno(fchmod(arg1, arg2));
8519#if defined(TARGET_NR_fchmodat)
8520    case TARGET_NR_fchmodat:
8521        if (!(p = lock_user_string(arg2)))
8522            return -TARGET_EFAULT;
8523        ret = get_errno(fchmodat(arg1, p, arg3, 0));
8524        unlock_user(p, arg2, 0);
8525        return ret;
8526#endif
8527    case TARGET_NR_getpriority:
8528        /* Note that negative values are valid for getpriority, so we must
8529           differentiate based on errno settings.  */
8530        errno = 0;
8531        ret = getpriority(arg1, arg2);
8532        if (ret == -1 && errno != 0) {
8533            return -host_to_target_errno(errno);
8534        }
8535#ifdef TARGET_ALPHA
8536        /* Return value is the unbiased priority.  Signal no error.  */
8537        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8538#else
8539        /* Return value is a biased priority to avoid negative numbers.  */
8540        ret = 20 - ret;
8541#endif
8542        return ret;
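        /*
         * Worked example of the biasing above (non-Alpha targets): the host
         * getpriority() returns the nice value, so a process at nice -5
         * becomes ret = 20 - (-5) = 25, matching the kernel ABI in which
         * the syscall returns 1..40 and the guest libc converts back with
         * 20 - ret.
         */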
8543    case TARGET_NR_setpriority:
8544        return get_errno(setpriority(arg1, arg2, arg3));
8545#ifdef TARGET_NR_statfs
8546    case TARGET_NR_statfs:
8547        if (!(p = lock_user_string(arg1))) {
8548            return -TARGET_EFAULT;
8549        }
8550        ret = get_errno(statfs(path(p), &stfs));
8551        unlock_user(p, arg1, 0);
8552    convert_statfs:
8553        if (!is_error(ret)) {
8554            struct target_statfs *target_stfs;
8555
8556            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8557                return -TARGET_EFAULT;
8558            __put_user(stfs.f_type, &target_stfs->f_type);
8559            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8560            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8561            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8562            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8563            __put_user(stfs.f_files, &target_stfs->f_files);
8564            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8565            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8566            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8567            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8568            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8569#ifdef _STATFS_F_FLAGS
8570            __put_user(stfs.f_flags, &target_stfs->f_flags);
8571#else
8572            __put_user(0, &target_stfs->f_flags);
8573#endif
8574            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8575            unlock_user_struct(target_stfs, arg2, 1);
8576        }
8577        return ret;
8578#endif
8579#ifdef TARGET_NR_fstatfs
8580    case TARGET_NR_fstatfs:
8581        ret = get_errno(fstatfs(arg1, &stfs));
8582        goto convert_statfs;
8583#endif
8584#ifdef TARGET_NR_statfs64
8585    case TARGET_NR_statfs64:
8586        if (!(p = lock_user_string(arg1))) {
8587            return -TARGET_EFAULT;
8588        }
8589        ret = get_errno(statfs(path(p), &stfs));
8590        unlock_user(p, arg1, 0);
8591    convert_statfs64:
8592        if (!is_error(ret)) {
8593            struct target_statfs64 *target_stfs;
8594
8595            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8596                return -TARGET_EFAULT;
8597            __put_user(stfs.f_type, &target_stfs->f_type);
8598            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8599            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8600            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8601            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8602            __put_user(stfs.f_files, &target_stfs->f_files);
8603            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8604            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8605            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8606            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8607            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8608            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8609            unlock_user_struct(target_stfs, arg3, 1);
8610        }
8611        return ret;
8612    case TARGET_NR_fstatfs64:
8613        ret = get_errno(fstatfs(arg1, &stfs));
8614        goto convert_statfs64;
8615#endif
8616#ifdef TARGET_NR_socketcall
8617    case TARGET_NR_socketcall:
8618        return do_socketcall(arg1, arg2);
8619#endif
8620#ifdef TARGET_NR_accept
8621    case TARGET_NR_accept:
8622        return do_accept4(arg1, arg2, arg3, 0);
8623#endif
8624#ifdef TARGET_NR_accept4
8625    case TARGET_NR_accept4:
8626        return do_accept4(arg1, arg2, arg3, arg4);
8627#endif
8628#ifdef TARGET_NR_bind
8629    case TARGET_NR_bind:
8630        return do_bind(arg1, arg2, arg3);
8631#endif
8632#ifdef TARGET_NR_connect
8633    case TARGET_NR_connect:
8634        return do_connect(arg1, arg2, arg3);
8635#endif
8636#ifdef TARGET_NR_getpeername
8637    case TARGET_NR_getpeername:
8638        return do_getpeername(arg1, arg2, arg3);
8639#endif
8640#ifdef TARGET_NR_getsockname
8641    case TARGET_NR_getsockname:
8642        return do_getsockname(arg1, arg2, arg3);
8643#endif
8644#ifdef TARGET_NR_getsockopt
8645    case TARGET_NR_getsockopt:
8646        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8647#endif
8648#ifdef TARGET_NR_listen
8649    case TARGET_NR_listen:
8650        return get_errno(listen(arg1, arg2));
8651#endif
8652#ifdef TARGET_NR_recv
8653    case TARGET_NR_recv:
8654        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8655#endif
8656#ifdef TARGET_NR_recvfrom
8657    case TARGET_NR_recvfrom:
8658        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8659#endif
8660#ifdef TARGET_NR_recvmsg
8661    case TARGET_NR_recvmsg:
8662        return do_sendrecvmsg(arg1, arg2, arg3, 0);
8663#endif
8664#ifdef TARGET_NR_send
8665    case TARGET_NR_send:
8666        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8667#endif
8668#ifdef TARGET_NR_sendmsg
8669    case TARGET_NR_sendmsg:
8670        return do_sendrecvmsg(arg1, arg2, arg3, 1);
8671#endif
8672#ifdef TARGET_NR_sendmmsg
8673    case TARGET_NR_sendmmsg:
8674        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8675    case TARGET_NR_recvmmsg:
8676        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8677#endif
8678#ifdef TARGET_NR_sendto
8679    case TARGET_NR_sendto:
8680        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8681#endif
8682#ifdef TARGET_NR_shutdown
8683    case TARGET_NR_shutdown:
8684        return get_errno(shutdown(arg1, arg2));
8685#endif
8686#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8687    case TARGET_NR_getrandom:
8688        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8689        if (!p) {
8690            return -TARGET_EFAULT;
8691        }
8692        ret = get_errno(getrandom(p, arg2, arg3));
8693        unlock_user(p, arg1, ret);
8694        return ret;
8695#endif
8696#ifdef TARGET_NR_socket
8697    case TARGET_NR_socket:
8698        return do_socket(arg1, arg2, arg3);
8699#endif
8700#ifdef TARGET_NR_socketpair
8701    case TARGET_NR_socketpair:
8702        return do_socketpair(arg1, arg2, arg3, arg4);
8703#endif
8704#ifdef TARGET_NR_setsockopt
8705    case TARGET_NR_setsockopt:
8706        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8707#endif
8708#if defined(TARGET_NR_syslog)
8709    case TARGET_NR_syslog:
8710        {
8711            int len = arg3;
8712
8713            switch (arg1) {
8714            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
8715            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
8716            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
8717            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
8718            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
8719            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8720            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
8721            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
8722                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8723            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
8724            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
8725            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
8726                {
8727                    if (len < 0) {
8728                        return -TARGET_EINVAL;
8729                    }
8730                    if (len == 0) {
8731                        return 0;
8732                    }
8733                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8734                    if (!p) {
8735                        return -TARGET_EFAULT;
8736                    }
8737                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8738                    unlock_user(p, arg2, arg3);
8739                }
8740                return ret;
8741            default:
8742                return -TARGET_EINVAL;
8743            }
8744        }
8745        break;
8746#endif
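        /*
         * Illustrative guest call for the READ-style actions above, roughly
         * what glibc's klogctl() issues:
         *     char buf[4096];
         *     int n = klogctl(3, buf, sizeof(buf));  -- SYSLOG_ACTION_READ_ALL
         * arg2 is the guest buffer locked for writing and arg3 the number
         * of bytes requested from sys_syslog().
         */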
8747    case TARGET_NR_setitimer:
8748        {
8749            struct itimerval value, ovalue, *pvalue;
8750
8751            if (arg2) {
8752                pvalue = &value;
8753                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8754                    || copy_from_user_timeval(&pvalue->it_value,
8755                                              arg2 + sizeof(struct target_timeval)))
8756                    return -TARGET_EFAULT;
8757            } else {
8758                pvalue = NULL;
8759            }
8760            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8761            if (!is_error(ret) && arg3) {
8762                if (copy_to_user_timeval(arg3,
8763                                         &ovalue.it_interval)
8764                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8765                                            &ovalue.it_value))
8766                    return -TARGET_EFAULT;
8767            }
8768        }
8769        return ret;
8770    case TARGET_NR_getitimer:
8771        {
8772            struct itimerval value;
8773
8774            ret = get_errno(getitimer(arg1, &value));
8775            if (!is_error(ret) && arg2) {
8776                if (copy_to_user_timeval(arg2,
8777                                         &value.it_interval)
8778                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8779                                            &value.it_value))
8780                    return -TARGET_EFAULT;
8781            }
8782        }
8783        return ret;
8784#ifdef TARGET_NR_stat
8785    case TARGET_NR_stat:
8786        if (!(p = lock_user_string(arg1))) {
8787            return -TARGET_EFAULT;
8788        }
8789        ret = get_errno(stat(path(p), &st));
8790        unlock_user(p, arg1, 0);
8791        goto do_stat;
8792#endif
8793#ifdef TARGET_NR_lstat
8794    case TARGET_NR_lstat:
8795        if (!(p = lock_user_string(arg1))) {
8796            return -TARGET_EFAULT;
8797        }
8798        ret = get_errno(lstat(path(p), &st));
8799        unlock_user(p, arg1, 0);
8800        goto do_stat;
8801#endif
8802#ifdef TARGET_NR_fstat
8803    case TARGET_NR_fstat:
8804        {
8805            ret = get_errno(fstat(arg1, &st));
8806#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8807        do_stat:
8808#endif
8809            if (!is_error(ret)) {
8810                struct target_stat *target_st;
8811
8812                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8813                    return -TARGET_EFAULT;
8814                memset(target_st, 0, sizeof(*target_st));
8815                __put_user(st.st_dev, &target_st->st_dev);
8816                __put_user(st.st_ino, &target_st->st_ino);
8817                __put_user(st.st_mode, &target_st->st_mode);
8818                __put_user(st.st_uid, &target_st->st_uid);
8819                __put_user(st.st_gid, &target_st->st_gid);
8820                __put_user(st.st_nlink, &target_st->st_nlink);
8821                __put_user(st.st_rdev, &target_st->st_rdev);
8822                __put_user(st.st_size, &target_st->st_size);
8823                __put_user(st.st_blksize, &target_st->st_blksize);
8824                __put_user(st.st_blocks, &target_st->st_blocks);
8825                __put_user(st.st_atime, &target_st->target_st_atime);
8826                __put_user(st.st_mtime, &target_st->target_st_mtime);
8827                __put_user(st.st_ctime, &target_st->target_st_ctime);
8828                unlock_user_struct(target_st, arg2, 1);
8829            }
8830        }
8831        return ret;
8832#endif
8833    case TARGET_NR_vhangup:
8834        return get_errno(vhangup());
8835#ifdef TARGET_NR_syscall
8836    case TARGET_NR_syscall:
8837        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8838                          arg6, arg7, arg8, 0);
8839#endif
8840    case TARGET_NR_wait4:
8841        {
8842            int status;
8843            abi_long status_ptr = arg2;
8844            struct rusage rusage, *rusage_ptr;
8845            abi_ulong target_rusage = arg4;
8846            abi_long rusage_err;
8847            if (target_rusage)
8848                rusage_ptr = &rusage;
8849            else
8850                rusage_ptr = NULL;
8851            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8852            if (!is_error(ret)) {
8853                if (status_ptr && ret) {
8854                    status = host_to_target_waitstatus(status);
8855                    if (put_user_s32(status, status_ptr))
8856                        return -TARGET_EFAULT;
8857                }
8858                if (target_rusage) {
8859                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
8860                    if (rusage_err) {
8861                        ret = rusage_err;
8862                    }
8863                }
8864            }
8865        }
8866        return ret;
8867#ifdef TARGET_NR_swapoff
8868    case TARGET_NR_swapoff:
8869        if (!(p = lock_user_string(arg1)))
8870            return -TARGET_EFAULT;
8871        ret = get_errno(swapoff(p));
8872        unlock_user(p, arg1, 0);
8873        return ret;
8874#endif
8875    case TARGET_NR_sysinfo:
8876        {
8877            struct target_sysinfo *target_value;
8878            struct sysinfo value;
8879            ret = get_errno(sysinfo(&value));
8880            if (!is_error(ret) && arg1)
8881            {
8882                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8883                    return -TARGET_EFAULT;
8884                __put_user(value.uptime, &target_value->uptime);
8885                __put_user(value.loads[0], &target_value->loads[0]);
8886                __put_user(value.loads[1], &target_value->loads[1]);
8887                __put_user(value.loads[2], &target_value->loads[2]);
8888                __put_user(value.totalram, &target_value->totalram);
8889                __put_user(value.freeram, &target_value->freeram);
8890                __put_user(value.sharedram, &target_value->sharedram);
8891                __put_user(value.bufferram, &target_value->bufferram);
8892                __put_user(value.totalswap, &target_value->totalswap);
8893                __put_user(value.freeswap, &target_value->freeswap);
8894                __put_user(value.procs, &target_value->procs);
8895                __put_user(value.totalhigh, &target_value->totalhigh);
8896                __put_user(value.freehigh, &target_value->freehigh);
8897                __put_user(value.mem_unit, &target_value->mem_unit);
8898                unlock_user_struct(target_value, arg1, 1);
8899            }
8900        }
8901        return ret;
8902#ifdef TARGET_NR_ipc
8903    case TARGET_NR_ipc:
8904        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8905#endif
8906#ifdef TARGET_NR_semget
8907    case TARGET_NR_semget:
8908        return get_errno(semget(arg1, arg2, arg3));
8909#endif
8910#ifdef TARGET_NR_semop
8911    case TARGET_NR_semop:
8912        return do_semop(arg1, arg2, arg3);
8913#endif
8914#ifdef TARGET_NR_semctl
8915    case TARGET_NR_semctl:
8916        return do_semctl(arg1, arg2, arg3, arg4);
8917#endif
8918#ifdef TARGET_NR_msgctl
8919    case TARGET_NR_msgctl:
8920        return do_msgctl(arg1, arg2, arg3);
8921#endif
8922#ifdef TARGET_NR_msgget
8923    case TARGET_NR_msgget:
8924        return get_errno(msgget(arg1, arg2));
8925#endif
8926#ifdef TARGET_NR_msgrcv
8927    case TARGET_NR_msgrcv:
8928        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8929#endif
8930#ifdef TARGET_NR_msgsnd
8931    case TARGET_NR_msgsnd:
8932        return do_msgsnd(arg1, arg2, arg3, arg4);
8933#endif
8934#ifdef TARGET_NR_shmget
8935    case TARGET_NR_shmget:
8936        return get_errno(shmget(arg1, arg2, arg3));
8937#endif
8938#ifdef TARGET_NR_shmctl
8939    case TARGET_NR_shmctl:
8940        return do_shmctl(arg1, arg2, arg3);
8941#endif
8942#ifdef TARGET_NR_shmat
8943    case TARGET_NR_shmat:
8944        return do_shmat(cpu_env, arg1, arg2, arg3);
8945#endif
8946#ifdef TARGET_NR_shmdt
8947    case TARGET_NR_shmdt:
8948        return do_shmdt(arg1);
8949#endif
8950    case TARGET_NR_fsync:
8951        return get_errno(fsync(arg1));
8952    case TARGET_NR_clone:
8953        /* Linux manages to have three different orderings for its
8954         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8955         * match the kernel's CONFIG_CLONE_* settings.
8956         * Microblaze is further special in that it uses a sixth
8957         * implicit argument to clone for the TLS pointer.
8958         */
8959#if defined(TARGET_MICROBLAZE)
8960        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8961#elif defined(TARGET_CLONE_BACKWARDS)
8962        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8963#elif defined(TARGET_CLONE_BACKWARDS2)
8964        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8965#else
8966        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8967#endif
8968        return ret;
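        /*
         * Sketch of the three clone() argument orderings dispatched above
         * (ptid = parent_tidptr, ctid = child_tidptr):
         *     default                  clone(flags, newsp, ptid, ctid, tls)
         *     TARGET_CLONE_BACKWARDS   clone(flags, newsp, ptid, tls, ctid)
         *     TARGET_CLONE_BACKWARDS2  clone(newsp, flags, ptid, ctid, tls)
         * As the calls above imply, do_fork() always takes
         * (env, flags, newsp, ptid, tls, ctid), which is why the argN
         * positions are shuffled per target.
         */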
8969#ifdef __NR_exit_group
8970        /* new thread calls */
8971    case TARGET_NR_exit_group:
8972        preexit_cleanup(cpu_env, arg1);
8973        return get_errno(exit_group(arg1));
8974#endif
8975    case TARGET_NR_setdomainname:
8976        if (!(p = lock_user_string(arg1)))
8977            return -TARGET_EFAULT;
8978        ret = get_errno(setdomainname(p, arg2));
8979        unlock_user(p, arg1, 0);
8980        return ret;
8981    case TARGET_NR_uname:
8982        /* no need to transcode because we use the linux syscall */
8983        {
8984            struct new_utsname * buf;
8985
8986            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8987                return -TARGET_EFAULT;
8988            ret = get_errno(sys_uname(buf));
8989            if (!is_error(ret)) {
8990                /* Overwrite the native machine name with whatever is being
8991                   emulated. */
8992                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
8993                          sizeof(buf->machine));
8994                /* Allow the user to override the reported release.  */
8995                if (qemu_uname_release && *qemu_uname_release) {
8996                    g_strlcpy(buf->release, qemu_uname_release,
8997                              sizeof(buf->release));
8998                }
8999            }
9000            unlock_user_struct(buf, arg1, 1);
9001        }
9002        return ret;
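        /*
         * Example of the overrides above: running the guest with
         *     qemu-arm -r 4.19.0 ./a.out
         * (or with QEMU_UNAME=4.19.0 in the environment) makes uname()
         * report release "4.19.0", and the machine field is always the
         * string cpu_to_uname_machine() derives from the emulated CPU
         * rather than the host's.
         */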
9003#ifdef TARGET_I386
9004    case TARGET_NR_modify_ldt:
9005        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9006#if !defined(TARGET_X86_64)
9007    case TARGET_NR_vm86:
9008        return do_vm86(cpu_env, arg1, arg2);
9009#endif
9010#endif
9011    case TARGET_NR_adjtimex:
9012        {
9013            struct timex host_buf;
9014
9015            if (target_to_host_timex(&host_buf, arg1) != 0) {
9016                return -TARGET_EFAULT;
9017            }
9018            ret = get_errno(adjtimex(&host_buf));
9019            if (!is_error(ret)) {
9020                if (host_to_target_timex(arg1, &host_buf) != 0) {
9021                    return -TARGET_EFAULT;
9022                }
9023            }
9024        }
9025        return ret;
9026#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9027    case TARGET_NR_clock_adjtime:
9028        {
9029            struct timex htx, *phtx = &htx;
9030
9031            if (target_to_host_timex(phtx, arg2) != 0) {
9032                return -TARGET_EFAULT;
9033            }
9034            ret = get_errno(clock_adjtime(arg1, phtx));
9035            if (!is_error(ret) && phtx) {
9036                if (host_to_target_timex(arg2, phtx) != 0) {
9037                    return -TARGET_EFAULT;
9038                }
9039            }
9040        }
9041        return ret;
9042#endif
9043    case TARGET_NR_getpgid:
9044        return get_errno(getpgid(arg1));
9045    case TARGET_NR_fchdir:
9046        return get_errno(fchdir(arg1));
9047    case TARGET_NR_personality:
9048        return get_errno(personality(arg1));
9049#ifdef TARGET_NR__llseek /* Not on alpha */
9050    case TARGET_NR__llseek:
9051        {
9052            int64_t res;
9053#if !defined(__NR_llseek)
9054            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9055            if (res == -1) {
9056                ret = get_errno(res);
9057            } else {
9058                ret = 0;
9059            }
9060#else
9061            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9062#endif
9063            if ((ret == 0) && put_user_s64(res, arg4)) {
9064                return -TARGET_EFAULT;
9065            }
9066        }
9067        return ret;
9068#endif
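        /*
         * Sketch of the fallback above for hosts without __NR_llseek
         * (typically 64-bit hosts): the guest's split 64-bit offset is
         * reassembled as (arg2 << 32) | arg3, passed to a plain lseek(),
         * and the resulting position is stored back through the arg4
         * pointer as a 64-bit value.
         */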
9069#ifdef TARGET_NR_getdents
9070    case TARGET_NR_getdents:
9071#ifdef EMULATE_GETDENTS_WITH_GETDENTS
9072#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9073        {
9074            struct target_dirent *target_dirp;
9075            struct linux_dirent *dirp;
9076            abi_long count = arg3;
9077
9078            dirp = g_try_malloc(count);
9079            if (!dirp) {
9080                return -TARGET_ENOMEM;
9081            }
9082
9083            ret = get_errno(sys_getdents(arg1, dirp, count));
9084            if (!is_error(ret)) {
9085                struct linux_dirent *de;
9086                struct target_dirent *tde;
9087                int len = ret;
9088                int reclen, treclen;
9089                int count1, tnamelen;
9090
9091                count1 = 0;
9092                de = dirp;
9093                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9094                    return -TARGET_EFAULT;
9095                tde = target_dirp;
9096                while (len > 0) {
9097                    reclen = de->d_reclen;
9098                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9099                    assert(tnamelen >= 0);
9100                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
9101                    assert(count1 + treclen <= count);
9102                    tde->d_reclen = tswap16(treclen);
9103                    tde->d_ino = tswapal(de->d_ino);
9104                    tde->d_off = tswapal(de->d_off);
9105                    memcpy(tde->d_name, de->d_name, tnamelen);
9106                    de = (struct linux_dirent *)((char *)de + reclen);
9107                    len -= reclen;
9108                    tde = (struct target_dirent *)((char *)tde + treclen);
9109                    count1 += treclen;
9110                }
9111                ret = count1;
9112                unlock_user(target_dirp, arg2, ret);
9113            }
9114            g_free(dirp);
9115        }
9116#else
9117        {
9118            struct linux_dirent *dirp;
9119            abi_long count = arg3;
9120
9121            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9122                return -TARGET_EFAULT;
9123            ret = get_errno(sys_getdents(arg1, dirp, count));
9124            if (!is_error(ret)) {
9125                struct linux_dirent *de;
9126                int len = ret;
9127                int reclen;
9128                de = dirp;
9129                while (len > 0) {
9130                    reclen = de->d_reclen;
9131                    if (reclen > len)
9132                        break;
9133                    de->d_reclen = tswap16(reclen);
9134                    tswapls(&de->d_ino);
9135                    tswapls(&de->d_off);
9136                    de = (struct linux_dirent *)((char *)de + reclen);
9137                    len -= reclen;
9138                }
9139            }
9140            unlock_user(dirp, arg2, ret);
9141        }
9142#endif
9143#else
9144        /* Implement getdents in terms of getdents64 */
9145        {
9146            struct linux_dirent64 *dirp;
9147            abi_long count = arg3;
9148
9149            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9150            if (!dirp) {
9151                return -TARGET_EFAULT;
9152            }
9153            ret = get_errno(sys_getdents64(arg1, dirp, count));
9154            if (!is_error(ret)) {
9155                /* Convert the dirent64 structs to target dirent.  We do this
9156                 * in-place, since we can guarantee that a target_dirent is no
9157                 * larger than a dirent64; however this means we have to be
9158                 * careful to read everything before writing in the new format.
9159                 */
9160                struct linux_dirent64 *de;
9161                struct target_dirent *tde;
9162                int len = ret;
9163                int tlen = 0;
9164
9165                de = dirp;
9166                tde = (struct target_dirent *)dirp;
9167                while (len > 0) {
9168                    int namelen, treclen;
9169                    int reclen = de->d_reclen;
9170                    uint64_t ino = de->d_ino;
9171                    int64_t off = de->d_off;
9172                    uint8_t type = de->d_type;
9173
9174                    namelen = strlen(de->d_name);
9175                    treclen = offsetof(struct target_dirent, d_name)
9176                        + namelen + 2;
9177                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9178
9179                    memmove(tde->d_name, de->d_name, namelen + 1);
9180                    tde->d_ino = tswapal(ino);
9181                    tde->d_off = tswapal(off);
9182                    tde->d_reclen = tswap16(treclen);
9183                    /* The target_dirent type is in what was formerly a padding
9184                     * byte at the end of the structure:
9185                     */
9186                    *(((char *)tde) + treclen - 1) = type;
9187
9188                    de = (struct linux_dirent64 *)((char *)de + reclen);
9189                    tde = (struct target_dirent *)((char *)tde + treclen);
9190                    len -= reclen;
9191                    tlen += treclen;
9192                }
9193                ret = tlen;
9194            }
9195            unlock_user(dirp, arg2, ret);
9196        }
9197#endif
9198        return ret;
9199#endif /* TARGET_NR_getdents */
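        /*
         * Layout sketch for the getdents-via-getdents64 fallback above:
         * each host record
         *     { u64 d_ino; s64 d_off; u16 d_reclen; u8 d_type; char d_name[]; }
         * is rewritten in place as a target_dirent
         *     { d_ino; d_off; u16 d_reclen; char d_name[]; }
         * with d_type stored in the final byte of the record.  The target
         * record is never larger, so the buffer can be walked forwards,
         * reading every field before overwriting it.
         */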
9200#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9201    case TARGET_NR_getdents64:
9202        {
9203            struct linux_dirent64 *dirp;
9204            abi_long count = arg3;
9205            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9206                return -TARGET_EFAULT;
9207            ret = get_errno(sys_getdents64(arg1, dirp, count));
9208            if (!is_error(ret)) {
9209                struct linux_dirent64 *de;
9210                int len = ret;
9211                int reclen;
9212                de = dirp;
9213                while (len > 0) {
9214                    reclen = de->d_reclen;
9215                    if (reclen > len)
9216                        break;
9217                    de->d_reclen = tswap16(reclen);
9218                    tswap64s((uint64_t *)&de->d_ino);
9219                    tswap64s((uint64_t *)&de->d_off);
9220                    de = (struct linux_dirent64 *)((char *)de + reclen);
9221                    len -= reclen;
9222                }
9223            }
9224            unlock_user(dirp, arg2, ret);
9225        }
9226        return ret;
9227#endif /* TARGET_NR_getdents64 */
9228#if defined(TARGET_NR__newselect)
9229    case TARGET_NR__newselect:
9230        return do_select(arg1, arg2, arg3, arg4, arg5);
9231#endif
9232#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9233# ifdef TARGET_NR_poll
9234    case TARGET_NR_poll:
9235# endif
9236# ifdef TARGET_NR_ppoll
9237    case TARGET_NR_ppoll:
9238# endif
9239        {
9240            struct target_pollfd *target_pfd;
9241            unsigned int nfds = arg2;
9242            struct pollfd *pfd;
9243            unsigned int i;
9244
9245            pfd = NULL;
9246            target_pfd = NULL;
9247            if (nfds) {
9248                if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9249                    return -TARGET_EINVAL;
9250                }
9251
9252                target_pfd = lock_user(VERIFY_WRITE, arg1,
9253                                       sizeof(struct target_pollfd) * nfds, 1);
9254                if (!target_pfd) {
9255                    return -TARGET_EFAULT;
9256                }
9257
9258                pfd = alloca(sizeof(struct pollfd) * nfds);
9259                for (i = 0; i < nfds; i++) {
9260                    pfd[i].fd = tswap32(target_pfd[i].fd);
9261                    pfd[i].events = tswap16(target_pfd[i].events);
9262                }
9263            }
9264
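                 /*
                  * Both poll() and ppoll() are funneled through safe_ppoll():
                  * for plain poll() the millisecond timeout is converted to a
                  * struct timespec below and no signal mask is passed.
                  */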
9265            switch (num) {
9266# ifdef TARGET_NR_ppoll
9267            case TARGET_NR_ppoll:
9268            {
9269                struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9270                target_sigset_t *target_set;
9271                sigset_t _set, *set = &_set;
9272
9273                if (arg3) {
9274                    if (target_to_host_timespec(timeout_ts, arg3)) {
9275                        unlock_user(target_pfd, arg1, 0);
9276                        return -TARGET_EFAULT;
9277                    }
9278                } else {
9279                    timeout_ts = NULL;
9280                }
9281
9282                if (arg4) {
9283                    if (arg5 != sizeof(target_sigset_t)) {
9284                        unlock_user(target_pfd, arg1, 0);
9285                        return -TARGET_EINVAL;
9286                    }
9287
9288                    target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9289                    if (!target_set) {
9290                        unlock_user(target_pfd, arg1, 0);
9291                        return -TARGET_EFAULT;
9292                    }
9293                    target_to_host_sigset(set, target_set);
9294                } else {
9295                    set = NULL;
9296                }
9297
9298                ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9299                                           set, SIGSET_T_SIZE));
9300
9301                if (!is_error(ret) && arg3) {
9302                    host_to_target_timespec(arg3, timeout_ts);
9303                }
9304                if (arg4) {
9305                    unlock_user(target_set, arg4, 0);
9306                }
9307                break;
9308            }
9309# endif
9310# ifdef TARGET_NR_poll
9311            case TARGET_NR_poll:
9312            {
9313                struct timespec ts, *pts;
9314
9315                if (arg3 >= 0) {
9316                    /* Convert ms to secs, ns */
9317                    ts.tv_sec = arg3 / 1000;
9318                    ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9319                    pts = &ts;
9320                } else {
 9321                    /* A negative poll() timeout means "infinite" */
9322                    pts = NULL;
9323                }
9324                ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9325                break;
9326            }
9327# endif
9328            default:
9329                g_assert_not_reached();
9330            }
9331
9332            if (!is_error(ret)) {
9333                for(i = 0; i < nfds; i++) {
9334                    target_pfd[i].revents = tswap16(pfd[i].revents);
9335                }
9336            }
9337            unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9338        }
9339        return ret;
9340#endif
9341    case TARGET_NR_flock:
9342        /* NOTE: the flock constant seems to be the same for every
9343           Linux platform */
9344        return get_errno(safe_flock(arg1, arg2));
9345    case TARGET_NR_readv:
9346        {
9347            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9348            if (vec != NULL) {
9349                ret = get_errno(safe_readv(arg1, vec, arg3));
9350                unlock_iovec(vec, arg2, arg3, 1);
9351            } else {
9352                ret = -host_to_target_errno(errno);
9353            }
9354        }
9355        return ret;
9356    case TARGET_NR_writev:
9357        {
9358            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9359            if (vec != NULL) {
9360                ret = get_errno(safe_writev(arg1, vec, arg3));
9361                unlock_iovec(vec, arg2, arg3, 0);
9362            } else {
9363                ret = -host_to_target_errno(errno);
9364            }
9365        }
9366        return ret;
9367#if defined(TARGET_NR_preadv)
9368    case TARGET_NR_preadv:
9369        {
9370            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9371            if (vec != NULL) {
9372                unsigned long low, high;
9373
9374                target_to_host_low_high(arg4, arg5, &low, &high);
9375                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9376                unlock_iovec(vec, arg2, arg3, 1);
9377            } else {
9378                ret = -host_to_target_errno(errno);
 9379            }
9380        }
9381        return ret;
9382#endif
9383#if defined(TARGET_NR_pwritev)
9384    case TARGET_NR_pwritev:
9385        {
9386            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9387            if (vec != NULL) {
9388                unsigned long low, high;
9389
9390                target_to_host_low_high(arg4, arg5, &low, &high);
9391                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9392                unlock_iovec(vec, arg2, arg3, 0);
9393            } else {
9394                ret = -host_to_target_errno(errno);
 9395            }
9396        }
9397        return ret;
9398#endif
9399    case TARGET_NR_getsid:
9400        return get_errno(getsid(arg1));
9401#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9402    case TARGET_NR_fdatasync:
9403        return get_errno(fdatasync(arg1));
9404#endif
9405#ifdef TARGET_NR__sysctl
9406    case TARGET_NR__sysctl:
9407        /* We don't implement this, but ENOTDIR is always a safe
9408           return value. */
9409        return -TARGET_ENOTDIR;
9410#endif
9411    case TARGET_NR_sched_getaffinity:
9412        {
9413            unsigned int mask_size;
9414            unsigned long *mask;
9415
9416            /*
 9417             * sched_getaffinity needs multiples of ulong, so we need to take
9418             * care of mismatches between target ulong and host ulong sizes.
9419             */
9420            if (arg2 & (sizeof(abi_ulong) - 1)) {
9421                return -TARGET_EINVAL;
9422            }
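                 /* Round the target-supplied length up to a whole number of
                  * host "unsigned long"s, since the host syscall operates on
                  * those units.
                  */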
9423            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9424
9425            mask = alloca(mask_size);
9426            memset(mask, 0, mask_size);
9427            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9428
9429            if (!is_error(ret)) {
9430                if (ret > arg2) {
9431                    /* More data returned than the caller's buffer will fit.
9432                     * This only happens if sizeof(abi_long) < sizeof(long)
9433                     * and the caller passed us a buffer holding an odd number
9434                     * of abi_longs. If the host kernel is actually using the
9435                     * extra 4 bytes then fail EINVAL; otherwise we can just
9436                     * ignore them and only copy the interesting part.
9437                     */
9438                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9439                    if (numcpus > arg2 * 8) {
9440                        return -TARGET_EINVAL;
9441                    }
9442                    ret = arg2;
9443                }
9444
9445                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9446                    return -TARGET_EFAULT;
9447                }
9448            }
9449        }
9450        return ret;
9451    case TARGET_NR_sched_setaffinity:
9452        {
9453            unsigned int mask_size;
9454            unsigned long *mask;
9455
9456            /*
 9457             * sched_setaffinity needs multiples of ulong, so we need to take
9458             * care of mismatches between target ulong and host ulong sizes.
9459             */
9460            if (arg2 & (sizeof(abi_ulong) - 1)) {
9461                return -TARGET_EINVAL;
9462            }
9463            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9464            mask = alloca(mask_size);
9465
9466            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9467            if (ret) {
9468                return ret;
9469            }
9470
9471            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9472        }
9473    case TARGET_NR_getcpu:
9474        {
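                 /* arg1 and arg2 are optional user pointers for the cpu and
                  * node numbers; the third (tcache) argument is always passed
                  * to the host as NULL.
                  */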
9475            unsigned cpu, node;
9476            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9477                                       arg2 ? &node : NULL,
9478                                       NULL));
9479            if (is_error(ret)) {
9480                return ret;
9481            }
9482            if (arg1 && put_user_u32(cpu, arg1)) {
9483                return -TARGET_EFAULT;
9484            }
9485            if (arg2 && put_user_u32(node, arg2)) {
9486                return -TARGET_EFAULT;
9487            }
9488        }
9489        return ret;
9490    case TARGET_NR_sched_setparam:
9491        {
9492            struct sched_param *target_schp;
9493            struct sched_param schp;
9494
9495            if (arg2 == 0) {
9496                return -TARGET_EINVAL;
9497            }
9498            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9499                return -TARGET_EFAULT;
9500            schp.sched_priority = tswap32(target_schp->sched_priority);
9501            unlock_user_struct(target_schp, arg2, 0);
9502            return get_errno(sched_setparam(arg1, &schp));
9503        }
9504    case TARGET_NR_sched_getparam:
9505        {
9506            struct sched_param *target_schp;
9507            struct sched_param schp;
9508
9509            if (arg2 == 0) {
9510                return -TARGET_EINVAL;
9511            }
9512            ret = get_errno(sched_getparam(arg1, &schp));
9513            if (!is_error(ret)) {
9514                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9515                    return -TARGET_EFAULT;
9516                target_schp->sched_priority = tswap32(schp.sched_priority);
9517                unlock_user_struct(target_schp, arg2, 1);
9518            }
9519        }
9520        return ret;
9521    case TARGET_NR_sched_setscheduler:
9522        {
9523            struct sched_param *target_schp;
9524            struct sched_param schp;
9525            if (arg3 == 0) {
9526                return -TARGET_EINVAL;
9527            }
9528            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9529                return -TARGET_EFAULT;
9530            schp.sched_priority = tswap32(target_schp->sched_priority);
9531            unlock_user_struct(target_schp, arg3, 0);
9532            return get_errno(sched_setscheduler(arg1, arg2, &schp));
9533        }
9534    case TARGET_NR_sched_getscheduler:
9535        return get_errno(sched_getscheduler(arg1));
9536    case TARGET_NR_sched_yield:
9537        return get_errno(sched_yield());
9538    case TARGET_NR_sched_get_priority_max:
9539        return get_errno(sched_get_priority_max(arg1));
9540    case TARGET_NR_sched_get_priority_min:
9541        return get_errno(sched_get_priority_min(arg1));
9542    case TARGET_NR_sched_rr_get_interval:
9543        {
9544            struct timespec ts;
9545            ret = get_errno(sched_rr_get_interval(arg1, &ts));
9546            if (!is_error(ret)) {
9547                ret = host_to_target_timespec(arg2, &ts);
9548            }
9549        }
9550        return ret;
9551    case TARGET_NR_nanosleep:
9552        {
9553            struct timespec req, rem;
 9554            if (target_to_host_timespec(&req, arg1)) {
                     return -TARGET_EFAULT;
                 }
9555            ret = get_errno(safe_nanosleep(&req, &rem));
9556            if (is_error(ret) && arg2) {
9557                host_to_target_timespec(arg2, &rem);
9558            }
9559        }
9560        return ret;
9561    case TARGET_NR_prctl:
9562        switch (arg1) {
9563        case PR_GET_PDEATHSIG:
9564        {
9565            int deathsig;
9566            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9567            if (!is_error(ret) && arg2
9568                && put_user_ual(deathsig, arg2)) {
9569                return -TARGET_EFAULT;
9570            }
9571            return ret;
9572        }
9573#ifdef PR_GET_NAME
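             /* PR_GET_NAME/PR_SET_NAME operate on a fixed 16-byte buffer,
              * matching the kernel's TASK_COMM_LEN.
              */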
9574        case PR_GET_NAME:
9575        {
9576            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9577            if (!name) {
9578                return -TARGET_EFAULT;
9579            }
9580            ret = get_errno(prctl(arg1, (unsigned long)name,
9581                                  arg3, arg4, arg5));
9582            unlock_user(name, arg2, 16);
9583            return ret;
9584        }
9585        case PR_SET_NAME:
9586        {
9587            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9588            if (!name) {
9589                return -TARGET_EFAULT;
9590            }
9591            ret = get_errno(prctl(arg1, (unsigned long)name,
9592                                  arg3, arg4, arg5));
9593            unlock_user(name, arg2, 0);
9594            return ret;
9595        }
9596#endif
9597#ifdef TARGET_MIPS
9598        case TARGET_PR_GET_FP_MODE:
9599        {
9600            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9601            ret = 0;
9602            if (env->CP0_Status & (1 << CP0St_FR)) {
9603                ret |= TARGET_PR_FP_MODE_FR;
9604            }
9605            if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9606                ret |= TARGET_PR_FP_MODE_FRE;
9607            }
9608            return ret;
9609        }
9610        case TARGET_PR_SET_FP_MODE:
9611        {
9612            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9613            bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9614            bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9615            bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9616            bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9617
9618            const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9619                                            TARGET_PR_FP_MODE_FRE;
9620
9621            /* If nothing to change, return right away, successfully.  */
9622            if (old_fr == new_fr && old_fre == new_fre) {
9623                return 0;
9624            }
9625            /* Check the value is valid */
9626            if (arg2 & ~known_bits) {
9627                return -TARGET_EOPNOTSUPP;
9628            }
9629            /* Setting FRE without FR is not supported.  */
9630            if (new_fre && !new_fr) {
9631                return -TARGET_EOPNOTSUPP;
9632            }
9633            if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9634                /* FR1 is not supported */
9635                return -TARGET_EOPNOTSUPP;
9636            }
9637            if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9638                && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9639                /* cannot set FR=0 */
9640                return -TARGET_EOPNOTSUPP;
9641            }
9642            if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9643                /* Cannot set FRE=1 */
9644                return -TARGET_EOPNOTSUPP;
9645            }
9646
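                 /*
                  * Migrate the FP register file between layouts: with FR=0 an
                  * even/odd pair of 32-bit registers backs one 64-bit value,
                  * while with FR=1 the even register holds all 64 bits, so
                  * copy the odd register's word to or from the upper half of
                  * the even one.
                  */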
9647            int i;
9648            fpr_t *fpr = env->active_fpu.fpr;
9649            for (i = 0; i < 32 ; i += 2) {
9650                if (!old_fr && new_fr) {
9651                    fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9652                } else if (old_fr && !new_fr) {
9653                    fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9654                }
9655            }
9656
9657            if (new_fr) {
9658                env->CP0_Status |= (1 << CP0St_FR);
9659                env->hflags |= MIPS_HFLAG_F64;
9660            } else {
9661                env->CP0_Status &= ~(1 << CP0St_FR);
9662                env->hflags &= ~MIPS_HFLAG_F64;
9663            }
9664            if (new_fre) {
9665                env->CP0_Config5 |= (1 << CP0C5_FRE);
9666                if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9667                    env->hflags |= MIPS_HFLAG_FRE;
9668                }
9669            } else {
9670                env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9671                env->hflags &= ~MIPS_HFLAG_FRE;
9672            }
9673
9674            return 0;
9675        }
9676#endif /* MIPS */
9677#ifdef TARGET_AARCH64
9678        case TARGET_PR_SVE_SET_VL:
9679            /*
9680             * We cannot support either PR_SVE_SET_VL_ONEXEC or
9681             * PR_SVE_VL_INHERIT.  Note the kernel definition
9682             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9683             * even though the current architectural maximum is VQ=16.
9684             */
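                 /*
                  * arg2 is the requested vector length in bytes; convert it to
                  * quadwords, clamp to the CPU's maximum and store it (minus
                  * one) in ZCR_EL1.  The return value is the vector length
                  * actually set, in bytes.
                  */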
9685            ret = -TARGET_EINVAL;
9686            if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9687                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9688                CPUARMState *env = cpu_env;
9689                ARMCPU *cpu = arm_env_get_cpu(env);
9690                uint32_t vq, old_vq;
9691
9692                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9693                vq = MAX(arg2 / 16, 1);
9694                vq = MIN(vq, cpu->sve_max_vq);
9695
9696                if (vq < old_vq) {
9697                    aarch64_sve_narrow_vq(env, vq);
9698                }
9699                env->vfp.zcr_el[1] = vq - 1;
9700                ret = vq * 16;
9701            }
9702            return ret;
9703        case TARGET_PR_SVE_GET_VL:
9704            ret = -TARGET_EINVAL;
9705            {
9706                ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9707                if (cpu_isar_feature(aa64_sve, cpu)) {
9708                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9709                }
9710            }
9711            return ret;
9712        case TARGET_PR_PAC_RESET_KEYS:
9713            {
9714                CPUARMState *env = cpu_env;
9715                ARMCPU *cpu = arm_env_get_cpu(env);
9716
9717                if (arg3 || arg4 || arg5) {
9718                    return -TARGET_EINVAL;
9719                }
9720                if (cpu_isar_feature(aa64_pauth, cpu)) {
9721                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9722                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9723                               TARGET_PR_PAC_APGAKEY);
9724                    if (arg2 == 0) {
9725                        arg2 = all;
9726                    } else if (arg2 & ~all) {
9727                        return -TARGET_EINVAL;
9728                    }
9729                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
9730                        arm_init_pauth_key(&env->apia_key);
9731                    }
9732                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
9733                        arm_init_pauth_key(&env->apib_key);
9734                    }
9735                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
9736                        arm_init_pauth_key(&env->apda_key);
9737                    }
9738                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
9739                        arm_init_pauth_key(&env->apdb_key);
9740                    }
9741                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
9742                        arm_init_pauth_key(&env->apga_key);
9743                    }
9744                    return 0;
9745                }
9746            }
9747            return -TARGET_EINVAL;
9748#endif /* AARCH64 */
9749        case PR_GET_SECCOMP:
9750        case PR_SET_SECCOMP:
9751            /* Disable seccomp to prevent the target disabling syscalls we
9752             * need. */
9753            return -TARGET_EINVAL;
9754        default:
9755            /* Most prctl options have no pointer arguments */
9756            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9757        }
9758        break;
9759#ifdef TARGET_NR_arch_prctl
9760    case TARGET_NR_arch_prctl:
9761#if defined(TARGET_I386) && !defined(TARGET_ABI32)
9762        return do_arch_prctl(cpu_env, arg1, arg2);
9763#else
9764#error unreachable
9765#endif
9766#endif
9767#ifdef TARGET_NR_pread64
9768    case TARGET_NR_pread64:
9769        if (regpairs_aligned(cpu_env, num)) {
9770            arg4 = arg5;
9771            arg5 = arg6;
9772        }
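             /*
              * On ABIs that pass 64-bit syscall arguments in aligned register
              * pairs, a padding register precedes the offset, so its two
              * halves arrive in arg5/arg6 rather than arg4/arg5; shift them
              * down before use.
              */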
9773        if (arg2 == 0 && arg3 == 0) {
9774            /* Special-case NULL buffer and zero length, which should succeed */
9775            p = 0;
9776        } else {
9777            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9778            if (!p) {
9779                return -TARGET_EFAULT;
9780            }
9781        }
9782        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9783        unlock_user(p, arg2, ret);
9784        return ret;
9785    case TARGET_NR_pwrite64:
9786        if (regpairs_aligned(cpu_env, num)) {
9787            arg4 = arg5;
9788            arg5 = arg6;
9789        }
9790        if (arg2 == 0 && arg3 == 0) {
9791            /* Special-case NULL buffer and zero length, which should succeed */
9792            p = 0;
9793        } else {
9794            p = lock_user(VERIFY_READ, arg2, arg3, 1);
9795            if (!p) {
9796                return -TARGET_EFAULT;
9797            }
9798        }
9799        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9800        unlock_user(p, arg2, 0);
9801        return ret;
9802#endif
9803    case TARGET_NR_getcwd:
9804        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9805            return -TARGET_EFAULT;
9806        ret = get_errno(sys_getcwd1(p, arg2));
9807        unlock_user(p, arg1, ret);
9808        return ret;
9809    case TARGET_NR_capget:
9810    case TARGET_NR_capset:
9811    {
9812        struct target_user_cap_header *target_header;
9813        struct target_user_cap_data *target_data = NULL;
9814        struct __user_cap_header_struct header;
9815        struct __user_cap_data_struct data[2];
9816        struct __user_cap_data_struct *dataptr = NULL;
9817        int i, target_datalen;
9818        int data_items = 1;
9819
9820        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9821            return -TARGET_EFAULT;
9822        }
9823        header.version = tswap32(target_header->version);
9824        header.pid = tswap32(target_header->pid);
9825
9826        if (header.version != _LINUX_CAPABILITY_VERSION) {
9827            /* Version 2 and up takes pointer to two user_data structs */
9828            data_items = 2;
9829        }
9830
9831        target_datalen = sizeof(*target_data) * data_items;
9832
9833        if (arg2) {
9834            if (num == TARGET_NR_capget) {
9835                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9836            } else {
9837                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9838            }
9839            if (!target_data) {
9840                unlock_user_struct(target_header, arg1, 0);
9841                return -TARGET_EFAULT;
9842            }
9843
9844            if (num == TARGET_NR_capset) {
9845                for (i = 0; i < data_items; i++) {
9846                    data[i].effective = tswap32(target_data[i].effective);
9847                    data[i].permitted = tswap32(target_data[i].permitted);
9848                    data[i].inheritable = tswap32(target_data[i].inheritable);
9849                }
9850            }
9851
9852            dataptr = data;
9853        }
9854
9855        if (num == TARGET_NR_capget) {
9856            ret = get_errno(capget(&header, dataptr));
9857        } else {
9858            ret = get_errno(capset(&header, dataptr));
9859        }
9860
9861        /* The kernel always updates version for both capget and capset */
9862        target_header->version = tswap32(header.version);
9863        unlock_user_struct(target_header, arg1, 1);
9864
9865        if (arg2) {
9866            if (num == TARGET_NR_capget) {
9867                for (i = 0; i < data_items; i++) {
9868                    target_data[i].effective = tswap32(data[i].effective);
9869                    target_data[i].permitted = tswap32(data[i].permitted);
9870                    target_data[i].inheritable = tswap32(data[i].inheritable);
9871                }
9872                unlock_user(target_data, arg2, target_datalen);
9873            } else {
9874                unlock_user(target_data, arg2, 0);
9875            }
9876        }
9877        return ret;
9878    }
9879    case TARGET_NR_sigaltstack:
9880        return do_sigaltstack(arg1, arg2,
9881                              get_sp_from_cpustate((CPUArchState *)cpu_env));
9882
9883#ifdef CONFIG_SENDFILE
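         /*
          * sendfile() and sendfile64() both map onto the host sendfile();
          * they differ only in whether the optional offset in target memory
          * is an abi_long or a 64-bit value.
          */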
9884#ifdef TARGET_NR_sendfile
9885    case TARGET_NR_sendfile:
9886    {
9887        off_t *offp = NULL;
9888        off_t off;
9889        if (arg3) {
9890            ret = get_user_sal(off, arg3);
9891            if (is_error(ret)) {
9892                return ret;
9893            }
9894            offp = &off;
9895        }
9896        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9897        if (!is_error(ret) && arg3) {
9898            abi_long ret2 = put_user_sal(off, arg3);
9899            if (is_error(ret2)) {
9900                ret = ret2;
9901            }
9902        }
9903        return ret;
9904    }
9905#endif
9906#ifdef TARGET_NR_sendfile64
9907    case TARGET_NR_sendfile64:
9908    {
9909        off_t *offp = NULL;
9910        off_t off;
9911        if (arg3) {
9912            ret = get_user_s64(off, arg3);
9913            if (is_error(ret)) {
9914                return ret;
9915            }
9916            offp = &off;
9917        }
9918        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9919        if (!is_error(ret) && arg3) {
9920            abi_long ret2 = put_user_s64(off, arg3);
9921            if (is_error(ret2)) {
9922                ret = ret2;
9923            }
9924        }
9925        return ret;
9926    }
9927#endif
9928#endif
9929#ifdef TARGET_NR_vfork
9930    case TARGET_NR_vfork:
9931        return get_errno(do_fork(cpu_env,
9932                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
9933                         0, 0, 0, 0));
9934#endif
9935#ifdef TARGET_NR_ugetrlimit
9936    case TARGET_NR_ugetrlimit:
9937    {
9938        struct rlimit rlim;
9939        int resource = target_to_host_resource(arg1);
9940        ret = get_errno(getrlimit(resource, &rlim));
9941        if (!is_error(ret)) {
9942            struct target_rlimit *target_rlim;
9943            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9944                return -TARGET_EFAULT;
9945            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9946            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9947            unlock_user_struct(target_rlim, arg2, 1);
9948        }
9949        return ret;
9950    }
9951#endif
9952#ifdef TARGET_NR_truncate64
9953    case TARGET_NR_truncate64:
9954        if (!(p = lock_user_string(arg1)))
9955            return -TARGET_EFAULT;
9956        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9957        unlock_user(p, arg1, 0);
9958        return ret;
9959#endif
9960#ifdef TARGET_NR_ftruncate64
9961    case TARGET_NR_ftruncate64:
9962        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9963#endif
9964#ifdef TARGET_NR_stat64
9965    case TARGET_NR_stat64:
9966        if (!(p = lock_user_string(arg1))) {
9967            return -TARGET_EFAULT;
9968        }
9969        ret = get_errno(stat(path(p), &st));
9970        unlock_user(p, arg1, 0);
9971        if (!is_error(ret))
9972            ret = host_to_target_stat64(cpu_env, arg2, &st);
9973        return ret;
9974#endif
9975#ifdef TARGET_NR_lstat64
9976    case TARGET_NR_lstat64:
9977        if (!(p = lock_user_string(arg1))) {
9978            return -TARGET_EFAULT;
9979        }
9980        ret = get_errno(lstat(path(p), &st));
9981        unlock_user(p, arg1, 0);
9982        if (!is_error(ret))
9983            ret = host_to_target_stat64(cpu_env, arg2, &st);
9984        return ret;
9985#endif
9986#ifdef TARGET_NR_fstat64
9987    case TARGET_NR_fstat64:
9988        ret = get_errno(fstat(arg1, &st));
9989        if (!is_error(ret))
9990            ret = host_to_target_stat64(cpu_env, arg2, &st);
9991        return ret;
9992#endif
9993#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9994#ifdef TARGET_NR_fstatat64
9995    case TARGET_NR_fstatat64:
9996#endif
9997#ifdef TARGET_NR_newfstatat
9998    case TARGET_NR_newfstatat:
9999#endif
10000        if (!(p = lock_user_string(arg2))) {
10001            return -TARGET_EFAULT;
10002        }
10003        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10004        unlock_user(p, arg2, 0);
10005        if (!is_error(ret))
10006            ret = host_to_target_stat64(cpu_env, arg3, &st);
10007        return ret;
10008#endif
10009#ifdef TARGET_NR_lchown
10010    case TARGET_NR_lchown:
10011        if (!(p = lock_user_string(arg1)))
10012            return -TARGET_EFAULT;
10013        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10014        unlock_user(p, arg1, 0);
10015        return ret;
10016#endif
10017#ifdef TARGET_NR_getuid
10018    case TARGET_NR_getuid:
10019        return get_errno(high2lowuid(getuid()));
10020#endif
10021#ifdef TARGET_NR_getgid
10022    case TARGET_NR_getgid:
10023        return get_errno(high2lowgid(getgid()));
10024#endif
10025#ifdef TARGET_NR_geteuid
10026    case TARGET_NR_geteuid:
10027        return get_errno(high2lowuid(geteuid()));
10028#endif
10029#ifdef TARGET_NR_getegid
10030    case TARGET_NR_getegid:
10031        return get_errno(high2lowgid(getegid()));
10032#endif
10033    case TARGET_NR_setreuid:
10034        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10035    case TARGET_NR_setregid:
10036        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10037    case TARGET_NR_getgroups:
10038        {
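                 /* Legacy getgroups: entries are converted between host gid_t
                  * and the target's (possibly 16-bit) target_id via
                  * high2lowgid().
                  */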
10039            int gidsetsize = arg1;
10040            target_id *target_grouplist;
10041            gid_t *grouplist;
10042            int i;
10043
10044            grouplist = alloca(gidsetsize * sizeof(gid_t));
10045            ret = get_errno(getgroups(gidsetsize, grouplist));
10046            if (gidsetsize == 0)
10047                return ret;
10048            if (!is_error(ret)) {
10049                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10050                if (!target_grouplist)
10051                    return -TARGET_EFAULT;
10052                for (i = 0; i < ret; i++)
10053                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10054                unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10055            }
10056        }
10057        return ret;
10058    case TARGET_NR_setgroups:
10059        {
10060            int gidsetsize = arg1;
10061            target_id *target_grouplist;
10062            gid_t *grouplist = NULL;
10063            int i;
10064            if (gidsetsize) {
10065                grouplist = alloca(gidsetsize * sizeof(gid_t));
10066                target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10067                if (!target_grouplist) {
10068                    return -TARGET_EFAULT;
10069                }
10070                for (i = 0; i < gidsetsize; i++) {
10071                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10072                }
10073                unlock_user(target_grouplist, arg2, 0);
10074            }
10075            return get_errno(setgroups(gidsetsize, grouplist));
10076        }
10077    case TARGET_NR_fchown:
10078        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10079#if defined(TARGET_NR_fchownat)
10080    case TARGET_NR_fchownat:
10081        if (!(p = lock_user_string(arg2)))
10082            return -TARGET_EFAULT;
10083        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10084                                 low2highgid(arg4), arg5));
10085        unlock_user(p, arg2, 0);
10086        return ret;
10087#endif
10088#ifdef TARGET_NR_setresuid
10089    case TARGET_NR_setresuid:
10090        return get_errno(sys_setresuid(low2highuid(arg1),
10091                                       low2highuid(arg2),
10092                                       low2highuid(arg3)));
10093#endif
10094#ifdef TARGET_NR_getresuid
10095    case TARGET_NR_getresuid:
10096        {
10097            uid_t ruid, euid, suid;
10098            ret = get_errno(getresuid(&ruid, &euid, &suid));
10099            if (!is_error(ret)) {
10100                if (put_user_id(high2lowuid(ruid), arg1)
10101                    || put_user_id(high2lowuid(euid), arg2)
10102                    || put_user_id(high2lowuid(suid), arg3))
10103                    return -TARGET_EFAULT;
10104            }
10105        }
10106        return ret;
10107#endif
10108#ifdef TARGET_NR_getresgid
10109    case TARGET_NR_setresgid:
10110        return get_errno(sys_setresgid(low2highgid(arg1),
10111                                       low2highgid(arg2),
10112                                       low2highgid(arg3)));
10113#endif
10114#ifdef TARGET_NR_getresgid
10115    case TARGET_NR_getresgid:
10116        {
10117            gid_t rgid, egid, sgid;
10118            ret = get_errno(getresgid(&rgid, &egid, &sgid));
10119            if (!is_error(ret)) {
10120                if (put_user_id(high2lowgid(rgid), arg1)
10121                    || put_user_id(high2lowgid(egid), arg2)
10122                    || put_user_id(high2lowgid(sgid), arg3))
10123                    return -TARGET_EFAULT;
10124            }
10125        }
10126        return ret;
10127#endif
10128#ifdef TARGET_NR_chown
10129    case TARGET_NR_chown:
10130        if (!(p = lock_user_string(arg1)))
10131            return -TARGET_EFAULT;
10132        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10133        unlock_user(p, arg1, 0);
10134        return ret;
10135#endif
10136    case TARGET_NR_setuid:
10137        return get_errno(sys_setuid(low2highuid(arg1)));
10138    case TARGET_NR_setgid:
10139        return get_errno(sys_setgid(low2highgid(arg1)));
10140    case TARGET_NR_setfsuid:
10141        return get_errno(setfsuid(arg1));
10142    case TARGET_NR_setfsgid:
10143        return get_errno(setfsgid(arg1));
10144
10145#ifdef TARGET_NR_lchown32
10146    case TARGET_NR_lchown32:
10147        if (!(p = lock_user_string(arg1)))
10148            return -TARGET_EFAULT;
10149        ret = get_errno(lchown(p, arg2, arg3));
10150        unlock_user(p, arg1, 0);
10151        return ret;
10152#endif
10153#ifdef TARGET_NR_getuid32
10154    case TARGET_NR_getuid32:
10155        return get_errno(getuid());
10156#endif
10157
10158#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10159    /* Alpha specific */
10160    case TARGET_NR_getxuid:
10161        {
10162            uid_t euid;
10163            euid = geteuid();
10164            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10165        }
10166        return get_errno(getuid());
10167#endif
10168#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10169    /* Alpha specific */
10170    case TARGET_NR_getxgid:
10171        {
10172            gid_t egid;
10173            egid = getegid();
10174            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10175        }
10176        return get_errno(getgid());
10177#endif
10178#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10179    /* Alpha specific */
10180    case TARGET_NR_osf_getsysinfo:
10181        ret = -TARGET_EOPNOTSUPP;
10182        switch (arg1) {
10183          case TARGET_GSI_IEEE_FP_CONTROL:
10184            {
10185                uint64_t swcr, fpcr = cpu_alpha_load_fpcr(cpu_env);
10186
10187                /* Copied from linux ieee_fpcr_to_swcr.  */
10188                swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10189                swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10190                swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10191                                        | SWCR_TRAP_ENABLE_DZE
10192                                        | SWCR_TRAP_ENABLE_OVF);
10193                swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10194                                        | SWCR_TRAP_ENABLE_INE);
10195                swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10196                swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10197
10198                if (put_user_u64(swcr, arg2)) {
10199                    return -TARGET_EFAULT;
                     }
10200                ret = 0;
10201            }
10202            break;
10203
10204          /* case GSI_IEEE_STATE_AT_SIGNAL:
10205             -- Not implemented in linux kernel.
10206             case GSI_UACPROC:
10207             -- Retrieves current unaligned access state; not much used.
10208             case GSI_PROC_TYPE:
10209             -- Retrieves implver information; surely not used.
10210             case GSI_GET_HWRPB:
10211             -- Grabs a copy of the HWRPB; surely not used.
10212          */
10213        }
10214        return ret;
10215#endif
10216#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10217    /* Alpha specific */
10218    case TARGET_NR_osf_setsysinfo:
10219        ret = -TARGET_EOPNOTSUPP;
10220        switch (arg1) {
10221          case TARGET_SSI_IEEE_FP_CONTROL:
10222            {
10223                uint64_t swcr, fpcr, orig_fpcr;
10224
10225                if (get_user_u64(swcr, arg2)) {
10226                    return -TARGET_EFAULT;
10227                }
10228                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10229                fpcr = orig_fpcr & FPCR_DYN_MASK;
10230
10231                /* Copied from linux ieee_swcr_to_fpcr.  */
10232                fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10233                fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10234                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10235                                  | SWCR_TRAP_ENABLE_DZE
10236                                  | SWCR_TRAP_ENABLE_OVF)) << 48;
10237                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10238                                  | SWCR_TRAP_ENABLE_INE)) << 57;
10239                fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10240                fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10241
10242                cpu_alpha_store_fpcr(cpu_env, fpcr);
10243                ret = 0;
10244            }
10245            break;
10246
10247          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10248            {
10249                uint64_t exc, fpcr, orig_fpcr;
10250                int si_code;
10251
10252                if (get_user_u64(exc, arg2)) {
10253                    return -TARGET_EFAULT;
10254                }
10255
10256                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10257
10258                /* We only add to the exception status here.  */
10259                fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10260
10261                cpu_alpha_store_fpcr(cpu_env, fpcr);
10262                ret = 0;
10263
10264                /* Old exceptions are not signaled.  */
10265                fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10266
10267                /* If any exceptions were set by this call and are
10268                   unmasked, send a signal.  */
10269                si_code = 0;
10270                if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10271                    si_code = TARGET_FPE_FLTRES;
10272                }
10273                if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10274                    si_code = TARGET_FPE_FLTUND;
10275                }
10276                if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10277                    si_code = TARGET_FPE_FLTOVF;
10278                }
10279                if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10280                    si_code = TARGET_FPE_FLTDIV;
10281                }
10282                if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10283                    si_code = TARGET_FPE_FLTINV;
10284                }
10285                if (si_code != 0) {
10286                    target_siginfo_t info;
10287                    info.si_signo = SIGFPE;
10288                    info.si_errno = 0;
10289                    info.si_code = si_code;
10290                    info._sifields._sigfault._addr
10291                        = ((CPUArchState *)cpu_env)->pc;
10292                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
10293                                 QEMU_SI_FAULT, &info);
10294                }
10295            }
10296            break;
10297
10298          /* case SSI_NVPAIRS:
10299             -- Used with SSIN_UACPROC to enable unaligned accesses.
10300             case SSI_IEEE_STATE_AT_SIGNAL:
10301             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10302             -- Not implemented in linux kernel
10303          */
10304        }
10305        return ret;
10306#endif
10307#ifdef TARGET_NR_osf_sigprocmask
10308    /* Alpha specific.  */
10309    case TARGET_NR_osf_sigprocmask:
10310        {
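                 /* Unlike sigprocmask(2), the old mask is handed back as the
                  * syscall return value rather than through a pointer.
                  */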
10311            abi_ulong mask;
10312            int how;
10313            sigset_t set, oldset;
10314
10315            switch(arg1) {
10316            case TARGET_SIG_BLOCK:
10317                how = SIG_BLOCK;
10318                break;
10319            case TARGET_SIG_UNBLOCK:
10320                how = SIG_UNBLOCK;
10321                break;
10322            case TARGET_SIG_SETMASK:
10323                how = SIG_SETMASK;
10324                break;
10325            default:
10326                return -TARGET_EINVAL;
10327            }
10328            mask = arg2;
10329            target_to_host_old_sigset(&set, &mask);
10330            ret = do_sigprocmask(how, &set, &oldset);
10331            if (!ret) {
10332                host_to_target_old_sigset(&mask, &oldset);
10333                ret = mask;
10334            }
10335        }
10336        return ret;
10337#endif
10338
10339#ifdef TARGET_NR_getgid32
10340    case TARGET_NR_getgid32:
10341        return get_errno(getgid());
10342#endif
10343#ifdef TARGET_NR_geteuid32
10344    case TARGET_NR_geteuid32:
10345        return get_errno(geteuid());
10346#endif
10347#ifdef TARGET_NR_getegid32
10348    case TARGET_NR_getegid32:
10349        return get_errno(getegid());
10350#endif
10351#ifdef TARGET_NR_setreuid32
10352    case TARGET_NR_setreuid32:
10353        return get_errno(setreuid(arg1, arg2));
10354#endif
10355#ifdef TARGET_NR_setregid32
10356    case TARGET_NR_setregid32:
10357        return get_errno(setregid(arg1, arg2));
10358#endif
10359#ifdef TARGET_NR_getgroups32
10360    case TARGET_NR_getgroups32:
10361        {
10362            int gidsetsize = arg1;
10363            uint32_t *target_grouplist;
10364            gid_t *grouplist;
10365            int i;
10366
10367            grouplist = alloca(gidsetsize * sizeof(gid_t));
10368            ret = get_errno(getgroups(gidsetsize, grouplist));
10369            if (gidsetsize == 0)
10370                return ret;
10371            if (!is_error(ret)) {
10372                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10373                if (!target_grouplist) {
10374                    return -TARGET_EFAULT;
10375                }
10376                for (i = 0; i < ret; i++)
10377                    target_grouplist[i] = tswap32(grouplist[i]);
10378                unlock_user(target_grouplist, arg2, gidsetsize * 4);
10379            }
10380        }
10381        return ret;
10382#endif
10383#ifdef TARGET_NR_setgroups32
10384    case TARGET_NR_setgroups32:
10385        {
10386            int gidsetsize = arg1;
10387            uint32_t *target_grouplist;
10388            gid_t *grouplist;
10389            int i;
10390
10391            grouplist = alloca(gidsetsize * sizeof(gid_t));
10392            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10393            if (!target_grouplist) {
10394                return -TARGET_EFAULT;
10395            }
10396            for (i = 0; i < gidsetsize; i++)
10397                grouplist[i] = tswap32(target_grouplist[i]);
10398            unlock_user(target_grouplist, arg2, 0);
10399            return get_errno(setgroups(gidsetsize, grouplist));
10400        }
10401#endif
10402#ifdef TARGET_NR_fchown32
10403    case TARGET_NR_fchown32:
10404        return get_errno(fchown(arg1, arg2, arg3));
10405#endif
10406#ifdef TARGET_NR_setresuid32
10407    case TARGET_NR_setresuid32:
10408        return get_errno(sys_setresuid(arg1, arg2, arg3));
10409#endif
10410#ifdef TARGET_NR_getresuid32
10411    case TARGET_NR_getresuid32:
10412        {
10413            uid_t ruid, euid, suid;
10414            ret = get_errno(getresuid(&ruid, &euid, &suid));
10415            if (!is_error(ret)) {
10416                if (put_user_u32(ruid, arg1)
10417                    || put_user_u32(euid, arg2)
10418                    || put_user_u32(suid, arg3))
10419                    return -TARGET_EFAULT;
10420            }
10421        }
10422        return ret;
10423#endif
10424#ifdef TARGET_NR_setresgid32
10425    case TARGET_NR_setresgid32:
10426        return get_errno(sys_setresgid(arg1, arg2, arg3));
10427#endif
10428#ifdef TARGET_NR_getresgid32
10429    case TARGET_NR_getresgid32:
10430        {
10431            gid_t rgid, egid, sgid;
10432            ret = get_errno(getresgid(&rgid, &egid, &sgid));
10433            if (!is_error(ret)) {
10434                if (put_user_u32(rgid, arg1)
10435                    || put_user_u32(egid, arg2)
10436                    || put_user_u32(sgid, arg3))
10437                    return -TARGET_EFAULT;
10438            }
10439        }
10440        return ret;
10441#endif
10442#ifdef TARGET_NR_chown32
10443    case TARGET_NR_chown32:
10444        if (!(p = lock_user_string(arg1)))
10445            return -TARGET_EFAULT;
10446        ret = get_errno(chown(p, arg2, arg3));
10447        unlock_user(p, arg1, 0);
10448        return ret;
10449#endif
10450#ifdef TARGET_NR_setuid32
10451    case TARGET_NR_setuid32:
10452        return get_errno(sys_setuid(arg1));
10453#endif
10454#ifdef TARGET_NR_setgid32
10455    case TARGET_NR_setgid32:
10456        return get_errno(sys_setgid(arg1));
10457#endif
10458#ifdef TARGET_NR_setfsuid32
10459    case TARGET_NR_setfsuid32:
10460        return get_errno(setfsuid(arg1));
10461#endif
10462#ifdef TARGET_NR_setfsgid32
10463    case TARGET_NR_setfsgid32:
10464        return get_errno(setfsgid(arg1));
10465#endif
10466#ifdef TARGET_NR_mincore
10467    case TARGET_NR_mincore:
10468        {
10469            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
10470            if (!a) {
10471                return -TARGET_ENOMEM;
10472            }
10473            p = lock_user_string(arg3);
10474            if (!p) {
10475                ret = -TARGET_EFAULT;
10476            } else {
10477                ret = get_errno(mincore(a, arg2, p));
10478                unlock_user(p, arg3, ret);
10479            }
10480            unlock_user(a, arg1, 0);
10481        }
10482        return ret;
10483#endif
10484#ifdef TARGET_NR_arm_fadvise64_64
10485    case TARGET_NR_arm_fadvise64_64:
10486        /* arm_fadvise64_64 looks like fadvise64_64 but
10487         * with different argument order: fd, advice, offset, len
10488         * rather than the usual fd, offset, len, advice.
10489         * Note that offset and len are both 64-bit so appear as
10490         * pairs of 32-bit registers.
10491         */
10492        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10493                            target_offset64(arg5, arg6), arg2);
10494        return -host_to_target_errno(ret);
10495#endif
10496
10497#if TARGET_ABI_BITS == 32
10498
10499#ifdef TARGET_NR_fadvise64_64
10500    case TARGET_NR_fadvise64_64:
10501#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10502        /* 6 args: fd, advice, offset (high, low), len (high, low) */
10503        ret = arg2;
10504        arg2 = arg3;
10505        arg3 = arg4;
10506        arg4 = arg5;
10507        arg5 = arg6;
10508        arg6 = ret;
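             /* The arguments were rotated above so that the common
              * posix_fadvise() call at the end of this case sees them as
              * fd, offset, len, advice.
              */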
10509#else
10510        /* 6 args: fd, offset (high, low), len (high, low), advice */
10511        if (regpairs_aligned(cpu_env, num)) {
10512            /* offset is in (3,4), len in (5,6) and advice in 7 */
10513            arg2 = arg3;
10514            arg3 = arg4;
10515            arg4 = arg5;
10516            arg5 = arg6;
10517            arg6 = arg7;
10518        }
10519#endif
10520        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
10521                            target_offset64(arg4, arg5), arg6);
10522        return -host_to_target_errno(ret);
10523#endif
10524
10525#ifdef TARGET_NR_fadvise64
10526    case TARGET_NR_fadvise64:
10527        /* 5 args: fd, offset (high, low), len, advice */
10528        if (regpairs_aligned(cpu_env, num)) {
10529            /* offset is in (3,4), len in 5 and advice in 6 */
10530            arg2 = arg3;
10531            arg3 = arg4;
10532            arg4 = arg5;
10533            arg5 = arg6;
10534        }
10535        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
10536        return -host_to_target_errno(ret);
10537#endif
10538
10539#else /* not a 32-bit ABI */
10540#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10541#ifdef TARGET_NR_fadvise64_64
10542    case TARGET_NR_fadvise64_64:
10543#endif
10544#ifdef TARGET_NR_fadvise64
10545    case TARGET_NR_fadvise64:
10546#endif
10547#ifdef TARGET_S390X
10548        switch (arg4) {
10549        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10550        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10551        case 6: arg4 = POSIX_FADV_DONTNEED; break;
10552        case 7: arg4 = POSIX_FADV_NOREUSE; break;
10553        default: break;
10554        }
10555#endif
10556        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10557#endif
10558#endif /* end of 64-bit ABI fadvise handling */
10559
10560#ifdef TARGET_NR_madvise
10561    case TARGET_NR_madvise:
10562        /* A straight passthrough may not be safe because qemu sometimes
10563           turns private file-backed mappings into anonymous mappings.
10564           This will break MADV_DONTNEED.
10565           This is a hint, so ignoring and returning success is ok.  */
10566        return 0;
10567#endif
10568#if TARGET_ABI_BITS == 32
10569    case TARGET_NR_fcntl64:
10570    {
10571        int cmd;
10572        struct flock64 fl;
10573        from_flock64_fn *copyfrom = copy_from_user_flock64;
10574        to_flock64_fn *copyto = copy_to_user_flock64;
10575
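             /* Old-ABI ARM lays out struct flock64 without the padding that
              * EABI inserts before its 64-bit members, so non-EABI processes
              * need the alternate copy helpers below.
              */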
10576#ifdef TARGET_ARM
10577        if (!((CPUARMState *)cpu_env)->eabi) {
10578            copyfrom = copy_from_user_oabi_flock64;
10579            copyto = copy_to_user_oabi_flock64;
10580        }
10581#endif
10582
10583        cmd = target_to_host_fcntl_cmd(arg2);
10584        if (cmd == -TARGET_EINVAL) {
10585            return cmd;
10586        }
10587
10588        switch(arg2) {
10589        case TARGET_F_GETLK64:
10590            ret = copyfrom(&fl, arg3);
10591            if (ret) {
10592                break;
10593            }
10594            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10595            if (ret == 0) {
10596                ret = copyto(arg3, &fl);
10597            }
10598            break;
10599
10600        case TARGET_F_SETLK64:
10601        case TARGET_F_SETLKW64:
10602            ret = copyfrom(&fl, arg3);
10603            if (ret) {
10604                break;
10605            }
10606            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10607            break;
10608        default:
10609            ret = do_fcntl(arg1, arg2, arg3);
10610            break;
10611        }
10612        return ret;
10613    }
10614#endif
10615#ifdef TARGET_NR_cacheflush
10616    case TARGET_NR_cacheflush:
10617        /* self-modifying code is handled automatically, so nothing needed */
10618        return 0;
10619#endif
10620#ifdef TARGET_NR_getpagesize
10621    case TARGET_NR_getpagesize:
10622        return TARGET_PAGE_SIZE;
10623#endif
10624    case TARGET_NR_gettid:
10625        return get_errno(sys_gettid());
10626#ifdef TARGET_NR_readahead
10627    case TARGET_NR_readahead:
10628#if TARGET_ABI_BITS == 32
10629        if (regpairs_aligned(cpu_env, num)) {
10630            arg2 = arg3;
10631            arg3 = arg4;
10632            arg4 = arg5;
10633        }
10634        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
10635#else
10636        ret = get_errno(readahead(arg1, arg2, arg3));
10637#endif
10638        return ret;
10639#endif
10640#ifdef CONFIG_ATTR
10641#ifdef TARGET_NR_setxattr
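         /*
          * For the *xattr calls a zero buffer/value pointer from the guest is
          * passed through to the host as NULL, preserving the usual "query
          * the required size" behaviour of these syscalls.
          */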
10642    case TARGET_NR_listxattr:
10643    case TARGET_NR_llistxattr:
10644    {
10645        void *p, *b = 0;
10646        if (arg2) {
10647            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10648            if (!b) {
10649                return -TARGET_EFAULT;
10650            }
10651        }
10652        p = lock_user_string(arg1);
10653        if (p) {
10654            if (num == TARGET_NR_listxattr) {
10655                ret = get_errno(listxattr(p, b, arg3));
10656            } else {
10657                ret = get_errno(llistxattr(p, b, arg3));
10658            }
10659        } else {
10660            ret = -TARGET_EFAULT;
10661        }
10662        unlock_user(p, arg1, 0);
10663        unlock_user(b, arg2, arg3);
10664        return ret;
10665    }
10666    case TARGET_NR_flistxattr:
10667    {
10668        void *b = 0;
10669        if (arg2) {
10670            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10671            if (!b) {
10672                return -TARGET_EFAULT;
10673            }
10674        }
10675        ret = get_errno(flistxattr(arg1, b, arg3));
10676        unlock_user(b, arg2, arg3);
10677        return ret;
10678    }
10679    case TARGET_NR_setxattr:
10680    case TARGET_NR_lsetxattr:
10681        {
10682            void *p, *n, *v = 0;
10683            if (arg3) {
10684                v = lock_user(VERIFY_READ, arg3, arg4, 1);
10685                if (!v) {
10686                    return -TARGET_EFAULT;
10687                }
10688            }
10689            p = lock_user_string(arg1);
10690            n = lock_user_string(arg2);
10691            if (p && n) {
10692                if (num == TARGET_NR_setxattr) {
10693                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
10694                } else {
10695                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10696                }
10697            } else {
10698                ret = -TARGET_EFAULT;
10699            }
10700            unlock_user(p, arg1, 0);
10701            unlock_user(n, arg2, 0);
10702            unlock_user(v, arg3, 0);
10703        }
10704        return ret;
10705    case TARGET_NR_fsetxattr:
10706        {
10707            void *n, *v = 0;
10708            if (arg3) {
10709                v = lock_user(VERIFY_READ, arg3, arg4, 1);
10710                if (!v) {
10711                    return -TARGET_EFAULT;
10712                }
10713            }
10714            n = lock_user_string(arg2);
10715            if (n) {
10716                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10717            } else {
10718                ret = -TARGET_EFAULT;
10719            }
10720            unlock_user(n, arg2, 0);
10721            unlock_user(v, arg3, 0);
10722        }
10723        return ret;
10724    case TARGET_NR_getxattr:
10725    case TARGET_NR_lgetxattr:
10726        {
10727            void *p, *n, *v = 0;
10728            if (arg3) {
10729                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10730                if (!v) {
10731                    return -TARGET_EFAULT;
10732                }
10733            }
10734            p = lock_user_string(arg1);
10735            n = lock_user_string(arg2);
10736            if (p && n) {
10737                if (num == TARGET_NR_getxattr) {
10738                    ret = get_errno(getxattr(p, n, v, arg4));
10739                } else {
10740                    ret = get_errno(lgetxattr(p, n, v, arg4));
10741                }
10742            } else {
10743                ret = -TARGET_EFAULT;
10744            }
10745            unlock_user(p, arg1, 0);
10746            unlock_user(n, arg2, 0);
10747            unlock_user(v, arg3, arg4);
10748        }
10749        return ret;
10750    case TARGET_NR_fgetxattr:
10751        {
10752            void *n, *v = 0;
10753            if (arg3) {
10754                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10755                if (!v) {
10756                    return -TARGET_EFAULT;
10757                }
10758            }
10759            n = lock_user_string(arg2);
10760            if (n) {
10761                ret = get_errno(fgetxattr(arg1, n, v, arg4));
10762            } else {
10763                ret = -TARGET_EFAULT;
10764            }
10765            unlock_user(n, arg2, 0);
10766            unlock_user(v, arg3, arg4);
10767        }
10768        return ret;
10769    case TARGET_NR_removexattr:
10770    case TARGET_NR_lremovexattr:
10771        {
10772            void *p, *n;
10773            p = lock_user_string(arg1);
10774            n = lock_user_string(arg2);
10775            if (p && n) {
10776                if (num == TARGET_NR_removexattr) {
10777                    ret = get_errno(removexattr(p, n));
10778                } else {
10779                    ret = get_errno(lremovexattr(p, n));
10780                }
10781            } else {
10782                ret = -TARGET_EFAULT;
10783            }
10784            unlock_user(p, arg1, 0);
10785            unlock_user(n, arg2, 0);
10786        }
10787        return ret;
10788    case TARGET_NR_fremovexattr:
10789        {
10790            void *n;
10791            n = lock_user_string(arg2);
10792            if (n) {
10793                ret = get_errno(fremovexattr(arg1, n));
10794            } else {
10795                ret = -TARGET_EFAULT;
10796            }
10797            unlock_user(n, arg2, 0);
10798        }
10799        return ret;
10800#endif
10801#endif /* CONFIG_ATTR */
10802#ifdef TARGET_NR_set_thread_area
10803    case TARGET_NR_set_thread_area:
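         /* Each target stashes its TLS pointer somewhere different: MIPS in the
          * CP0 UserLocal register, CRIS in the PR_PID special register (hence
          * the check that the low byte stays clear), 32-bit x86 via a TLS
          * descriptor set up by do_set_thread_area(), and m68k in the
          * per-thread TaskState.
          */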
10804#if defined(TARGET_MIPS)
10805      ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10806      return 0;
10807#elif defined(TARGET_CRIS)
10808      if (arg1 & 0xff) {
10809          ret = -TARGET_EINVAL;
10810      } else {
10811          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10812          ret = 0;
10813      }
10814      return ret;
10815#elif defined(TARGET_I386) && defined(TARGET_ABI32)
10816      return do_set_thread_area(cpu_env, arg1);
10817#elif defined(TARGET_M68K)
10818      {
10819          TaskState *ts = cpu->opaque;
10820          ts->tp_value = arg1;
10821          return 0;
10822      }
10823#else
10824      return -TARGET_ENOSYS;
10825#endif
10826#endif
10827#ifdef TARGET_NR_get_thread_area
10828    case TARGET_NR_get_thread_area:
10829#if defined(TARGET_I386) && defined(TARGET_ABI32)
10830        return do_get_thread_area(cpu_env, arg1);
10831#elif defined(TARGET_M68K)
10832        {
10833            TaskState *ts = cpu->opaque;
10834            return ts->tp_value;
10835        }
10836#else
10837        return -TARGET_ENOSYS;
10838#endif
10839#endif
10840#ifdef TARGET_NR_getdomainname
10841    case TARGET_NR_getdomainname:
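             /* Not implemented here; guest libcs can generally recover the
              * NIS domain name from uname() instead.
              */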
10842        return -TARGET_ENOSYS;
10843#endif
10844
10845#ifdef TARGET_NR_clock_settime
10846    case TARGET_NR_clock_settime:
10847    {
10848        struct timespec ts;
10849
10850        ret = target_to_host_timespec(&ts, arg2);
10851        if (!is_error(ret)) {
10852            ret = get_errno(clock_settime(arg1, &ts));
10853        }
10854        return ret;
10855    }
10856#endif
10857#ifdef TARGET_NR_clock_gettime
10858    case TARGET_NR_clock_gettime:
10859    {
10860        struct timespec ts;
10861        ret = get_errno(clock_gettime(arg1, &ts));
10862        if (!is_error(ret)) {
10863            ret = host_to_target_timespec(arg2, &ts);
10864        }
10865        return ret;
10866    }
10867#endif
10868#ifdef TARGET_NR_clock_getres
10869    case TARGET_NR_clock_getres:
10870    {
10871        struct timespec ts;
10872        ret = get_errno(clock_getres(arg1, &ts));
10873        if (!is_error(ret)) {
10874            ret = host_to_target_timespec(arg2, &ts);
10875        }
10876        return ret;
10877    }
10878#endif
10879#ifdef TARGET_NR_clock_nanosleep
10880    case TARGET_NR_clock_nanosleep:
10881    {
10882        struct timespec ts;
10883        if (target_to_host_timespec(&ts, arg3)) {
                 return -TARGET_EFAULT;
             }
10884        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10885                                             &ts, arg4 ? &ts : NULL));
10886        if (arg4) {
10887            host_to_target_timespec(arg4, &ts);
             }
10888
10889#if defined(TARGET_PPC)
10890        /* clock_nanosleep is odd in that it returns positive errno values.
10891         * On PPC, CR0 bit 3 should be set in such a situation. */
10892        if (ret && ret != -TARGET_ERESTARTSYS) {
10893            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10894        }
10895#endif
10896        return ret;
10897    }
10898#endif
10899
10900#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10901    case TARGET_NR_set_tid_address:
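             /* The kernel dereferences this pointer itself (to clear the TID
              * at thread exit), so we pass the host address of the guest
              * memory obtained via g2h().
              */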
10902        return get_errno(set_tid_address((int *)g2h(arg1)));
10903#endif
10904
10905    case TARGET_NR_tkill:
10906        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10907
10908    case TARGET_NR_tgkill:
10909        return get_errno(safe_tgkill((int)arg1, (int)arg2,
10910                         target_to_host_signal(arg3)));
10911
10912#ifdef TARGET_NR_set_robust_list
10913    case TARGET_NR_set_robust_list:
10914    case TARGET_NR_get_robust_list:
10915        /* The ABI for supporting robust futexes has userspace pass
10916         * the kernel a pointer to a linked list which is updated by
10917         * userspace after the syscall; the list is walked by the kernel
10918         * when the thread exits. Since the linked list in QEMU guest
10919         * memory isn't a valid linked list for the host and we have
10920         * no way to reliably intercept the thread-death event, we can't
10921         * support these. Silently return ENOSYS so that guest userspace
10922         * falls back to a non-robust futex implementation (which should
10923         * be OK except in the corner case of the guest crashing while
10924         * holding a mutex that is shared with another process via
10925         * shared memory).
10926         */
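             /* For reference, the guest-side list head is roughly:
              *     struct robust_list_head {
              *         struct robust_list list;
              *         long futex_offset;
              *         struct robust_list *list_op_pending;
              *     };
              * with guest-sized pointers and longs, which is part of why the
              * host kernel cannot walk it directly.
              */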
10927        return -TARGET_ENOSYS;
10928#endif
10929
10930#if defined(TARGET_NR_utimensat)
10931    case TARGET_NR_utimensat:
10932        {
10933            struct timespec *tsp, ts[2];
10934            if (!arg3) {
10935                tsp = NULL;
10936            } else {
10937                if (target_to_host_timespec(ts, arg3)) {
                         return -TARGET_EFAULT;
                     }
10938                if (target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec))) {
                         return -TARGET_EFAULT;
                     }
10939                tsp = ts;
10940            }
10941            if (!arg2) {
10942                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10943            } else {
10944                if (!(p = lock_user_string(arg2))) {
10945                    return -TARGET_EFAULT;
10946                }
10947                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10948                unlock_user(p, arg2, 0);
10949            }
10950        }
10951        return ret;
10952#endif
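         /* All futex operations are funnelled through do_futex(), which
          * converts the timeout/val3 arguments as required per operation.
          */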
10953    case TARGET_NR_futex:
10954        return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10955#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10956    case TARGET_NR_inotify_init:
10957        ret = get_errno(sys_inotify_init());
10958        if (ret >= 0) {
10959            fd_trans_register(ret, &target_inotify_trans);
10960        }
10961        return ret;
10962#endif
10963#ifdef CONFIG_INOTIFY1
10964#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10965    case TARGET_NR_inotify_init1:
10966        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
10967                                          fcntl_flags_tbl)));
10968        if (ret >= 0) {
10969            fd_trans_register(ret, &target_inotify_trans);
10970        }
10971        return ret;
10972#endif
10973#endif
10974#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10975    case TARGET_NR_inotify_add_watch:
10976        p = lock_user_string(arg2);
10977        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10978        unlock_user(p, arg2, 0);
10979        return ret;
10980#endif
10981#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10982    case TARGET_NR_inotify_rm_watch:
10983        return get_errno(sys_inotify_rm_watch(arg1, arg2));
10984#endif
10985
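         /* POSIX message queues are forwarded to the host's mq_* calls; the
          * target's struct mq_attr is converted with copy_{from,to}_user_mq_attr().
          */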
10986#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10987    case TARGET_NR_mq_open:
10988        {
10989            struct mq_attr posix_mq_attr;
10990            struct mq_attr *pposix_mq_attr;
10991            int host_flags;
10992
10993            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
10994            pposix_mq_attr = NULL;
10995            if (arg4) {
10996                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
10997                    return -TARGET_EFAULT;
10998                }
10999                pposix_mq_attr = &posix_mq_attr;
11000            }
11001            p = lock_user_string(arg1 - 1);
11002            if (!p) {
11003                return -TARGET_EFAULT;
11004            }
11005            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11006            unlock_user(p, arg1, 0);
11007        }
11008        return ret;
11009
11010    case TARGET_NR_mq_unlink:
11011        p = lock_user_string(arg1 - 1);
11012        if (!p) {
11013            return -TARGET_EFAULT;
11014        }
11015        ret = get_errno(mq_unlink(p));
11016        unlock_user(p, arg1, 0);
11017        return ret;
11018
11019    case TARGET_NR_mq_timedsend:
11020        {
11021            struct timespec ts;
11022
11023            p = lock_user(VERIFY_READ, arg2, arg3, 1);
                 if (!p) {
                     return -TARGET_EFAULT;
                 }
11024            if (arg5 != 0) {
11025                target_to_host_timespec(&ts, arg5);
11026                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11027                host_to_target_timespec(arg5, &ts);
11028            } else {
11029                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11030            }
11031            unlock_user(p, arg2, arg3);
11032        }
11033        return ret;
11034
11035    case TARGET_NR_mq_timedreceive:
11036        {
11037            struct timespec ts;
11038            unsigned int prio;
11039
11040            p = lock_user(VERIFY_WRITE, arg2, arg3, 1);
                 if (!p) {
                     return -TARGET_EFAULT;
                 }
11041            if (arg5 != 0) {
11042                target_to_host_timespec(&ts, arg5);
11043                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11044                                                     &prio, &ts));
11045                host_to_target_timespec(arg5, &ts);
11046            } else {
11047                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11048                                                     &prio, NULL));
11049            }
11050            unlock_user(p, arg2, arg3);
11051            if (arg4 != 0) {
11052                put_user_u32(prio, arg4);
                 }
11053        }
11054        return ret;
11055
11056    /* Not implemented for now... */
11057/*     case TARGET_NR_mq_notify: */
11058/*         break; */
11059
11060    case TARGET_NR_mq_getsetattr:
11061        {
11062            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11063            ret = 0;
11064            if (arg2 != 0) {
11065                if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                         return -TARGET_EFAULT;
                     }
11066                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11067                                           &posix_mq_attr_out));
11068            } else if (arg3 != 0) {
11069                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11070            }
11071            if (ret == 0 && arg3 != 0) {
11072                ret = copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11073            }
11074        }
11075        return ret;
11076#endif
11077
11078#ifdef CONFIG_SPLICE
11079#ifdef TARGET_NR_tee
11080    case TARGET_NR_tee:
11081        {
11082            ret = get_errno(tee(arg1, arg2, arg3, arg4));
11083        }
11084        return ret;
11085#endif
11086#ifdef TARGET_NR_splice
11087    case TARGET_NR_splice:
11088        {
11089            loff_t loff_in, loff_out;
11090            loff_t *ploff_in = NULL, *ploff_out = NULL;
11091            if (arg2) {
11092                if (get_user_u64(loff_in, arg2)) {
11093                    return -TARGET_EFAULT;
11094                }
11095                ploff_in = &loff_in;
11096            }
11097            if (arg4) {
11098                if (get_user_u64(loff_out, arg4)) {
11099                    return -TARGET_EFAULT;
11100                }
11101                ploff_out = &loff_out;
11102            }
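                 /* The offsets are copied in and back out as raw 64-bit values;
                  * get_user_u64()/put_user_u64() take care of target byte order.
                  */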
11103            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11104            if (arg2) {
11105                if (put_user_u64(loff_in, arg2)) {
11106                    return -TARGET_EFAULT;
11107                }
11108            }
11109            if (arg4) {
11110                if (put_user_u64(loff_out, arg4)) {
11111                    return -TARGET_EFAULT;
11112                }
11113            }
11114        }
11115        return ret;
11116#endif
11117#ifdef TARGET_NR_vmsplice
11118    case TARGET_NR_vmsplice:
11119        {
11120            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11121            if (vec != NULL) {
11122                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11123                unlock_iovec(vec, arg2, arg3, 0);
11124            } else {
11125                ret = -host_to_target_errno(errno);
11126            }
11127        }
11128        return ret;
11129#endif
11130#endif /* CONFIG_SPLICE */
11131#ifdef CONFIG_EVENTFD
11132#if defined(TARGET_NR_eventfd)
11133    case TARGET_NR_eventfd:
11134        ret = get_errno(eventfd(arg1, 0));
11135        if (ret >= 0) {
11136            fd_trans_register(ret, &target_eventfd_trans);
11137        }
11138        return ret;
11139#endif
11140#if defined(TARGET_NR_eventfd2)
11141    case TARGET_NR_eventfd2:
11142    {
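             /* EFD_NONBLOCK and EFD_CLOEXEC share their values with O_NONBLOCK
              * and O_CLOEXEC, so translating the target O_* bits by hand is
              * sufficient here.
              */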
11143        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11144        if (arg2 & TARGET_O_NONBLOCK) {
11145            host_flags |= O_NONBLOCK;
11146        }
11147        if (arg2 & TARGET_O_CLOEXEC) {
11148            host_flags |= O_CLOEXEC;
11149        }
11150        ret = get_errno(eventfd(arg1, host_flags));
11151        if (ret >= 0) {
11152            fd_trans_register(ret, &target_eventfd_trans);
11153        }
11154        return ret;
11155    }
11156#endif
11157#endif /* CONFIG_EVENTFD  */
11158#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11159    case TARGET_NR_fallocate:
11160#if TARGET_ABI_BITS == 32
11161        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11162                                  target_offset64(arg5, arg6)));
11163#else
11164        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11165#endif
11166        return ret;
11167#endif
11168#if defined(CONFIG_SYNC_FILE_RANGE)
11169#if defined(TARGET_NR_sync_file_range)
11170    case TARGET_NR_sync_file_range:
11171#if TARGET_ABI_BITS == 32
11172#if defined(TARGET_MIPS)
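             /* The 32-bit MIPS ABI pads after the fd so that the 64-bit offset
              * and nbytes land in aligned register pairs, pushing the flags
              * argument out to arg7.
              */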
11173        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11174                                        target_offset64(arg5, arg6), arg7));
11175#else
11176        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11177                                        target_offset64(arg4, arg5), arg6));
11178#endif /* !TARGET_MIPS */
11179#else
11180        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11181#endif
11182        return ret;
11183#endif
11184#if defined(TARGET_NR_sync_file_range2)
11185    case TARGET_NR_sync_file_range2:
11186        /* This is like sync_file_range but the arguments are reordered */
11187#if TARGET_ABI_BITS == 32
11188        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11189                                        target_offset64(arg5, arg6), arg2));
11190#else
11191        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11192#endif
11193        return ret;
11194#endif
11195#endif
11196#if defined(TARGET_NR_signalfd4)
11197    case TARGET_NR_signalfd4:
11198        return do_signalfd4(arg1, arg2, arg4);
11199#endif
11200#if defined(TARGET_NR_signalfd)
11201    case TARGET_NR_signalfd:
11202        return do_signalfd4(arg1, arg2, 0);
11203#endif
11204#if defined(CONFIG_EPOLL)
11205#if defined(TARGET_NR_epoll_create)
11206    case TARGET_NR_epoll_create:
11207        return get_errno(epoll_create(arg1));
11208#endif
11209#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11210    case TARGET_NR_epoll_create1:
11211        return get_errno(epoll_create1(arg1));
11212#endif
11213#if defined(TARGET_NR_epoll_ctl)
11214    case TARGET_NR_epoll_ctl:
11215    {
11216        struct epoll_event ep;
11217        struct epoll_event *epp = 0;
11218        if (arg4) {
11219            struct target_epoll_event *target_ep;
11220            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11221                return -TARGET_EFAULT;
11222            }
11223            ep.events = tswap32(target_ep->events);
11224            /* The epoll_data_t union is just opaque data to the kernel,
11225             * so we transfer all 64 bits across and need not worry what
11226             * actual data type it is.
11227             */
11228            ep.data.u64 = tswap64(target_ep->data.u64);
11229            unlock_user_struct(target_ep, arg4, 0);
11230            epp = &ep;
11231        }
11232        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11233    }
11234#endif
11235
11236#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11237#if defined(TARGET_NR_epoll_wait)
11238    case TARGET_NR_epoll_wait:
11239#endif
11240#if defined(TARGET_NR_epoll_pwait)
11241    case TARGET_NR_epoll_pwait:
11242#endif
11243    {
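             /* Both variants are implemented via safe_epoll_pwait(); plain
              * epoll_wait is simply the pwait form with a NULL signal mask.
              */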
11244        struct target_epoll_event *target_ep;
11245        struct epoll_event *ep;
11246        int epfd = arg1;
11247        int maxevents = arg3;
11248        int timeout = arg4;
11249
11250        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11251            return -TARGET_EINVAL;
11252        }
11253
11254        target_ep = lock_user(VERIFY_WRITE, arg2,
11255                              maxevents * sizeof(struct target_epoll_event), 1);
11256        if (!target_ep) {
11257            return -TARGET_EFAULT;
11258        }
11259
11260        ep = g_try_new(struct epoll_event, maxevents);
11261        if (!ep) {
11262            unlock_user(target_ep, arg2, 0);
11263            return -TARGET_ENOMEM;
11264        }
11265
11266        switch (num) {
11267#if defined(TARGET_NR_epoll_pwait)
11268        case TARGET_NR_epoll_pwait:
11269        {
11270            target_sigset_t *target_set;
11271            sigset_t _set, *set = &_set;
11272
11273            if (arg5) {
11274                if (arg6 != sizeof(target_sigset_t)) {
11275                    ret = -TARGET_EINVAL;
11276                    break;
11277                }
11278
11279                target_set = lock_user(VERIFY_READ, arg5,
11280                                       sizeof(target_sigset_t), 1);
11281                if (!target_set) {
11282                    ret = -TARGET_EFAULT;
11283                    break;
11284                }
11285                target_to_host_sigset(set, target_set);
11286                unlock_user(target_set, arg5, 0);
11287            } else {
11288                set = NULL;
11289            }
11290
11291            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11292                                             set, SIGSET_T_SIZE));
11293            break;
11294        }
11295#endif
11296#if defined(TARGET_NR_epoll_wait)
11297        case TARGET_NR_epoll_wait:
11298            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11299                                             NULL, 0));
11300            break;
11301#endif
11302        default:
11303            ret = -TARGET_ENOSYS;
11304        }
11305        if (!is_error(ret)) {
11306            int i;
11307            for (i = 0; i < ret; i++) {
11308                target_ep[i].events = tswap32(ep[i].events);
11309                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11310            }
11311            unlock_user(target_ep, arg2,
11312                        ret * sizeof(struct target_epoll_event));
11313        } else {
11314            unlock_user(target_ep, arg2, 0);
11315        }
11316        g_free(ep);
11317        return ret;
11318    }
11319#endif
11320#endif
11321#ifdef TARGET_NR_prlimit64
11322    case TARGET_NR_prlimit64:
11323    {
11324        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11325        struct target_rlimit64 *target_rnew, *target_rold;
11326        struct host_rlimit64 rnew, rold, *rnewp = 0;
11327        int resource = target_to_host_resource(arg2);
11328        if (arg3) {
11329            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11330                return -TARGET_EFAULT;
11331            }
11332            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11333            rnew.rlim_max = tswap64(target_rnew->rlim_max);
11334            unlock_user_struct(target_rnew, arg3, 0);
11335            rnewp = &rnew;
11336        }
11337
11338        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11339        if (!is_error(ret) && arg4) {
11340            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11341                return -TARGET_EFAULT;
11342            }
11343            target_rold->rlim_cur = tswap64(rold.rlim_cur);
11344            target_rold->rlim_max = tswap64(rold.rlim_max);
11345            unlock_user_struct(target_rold, arg4, 1);
11346        }
11347        return ret;
11348    }
11349#endif
11350#ifdef TARGET_NR_gethostname
11351    case TARGET_NR_gethostname:
11352    {
11353        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11354        if (name) {
11355            ret = get_errno(gethostname(name, arg2));
11356            unlock_user(name, arg1, arg2);
11357        } else {
11358            ret = -TARGET_EFAULT;
11359        }
11360        return ret;
11361    }
11362#endif
11363#ifdef TARGET_NR_atomic_cmpxchg_32
11364    case TARGET_NR_atomic_cmpxchg_32:
11365    {
11366        /* This should use start_exclusive from main.c: without it another
              * guest thread can race between the load below and the conditional
              * store, so the compare-and-exchange is not truly atomic.
              */
11367        abi_ulong mem_value;
11368        if (get_user_u32(mem_value, arg6)) {
11369            target_siginfo_t info;
11370            info.si_signo = SIGSEGV;
11371            info.si_errno = 0;
11372            info.si_code = TARGET_SEGV_MAPERR;
11373            info._sifields._sigfault._addr = arg6;
11374            queue_signal((CPUArchState *)cpu_env, info.si_signo,
11375                         QEMU_SI_FAULT, &info);
11376            ret = 0xdeadbeef;
11377
11378        }
11379        if (mem_value == arg2) {
11380            put_user_u32(arg1, arg6);
             }
11381        return mem_value;
11382    }
11383#endif
11384#ifdef TARGET_NR_atomic_barrier
11385    case TARGET_NR_atomic_barrier:
11386        /* Like the kernel implementation and the QEMU ARM barrier,
11387         * this is treated as a no-op.
              */
11388        return 0;
11389#endif
11390
11391#ifdef TARGET_NR_timer_create
11392    case TARGET_NR_timer_create:
11393    {
11394        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11395
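             /* The timer id handed back to the guest is the index into the host
              * timer table tagged with TIMER_MAGIC; get_timer_id() validates and
              * strips the tag again on the other timer_* calls.
              */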
11396        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11397
11398        int clkid = arg1;
11399        int timer_index = next_free_host_timer();
11400
11401        if (timer_index < 0) {
11402            ret = -TARGET_EAGAIN;
11403        } else {
11404            timer_t *phtimer = g_posix_timers + timer_index;
11405
11406            if (arg2) {
11407                phost_sevp = &host_sevp;
11408                ret = target_to_host_sigevent(phost_sevp, arg2);
11409                if (ret != 0) {
11410                    return ret;
11411                }
11412            }
11413
11414            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11415            if (ret) {
11416                phtimer = NULL;
11417            } else {
11418                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11419                    return -TARGET_EFAULT;
11420                }
11421            }
11422        }
11423        return ret;
11424    }
11425#endif
11426
11427#ifdef TARGET_NR_timer_settime
11428    case TARGET_NR_timer_settime:
11429    {
11430        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11431         * struct itimerspec * old_value */
11432        target_timer_t timerid = get_timer_id(arg1);
11433
11434        if (timerid < 0) {
11435            ret = timerid;
11436        } else if (arg3 == 0) {
11437            ret = -TARGET_EINVAL;
11438        } else {
11439            timer_t htimer = g_posix_timers[timerid];
11440            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11441
11442            if (target_to_host_itimerspec(&hspec_new, arg3)) {
11443                return -TARGET_EFAULT;
11444            }
11445            ret = get_errno(
11446                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11447            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11448                return -TARGET_EFAULT;
11449            }
11450        }
11451        return ret;
11452    }
11453#endif
11454
11455#ifdef TARGET_NR_timer_gettime
11456    case TARGET_NR_timer_gettime:
11457    {
11458        /* args: timer_t timerid, struct itimerspec *curr_value */
11459        target_timer_t timerid = get_timer_id(arg1);
11460
11461        if (timerid < 0) {
11462            ret = timerid;
11463        } else if (!arg2) {
11464            ret = -TARGET_EFAULT;
11465        } else {
11466            timer_t htimer = g_posix_timers[timerid];
11467            struct itimerspec hspec;
11468            ret = get_errno(timer_gettime(htimer, &hspec));
11469
11470            if (host_to_target_itimerspec(arg2, &hspec)) {
11471                ret = -TARGET_EFAULT;
11472            }
11473        }
11474        return ret;
11475    }
11476#endif
11477
11478#ifdef TARGET_NR_timer_getoverrun
11479    case TARGET_NR_timer_getoverrun:
11480    {
11481        /* args: timer_t timerid */
11482        target_timer_t timerid = get_timer_id(arg1);
11483
11484        if (timerid < 0) {
11485            ret = timerid;
11486        } else {
11487            timer_t htimer = g_posix_timers[timerid];
11488            ret = get_errno(timer_getoverrun(htimer));
11489        }
11491        return ret;
11492    }
11493#endif
11494
11495#ifdef TARGET_NR_timer_delete
11496    case TARGET_NR_timer_delete:
11497    {
11498        /* args: timer_t timerid */
11499        target_timer_t timerid = get_timer_id(arg1);
11500
11501        if (timerid < 0) {
11502            ret = timerid;
11503        } else {
11504            timer_t htimer = g_posix_timers[timerid];
11505            ret = get_errno(timer_delete(htimer));
11506            g_posix_timers[timerid] = 0;
11507        }
11508        return ret;
11509    }
11510#endif
11511
11512#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11513    case TARGET_NR_timerfd_create:
11514        return get_errno(timerfd_create(arg1,
11515                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11516#endif
11517
11518#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11519    case TARGET_NR_timerfd_gettime:
11520        {
11521            struct itimerspec its_curr;
11522
11523            ret = get_errno(timerfd_gettime(arg1, &its_curr));
11524
11525            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11526                return -TARGET_EFAULT;
11527            }
11528        }
11529        return ret;
11530#endif
11531
11532#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11533    case TARGET_NR_timerfd_settime:
11534        {
11535            struct itimerspec its_new, its_old, *p_new;
11536
11537            if (arg3) {
11538                if (target_to_host_itimerspec(&its_new, arg3)) {
11539                    return -TARGET_EFAULT;
11540                }
11541                p_new = &its_new;
11542            } else {
11543                p_new = NULL;
11544            }
11545
11546            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11547
11548            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11549                return -TARGET_EFAULT;
11550            }
11551        }
11552        return ret;
11553#endif
11554
11555#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11556    case TARGET_NR_ioprio_get:
11557        return get_errno(ioprio_get(arg1, arg2));
11558#endif
11559
11560#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11561    case TARGET_NR_ioprio_set:
11562        return get_errno(ioprio_set(arg1, arg2, arg3));
11563#endif
11564
11565#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11566    case TARGET_NR_setns:
11567        return get_errno(setns(arg1, arg2));
11568#endif
11569#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11570    case TARGET_NR_unshare:
11571        return get_errno(unshare(arg1));
11572#endif
11573#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11574    case TARGET_NR_kcmp:
11575        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11576#endif
11577#ifdef TARGET_NR_swapcontext
11578    case TARGET_NR_swapcontext:
11579        /* PowerPC specific.  */
11580        return do_swapcontext(cpu_env, arg1, arg2, arg3);
11581#endif
11582
11583    default:
11584        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11585        return -TARGET_ENOSYS;
11586    }
11587    return ret;
11588}
11589
11590abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11591                    abi_long arg2, abi_long arg3, abi_long arg4,
11592                    abi_long arg5, abi_long arg6, abi_long arg7,
11593                    abi_long arg8)
11594{
11595    CPUState *cpu = ENV_GET_CPU(cpu_env);
11596    abi_long ret;
11597
11598#ifdef DEBUG_ERESTARTSYS
11599    /* Debug-only code for exercising the syscall-restart code paths
11600     * in the per-architecture cpu main loops: restart every syscall
11601     * the guest makes once before letting it through.
11602     */
11603    {
11604        static bool flag;
11605        flag = !flag;
11606        if (flag) {
11607            return -TARGET_ERESTARTSYS;
11608        }
11609    }
11610#endif
11611
11612    trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11613                             arg5, arg6, arg7, arg8);
11614
11615    if (unlikely(do_strace)) {
11616        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11617        ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11618                          arg5, arg6, arg7, arg8);
11619        print_syscall_ret(num, ret);
11620    } else {
11621        ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11622                          arg5, arg6, arg7, arg8);
11623    }
11624
11625    trace_guest_user_syscall_ret(cpu, num, ret);
11626    return ret;
11627}
11628